diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index af03700..873463c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,12 +7,17 @@ on:
- 'integrated/**'
- 'stl-preview-head/**'
- 'stl-preview-base/**'
+ pull_request:
+ branches-ignore:
+ - 'stl-preview-head/**'
+ - 'stl-preview-base/**'
jobs:
lint:
timeout-minutes: 10
name: lint
runs-on: ${{ github.repository == 'stainless-sdks/sunrise-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
steps:
- uses: actions/checkout@v4
@@ -31,6 +36,7 @@ jobs:
timeout-minutes: 5
name: build
runs-on: ${{ github.repository == 'stainless-sdks/sunrise-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
permissions:
contents: read
id-token: write
@@ -66,6 +72,7 @@ jobs:
timeout-minutes: 10
name: test
runs-on: ${{ github.repository == 'stainless-sdks/sunrise-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
steps:
- uses: actions/checkout@v4
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index e7ca613..64f3cdd 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.7.0"
+ ".": "0.8.0"
}
diff --git a/.stats.yml b/.stats.yml
index aa7b006..b868f52 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 52
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-8d75c58c83d13f67b6a125c3eb4639d213c91aec7dbb6e06f0cd5bdfc074d54e.yml
-openapi_spec_hash: 47795284631814d0f8eb42f6a0d5a3b3
-config_hash: 1ecef0ff4fd125bbc00eec65e3dd4798
+configured_endpoints: 34
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-db7245c74772a8cd47c02886619fed0568fbb58b1fa8aba0dc77524b924a4fb6.yml
+openapi_spec_hash: ca3de8d7b14b78683e39464fe7d4b1e1
+config_hash: 410f8a2f86f605885911277be47c3c78
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8a3c2da..68ba9e6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,47 @@
# Changelog
+## 0.8.0 (2025-08-26)
+
+Full Changelog: [v0.7.0...v0.8.0](https://github.com/ContextualAI/contextual-client-node/compare/v0.7.0...v0.8.0)
+
+### Features
+
+* **api:** update via SDK Studio ([1cd61de](https://github.com/ContextualAI/contextual-client-node/commit/1cd61de5fa3d11098d3ca5f4afef6caa67cac129))
+* **api:** update via SDK Studio ([461f964](https://github.com/ContextualAI/contextual-client-node/commit/461f964fd0e3209c1dd3094e347455d492afbe7c))
+* **client:** add support for endpoint-specific base URLs ([4375198](https://github.com/ContextualAI/contextual-client-node/commit/4375198ddd324eadac0053fbf0e9284f0686f6a5))
+
+
+### Bug Fixes
+
+* **ci:** release-doctor — report correct token name ([2339044](https://github.com/ContextualAI/contextual-client-node/commit/2339044a28b8491311eb984fbe0f5b2497e57b32))
+* **client:** don't send `Content-Type` for bodyless methods ([70fe78d](https://github.com/ContextualAI/contextual-client-node/commit/70fe78dada5b43beb40a5efb9d371cdcb2ac8ea0))
+* publish script — handle NPM errors correctly ([9855ea4](https://github.com/ContextualAI/contextual-client-node/commit/9855ea41c55355a17d1a6282bc8d158b72da2f63))
+
+
+### Chores
+
+* **ci:** enable for pull requests ([23d5fa7](https://github.com/ContextualAI/contextual-client-node/commit/23d5fa79636f991e3d4ff7a454ee973bb1fc2691))
+* **ci:** only run for pushes and fork pull requests ([da8f41a](https://github.com/ContextualAI/contextual-client-node/commit/da8f41a626313a61f8eec1ccb52834021ec5c134))
+* **deps:** update dependency node-fetch to v2.6.13 ([a78c708](https://github.com/ContextualAI/contextual-client-node/commit/a78c708d54ad4a15626784c2cd28f9f5badd32bf))
+* **docs:** grammar improvements ([92210b2](https://github.com/ContextualAI/contextual-client-node/commit/92210b2f9cbfd930dfb7fcc8c6464a8d0c89bda7))
+* **docs:** use top-level-await in example snippets ([39783f4](https://github.com/ContextualAI/contextual-client-node/commit/39783f455c8d9d669d42a0671b239b51a196f743))
+* improve publish-npm script --latest tag logic ([bf8c320](https://github.com/ContextualAI/contextual-client-node/commit/bf8c320845e3781558b5c89c2d0dfda127138d13))
+* **internal:** formatting change ([377f894](https://github.com/ContextualAI/contextual-client-node/commit/377f8945d985646d593b5b7ab5d4b4f8b49e3f8c))
+* **internal:** make base APIResource abstract ([122895d](https://github.com/ContextualAI/contextual-client-node/commit/122895d96bcc6de97dc918dca7b62d8ababb0af5))
+* **internal:** move publish config ([119c90e](https://github.com/ContextualAI/contextual-client-node/commit/119c90eb630dd8f659ceaa54b0b8e2b9b066c5f4))
+* **internal:** remove redundant imports config ([81939bc](https://github.com/ContextualAI/contextual-client-node/commit/81939bc05046667a6abe50b9a004b876b69a2cbc))
+* **internal:** update comment in script ([4c94556](https://github.com/ContextualAI/contextual-client-node/commit/4c94556258643943f9b0bb20ec704031cd5aaea2))
+* **internal:** update examples ([8b244f3](https://github.com/ContextualAI/contextual-client-node/commit/8b244f3de7b94001daf4e2de4761d57454d34fb9))
+* make some internal functions async ([cfbf5bc](https://github.com/ContextualAI/contextual-client-node/commit/cfbf5bc81cb65347306a63f6901c0682f2a827cf))
+* mention unit type in timeout docs ([4271686](https://github.com/ContextualAI/contextual-client-node/commit/427168601652284f4f664680cd66302d4ac5485a))
+* update @stainless-api/prism-cli to v5.15.0 ([5357389](https://github.com/ContextualAI/contextual-client-node/commit/5357389ae828af04cb9595175c2603554d43902a))
+* update CI script ([b12ee6f](https://github.com/ContextualAI/contextual-client-node/commit/b12ee6ff77acc76f4e15a75e6b980fb01a09b1e0))
+
+
+### Refactors
+
+* **types:** replace Record with mapped types ([b50e9f1](https://github.com/ContextualAI/contextual-client-node/commit/b50e9f1b6306889aff9ee34fb2d29dcad6d8c3f7))
+
## 0.7.0 (2025-05-13)
Full Changelog: [v0.6.0...v0.7.0](https://github.com/ContextualAI/contextual-client-node/compare/v0.6.0...v0.7.0)
diff --git a/README.md b/README.md
index db698e4..4d38e40 100644
--- a/README.md
+++ b/README.md
@@ -24,13 +24,9 @@ const client = new ContextualAI({
apiKey: process.env['CONTEXTUAL_API_KEY'], // This is the default and can be omitted
});
-async function main() {
- const createAgentOutput = await client.agents.create({ name: 'Example' });
+const createAgentOutput = await client.agents.create({ name: 'Example' });
- console.log(createAgentOutput.id);
-}
-
-main();
+console.log(createAgentOutput.id);
```
### Request & Response types
@@ -45,12 +41,8 @@ const client = new ContextualAI({
apiKey: process.env['CONTEXTUAL_API_KEY'], // This is the default and can be omitted
});
-async function main() {
- const params: ContextualAI.AgentCreateParams = { name: 'Example' };
- const createAgentOutput: ContextualAI.CreateAgentOutput = await client.agents.create(params);
-}
-
-main();
+const params: ContextualAI.AgentCreateParams = { name: 'Example' };
+const createAgentOutput: ContextualAI.CreateAgentOutput = await client.agents.create(params);
```
Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors.
@@ -103,19 +95,15 @@ a subclass of `APIError` will be thrown:
```ts
-async function main() {
- const createAgentOutput = await client.agents.create({ name: 'Example' }).catch(async (err) => {
- if (err instanceof ContextualAI.APIError) {
- console.log(err.status); // 400
- console.log(err.name); // BadRequestError
- console.log(err.headers); // {server: 'nginx', ...}
- } else {
- throw err;
- }
- });
-}
-
-main();
+const createAgentOutput = await client.agents.create({ name: 'Example' }).catch(async (err) => {
+ if (err instanceof ContextualAI.APIError) {
+ console.log(err.status); // 400
+ console.log(err.name); // BadRequestError
+ console.log(err.headers); // {server: 'nginx', ...}
+ } else {
+ throw err;
+ }
+});
```
Error codes are as follows:
diff --git a/SECURITY.md b/SECURITY.md
index 92a473f..97e18f0 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -16,11 +16,11 @@ before making any information public.
## Reporting Non-SDK Related Security Issues
If you encounter security issues that are not directly related to SDKs but pertain to the services
-or products provided by Contextual AI please follow the respective company's security reporting guidelines.
+or products provided by Contextual AI, please follow the respective company's security reporting guidelines.
### Contextual AI Terms and Policies
-Please contact support@contextual.ai for any questions or concerns regarding security of our services.
+Please contact support@contextual.ai for any questions or concerns regarding the security of our services.
---
diff --git a/api.md b/api.md
index 79bb2fa..0ee2ce5 100644
--- a/api.md
+++ b/api.md
@@ -6,12 +6,14 @@ Types:
- Datastore
- DatastoreMetadata
- ListDatastoresResponse
+- DatastoreUpdateResponse
- DatastoreDeleteResponse
- DatastoreResetResponse
Methods:
- client.datastores.create({ ...params }) -> CreateDatastoreResponse
+- client.datastores.update(datastoreId, { ...params }) -> DatastoreUpdateResponse
- client.datastores.list({ ...params }) -> DatastoresDatastoresPage
- client.datastores.delete(datastoreId) -> unknown
- client.datastores.metadata(datastoreId) -> DatastoreMetadata
@@ -21,16 +23,19 @@ Methods:
Types:
+- BaseMetadataFilter
- CompositeMetadataFilter
- DocumentMetadata
- IngestionResponse
- ListDocumentsResponse
- DocumentDeleteResponse
+- DocumentGetParseResultResponse
Methods:
- client.datastores.documents.list(datastoreId, { ...params }) -> DocumentMetadataDocumentsPage
- client.datastores.documents.delete(datastoreId, documentId) -> unknown
+- client.datastores.documents.getParseResult(datastoreId, documentId, { ...params }) -> DocumentGetParseResultResponse
- client.datastores.documents.ingest(datastoreId, { ...params }) -> IngestionResponse
- client.datastores.documents.metadata(datastoreId, documentId) -> DocumentMetadata
- client.datastores.documents.setMetadata(datastoreId, documentId, { ...params }) -> DocumentMetadata
@@ -59,6 +64,7 @@ Methods:
- client.agents.update(agentId, { ...params }) -> unknown
- client.agents.list({ ...params }) -> AgentsPage
- client.agents.delete(agentId) -> unknown
+- client.agents.copy(agentId) -> CreateAgentOutput
- client.agents.metadata(agentId) -> AgentMetadataResponse
- client.agents.reset(agentId) -> unknown
@@ -78,102 +84,6 @@ Methods:
- client.agents.query.metrics(agentId, { ...params }) -> QueryMetricsResponse
- client.agents.query.retrievalInfo(agentId, messageId, { ...params }) -> RetrievalInfoResponse
-## Evaluate
-
-Types:
-
-- CreateEvaluationResponse
-
-Methods:
-
-- client.agents.evaluate.create(agentId, { ...params }) -> CreateEvaluationResponse
-
-### Jobs
-
-Types:
-
-- EvaluationJobMetadata
-- ListEvaluationJobsResponse
-- JobCancelResponse
-
-Methods:
-
-- client.agents.evaluate.jobs.list(agentId) -> ListEvaluationJobsResponse
-- client.agents.evaluate.jobs.cancel(agentId, jobId) -> unknown
-- client.agents.evaluate.jobs.metadata(agentId, jobId) -> EvaluationJobMetadata
-
-## Datasets
-
-Types:
-
-- CreateDatasetResponse
-- DatasetMetadata
-- ListDatasetsResponse
-
-### Tune
-
-Types:
-
-- TuneDeleteResponse
-
-Methods:
-
-- client.agents.datasets.tune.create(agentId, { ...params }) -> CreateDatasetResponse
-- client.agents.datasets.tune.retrieve(agentId, datasetName, { ...params }) -> Response
-- client.agents.datasets.tune.update(agentId, datasetName, { ...params }) -> CreateDatasetResponse
-- client.agents.datasets.tune.list(agentId, { ...params }) -> ListDatasetsResponse
-- client.agents.datasets.tune.delete(agentId, datasetName) -> unknown
-- client.agents.datasets.tune.metadata(agentId, datasetName, { ...params }) -> DatasetMetadata
-
-### Evaluate
-
-Types:
-
-- EvaluateDeleteResponse
-
-Methods:
-
-- client.agents.datasets.evaluate.create(agentId, { ...params }) -> CreateDatasetResponse
-- client.agents.datasets.evaluate.retrieve(agentId, datasetName, { ...params }) -> Response
-- client.agents.datasets.evaluate.update(agentId, datasetName, { ...params }) -> CreateDatasetResponse
-- client.agents.datasets.evaluate.list(agentId, { ...params }) -> ListDatasetsResponse
-- client.agents.datasets.evaluate.delete(agentId, datasetName) -> unknown
-- client.agents.datasets.evaluate.metadata(agentId, datasetName, { ...params }) -> DatasetMetadata
-
-## Tune
-
-Types:
-
-- CreateTuneResponse
-
-Methods:
-
-- client.agents.tune.create(agentId, { ...params }) -> CreateTuneResponse
-
-### Jobs
-
-Types:
-
-- ListTuneJobsResponse
-- TuneJobMetadata
-- JobDeleteResponse
-
-Methods:
-
-- client.agents.tune.jobs.list(agentId) -> ListTuneJobsResponse
-- client.agents.tune.jobs.delete(agentId, jobId) -> unknown
-- client.agents.tune.jobs.metadata(agentId, jobId) -> TuneJobMetadata
-
-### Models
-
-Types:
-
-- ListTuneModelsResponse
-
-Methods:
-
-- client.agents.tune.models.list(agentId) -> ListTuneModelsResponse
-
# Users
Types:
diff --git a/bin/check-release-environment b/bin/check-release-environment
index c2bca0c..e4b6d58 100644
--- a/bin/check-release-environment
+++ b/bin/check-release-environment
@@ -3,7 +3,7 @@
errors=()
if [ -z "${NPM_TOKEN}" ]; then
- errors+=("The CONTEXTUAL_AI_NPM_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets")
+ errors+=("The NPM_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets")
fi
lenErrors=${#errors[@]}
diff --git a/bin/publish-npm b/bin/publish-npm
index 4c21181..45e8aa8 100644
--- a/bin/publish-npm
+++ b/bin/publish-npm
@@ -4,22 +4,58 @@ set -eux
npm config set '//registry.npmjs.org/:_authToken' "$NPM_TOKEN"
-# Build the project
yarn build
-
-# Navigate to the dist directory
cd dist
-# Get the version from package.json
-VERSION="$(node -p "require('./package.json').version")"
+# Get package name and version from package.json
+PACKAGE_NAME="$(jq -r -e '.name' ./package.json)"
+VERSION="$(jq -r -e '.version' ./package.json)"
+
+# Get latest version from npm
+#
+# If the package doesn't exist, npm will return:
+# {
+# "error": {
+# "code": "E404",
+# "summary": "Unpublished on 2025-06-05T09:54:53.528Z",
+# "detail": "'the_package' is not in this registry..."
+# }
+# }
+NPM_INFO="$(npm view "$PACKAGE_NAME" version --json 2>/dev/null || true)"
+
+# Check if we got an E404 error
+if echo "$NPM_INFO" | jq -e '.error.code == "E404"' > /dev/null 2>&1; then
+ # Package doesn't exist yet, no last version
+ LAST_VERSION=""
+elif echo "$NPM_INFO" | jq -e '.error' > /dev/null 2>&1; then
+ # Report other errors
+ echo "ERROR: npm returned unexpected data:"
+ echo "$NPM_INFO"
+ exit 1
+else
+ # Success - get the version
+ LAST_VERSION=$(echo "$NPM_INFO" | jq -r '.') # strip quotes
+fi
-# Extract the pre-release tag if it exists
+# Check if current version is pre-release (e.g. alpha / beta / rc)
+CURRENT_IS_PRERELEASE=false
if [[ "$VERSION" =~ -([a-zA-Z]+) ]]; then
- # Extract the part before any dot in the pre-release identifier
- TAG="${BASH_REMATCH[1]}"
+ CURRENT_IS_PRERELEASE=true
+ CURRENT_TAG="${BASH_REMATCH[1]}"
+fi
+
+# Check if last version is a stable release
+LAST_IS_STABLE_RELEASE=true
+if [[ -z "$LAST_VERSION" || "$LAST_VERSION" =~ -([a-zA-Z]+) ]]; then
+ LAST_IS_STABLE_RELEASE=false
+fi
+
+# Use a corresponding alpha/beta tag if there already is a stable release and we're publishing a prerelease.
+if $CURRENT_IS_PRERELEASE && $LAST_IS_STABLE_RELEASE; then
+ TAG="$CURRENT_TAG"
else
TAG="latest"
fi
# Publish with the appropriate tag
-yarn publish --access public --tag "$TAG"
+yarn publish --tag "$TAG"
diff --git a/package.json b/package.json
index 7837a69..3eeab3d 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "contextual-client",
- "version": "0.7.0",
+ "version": "0.8.0",
"description": "The official TypeScript library for the Contextual AI API",
"author": "Contextual AI ",
"types": "dist/index.d.ts",
@@ -13,6 +13,9 @@
"**/*"
],
"private": false,
+ "publishConfig": {
+ "access": "public"
+ },
"scripts": {
"test": "./scripts/test",
"build": "./scripts/build",
@@ -58,10 +61,6 @@
"./shims/web.js",
"./shims/web.mjs"
],
- "imports": {
- "contextual-client": ".",
- "contextual-client/*": "./src/*"
- },
"exports": {
"./_shims/auto/*": {
"deno": {
diff --git a/scripts/build b/scripts/build
index d4a6875..dba69bb 100755
--- a/scripts/build
+++ b/scripts/build
@@ -28,7 +28,7 @@ fi
node scripts/utils/make-dist-package-json.cjs > dist/package.json
# build to .js/.mjs/.d.ts files
-npm exec tsc-multi
+./node_modules/.bin/tsc-multi
# copy over handwritten .js/.mjs/.d.ts files
cp src/_shims/*.{d.ts,js,mjs,md} dist/_shims
cp src/_shims/auto/*.{d.ts,js,mjs} dist/_shims/auto
diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh
index 1c5eabf..04daa27 100755
--- a/scripts/utils/upload-artifact.sh
+++ b/scripts/utils/upload-artifact.sh
@@ -12,7 +12,7 @@ if [[ "$SIGNED_URL" == "null" ]]; then
exit 1
fi
-UPLOAD_RESPONSE=$(tar -cz dist | curl -v -X PUT \
+UPLOAD_RESPONSE=$(tar -cz "${BUILD_PATH:-dist}" | curl -v -X PUT \
-H "Content-Type: application/gzip" \
--data-binary @- "$SIGNED_URL" 2>&1)
diff --git a/src/core.ts b/src/core.ts
index 57e9e88..0824fa9 100644
--- a/src/core.ts
+++ b/src/core.ts
@@ -170,6 +170,7 @@ export class APIPromise extends Promise {
export abstract class APIClient {
baseURL: string;
+ #baseURLOverridden: boolean;
maxRetries: number;
timeout: number;
httpAgent: Agent | undefined;
@@ -179,18 +180,21 @@ export abstract class APIClient {
constructor({
baseURL,
+ baseURLOverridden,
maxRetries = 2,
timeout = 60000, // 1 minute
httpAgent,
fetch: overriddenFetch,
}: {
baseURL: string;
+ baseURLOverridden: boolean;
maxRetries?: number | undefined;
timeout: number | undefined;
httpAgent: Agent | undefined;
fetch: Fetch | undefined;
}) {
this.baseURL = baseURL;
+ this.#baseURLOverridden = baseURLOverridden;
this.maxRetries = validatePositiveInteger('maxRetries', maxRetries);
this.timeout = validatePositiveInteger('timeout', timeout);
this.httpAgent = httpAgent;
@@ -213,7 +217,7 @@ export abstract class APIClient {
protected defaultHeaders(opts: FinalRequestOptions): Headers {
return {
Accept: 'application/json',
- 'Content-Type': 'application/json',
+ ...(['head', 'get'].includes(opts.method) ? {} : { 'Content-Type': 'application/json' }),
'User-Agent': this.getUserAgent(),
...getPlatformHeaders(),
...this.authHeaders(opts),
@@ -295,12 +299,12 @@ export abstract class APIClient {
return null;
}
- buildRequest(
+ async buildRequest(
inputOptions: FinalRequestOptions,
{ retryCount = 0 }: { retryCount?: number } = {},
- ): { req: RequestInit; url: string; timeout: number } {
+ ): Promise<{ req: RequestInit; url: string; timeout: number }> {
const options = { ...inputOptions };
- const { method, path, query, headers: headers = {} } = options;
+ const { method, path, query, defaultBaseURL, headers: headers = {} } = options;
const body =
ArrayBuffer.isView(options.body) || (options.__binaryRequest && typeof options.body === 'string') ?
@@ -310,7 +314,7 @@ export abstract class APIClient {
: null;
const contentLength = this.calculateContentLength(body);
- const url = this.buildURL(path!, query);
+ const url = this.buildURL(path!, query, defaultBaseURL);
if ('timeout' in options) validatePositiveInteger('timeout', options.timeout);
options.timeout = options.timeout ?? this.timeout;
const httpAgent = options.httpAgent ?? this.httpAgent ?? getDefaultAgent(url);
@@ -446,7 +450,9 @@ export abstract class APIClient {
await this.prepareOptions(options);
- const { req, url, timeout } = this.buildRequest(options, { retryCount: maxRetries - retriesRemaining });
+ const { req, url, timeout } = await this.buildRequest(options, {
+ retryCount: maxRetries - retriesRemaining,
+ });
await this.prepareRequest(req, { url, options });
@@ -503,11 +509,12 @@ export abstract class APIClient {
return new PagePromise(this, request, Page);
}
- buildURL(path: string, query: Req | null | undefined): string {
+ buildURL(path: string, query: Req | null | undefined, defaultBaseURL?: string | undefined): string {
+ const baseURL = (!this.#baseURLOverridden && defaultBaseURL) || this.baseURL;
const url =
isAbsoluteURL(path) ?
new URL(path)
- : new URL(this.baseURL + (this.baseURL.endsWith('/') && path.startsWith('/') ? path.slice(1) : path));
+ : new URL(baseURL + (baseURL.endsWith('/') && path.startsWith('/') ? path.slice(1) : path));
const defaultQuery = this.defaultQuery();
if (!isEmptyObj(defaultQuery)) {
@@ -792,6 +799,7 @@ export type RequestOptions<
query?: Req | undefined;
body?: Req | null | undefined;
headers?: Headers | undefined;
+ defaultBaseURL?: string | undefined;
maxRetries?: number;
stream?: boolean | undefined;
@@ -813,6 +821,7 @@ const requestOptionsKeys: KeysEnum = {
query: true,
body: true,
headers: true,
+ defaultBaseURL: true,
maxRetries: true,
stream: true,
diff --git a/src/index.ts b/src/index.ts
index f17be44..327ce39 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -71,6 +71,8 @@ import {
DatastoreListParams,
DatastoreMetadata,
DatastoreResetResponse,
+ DatastoreUpdateParams,
+ DatastoreUpdateResponse,
Datastores,
DatastoresDatastoresPage,
ListDatastoresResponse,
@@ -95,6 +97,8 @@ export interface ClientOptions {
*
* Note that request timeouts are retried by default, so in a worst-case scenario you may wait
* much longer than this timeout before the promise succeeds or fails.
+ *
+ * @unit milliseconds
*/
timeout?: number | undefined;
@@ -178,6 +182,7 @@ export class ContextualAI extends Core.APIClient {
super({
baseURL: options.baseURL!,
+ baseURLOverridden: baseURL ? baseURL !== 'https://api.contextual.ai/v1' : false,
timeout: options.timeout ?? 60000 /* 1 minute */,
httpAgent: options.httpAgent,
maxRetries: options.maxRetries,
@@ -197,6 +202,13 @@ export class ContextualAI extends Core.APIClient {
generate: API.Generate = new API.Generate(this);
parse: API.Parse = new API.Parse(this);
+ /**
+ * Check whether the base URL is set to its default.
+ */
+ #baseURLOverridden(): boolean {
+ return this.baseURL !== 'https://api.contextual.ai/v1';
+ }
+
protected override defaultQuery(): Core.DefaultQuery | undefined {
return this._options.defaultQuery;
}
@@ -247,6 +259,7 @@ ContextualAI.LMUnit = LMUnit;
ContextualAI.Rerank = Rerank;
ContextualAI.Generate = Generate;
ContextualAI.Parse = Parse;
+
export declare namespace ContextualAI {
export type RequestOptions = Core.RequestOptions;
@@ -274,10 +287,12 @@ export declare namespace ContextualAI {
type Datastore as Datastore,
type DatastoreMetadata as DatastoreMetadata,
type ListDatastoresResponse as ListDatastoresResponse,
+ type DatastoreUpdateResponse as DatastoreUpdateResponse,
type DatastoreDeleteResponse as DatastoreDeleteResponse,
type DatastoreResetResponse as DatastoreResetResponse,
DatastoresDatastoresPage as DatastoresDatastoresPage,
type DatastoreCreateParams as DatastoreCreateParams,
+ type DatastoreUpdateParams as DatastoreUpdateParams,
type DatastoreListParams as DatastoreListParams,
};
diff --git a/src/resource.ts b/src/resource.ts
index 7f9b100..adf0750 100644
--- a/src/resource.ts
+++ b/src/resource.ts
@@ -2,7 +2,7 @@
import type { ContextualAI } from './index';
-export class APIResource {
+export abstract class APIResource {
protected _client: ContextualAI;
constructor(client: ContextualAI) {
diff --git a/src/resources/agents/agents.ts b/src/resources/agents/agents.ts
index 1189cdc..b5b9285 100644
--- a/src/resources/agents/agents.ts
+++ b/src/resources/agents/agents.ts
@@ -15,19 +15,11 @@ import {
QueryRetrievalInfoParams,
RetrievalInfoResponse,
} from './query';
-import * as DatasetsAPI from './datasets/datasets';
-import { CreateDatasetResponse, DatasetMetadata, Datasets, ListDatasetsResponse } from './datasets/datasets';
-import * as EvaluateAPI from './evaluate/evaluate';
-import { CreateEvaluationResponse, Evaluate, EvaluateCreateParams } from './evaluate/evaluate';
-import * as TuneAPI from './tune/tune';
-import { CreateTuneResponse, Tune, TuneCreateParams } from './tune/tune';
+import * as DocumentsAPI from '../datastores/documents';
import { Page, type PageParams } from '../../pagination';
export class Agents extends APIResource {
query: QueryAPI.Query = new QueryAPI.Query(this._client);
- evaluate: EvaluateAPI.Evaluate = new EvaluateAPI.Evaluate(this._client);
- datasets: DatasetsAPI.Datasets = new DatasetsAPI.Datasets(this._client);
- tune: TuneAPI.Tune = new TuneAPI.Tune(this._client);
/**
* Create a new `Agent` with a specific configuration.
@@ -86,6 +78,14 @@ export class Agents extends APIResource {
return this._client.delete(`/agents/${agentId}`, options);
}
+ /**
+ * Copy an existing agent with all its configurations and datastore associations.
+ * The copied agent will have "[COPY]" appended to its name.
+ */
+ copy(agentId: string, options?: Core.RequestOptions): Core.APIPromise {
+ return this._client.post(`/agents/${agentId}/copy`, options);
+ }
+
/**
* Get metadata and configuration of a given `Agent`.
*/
@@ -139,12 +139,44 @@ export interface AgentConfigs {
*/
global_config?: GlobalConfig;
+ /**
+ * Parameters that affect the agent's query reformulation
+ */
+ reformulation_config?: AgentConfigs.ReformulationConfig;
+
/**
* Parameters that affect how the agent retrieves from datastore(s)
*/
retrieval_config?: RetrievalConfig;
}
+export namespace AgentConfigs {
+ /**
+ * Parameters that affect the agent's query reformulation
+ */
+ export interface ReformulationConfig {
+ /**
+ * Whether to enable query decomposition.
+ */
+ enable_query_decomposition?: boolean;
+
+ /**
+ * Whether to enable query expansion.
+ */
+ enable_query_expansion?: boolean;
+
+ /**
+ * The prompt to use for query decomposition.
+ */
+ query_decomposition_prompt?: string;
+
+ /**
+ * The prompt to use for query expansion.
+ */
+ query_expansion_prompt?: string;
+ }
+}
+
/**
* Response to GET Agent request
*/
@@ -159,6 +191,11 @@ export interface AgentMetadata {
*/
name: string;
+ /**
+ * The template used to create this agent.
+ */
+ template_name: string;
+
/**
* The following advanced parameters are experimental and subject to change.
*/
@@ -187,6 +224,11 @@ export interface AgentMetadata {
*/
llm_model_id?: string;
+ /**
+ * Instructions on how the agent should handle multi-turn conversations.
+ */
+ multiturn_system_prompt?: string;
+
/**
* Instructions on how the agent should respond when there are no relevant
* retrievals that can be used to answer a query.
@@ -251,6 +293,21 @@ export interface CreateAgentOutput {
* Captures Filter and Rerank configurations for an Agent
*/
export interface FilterAndRerankConfig {
+ /**
+ * Optional metadata filter which is applied while retrieving from every datastore
+ * linked to this agent.
+ */
+ default_metadata_filters?: DocumentsAPI.BaseMetadataFilter | DocumentsAPI.CompositeMetadataFilter;
+
+ /**
+ * Defines an optional custom metadata filter per datastore ID. Each entry in the
+ * dictionary should have a datastore UUID as the key, and the value should be a
+ * metadata filter definition. The filter will be applied in addition to filter(s)
+ * specified in `default_metadata_filters` and in the `documents_filters` field in
+ * the `/query` request during retrieval.
+ */
+ per_datastore_metadata_filters?: { [key: string]: DocumentsAPI.CompositeMetadataFilter };
+
/**
* Instructions that the reranker references when ranking retrievals. Note that we
* do not guarantee that the reranker will follow these instructions exactly.
@@ -414,6 +471,8 @@ export namespace AgentMetadataResponse {
*/
name: string;
+ template_name: string;
+
/**
* The following advanced parameters are experimental and subject to change.
*/
@@ -482,6 +541,11 @@ export interface AgentCreateParams {
*/
filter_prompt?: string;
+ /**
+ * Instructions on how the agent should handle multi-turn conversations.
+ */
+ multiturn_system_prompt?: string;
+
/**
* Instructions on how the agent should respond when there are no relevant
* retrievals that can be used to answer a query.
@@ -527,6 +591,11 @@ export interface AgentUpdateParams {
*/
llm_model_id?: string;
+ /**
+ * Instructions on how the agent should handle multi-turn conversations.
+ */
+ multiturn_system_prompt?: string;
+
/**
* Instructions on how the agent should respond when there are no relevant
* retrievals that can be used to answer a query.
@@ -552,9 +621,6 @@ export interface AgentListParams extends PageParams {}
Agents.AgentsPage = AgentsPage;
Agents.Query = Query;
-Agents.Evaluate = Evaluate;
-Agents.Datasets = Datasets;
-Agents.Tune = Tune;
export declare namespace Agents {
export {
@@ -588,23 +654,4 @@ export declare namespace Agents {
type QueryMetricsParams as QueryMetricsParams,
type QueryRetrievalInfoParams as QueryRetrievalInfoParams,
};
-
- export {
- Evaluate as Evaluate,
- type CreateEvaluationResponse as CreateEvaluationResponse,
- type EvaluateCreateParams as EvaluateCreateParams,
- };
-
- export {
- Datasets as Datasets,
- type CreateDatasetResponse as CreateDatasetResponse,
- type DatasetMetadata as DatasetMetadata,
- type ListDatasetsResponse as ListDatasetsResponse,
- };
-
- export {
- Tune as Tune,
- type CreateTuneResponse as CreateTuneResponse,
- type TuneCreateParams as TuneCreateParams,
- };
}
diff --git a/src/resources/agents/datasets.ts b/src/resources/agents/datasets.ts
deleted file mode 100644
index 4b86aa0..0000000
--- a/src/resources/agents/datasets.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export * from './datasets/index';
diff --git a/src/resources/agents/datasets/datasets.ts b/src/resources/agents/datasets/datasets.ts
deleted file mode 100644
index 60cf387..0000000
--- a/src/resources/agents/datasets/datasets.ts
+++ /dev/null
@@ -1,168 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../../resource';
-import * as EvaluateAPI from './evaluate';
-import {
- Evaluate,
- EvaluateCreateParams,
- EvaluateDeleteResponse,
- EvaluateListParams,
- EvaluateMetadataParams,
- EvaluateRetrieveParams,
- EvaluateUpdateParams,
-} from './evaluate';
-import * as TuneAPI from './tune';
-import {
- Tune,
- TuneCreateParams,
- TuneDeleteResponse,
- TuneListParams,
- TuneMetadataParams,
- TuneRetrieveParams,
- TuneUpdateParams,
-} from './tune';
-
-export class Datasets extends APIResource {
- tune: TuneAPI.Tune = new TuneAPI.Tune(this._client);
- evaluate: EvaluateAPI.Evaluate = new EvaluateAPI.Evaluate(this._client);
-}
-
-/**
- * Response to POST /datasets request
- */
-export interface CreateDatasetResponse {
- /**
- * Name of the dataset
- */
- name: string;
-
- /**
- * Type of the dataset
- */
- type: 'tuning_set' | 'evaluation_set' | 'evaluation_set_prediction' | 'evaluation_run_result';
-
- /**
- * Version number of the dataset
- */
- version: string;
-}
-
-/**
- * Response to GET /datasets/{name}
- */
-export interface DatasetMetadata {
- /**
- * Timestamp indicating when the dataset was created
- */
- created_at: string;
-
- /**
- * Number of samples in the dataset
- */
- num_samples: number;
-
- /**
- * Schema of the dataset
- */
- schema: unknown;
-
- /**
- * Validation status of the dataset
- */
- status: 'validated' | 'validating' | 'failed';
-
- /**
- * Type of the dataset
- */
- type: 'tuning_set' | 'evaluation_set' | 'evaluation_set_prediction' | 'evaluation_run_result';
-
- /**
- * Version of the dataset
- */
- version: string;
-}
-
-/**
- * Response to GET /datasets list endpoint
- */
-export interface ListDatasetsResponse {
- dataset_summaries: Array;
-
- /**
- * Total number of datasets
- */
- total_count: number;
-}
-
-export namespace ListDatasetsResponse {
- /**
- * Summary information for a dataset
- */
- export interface DatasetSummary {
- /**
- * Timestamp indicating when the dataset was created
- */
- created_at: string;
-
- /**
- * Name of the dataset
- */
- name: string;
-
- /**
- * Number of samples in the dataset
- */
- num_samples: number;
-
- /**
- * Schema of the dataset
- */
- schema: unknown;
-
- /**
- * Validation status of the dataset
- */
- status: 'validated' | 'validating' | 'failed';
-
- /**
- * Type of the dataset
- */
- type: 'tuning_set' | 'evaluation_set' | 'evaluation_set_prediction' | 'evaluation_run_result';
-
- /**
- * Version of the dataset
- */
- version: string;
- }
-}
-
-Datasets.Tune = Tune;
-Datasets.Evaluate = Evaluate;
-
-export declare namespace Datasets {
- export {
- type CreateDatasetResponse as CreateDatasetResponse,
- type DatasetMetadata as DatasetMetadata,
- type ListDatasetsResponse as ListDatasetsResponse,
- };
-
- export {
- Tune as Tune,
- type TuneDeleteResponse as TuneDeleteResponse,
- type TuneCreateParams as TuneCreateParams,
- type TuneRetrieveParams as TuneRetrieveParams,
- type TuneUpdateParams as TuneUpdateParams,
- type TuneListParams as TuneListParams,
- type TuneMetadataParams as TuneMetadataParams,
- };
-
- export {
- Evaluate as Evaluate,
- type EvaluateDeleteResponse as EvaluateDeleteResponse,
- type EvaluateCreateParams as EvaluateCreateParams,
- type EvaluateRetrieveParams as EvaluateRetrieveParams,
- type EvaluateUpdateParams as EvaluateUpdateParams,
- type EvaluateListParams as EvaluateListParams,
- type EvaluateMetadataParams as EvaluateMetadataParams,
- };
-}
diff --git a/src/resources/agents/datasets/evaluate.ts b/src/resources/agents/datasets/evaluate.ts
deleted file mode 100644
index 511ce10..0000000
--- a/src/resources/agents/datasets/evaluate.ts
+++ /dev/null
@@ -1,242 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../../resource';
-import { isRequestOptions } from '../../../core';
-import * as Core from '../../../core';
-import * as DatasetsAPI from './datasets';
-import { type Response } from '../../../_shims/index';
-
-export class Evaluate extends APIResource {
- /**
- * Create a new evaluation `Dataset` for the specified `Agent` using the provided
- * JSONL or CSV file. A `Dataset` is a versioned collection of samples conforming
- * to a particular schema, and can be used to store `Evaluation` test-sets and
- * retrieve `Evaluation` results.
- *
- * Each `Dataset` is versioned and validated against its schema during creation and
- * subsequent updates. The provided `Dataset` file must conform to the schema
- * defined for the `dataset_type`.
- *
- * File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file
- * where each line is one JSON object. The following keys are required:
- *
- * - `prompt` (`string`): Prompt or question
- *
- * - `reference` (`string`): Reference or ground truth response
- */
- create(
- agentId: string,
- body: EvaluateCreateParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.post(
- `/agents/${agentId}/datasets/evaluate`,
- Core.multipartFormRequestOptions({ body, ...options }),
- );
- }
-
- /**
- * Stream the raw content of an evaluation `Dataset` version. If no version is
- * specified, the latest version is used.
- *
- * The `Dataset` content is downloaded in batches. Batch size can be configured to
- * meet specific processing requirements.
- *
- * Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content with:
- *
- * - Content-Type: application/octet-stream
- *
- * - Content-Disposition: attachment
- *
- * - Chunked transfer encoding
- */
- retrieve(
- agentId: string,
- datasetName: string,
- query?: EvaluateRetrieveParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise;
- retrieve(agentId: string, datasetName: string, options?: Core.RequestOptions): Core.APIPromise;
- retrieve(
- agentId: string,
- datasetName: string,
- query: EvaluateRetrieveParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- if (isRequestOptions(query)) {
- return this.retrieve(agentId, datasetName, {}, query);
- }
- return this._client.get(`/agents/${agentId}/datasets/evaluate/${datasetName}`, {
- query,
- ...options,
- headers: { Accept: 'application/octet-stream', ...options?.headers },
- __binaryResponse: true,
- });
- }
-
- /**
- * Append to an existing evaluation `Dataset`.
- *
- * Create a new version of the dataset by appending content to the `Dataset` and
- * validating against its schema.
- *
- * File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file
- * where each line is one JSON object. The following keys are required:
- *
- * - `prompt` (`string`): Prompt or question
- *
- * - `reference` (`string`): Reference or ground truth response
- */
- update(
- agentId: string,
- datasetName: string,
- body: EvaluateUpdateParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.put(
- `/agents/${agentId}/datasets/evaluate/${datasetName}`,
- Core.multipartFormRequestOptions({ body, ...options }),
- );
- }
-
- /**
- * List all evaluation `Datasets` and their versions belonging to a particular
- * `Agent`.
- *
- * If a `dataset_name` filter is provided, all versions of that `Dataset` will be
- * listed.
- *
- * Includes metadata and schema for each `Dataset` version.
- */
- list(
- agentId: string,
- query?: EvaluateListParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise;
- list(agentId: string, options?: Core.RequestOptions): Core.APIPromise;
- list(
- agentId: string,
- query: EvaluateListParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- if (isRequestOptions(query)) {
- return this.list(agentId, {}, query);
- }
- return this._client.get(`/agents/${agentId}/datasets/evaluate`, { query, ...options });
- }
-
- /**
- * Delete an evaluation `Dataset` and all its versions.
- *
- * Permanently removes the `Dataset`, including all associated metadata.
- *
- * This operation is irreversible.
- */
- delete(agentId: string, datasetName: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.delete(`/agents/${agentId}/datasets/evaluate/${datasetName}`, options);
- }
-
- /**
- * Retrieve details of a specific evaluation `Dataset` version, or the latest
- * version if no `version` is specified.
- *
- * Provides comprehensive information about the `Dataset`, including its metadata
- * and schema.
- */
- metadata(
- agentId: string,
- datasetName: string,
- query?: EvaluateMetadataParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise;
- metadata(
- agentId: string,
- datasetName: string,
- options?: Core.RequestOptions,
- ): Core.APIPromise;
- metadata(
- agentId: string,
- datasetName: string,
- query: EvaluateMetadataParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- if (isRequestOptions(query)) {
- return this.metadata(agentId, datasetName, {}, query);
- }
- return this._client.get(`/agents/${agentId}/datasets/evaluate/${datasetName}/metadata`, {
- query,
- ...options,
- });
- }
-}
-
-export type EvaluateDeleteResponse = unknown;
-
-export interface EvaluateCreateParams {
- /**
- * Name of the evaluation dataset
- */
- dataset_name: string;
-
- /**
- * Type of evaluation dataset which determines its schema and validation rules.
- */
- dataset_type: 'evaluation_set';
-
- /**
- * JSONL or CSV file containing the evaluation dataset
- */
- file: Core.Uploadable;
-}
-
-export interface EvaluateRetrieveParams {
- /**
- * Batch size for processing
- */
- batch_size?: number;
-
- /**
- * Version number of the evaluation dataset to retrieve. Defaults to the latest
- * version if not specified.
- */
- version?: string;
-}
-
-export interface EvaluateUpdateParams {
- /**
- * Type of evaluation dataset which determines its schema and validation rules.
- * Must match the `dataset_type` used at dataset creation time.
- */
- dataset_type: 'evaluation_set';
-
- /**
- * JSONL or CSV file containing the entries to append to the evaluation dataset
- */
- file: Core.Uploadable;
-}
-
-export interface EvaluateListParams {
- /**
- * Optional dataset name to filter the results by. If provided, only versions from
- * that dataset are listed.
- */
- dataset_name?: string;
-}
-
-export interface EvaluateMetadataParams {
- /**
- * Version number of the dataset. Defaults to the latest version if not specified.
- */
- version?: string;
-}
-
-export declare namespace Evaluate {
- export {
- type EvaluateDeleteResponse as EvaluateDeleteResponse,
- type EvaluateCreateParams as EvaluateCreateParams,
- type EvaluateRetrieveParams as EvaluateRetrieveParams,
- type EvaluateUpdateParams as EvaluateUpdateParams,
- type EvaluateListParams as EvaluateListParams,
- type EvaluateMetadataParams as EvaluateMetadataParams,
- };
-}
diff --git a/src/resources/agents/datasets/index.ts b/src/resources/agents/datasets/index.ts
deleted file mode 100644
index c7b7181..0000000
--- a/src/resources/agents/datasets/index.ts
+++ /dev/null
@@ -1,26 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export {
- Datasets,
- type CreateDatasetResponse,
- type DatasetMetadata,
- type ListDatasetsResponse,
-} from './datasets';
-export {
- Evaluate,
- type EvaluateDeleteResponse,
- type EvaluateCreateParams,
- type EvaluateRetrieveParams,
- type EvaluateUpdateParams,
- type EvaluateListParams,
- type EvaluateMetadataParams,
-} from './evaluate';
-export {
- Tune,
- type TuneDeleteResponse,
- type TuneCreateParams,
- type TuneRetrieveParams,
- type TuneUpdateParams,
- type TuneListParams,
- type TuneMetadataParams,
-} from './tune';
diff --git a/src/resources/agents/datasets/tune.ts b/src/resources/agents/datasets/tune.ts
deleted file mode 100644
index 4b76fde..0000000
--- a/src/resources/agents/datasets/tune.ts
+++ /dev/null
@@ -1,261 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../../resource';
-import { isRequestOptions } from '../../../core';
-import * as Core from '../../../core';
-import * as DatasetsAPI from './datasets';
-import { type Response } from '../../../_shims/index';
-
-export class Tune extends APIResource {
- /**
- * Create a new tuning `Dataset` for the specified `Agent` using the provided JSONL
- * or CSV file. A `Dataset` is a versioned collection of samples conforming to a
- * particular schema, and can be used as a source of training and test data for
- * tuning jobs.
- *
- * Each `Dataset` is versioned and validated against its schema during creation and
- * subsequent updates. The provided `Dataset` file must conform to the schema
- * defined for the `dataset_type`.
- *
- * File schema for `dataset_type` `tuning_set` is a CSV file or a JSONL file where
- * each line is one JSON object. The following keys are required:
- *
- * - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference
- * answer. `knowledge` is a list of retrieved text chunks.
- *
- * - `reference` (`str`): The gold-standard answer to the prompt.
- *
- * - `guideline` (`str`): Guidelines for model output. If you do not have special
- * guidelines for the model's output, you can use the `System Prompt` defined in
- * your Agent configuration as the `guideline`.
- *
- * - `prompt` (`str`): Question for the model to respond to.
- *
- * For examples of what `tuning_set` should look like, check out our
- * `Tune & Evaluation Guide`.
- */
- create(
- agentId: string,
- body: TuneCreateParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.post(
- `/agents/${agentId}/datasets/tune`,
- Core.multipartFormRequestOptions({ body, ...options }),
- );
- }
-
- /**
- * Stream the raw content of a tuning `Dataset` version. If no version is
- * specified, the latest version is used.
- *
- * The `Dataset` content is downloaded in batches. Batch size can be configured to
- * meet specific processing requirements.
- *
- * Returns a `StreamingResponse`, an asynchronous stream of `Dataset` content with:
- *
- * - Content-Type: application/octet-stream
- *
- * - Content-Disposition: attachment
- *
- * - Chunked transfer encoding
- */
- retrieve(
- agentId: string,
- datasetName: string,
- query?: TuneRetrieveParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise;
- retrieve(agentId: string, datasetName: string, options?: Core.RequestOptions): Core.APIPromise;
- retrieve(
- agentId: string,
- datasetName: string,
- query: TuneRetrieveParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- if (isRequestOptions(query)) {
- return this.retrieve(agentId, datasetName, {}, query);
- }
- return this._client.get(`/agents/${agentId}/datasets/tune/${datasetName}`, {
- query,
- ...options,
- headers: { Accept: 'application/octet-stream', ...options?.headers },
- __binaryResponse: true,
- });
- }
-
- /**
- * Append to an existing tuning `Dataset`.
- *
- * Create a new version of the dataset by appending content to the `Dataset` and
- * validating against its schema.
- *
- * File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file
- * where each line is one JSON object. The following keys are required:
- *
- * - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference
- * answer. `knowledge` is a list of retrieved text chunks.
- *
- * - `reference` (`str`): The gold-standard answer to the prompt.
- *
- * - `guideline` (`str`): Guidelines for model output. If you do not have special
- * guidelines for the model's output, you can use the `System Prompt` defined in
- * your Agent configuration as the `guideline`.
- *
- * - `prompt` (`str`): Question for the model to respond to.
- *
- * For examples of what `tuning_set` should look like, check out our
- * `Tune & Evaluation Guide`.
- */
- update(
- agentId: string,
- datasetName: string,
- body: TuneUpdateParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.put(
- `/agents/${agentId}/datasets/tune/${datasetName}`,
- Core.multipartFormRequestOptions({ body, ...options }),
- );
- }
-
- /**
- * List all tuning `Datasets` and their versions belonging to a particular `Agent`.
- *
- * If a `dataset_name` filter is provided, all versions of that `Dataset` will be
- * listed.
- *
- * Includes metadata and schema for each `Dataset` version.
- */
- list(
- agentId: string,
- query?: TuneListParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise;
- list(agentId: string, options?: Core.RequestOptions): Core.APIPromise;
- list(
- agentId: string,
- query: TuneListParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- if (isRequestOptions(query)) {
- return this.list(agentId, {}, query);
- }
- return this._client.get(`/agents/${agentId}/datasets/tune`, { query, ...options });
- }
-
- /**
- * Delete a tuning `Dataset` and all its versions.
- *
- * Permanently removes the `Dataset`, including all associated metadata.
- *
- * This operation is irreversible.
- */
- delete(agentId: string, datasetName: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.delete(`/agents/${agentId}/datasets/tune/${datasetName}`, options);
- }
-
- /**
- * Retrieve details of a specific tuning `Dataset` version, or the latest version
- * if no `version` is specified.
- *
- * Provides comprehensive information about the `Dataset`, including its metadata
- * and schema.
- */
- metadata(
- agentId: string,
- datasetName: string,
- query?: TuneMetadataParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise;
- metadata(
- agentId: string,
- datasetName: string,
- options?: Core.RequestOptions,
- ): Core.APIPromise;
- metadata(
- agentId: string,
- datasetName: string,
- query: TuneMetadataParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- if (isRequestOptions(query)) {
- return this.metadata(agentId, datasetName, {}, query);
- }
- return this._client.get(`/agents/${agentId}/datasets/tune/${datasetName}/metadata`, {
- query,
- ...options,
- });
- }
-}
-
-export type TuneDeleteResponse = unknown;
-
-export interface TuneCreateParams {
- /**
- * Name of the tune dataset
- */
- dataset_name: string;
-
- /**
- * Type of tune dataset which determines its schema and validation rules.
- */
- dataset_type: 'tuning_set';
-
- /**
- * JSONL or CSV file containing the tune dataset
- */
- file: Core.Uploadable;
-}
-
-export interface TuneRetrieveParams {
- /**
- * Batch size for processing
- */
- batch_size?: number;
-
- /**
- * Version number of the tune dataset to retrieve. Defaults to the latest version
- * if not specified.
- */
- version?: string;
-}
-
-export interface TuneUpdateParams {
- /**
- * Type of tune dataset which determines its schema and validation rules. Must
- * match the `dataset_type` used at dataset creation time.
- */
- dataset_type: 'tuning_set';
-
- /**
- * JSONL or CSV file containing the entries to append to the tune dataset
- */
- file: Core.Uploadable;
-}
-
-export interface TuneListParams {
- /**
- * Optional dataset name to filter the results by. If provided, only versions from
- * that dataset are listed.
- */
- dataset_name?: string;
-}
-
-export interface TuneMetadataParams {
- /**
- * Version number of the dataset. Defaults to the latest version if not specified.
- */
- version?: string;
-}
-
-export declare namespace Tune {
- export {
- type TuneDeleteResponse as TuneDeleteResponse,
- type TuneCreateParams as TuneCreateParams,
- type TuneRetrieveParams as TuneRetrieveParams,
- type TuneUpdateParams as TuneUpdateParams,
- type TuneListParams as TuneListParams,
- type TuneMetadataParams as TuneMetadataParams,
- };
-}
diff --git a/src/resources/agents/evaluate.ts b/src/resources/agents/evaluate.ts
deleted file mode 100644
index 74e2612..0000000
--- a/src/resources/agents/evaluate.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export * from './evaluate/index';
diff --git a/src/resources/agents/evaluate/evaluate.ts b/src/resources/agents/evaluate/evaluate.ts
deleted file mode 100644
index b1c105d..0000000
--- a/src/resources/agents/evaluate/evaluate.ts
+++ /dev/null
@@ -1,103 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../../resource';
-import * as Core from '../../../core';
-import * as JobsAPI from './jobs';
-import { EvaluationJobMetadata, JobCancelResponse, Jobs, ListEvaluationJobsResponse } from './jobs';
-
-export class Evaluate extends APIResource {
- jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client);
-
- /**
- * Launch an `Evaluation` job which evaluates an `Agent` on a set of test questions
- * and reference answers.
- *
- * An `Evaluation` is an asynchronous operation. Users can select one or more
- * metrics to assess the quality of generated answers. These metrics include
- * `equivalence` and `groundedness`. `equivalence` evaluates if the Agent response
- * is equivalent to the ground truth (model-driven binary classification).
- * `groundedness` decomposes the Agent response into claims and then evaluates if
- * the claims are grounded by the retrieved documents.
- *
- * `Evaluation` data can be provided in one of two forms:
- *
- * - A CSV `evalset_file` containing the columns `prompt` (i.e. questions) and
- * `reference` (i.e. gold-answers).
- *
- * - An `evalset_name` which refers to a `Dataset` created through the
- * `/datasets/evaluate` API.
- */
- create(
- agentId: string,
- body: EvaluateCreateParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.post(
- `/agents/${agentId}/evaluate`,
- Core.multipartFormRequestOptions({ body, ...options }),
- );
- }
-}
-
-/**
- * Response from Launch Evaluation request
- */
-export interface CreateEvaluationResponse {
- /**
- * ID of the launched evaluation
- */
- id: string;
-}
-
-export interface EvaluateCreateParams {
- /**
- * List of metrics to use. Supported metrics are `equivalence` and `groundedness`.
- */
- metrics: Array<'equivalence' | 'groundedness'>;
-
- /**
- * Evalset file (CSV) to use for evaluation, containing the columns `prompt` (i.e.
- * question) and `reference` (i.e. ground truth response). Either `evalset_name` or
- * `evalset_file` must be provided, but not both.
- */
- evalset_file?: Core.Uploadable;
-
- /**
- * Name of the Dataset to use for evaluation, created through the
- * `/datasets/evaluate` API. Either `evalset_name` or `evalset_file` must be
- * provided, but not both.
- */
- evalset_name?: string;
-
- /**
- * ID of the model to evaluate. Uses the default model if not specified.
- */
- llm_model_id?: string;
-
- /**
- * User notes for the evaluation job.
- */
- notes?: string;
-
- /**
- * Override the configuration for the query. This will override the configuration
- * for the agent during evaluation.
- */
- override_configuration?: string;
-}
-
-Evaluate.Jobs = Jobs;
-
-export declare namespace Evaluate {
- export {
- type CreateEvaluationResponse as CreateEvaluationResponse,
- type EvaluateCreateParams as EvaluateCreateParams,
- };
-
- export {
- Jobs as Jobs,
- type EvaluationJobMetadata as EvaluationJobMetadata,
- type ListEvaluationJobsResponse as ListEvaluationJobsResponse,
- type JobCancelResponse as JobCancelResponse,
- };
-}
diff --git a/src/resources/agents/evaluate/index.ts b/src/resources/agents/evaluate/index.ts
deleted file mode 100644
index 98bb2b7..0000000
--- a/src/resources/agents/evaluate/index.ts
+++ /dev/null
@@ -1,9 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export { Evaluate, type CreateEvaluationResponse, type EvaluateCreateParams } from './evaluate';
-export {
- Jobs,
- type EvaluationJobMetadata,
- type ListEvaluationJobsResponse,
- type JobCancelResponse,
-} from './jobs';
diff --git a/src/resources/agents/evaluate/jobs.ts b/src/resources/agents/evaluate/jobs.ts
deleted file mode 100644
index 38a4978..0000000
--- a/src/resources/agents/evaluate/jobs.ts
+++ /dev/null
@@ -1,206 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../../resource';
-import * as Core from '../../../core';
-
-export class Jobs extends APIResource {
- /**
- * Retrieve a list of `Evaluation` jobs run for a given `Agent`, including the
- * `Evaluation`'s status and other metadata.
- */
- list(agentId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.get(`/agents/${agentId}/evaluate/jobs`, options);
- }
-
- /**
- * Cancels an `Evaluation` job if it is still in progress.
- */
- cancel(agentId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise {
- return this._client.post(`/agents/${agentId}/evaluate/jobs/${jobId}/cancel`, options);
- }
-
- /**
- * Get an `Evaluation` job's status and results. There are six possible statuses:
- * 'pending', 'processing', 'retrying', 'completed', 'failed', 'cancelled'.
- *
- * If the evaluation job has completed, you will see your evaluation `metrics` ,
- * `job_metadata`, and the `dataset_name` where your eval metrics and row-by-row
- * results are stored. You can use the `/datasets/evaluate` API to view the
- * specified `dataset`.
- */
- metadata(
- agentId: string,
- jobId: string,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
- return this._client.get(`/agents/${agentId}/evaluate/jobs/${jobId}/metadata`, options);
- }
-}
-
-/**
- * Response from Get Evaluation Results request
- */
-export interface EvaluationJobMetadata {
- /**
- * Dataset name containing the individual results of the evaluation round
- */
- dataset_name: string;
-
- /**
- * Metadata of the evaluation round with the number of predictions, failed
- * predictions, and successful predictions.
- */
- job_metadata: EvaluationJobMetadata.JobMetadata;
-
- /**
- * Results of the evaluation round, grouped by each metric
- */
- metrics: unknown;
-
- /**
- * Status of the evaluation round
- */
- status:
- | 'pending'
- | 'processing'
- | 'retrying'
- | 'completed'
- | 'failed'
- | 'cancelled'
- | 'failed_to_provision'
- | 'generating_data'
- | 'training_in_progress'
- | 'failed_to_generate_data'
- | 'provisioning';
-}
-
-export namespace EvaluationJobMetadata {
- /**
- * Metadata of the evaluation round with the number of predictions, failed
- * predictions, and successful predictions.
- */
- export interface JobMetadata {
- /**
- * Number of predictions that failed during the evaluation job
- */
- num_failed_predictions?: number;
-
- /**
- * Total number of predictions made during the evaluation job
- */
- num_predictions?: number;
-
- /**
- * Number of predictions that were processed during the evaluation job
- */
- num_processed_predictions?: number;
-
- /**
- * Number of predictions that were successful during the evaluation job
- */
- num_successful_predictions?: number;
- }
-}
-
-/**
- * Response from List Evaluations request
- */
-export interface ListEvaluationJobsResponse {
- /**
- * List of evaluation results
- */
- evaluation_rounds: Array;
-}
-
-export namespace ListEvaluationJobsResponse {
- /**
- * Metadata of an evaluation round
- */
- export interface EvaluationRound {
- /**
- * ID of the evaluation round
- */
- id: string;
-
- /**
- * Timestamp indicating when the evaluation round was created
- */
- created_at: string;
-
- /**
- * Status of the evaluation round
- */
- status:
- | 'pending'
- | 'processing'
- | 'retrying'
- | 'completed'
- | 'failed'
- | 'cancelled'
- | 'failed_to_provision'
- | 'generating_data'
- | 'training_in_progress'
- | 'failed_to_generate_data'
- | 'provisioning';
-
- /**
- * Email of the user who launched the evaluation round
- */
- user_email: string;
-
- /**
- * Timestamp indicating when the evaluation round finished processing
- */
- finished_at?: string;
-
- /**
- * User notes for the evaluation job
- */
- notes?: string;
-
- /**
- * Number of predictions that failed during the evaluation round
- */
- num_failed_predictions?: number;
-
- /**
- * Total number of predictions made during the evaluation round
- */
- num_predictions?: number;
-
- /**
- * Number of predictions that have been processed during the evaluation round
- */
- num_processed_predictions?: number;
-
- /**
- * Number of predictions that were successful during the evaluation round
- */
- num_successful_predictions?: number;
-
- /**
- * Timestamp indicating when the evaluation round started processing
- */
- processing_started_at?: string;
-
- /**
- * Name of the dataset with the evaluation results
- */
- results_dataset_name?: string;
-
- /**
- * Score of the evaluation round
- */
- summary_results?: unknown;
- }
-}
-
-export type JobCancelResponse = unknown;
-
-export declare namespace Jobs {
- export {
- type EvaluationJobMetadata as EvaluationJobMetadata,
- type ListEvaluationJobsResponse as ListEvaluationJobsResponse,
- type JobCancelResponse as JobCancelResponse,
- };
-}
diff --git a/src/resources/agents/index.ts b/src/resources/agents/index.ts
index 96668a4..06c55d7 100644
--- a/src/resources/agents/index.ts
+++ b/src/resources/agents/index.ts
@@ -20,13 +20,6 @@ export {
type AgentUpdateParams,
type AgentListParams,
} from './agents';
-export {
- Datasets,
- type CreateDatasetResponse,
- type DatasetMetadata,
- type ListDatasetsResponse,
-} from './datasets/index';
-export { Evaluate, type CreateEvaluationResponse, type EvaluateCreateParams } from './evaluate/index';
export {
Query,
type QueryResponse,
@@ -38,4 +31,3 @@ export {
type QueryMetricsParams,
type QueryRetrievalInfoParams,
} from './query';
-export { Tune, type CreateTuneResponse, type TuneCreateParams } from './tune/index';
diff --git a/src/resources/agents/query.ts b/src/resources/agents/query.ts
index b3a383e..721556b 100644
--- a/src/resources/agents/query.ts
+++ b/src/resources/agents/query.ts
@@ -150,6 +150,40 @@ export namespace QueryResponse {
*/
content_text?: string;
+ /**
+ * Default metadata from the retrieval
+ */
+ ctxl_metadata?: RetrievalContent.CtxlMetadata;
+
+ /**
+ * Custom metadata for the document, provided by the user at ingestion time.Must be
+ * a JSON-serializable dictionary with string keys and simple primitive values
+ * (str, int, float, bool). The total size must not exceed 2 KB.The strings with
+ * date format must stay in date format or be avodied if not in date format.The
+ * 'custom_metadata.url' field is automatically included in returned attributions
+ * during query time, if provided.The default maximum metadata fields that can be
+ * used is 15, contact support if more is needed.
+ */
+ custom_metadata?: { [key: string]: boolean | number | string };
+
+ /**
+ * A dictionary mapping metadata field names to the configuration to use for each
+ * field.
+ *
+ * - If a metadata field is not present in the dictionary, the default configuration will be used.
+ *
+ * - If the dictionary is not provided, metadata will be added in chunks but will not be retrievable.
+ *
+ *
+ * Limits: - Maximum characters per metadata field (for prompt or rerank): 400
+ *
+ * - Maximum number of metadata fields (for prompt or retrieval): 10
+ *
+ *
+ * Contact support@contextual.ai to request quota increases.
+ */
+ custom_metadata_config?: { [key: string]: RetrievalContent.CustomMetadataConfig };
+
/**
* Index of the retrieved item in the retrieval_contents list (starting from 1)
*/
@@ -171,6 +205,85 @@ export namespace QueryResponse {
url?: string;
}
+ export namespace RetrievalContent {
+ /**
+ * Default metadata from the retrieval
+ */
+ export interface CtxlMetadata {
+ /**
+ * Unique identifier for the chunk.
+ */
+ chunk_id?: string;
+
+ /**
+ * Size of the chunk in tokens or characters.
+ */
+ chunk_size?: number;
+
+ /**
+ * Date when the document or chunk was created.
+ */
+ date_created?: string;
+
+ /**
+ * Title of the document.
+ */
+ document_title?: string;
+
+ /**
+ * Format of the file (e.g., PDF, DOCX).
+ */
+ file_format?: string;
+
+ /**
+ * Name of the source file.
+ */
+ file_name?: string;
+
+ /**
+ * Whether this chunk represents a figure.
+ */
+ is_figure?: boolean;
+
+ /**
+ * Page number in the source document.
+ */
+ page?: number;
+
+ /**
+ * The HTML id of the nearest element of the chunk
+ */
+ section_id?: string;
+
+ /**
+ * Title of the section.
+ */
+ section_title?: string;
+
+ [k: string]: unknown;
+ }
+
+ export interface CustomMetadataConfig {
+ /**
+ * Whether to use in filtering. Defaults to True
+ */
+ filterable?: boolean;
+
+ /**
+ * Whether to add in chunks. Defaults to True. The maximum amount of characters per
+ * metadata field that can be added to the prompt or rerank is 400. The maximum
+ * amount of metadata fields that can be added for prompt or retrieval is 10.
+ * Contact support@contextual.ai to request quota increases.
+ */
+ in_chunks?: boolean;
+
+ /**
+ * Whether to add in response. Defaults to False
+ */
+ returned_in_response?: boolean;
+ }
+ }
+
/**
* Attribution for some claim made in a generated message`.
*/
@@ -248,6 +361,11 @@ export namespace RetrievalInfoResponse {
*/
content_text: string;
+ /**
+ * Id of the document which the content belongs to.
+ */
+ document_id: string;
+
/**
* Height of the image.
*/
@@ -393,7 +511,7 @@ export interface QueryCreateParams {
* }
* ```
*/
- documents_filters?: QueryCreateParams.BaseMetadataFilter | DocumentsAPI.CompositeMetadataFilter;
+ documents_filters?: DocumentsAPI.BaseMetadataFilter | DocumentsAPI.CompositeMetadataFilter;
/**
* Body param: Model ID of the specific fine-tuned or aligned LLM model to use.
@@ -401,6 +519,12 @@ export interface QueryCreateParams {
*/
llm_model_id?: string;
+ /**
+ * Body param: This will modify select configuration parameters for the agent
+ * during the response generation.
+ */
+ override_configuration?: QueryCreateParams.OverrideConfiguration;
+
/**
* Body param: Set to `true` to receive a streamed response
*/
@@ -429,40 +553,95 @@ export namespace QueryCreateParams {
}
/**
- * Defines a custom metadata filter. The expected input is a dict which can have
- * different operators, fields and values. For example:
- *
- * {"field": "title", "operator": "startswith", "value": "hr-"}
- *
- * For document_id and date_created the query is built using direct query without
- * nesting.
+ * This will modify select configuration parameters for the agent during the
+ * response generation.
*/
- export interface BaseMetadataFilter {
+ export interface OverrideConfiguration {
+ /**
+ * Override the filter_retrievals for the query. This will override the
+ * filter_retrievals for the agent during evaluation.
+ */
+ enable_filter?: boolean;
+
+ /**
+ * Override the rerank_retrievals for the agent during evaluation.
+ */
+ enable_rerank?: boolean;
+
+ /**
+ * Override the filter_model for the query. This will override the filter_model for
+ * the agent during evaluation.
+ */
+ filter_model?: string;
+
+ /**
+ * Override the filter prompt for the agent during evaluation.
+ */
+ filter_prompt?: string;
+
+ /**
+ * Override the lexical_alpha for the agent during evaluation.
+ */
+ lexical_alpha?: number;
+
+ /**
+ * Override the max new tokens for the agent during evaluation.
+ */
+ max_new_tokens?: number;
+
+ /**
+ * Override the model for the agent during evaluation.
+ */
+ model?: string;
+
+ /**
+ * Override the rerank_instructions for the agent during evaluation.
+ */
+ rerank_instructions?: string;
+
+ /**
+ * Override the reranker for the agent during evaluation.
+ */
+ reranker?: string;
+
+ /**
+ * Override the reranker_score_filter_threshold for the agent during evaluation.
+ */
+ reranker_score_filter_threshold?: number;
+
+ /**
+ * Override the semantic_alpha for the agent during evaluation.
+ */
+ semantic_alpha?: number;
+
+ /**
+ * Override the system prompt for the agent during evaluation.
+ */
+ system_prompt?: string;
+
+ /**
+ * Override the temperature for the query. This will override the temperature for
+ * the agent during evaluation.
+ */
+ temperature?: number;
+
/**
- * Field name to search for in the metadata
+ * Override the rerank_top_k for the query. This will override the rerank_top_k for
+ * the agent during evaluation.
*/
- field: string;
+ top_k_reranked_chunks?: number;
/**
- * Operator to be used for the filter.
+ * Override the top_k for the query. This will override the top_k for the agent
+ * during evaluation.
*/
- operator:
- | 'equals'
- | 'containsany'
- | 'exists'
- | 'startswith'
- | 'gt'
- | 'gte'
- | 'lt'
- | 'lte'
- | 'notequals'
- | 'between';
+ top_k_retrieved_chunks?: number;
/**
- * The value to be searched for in the field. In case of exists operator, it is not
- * needed.
+ * Override the top_p for the query. This will override the top_p for the agent
+ * during evaluation.
*/
- value?: string | number | boolean | Array | null;
+ top_p?: number;
}
/**
diff --git a/src/resources/agents/tune.ts b/src/resources/agents/tune.ts
deleted file mode 100644
index bd81324..0000000
--- a/src/resources/agents/tune.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export * from './tune/index';
diff --git a/src/resources/agents/tune/index.ts b/src/resources/agents/tune/index.ts
deleted file mode 100644
index 671454d..0000000
--- a/src/resources/agents/tune/index.ts
+++ /dev/null
@@ -1,5 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export { Jobs, type ListTuneJobsResponse, type TuneJobMetadata, type JobDeleteResponse } from './jobs';
-export { Models, type ListTuneModelsResponse } from './models';
-export { Tune, type CreateTuneResponse, type TuneCreateParams } from './tune';
diff --git a/src/resources/agents/tune/jobs.ts b/src/resources/agents/tune/jobs.ts
deleted file mode 100644
index 955955e..0000000
--- a/src/resources/agents/tune/jobs.ts
+++ /dev/null
@@ -1,85 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../../resource';
-import * as Core from '../../../core';
-
-export class Jobs extends APIResource {
- /**
- * Retrieve a list of all fine-tuning jobs for a specified Agent.
- */
- list(agentId: string, options?: Core.RequestOptions): Core.APIPromise<ListTuneJobsResponse> {
- return this._client.get(`/agents/${agentId}/tune/jobs`, options);
- }
-
- /**
- * Cancel a specific fine-tuning job. Terminates the fine-tuning job if it is still
- * in progress.
- */
- delete(agentId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise<JobDeleteResponse> {
- return this._client.delete(`/agents/${agentId}/tune/jobs/${jobId}`, options);
- }
-
- /**
- * Retrieve the status of a specific tuning job. Fetches the current status and
- * evaluation results, if available, for the specified tuning job. After the tuning
- * job is complete, the metadata associated with the tune job will include
- * evaluation results and a model ID. You can then activate the tuned model for
- * your agent by editing its config with the tuned model ID and the "Edit Agent"
- * API (i.e. the `PUT /agents/{agent_id}` API). To deactivate the tuned model, you
- * will need to edit the Agent's config again and set the `llm_model_id` field to
- * "default". For an end-to-end walkthrough, see the `Tune & Evaluation Guide`.
- */
- metadata(agentId: string, jobId: string, options?: Core.RequestOptions): Core.APIPromise<TuneJobMetadata> {
- return this._client.get(`/agents/${agentId}/tune/jobs/${jobId}/metadata`, options);
- }
-}
-
-/**
- * Response model to list tune jobs
- */
-export interface ListTuneJobsResponse {
- /**
- * List of fine-tuning jobs for the agent
- */
- jobs: Array<TuneJobMetadata>;
-
- /**
- * Total number of jobs associated with the agent
- */
- total_count: number;
-}
-
-/**
- * Response to GET /applications/{application_id}/tune/jobs/{job_id}
- */
-export interface TuneJobMetadata {
- /**
- * ID of the tune job
- */
- id: string;
-
- /**
- * Status of the tune job
- */
- job_status: string;
-
- /**
- * Metadata about the model evaluation, including status and results if completed.
- */
- evaluation_metadata?: Array;
-
- /**
- * ID of the tuned model. Omitted if the tuning job failed or is still in progress.
- */
- model_id?: string;
-}
-
-export type JobDeleteResponse = unknown;
-
-export declare namespace Jobs {
- export {
- type ListTuneJobsResponse as ListTuneJobsResponse,
- type TuneJobMetadata as TuneJobMetadata,
- type JobDeleteResponse as JobDeleteResponse,
- };
-}
diff --git a/src/resources/agents/tune/models.ts b/src/resources/agents/tune/models.ts
deleted file mode 100644
index d8285c0..0000000
--- a/src/resources/agents/tune/models.ts
+++ /dev/null
@@ -1,64 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../../resource';
-import * as Core from '../../../core';
-
-export class Models extends APIResource {
- /**
- * Retrieves a list of tuned models associated with the specified Agent.
- */
- list(agentId: string, options?: Core.RequestOptions): Core.APIPromise<ListTuneModelsResponse> {
- return this._client.get(`/agents/${agentId}/tune/models`, options);
- }
-}
-
-/**
- * Response model to list registered models
- */
-export interface ListTuneModelsResponse {
- /**
- * List of registered models for the agent
- */
- models: Array<ListTuneModelsResponse.Model>;
-
- /**
- * Total number of models associated with the agent
- */
- total_count: number;
-}
-
-export namespace ListTuneModelsResponse {
- /**
- * Response model to list individual registered model
- */
- export interface Model {
- /**
- * ID of the associated agent
- */
- application_id: string;
-
- /**
- * Timestamp indicating when the model was created
- */
- created_at: string;
-
- /**
- * ID of the tuning job that produced the model
- */
- job_id: string;
-
- /**
- * ID of the registered model
- */
- model_id: string;
-
- /**
- * State of the model
- */
- state: 'active' | 'inactive' | 'pending';
- }
-}
-
-export declare namespace Models {
- export { type ListTuneModelsResponse as ListTuneModelsResponse };
-}
diff --git a/src/resources/agents/tune/tune.ts b/src/resources/agents/tune/tune.ts
deleted file mode 100644
index af7846f..0000000
--- a/src/resources/agents/tune/tune.ts
+++ /dev/null
@@ -1,196 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../../resource';
-import { isRequestOptions } from '../../../core';
-import * as Core from '../../../core';
-import * as JobsAPI from './jobs';
-import { JobDeleteResponse, Jobs, ListTuneJobsResponse, TuneJobMetadata } from './jobs';
-import * as ModelsAPI from './models';
-import { ListTuneModelsResponse, Models } from './models';
-
-export class Tune extends APIResource {
- jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client);
- models: ModelsAPI.Models = new ModelsAPI.Models(this._client);
-
- /**
- * Create a tuning job for the specified `Agent` to specialize it to your specific
- * domain or use case.
- *
- * This API initiates an asynchronous tuning task. You can provide the required
- * data through one of two ways:
- *
- * - Provide a `training_file` and an optional `test_file`. If no `test_file` is
- * provided, a portion of the `training_file` will be held out as the test set.
- * For easy reusability, the `training_file` is automatically saved as a `Tuning`
- * `Dataset`, and the `test_file` as an `Evaluation` `Dataset`. You can manage
- * them via the `/datasets/tune` and `/datasets/evaluation` endpoints.
- *
- * - Provide a `Tuning` `Dataset` and an optional `Evaluation` `Dataset`. You can
- * create a `Tuning` `Dataset` and `Evaluation` `Dataset` using the
- * `/datasets/tune` and `/datasets/evaluation` endpoints respectively.
- *
- * The API returns a tune job `id` which can be used to check on the status of your
- * tuning task through the `GET /tune/jobs/{job_id}/metadata` endpoint.
- *
- * After the tuning job is complete, the metadata associated with the tune job will
- * include evaluation results and a model ID. You can then deploy the tuned model
- * to the agent by editing its config with the tuned model ID and the "Edit Agent"
- * API (i.e. the `PUT /agents/{agent_id}` API). To deactivate the tuned model, you
- * will need to edit the Agent's config again and set the `llm_model_id` field to
- * "default". For an end-to-end walkthrough, see the `Tune & Evaluation Guide`.
- */
- create(
- agentId: string,
- body?: TuneCreateParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<CreateTuneResponse>;
- create(agentId: string, options?: Core.RequestOptions): Core.APIPromise<CreateTuneResponse>;
- create(
- agentId: string,
- body: TuneCreateParams | Core.RequestOptions = {},
- options?: Core.RequestOptions,
- ): Core.APIPromise<CreateTuneResponse> {
- if (isRequestOptions(body)) {
- return this.create(agentId, {}, body);
- }
- return this._client.post(
- `/agents/${agentId}/tune`,
- Core.multipartFormRequestOptions({ body, ...options }),
- );
- }
-}
-
-/**
- * Response to POST /applications/{application_id}/tune request
- */
-export interface CreateTuneResponse {
- /**
- * ID of the created tune job
- */
- id: string;
-}
-
-export interface TuneCreateParams {
- /**
- * Controls how quickly the model adapts to the training data. Must be greater than
- * 0 and less than or equal to 0.1.
- */
- 'hyperparams[learning_rate]'?: number;
-
- /**
- * Scaling factor that controls the magnitude of LoRA updates. Higher values lead
- * to stronger adaptation effects. The effective learning strength is determined by
- * the ratio of lora_alpha/lora_rank. Must be one of: 8, 16, 32, 64 or 128
- */
- 'hyperparams[lora_alpha]'?: 8 | 16 | 32 | 64 | 128;
-
- /**
- * LoRA dropout randomly disables connections during training to prevent
- * overfitting and improve generalization when fine-tuning language models with
- * Low-Rank Adaptation. Must be between 0 and 1 (exclusive).
- */
- 'hyperparams[lora_dropout]'?: number;
-
- /**
- * Controls the capacity of the LoRA adapters. Must be one of: 8, 16, 32, or 64.
- */
- 'hyperparams[lora_rank]'?: 8 | 16 | 32 | 64;
-
- /**
- * Number of complete passes through the training dataset.
- */
- 'hyperparams[num_epochs]'?: number;
-
- /**
- * Fraction of training steps used for learning rate warmup. Must be between 0 and
- * 1 (exclusive).
- */
- 'hyperparams[warmup_ratio]'?: number;
-
- /**
- * Optional. Metadata file to use for synthetic data pipeline.
- */
- metadata_file?: Core.Uploadable;
-
- /**
- * Runs the SDP pipeline only if set to True.
- */
- sdp_only?: boolean;
-
- /**
- * Optional. Whether to generate synthetic data for training
- */
- synth_data?: boolean;
-
- /**
- * Optional. `Dataset` to use for testing model checkpoints, created through the
- * `/datasets/evaluate` API.
- */
- test_dataset_name?: string | null;
-
- /**
- * Optional. Local path to the test data file. The test file should follow the same
- * format as the training data file.
- */
- test_file?: Core.Uploadable | null;
-
- /**
- * `Dataset` to use for training, created through the `/datasets/tune` API. Either
- * `train_dataset_name` or `training_file` must be provided, but not both.
- */
- train_dataset_name?: string | null;
-
- /**
- * Local path to the training data file.
- *
- * The file should be in JSON array format, where each element of the array is a
- * JSON object represents a single training example. The four required fields are
- * `guideline`, `prompt`, `reference`, and `knowledge`.
- *
- * - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference
- * answer. `knowledge` is a list of retrieved text chunks.
- *
- * - `reference` (`str`): The gold-standard answer to the prompt.
- *
- * - `guideline` (`str`): Guidelines for model output. If you do not have special
- * guidelines for the model's output, you can use the `System Prompt` defined in
- * your Agent configuration as the `guideline`.
- *
- * - `prompt` (`str`): Question for the model to respond to.
- *
- * Example:
- *
- * ```json
- * [
- * {
- * "guideline": "The answer should be accurate.",
- * "prompt": "What was last quarter's revenue?",
- * "reference": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.",
- * "knowledge": [
- * "Quarterly report: Q3 revenue was $1.2 million.",
- * "Quarterly report: Q2 revenue was $1.1 million.",
- * ...
- * ],
- * },
- * ...
- * ]
- * ```
- */
- training_file?: Core.Uploadable | null;
-}
-
-Tune.Jobs = Jobs;
-Tune.Models = Models;
-
-export declare namespace Tune {
- export { type CreateTuneResponse as CreateTuneResponse, type TuneCreateParams as TuneCreateParams };
-
- export {
- Jobs as Jobs,
- type ListTuneJobsResponse as ListTuneJobsResponse,
- type TuneJobMetadata as TuneJobMetadata,
- type JobDeleteResponse as JobDeleteResponse,
- };
-
- export { Models as Models, type ListTuneModelsResponse as ListTuneModelsResponse };
-}
diff --git a/src/resources/datastores/datastores.ts b/src/resources/datastores/datastores.ts
index 8610c46..9704be8 100644
--- a/src/resources/datastores/datastores.ts
+++ b/src/resources/datastores/datastores.ts
@@ -5,8 +5,11 @@ import { isRequestOptions } from '../../core';
import * as Core from '../../core';
import * as DocumentsAPI from './documents';
import {
+ BaseMetadataFilter,
CompositeMetadataFilter,
DocumentDeleteResponse,
+ DocumentGetParseResultParams,
+ DocumentGetParseResultResponse,
DocumentIngestParams,
DocumentListParams,
DocumentMetadata,
@@ -45,6 +48,17 @@ export class Datastores extends APIResource {
return this._client.post('/datastores', { body, ...options });
}
+ /**
+ * Edit Datastore Configuration
+ */
+ update(
+ datastoreId: string,
+ body: DatastoreUpdateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<DatastoreUpdateResponse> {
+ return this._client.put(`/datastores/${datastoreId}`, { body, ...options });
+ }
+
/**
* Retrieve a list of `Datastores`.
*
@@ -114,14 +128,126 @@ export interface Datastore {
id: string;
/**
- * Timestamp of when the datastore was created
+ * Timestamp of when the datastore was created, in ISO format
*/
created_at: string;
+ /**
+ * Type of the datastore
+ */
+ datastore_type: 'UNSTRUCTURED';
+
/**
* Name of the datastore
*/
name: string;
+
+ /**
+ * Configuration of the datastore
+ */
+ configuration?: Datastore.Configuration;
+}
+
+export namespace Datastore {
+ /**
+ * Configuration of the datastore
+ */
+ export interface Configuration {
+ /**
+ * Configuration for document chunking
+ */
+ chunking?: Configuration.Chunking;
+
+ /**
+ * Configuration for HTML Extraction
+ */
+ html_config?: Configuration.HTMLConfig;
+
+ /**
+ * Configuration for document parsing
+ */
+ parsing?: Configuration.Parsing;
+ }
+
+ export namespace Configuration {
+ /**
+ * Configuration for document chunking
+ */
+ export interface Chunking {
+ /**
+ * Chunking mode to use. Options are: `hierarchy_depth`, `hierarchy_heading`,
+ * `static_length`, `page_level`. `hierarchy_depth` groups chunks of the same
+ * hierarchy level or below, additionally merging or splitting based on length
+ * constraints. `hierarchy_heading` splits chunks at every heading in the document
+ * hierarchy, additionally merging or splitting based on length constraints.
+ * `static_length` creates chunks of a fixed length. `page_level` creates chunks
+ * that cannot run over page boundaries.
+ */
+ chunking_mode?: 'hierarchy_depth' | 'hierarchy_heading' | 'static_length' | 'page_level';
+
+ /**
+ * Whether to enable section-based contextualization for chunking
+ */
+ enable_hierarchy_based_contextualization?: boolean;
+
+ /**
+ * Target maximum length of text tokens chunks for chunking. Chunk length may
+ * exceed this value in some edge cases.
+ */
+ max_chunk_length_tokens?: number;
+
+ /**
+ * Target minimum length of chunks in tokens. Must be at least 384 tokens less than
+ * `max_chunk_length_tokens`. Chunk length may be shorter than this value in some
+ * edge cases. Ignored if `chunking_mode` is `page_level`.
+ */
+ min_chunk_length_tokens?: number;
+ }
+
+ /**
+ * Configuration for HTML Extraction
+ */
+ export interface HTMLConfig {
+ /**
+ * Target maximum length of text tokens chunks for chunking. Chunk length may
+ * exceed this value in some edge cases.
+ */
+ max_chunk_length_tokens?: number;
+ }
+
+ /**
+ * Configuration for document parsing
+ */
+ export interface Parsing {
+ /**
+ * Whether to enable table splitting, which splits large tables into smaller tables
+ * with at most `max_split_table_cells` cells each. In each split table, the table
+ * headers are reproduced as the first row(s). This is useful for preserving
+ * context when tables are too large to fit into one chunk.
+ */
+ enable_split_tables?: boolean;
+
+ /**
+ * Mode for figure captioning. Options are `default`, `custom`, or `ignore`. Set to
+ * `ignore` to disable figure captioning. Set to `default` to use the default
+ * figure prompt, which generates a detailed caption for each figure. Set to
+ * `custom` to use a custom prompt.
+ */
+ figure_caption_mode?: 'default' | 'custom' | 'ignore';
+
+ /**
+ * Prompt to use for generating image captions. Must be non-empty if
+ * `figure_caption_mode` is `custom`. Otherwise, must be null.
+ */
+ figure_captioning_prompt?: string;
+
+ /**
+ * Maximum number of cells for split tables. Ignored if `enable_split_tables` is
+ * False.
+ */
+ max_split_table_cells?: number;
+ }
+ }
}
export interface DatastoreMetadata {
@@ -140,6 +266,16 @@ export interface DatastoreMetadata {
*/
name: string;
+ /**
+ * Configuration of the datastore. Not set if default configuration is in use.
+ */
+ configuration?: DatastoreMetadata.Configuration;
+
+ /**
+ * Type of the datastore
+ */
+ datastore_type?: 'UNSTRUCTURED';
+
/**
* Datastore usage
*/
@@ -147,6 +283,106 @@ export interface DatastoreMetadata {
}
export namespace DatastoreMetadata {
+ /**
+ * Configuration of the datastore. Not set if default configuration is in use.
+ */
+ export interface Configuration {
+ /**
+ * Configuration for document chunking
+ */
+ chunking?: Configuration.Chunking;
+
+ /**
+ * Configuration for HTML Extraction
+ */
+ html_config?: Configuration.HTMLConfig;
+
+ /**
+ * Configuration for document parsing
+ */
+ parsing?: Configuration.Parsing;
+ }
+
+ export namespace Configuration {
+ /**
+ * Configuration for document chunking
+ */
+ export interface Chunking {
+ /**
+ * Chunking mode to use. Options are: `hierarchy_depth`, `hierarchy_heading`,
+ * `static_length`, `page_level`. `hierarchy_depth` groups chunks of the same
+ * hierarchy level or below, additionally merging or splitting based on length
+ * constraints. `hierarchy_heading` splits chunks at every heading in the document
+ * hierarchy, additionally merging or splitting based on length constraints.
+ * `static_length` creates chunks of a fixed length. `page_level` creates chunks
+ * that cannot run over page boundaries.
+ */
+ chunking_mode?: 'hierarchy_depth' | 'hierarchy_heading' | 'static_length' | 'page_level';
+
+ /**
+ * Whether to enable section-based contextualization for chunking
+ */
+ enable_hierarchy_based_contextualization?: boolean;
+
+ /**
+ * Target maximum length of text tokens chunks for chunking. Chunk length may
+ * exceed this value in some edge cases.
+ */
+ max_chunk_length_tokens?: number;
+
+ /**
+ * Target minimum length of chunks in tokens. Must be at least 384 tokens less than
+ * `max_chunk_length_tokens`. Chunk length may be shorter than this value in some
+ * edge cases. Ignored if `chunking_mode` is `page_level`.
+ */
+ min_chunk_length_tokens?: number;
+ }
+
+ /**
+ * Configuration for HTML Extraction
+ */
+ export interface HTMLConfig {
+ /**
+ * Target maximum length of text tokens chunks for chunking. Chunk length may
+ * exceed this value in some edge cases.
+ */
+ max_chunk_length_tokens?: number;
+ }
+
+ /**
+ * Configuration for document parsing
+ */
+ export interface Parsing {
+ /**
+ * Whether to enable table splitting, which splits large tables into smaller tables
+ * with at most `max_split_table_cells` cells each. In each split table, the table
+ * headers are reproduced as the first row(s). This is useful for preserving
+ * context when tables are too large to fit into one chunk.
+ */
+ enable_split_tables?: boolean;
+
+ /**
+ * Mode for figure captioning. Options are `default`, `custom`, or `ignore`. Set to
+ * `ignore` to disable figure captioning. Set to `default` to use the default
+ * figure prompt, which generates a detailed caption for each figure. Set to
+ * `custom` to use a custom prompt.
+ */
+ figure_caption_mode?: 'default' | 'custom' | 'ignore';
+
+ /**
+ * Prompt to use for generating image captions. Must be non-empty if
+ * `figure_caption_mode` is `custom`. Otherwise, must be null.
+ */
+ figure_captioning_prompt?: string;
+
+ /**
+ * Maximum number of cells for split tables. Ignored if `enable_split_tables` is
+ * False.
+ */
+ max_split_table_cells?: number;
+ }
+ }
+
/**
* Datastore usage
*/
@@ -176,6 +412,13 @@ export interface ListDatastoresResponse {
next_cursor?: string;
}
+export interface DatastoreUpdateResponse {
+ /**
+ * ID of the datastore
+ */
+ id: string;
+}
+
export type DatastoreDeleteResponse = unknown;
export type DatastoreResetResponse = unknown;
@@ -185,6 +428,229 @@ export interface DatastoreCreateParams {
* Name of the datastore
*/
name: string;
+
+ /**
+ * Configuration of the datastore. If not provided, default configuration is used.
+ */
+ configuration?: DatastoreCreateParams.Configuration;
+}
+
+export namespace DatastoreCreateParams {
+ /**
+ * Configuration of the datastore. If not provided, default configuration is used.
+ */
+ export interface Configuration {
+ /**
+ * Configuration for document chunking
+ */
+ chunking?: Configuration.Chunking;
+
+ /**
+ * Configuration for HTML Extraction
+ */
+ html_config?: Configuration.HTMLConfig;
+
+ /**
+ * Configuration for document parsing
+ */
+ parsing?: Configuration.Parsing;
+ }
+
+ export namespace Configuration {
+ /**
+ * Configuration for document chunking
+ */
+ export interface Chunking {
+ /**
+ * Chunking mode to use. Options are: `hierarchy_depth`, `hierarchy_heading`,
+ * `static_length`, `page_level`. `hierarchy_depth` groups chunks of the same
+ * hierarchy level or below, additionally merging or splitting based on length
+ * constraints. `hierarchy_heading` splits chunks at every heading in the document
+ * hierarchy, additionally merging or splitting based on length constraints.
+ * `static_length` creates chunks of a fixed length. `page_level` creates chunks
+ * that cannot run over page boundaries.
+ */
+ chunking_mode?: 'hierarchy_depth' | 'hierarchy_heading' | 'static_length' | 'page_level';
+
+ /**
+ * Whether to enable section-based contextualization for chunking
+ */
+ enable_hierarchy_based_contextualization?: boolean;
+
+ /**
+ * Target maximum length of text tokens chunks for chunking. Chunk length may
+ * exceed this value in some edge cases.
+ */
+ max_chunk_length_tokens?: number;
+
+ /**
+ * Target minimum length of chunks in tokens. Must be at least 384 tokens less than
+ * `max_chunk_length_tokens`. Chunk length may be shorter than this value in some
+ * edge cases. Ignored if `chunking_mode` is `page_level`.
+ */
+ min_chunk_length_tokens?: number;
+ }
+
+ /**
+ * Configuration for HTML Extraction
+ */
+ export interface HTMLConfig {
+ /**
+ * Target maximum length of text tokens chunks for chunking. Chunk length may
+ * exceed this value in some edge cases.
+ */
+ max_chunk_length_tokens?: number;
+ }
+
+ /**
+ * Configuration for document parsing
+ */
+ export interface Parsing {
+ /**
+ * Whether to enable table splitting, which splits large tables into smaller tables
+ * with at most `max_split_table_cells` cells each. In each split table, the table
+ * headers are reproduced as the first row(s). This is useful for preserving
+ * context when tables are too large to fit into one chunk.
+ */
+ enable_split_tables?: boolean;
+
+ /**
+ * Mode for figure captioning. Options are `default`, `custom`, or `ignore`. Set to
+ * `ignore` to disable figure captioning. Set to `default` to use the default
+ * figure prompt, which generates a detailed caption for each figure. Set to
+ * `custom` to use a custom prompt.
+ */
+ figure_caption_mode?: 'default' | 'custom' | 'ignore';
+
+ /**
+ * Prompt to use for generating image captions. Must be non-empty if
+ * `figure_caption_mode` is `custom`. Otherwise, must be null.
+ */
+ figure_captioning_prompt?: string;
+
+ /**
+ * Maximum number of cells for split tables. Ignored if `enable_split_tables` is
+ * False.
+ */
+ max_split_table_cells?: number;
+ }
+ }
+}
+
+export interface DatastoreUpdateParams {
+ /**
+ * Configuration of the datastore. If not provided, current configuration is
+ * retained.
+ */
+ configuration?: DatastoreUpdateParams.Configuration;
+
+ /**
+ * Name of the datastore
+ */
+ name?: string;
+}
+
+export namespace DatastoreUpdateParams {
+ /**
+ * Configuration of the datastore. If not provided, current configuration is
+ * retained.
+ */
+ export interface Configuration {
+ /**
+ * Configuration for document chunking
+ */
+ chunking?: Configuration.Chunking;
+
+ /**
+ * Configuration for HTML Extraction
+ */
+ html_config?: Configuration.HTMLConfig;
+
+ /**
+ * Configuration for document parsing
+ */
+ parsing?: Configuration.Parsing;
+ }
+
+ export namespace Configuration {
+ /**
+ * Configuration for document chunking
+ */
+ export interface Chunking {
+ /**
+ * Chunking mode to use. Options are: `hierarchy_depth`, `hierarchy_heading`,
+ * `static_length`, `page_level`. `hierarchy_depth` groups chunks of the same
+ * hierarchy level or below, additionally merging or splitting based on length
+ * constraints. `hierarchy_heading` splits chunks at every heading in the document
+ * hierarchy, additionally merging or splitting based on length constraints.
+ * `static_length` creates chunks of a fixed length. `page_level` creates chunks
+ * that cannot run over page boundaries.
+ */
+ chunking_mode?: 'hierarchy_depth' | 'hierarchy_heading' | 'static_length' | 'page_level';
+
+ /**
+ * Whether to enable section-based contextualization for chunking
+ */
+ enable_hierarchy_based_contextualization?: boolean;
+
+ /**
+ * Target maximum length of text tokens chunks for chunking. Chunk length may
+ * exceed this value in some edge cases.
+ */
+ max_chunk_length_tokens?: number;
+
+ /**
+ * Target minimum length of chunks in tokens. Must be at least 384 tokens less than
+ * `max_chunk_length_tokens`. Chunk length may be shorter than this value in some
+ * edge cases. Ignored if `chunking_mode` is `page_level`.
+ */
+ min_chunk_length_tokens?: number;
+ }
+
+ /**
+ * Configuration for HTML Extraction
+ */
+ export interface HTMLConfig {
+ /**
+ * Target maximum length of text tokens chunks for chunking. Chunk length may
+ * exceed this value in some edge cases.
+ */
+ max_chunk_length_tokens?: number;
+ }
+
+ /**
+ * Configuration for document parsing
+ */
+ export interface Parsing {
+ /**
+ * Whether to enable table splitting, which splits large tables into smaller tables
+ * with at most `max_split_table_cells` cells each. In each split table, the table
+ * headers are reproduced as the first row(s). This is useful for preserving
+ * context when tables are too large to fit into one chunk.
+ */
+ enable_split_tables?: boolean;
+
+ /**
+ * Mode for figure captioning. Options are `default`, `custom`, or `ignore`. Set to
+ * `ignore` to disable figure captioning. Set to `default` to use the default
+ * figure prompt, which generates a detailed caption for each figure. Set to
+ * `custom` to use a custom prompt.
+ */
+ figure_caption_mode?: 'default' | 'custom' | 'ignore';
+
+ /**
+ * Prompt to use for generating image captions. Must be non-empty if
+ * `figure_caption_mode` is `custom`. Otherwise, must be null.
+ */
+ figure_captioning_prompt?: string;
+
+ /**
+ * Maximum number of cells for split tables. Ignored if `enable_split_tables` is
+ * False.
+ */
+ max_split_table_cells?: number;
+ }
+ }
}
export interface DatastoreListParams extends DatastoresPageParams {
@@ -205,22 +671,27 @@ export declare namespace Datastores {
type Datastore as Datastore,
type DatastoreMetadata as DatastoreMetadata,
type ListDatastoresResponse as ListDatastoresResponse,
+ type DatastoreUpdateResponse as DatastoreUpdateResponse,
type DatastoreDeleteResponse as DatastoreDeleteResponse,
type DatastoreResetResponse as DatastoreResetResponse,
DatastoresDatastoresPage as DatastoresDatastoresPage,
type DatastoreCreateParams as DatastoreCreateParams,
+ type DatastoreUpdateParams as DatastoreUpdateParams,
type DatastoreListParams as DatastoreListParams,
};
export {
Documents as Documents,
+ type BaseMetadataFilter as BaseMetadataFilter,
type CompositeMetadataFilter as CompositeMetadataFilter,
type DocumentMetadata as DocumentMetadata,
type IngestionResponse as IngestionResponse,
type ListDocumentsResponse as ListDocumentsResponse,
type DocumentDeleteResponse as DocumentDeleteResponse,
+ type DocumentGetParseResultResponse as DocumentGetParseResultResponse,
DocumentMetadataDocumentsPage as DocumentMetadataDocumentsPage,
type DocumentListParams as DocumentListParams,
+ type DocumentGetParseResultParams as DocumentGetParseResultParams,
type DocumentIngestParams as DocumentIngestParams,
type DocumentSetMetadataParams as DocumentSetMetadataParams,
};
diff --git a/src/resources/datastores/documents.ts b/src/resources/datastores/documents.ts
index f1c7b2a..4a42ee3 100644
--- a/src/resources/datastores/documents.ts
+++ b/src/resources/datastores/documents.ts
@@ -45,6 +45,37 @@ export class Documents extends APIResource {
return this._client.delete(`/datastores/${datastoreId}/documents/${documentId}`, options);
}
+ /**
+ * Get the parse results that are generated during ingestion for a given document.
+ * Retrieving parse results for existing documents ingested before the release of
+ * this endpoint is not supported and will return a 404 error.
+ */
+ getParseResult(
+ datastoreId: string,
+ documentId: string,
+ query?: DocumentGetParseResultParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<DocumentGetParseResultResponse>;
+ getParseResult(
+ datastoreId: string,
+ documentId: string,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<DocumentGetParseResultResponse>;
+ getParseResult(
+ datastoreId: string,
+ documentId: string,
+ query: DocumentGetParseResultParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<DocumentGetParseResultResponse> {
+ if (isRequestOptions(query)) {
+ return this.getParseResult(datastoreId, documentId, {}, query);
+ }
+ return this._client.get(`/datastores/${datastoreId}/documents/${documentId}/parse`, {
+ query,
+ ...options,
+ });
+ }
+
/**
* Ingest a document into a given `Datastore`.
*
@@ -102,6 +133,44 @@ export class Documents extends APIResource {
export class DocumentMetadataDocumentsPage extends DocumentsPage {}
+/**
+ * Defines a custom metadata filter. The expected input is a dict which can have
+ * different operators, fields and values. For example:
+ *
+ * {"field": "title", "operator": "startswith", "value": "hr-"}
+ *
+ * For document_id and date_created the query is built using direct query without
+ * nesting.
+ */
+export interface BaseMetadataFilter {
+ /**
+ * Field name to search for in the metadata
+ */
+ field: string;
+
+ /**
+ * Operator to be used for the filter.
+ */
+ operator:
+ | 'equals'
+ | 'containsany'
+ | 'exists'
+ | 'startswith'
+ | 'gt'
+ | 'gte'
+ | 'lt'
+ | 'lte'
+ | 'notequals'
+ | 'between'
+ | 'wildcard';
+
+ /**
+ * The value to be searched for in the field. In case of exists operator, it is not
+ * needed.
+ */
+ value?: string | number | boolean | Array<string | number | boolean> | null;
+}
+
/**
* "Defines a custom metadata filter as a Composite MetadataFilter. Which can be be
* a list of filters or nested filters.
@@ -110,7 +179,7 @@ export interface CompositeMetadataFilter {
/**
* Filters added to the query for filtering docs
*/
- filters: Array<CompositeMetadataFilter.BaseMetadataFilter | CompositeMetadataFilter>;
+ filters: Array<BaseMetadataFilter | CompositeMetadataFilter>;
/**
* Composite operator to be used to combine filters
@@ -118,45 +187,6 @@ export interface CompositeMetadataFilter {
operator?: 'AND' | 'OR' | 'AND_NOT' | null;
}
-export namespace CompositeMetadataFilter {
- /**
- * Defines a custom metadata filter. The expected input is a dict which can have
- * different operators, fields and values. For example:
- *
- * {"field": "title", "operator": "startswith", "value": "hr-"}
- *
- * For document_id and date_created the query is built using direct query without
- * nesting.
- */
- export interface BaseMetadataFilter {
- /**
- * Field name to search for in the metadata
- */
- field: string;
-
- /**
- * Operator to be used for the filter.
- */
- operator:
- | 'equals'
- | 'containsany'
- | 'exists'
- | 'startswith'
- | 'gt'
- | 'gte'
- | 'lt'
- | 'lte'
- | 'notequals'
- | 'between';
-
- /**
- * The value to be searched for in the field. In case of exists operator, it is not
- * needed.
- */
- value?: string | number | boolean | Array<string | number | boolean> | null;
- }
-}
-
/**
* Document description
*/
@@ -181,7 +211,45 @@ export interface DocumentMetadata {
*/
status: 'pending' | 'processing' | 'retrying' | 'completed' | 'failed' | 'cancelled';
- custom_metadata?: Record<string, boolean | number | string>;
+ /**
+ * Custom metadata for the document, provided by the user at ingestion time.Must be
+ * a JSON-serializable dictionary with string keys and simple primitive values
+ * (str, int, float, bool). The total size must not exceed 2 KB.The strings with
+ * date format must stay in date format or be avoided if not in date format.The
+ * 'custom_metadata.url' field is automatically included in returned attributions
+ * during query time, if provided.The default maximum metadata fields that can be
+ * used is 15, contact support if more is needed.
+ */
+ custom_metadata?: { [key: string]: boolean | number | string };
+
+ /**
+ * A dictionary mapping metadata field names to the configuration to use for each
+ * field.
+ *
+ * - If a metadata field is not present in the dictionary, the default configuration will be used.
+ *
+ * - If the dictionary is not provided, metadata will be added in chunks but will not be retrievable.
+ *
+ *
+ * Limits: - Maximum characters per metadata field (for prompt or rerank): 400
+ *
+ * - Maximum number of metadata fields (for prompt or retrieval): 10
+ *
+ *
+ * Contact support@contextual.ai to request quota increases.
+ */
+ custom_metadata_config?: { [key: string]: DocumentMetadata.CustomMetadataConfig };
+
+ /**
+ * Whether the user has access to this document.
+ */
+ has_access?: boolean;
+
+ /**
+ * Ingestion configuration for the document when the document was ingested. It may
+ * be different from the current datastore configuration.
+ */
+ ingestion_config?: unknown;
/**
* Timestamp of when the document was modified in ISO format.
@@ -189,6 +257,28 @@ export interface DocumentMetadata {
updated_at?: string;
}
+export namespace DocumentMetadata {
+ export interface CustomMetadataConfig {
+ /**
+ * Whether to use in filtering. Defaults to True
+ */
+ filterable?: boolean;
+
+ /**
+ * Whether to add in chunks. Defaults to True. The maximum amount of characters per
+ * metadata field that can be added to the prompt or rerank is 400. The maximum
+ * amount of metadata fields that can be added for prompt or retrieval is 10.
+ * Contact support@contextual.ai to request quota increases.
+ */
+ in_chunks?: boolean;
+
+ /**
+ * Whether to add in response. Defaults to False
+ */
+ returned_in_response?: boolean;
+ }
+}
+
/**
* Response body from POST /data/documents
*/
@@ -223,24 +313,266 @@ export interface ListDocumentsResponse {
export type DocumentDeleteResponse = unknown;
+/**
+ * /parse results response object.
+ */
+export interface DocumentGetParseResultResponse {
+ /**
+ * The name of the file that was uploaded for parsing
+ */
+ file_name: string;
+
+ /**
+ * The current status of the parse job
+ */
+ status: 'pending' | 'processing' | 'retrying' | 'completed' | 'failed' | 'cancelled';
+
+ /**
+ * Document-level metadata parsed from the document
+ */
+ document_metadata?: DocumentGetParseResultResponse.DocumentMetadata;
+
+ /**
+ * The parsed, structured Markdown of the input file. Only present if
+ * `markdown-document` was among the requested output types.
+ */
+ markdown_document?: string;
+
+ /**
+ * Per-page parse results, containing per-page Markdown (if `markdown-per-page` was
+ * requested) and/or per-page `ParsedBlock`s (if `blocks-per-page` was requested).
+ */
+ pages?: Array<DocumentGetParseResultResponse.Page>;
+}
+
+export namespace DocumentGetParseResultResponse {
+ /**
+ * Document-level metadata parsed from the document
+ */
+ export interface DocumentMetadata {
+ /**
+ * Hierarchy of the document, as both heading blocks and a markdown table of
+ * contents
+ */
+ hierarchy?: DocumentMetadata.Hierarchy;
+ }
+
+ export namespace DocumentMetadata {
+ /**
+ * Hierarchy of the document, as both heading blocks and a markdown table of
+ * contents
+ */
+ export interface Hierarchy {
+ /**
+ * Heading blocks which define the hierarchy of the document
+ */
+ blocks?: Array<Hierarchy.Block>;
+
+ /**
+ * Markdown representation of the table of contents for this document
+ */
+ table_of_contents?: string;
+ }
+
+ export namespace Hierarchy {
+ /**
+ * One logical block of content from a parsed page.
+ */
+ export interface Block {
+ /**
+ * Unique ID of the block
+ */
+ id: string;
+
+ /**
+ * The normalized bounding box of the block, as relative percentages of the page
+ * width and height
+ */
+ bounding_box: Block.BoundingBox;
+
+ /**
+ * The Markdown representation of the block
+ */
+ markdown: string;
+
+ /**
+ * The type of the block
+ */
+ type: 'heading' | 'text' | 'table' | 'figure';
+
+ /**
+ * The confidence level of this block categorized as 'low', 'medium', or 'high'.
+ * Only available for blocks of type 'table' currently.
+ */
+ confidence_level?: 'low' | 'medium' | 'high';
+
+ /**
+ * The level of the block in the document hierarchy, starting at 0 for the
+ * root-level title block. Only present if `enable_document_hierarchy` was set to
+ * true in the request.
+ */
+ hierarchy_level?: number;
+
+ /**
+ * The page (0-indexed) that this block belongs to. Only set for heading blocks
+ * that are returned in the table of contents.
+ */
+ page_index?: number;
+
+ /**
+ * The IDs of the parent in the document hierarchy, sorted from root-level to
+ * bottom. For root-level heading blocks, this will be an empty list. Only present
+ * if `enable_document_hierarchy` was set to true in the request.
+ */
+ parent_ids?: Array<string>;
+ }
+
+ export namespace Block {
+ /**
+ * The normalized bounding box of the block, as relative percentages of the page
+ * width and height
+ */
+ export interface BoundingBox {
+ /**
+ * The x-coordinate of the top-left corner of the bounding box
+ */
+ x0: number;
+
+ /**
+ * The x-coordinate of the bottom-right corner of the bounding box
+ */
+ x1: number;
+
+ /**
+ * The y-coordinate of the top-left corner of the bounding box
+ */
+ y0: number;
+
+ /**
+ * The y-coordinate of the bottom-right corner of the bounding box
+ */
+ y1: number;
+ }
+ }
+ }
+ }
+
+ /**
+ * Per-page parse results.
+ */
+ export interface Page {
+ /**
+ * The index of the parsed page (zero-indexed)
+ */
+ index: number;
+
+ /**
+ * The parsed, structured blocks of this page. Present if `blocks-per-page` was
+ * among the requested output types.
+ */
+ blocks?: Array<Page.Block>;
+
+ /**
+ * The parsed, structured Markdown of this page. Present if `markdown-per-page` was
+ * among the requested output types.
+ */
+ markdown?: string;
+ }
+
+ export namespace Page {
+ /**
+ * One logical block of content from a parsed page.
+ */
+ export interface Block {
+ /**
+ * Unique ID of the block
+ */
+ id: string;
+
+ /**
+ * The normalized bounding box of the block, as relative percentages of the page
+ * width and height
+ */
+ bounding_box: Block.BoundingBox;
+
+ /**
+ * The Markdown representation of the block
+ */
+ markdown: string;
+
+ /**
+ * The type of the block
+ */
+ type: 'heading' | 'text' | 'table' | 'figure';
+
+ /**
+ * The confidence level of this block categorized as 'low', 'medium', or 'high'.
+ * Only available for blocks of type 'table' currently.
+ */
+ confidence_level?: 'low' | 'medium' | 'high';
+
+ /**
+ * The level of the block in the document hierarchy, starting at 0 for the
+ * root-level title block. Only present if `enable_document_hierarchy` was set to
+ * true in the request.
+ */
+ hierarchy_level?: number;
+
+ /**
+ * The page (0-indexed) that this block belongs to. Only set for heading blocks
+ * that are returned in the table of contents.
+ */
+ page_index?: number;
+
+ /**
+ * The IDs of the parent in the document hierarchy, sorted from root-level to
+ * bottom. For root-level heading blocks, this will be an empty list. Only present
+ * if `enable_document_hierarchy` was set to true in the request.
+ */
+ parent_ids?: Array<string>;
+ }
+
+ export namespace Block {
+ /**
+ * The normalized bounding box of the block, as relative percentages of the page
+ * width and height
+ */
+ export interface BoundingBox {
+ /**
+ * The x-coordinate of the top-left corner of the bounding box
+ */
+ x0: number;
+
+ /**
+ * The x-coordinate of the bottom-right corner of the bounding box
+ */
+ x1: number;
+
+ /**
+ * The y-coordinate of the top-left corner of the bounding box
+ */
+ y0: number;
+
+ /**
+ * The y-coordinate of the bottom-right corner of the bounding box
+ */
+ y1: number;
+ }
+ }
+ }
+}
+
export interface DocumentListParams extends DocumentsPageParams {
+ /**
+ * Filters documents with the given prefix.
+ */
+ document_name_prefix?: string;
+
/**
* Filters documents whose ingestion job status matches (one of) the provided
* status(es).
*/
- ingestion_job_status?: Array<
- | 'pending'
- | 'processing'
- | 'retrying'
- | 'completed'
- | 'failed'
- | 'cancelled'
- | 'failed_to_provision'
- | 'generating_data'
- | 'training_in_progress'
- | 'failed_to_generate_data'
- | 'provisioning'
- >;
+ ingestion_job_status?: Array<'pending' | 'processing' | 'retrying' | 'completed' | 'failed' | 'cancelled'>;
/**
* Filters documents uploaded at or after specified timestamp.
@@ -253,6 +585,18 @@ export interface DocumentListParams extends DocumentsPageParams {
uploaded_before?: string;
}
+export interface DocumentGetParseResultParams {
+ /**
+ * The desired output format(s) of the parsed file. Must be `markdown-document`,
+ * `markdown-per-page`, and/or `blocks-per-page`. Specify multiple values to get
+ * multiple formats in the response. `markdown-document` parses the whole document
+ * into a single concatenated markdown output. `markdown-per-page` provides
+ * markdown output per page. `blocks-per-page` provides a structured JSON
+ * representation of the content blocks on each page, sorted by reading order.
+ */
+ output_types?: Array<'markdown-document' | 'markdown-per-page' | 'blocks-per-page'>;
+}
+
export interface DocumentIngestParams {
/**
* File to ingest.
@@ -260,21 +604,23 @@ export interface DocumentIngestParams {
file: Core.Uploadable;
/**
- * Metadata in `JSON` format. Metadata should be passed as a nested dictionary
- * structure where:
+ * Metadata request in JSON format. `custom_metadata` is a flat dictionary
+ * containing one or more key-value pairs, where each value must be a primitive
+ * type (`str`, `bool`, `float`, or `int`). The default maximum metadata fields
+ * that can be used is 15, contact support if more is needed.The combined size of
+ * the metadata must not exceed **2 KB** when encoded as JSON.The strings with date
+ * format must stay in date format or be avoided if not in date format.The
+ * `custom_metadata.url` field is automatically included in returned attributions
+ * during query time, if provided.
*
- * - The **metadata type** `custom_metadata` is mapped to a dictionary. - The
- * **dictionary keys** represent metadata attributes. - The **values** can be of
- * type `str`, `bool`, `float`, or `int`.
- *
- * **Example Metadata JSON:**
+ * **Example Request Body:**
*
* ```json
- * metadata = {
- * "custom_metadata": {
- * "field1": "value1",
- * "field2": "value2"
- * }
+ * {
+ * "custom_metadata": {
+ * "topic": "science",
+ * "difficulty": 3
+ * }
* }
* ```
*/
@@ -282,20 +628,72 @@ export interface DocumentIngestParams {
}
export interface DocumentSetMetadataParams {
- custom_metadata?: Record<string, boolean | number | string>;
+ /**
+ * Custom metadata for the document, provided by the user at ingestion time.Must be
+ * a JSON-serializable dictionary with string keys and simple primitive values
+ * (str, int, float, bool). The total size must not exceed 2 KB.The strings with
+ * date format must stay in date format or be avoided if not in date format.The
+ * 'custom_metadata.url' field is automatically included in returned attributions
+ * during query time, if provided.The default maximum metadata fields that can be
+ * used is 15, contact support if more is needed.
+ */
+ custom_metadata?: { [key: string]: boolean | number | string };
+
+ /**
+ * A dictionary mapping metadata field names to the configuration to use for each
+ * field.
+ *
+ * - If a metadata field is not present in the dictionary, the default configuration will be used.
+ *
+ * - If the dictionary is not provided, metadata will be added in chunks but will not be retrievable.
+ *
+ *
+ * Limits: - Maximum characters per metadata field (for prompt or rerank): 400
+ *
+ * - Maximum number of metadata fields (for prompt or retrieval): 10
+ *
+ *
+ * Contact support@contextual.ai to request quota increases.
+ */
+ custom_metadata_config?: { [key: string]: DocumentSetMetadataParams.CustomMetadataConfig };
+}
+
+export namespace DocumentSetMetadataParams {
+ export interface CustomMetadataConfig {
+ /**
+ * Whether to use in filtering. Defaults to True
+ */
+ filterable?: boolean;
+
+ /**
+ * Whether to add in chunks. Defaults to True. The maximum amount of characters per
+ * metadata field that can be added to the prompt or rerank is 400. The maximum
+ * amount of metadata fields that can be added for prompt or retrieval is 10.
+ * Contact support@contextual.ai to request quota increases.
+ */
+ in_chunks?: boolean;
+
+ /**
+ * Whether to add in response. Defaults to False
+ */
+ returned_in_response?: boolean;
+ }
}
Documents.DocumentMetadataDocumentsPage = DocumentMetadataDocumentsPage;
export declare namespace Documents {
export {
+ type BaseMetadataFilter as BaseMetadataFilter,
type CompositeMetadataFilter as CompositeMetadataFilter,
type DocumentMetadata as DocumentMetadata,
type IngestionResponse as IngestionResponse,
type ListDocumentsResponse as ListDocumentsResponse,
type DocumentDeleteResponse as DocumentDeleteResponse,
+ type DocumentGetParseResultResponse as DocumentGetParseResultResponse,
DocumentMetadataDocumentsPage as DocumentMetadataDocumentsPage,
type DocumentListParams as DocumentListParams,
+ type DocumentGetParseResultParams as DocumentGetParseResultParams,
type DocumentIngestParams as DocumentIngestParams,
type DocumentSetMetadataParams as DocumentSetMetadataParams,
};
diff --git a/src/resources/datastores/index.ts b/src/resources/datastores/index.ts
index c341e07..b8e3e30 100644
--- a/src/resources/datastores/index.ts
+++ b/src/resources/datastores/index.ts
@@ -7,20 +7,25 @@ export {
type Datastore,
type DatastoreMetadata,
type ListDatastoresResponse,
+ type DatastoreUpdateResponse,
type DatastoreDeleteResponse,
type DatastoreResetResponse,
type DatastoreCreateParams,
+ type DatastoreUpdateParams,
type DatastoreListParams,
} from './datastores';
export {
DocumentMetadataDocumentsPage,
Documents,
+ type BaseMetadataFilter,
type CompositeMetadataFilter,
type DocumentMetadata,
type IngestionResponse,
type ListDocumentsResponse,
type DocumentDeleteResponse,
+ type DocumentGetParseResultResponse,
type DocumentListParams,
+ type DocumentGetParseResultParams,
type DocumentIngestParams,
type DocumentSetMetadataParams,
} from './documents';
diff --git a/src/resources/generate.ts b/src/resources/generate.ts
index 71ddabd..6a0e75a 100644
--- a/src/resources/generate.ts
+++ b/src/resources/generate.ts
@@ -46,7 +46,7 @@ export interface GenerateCreateParams {
messages: Array<GenerateCreateParams.Message>;
/**
- * The version of the Contextual's GLM to use. Currently, we just have "v1".
+ * The version of the Contextual's GLM to use. Currently, we have `v1` and `v2`.
*/
model: string;
diff --git a/src/resources/index.ts b/src/resources/index.ts
index d60a1a9..d7d75b0 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -27,9 +27,11 @@ export {
type Datastore,
type DatastoreMetadata,
type ListDatastoresResponse,
+ type DatastoreUpdateResponse,
type DatastoreDeleteResponse,
type DatastoreResetResponse,
type DatastoreCreateParams,
+ type DatastoreUpdateParams,
type DatastoreListParams,
} from './datastores/datastores';
export { Generate, type GenerateCreateResponse, type GenerateCreateParams } from './generate';
diff --git a/src/resources/parse.ts b/src/resources/parse.ts
index aa43d94..cffb748 100644
--- a/src/resources/parse.ts
+++ b/src/resources/parse.ts
@@ -358,6 +358,12 @@ export interface ParseJobsResponse {
* Total number of parse jobs
*/
total_jobs: number;
+
+ /**
+ * Next cursor to continue pagination. Omitted if there are no more parse jobs
+ * after these ones.
+ */
+ next_cursor?: string;
}
export namespace ParseJobsResponse {
@@ -409,7 +415,7 @@ export interface ParseCreateParams {
/**
* Threshold number of table cells beyond which large tables are split if
- * `enable_split_tables` is True. Not permitted in `basic` parsing_mode.
+ * `enable_split_tables` is True. Must be null if `enable_split_tables` is False.
*/
max_split_table_cells?: number;
@@ -441,6 +447,23 @@ export interface ParseJobResultsParams {
}
export interface ParseJobsParams {
+ /**
+ * Cursor from the previous call to list parse jobs, used to retrieve the next set
+ * of results
+ */
+ cursor?: string;
+
+ /**
+ * Maximum number of parse jobs to return
+ */
+ limit?: number;
+
+ /**
+ * Filters to only documents uploaded to `/parse` at or after specified UTC
+ * timestamp. If not provided, or if the provided timestamp is before the maximum
+ * parse job retention period (30 days), the maximum retention period will be used
+ * instead.
+ */
uploaded_after?: string;
}
diff --git a/src/resources/rerank.ts b/src/resources/rerank.ts
index ac2fc3d..0cfb788 100644
--- a/src/resources/rerank.ts
+++ b/src/resources/rerank.ts
@@ -8,7 +8,7 @@ export class Rerank extends APIResource {
* Rank a list of documents according to their relevance to a query primarily and
* your custom instructions secondarily. We evaluated the model on instructions for
* recency, document type, source, and metadata, and it can generalize to other
- * instructions as well.
+ * instructions as well. The reranker supports multilinguality.
*
* The total request cannot exceed 400,000 tokens. The combined length of the
* query, instruction and any document with its metadata must not exceed 8,000
@@ -67,8 +67,9 @@ export interface RerankCreateParams {
documents: Array<string>;
/**
- * The version of the reranker to use. Currently, we just have
- * "ctxl-rerank-en-v1-instruct".
+ * The version of the reranker to use. Currently, we have:
+ * "ctxl-rerank-v2-instruct-multilingual",
+ * "ctxl-rerank-v2-instruct-multilingual-mini", "ctxl-rerank-v1-instruct".
*/
model: string;
diff --git a/src/resources/users.ts b/src/resources/users.ts
index 8339bf6..6687e26 100644
--- a/src/resources/users.ts
+++ b/src/resources/users.ts
@@ -59,7 +59,7 @@ export interface InviteUsersResponse {
* Details of the errors occurred while inviting users, where keys are the emails
* and values are the error messages
*/
- error_details: Record<string, string>;
+ error_details: { [key: string]: string };
/**
* List of emails of the invited users
@@ -91,6 +91,11 @@ export namespace ListUsersResponse {
*/
email: string;
+ /**
+ * The user level roles of the user for agent level roles.
+ */
+ agent_level_roles?: Array<'AGENT_LEVEL_USER'>;
+
/**
* The effective roles of the user.
*/
@@ -103,6 +108,7 @@ export namespace ListUsersResponse {
| 'CONTEXTUAL_INTERNAL_STAFF_USER'
| 'TENANT_ADMIN'
| 'SUPER_ADMIN'
+ | 'SERVICE_ACCOUNT'
>;
/**
@@ -110,6 +116,13 @@ export namespace ListUsersResponse {
*/
is_tenant_admin?: boolean;
+ /**
+ * Per agent level roles for the user. If a user is granted any role under
+ * `agent_level_roles`, then the user has that role for all the agents. Only the
+ * roles that need to be updated should be part of this.
+ */
+ per_agent_roles?: Array<User.PerAgentRole>;
+
/**
* The user level roles of the user.
*/
@@ -122,8 +135,31 @@ export namespace ListUsersResponse {
| 'CONTEXTUAL_INTERNAL_STAFF_USER'
| 'TENANT_ADMIN'
| 'SUPER_ADMIN'
+ | 'SERVICE_ACCOUNT'
>;
}
+
+ export namespace User {
+ /**
+ * The schema used to capture agent level roles
+ */
+ export interface PerAgentRole {
+ /**
+ * ID of the agent on which to grant/revoke the role.
+ */
+ agent_id: string;
+
+ /**
+ * When set to true, the roles will be granted o/w revoked.
+ */
+ grant: boolean;
+
+ /**
+ * The roles that are granted/revoked
+ */
+ roles: Array<'AGENT_LEVEL_USER'>;
+ }
+ }
}
/**
@@ -135,11 +171,23 @@ export interface NewUser {
*/
email: string;
+ /**
+ * The user level roles of the user for agent level roles.
+ */
+ agent_level_roles?: Array<'AGENT_LEVEL_USER'>;
+
/**
* Flag indicating if the user is a tenant admin
*/
is_tenant_admin?: boolean;
+ /**
+ * Per agent level roles for the user. If a user is granted any role under
+ * `agent_level_roles`, then the user has that role for all the agents. Only the
+ * roles that need to be updated should be part of this.
+ */
+ per_agent_roles?: Array<NewUser.PerAgentRole>;
+
/**
* The user level roles of the user.
*/
@@ -152,9 +200,32 @@ export interface NewUser {
| 'CONTEXTUAL_INTERNAL_STAFF_USER'
| 'TENANT_ADMIN'
| 'SUPER_ADMIN'
+ | 'SERVICE_ACCOUNT'
>;
}
+export namespace NewUser {
+ /**
+ * The schema used to capture agent level roles
+ */
+ export interface PerAgentRole {
+ /**
+ * ID of the agent on which to grant/revoke the role.
+ */
+ agent_id: string;
+
+ /**
+ * When set to true, the roles will be granted o/w revoked.
+ */
+ grant: boolean;
+
+ /**
+ * The roles that are granted/revoked
+ */
+ roles: Array<'AGENT_LEVEL_USER'>;
+ }
+}
+
export type UserUpdateResponse = unknown;
export type UserDeactivateResponse = unknown;
@@ -165,11 +236,23 @@ export interface UserUpdateParams {
*/
email: string;
+ /**
+ * The user level roles of the user for agent level roles.
+ */
+ agent_level_roles?: Array<'AGENT_LEVEL_USER'>;
+
/**
* Flag indicating if the user is a tenant admin
*/
is_tenant_admin?: boolean;
+ /**
+ * Per agent level roles for the user. If a user is granted any role under
+ * `agent_level_roles`, then the user has that role for all the agents. Only the
+ * roles that need to be updated should be part of this.
+ */
+ per_agent_roles?: Array<UserUpdateParams.PerAgentRole>;
+
/**
* The user level roles of the user.
*/
@@ -182,9 +265,32 @@ export interface UserUpdateParams {
| 'CONTEXTUAL_INTERNAL_STAFF_USER'
| 'TENANT_ADMIN'
| 'SUPER_ADMIN'
+ | 'SERVICE_ACCOUNT'
>;
}
+export namespace UserUpdateParams {
+ /**
+ * The schema used to capture agent level roles
+ */
+ export interface PerAgentRole {
+ /**
+ * ID of the agent on which to grant/revoke the role.
+ */
+ agent_id: string;
+
+ /**
+ * When set to true, the roles will be granted o/w revoked.
+ */
+ grant: boolean;
+
+ /**
+ * The roles that are granted/revoked
+ */
+ roles: Array<'AGENT_LEVEL_USER'>;
+ }
+}
+
export interface UserListParams extends UsersPageParams {
/**
* When set to true, return deactivated users instead.
diff --git a/src/version.ts b/src/version.ts
index d9da9f7..23f967c 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '0.7.0'; // x-release-please-version
+export const VERSION = '0.8.0'; // x-release-please-version
diff --git a/tests/api-resources/agents/agents.test.ts b/tests/api-resources/agents/agents.test.ts
index 35e5685..a70da7c 100644
--- a/tests/api-resources/agents/agents.test.ts
+++ b/tests/api-resources/agents/agents.test.ts
@@ -25,6 +25,10 @@ describe('resource agents', () => {
name: 'xxx',
agent_configs: {
filter_and_rerank_config: {
+ default_metadata_filters: { filters: [], operator: 'AND' },
+ per_datastore_metadata_filters: {
+ 'd49609d9-61c3-4a67-b3bd-5196b10da560': { filters: [], operator: 'AND' },
+ },
rerank_instructions: 'rerank_instructions',
reranker_score_filter_threshold: 0,
top_k_reranked_chunks: 0,
@@ -44,11 +48,18 @@ describe('resource agents', () => {
enable_rerank: true,
should_check_retrieval_need: true,
},
+ reformulation_config: {
+ enable_query_decomposition: true,
+ enable_query_expansion: true,
+ query_decomposition_prompt: 'query_decomposition_prompt',
+ query_expansion_prompt: 'query_expansion_prompt',
+ },
retrieval_config: { lexical_alpha: 0, semantic_alpha: 0, top_k_retrieved_chunks: 0 },
},
datastore_ids: ['182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e'],
description: 'description',
filter_prompt: 'filter_prompt',
+ multiturn_system_prompt: 'multiturn_system_prompt',
no_retrieval_system_prompt: 'no_retrieval_system_prompt',
suggested_queries: ['string'],
system_prompt: 'system_prompt',
@@ -109,6 +120,24 @@ describe('resource agents', () => {
).rejects.toThrow(ContextualAI.NotFoundError);
});
+ test('copy', async () => {
+ const responsePromise = client.agents.copy('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('copy: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ client.agents.copy('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(ContextualAI.NotFoundError);
+ });
+
test('metadata', async () => {
const responsePromise = client.agents.metadata('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e');
const rawResponse = await responsePromise.asResponse();
diff --git a/tests/api-resources/agents/datasets/evaluate.test.ts b/tests/api-resources/agents/datasets/evaluate.test.ts
deleted file mode 100644
index 0315c61..0000000
--- a/tests/api-resources/agents/datasets/evaluate.test.ts
+++ /dev/null
@@ -1,167 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import ContextualAI, { toFile } from 'contextual-client';
-import { Response } from 'node-fetch';
-
-const client = new ContextualAI({
- apiKey: 'My API Key',
- baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
-});
-
-describe('resource evaluate', () => {
- test('create: only required params', async () => {
- const responsePromise = client.agents.datasets.evaluate.create('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
- dataset_name: 'dataset_name',
- dataset_type: 'evaluation_set',
- file: await toFile(Buffer.from('# my file contents'), 'README.md'),
- });
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('create: required and optional params', async () => {
- const response = await client.agents.datasets.evaluate.create('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
- dataset_name: 'dataset_name',
- dataset_type: 'evaluation_set',
- file: await toFile(Buffer.from('# my file contents'), 'README.md'),
- });
- });
-
- test('retrieve: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.evaluate.retrieve('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', 'dataset_name', {
- path: '/_stainless_unknown_path',
- }),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('retrieve: request options and params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.evaluate.retrieve(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- 'dataset_name',
- { batch_size: 1, version: 'version' },
- { path: '/_stainless_unknown_path' },
- ),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('update: only required params', async () => {
- const responsePromise = client.agents.datasets.evaluate.update(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- 'dataset_name',
- { dataset_type: 'evaluation_set', file: await toFile(Buffer.from('# my file contents'), 'README.md') },
- );
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('update: required and optional params', async () => {
- const response = await client.agents.datasets.evaluate.update(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- 'dataset_name',
- { dataset_type: 'evaluation_set', file: await toFile(Buffer.from('# my file contents'), 'README.md') },
- );
- });
-
- test('list', async () => {
- const responsePromise = client.agents.datasets.evaluate.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e');
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('list: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.evaluate.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
- path: '/_stainless_unknown_path',
- }),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('list: request options and params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.evaluate.list(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- { dataset_name: 'dataset_name' },
- { path: '/_stainless_unknown_path' },
- ),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('delete', async () => {
- const responsePromise = client.agents.datasets.evaluate.delete(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- 'dataset_name',
- );
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('delete: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.evaluate.delete('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', 'dataset_name', {
- path: '/_stainless_unknown_path',
- }),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('metadata', async () => {
- const responsePromise = client.agents.datasets.evaluate.metadata(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- 'dataset_name',
- );
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('metadata: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.evaluate.metadata('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', 'dataset_name', {
- path: '/_stainless_unknown_path',
- }),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('metadata: request options and params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.evaluate.metadata(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- 'dataset_name',
- { version: 'version' },
- { path: '/_stainless_unknown_path' },
- ),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-});
diff --git a/tests/api-resources/agents/datasets/tune.test.ts b/tests/api-resources/agents/datasets/tune.test.ts
deleted file mode 100644
index 81b1e40..0000000
--- a/tests/api-resources/agents/datasets/tune.test.ts
+++ /dev/null
@@ -1,167 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import ContextualAI, { toFile } from 'contextual-client';
-import { Response } from 'node-fetch';
-
-const client = new ContextualAI({
- apiKey: 'My API Key',
- baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
-});
-
-describe('resource tune', () => {
- test('create: only required params', async () => {
- const responsePromise = client.agents.datasets.tune.create('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
- dataset_name: 'dataset_name',
- dataset_type: 'tuning_set',
- file: await toFile(Buffer.from('# my file contents'), 'README.md'),
- });
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('create: required and optional params', async () => {
- const response = await client.agents.datasets.tune.create('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
- dataset_name: 'dataset_name',
- dataset_type: 'tuning_set',
- file: await toFile(Buffer.from('# my file contents'), 'README.md'),
- });
- });
-
- test('retrieve: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.tune.retrieve('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', 'dataset_name', {
- path: '/_stainless_unknown_path',
- }),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('retrieve: request options and params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.tune.retrieve(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- 'dataset_name',
- { batch_size: 1, version: 'version' },
- { path: '/_stainless_unknown_path' },
- ),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('update: only required params', async () => {
- const responsePromise = client.agents.datasets.tune.update(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- 'dataset_name',
- { dataset_type: 'tuning_set', file: await toFile(Buffer.from('# my file contents'), 'README.md') },
- );
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('update: required and optional params', async () => {
- const response = await client.agents.datasets.tune.update(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- 'dataset_name',
- { dataset_type: 'tuning_set', file: await toFile(Buffer.from('# my file contents'), 'README.md') },
- );
- });
-
- test('list', async () => {
- const responsePromise = client.agents.datasets.tune.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e');
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('list: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.tune.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
- path: '/_stainless_unknown_path',
- }),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('list: request options and params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.tune.list(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- { dataset_name: 'dataset_name' },
- { path: '/_stainless_unknown_path' },
- ),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('delete', async () => {
- const responsePromise = client.agents.datasets.tune.delete(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- 'dataset_name',
- );
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('delete: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.tune.delete('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', 'dataset_name', {
- path: '/_stainless_unknown_path',
- }),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('metadata', async () => {
- const responsePromise = client.agents.datasets.tune.metadata(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- 'dataset_name',
- );
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('metadata: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.tune.metadata('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', 'dataset_name', {
- path: '/_stainless_unknown_path',
- }),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('metadata: request options and params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.datasets.tune.metadata(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- 'dataset_name',
- { version: 'version' },
- { path: '/_stainless_unknown_path' },
- ),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-});
diff --git a/tests/api-resources/agents/evaluate/evaluate.test.ts b/tests/api-resources/agents/evaluate/evaluate.test.ts
deleted file mode 100644
index e8bb513..0000000
--- a/tests/api-resources/agents/evaluate/evaluate.test.ts
+++ /dev/null
@@ -1,35 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import ContextualAI, { toFile } from 'contextual-client';
-import { Response } from 'node-fetch';
-
-const client = new ContextualAI({
- apiKey: 'My API Key',
- baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
-});
-
-describe('resource evaluate', () => {
- test('create: only required params', async () => {
- const responsePromise = client.agents.evaluate.create('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
- metrics: ['equivalence'],
- });
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('create: required and optional params', async () => {
- const response = await client.agents.evaluate.create('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
- metrics: ['equivalence'],
- evalset_file: await toFile(Buffer.from('# my file contents'), 'README.md'),
- evalset_name: 'evalset_name',
- llm_model_id: 'llm_model_id',
- notes: 'notes',
- override_configuration: 'override_configuration',
- });
- });
-});
diff --git a/tests/api-resources/agents/evaluate/jobs.test.ts b/tests/api-resources/agents/evaluate/jobs.test.ts
deleted file mode 100644
index 65b3251..0000000
--- a/tests/api-resources/agents/evaluate/jobs.test.ts
+++ /dev/null
@@ -1,81 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import ContextualAI from 'contextual-client';
-import { Response } from 'node-fetch';
-
-const client = new ContextualAI({
- apiKey: 'My API Key',
- baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
-});
-
-describe('resource jobs', () => {
- test('list', async () => {
- const responsePromise = client.agents.evaluate.jobs.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e');
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('list: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.evaluate.jobs.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
- path: '/_stainless_unknown_path',
- }),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('cancel', async () => {
- const responsePromise = client.agents.evaluate.jobs.cancel(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- );
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('cancel: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.evaluate.jobs.cancel(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- { path: '/_stainless_unknown_path' },
- ),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('metadata', async () => {
- const responsePromise = client.agents.evaluate.jobs.metadata(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- );
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('metadata: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.evaluate.jobs.metadata(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- { path: '/_stainless_unknown_path' },
- ),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-});
diff --git a/tests/api-resources/agents/query.test.ts b/tests/api-resources/agents/query.test.ts
index 371e6c7..2e541d2 100644
--- a/tests/api-resources/agents/query.test.ts
+++ b/tests/api-resources/agents/query.test.ts
@@ -28,11 +28,26 @@ describe('resource query', () => {
include_retrieval_content_text: true,
retrievals_only: true,
conversation_id: '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- documents_filters: {
- filters: [{ field: 'field1', operator: 'equals', value: 'value1' }],
- operator: 'AND',
- },
+ documents_filters: { filters: [], operator: 'AND' },
llm_model_id: 'llm_model_id',
+ override_configuration: {
+ enable_filter: true,
+ enable_rerank: true,
+ filter_model: 'filter_model',
+ filter_prompt: 'filter_prompt',
+ lexical_alpha: 0,
+ max_new_tokens: 0,
+ model: 'model',
+ rerank_instructions: 'rerank_instructions',
+ reranker: 'reranker',
+ reranker_score_filter_threshold: 0,
+ semantic_alpha: 0,
+ system_prompt: 'system_prompt',
+ temperature: 0,
+ top_k_reranked_chunks: 0,
+ top_k_retrieved_chunks: 0,
+ top_p: 0,
+ },
stream: true,
structured_output: { json_schema: {}, type: 'JSON' },
});
diff --git a/tests/api-resources/agents/tune/jobs.test.ts b/tests/api-resources/agents/tune/jobs.test.ts
deleted file mode 100644
index e3755c3..0000000
--- a/tests/api-resources/agents/tune/jobs.test.ts
+++ /dev/null
@@ -1,81 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import ContextualAI from 'contextual-client';
-import { Response } from 'node-fetch';
-
-const client = new ContextualAI({
- apiKey: 'My API Key',
- baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
-});
-
-describe('resource jobs', () => {
- test('list', async () => {
- const responsePromise = client.agents.tune.jobs.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e');
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('list: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.tune.jobs.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
- path: '/_stainless_unknown_path',
- }),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('delete', async () => {
- const responsePromise = client.agents.tune.jobs.delete(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- );
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('delete: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.tune.jobs.delete(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- { path: '/_stainless_unknown_path' },
- ),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('metadata', async () => {
- const responsePromise = client.agents.tune.jobs.metadata(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- );
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('metadata: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.tune.jobs.metadata(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- { path: '/_stainless_unknown_path' },
- ),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-});
diff --git a/tests/api-resources/agents/tune/models.test.ts b/tests/api-resources/agents/tune/models.test.ts
deleted file mode 100644
index 651f683..0000000
--- a/tests/api-resources/agents/tune/models.test.ts
+++ /dev/null
@@ -1,31 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import ContextualAI from 'contextual-client';
-import { Response } from 'node-fetch';
-
-const client = new ContextualAI({
- apiKey: 'My API Key',
- baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
-});
-
-describe('resource models', () => {
- test('list', async () => {
- const responsePromise = client.agents.tune.models.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e');
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('list: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.tune.models.list('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
- path: '/_stainless_unknown_path',
- }),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-});
diff --git a/tests/api-resources/agents/tune/tune.test.ts b/tests/api-resources/agents/tune/tune.test.ts
deleted file mode 100644
index 88359c4..0000000
--- a/tests/api-resources/agents/tune/tune.test.ts
+++ /dev/null
@@ -1,54 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import ContextualAI, { toFile } from 'contextual-client';
-import { Response } from 'node-fetch';
-
-const client = new ContextualAI({
- apiKey: 'My API Key',
- baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
-});
-
-describe('resource tune', () => {
- test('create', async () => {
- const responsePromise = client.agents.tune.create('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e');
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('create: request options instead of params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.tune.create('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', { path: '/_stainless_unknown_path' }),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-
- test('create: request options and params are passed correctly', async () => {
- // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
- await expect(
- client.agents.tune.create(
- '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
- {
- 'hyperparams[learning_rate]': 0.05,
- 'hyperparams[lora_alpha]': 8,
- 'hyperparams[lora_dropout]': 0,
- 'hyperparams[lora_rank]': 8,
- 'hyperparams[num_epochs]': 1,
- 'hyperparams[warmup_ratio]': 0,
- metadata_file: await toFile(Buffer.from('# my file contents'), 'README.md'),
- sdp_only: true,
- synth_data: true,
- test_dataset_name: 'test_dataset_name',
- test_file: await toFile(Buffer.from('# my file contents'), 'README.md'),
- train_dataset_name: 'train_dataset_name',
- training_file: await toFile(Buffer.from('# my file contents'), 'README.md'),
- },
- { path: '/_stainless_unknown_path' },
- ),
- ).rejects.toThrow(ContextualAI.NotFoundError);
- });
-});
diff --git a/tests/api-resources/datastores/datastores.test.ts b/tests/api-resources/datastores/datastores.test.ts
index f16cfd2..67bbf5f 100644
--- a/tests/api-resources/datastores/datastores.test.ts
+++ b/tests/api-resources/datastores/datastores.test.ts
@@ -21,7 +21,35 @@ describe('resource datastores', () => {
});
test('create: required and optional params', async () => {
- const response = await client.datastores.create({ name: 'name' });
+ const response = await client.datastores.create({
+ name: 'name',
+ configuration: {
+ chunking: {
+ chunking_mode: 'hierarchy_depth',
+ enable_hierarchy_based_contextualization: true,
+ max_chunk_length_tokens: 512,
+ min_chunk_length_tokens: 128,
+ },
+ html_config: { max_chunk_length_tokens: 512 },
+ parsing: {
+ enable_split_tables: true,
+ figure_caption_mode: 'default',
+ figure_captioning_prompt: 'figure_captioning_prompt',
+ max_split_table_cells: 0,
+ },
+ },
+ });
+ });
+
+ test('update', async () => {
+ const responsePromise = client.datastores.update('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {});
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
});
test('list', async () => {
diff --git a/tests/api-resources/datastores/documents.test.ts b/tests/api-resources/datastores/documents.test.ts
index 999c790..1ee9772 100644
--- a/tests/api-resources/datastores/documents.test.ts
+++ b/tests/api-resources/datastores/documents.test.ts
@@ -36,6 +36,7 @@ describe('resource documents', () => {
'182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
{
cursor: 'cursor',
+ document_name_prefix: 'document_name_prefix',
ingestion_job_status: ['pending'],
limit: 1,
uploaded_after: '2019-12-27T18:11:19.117Z',
@@ -71,6 +72,43 @@ describe('resource documents', () => {
).rejects.toThrow(ContextualAI.NotFoundError);
});
+ test('getParseResult', async () => {
+ const responsePromise = client.datastores.documents.getParseResult(
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ );
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('getParseResult: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ client.datastores.documents.getParseResult(
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(ContextualAI.NotFoundError);
+ });
+
+ test('getParseResult: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ client.datastores.documents.getParseResult(
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e',
+ { output_types: ['markdown-document'] },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(ContextualAI.NotFoundError);
+ });
+
test('ingest: only required params', async () => {
const responsePromise = client.datastores.documents.ingest('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
file: await toFile(Buffer.from('# my file contents'), 'README.md'),
@@ -87,7 +125,7 @@ describe('resource documents', () => {
test('ingest: required and optional params', async () => {
const response = await client.datastores.documents.ingest('182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', {
file: await toFile(Buffer.from('# my file contents'), 'README.md'),
- metadata: '{"field1": "value1", "field2": "value2"}}',
+ metadata: 'metadata',
});
});
diff --git a/tests/api-resources/parse.test.ts b/tests/api-resources/parse.test.ts
index 2734ec0..1f71c69 100644
--- a/tests/api-resources/parse.test.ts
+++ b/tests/api-resources/parse.test.ts
@@ -26,7 +26,7 @@ describe('resource parse', () => {
const response = await client.parse.create({
raw_file: await toFile(Buffer.from('# my file contents'), 'README.md'),
enable_document_hierarchy: true,
- enable_split_tables: true,
+ enable_split_tables: false,
figure_caption_mode: 'concise',
max_split_table_cells: 0,
page_range: 'page_range',
@@ -102,7 +102,10 @@ describe('resource parse', () => {
test('jobs: request options and params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.parse.jobs({ uploaded_after: '2019-12-27T18:11:19.117Z' }, { path: '/_stainless_unknown_path' }),
+ client.parse.jobs(
+ { cursor: 'cursor', limit: 1, uploaded_after: '2019-12-27T18:11:19.117Z' },
+ { path: '/_stainless_unknown_path' },
+ ),
).rejects.toThrow(ContextualAI.NotFoundError);
});
});
diff --git a/tests/api-resources/users.test.ts b/tests/api-resources/users.test.ts
index ea67a4d..2f335ce 100644
--- a/tests/api-resources/users.test.ts
+++ b/tests/api-resources/users.test.ts
@@ -21,7 +21,15 @@ describe('resource users', () => {
});
test('update: required and optional params', async () => {
- const response = await client.users.update({ email: 'email', is_tenant_admin: true, roles: ['VISITOR'] });
+ const response = await client.users.update({
+ email: 'email',
+ agent_level_roles: ['AGENT_LEVEL_USER'],
+ is_tenant_admin: true,
+ per_agent_roles: [
+ { agent_id: '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', grant: true, roles: ['AGENT_LEVEL_USER'] },
+ ],
+ roles: ['VISITOR'],
+ });
});
test('list', async () => {
@@ -83,7 +91,17 @@ describe('resource users', () => {
test('invite: required and optional params', async () => {
const response = await client.users.invite({
- new_users: [{ email: 'email', is_tenant_admin: true, roles: ['VISITOR'] }],
+ new_users: [
+ {
+ email: 'email',
+ agent_level_roles: ['AGENT_LEVEL_USER'],
+ is_tenant_admin: true,
+ per_agent_roles: [
+ { agent_id: '182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e', grant: true, roles: ['AGENT_LEVEL_USER'] },
+ ],
+ roles: ['VISITOR'],
+ },
+ ],
tenant_short_name: 'tenant_short_name',
});
});
diff --git a/tests/index.test.ts b/tests/index.test.ts
index 90855ce..1c96dbd 100644
--- a/tests/index.test.ts
+++ b/tests/index.test.ts
@@ -26,13 +26,13 @@ describe('instantiate client', () => {
apiKey: 'My API Key',
});
- test('they are used in the request', () => {
- const { req } = client.buildRequest({ path: '/foo', method: 'post' });
+ test('they are used in the request', async () => {
+ const { req } = await client.buildRequest({ path: '/foo', method: 'post' });
expect((req.headers as Headers)['x-my-default-header']).toEqual('2');
});
- test('can ignore `undefined` and leave the default', () => {
- const { req } = client.buildRequest({
+ test('can ignore `undefined` and leave the default', async () => {
+ const { req } = await client.buildRequest({
path: '/foo',
method: 'post',
headers: { 'X-My-Default-Header': undefined },
@@ -40,8 +40,8 @@ describe('instantiate client', () => {
expect((req.headers as Headers)['x-my-default-header']).toEqual('2');
});
- test('can be removed with `null`', () => {
- const { req } = client.buildRequest({
+ test('can be removed with `null`', async () => {
+ const { req } = await client.buildRequest({
path: '/foo',
method: 'post',
headers: { 'X-My-Default-Header': null },
@@ -188,6 +188,28 @@ describe('instantiate client', () => {
const client = new ContextualAI({ apiKey: 'My API Key' });
expect(client.baseURL).toEqual('https://api.contextual.ai/v1');
});
+
+ test('in request options', () => {
+ const client = new ContextualAI({ apiKey: 'My API Key' });
+ expect(client.buildURL('/foo', null, 'http://localhost:5000/option')).toEqual(
+ 'http://localhost:5000/option/foo',
+ );
+ });
+
+ test('in request options overridden by client options', () => {
+ const client = new ContextualAI({ apiKey: 'My API Key', baseURL: 'http://localhost:5000/client' });
+ expect(client.buildURL('/foo', null, 'http://localhost:5000/option')).toEqual(
+ 'http://localhost:5000/client/foo',
+ );
+ });
+
+ test('in request options overridden by env variable', () => {
+ process.env['CONTEXTUAL_AI_BASE_URL'] = 'http://localhost:5000/env';
+ const client = new ContextualAI({ apiKey: 'My API Key' });
+ expect(client.buildURL('/foo', null, 'http://localhost:5000/option')).toEqual(
+ 'http://localhost:5000/env/foo',
+ );
+ });
});
test('maxRetries option is correctly set', () => {
@@ -218,20 +240,20 @@ describe('request building', () => {
const client = new ContextualAI({ apiKey: 'My API Key' });
describe('Content-Length', () => {
- test('handles multi-byte characters', () => {
- const { req } = client.buildRequest({ path: '/foo', method: 'post', body: { value: '—' } });
+ test('handles multi-byte characters', async () => {
+ const { req } = await client.buildRequest({ path: '/foo', method: 'post', body: { value: '—' } });
expect((req.headers as Record)['content-length']).toEqual('20');
});
- test('handles standard characters', () => {
- const { req } = client.buildRequest({ path: '/foo', method: 'post', body: { value: 'hello' } });
+ test('handles standard characters', async () => {
+ const { req } = await client.buildRequest({ path: '/foo', method: 'post', body: { value: 'hello' } });
expect((req.headers as Record)['content-length']).toEqual('22');
});
});
describe('custom headers', () => {
- test('handles undefined', () => {
- const { req } = client.buildRequest({
+ test('handles undefined', async () => {
+ const { req } = await client.buildRequest({
path: '/foo',
method: 'post',
body: { value: 'hello' },
diff --git a/yarn.lock b/yarn.lock
index bb17942..2bcc59e 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -852,19 +852,19 @@
integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==
"@types/node-fetch@^2.6.4":
- version "2.6.4"
- resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.6.4.tgz#1bc3a26de814f6bf466b25aeb1473fa1afe6a660"
- integrity sha512-1ZX9fcN4Rvkvgv4E6PAY5WXUFWFcRWxZa3EW83UjycOB9ljJCedb2CupIP4RZMEwF/M3eTcCihbBRgwtGbg5Rg==
+ version "2.6.13"
+ resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.6.13.tgz#e0c9b7b5edbdb1b50ce32c127e85e880872d56ee"
+ integrity sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==
dependencies:
"@types/node" "*"
- form-data "^3.0.0"
+ form-data "^4.0.4"
"@types/node@*":
- version "20.10.5"
- resolved "https://registry.yarnpkg.com/@types/node/-/node-20.10.5.tgz#47ad460b514096b7ed63a1dae26fad0914ed3ab2"
- integrity sha512-nNPsNE65wjMxEKI93yOP+NPGGBJz/PoN3kZsVLee0XMiJolxSekEVD8wRwBUBqkwc7UWop0edW50yrCQW4CyRw==
+ version "24.3.0"
+ resolved "https://registry.yarnpkg.com/@types/node/-/node-24.3.0.tgz#89b09f45cb9a8ee69466f18ee5864e4c3eb84dec"
+ integrity sha512-aPTXCrfwnDLj4VvXrm+UUCQjNEvJgNA8s5F1cvwQU+3KNltTOkBm1j30uNLyqqPNe7gE3KFzImYoZEfLhp4Yow==
dependencies:
- undici-types "~5.26.4"
+ undici-types "~7.10.0"
"@types/node@^18.11.18":
version "18.11.18"
@@ -1097,7 +1097,7 @@ array-union@^2.1.0:
asynckit@^0.4.0:
version "0.4.0"
resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
- integrity sha1-x57Zf380y48robyXkLzDZkdLS3k=
+ integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==
babel-jest@^29.7.0:
version "29.7.0"
@@ -1234,6 +1234,14 @@ bundle-name@^3.0.0:
dependencies:
run-applescript "^5.0.0"
+call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6"
+ integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==
+ dependencies:
+ es-errors "^1.3.0"
+ function-bind "^1.1.2"
+
callsites@^3.0.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73"
@@ -1433,7 +1441,7 @@ define-lazy-prop@^3.0.0:
delayed-stream@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
- integrity sha1-3zrhmayt+31ECqrgsp4icrJOxhk=
+ integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==
depd@^1.1.2:
version "1.1.2"
@@ -1469,6 +1477,15 @@ doctrine@^3.0.0:
dependencies:
esutils "^2.0.2"
+dunder-proto@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a"
+ integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==
+ dependencies:
+ call-bind-apply-helpers "^1.0.1"
+ es-errors "^1.3.0"
+ gopd "^1.2.0"
+
electron-to-chromium@^1.4.601:
version "1.4.614"
resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.614.tgz#2fe789d61fa09cb875569f37c309d0c2701f91c0"
@@ -1491,6 +1508,33 @@ error-ex@^1.3.1:
dependencies:
is-arrayish "^0.2.1"
+es-define-property@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa"
+ integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==
+
+es-errors@^1.3.0:
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f"
+ integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==
+
+es-object-atoms@^1.0.0, es-object-atoms@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1"
+ integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==
+ dependencies:
+ es-errors "^1.3.0"
+
+es-set-tostringtag@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d"
+ integrity sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==
+ dependencies:
+ es-errors "^1.3.0"
+ get-intrinsic "^1.2.6"
+ has-tostringtag "^1.0.2"
+ hasown "^2.0.2"
+
escalade@^3.1.1:
version "3.1.1"
resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.1.tgz#d8cfdc7000965c5a0174b4a82eaa5c0552742e40"
@@ -1780,13 +1824,15 @@ form-data-encoder@1.7.2:
resolved "https://registry.yarnpkg.com/form-data-encoder/-/form-data-encoder-1.7.2.tgz#1f1ae3dccf58ed4690b86d87e4f57c654fbab040"
integrity sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==
-form-data@^3.0.0:
- version "3.0.1"
- resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.1.tgz#ebd53791b78356a99af9a300d4282c4d5eb9755f"
- integrity sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==
+form-data@^4.0.4:
+ version "4.0.4"
+ resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.4.tgz#784cdcce0669a9d68e94d11ac4eea98088edd2c4"
+ integrity sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==
dependencies:
asynckit "^0.4.0"
combined-stream "^1.0.8"
+ es-set-tostringtag "^2.1.0"
+ hasown "^2.0.2"
mime-types "^2.1.12"
formdata-node@^4.3.2:
@@ -1822,11 +1868,35 @@ get-caller-file@^2.0.5:
resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e"
integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==
+get-intrinsic@^1.2.6:
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01"
+ integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==
+ dependencies:
+ call-bind-apply-helpers "^1.0.2"
+ es-define-property "^1.0.1"
+ es-errors "^1.3.0"
+ es-object-atoms "^1.1.1"
+ function-bind "^1.1.2"
+ get-proto "^1.0.1"
+ gopd "^1.2.0"
+ has-symbols "^1.1.0"
+ hasown "^2.0.2"
+ math-intrinsics "^1.1.0"
+
get-package-type@^0.1.0:
version "0.1.0"
resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a"
integrity sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==
+get-proto@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1"
+ integrity sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==
+ dependencies:
+ dunder-proto "^1.0.1"
+ es-object-atoms "^1.0.0"
+
get-stdin@^8.0.0:
version "8.0.0"
resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-8.0.0.tgz#cbad6a73feb75f6eeb22ba9e01f89aa28aa97a53"
@@ -1887,6 +1957,11 @@ globby@^11.1.0:
merge2 "^1.4.1"
slash "^3.0.0"
+gopd@^1.2.0:
+ version "1.2.0"
+ resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1"
+ integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==
+
graceful-fs@^4.2.9:
version "4.2.11"
resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3"
@@ -1907,6 +1982,18 @@ has-flag@^4.0.0:
resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b"
integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==
+has-symbols@^1.0.3, has-symbols@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338"
+ integrity sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==
+
+has-tostringtag@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc"
+ integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==
+ dependencies:
+ has-symbols "^1.0.3"
+
hasown@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c"
@@ -1914,6 +2001,13 @@ hasown@^2.0.0:
dependencies:
function-bind "^1.1.2"
+hasown@^2.0.2:
+ version "2.0.2"
+ resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003"
+ integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==
+ dependencies:
+ function-bind "^1.1.2"
+
html-escaper@^2.0.0:
version "2.0.2"
resolved "https://registry.yarnpkg.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453"
@@ -2611,6 +2705,11 @@ makeerror@1.0.12:
dependencies:
tmpl "1.0.5"
+math-intrinsics@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.yarnpkg.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9"
+ integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==
+
merge-stream@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60"
@@ -2629,17 +2728,17 @@ micromatch@^4.0.4:
braces "^3.0.3"
picomatch "^2.3.1"
-mime-db@1.51.0:
- version "1.51.0"
- resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.51.0.tgz#d9ff62451859b18342d960850dc3cfb77e63fb0c"
- integrity sha512-5y8A56jg7XVQx2mbv1lu49NR4dokRnhZYTtL+KGfaa27uq4pSTXkwQkFJl4pkRMyNFz/EtYDSkiiEHx3F7UN6g==
+mime-db@1.52.0:
+ version "1.52.0"
+ resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70"
+ integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
mime-types@^2.1.12:
- version "2.1.34"
- resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.34.tgz#5a712f9ec1503511a945803640fafe09d3793c24"
- integrity sha512-6cP692WwGIs9XXdOO4++N+7qjqv0rqxxVvJ3VHPh/Sc9mVZcQP+ZGhkKiTvWMQRr2tbHkJP/Yn7Y0npb3ZBs4A==
+ version "2.1.35"
+ resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a"
+ integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
dependencies:
- mime-db "1.51.0"
+ mime-db "1.52.0"
mimic-fn@^2.1.0:
version "2.1.0"
@@ -2691,9 +2790,9 @@ node-domexception@1.0.0:
integrity sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==
node-fetch@^2.6.7:
- version "2.6.11"
- resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.11.tgz#cde7fc71deef3131ef80a738919f999e6edfff25"
- integrity sha512-4I6pdBY1EthSqDmJkiNk3JIT8cswwR9nfeW/cPdUagJYEQG7R95WRH74wpz7ma8Gh/9dI9FP+OU+0E4FvtA55w==
+ version "2.7.0"
+ resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.7.0.tgz#d0f0fa6e3e2dc1d27efcd8ad99d550bda94d187d"
+ integrity sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==
dependencies:
whatwg-url "^5.0.0"
@@ -3230,7 +3329,7 @@ to-regex-range@^5.0.1:
tr46@~0.0.3:
version "0.0.3"
resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a"
- integrity sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o=
+ integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==
ts-api-utils@^1.0.1:
version "1.3.0"
@@ -3332,10 +3431,10 @@ typescript@^4.8.2:
resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.9.5.tgz#095979f9bcc0d09da324d58d03ce8f8374cbe65a"
integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==
-undici-types@~5.26.4:
- version "5.26.5"
- resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
- integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==
+undici-types@~7.10.0:
+ version "7.10.0"
+ resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-7.10.0.tgz#4ac2e058ce56b462b056e629cc6a02393d3ff350"
+ integrity sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag==
untildify@^4.0.0:
version "4.0.0"
@@ -3391,12 +3490,12 @@ web-streams-polyfill@4.0.0-beta.1:
webidl-conversions@^3.0.0:
version "3.0.1"
resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871"
- integrity sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE=
+ integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==
whatwg-url@^5.0.0:
version "5.0.0"
resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d"
- integrity sha1-lmRU6HZUYuN2RNNib2dCzotwll0=
+ integrity sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==
dependencies:
tr46 "~0.0.3"
webidl-conversions "^3.0.0"