Creates an AWS Bedrock ModelProvider with the provided ModelApi.
import {
AmazonTitanTextApi,
createAwsBedrockModelProvider
} from "generative-ts";
// Bedrock supports many different APIs and models. See below for full list.
const titanText = createAwsBedrockModelProvider({
api: AmazonTitanTextApi,
modelId: "amazon.titan-text-express-v1",
// If your code is running in an AWS environment (e.g., Lambda), authorization happens automatically. Otherwise, explicitly pass `auth`.
});
const response = await titanText.sendRequest({
$prompt: "Brief history of NY Mets:"
// all other options for the specified `api` available here
});
console.log(response.results[0]?.outputText);
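When running outside of an AWS environment, credentials can be passed explicitly. A minimal sketch, assuming the auth object carries the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY fields documented below; check the provider's auth type for the exact shape:
const titanTextWithAuth = createAwsBedrockModelProvider({
  api: AmazonTitanTextApi,
  modelId: "amazon.titan-text-express-v1",
  auth: {
    AWS_ACCESS_KEY_ID: "your-access-key-id",         // placeholder; assumed field name
    AWS_SECRET_ACCESS_KEY: "your-secret-access-key", // placeholder; assumed field name
  },
});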
import {
Ai21Jurassic2Api,
AmazonTitanTextApi,
CohereGenerateApi,
createAwsBedrockModelProvider,
Llama3ChatApi,
MistralBedrockApi,
} from "generative-ts";
const titanText = createAwsBedrockModelProvider({
api: AmazonTitanTextApi,
modelId: "amazon.titan-text-express-v1",
});
const cohereCommand = createAwsBedrockModelProvider({
api: CohereGenerateApi,
modelId: "cohere.command-text-v14",
});
const llama3 = createAwsBedrockModelProvider({
api: Llama3ChatApi,
modelId: "meta.llama3-8b-instruct-v1:0",
});
const mistral = createAwsBedrockModelProvider({
api: MistralBedrockApi,
modelId: "mistral.mistral-7b-instruct-v0:2",
});
const jurassic = createAwsBedrockModelProvider({
api: Ai21Jurassic2Api,
modelId: "ai21.j2-mid-v1",
});
const params = { $prompt: "Brief history of NY Mets:" };
const responses = await Promise.all([
titanText.sendRequest(params),
cohereCommand.sendRequest(params),
llama3.sendRequest(params),
mistral.sendRequest(params),
jurassic.sendRequest(params),
]);
Creates a Cohere ModelProvider with the CohereChatApi or legacy CohereGenerateApi
import { createCohereModelProvider } from "generative-ts";
const commandR = createCohereModelProvider({
modelId: "command-r-plus", // Cohere defined model ID
// you can explicitly pass auth here, otherwise by default it is read from process.env
});
const response = await commandR.sendRequest({
$prompt: "Brief History of NY Mets:",
preamble: "Talk like Jafar from Aladdin",
// all other Cohere /chat options available here
});
console.log(response.text);
To use the legacy generate API, pass CohereGenerateApi explicitly:
import { createCohereModelProvider, CohereGenerateApi } from "generative-ts";
const command = createCohereModelProvider({
api: CohereGenerateApi,
modelId: "command",
});
const response = await command.sendRequest({
$prompt: "Brief History of NY Mets:",
});
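Passing auth explicitly instead of reading it from process.env might look like the sketch below; the COHERE_API_KEY field name is an assumption mirroring Cohere's conventional environment variable:
const commandRWithAuth = createCohereModelProvider({
  modelId: "command-r-plus",
  auth: { COHERE_API_KEY: "your-cohere-api-key" }, // assumed field name
});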
Creates a Groq ModelProvider with the OpenAiChatApi
import { createGroqModelProvider } from "generative-ts";
const llama3 = createGroqModelProvider({
modelId: "llama3-70b-8192", // Groq defined model ID
// you can explicitly pass auth here, otherwise by default it is read from process.env
});
const response = await llama3.sendRequest({
$prompt: "Brief History of NY Mets:"
// all other OpenAI ChatCompletion options available here (Groq uses the OpenAI ChatCompletion API for all the models it hosts)
});
console.log(response.choices[0]?.message.content);
Create an API account at Groq
Throws if no auth is passed and GROQ_API_KEY is not found in process.env.
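To avoid the throw, auth can be passed explicitly. A sketch assuming the auth object mirrors the GROQ_API_KEY environment variable named above:
const llama3WithAuth = createGroqModelProvider({
  modelId: "llama3-70b-8192",
  auth: { GROQ_API_KEY: "your-groq-api-key" }, // assumed field name, mirroring the env var
});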
Creates a Huggingface Inference ModelProvider with the specified ModelApi
import {
createHuggingfaceInferenceModelProvider,
HfTextGenerationTaskApi
} from "generative-ts";
// Huggingface Inference supports many different APIs and models. See below for full list.
const gpt2 = createHuggingfaceInferenceModelProvider({
api: HfTextGenerationTaskApi,
modelId: "gpt2",
// you can explicitly pass auth here, otherwise by default it is read from process.env
});
const response = await gpt2.sendRequest({
$prompt: "Hello,"
// all other options for the specified `api` available here
});
console.log(response[0]?.generated_text);
Throws if no auth is passed and HUGGINGFACE_API_TOKEN is not found in process.env.
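Likewise, explicit auth avoids the throw. A sketch assuming the auth object mirrors the HUGGINGFACE_API_TOKEN environment variable named above:
const gpt2WithAuth = createHuggingfaceInferenceModelProvider({
  api: HfTextGenerationTaskApi,
  modelId: "gpt2",
  auth: { HUGGINGFACE_API_TOKEN: "hf_your_token" }, // assumed field name, mirroring the env var
});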
Creates an LMStudio ModelProvider with the OpenAiChatApi
import { createLmStudioModelProvider } from "generative-ts";
const llama3 = createLmStudioModelProvider({
modelId: "lmstudio-community/Meta-Llama-3-70B-Instruct-GGUF", // a ID of a model you have downloaded in LMStudio
});
const response = await llama3.sendRequest({
$prompt: "Brief History of NY Mets:"
// all other OpenAI ChatCompletion options available here (LMStudio uses the OpenAI ChatCompletion API for all the models it hosts)
});
console.log(response.choices[0]?.message.content);
Follow LMStudio's instructions to set up the LMStudio local server.
Creates a Mistral ModelProvider with the MistralAiApi
import { createMistralModelProvider } from "generative-ts";
const mistralLarge = createMistralModelProvider({
modelId: "mistral-large-latest", // Mistral defined model ID
// you can explicitly pass auth here, otherwise by default it is read from process.env
});
const response = await mistralLarge.sendRequest({
$prompt: "Brief History of NY Mets:"
// all other Mistral ChatCompletion API options available here
});
console.log(response.choices[0]?.message.content);
Create an API account at Mistral
Throws if no auth is passed and MISTRAL_API_KEY is not found in process.env.
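Explicit auth avoids the throw. A sketch assuming the auth object mirrors the MISTRAL_API_KEY environment variable named above:
const mistralLargeWithAuth = createMistralModelProvider({
  modelId: "mistral-large-latest",
  auth: { MISTRAL_API_KEY: "your-mistral-api-key" }, // assumed field name, mirroring the env var
});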
Creates an OpenAI ModelProvider with the OpenAiChatApi
import { createOpenAiChatModelProvider } from "generative-ts";
const gpt = createOpenAiChatModelProvider({
modelId: "gpt-4-turbo", // OpenAI defined model ID
// you can explicitly pass auth here, otherwise by default it is read from process.env
});
const response = await gpt.sendRequest({
$prompt: "Brief History of NY Mets:",
max_tokens: 100,
// all other OpenAI ChatCompletion options available here
});
console.log(response.choices[0]?.message.content);
Create an API account at OpenAI
Throws if no auth is passed and OPENAI_API_KEY is not found in process.env.
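Explicit auth avoids the throw. A sketch assuming the auth object mirrors the OPENAI_API_KEY environment variable named above:
const gptWithAuth = createOpenAiChatModelProvider({
  modelId: "gpt-4-turbo",
  auth: { OPENAI_API_KEY: "sk-your-key" }, // assumed field name, mirroring the env var
});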
Creates a Google Cloud VertexAI ModelProvider with the GoogleGeminiApi.
import { createVertexAiModelProvider } from "@packages/gcloud-vertex-ai";
const gemini = await createVertexAiModelProvider({
modelId: "gemini-1.0-pro", // VertexAI defined model ID
// you can explicitly pass auth here, otherwise by default it is read from process.env
});
const response = await gemini.sendRequest({
$prompt: "Brief History of NY Mets:",
// all other Gemini options available here
});
console.log(response.data.candidates[0]);
Enable VertexAI in your Google Cloud Console. Note: VertexAI is currently only available in certain regions.
Optional auth?: VertexAiAuthConfig. Authorization configuration for VertexAI. If not supplied, it will be loaded from the environment.
Optional client?: HttpClient<THttpClientOptions>. HTTP client to use for requests. If not supplied, a client implementing Google Cloud Application Default Credentials will be used.
The model ID as defined by Google Cloud VertexAI.
The VertexAI Model Provider
Throws if no auth is passed and GCLOUD_LOCATION or GCLOUD_PROJECT_ID are not found in process.env.
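Explicit auth avoids the throw. A sketch assuming VertexAiAuthConfig exposes fields mirroring the environment variables named above:
const geminiWithAuth = await createVertexAiModelProvider({
  modelId: "gemini-1.0-pro",
  auth: {
    GCLOUD_LOCATION: "us-central1",  // assumed field name, mirroring the env var
    GCLOUD_PROJECT_ID: "my-project", // assumed field name, mirroring the env var
  },
});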
a typescript library for building LLM applications+agents
To run examples and integration/e2e tests, create an .env file by running cp .env.example .env and then add values where necessary.
The "main" generative-ts package and the scoped @generative-ts packages both are controlled by the generative-ts npm organization. Releases are published via circleci job upon pushes of tags that have a name starting with release/. The job requires an NPM token that has publishing permissions to both generative-ts and @generative-ts. Currently this is a "granular" token set to expire every 30 days, created by @jnaglick, set in a circleci context.
Optional AWS_ACCESS_KEY_ID: The AWS Access Key ID
Optional AWS_SECRET_ACCESS_KEY: The AWS Secret Access Key
Huggingface Conversational Task
Huggingface Text Generation Task
Class FnTemplate<TVars>
Implementation of the Template interface using a TypeScript function
Type Parameters
TVars
Implements
Template
Constructors
constructor
Returns FnTemplate<TVars>
Properties
Readonly source
Methods
render
Returns string
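A minimal usage sketch, assuming FnTemplate is exported from the package root, that the constructor takes the rendering function (exposed as the readonly source property), and that render applies it to the supplied variables:
import { FnTemplate } from "generative-ts";
const promptTemplate = new FnTemplate(
  // the template function: maps variables to a prompt string
  ({ team }: { team: string }) => `Brief history of ${team}:`,
);
const prompt = promptTemplate.render({ team: "NY Mets" });
console.log(prompt); // "Brief history of NY Mets:"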