Commit

add wpm config, --as-sys, --bump-sys

mattvr committed Jul 3, 2023
1 parent a405f2d commit d8e16be

Showing 3 changed files with 94 additions and 43 deletions.
6 changes: 4 additions & 2 deletions lib/data.ts
@@ -1,7 +1,7 @@
 import { ChatCompletionRequest } from "./ai-types.ts";
 import { genDescriptiveNameForChat } from "./prompts.ts";
 
-export const VERSION = "0.3.0";
+export const VERSION = "0.3.1";
 export const AUTO_UPDATE_PROBABILITY = 0.1;
 
 export type Config = {
@@ -14,6 +14,7 @@ export type Config = {
   model?: string;
   systemPrompt?: string;
   openAiApiKey?: string;
+  wpm?: number;
 };
@@ -26,6 +27,7 @@ export const DEFAULT_CONFIG: Config = {
   model: undefined,
   systemPrompt: undefined,
   openAiApiKey: undefined,
+  wpm: undefined,
 };
 
 let cachedConfig: Config | null = null;
@@ -151,7 +153,7 @@ const meta_write = async (
     `${await getOrCreateHistoryPath()}/${latestName}.json`;
   let finalFullPath = latestFullPath;
 
-  let hasDescriptiveName = !isNewOrName && config?.hasDescriptiveName;
+  let hasDescriptiveName = !isNewOrName || config?.hasDescriptiveName;
   if (!hasDescriptiveName && req.messages.length >= 5) {
     // Write out a descriptive name for continued chats of a certain length
     const descName = await genDescriptiveNameForChat(req);
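The only behavioral change in lib/data.ts (besides the version bump and the new wpm field) is the && -> || flip on hasDescriptiveName. A minimal sketch enumerating the four cases (illustrative, not repo code):

  for (const isNewOrName of [true, false]) {
    for (const hasName of [true, false]) {
      const oldValue = !isNewOrName && hasName; // before this commit
      const newValue = !isNewOrName || hasName; // after this commit
      console.log({ isNewOrName, hasName, oldValue, newValue });
    }
  }

Under ||, every continued chat (!isNewOrName) now counts as already named, so the generation branch shown above only fires for new or explicitly named chats that still lack a descriptive name.
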
65 changes: 39 additions & 26 deletions lib/prompts.ts
@@ -1,37 +1,50 @@
-import { ChatCompletionRequest, getChatResponse_withRetries } from "./ai.ts";
+import { getChatResponse_withRetries } from "./ai.ts";
+import { ChatCompletionRequest } from "./ai-types.ts";
 
-export const genDescriptiveNameForChat = async (req: ChatCompletionRequest): Promise<string | null> => {
+export const genDescriptiveNameForChat = async (
+  req: ChatCompletionRequest,
+): Promise<string | null> => {
   // This function generates a descriptive name from a chat for your history
   // e.g. if you're talking about which car to buy, it will generate a name like "car-buying"
 
   const newReq = {
-    ...req, messages: [
-      ...req.messages.filter(m => m.role !== 'system'),
+    ...req,
+    messages: [
+      ...req.messages.filter((m) => m.role !== "system"),
       {
-        role: 'system' as const,
-        content: `[IMPORTANT INSTRUCTION] Response ONLY with a short, descriptive, hyphenated name that describes the above conversation, in the format: my-chat-name`
-      }
+        role: "system" as const,
+        content:
+          `[IMPORTANT INSTRUCTION] Response ONLY with a short, descriptive, hyphenated name that describes the above conversation, in the format: my-chat-name`,
+      },
     ],
-    model: 'gpt-3.5-turbo' // use turbo as its cheaper/faster
-  }
-  const chatName = await getChatResponse_withRetries(newReq)
-  return chatName
-}
+    model: "gpt-3.5-turbo", // use turbo as its cheaper/faster
+  };
+  const chatName = await getChatResponse_withRetries(newReq);
+  return chatName;
+};
 
-export const setExecutableCmdParamsForChat = (req: ChatCompletionRequest): ChatCompletionRequest => {
-  req.messages = req.messages.filter(m => m.role !== 'system') // other system messages tend to conflict
+export const setExecutableCmdParamsForChat = (
+  req: ChatCompletionRequest,
+): ChatCompletionRequest => {
+  req.messages = req.messages.filter((m) => m.role !== "system"); // other system messages tend to conflict
   req.messages.push({
-    role: 'system',
-    content: `[IMPORTANT INSTRUCTION] Reply ONLY with an executable shell command(s) for the given prompt and no other text. (OS: ${Deno.build.os}} Shell: ${Deno.env.get('SHELL') ?? 'unknown'})`
-  })
-  return req
-}
+    role: "system",
+    content:
+      `[IMPORTANT INSTRUCTION] Reply ONLY with an executable shell command(s) for the given prompt and no other text. (OS: ${Deno.build.os}} Shell: ${
+        Deno.env.get("SHELL") ?? "unknown"
+      })`,
+  });
+  return req;
+};
 
-export const setCodeCmdParamsForChat = (req: ChatCompletionRequest): ChatCompletionRequest => {
-  req.messages = req.messages.filter(m => m.role !== 'system') // other system messages tend to conflict
+export const setCodeCmdParamsForChat = (
+  req: ChatCompletionRequest,
+): ChatCompletionRequest => {
+  req.messages = req.messages.filter((m) => m.role !== "system"); // other system messages tend to conflict
   req.messages.push({
-    role: 'system',
-    content: `[IMPORTANT INSTRUCTION] Reply ONLY with code (and comments) for the given prompt and no other text or chat.`
-  })
-  return req
-}
+    role: "system",
+    content:
+      `[IMPORTANT INSTRUCTION] Reply ONLY with code (and comments) for the given prompt and no other text or chat.`,
+  });
+  return req;
+};
66 changes: 51 additions & 15 deletions mod.ts
@@ -81,6 +81,10 @@ const args = parse(Deno.args, {
 
     // Debug (print debug info)
     "debug",
+
+    // System (primary input treated as system prompt)
+    "as-system",
+    "as-sys",
   ],
   string: [
     // Name (select a conversation from history to use)
@@ -112,7 +116,6 @@ const args = parse(Deno.args, {
 // --- Parse Args ---
 const DEFAULT_MODEL = "gpt-3.5-turbo";
 const DEFAULT_WPM = 800;
-const DEFAULT_FAST_WPM = 1200;
 const AVG_CHARS_PER_WORD = 4.8;
 
 const help = args.help;
@@ -131,18 +134,20 @@ const slice = args.s || args.slice;
 const dump = args.dump || args.d;
 const cont = slice || pop || retry || rewrite || print || dump ||
   (Boolean(args.c || args.cont || args.continue));
-const wpm = args.wpm ? Number(args.wpm) : fast ? DEFAULT_FAST_WPM : DEFAULT_WPM;
 const history = args.h || args.history;
 const system = args.sys || args.system;
 const maxTokens = args.max || args.max_tokens;
 const readStdin = args._.at(0) === "-" || args._.at(-1) === "-";
 const repl = args.repl;
 const code = args.code;
 const debug = args.debug;
+const asSys = args["as-sys"] || args["as-system"];
+const bumpSys = args["as-sys"] || args["as-system"];
 // --- END Parse Args ---
 
 let config = await loadConfig();
 const gptCommand = config?.command ?? DEFAULT_CONFIG.command;
+const wpm = args.wpm ? Number(args.wpm) : config?.wpm || DEFAULT_WPM;
 const configWasEmpty = Object.keys(config ?? {}).length === 0;
 const messageWasEmpty = args._.length === 0;
 const shouldAutoUpdate = config?.autoUpdate !== "never" &&
@@ -151,7 +156,7 @@ const shouldAutoUpdate = config?.autoUpdate !== "never" &&
 const messageContent = args._.join(" ");
 
 const message: Message = {
-  role: "user",
+  role: asSys ? "system" : "user",
   content: messageContent,
 };

Expand Down Expand Up @@ -186,9 +191,11 @@ Options:
-f, --fast Use GPT-3.5-turbo model (faster)
--repl Start a continuous conversation
--code Output code instead of chat text
--bump-sys Bump the most recent system prompt/context to front
-n, --name NAME Select a conversation from history to use
--sys, --system Set a system prompt/context
--sys[tem] Set a system prompt/context
--as-sys[tem] Treat the primary input as a system prompt
-t, --temp Set the creativity temperature
--wpm WPM Set the words per minute
--max MAX_TOKENS Set the maximum number of tokens
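Illustrative invocations of the new options (assuming the installed command is named gpt, per the default config):

  gpt --as-sys "Answer only in French"    (primary input is stored as a system message)
  gpt -c --bump-sys "now continue"        (continue the chat with the latest system message bumped)
  gpt --wpm 1200 "explain generators"     (one-off override of the configured print speed)

For the print speed, the --wpm flag wins, then config.wpm, then DEFAULT_WPM (800), per the wpm line in the parse section above.
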
@@ -465,6 +472,22 @@ if (history) {
   }
   Deno.exit();
 }
+
+if (bumpSys) {
+  let mostRecentSystemIndex = -1;
+  for (let i = req.messages.length - 1; i > 0; i--) {
+    if (req.messages[i].role === "system") {
+      mostRecentSystemIndex = i;
+      break;
+    }
+  }
+
+  // splice+push it
+  if (mostRecentSystemIndex !== -1) {
+    const systemMessage = req.messages.splice(mostRecentSystemIndex, 1)[0];
+    req.messages.push(systemMessage);
+  }
+}
 // --- END HANDLE ARGS ---
 
 let streamResponse: AsyncIterableIterator<StreamResponse> | null = null;
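Note on the bump logic: the scan runs from the newest message down to index 1, so a system message sitting in the very first slot is never moved; any later system message is spliced out and pushed to the end of req.messages, the position closest to the next completion. A hypothetical list [sys A, user, sys B, user] therefore becomes [sys A, user, user, sys B]. Also, as committed, bumpSys is derived from the same --as-sys/--as-system flags parsed above (no bump-sys key appears in the parser's boolean list), so passing --as-sys triggers the bump as well.
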
@@ -479,8 +502,8 @@ const doStreamResponse = async () => {
 };
 
 // STATE
-type DoneType = "with_net" | "with_write" | "with_print" | "none";
-let done: DoneType = "none";
+type DoneType = "none" | "with_net" | "with_write" | "with_print";
+let done: DoneType = "none"; // none -> with_net -> with_write -> with_print -> [done]
 let responseStr = "";
 let intermediateStr = "";
 let printStr = "";
@@ -508,8 +531,8 @@ const flush = async () => {
     console.log(
       `\n%cAre you SURE you wish to run the above command? (y/N):`,
       "color: red; font-weight: bold;",
-    )
-    const promptValue = prompt('> ');
+    );
+    const promptValue = prompt("> ");
     if (["y", "yes"].includes(promptValue?.toLowerCase() ?? "")) {
       // do it
       await shExec(responseStr);
@@ -562,22 +585,35 @@ const flush = async () => {
     ) {
       if (response.delta) {
         responseStr += response.delta;
-        intermediateStr += response.delta;
+
+        if (wpm >= 0) {
+          // Write to a buffer
+          intermediateStr += response.delta;
+        }
+        else {
+          // Print immediately
+          printStr += response.delta;
+        }
       }
     }
   } catch (e) {
     console.error("Unhandled error", e);
     Deno.exit(1);
   }
-  done = "with_net";
+
+  if (wpm >= 0) {
+    done = "with_net";
+  } else {
+    done = "with_write";
+  }
   }
 })();
 }
 
-// Intermediate string
-let startTime = -1;
-const targetCps = (AVG_CHARS_PER_WORD * wpm) / 60;
-{
+if (wpm >= 0) {
+  // Writes to a buffer so we can print at a desired WPM
+  let startTime = -1;
+  const targetCps = (AVG_CHARS_PER_WORD * wpm) / 60;
   (async () => {
     while (true) {
       if (done === "with_write" || (done as DoneType) === "with_print") {
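Taken together, the streaming path is now a three-stage pipeline when wpm >= 0: network deltas accumulate in intermediateStr, the pacing task above drains them into printStr at targetCps characters per second, and the pull loop below writes printStr to the terminal, with done advancing none -> with_net -> with_write -> with_print as each stage finishes. With a negative wpm the middle stage is skipped: deltas go straight to printStr and done moves from the network stage directly to with_write.
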
@@ -605,7 +641,7 @@ const targetCps = (AVG_CHARS_PER_WORD * wpm) / 60;
   })();
 }
 
-// Pull strings
+// Pull strings to write as soon as available
 {
   const consumeFn = async () => {
     const latest = printStr;

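For reference, the pacing arithmetic at the defaults in this diff (a worked example, not repo code):

  const AVG_CHARS_PER_WORD = 4.8;
  const DEFAULT_WPM = 800;
  // 4.8 chars/word * 800 words/min = 3840 chars/min = 64 chars/second
  const targetCps = (AVG_CHARS_PER_WORD * DEFAULT_WPM) / 60;
  console.log(targetCps); // 64

A negative wpm (via --wpm or config.wpm) fails every wpm >= 0 check above, so the pacing buffer is bypassed and output prints as fast as the stream arrives.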