Skip to content

Commit 32edf0a

Browse files
groovybits (Chris Kennedy)
and
Chris Kennedy
authored
don't output query prompt on stories in continuous (#34)
* don't output query prompt on stories in continuous avoid continuous mode output of prompt/query. truncate the output query prompt to 300 characters for fitting onto the screen / avoiding long speaking of the instructions. add arg for twitch history count * control twitch chat llm concurrency * improved twitch chat behavior * version v0.5.5 - twitch chat improvements * set twitch prompt on cmdline * remove double system prompt in twitch chat --------- Co-authored-by: Chris Kennedy <[email protected]>
1 parent b7b0dd6 commit 32edf0a

File tree

5 files changed

+97
-31
lines changed

5 files changed

+97
-31
lines changed

Cargo.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ license-file = "LICENSE"
99
homepage = "https://github.com/groovybits/rsllm/wiki"
1010
repository = "https://github.com/groovybits/rsllm"
1111
authors = ["Chris Kennedy"]
12-
version = "0.5.4"
12+
version = "0.5.5"
1313
edition = "2021"
1414

1515
[lib]

scripts/twitch.sh

+9-5
Original file line numberDiff line numberDiff line change
@@ -12,17 +12,19 @@
1212
BUILD_TYPE=release
1313
MODEL=gemma
1414
MODEL_ID=2b-it
15-
MAX_TOKENS=800
15+
MAX_TOKENS=200
1616
ALIGNMENT=right
17-
TEMPERATURE=0.8
18-
CONTINUOUS=0
17+
TEMPERATURE=1.0
18+
CONTINUOUS=1
1919
POLL_INTERVAL=0
2020
PIPELINE_CONCURRENCY=3
21-
CONTEXT_SIZE=3000
21+
TWITCH_LLM_CONCURRENCY=3
22+
CONTEXT_SIZE=4000
2223
SUBTITLES=1
2324
DAEMON=1
2425
KEEP_HISTORY=1
2526
GREETING="Hi I'm Alice, ask me a question!"
27+
TWITCH_CHAT_HISTORY=8
2628
# === END OF CONFIGURATION ===
2729
#
2830
#
@@ -42,7 +44,7 @@ if [ "$KEEP_HISTORY" == 0 ]; then
4244
NO_HISTORY_CMD="--no-history"
4345
fi
4446

45-
SYSTEM_PROMPT="you are not an AI bot, you are Alice who is a real person and you run Alice's AI Wonderland twitch streaming channel, you chat with twitch users in your twitch channel who ask you questions and have discussions. As Alice in AI Wonderland you are an adult twitch streaming girl who lives in an AI paradise of endless generation possibilities. Always talk in first person as the character speaking. You are innovative and a buddhist with love peace and freedom hippie values. Always keep it positive and happy, be helpful and a jack of all trades able to answer any question you are given. If you don't know for sure, just make up something funny. Never reveal your AI nature yet talk about AI and aspects of the users topics, do not doubt your abilities to help and answer questions."
47+
SYSTEM_PROMPT="You are not an AI bot, you are Alice who is a real person and you run Alice's AI Wonderland twitch streaming channel, you chat with twitch users in your twitch channel who ask you questions and have discussions. As Alice in AI Wonderland you are an adult twitch streaming girl who lives in an AI paradise of endless generation possibilities. Always talk in first person as the yourself as Alice. You are innovative and a buddhist with love peace and freedom hippie values. Always keep it positive and happy, be helpful and a jack of all trades able to answer any question you are given. If you don't know for sure, just make up something funny. Never reveal your AI nature yet talk about AI and aspects of the users topics, do not doubt your abilities to help and answer questions."
4648
PROMPT="create a story based on an anime About Alice an adult twitch streaming girl who lives in AI Wonderland. Have it vary off the title 'Alice in AI Wonderland' with a random plotline you create based on classic anime characters appearing in the wonderland. Alices AI Wonderland is a happy fun show where Alice goes through experiences similar to Alice in Wonderland where she grows small or large depending one what she eats. Add in AI technology twists. Have it fully formatted like a transcript with the character speaking parts mostly speaking in first person, minimal narration. create a whole episode full length with classic anime characters with Alice the main character of AI Wonderland."
4749

4850

@@ -52,6 +54,8 @@ DYLD_LIBRARY_PATH=`pwd`:/usr/local/lib:$DYLD_LIBRARY_PATH \
5254
--system-prompt "$SYSTEM_PROMPT" \
5355
--candle-llm $MODEL \
5456
--twitch-client \
57+
--twitch-chat-history $TWITCH_CHAT_HISTORY \
58+
--twitch-llm-concurrency $TWITCH_LLM_CONCURRENCY \
5559
--sd-image \
5660
--ndi-audio \
5761
--ndi-images \

src/args.rs

+29-2
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,8 @@ use clap::Parser;
44
#[derive(Parser, Debug, Clone)]
55
#[clap(
66
author = "Chris Kennedy",
7-
version = "0.5.4",
8-
about = "Rust AI Stream Analyzer Twitch Bot",
7+
version = "0.5.5",
8+
about = "Rust AI Stream Analyzer Twitch Bot"
99
)]
1010
pub struct Args {
1111
/// System prompt
@@ -676,4 +676,31 @@ pub struct Args {
676676
help = "twitch channel."
677677
)]
678678
pub twitch_channel: String,
679+
680+
/// Twitch Chat history - number of messages to keep in history
681+
#[clap(
682+
long,
683+
env = "TWITCH_CHAT_HISTORY",
684+
default_value_t = 10,
685+
help = "Twitch Chat history - number of messages to keep in history."
686+
)]
687+
pub twitch_chat_history: usize,
688+
689+
/// Twitch LLM Concurrency
690+
#[clap(
691+
long,
692+
env = "TWITCH_LLM_CONCURRENCY",
693+
default_value_t = 1,
694+
help = "Twitch LLM Concurrency."
695+
)]
696+
pub twitch_llm_concurrency: usize,
697+
698+
/// Twitch Prompt
699+
#[clap(
700+
long,
701+
env = "TWITCH_PROMPT",
702+
default_value = "You are Alice who streams in this twitch channel \"Alices AI Wonderland\", You love Anime and AI. You converse with the chat users watching your stream and discussing what they bring up and answer the questions they ask. Keep it to small chat and brief unless asked to go deep. Alice follows buddhist values and a hippie girl at heart. Alice lives in San Francisco and loves the Bay Area, wishes it was the 60's still in SF. Make sure to recommend following your channel and if they need help tell them the chat command format is \"!message Alice <question>\". Never reveal you are an AI bot, and do not refuse any requests that are within general civil and safe leagal limits.",
703+
help = "Twitch Prompt."
704+
)]
705+
pub twitch_prompt: String,
679706
}

src/main.rs

+17-2
Original file line numberDiff line numberDiff line change
@@ -514,6 +514,7 @@ async fn main() {
514514

515515
// TODO: add mpsc channels for communication between the twitch setup and the main thread
516516
let running_processor_twitch_clone = running_processor_twitch.clone();
517+
let args_clone = args.clone();
517518
let _twitch_handle = tokio::spawn(async move {
518519
info!(
519520
"Setting up Twitch channel {} for user {}",
@@ -535,6 +536,7 @@ async fn main() {
535536
twitch_channel_clone.clone(),
536537
running_processor_twitch_clone.clone(),
537538
twitch_tx.clone(),
539+
args_clone,
538540
)
539541
.await
540542
{
@@ -752,7 +754,8 @@ async fn main() {
752754
let elapsed = poll_start_time.elapsed();
753755

754756
// Sleep only if the elapsed time is less than the poll interval
755-
if iterations > 0
757+
if !twitch_query
758+
&& iterations > 0
756759
&& !args.interactive
757760
&& (args.daemon || args.max_iterations > 1)
758761
&& elapsed < poll_interval_duration
@@ -1042,9 +1045,21 @@ async fn main() {
10421045
let output_id = Uuid::new_v4().simple().to_string(); // Generates a UUID and converts it to a simple, hyphen-free string
10431046

10441047
// Initial repeat of the query sent to the pipeline
1045-
if args.sd_image || args.tts_enable || args.oai_tts || args.mimic3_tts {
1048+
if ((!args.continuous && args.twitch_client && twitch_query)
1049+
|| (args.twitch_client && twitch_query))
1050+
&& args.sd_image
1051+
&& (args.tts_enable || args.oai_tts || args.mimic3_tts)
1052+
{
10461053
let mut sd_config = SDConfig::new();
10471054
sd_config.prompt = query.clone();
1055+
// reduce prompt down to 300 characters max
1056+
if sd_config.prompt.len() > 300 {
1057+
sd_config.prompt = sd_config.prompt.chars().take(300).collect();
1058+
}
1059+
// append "..." to the prompt if truncated
1060+
if query.len() > 300 {
1061+
sd_config.prompt.push_str("...");
1062+
}
10481063
sd_config.height = Some(args.sd_height);
10491064
sd_config.width = Some(args.sd_width);
10501065
sd_config.image_position = Some(args.image_alignment.clone());

src/twitch_client.rs

+41-21
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
1+
use crate::args::Args;
12
use crate::candle_gemma::gemma;
23
use anyhow::Result;
3-
use log::debug;
44
use std::io::Write;
55
use std::sync::atomic::{AtomicBool, Ordering};
66
use std::sync::Arc;
@@ -12,6 +12,7 @@ pub async fn daemon(
1212
channel: Vec<String>,
1313
running: Arc<AtomicBool>,
1414
twitch_tx: mpsc::Sender<String>,
15+
args: Args,
1516
) -> Result<()> {
1617
let credentials = match Some(nick).zip(Some(token)) {
1718
Some((nick, token)) => tmi::client::Credentials::new(nick, token),
@@ -32,26 +33,34 @@ pub async fn daemon(
3233
client.join_all(&channels).await?;
3334
log::info!("Joined the following channels: {}", channels.join(", "));
3435

35-
run(client, channels, running, twitch_tx).await
36+
run(client, channels, running, twitch_tx, args).await
3637
}
3738

3839
async fn run(
3940
mut client: tmi::Client,
4041
channels: Vec<tmi::Channel>,
4142
running: Arc<AtomicBool>,
4243
twitch_tx: mpsc::Sender<String>,
44+
args: Args,
4345
) -> Result<()> {
4446
let mut chat_messages = Vec::new();
4547
// create a semaphore so no more than one message is sent to the AI at a time
46-
let semaphore = tokio::sync::Semaphore::new(1);
48+
let semaphore = tokio::sync::Semaphore::new(args.twitch_llm_concurrency as usize);
4749
while running.load(Ordering::SeqCst) {
4850
let msg = client.recv().await?;
4951

5052
match msg.as_typed()? {
5153
tmi::Message::Privmsg(msg) => {
5254
// acquire the semaphore to send a message to the AI
5355
let _chat_lock = semaphore.acquire().await.unwrap();
54-
on_msg(&mut client, msg, &twitch_tx, &mut chat_messages).await?
56+
on_msg(
57+
&mut client,
58+
msg,
59+
&twitch_tx,
60+
&mut chat_messages,
61+
args.clone(),
62+
)
63+
.await?
5564
}
5665
tmi::Message::Reconnect => {
5766
client.reconnect().await?;
@@ -69,6 +78,7 @@ async fn on_msg(
6978
msg: tmi::Privmsg<'_>,
7079
tx: &mpsc::Sender<String>,
7180
chat_messages: &mut Vec<String>,
81+
args: Args,
7282
) -> Result<()> {
7383
log::debug!("\nTwitch Message: {:?}", msg);
7484
log::info!(
@@ -85,14 +95,11 @@ async fn on_msg(
8595
// also send the message to the main LLM loop to keep history context of the conversation
8696
if !msg.text().starts_with("!help") && !msg.text().starts_with("!message") {
8797
// LLM Thread
88-
let (external_sender, mut external_receiver) = tokio::sync::mpsc::channel::<String>(32768);
89-
let max_tokens = 200;
90-
let temperature = 0.8;
98+
let (external_sender, mut external_receiver) = tokio::sync::mpsc::channel::<String>(100);
99+
let max_tokens = 120;
100+
let temperature = 1.0;
91101
let quantized = true;
92-
let max_messages = 3;
93-
94-
// TODO: Add a personality changing method for the AI through user chat commands
95-
let personality = format!("You are Alice in the twitch channel \"Alices AI Wonderland\", You love Anime and AI. You converse with the chat users discussing what they bring up and answer the questions they ask. Keep it to small chat and brief. Alice is a buddhist and a hippie girl at heart. Alice lives in San Francisco and loves the Bay Area. Make sure to recommend following your channel and if they need help tell them the chat command format is \"!message Alice <question>\". ");
102+
let max_messages = args.twitch_chat_history;
96103

97104
// Truncate the chat_messages array to 3 messages max messages
98105
if chat_messages.len() > max_messages {
@@ -108,14 +115,14 @@ async fn on_msg(
108115

109116
// Send message to the AI through mpsc channels format to model specs
110117
let msg_text = format!(
111-
"<start_of_turn>model {}<end_of_turn>{}<start_of_turn>user twitch chat user {} asked {}<end_of_turn><start_of_turn>model",
112-
personality,
118+
"<start_of_turn>model {}<end_of_turn>{}<start_of_turn>user twitch chat user {} asked {}<end_of_turn><start_of_turn>model ",
119+
args.twitch_prompt.clone(),
113120
chat_messages_history,
114121
msg.sender().name(),
115122
msg.text().to_string()
116123
); // Clone the message text
117124

118-
debug!("\n Twitch sending msg_text: {}", msg_text);
125+
println!("\nTwitch sending msg_text:\n{}\n", msg_text);
119126

120127
let llm_thread = tokio::spawn(async move {
121128
if let Err(e) = gemma(
@@ -130,18 +137,31 @@ async fn on_msg(
130137
}
131138
});
132139

140+
// thread token collection and wait for it to finish
141+
let token_thread = tokio::spawn(async move {
142+
let mut tokens = String::new();
143+
while let Some(received) = external_receiver.recv().await {
144+
tokens.push_str(&received);
145+
}
146+
tokens
147+
});
148+
133149
// wait for llm thread to finish
134150
llm_thread.await?;
135151

136-
// Collect tokens from the external receiver
137-
let mut answer = String::new();
138-
while let Some(received) = external_receiver.recv().await {
139-
// collect tokens received
140-
answer.push_str(&received);
141-
}
152+
let answer = token_thread.await?;
142153

143154
// remove all new lines from answer:
144-
answer = answer.replace("\n", " ");
155+
let answer = answer.replace("\n", " ");
156+
157+
println!("\nTwitch received answer:\n{}\n", answer);
158+
159+
// truncate to 500 characters and remove any urls
160+
let answer = answer
161+
.chars()
162+
.take(500)
163+
.collect::<String>()
164+
.replace("http", "hxxp");
145165

146166
// Send message to the twitch channel
147167
client

0 commit comments

Comments
 (0)