'''
##################### Streamlit Chat With LLM Model #########################################
Author: Adrián Baeza Prieto
Github: @adribaeza
Python 3.10+
'''
1
7
import streamlit as st
2
8
import requests , logging , os
3
9
from dotenv import load_dotenv
20
26
DEFAULT_TOP_K = 50
21
27
DEFAULT_TOP_P = 0.9
22
28
23
-
24
- # Función para limpiar el historial de mensajes
29
+ # Function to clear the chat history
25
30
def clear_chat ():
26
31
st .session_state .messages = []
27
32
28
33
def main ():
29
34
30
- # Configuración de la página
35
+ # Page configuration
31
36
st .set_page_config (
32
37
page_title = "Chat with TinyLlama" ,
33
- page_icon = ":robot_face:" , # Puedes usar un emoji o una URL a un favicon específico
38
+ page_icon = ":robot_face:" ,
34
39
layout = "centered" ,
35
40
initial_sidebar_state = "auto" ,
36
41
)
37
42
38
- # Configuración de la interfaz
43
+ # Interface title
39
44
st .title ("Chat with TinnyLLama LLM model" )
40
45
st .write ("Simple chat interface to interact with TinyLlama LLM model" )
41
46
42
- # Añadir un botón para iniciar un nuevo chat
47
+ # Add a button to clear the chat history
43
48
if st .button ("➕ New Chat" , help = "Click to start a new chat and clear the current conversation history" ):
44
49
clear_chat ()
45
50
46
- # Additional params with help text
51
+ # Additional params with help text to adjust the LLM model behavior
47
52
with st .expander ("Config params" , expanded = False ):
48
53
max_new_tokens = st .number_input (
49
54
"Max New Tokens" ,
@@ -75,28 +80,28 @@ def main():
75
80
help = "The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling."
76
81
)
77
82
78
-
83
+ # Check if the session state has the messages attribute to initialize it
79
84
if "messages" not in st .session_state :
80
85
st .session_state .messages = []
81
86
87
+ # Iterate over the messages in the session state to display them in the chat
82
88
for message in st .session_state .messages :
83
89
with st .chat_message (message ["role" ]):
84
90
st .markdown (message ["content" ])
85
91
86
-
92
+ # Add a chat input to interact with the assistant
87
93
if prompt := st .chat_input ("What is up?" ):
94
+
95
+ # Add the user message to the chat history
88
96
st .session_state .messages .append ({"role" : "user" , "content" : prompt })
89
97
with st .chat_message ("user" ):
90
98
st .markdown (prompt )
91
-
92
99
headers = {
93
100
"Authorization" : f"Bearer { STATIC_TOKEN } " ,
94
101
"Content-Type" : "application/json"
95
102
}
96
- # Construir el historial de la conversación
103
+ # Build the data payload for the API request
97
104
conversation_history = [{"role" : msg ["role" ], "content" : msg ["content" ]} for msg in st .session_state .messages ]
98
-
99
-
100
105
data = {
101
106
"messages" : conversation_history ,
102
107
"max_new_tokens" : max_new_tokens ,
@@ -106,6 +111,8 @@ def main():
106
111
"top_p" : top_p
107
112
}
108
113
logging .info (f"Request data: { data } " )
114
+
115
+ # Make a request to the API
109
116
try :
110
117
with st .spinner ("The assistant is thinking..." ):
111
118
response = requests .post ("http://host.docker.internal:8000/api/v1/chat" , headers = headers , json = data )
@@ -123,7 +130,7 @@ def main():
123
130
st .error ("Failed to connect to the API" )
124
131
logging .error (f"Failed to connect to the API: { e } " )
125
132
126
- # Añadir un footer con el texto deseado
133
+ # Add a footer with the app information
127
134
st .markdown (
128
135
"""
129
136
<style>
@@ -145,15 +152,7 @@ def main():
145
152
unsafe_allow_html = True
146
153
)
147
154
155
+ # Run the main function
148
156
if __name__ == "__main__" :
149
157
main ()
150
-
151
- #'''
152
- #### Run the Streamlit app
153
- #To run the Streamlit app, execute the following command in the terminal:
154
- #
155
- # ```bash
156
- # streamlit run frontend/app/main.py
157
- # ```
158
- #'''
159
158
0 commit comments