28
28
Dependencies:
29
29
- Requires an Ollama server running locally at http://localhost:11434/api/generate
30
30
"""
31
-
32
31
SCRIPT_NAME = "ollama"
33
32
SCRIPT_AUTHOR = "teraflops"
34
33
SCRIPT_VERSION = "2.1"
# Create the plugin's config options (defaults + descriptions) at load time.
setup_config()
def ask_ollama_async(prompt, buffer, prefix=""):
    """Send *prompt* to the Ollama API without blocking WeeChat.

    The HTTP request runs in the background; the reply is delivered to
    ollama_response_callback(), which posts it to *buffer* (via
    ``/msg <prefix>`` when *prefix* is set, otherwise ``/say``).

    :param prompt: question text forwarded verbatim to the model
    :param buffer: WeeChat buffer pointer where the answer will be posted
    :param prefix: nick for a private reply, or "" for a channel reply
    """
    payload = json.dumps({
        "model": "gemma2:2b",
        "prompt": prompt,
        "stream": False,
    })

    # Pack the callback context into the single string hook_process allows.
    user_data = f"{buffer}||{prefix}"

    # Use WeeChat's built-in "url:" transport instead of shelling out to
    # curl: building a shell command by splicing the JSON payload in with
    # naive quote-escaping meant a prompt containing quotes, backticks or
    # $() could break -- or be executed by -- the shell, and it hard-coded
    # a /usr/bin/curl path.  The url: transport needs no shell at all.
    weechat.hook_process_hashtable(
        f"url:{OLLAMA_API_URL}",
        {
            "post": "1",                                  # HTTP POST
            "httpheader": "Content-Type: application/json",
            "postfields": payload,                        # request body
        },
        10000,  # timeout in milliseconds (10 s) -- positional, not an option
        "ollama_response_callback",
        user_data,
    )
66
72
67
73
def ollama_response_callback(data, command, return_code, out, err):
    """Handle completion of the background Ollama request.

    :param data: "<buffer>||<prefix>" string packed by ask_ollama_async()
    :param command: the command/URL that was executed (unused)
    :param return_code: process exit code, or WEECHAT_HOOK_PROCESS_ERROR
    :param out: captured stdout -- the raw JSON reply from Ollama
    :param err: captured stderr (unused)
    :return: weechat.WEECHAT_RC_OK
    """
    buffer, prefix = data.split("||", 1)

    if return_code is None or return_code == weechat.WEECHAT_HOOK_PROCESS_ERROR:
        response = "[Ollama] Error executing request."
    elif not out.strip():
        response = "[Ollama] Empty response from Ollama."
    else:
        try:
            parsed = json.loads(out)
            response = parsed.get("response", "[Ollama] No 'response' field in reply.")
        except Exception:
            response = "[Ollama] Error parsing server response."

    # IRC messages cannot contain newlines: previously a multi-line model
    # reply sent everything after the first newline into the void (or to
    # the server as garbage).  Collapse all whitespace runs to one space.
    response = " ".join(response.split())

    if prefix:
        # Private conversation: answer the asking nick directly.
        weechat.command(buffer, f"/msg {prefix} {response}")
    else:
        # Channel: answer in place.
        weechat.command(buffer, f"/say {response}")

    return weechat.WEECHAT_RC_OK
@@ -98,12 +108,15 @@ def message_callback(data, buffer, date, tags, displayed, highlight, prefix, mes
98
108
username = weechat .info_get ("irc_nick" , "" )
99
109
is_mentioned = f"@{ username .lower ()} " in message .lower ()
100
110
111
+ # Skip PM if pm_response=off
101
112
if is_private and weechat .config_get_plugin ("pm_response" ) == "off" :
102
113
return weechat .WEECHAT_RC_OK
103
114
115
+ # Only respond to PM if ends with '?'
104
116
if is_private and not message .strip ().endswith ("?" ):
105
117
return weechat .WEECHAT_RC_OK
106
118
119
+ # In channels, respond only if highlight or explicit mention
107
120
if not is_private and not is_mentioned and not int (highlight ):
108
121
return weechat .WEECHAT_RC_OK
109
122
# -- Registration: options, command, and message hooks -------------------
# NOTE(review): the enclosing context is not visible in this chunk; these
# statements presumably run at module scope after weechat.register()
# succeeds -- confirm indentation/guard against the full file.
weechat.config_set_desc_plugin("pm_response", "Automatically respond to private messages (on/off)")
# Re-run config_callback whenever either option is changed by the user.
weechat.hook_config("plugins.var.python.ollama.highlight_response", "config_callback", "")
weechat.hook_config("plugins.var.python.ollama.pm_response", "config_callback", "")
# /ollama <question> -> command_ollama
weechat.hook_command(
    "ollama",
    "Ask something to Ollama",
    "<question>",
    "Example: /ollama What is Python?",
    "",
    "command_ollama",
    ""
)
# Route highlights, channel messages and private messages to message_callback.
weechat.hook_print("", "notify_highlight", "", 1, "message_callback", "")
weechat.hook_print("", "notify_message", "", 1, "message_callback", "")
weechat.hook_print("", "notify_private", "", 1, "message_callback", "")
0 commit comments