Skip to content

Commit d2f8686

Browse files
committed
fix insert document
1 parent d1e665e commit d2f8686

File tree

2 files changed

+51
-36
lines changed

2 files changed

+51
-36
lines changed

R/ragnar-chat.R

Lines changed: 14 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -253,9 +253,10 @@ RagnarChat <- R6::R6Class(
253253
# when it's a tool result we drop the assistant turn (usually a tool call)
254254
c(ti, ti - 1)
255255
} else if (S7::S7_inherits(content, ContentRagnarDocuments)) {
256-
# when it's content ragnar documents, we drop the next turn - because we proactively
257-
# inserted the documents and the next turn is just the LLM acknowledging them.
258-
c(ti, ti + 1)
256+
# when it's content ragnar documents, we drop only the current turn.
257+
# Note: that's unlikely to ever drop a turn because the turn also contains
258+
# the user question, so we always have at least one content.
259+
ti
259260
}
260261
next
261262
}
@@ -327,9 +328,10 @@ RagnarChat <- R6::R6Class(
327328
# when it's a tool result we drop the assistant turn (usually a tool call)
328329
c(ti, ti - 1)
329330
} else if (S7::S7_inherits(content, ContentRagnarDocuments)) {
330-
# when it's content ragnar documents, we drop the next turn - because we proactively
331-
# inserted the documents and the next turn is just the LLM acknowledging them.
332-
c(ti, ti + 1)
331+
# when it's content ragnar documents, we drop only the current turn.
332+
# Note: that's unlikely to ever drop a turn because the turn also contains
333+
# the user question, so we always have at least one content.
334+
ti
333335
}
334336
next
335337
}
@@ -399,39 +401,19 @@ RagnarChat <- R6::R6Class(
399401
#' @description Some models do not support tool calling, so instead of adding a tool call
400402
#' request (faking that the model asked for some search results) we actually
401403
#' proactively insert context into the user chat - as if the user had done it.
402-
#' - User: {documents}
403-
#' - LLM: {llm_answer}.
404-
#' - {...} The contents of the user turn that generated the tool call.
404+
#' - User: {question} {documents}
405+
#' - LLM: answer
405406
#' @param ... The contents of the user turn that generated the tool call.
406-
#' @param llm_answer The answer that the LLM should give after the documents are inserted.
407-
#' @param after The index in the turns list to insert the user and assistant turns.
408407
turns_insert_documents = function(
409408
...,
410-
query,
411-
llm_answer = "Thanks for the information. I'll use the documents to answer your question. Please ask a question.",
412-
after = 0L
409+
query
413410
) {
414411
documents <- self$ragnar_tool(query)
415412

416-
user_turn <- ellmer::Turn(
417-
role = "user",
418-
contents = list(
419-
ContentRagnarDocuments(text = documents)
420-
)
421-
)
422-
423-
assistant_turn <- ellmer::Turn(
424-
role = "assistant",
425-
contents = list(
426-
ellmer::ContentText(text = llm_answer)
427-
)
413+
list(
414+
...,
415+
ContentRagnarDocuments(text = documents)
428416
)
429-
430-
turns <- self$get_turns()
431-
turns <- append(turns, list(user_turn, assistant_turn), after = after)
432-
self$set_turns(turns)
433-
434-
list(...)
435417
}
436418
),
437419

tests/testthat/test-ragnar-chat.R

Lines changed: 37 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
test_that("RagnarChat", {
22
store <- test_store()
3-
chat <- chat_ragnar(ellmer::chat_openai, .store = store)
3+
chat <- chat_ragnar(ellmer::chat_openai, model = "gpt-4.1-nano", .store = store)
44

55
out <- chat$chat("advanced R")
66

@@ -12,12 +12,18 @@ test_that("RagnarChat", {
1212
expect_equal(length(chat$get_turns()), 2)
1313

1414
out <- chat$chat("advanced R")
15-
expect_equal(length(chat$get_turns()), 2 + 2)
15+
# 2 turns that were already there + 2 turns of forced tool calls
16+
# + user + LLM
17+
expect_equal(length(chat$get_turns()), 2 + 2 + 2)
1618

1719
# the default pruning will clear the previous tool calls.
18-
# so we end up with 6 turns
20+
# so we end up with 6 turns + 1 pair
1921
out <- chat$chat("more chatting")
20-
expect_equal(length(chat$get_turns()), 2 + 2 + 2)
22+
expect_equal(length(chat$get_turns()), 6 + 2)
23+
24+
# prune tool calls will clear two turns
25+
chat$turns_prune_tool_calls()
26+
expect_equal(length(chat$get_turns()), 6)
2127
})
2228

2329
test_that("Implementing query rewriting", {
@@ -33,6 +39,7 @@ test_that("Implementing query rewriting", {
3339
store <- test_store()
3440
chat <- chat_ragnar(
3541
ellmer::chat_openai,
42+
model = "gpt-4.1-nano",
3643
.store = store,
3744
.on_user_turn = function(self, ...) {
3845

@@ -55,6 +62,7 @@ test_that("remove chunks by id works", {
5562
store <- test_store()
5663
chat <- chat_ragnar(
5764
ellmer::chat_openai,
65+
model = "gpt-4.1-nano",
5866
.store = store,
5967
.on_user_turn = function(self, ...) {
6068
self$turns_insert_tool_call_request(
@@ -84,6 +92,7 @@ test_that("duplicated chunks are not returned", {
8492
store <- test_store()
8593
chat <- chat_ragnar(
8694
ellmer::chat_openai,
95+
model = "gpt-4.1-nano",
8796
.store = store,
8897
.on_user_turn = function(self, ...) {
8998
self$turns_insert_tool_call_request(
@@ -102,3 +111,27 @@ test_that("duplicated chunks are not returned", {
102111

103112
expect_equal(length(unique(new_chunk_ids)), length(new_chunk_ids))
104113
})
114+
115+
test_that("Can insert chunks preemptively in the user chat", {
116+
store <- test_store()
117+
chat <- chat_ragnar(
118+
ellmer::chat_openai,
119+
model = "gpt-4.1-nano",
120+
.store = store,
121+
.on_user_turn = function(self, ...) {
122+
self$turns_insert_documents(
123+
...,
124+
query = paste(..., collapse = " ")
125+
)
126+
}
127+
)
128+
129+
out <- chat$chat("advanced R")
130+
expect_equal(length(chat$get_turns()), 2)
131+
# it adds the documents into the context
132+
expect_equal(length(chat$get_turns()[[1]]@contents), 2)
133+
134+
chat$turns_prune_chunks()
135+
expect_equal(length(chat$get_turns()), 2)
136+
expect_equal(length(chat$get_turns()[[1]]@contents), 1)
137+
})

0 commit comments

Comments
 (0)