10 changes: 6 additions & 4 deletions src/llama-batch.cpp
@@ -244,19 +244,21 @@ bool llama_batch_allocr::init(
             continue;
         }
 
-        if (memory) {
+        const llama_pos p0 = memory ? memory->seq_pos_max(s) : -1;
+
+        if (p0 >= 0) {
             bool ok = true;
 
             if (batch.token) {
-                if (seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
+                if (seq_pos_min(s) != p0 + 1) {
                     ok = false;
                 }
             } else {
                 assert(batch.embd);
 
                 // for embeddings (typically used as vision input), we allow them to have repeating positions
                 // ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762
-                if (seq_pos_min(s) != memory->seq_pos_max(s) && seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
+                if (seq_pos_min(s) != p0 && seq_pos_min(s) != p0 + 1) {
                     ok = false;
                 }
             }
@@ -267,7 +269,7 @@ bool llama_batch_allocr::init(
                     " - the last position stored in the memory module of the context (i.e. the KV cache) for sequence %d is X = %d\n"
                     " - the tokens for sequence %d in the input batch have a starting position of Y = %d\n"
                     " it is required that the sequence positions remain consecutive: Y = X + 1\n",
-                    __func__, s, s, memory->seq_pos_max(s), s, seq_pos_min(s));
+                    __func__, s, s, p0, s, seq_pos_min(s));
 
                 return false;
             }
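Summary of the change: instead of guarding the continuity check with `if (memory)`, the last stored position for sequence s is cached as `p0` (or -1 when there is no memory module), and the check runs only when the sequence already has positions stored (`p0 >= 0`). The error message and both position comparisons then reuse `p0` rather than calling `memory->seq_pos_max(s)` repeatedly.

A minimal standalone sketch of the resulting rule, for illustration only (the helper `positions_ok` is hypothetical; `llama_pos`, `seq_pos_min`/`seq_pos_max`, and the token vs. embedding distinction come from the diff above):

    #include <cstdint>

    using llama_pos = int32_t;

    // p0        : last position already stored in the KV cache for the sequence, -1 if none
    // pos_min   : smallest position of that sequence in the incoming batch
    // has_token : true for token input (batch.token), false for embedding input (batch.embd)
    static bool positions_ok(llama_pos p0, llama_pos pos_min, bool has_token) {
        if (p0 < 0) {
            return true; // nothing stored yet -> no continuity constraint
        }
        if (has_token) {
            return pos_min == p0 + 1; // tokens must continue exactly where the cache left off
        }
        // embeddings (e.g. vision input) may repeat the last position or continue from it
        return pos_min == p0 || pos_min == p0 + 1;
    }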