forked from ingyamilmolinar/doctorgpt
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbuffer.go
More file actions
94 lines (85 loc) · 2.58 KB
/
buffer.go
File metadata and controls
94 lines (85 loc) · 2.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
package main
import (
"fmt"
"go.uber.org/zap"
)
// logBuffer is a fixed-size ring buffer of logEntry values.
// Append overwrites the oldest entry once the buffer is full;
// Dump returns the retained entries in oldest-to-newest order,
// trimmed to maxTokens.
type logBuffer struct {
	size      int // number of slots in the ring (fixed at construction)
	maxTokens int // token budget applied by Dump via trimSlice
	pointer   int // index of the next slot to write (== oldest entry once wrapped)
	capacity  int // appends counted so far, saturating at size+1; capacity > size signals "wrapped" to Dump
	buffer    []logEntry
	logger    *zap.SugaredLogger
}
func newLogBuffer(log *zap.SugaredLogger, size, maxTokens int) *logBuffer {
log.Debugf("Initializing ring buffer of size %d and max tokens %d", size, maxTokens)
return &logBuffer{
size: size,
maxTokens: maxTokens,
pointer: 0,
capacity: 0,
buffer: make([]logEntry, size, size),
logger: log,
}
}
// Append stores entry at the current write position and advances the
// pointer, wrapping modulo size so the oldest entry is overwritten once
// the ring is full.
func (lb *logBuffer) Append(entry logEntry) {
	// update pointer to oldest entry
	lb.logger.Debugf("Appending into index: %d", lb.pointer)
	lb.buffer[lb.pointer] = entry
	lb.pointer = (lb.pointer + 1) % lb.size
	// TODO: It is weird that capacity can be > size
	// The <= (rather than <) lets capacity reach size+1. That is load-bearing:
	// Dump uses "capacity > size" as its wrapped-around flag, so do not
	// "fix" this condition without also changing Dump.
	if lb.capacity <= lb.size {
		lb.capacity = lb.capacity + 1
	}
	lb.logger.Debugf("New pointer: %d", lb.pointer)
	lb.logger.Debugf("New capacity: %d", lb.capacity)
}
func (lb logBuffer) Dump() []logEntry {
lb.logger.Debugf("Dump capacity: %d", lb.capacity)
if lb.capacity > lb.size {
// loop around entire slice from here
composeSlice := append(lb.buffer[lb.pointer:], lb.buffer[0:lb.pointer]...)
trimmedSlice := trimSlice(lb.logger, composeSlice, lb.maxTokens)
lb.logger.Debugf("Dump (Max capacity): %s", stringify(trimmedSlice))
return trimmedSlice
}
// TODO: Avoid special case
if lb.pointer == 0 && lb.capacity > 0 {
// Buffer is full and pointer wrapped around
trimmedSlice := trimSlice(lb.logger, lb.buffer, lb.maxTokens)
lb.logger.Debugf("Dump: %s", stringify(trimmedSlice))
return trimmedSlice
}
trimmedSlice := trimSlice(lb.logger, lb.buffer[0:lb.pointer], lb.maxTokens)
lb.logger.Debugf("Dump: %s", stringify(trimmedSlice))
return trimmedSlice
}
func (lb *logBuffer) Clear() {
lb.pointer = 0
lb.capacity = 0
lb.buffer = make([]logEntry, lb.size, lb.size)
}
// String renders the buffer's current (trimmed) contents via Dump.
func (lb logBuffer) String() string {
	return fmt.Sprint(lb.Dump())
}
func trimSlice(log *zap.SugaredLogger, entries []logEntry, maxTokens int) []logEntry {
tokens := 0
// Go from most recent logs into oldest logs
var i int
for i = len(entries) - 1; i >= 0; i-- {
logEntry := entries[i]
tokens += getTokens(logEntry.Text)
if tokens > maxTokens {
// Ignore the rest of the older entries
log.Debugf("Skipping oldest lines including: (%s)", logEntry.Text)
break
}
log.Debugf("Including (%s)", logEntry.Text)
log.Debugf("Tokens so far: %d, Max tokens: %d", tokens, maxTokens)
}
return entries[i+1:]
}
// getTokens estimates the OpenAI token count of s using the rough
// heuristic of one token per four bytes.
// https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
func getTokens(s string) int {
	const bytesPerToken = 4
	return len(s) / bytesPerToken
}