
Commit 2ec725d

Modify comment
1 parent 3150b5c commit 2ec725d

20 files changed, +341 -234 lines changed

config/options.go

+12 -11
@@ -3,26 +3,27 @@ package config
 import "os"
 
 type Options struct {
-	DirPath string //数据库数据目录
-	DataFileSize int64 //数据文件的大小
-	SyncWrite bool // 每次写数据是否持久化
+	DirPath string // Database data directory
+	DataFileSize int64 // Size of data files
+	SyncWrite bool // Whether to persist data on every write
 	IndexType IndexerType
 	FIOType FIOType
 }
 
-// IteratorOptions 索引迭代器配置项
+// IteratorOptions is the configuration for index iteration.
 type IteratorOptions struct {
-	// 遍历前缀为指定值的 Key,默认为空
+	// Prefix specifies the prefix value for keys to iterate over. Default is empty.
	Prefix []byte
-	// 是否反向遍历,默认 false 是正向
+	// Reverse indicates whether to iterate in reverse order.
+	// Default is false for forward iteration.
 	Reverse bool
 }
 
-// WriteBatchOptions 批量写入配置项
+// WriteBatchOptions is the configuration for batch writing.
 type WriteBatchOptions struct {
-	// 一个批次当中最大的数据量
+	// MaxBatchNum is the maximum number of data entries in a batch.
 	MaxBatchNum uint
-	// 提交时是否 sync 持久化
+	// SyncWrites indicates whether to sync (persist) the data on batch commit.
 	SyncWrites bool
 }
 
@@ -37,10 +38,10 @@ const (
 type IndexerType = int8
 
 const (
-	// Btree 索引
+	// Btree
 	Btree IndexerType = iota + 1
 
-	// ART (Adpative Radix Tree) 自适应基数树
+	// ART (Adaptive Radix Tree)
 	ART
 )
 
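
For orientation, here is a minimal, self-contained sketch of how the two option structs documented above are typically filled in. The structs are re-declared locally so the snippet compiles on its own; the prefix value and batch limit are made-up examples, not defaults taken from the repository.

package main

import "fmt"

// Local re-declarations of the structs from config/options.go, so this
// sketch is self-contained; in real use they come from the config package.
type IteratorOptions struct {
	Prefix  []byte // only keys with this prefix are visited; empty means all keys
	Reverse bool   // false = forward iteration (default), true = reverse
}

type WriteBatchOptions struct {
	MaxBatchNum uint // upper bound on the number of entries in one batch
	SyncWrites  bool // sync the data file when the batch commits
}

func main() {
	itOpts := IteratorOptions{Prefix: []byte("user:"), Reverse: false}
	wbOpts := WriteBatchOptions{MaxBatchNum: 10000, SyncWrites: true}
	fmt.Printf("iterator: %+v\nwrite batch: %+v\n", itOpts, wbOpts)
}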

engine/batch.go

+20 -1
@@ -35,6 +35,7 @@ func (db *DB) NewWriteBatch(opt config.WriteBatchOptions) *WriteBatch {
 
 // Put Data batch write
 func (wb *WriteBatch) Put(key []byte, value []byte) error {
+	// Check if the key is empty
 	if len(key) == 0 {
 		return _const.ErrKeyIsEmpty
 	}
@@ -46,19 +47,25 @@ func (wb *WriteBatch) Put(key []byte, value []byte) error {
 		Key: key,
 		Value: value,
 	}
+
+	// Add the LogRecord to the temporaryDataWrites map using the key as a string
 	wb.temporaryDataWrites[string(key)] = logRecord
 	return nil
 }
 
 // Delete Batch deletion of data
 func (wb *WriteBatch) Delete(key []byte) error {
+	// Check if the key is empty
 	if len(key) == 0 {
 		return _const.ErrKeyIsEmpty
 	}
+
+	// Acquire a lock to ensure thread safety
 	wb.lock.Lock()
 	defer wb.lock.Unlock()
 
-	// If the data does not exist, return it directly
+	// If the data does not exist, delete it from
+	// temporaryDataWrites if present and return directly
 	logRecordPst := wb.db.index.Get(key)
 	if logRecordPst == nil {
 		if wb.temporaryDataWrites[string(key)] != nil {
@@ -73,6 +80,7 @@ func (wb *WriteBatch) Delete(key []byte) error {
 		Type: data.LogRecordDeleted,
 	}
 	wb.temporaryDataWrites[string(key)] = logRecord
+
 	return nil
 }
 
@@ -127,9 +135,11 @@ func (wb *WriteBatch) Commit() error {
 	for _, record := range wb.temporaryDataWrites {
 		pst := positions[string(record.Key)]
 		if record.Type == data.LogRecordNormal {
+			// Put the record in the index if it is of type LogRecordNormal
 			wb.db.index.Put(record.Key, pst)
 		}
 		if record.Type == data.LogRecordDeleted {
+			// Delete the record from the index if it is of type LogRecordDeleted
 			wb.db.index.Delete(record.Key)
 		}
 	}
@@ -146,8 +156,13 @@ func encodeLogRecordKeyWithSeq(key []byte, seqNo uint64) []byte {
 	seq := make([]byte, binary.MaxVarintLen64)
 	n := binary.PutUvarint(seq[:], seqNo)
 
+	// Create a byte slice to hold the encoded key
 	encodeKey := make([]byte, n+len(key))
+
+	// Copy the sequence number bytes to the encodeKey slice
 	copy(encodeKey[:n], seq[:n])
+
+	// Copy the original key bytes to the encodeKey slice starting from offset n
 	copy(encodeKey[n:], key)
 
 	return encodeKey
@@ -156,6 +171,10 @@ func encodeLogRecordKeyWithSeq(key []byte, seqNo uint64) []byte {
 // Parse the LogRecord key to get the actual key and transaction sequence number seq
 func parseLogRecordKeyAndSeq(key []byte) ([]byte, uint64) {
 	seqNo, n := binary.Uvarint(key)
+
+	// Extract the real key from the remaining bytes
 	realKey := key[n:]
+
 	return realKey, seqNo
 }
+
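
The commented copy calls above amount to prefixing each batch key with a uvarint-encoded transaction sequence number, which parseLogRecordKeyAndSeq later strips off. Below is a self-contained sketch of that round trip using only encoding/binary; the functions mirror the ones in engine/batch.go but are re-implemented locally for illustration.

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeKeyWithSeq prefixes key with the uvarint encoding of seqNo,
// mirroring encodeLogRecordKeyWithSeq in engine/batch.go.
func encodeKeyWithSeq(key []byte, seqNo uint64) []byte {
	seq := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(seq, seqNo)

	encoded := make([]byte, n+len(key))
	copy(encoded[:n], seq[:n]) // sequence number first
	copy(encoded[n:], key)     // then the original key
	return encoded
}

// parseKeyAndSeq splits the encoded form back into the real key and the
// sequence number, mirroring parseLogRecordKeyAndSeq.
func parseKeyAndSeq(encoded []byte) ([]byte, uint64) {
	seqNo, n := binary.Uvarint(encoded)
	return encoded[n:], seqNo
}

func main() {
	enc := encodeKeyWithSeq([]byte("name"), 42)
	key, seq := parseKeyAndSeq(enc)
	fmt.Printf("key=%q seq=%d\n", key, seq) // key="name" seq=42
}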

engine/batch_test.go

+5 -5
@@ -18,7 +18,7 @@ func TestDB_WriteBatch(t *testing.T) {
 	assert.Nil(t, err)
 	assert.NotNil(t, db)
 
-	// 写数据之后不提交
+	// Write data without committing
 	wb := db.NewWriteBatch(config.DefaultWriteBatchOptions)
 	err = wb.Put(randkv.GetTestKey(1), randkv.RandomValue(10))
 	assert.Nil(t, err)
@@ -28,7 +28,7 @@ func TestDB_WriteBatch(t *testing.T) {
 	_, err = db.Get(randkv.GetTestKey(1))
 	assert.Equal(t, _const.ErrKeyNotFound, err)
 
-	// 正常提交数据
+	// Commit the data normally
 	err = wb.Commit()
 	assert.Nil(t, err)
 
@@ -72,7 +72,7 @@ func TestDB_WriteBatchRestart(t *testing.T) {
 	err = wb.Commit()
 	assert.Nil(t, err)
 
-	// 重启
+	// Restart
 	err = db.Close()
 	assert.Nil(t, err)
 
@@ -82,7 +82,7 @@ func TestDB_WriteBatchRestart(t *testing.T) {
 	_, err = db2.Get(randkv.GetTestKey(1))
 	assert.Equal(t, _const.ErrKeyNotFound, err)
 
-	// 判断事务序列号
+	// Check the transaction sequence number
 	assert.Equal(t, uint64(2), db.transSeqNo)
 }
 
@@ -94,7 +94,7 @@ func TestDB_WriteBatch1(t *testing.T) {
 	assert.Nil(t, err)
 	assert.NotNil(t, db)
 
-	// 批量提交中间手动停止
+	// Stop manually in the middle of a batch commit
 	wbopt := config.DefaultWriteBatchOptions
 	wbopt.MaxBatchNum = 1000000
 	wb := db.NewWriteBatch(wbopt)

engine/data/data_file.go

+24 -23
@@ -14,49 +14,50 @@ const (
 	MergeFinaFileSuffix = "mergeFina"
 )
 
-// DataFile 数据文件
+// DataFile represents a data file.
 type DataFile struct {
-	FileID uint32 //文件id
-	WriteOff int64 //文件写到了哪个位置
-	IoManager fileio.IOManager //io 读写操作
+	FileID uint32 // File ID
+	WriteOff int64 // Position where the file is currently being written
+	IoManager fileio.IOManager // IO read/write operations
 }
 
-// OpenDataFile 打开新的数据文件
-func OpenDataFile(dirPath string, fildID uint32, fileSize int64, fioType int8) (*DataFile, error) {
-	fileName := GetDataFileName(dirPath, fildID)
-	return newDataFile(fileName, fildID, fileSize, fioType)
+// OpenDataFile opens a new data file.
+func OpenDataFile(dirPath string, fileID uint32, fileSize int64, fioType int8) (*DataFile, error) {
+	fileName := GetDataFileName(dirPath, fileID)
+	return newDataFile(fileName, fileID, fileSize, fioType)
 }
 
-func GetDataFileName(dirPath string, fildID uint32) string {
-	return filepath.Join(dirPath, fmt.Sprintf("%09d", fildID)+DataFileSuffix)
+// GetDataFileName returns the file name for a data file.
+func GetDataFileName(dirPath string, fileID uint32) string {
+	return filepath.Join(dirPath, fmt.Sprintf("%09d", fileID)+DataFileSuffix)
 }
 
-// OpenHintFile 打开 Hint 索引文件
+// OpenHintFile opens the hint index file.
 func OpenHintFile(dirPath string, fileSize int64, fioType int8) (*DataFile, error) {
 	fileName := filepath.Join(dirPath, HintFileSuffix)
 	return newDataFile(fileName, 0, fileSize, fioType)
 }
 
-// OpenMergeFinaFile 打开标识 merge 完成的文件
+// OpenMergeFinaFile opens the file that indicates merge completion.
 func OpenMergeFinaFile(dirPath string, fileSize int64, fioType int8) (*DataFile, error) {
 	fileName := filepath.Join(dirPath, MergeFinaFileSuffix)
 	return newDataFile(fileName, 0, fileSize, fioType)
 }
 
-func newDataFile(dirPath string, fildID uint32, fileSize int64, fioType int8) (*DataFile, error) {
-	//初始化 IOManager 管理器接口
+func newDataFile(dirPath string, fileID uint32, fileSize int64, fioType int8) (*DataFile, error) {
+	// Initialize the IOManager interface
 	ioManager, err := fileio.NewIOManager(dirPath, fileSize, fioType)
 	if err != nil {
 		return nil, err
 	}
 	return &DataFile{
-		FileID: fildID,
+		FileID: fileID,
 		WriteOff: 0,
 		IoManager: ioManager,
 	}, nil
 }
 
-// ReadLogRecord 根据 offset 从数据文件中读取 logRecord
+// ReadLogRecord reads a log record from the data file based on the offset.
 func (df *DataFile) ReadLogRecord(offset int64) (*LogRecord, int64, error) {
 	fileSize, err := df.IoManager.Size()
 	if err != nil {
@@ -68,39 +69,39 @@ func (df *DataFile) ReadLogRecord(offset int64) (*LogRecord, int64, error) {
 		headerBytes = fileSize - offset
 	}
 
-	// 读取 header 信息
+	// Read header information
 	headerBuf, err := df.readNBytes(headerBytes, offset)
 	if err != nil {
 		return nil, 0, err
 	}
 
 	header, headerSize := decodeLogRecordHeader(headerBuf)
-	// 下面俩个条件表示读到了文件末尾,直接返回 EOF
+	// The following two conditions indicate the end of the file; return EOF directly
 	if header == nil {
 		return nil, 0, io.EOF
 	}
 	if header.crc == 0 && header.keySize == 0 && header.valueSize == 0 {
 		return nil, 0, io.EOF
 	}
 
-	// 取出对应的 key value 的长度
+	// Retrieve the lengths of the key and value
 	keySize, valueSize := int64(header.keySize), int64(header.valueSize)
 	var recordSize = headerSize + keySize + valueSize
 
 	logRecord := &LogRecord{Type: header.recordType}
 
-	// 读取用户实际存储的 key/value 数据
+	// Read the actual user-stored key/value data
 	if keySize > 0 || valueSize > 0 {
 		kvBuf, err := df.readNBytes(keySize+valueSize, headerSize+offset)
 		if err != nil {
 			return nil, 0, err
 		}
-		// 解码
+		// Decode
 		logRecord.Key = kvBuf[:keySize]
 		logRecord.Value = kvBuf[keySize:]
 	}
 
-	// 校验 crc (检查数据的有效性)
+	// Verify CRC (check data integrity)
 	crc := getLogRecordCRC(logRecord, headerBuf[crc32.Size:headerSize])
 	if crc != header.crc {
 		return nil, 0, ErrInvalidCRC
@@ -117,7 +118,7 @@ func (df *DataFile) Write(buf []byte) error {
 	return nil
 }
 
-// WriteHintRecord 写入索引信息到 hint 文件中
+// WriteHintRecord writes index information to the hint file.
 func (df *DataFile) WriteHintRecord(key []byte, pst *LogRecordPst) error {
 	record := &LogRecord{
 		Key: key,
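
As the renamed GetDataFileName parameter shows, data-file names are just the file ID padded to nine digits plus a suffix. A quick standalone illustration follows; the ".data" suffix is an assumption for this sketch, since the value of the DataFileSuffix constant is not shown in this diff.

package main

import (
	"fmt"
	"path/filepath"
)

// dataFileSuffix is assumed for illustration; the real value is the
// DataFileSuffix constant defined elsewhere in engine/data/data_file.go.
const dataFileSuffix = ".data"

// dataFileName mirrors GetDataFileName: a zero-padded 9-digit file ID plus suffix.
func dataFileName(dirPath string, fileID uint32) string {
	return filepath.Join(dirPath, fmt.Sprintf("%09d", fileID)+dataFileSuffix)
}

func main() {
	fmt.Println(dataFileName("/tmp/flydb", 42)) // /tmp/flydb/000000042.data
}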

engine/data/data_file_test.go

+3 -3
@@ -61,7 +61,7 @@ func TestDataFile_ReadLogRecord(t *testing.T) {
 	assert.Nil(t, err)
 	assert.NotNil(t, dataFile)
 
-	// 只有一条 LogRecord
+	// There is only one LogRecord
 	record1 := &LogRecord{
 		Key: []byte("name"),
 		Value: []byte("flydb"),
@@ -76,7 +76,7 @@ func TestDataFile_ReadLogRecord(t *testing.T) {
 	assert.Equal(t, size, readSize1)
 	assert.Equal(t, record1, readRec1)
 
-	// 多条 LogRecord 从不同位置读取
+	// Multiple LogRecords are read from different positions
 	record2 := &LogRecord{
 		Key: []byte("name"),
 		Value: []byte("bitcask-kv"),
@@ -89,7 +89,7 @@ func TestDataFile_ReadLogRecord(t *testing.T) {
 	assert.Equal(t, size2, readSize2)
 	assert.Equal(t, record2, readRec2)
 
-	// 被删除的数据在数据文件的末尾
+	// The deleted data is at the end of the data file
 	record3 := &LogRecord{
 		Key: []byte("name"),
 		Value: []byte("delete-data"),
