
Commit 35404c1

Authored Feb 12, 2025
Merge pull request #55 from powersync-ja/feat/bucket-priorities: Support buckets with different priorities
2 parents 30e4ad2 + 4248310

14 files changed: +776 -215 lines
 

crates/core/src/bucket_priority.rs

Lines changed: 89 additions & 0 deletions

@@ -0,0 +1,89 @@
use serde::{de::Visitor, Deserialize};
use sqlite_nostd::ResultCode;

use crate::error::SQLiteError;

#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct BucketPriority {
    pub number: i32,
}

impl BucketPriority {
    pub fn may_publish_with_outstanding_uploads(self) -> bool {
        self == BucketPriority::HIGHEST
    }

    pub const HIGHEST: BucketPriority = BucketPriority { number: 0 };

    /// A low priority used to represent fully-completed sync operations across all priorities.
    pub const SENTINEL: BucketPriority = BucketPriority { number: i32::MAX };
}

impl TryFrom<i32> for BucketPriority {
    type Error = SQLiteError;

    fn try_from(value: i32) -> Result<Self, Self::Error> {
        if value < BucketPriority::HIGHEST.number || value == Self::SENTINEL.number {
            return Err(SQLiteError(
                ResultCode::MISUSE,
                Some("Invalid bucket priority".into()),
            ));
        }

        return Ok(BucketPriority { number: value });
    }
}

impl Into<i32> for BucketPriority {
    fn into(self) -> i32 {
        self.number
    }
}

impl PartialOrd<BucketPriority> for BucketPriority {
    fn partial_cmp(&self, other: &BucketPriority) -> Option<core::cmp::Ordering> {
        Some(self.number.partial_cmp(&other.number)?.reverse())
    }
}

impl<'de> Deserialize<'de> for BucketPriority {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        struct PriorityVisitor;
        impl<'de> Visitor<'de> for PriorityVisitor {
            type Value = BucketPriority;

            fn expecting(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result {
                formatter.write_str("a priority as an integer between 0 and 3 (inclusive)")
            }

            fn visit_i32<E>(self, v: i32) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                BucketPriority::try_from(v).map_err(|e| E::custom(e.1.unwrap_or_default()))
            }

            fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                let i: i32 = v.try_into().map_err(|_| E::custom("int too large"))?;
                Self::visit_i32(self, i)
            }

            fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                let i: i32 = v.try_into().map_err(|_| E::custom("int too large"))?;
                Self::visit_i32(self, i)
            }
        }

        deserializer.deserialize_i32(PriorityVisitor)
    }
}

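One subtlety in the PartialOrd implementation above: a numerically smaller `number` means a more important bucket, so comparisons are reversed. A minimal standalone sketch (with a hypothetical `Priority` type, not the crate's actual code) to illustrate:

// Standalone sketch: priorities compare in reverse numeric order,
// so priority 0 ranks above priority 3.
use core::cmp::Ordering;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Priority {
    number: i32,
}

impl PartialOrd for Priority {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // A smaller number means a more important bucket, hence `reverse`.
        Some(self.number.partial_cmp(&other.number)?.reverse())
    }
}

fn main() {
    let highest = Priority { number: 0 };
    let background = Priority { number: 3 };
    assert!(highest > background); // priority 0 outranks priority 3
    println!("priority 0 outranks priority 3");
}
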
crates/core/src/kv.rs

Lines changed: 5 additions & 3 deletions

@@ -9,6 +9,7 @@ use sqlite::ResultCode;
 use sqlite_nostd as sqlite;
 use sqlite_nostd::{Connection, Context};

+use crate::bucket_priority::BucketPriority;
 use crate::create_sqlite_optional_text_fn;
 use crate::create_sqlite_text_fn;
 use crate::error::SQLiteError;
@@ -46,13 +47,14 @@ fn powersync_last_synced_at_impl(
     let db = ctx.db_handle();

     // language=SQLite
-    let statement = db.prepare_v2("select value from ps_kv where key = 'last_synced_at'")?;
+    let statement = db.prepare_v2("select last_synced_at from ps_sync_state where priority = ?")?;
+    statement.bind_int(1, BucketPriority::SENTINEL.into())?;

     if statement.step()? == ResultCode::ROW {
         let client_id = statement.column_text(0)?;
-        return Ok(Some(client_id.to_string()));
+        Ok(Some(client_id.to_string()))
     } else {
-        return Ok(None);
+        Ok(None)
     }
 }

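With this change, the fully-synced timestamp lives in ps_sync_state under the sentinel priority (i32::MAX) instead of ps_kv. The following sketch reproduces the lookup with rusqlite, which is an assumption for illustration; the extension itself goes through sqlite_nostd:

// Sketch using rusqlite (an assumption, not the crate's bindings).
// Reads the fully-synced timestamp the way powersync_last_synced_at
// now does: via the sentinel-priority row in ps_sync_state.
use rusqlite::{Connection, OptionalExtension, Result};

const SENTINEL_PRIORITY: i32 = i32::MAX; // mirrors BucketPriority::SENTINEL

fn last_synced_at(conn: &Connection) -> Result<Option<String>> {
    conn.query_row(
        "SELECT last_synced_at FROM ps_sync_state WHERE priority = ?1",
        [SENTINEL_PRIORITY],
        |row| row.get(0),
    )
    .optional()
}

fn main() -> Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE ps_sync_state (priority INTEGER NOT NULL, last_synced_at TEXT NOT NULL);
         INSERT INTO ps_sync_state VALUES (2147483647, '2025-02-12 10:00:00');",
    )?;
    assert_eq!(last_synced_at(&conn)?.as_deref(), Some("2025-02-12 10:00:00"));
    Ok(())
}
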
crates/core/src/lib.rs

Lines changed: 1 addition & 0 deletions

@@ -12,6 +12,7 @@ use core::ffi::{c_char, c_int};
 use sqlite::ResultCode;
 use sqlite_nostd as sqlite;

+mod bucket_priority;
 mod checkpoint;
 mod crud_vtab;
 mod diff;

crates/core/src/migrations.rs

Lines changed: 23 additions & 0 deletions

@@ -8,6 +8,7 @@ use sqlite::ResultCode;
 use sqlite_nostd as sqlite;
 use sqlite_nostd::{Connection, Context};

+use crate::bucket_priority::BucketPriority;
 use crate::error::{PSResult, SQLiteError};
 use crate::fix035::apply_v035_fix;

@@ -310,5 +311,27 @@ json_array(
         .into_db_result(local_db)?;
     }

+    if current_version < 7 && target_version >= 7 {
+        const SENTINEL_PRIORITY: i32 = BucketPriority::SENTINEL.number;
+        let stmt = format!("\
+CREATE TABLE ps_sync_state (
+  priority INTEGER NOT NULL,
+  last_synced_at TEXT NOT NULL
+) STRICT;
+INSERT OR IGNORE INTO ps_sync_state (priority, last_synced_at)
+SELECT {}, value from ps_kv where key = 'last_synced_at';
+
+INSERT INTO ps_migration(id, down_migrations)
+VALUES(7,
+json_array(
+json_object('sql', 'INSERT OR REPLACE INTO ps_kv(key, value) SELECT ''last_synced_at'', last_synced_at FROM ps_sync_state WHERE priority = {}'),
+json_object('sql', 'DROP TABLE ps_sync_state'),
+json_object('sql', 'DELETE FROM ps_migration WHERE id >= 7')
+));
+", SENTINEL_PRIORITY, SENTINEL_PRIORITY);
+
+        local_db.exec_safe(&stmt).into_db_result(local_db)?;
+    }
+
     Ok(())
 }

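Migration 7 creates ps_sync_state, carries the old ps_kv timestamp forward under the sentinel priority, and records down_migrations that reverse the process. A sketch of the up-migration's effect, again using rusqlite as an assumed stand-in for the extension's exec_safe (and assuming a recent SQLite for STRICT tables):

// Sketch of migration 7's effect, for illustration only.
// The sentinel row carries the pre-migration ps_kv timestamp forward.
use rusqlite::{Connection, Result};

fn migrate_to_v7(conn: &Connection, sentinel_priority: i32) -> Result<()> {
    conn.execute_batch(&format!(
        "CREATE TABLE ps_sync_state (
           priority INTEGER NOT NULL,
           last_synced_at TEXT NOT NULL
         ) STRICT;
         INSERT OR IGNORE INTO ps_sync_state (priority, last_synced_at)
         SELECT {0}, value FROM ps_kv WHERE key = 'last_synced_at';",
        sentinel_priority
    ))
}

fn main() -> Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE ps_kv(key TEXT PRIMARY KEY NOT NULL, value BLOB);
         INSERT INTO ps_kv VALUES ('last_synced_at', '2025-02-12 10:00:00');",
    )?;
    migrate_to_v7(&conn, i32::MAX)?;
    let n: i64 = conn.query_row("SELECT count(*) FROM ps_sync_state", [], |r| r.get(0))?;
    assert_eq!(n, 1); // the legacy timestamp was migrated
    Ok(())
}
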
crates/core/src/operations.rs

Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@ SELECT
     json_extract(e.value, '$.has_more') as has_more,
     json_extract(e.value, '$.after') as after,
     json_extract(e.value, '$.next_after') as next_after
-FROM json_each(json_extract(?, '$.buckets')) e",
+FROM json_each(json_extract(?1, '$.buckets')) e",
 )?;
 statement.bind_text(1, data, sqlite::Destructor::STATIC)?;

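The switch from `?` to `?1` pins an explicit parameter index: `?1` can be referenced several times in one statement while being bound once, and it documents exactly which index the later bind_text(1, ...) call targets. A small rusqlite sketch (an assumption; the extension binds through sqlite_nostd) of the reuse property:

// Why `?1` instead of `?`: the explicit index is unambiguous and may
// appear multiple times while binding the value only once.
use rusqlite::{Connection, Result};

fn main() -> Result<()> {
    let conn = Connection::open_in_memory()?;
    let (x, y): (String, String) = conn.query_row(
        // Both json_extract calls read the single bound parameter.
        "SELECT json_extract(?1, '$.x'), json_extract(?1, '$.y')",
        [r#"{"x": "1", "y": "2"}"#],
        |row| Ok((row.get(0)?, row.get(1)?)),
    )?;
    assert_eq!((x.as_str(), y.as_str()), ("1", "2"));
    Ok(())
}
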
crates/core/src/operations_vtab.rs

Lines changed: 5 additions & 6 deletions

@@ -76,30 +76,29 @@ extern "C" fn update(
     } else if rowid.value_type() == sqlite::ColumnType::Null {
         // INSERT
         let op = args[2].text();
-        let data = args[3].text();

         let tab = unsafe { &mut *vtab.cast::<VirtualTable>() };
         let db = tab.db;

         if op == "save" {
-            let result = insert_operation(db, data);
+            let result = insert_operation(db, args[3].text());
             vtab_result(vtab, result)
         } else if op == "sync_local" {
-            let result = sync_local(db, data);
+            let result = sync_local(db, &args[3]);
             if let Ok(result_row) = result {
                 unsafe {
                     *p_row_id = result_row;
                 }
             }
             vtab_result(vtab, result)
         } else if op == "clear_remove_ops" {
-            let result = clear_remove_ops(db, data);
+            let result = clear_remove_ops(db, args[3].text());
             vtab_result(vtab, result)
         } else if op == "delete_pending_buckets" {
-            let result = delete_pending_buckets(db, data);
+            let result = delete_pending_buckets(db, args[3].text());
             vtab_result(vtab, result)
         } else if op == "delete_bucket" {
-            let result = delete_bucket(db, data);
+            let result = delete_bucket(db, args[3].text());
             vtab_result(vtab, result)
         } else {
             ResultCode::MISUSE as c_int

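The interesting change here is sync_local receiving the raw `&args[3]` value rather than `args[3].text()`: the argument may now legitimately be NULL (full sync) or a JSON payload selecting a partial, priority-limited sync, and eagerly converting to text would lose that distinction. A sketch with assumed types, using serde_json, of the payload shape the Dart tests send:

// Sketch (assumed types): a NULL argument means "full sync", while a
// JSON payload requests a partial, priority-limited sync.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct PartialSyncRequest {
    priority: i32,
    buckets: Vec<String>,
}

fn parse_sync_arg(arg: Option<&str>) -> Option<PartialSyncRequest> {
    // None models a NULL argument: sync everything.
    arg.map(|json| serde_json::from_str(json).expect("invalid payload"))
}

fn main() {
    assert!(parse_sync_arg(None).is_none());
    let req = parse_sync_arg(Some(r#"{"priority": 1, "buckets": ["prio0", "prio1"]}"#)).unwrap();
    println!("partial sync up to priority {}: {:?}", req.priority, req.buckets);
}
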
crates/core/src/sync_local.rs

Lines changed: 267 additions & 101 deletions (large diff not rendered by default)

crates/core/src/sync_types.rs

Lines changed: 1 addition & 0 deletions

@@ -18,4 +18,5 @@ pub struct Checkpoint {
 pub struct BucketChecksum {
     pub bucket: String,
     pub checksum: i32,
+    pub priority: Option<i32>,
 }

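Making `priority` an `Option<i32>` keeps checkpoints from services that predate bucket priorities deserializable. A sketch of that behavior with serde_json (the field names match the struct above; the derive setup is assumed for illustration):

// Sketch: a missing "priority" field deserializes as None instead of
// failing, so old checkpoint messages keep working.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct BucketChecksum {
    bucket: String,
    checksum: i32,
    #[serde(default)]
    priority: Option<i32>,
}

fn main() {
    let old: BucketChecksum =
        serde_json::from_str(r#"{"bucket": "b1", "checksum": 10}"#).unwrap();
    assert_eq!(old.priority, None);

    let new: BucketChecksum =
        serde_json::from_str(r#"{"bucket": "b1", "checksum": 10, "priority": 1}"#).unwrap();
    assert_eq!(new.priority, Some(1));
    println!("{:?} / {:?}", old, new);
}
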
crates/core/src/view_admin.rs

Lines changed: 1 addition & 1 deletion

@@ -120,7 +120,7 @@ fn powersync_init_impl(
     setup_internal_views(local_db)?;

-    powersync_migrate(ctx, 6)?;
+    powersync_migrate(ctx, 7)?;

     Ok(String::from(""))
 }

crates/sqlite/build.rs

Lines changed: 0 additions & 1 deletion

@@ -1,4 +1,3 @@
-
 fn main() {
     let mut cfg = cc::Build::new();

dart/pubspec.lock

Lines changed: 100 additions & 79 deletions (large diff not rendered by default)

dart/test/sync_test.dart

Lines changed: 208 additions & 0 deletions

@@ -0,0 +1,208 @@
import 'dart:convert';

import 'package:sqlite3/common.dart';
import 'package:test/test.dart';

import 'utils/native_test_utils.dart';

void main() {
  group('sync tests', () {
    late CommonDatabase db;

    setUp(() async {
      db = openTestDatabase()
        ..select('select powersync_init();')
        ..select('select powersync_replace_schema(?)', [json.encode(_schema)]);
    });

    tearDown(() {
      db.dispose();
    });

    void pushSyncData(
      String bucket,
      String opId,
      String rowId,
      Object op,
      Object? data, {
      Object? descriptions = _bucketDescriptions,
    }) {
      final encoded = json.encode({
        'buckets': [
          {
            'bucket': bucket,
            'data': [
              {
                'op_id': opId,
                'op': op,
                'object_type': 'items',
                'object_id': rowId,
                'checksum': 0,
                'data': data,
              }
            ],
          }
        ],
        if (descriptions != null) 'descriptions': descriptions,
      });

      db.execute('insert into powersync_operations (op, data) VALUES (?, ?);',
          ['save', encoded]);
    }

    bool pushCheckpointComplete(
        String lastOpId, String? writeCheckpoint, List<Object?> checksums,
        {int? priority}) {
      final [row] = db.select('select powersync_validate_checkpoint(?) as r;', [
        json.encode({
          'last_op_id': lastOpId,
          'write_checkpoint': writeCheckpoint,
          'buckets': [
            for (final cs in checksums.cast<Map<String, dynamic>>())
              if (priority == null || cs['priority'] <= priority) cs
          ],
          'priority': priority,
        })
      ]);

      final decoded = json.decode(row['r']);
      if (decoded['valid'] != true) {
        fail(row['r']);
      }

      db.execute(
        'UPDATE ps_buckets SET last_op = ? WHERE name IN (SELECT json_each.value FROM json_each(?))',
        [
          lastOpId,
          json.encode(checksums.map((e) => (e as Map)['bucket']).toList())
        ],
      );

      db.execute('INSERT INTO powersync_operations(op, data) VALUES (?, ?)', [
        'sync_local',
        priority != null
            ? jsonEncode({
                'priority': priority,
                'buckets': [
                  for (final cs in checksums.cast<Map<String, dynamic>>())
                    if (cs['priority'] <= priority) cs['bucket']
                ],
              })
            : null,
      ]);
      return db.lastInsertRowId == 1;
    }

    ResultSet fetchRows() {
      return db.select('select * from items');
    }

    test('does not publish until reaching checkpoint', () {
      expect(fetchRows(), isEmpty);
      pushSyncData('prio1', '1', 'row-0', 'PUT', {'col': 'hi'});
      expect(fetchRows(), isEmpty);

      expect(
          pushCheckpointComplete(
              '1', null, [_bucketChecksum('prio1', 1, checksum: 0)]),
          isTrue);
      expect(fetchRows(), [
        {'id': 'row-0', 'col': 'hi'}
      ]);
    });

    test('does not publish with pending local data', () {
      expect(fetchRows(), isEmpty);
      db.execute("insert into items (id, col) values ('local', 'data');");
      expect(fetchRows(), isNotEmpty);

      pushSyncData('prio1', '1', 'row-0', 'PUT', {'col': 'hi'});
      expect(
          pushCheckpointComplete(
              '1', null, [_bucketChecksum('prio1', 1, checksum: 0)]),
          isFalse);
      expect(fetchRows(), [
        {'id': 'local', 'col': 'data'}
      ]);
    });

    test('publishes with local data for prio=0 buckets', () {
      expect(fetchRows(), isEmpty);
      db.execute("insert into items (id, col) values ('local', 'data');");
      expect(fetchRows(), isNotEmpty);

      pushSyncData('prio0', '1', 'row-0', 'PUT', {'col': 'hi'});
      expect(
        pushCheckpointComplete(
          '1',
          null,
          [_bucketChecksum('prio0', 0, checksum: 0)],
          priority: 0,
        ),
        isTrue,
      );
      expect(fetchRows(), [
        {'id': 'local', 'col': 'data'},
        {'id': 'row-0', 'col': 'hi'},
      ]);
    });

    test('can publish partial checkpoints under different priorities', () {
      for (var i = 0; i < 4; i++) {
        pushSyncData('prio$i', '1', 'row-$i', 'PUT', {'col': '$i'});
      }
      expect(fetchRows(), isEmpty);

      // Simulate a partial checkpoint complete for each of the buckets.
      for (var i = 0; i < 4; i++) {
        expect(
          pushCheckpointComplete(
            '1',
            null,
            [
              for (var j = 0; j <= 4; j++)
                _bucketChecksum(
                  'prio$j',
                  j,
                  // Give buckets outside of the current priority a wrong
                  // checksum. They should not be validated yet.
                  checksum: j <= i ? 0 : 1234,
                ),
            ],
            priority: i,
          ),
          isTrue,
        );

        expect(fetchRows(), [
          for (var j = 0; j <= i; j++) {'id': 'row-$j', 'col': '$j'},
        ]);

        expect(db.select('select 1 from ps_sync_state where priority = ?', [i]),
            isNotEmpty);
      }
    });
  });
}

Object? _bucketChecksum(String bucket, int prio, {int checksum = 0}) {
  return {'bucket': bucket, 'priority': prio, 'checksum': checksum};
}

const _schema = {
  'tables': [
    {
      'name': 'items',
      'columns': [
        {'name': 'col', 'type': 'text'}
      ],
    }
  ]
};

const _bucketDescriptions = {
  'prio0': {'priority': 0},
  'prio1': {'priority': 1},
  'prio2': {'priority': 2},
  'prio3': {'priority': 3},
};

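The pushCheckpointComplete helper above validates and applies only the buckets at or above the checkpoint's priority; buckets outside that scope deliberately carry wrong checksums to prove they are not checked yet. The same filtering rule as a standalone Rust sketch (assumed types):

// Sketch (assumed types) of the filtering rule the test exercises: a
// checkpoint at priority n only validates and publishes buckets whose
// priority is <= n; lower-priority buckets are deferred.
#[derive(Clone, Debug)]
struct BucketChecksum {
    bucket: String,
    priority: i32,
    checksum: i32,
}

fn buckets_in_scope(checksums: &[BucketChecksum], priority: Option<i32>) -> Vec<BucketChecksum> {
    checksums
        .iter()
        .filter(|cs| priority.map_or(true, |p| cs.priority <= p))
        .cloned()
        .collect()
}

fn main() {
    let checksums = vec![
        BucketChecksum { bucket: "prio0".into(), priority: 0, checksum: 0 },
        BucketChecksum { bucket: "prio1".into(), priority: 1, checksum: 0 },
        BucketChecksum { bucket: "prio2".into(), priority: 2, checksum: 1234 },
    ];
    // A priority-1 checkpoint ignores the (deliberately wrong) prio2 checksum.
    let scoped = buckets_in_scope(&checksums, Some(1));
    assert_eq!(scoped.len(), 2);
    println!("{:?}", scoped);
}
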
dart/test/utils/migration_fixtures.dart

Lines changed: 59 additions & 3 deletions

@@ -1,5 +1,5 @@
 /// The current database version
-const databaseVersion = 6;
+const databaseVersion = 7;

 /// This is the base database state that we expect at various schema versions.
 /// Generated by loading the specific library version, and exporting the schema.
@@ -172,7 +172,51 @@ const expectedState = <int, String>{
 ;INSERT INTO ps_migration(id, down_migrations) VALUES(4, '[{"sql":"DELETE FROM ps_migration WHERE id >= 4"},{"sql":"ALTER TABLE ps_buckets DROP COLUMN op_checksum"},{"sql":"ALTER TABLE ps_buckets DROP COLUMN remove_operations"}]')
 ;INSERT INTO ps_migration(id, down_migrations) VALUES(5, '[{"sql":"SELECT powersync_drop_view(view.name)\n  FROM sqlite_master view\n  WHERE view.type = ''view''\n  AND view.sql GLOB ''*-- powersync-auto-generated''"},{"sql":"ALTER TABLE ps_buckets RENAME TO ps_buckets_5"},{"sql":"ALTER TABLE ps_oplog RENAME TO ps_oplog_5"},{"sql":"CREATE TABLE ps_buckets(\n  name TEXT PRIMARY KEY,\n  last_applied_op INTEGER NOT NULL DEFAULT 0,\n  last_op INTEGER NOT NULL DEFAULT 0,\n  target_op INTEGER NOT NULL DEFAULT 0,\n  add_checksum INTEGER NOT NULL DEFAULT 0,\n  pending_delete INTEGER NOT NULL DEFAULT 0\n, op_checksum INTEGER NOT NULL DEFAULT 0, remove_operations INTEGER NOT NULL DEFAULT 0)"},{"sql":"INSERT INTO ps_buckets(name, last_applied_op, last_op, target_op, add_checksum, op_checksum, pending_delete)\n  SELECT name, last_applied_op, last_op, target_op, add_checksum, op_checksum, pending_delete FROM ps_buckets_5"},{"sql":"CREATE TABLE ps_oplog(\n  bucket TEXT NOT NULL,\n  op_id INTEGER NOT NULL,\n  op INTEGER NOT NULL,\n  row_type TEXT,\n  row_id TEXT,\n  key TEXT,\n  data TEXT,\n  hash INTEGER NOT NULL,\n  superseded INTEGER NOT NULL)"},{"sql":"CREATE INDEX ps_oplog_by_row ON ps_oplog (row_type, row_id) WHERE superseded = 0"},{"sql":"CREATE INDEX ps_oplog_by_opid ON ps_oplog (bucket, op_id)"},{"sql":"CREATE INDEX ps_oplog_by_key ON ps_oplog (bucket, key) WHERE superseded = 0"},{"sql":"INSERT INTO ps_oplog(bucket, op_id, op, row_type, row_id, key, data, hash, superseded)\n  SELECT ps_buckets_5.name, oplog.op_id, 3, oplog.row_type, oplog.row_id, oplog.key, oplog.data, oplog.hash, 0\n  FROM ps_oplog_5 oplog\n  JOIN ps_buckets_5\n  ON ps_buckets_5.id = oplog.bucket"},{"sql":"DROP TABLE ps_oplog_5"},{"sql":"DROP TABLE ps_buckets_5"},{"sql":"INSERT INTO ps_oplog(bucket, op_id, op, row_type, row_id, hash, superseded)\n  SELECT ''$local'', 1, 4, r.row_type, r.row_id, 0, 0\n  FROM ps_updated_rows r"},{"sql":"INSERT OR REPLACE INTO ps_buckets(name, pending_delete, last_op, target_op) VALUES(''$local'', 1, 0, 9223372036854775807)"},{"sql":"DROP TABLE ps_updated_rows"},{"sql":"DELETE FROM ps_migration WHERE id >= 5"}]')
 ;INSERT INTO ps_migration(id, down_migrations) VALUES(6, '[{"sql":"DELETE FROM ps_migration WHERE id >= 6"}]')
-'''
+''',
+  7: r'''
+;CREATE TABLE ps_buckets(
+  id INTEGER PRIMARY KEY,
+  name TEXT NOT NULL,
+  last_applied_op INTEGER NOT NULL DEFAULT 0,
+  last_op INTEGER NOT NULL DEFAULT 0,
+  target_op INTEGER NOT NULL DEFAULT 0,
+  add_checksum INTEGER NOT NULL DEFAULT 0,
+  op_checksum INTEGER NOT NULL DEFAULT 0,
+  pending_delete INTEGER NOT NULL DEFAULT 0
+) STRICT
+;CREATE TABLE ps_crud (id INTEGER PRIMARY KEY AUTOINCREMENT, data TEXT, tx_id INTEGER)
+;CREATE TABLE ps_kv(key TEXT PRIMARY KEY NOT NULL, value BLOB)
+;CREATE TABLE ps_migration(id INTEGER PRIMARY KEY, down_migrations TEXT)
+;CREATE TABLE ps_oplog(
+  bucket INTEGER NOT NULL,
+  op_id INTEGER NOT NULL,
+  row_type TEXT,
+  row_id TEXT,
+  key TEXT,
+  data TEXT,
+  hash INTEGER NOT NULL) STRICT
+;CREATE TABLE ps_sync_state (
+  priority INTEGER NOT NULL,
+  last_synced_at TEXT NOT NULL
+) STRICT
+;CREATE TABLE ps_tx(id INTEGER PRIMARY KEY NOT NULL, current_tx INTEGER, next_tx INTEGER)
+;CREATE TABLE ps_untyped(type TEXT NOT NULL, id TEXT NOT NULL, data TEXT, PRIMARY KEY (type, id))
+;CREATE TABLE ps_updated_rows(
+  row_type TEXT,
+  row_id TEXT,
+  PRIMARY KEY(row_type, row_id)) STRICT, WITHOUT ROWID
+;CREATE UNIQUE INDEX ps_buckets_name ON ps_buckets (name)
+;CREATE INDEX ps_oplog_key ON ps_oplog (bucket, key)
+;CREATE INDEX ps_oplog_opid ON ps_oplog (bucket, op_id)
+;CREATE INDEX ps_oplog_row ON ps_oplog (row_type, row_id)
+;INSERT INTO ps_migration(id, down_migrations) VALUES(1, null)
+;INSERT INTO ps_migration(id, down_migrations) VALUES(2, '[{"sql":"DELETE FROM ps_migration WHERE id >= 2","params":[]},{"sql":"DROP TABLE ps_tx","params":[]},{"sql":"ALTER TABLE ps_crud DROP COLUMN tx_id","params":[]}]')
+;INSERT INTO ps_migration(id, down_migrations) VALUES(3, '[{"sql":"DELETE FROM ps_migration WHERE id >= 3"},{"sql":"DROP TABLE ps_kv"}]')
+;INSERT INTO ps_migration(id, down_migrations) VALUES(4, '[{"sql":"DELETE FROM ps_migration WHERE id >= 4"},{"sql":"ALTER TABLE ps_buckets DROP COLUMN op_checksum"},{"sql":"ALTER TABLE ps_buckets DROP COLUMN remove_operations"}]')
+;INSERT INTO ps_migration(id, down_migrations) VALUES(5, '[{"sql":"SELECT powersync_drop_view(view.name)\n  FROM sqlite_master view\n  WHERE view.type = ''view''\n  AND view.sql GLOB ''*-- powersync-auto-generated''"},{"sql":"ALTER TABLE ps_buckets RENAME TO ps_buckets_5"},{"sql":"ALTER TABLE ps_oplog RENAME TO ps_oplog_5"},{"sql":"CREATE TABLE ps_buckets(\n  name TEXT PRIMARY KEY,\n  last_applied_op INTEGER NOT NULL DEFAULT 0,\n  last_op INTEGER NOT NULL DEFAULT 0,\n  target_op INTEGER NOT NULL DEFAULT 0,\n  add_checksum INTEGER NOT NULL DEFAULT 0,\n  pending_delete INTEGER NOT NULL DEFAULT 0\n, op_checksum INTEGER NOT NULL DEFAULT 0, remove_operations INTEGER NOT NULL DEFAULT 0)"},{"sql":"INSERT INTO ps_buckets(name, last_applied_op, last_op, target_op, add_checksum, op_checksum, pending_delete)\n  SELECT name, last_applied_op, last_op, target_op, add_checksum, op_checksum, pending_delete FROM ps_buckets_5"},{"sql":"CREATE TABLE ps_oplog(\n  bucket TEXT NOT NULL,\n  op_id INTEGER NOT NULL,\n  op INTEGER NOT NULL,\n  row_type TEXT,\n  row_id TEXT,\n  key TEXT,\n  data TEXT,\n  hash INTEGER NOT NULL,\n  superseded INTEGER NOT NULL)"},{"sql":"CREATE INDEX ps_oplog_by_row ON ps_oplog (row_type, row_id) WHERE superseded = 0"},{"sql":"CREATE INDEX ps_oplog_by_opid ON ps_oplog (bucket, op_id)"},{"sql":"CREATE INDEX ps_oplog_by_key ON ps_oplog (bucket, key) WHERE superseded = 0"},{"sql":"INSERT INTO ps_oplog(bucket, op_id, op, row_type, row_id, key, data, hash, superseded)\n  SELECT ps_buckets_5.name, oplog.op_id, 3, oplog.row_type, oplog.row_id, oplog.key, oplog.data, oplog.hash, 0\n  FROM ps_oplog_5 oplog\n  JOIN ps_buckets_5\n  ON ps_buckets_5.id = oplog.bucket"},{"sql":"DROP TABLE ps_oplog_5"},{"sql":"DROP TABLE ps_buckets_5"},{"sql":"INSERT INTO ps_oplog(bucket, op_id, op, row_type, row_id, hash, superseded)\n  SELECT ''$local'', 1, 4, r.row_type, r.row_id, 0, 0\n  FROM ps_updated_rows r"},{"sql":"INSERT OR REPLACE INTO ps_buckets(name, pending_delete, last_op, target_op) VALUES(''$local'', 1, 0, 9223372036854775807)"},{"sql":"DROP TABLE ps_updated_rows"},{"sql":"DELETE FROM ps_migration WHERE id >= 5"}]')
+;INSERT INTO ps_migration(id, down_migrations) VALUES(6, '[{"sql":"DELETE FROM ps_migration WHERE id >= 6"}]')
+;INSERT INTO ps_migration(id, down_migrations) VALUES(7, '[{"sql":"INSERT OR REPLACE INTO ps_kv(key, value) SELECT ''last_synced_at'', last_synced_at FROM ps_sync_state WHERE priority = 2147483647"},{"sql":"DROP TABLE ps_sync_state"},{"sql":"DELETE FROM ps_migration WHERE id >= 7"}]')
+''',
 };

 final finalState = expectedState[databaseVersion]!;
@@ -230,6 +274,17 @@ const data1 = <int, String>{
   (2, 3, 'lists', 'l1', '', '{}', 3)
 ;INSERT INTO ps_updated_rows(row_type, row_id) VALUES
   ('lists', 'l2')
+''',
+  7: r'''
+;INSERT INTO ps_buckets(id, name, last_applied_op, last_op, target_op, add_checksum, op_checksum, pending_delete) VALUES
+  (1, 'b1', 0, 0, 0, 0, 120, 0),
+  (2, 'b2', 0, 0, 0, 1005, 3, 0)
+;INSERT INTO ps_oplog(bucket, op_id, row_type, row_id, key, data, hash) VALUES
+  (1, 1, 'todos', 't1', '', '{}', 100),
+  (1, 2, 'todos', 't2', '', '{}', 20),
+  (2, 3, 'lists', 'l1', '', '{}', 3)
+;INSERT INTO ps_updated_rows(row_type, row_id) VALUES
+  ('lists', 'l2')
 '''
 };

@@ -270,7 +325,8 @@ final dataDown1 = <int, String>{
   ('b1', 2, 3, 'todos', 't2', '', '{}', 20, 0),
   ('b2', 3, 3, 'lists', 'l1', '', '{}', 3, 0)
 ''',
-  5: data1[5]!
+  5: data1[5]!,
+  6: data1[5]!
 };

 final finalData1 = data1[databaseVersion]!;

dart/test/utils/native_test_utils.dart

Lines changed: 16 additions & 20 deletions

@@ -3,6 +3,7 @@ import 'dart:ffi';
 import 'package:sqlite3/common.dart';
 import 'package:sqlite3/open.dart' as sqlite_open;
 import 'package:sqlite3/sqlite3.dart';
+import 'package:path/path.dart' as p;

 const defaultSqlitePath = 'libsqlite3.so.0';

@@ -22,29 +23,24 @@ CommonDatabase openTestDatabase() {
 }

 String getLibraryForPlatform({String? path = "."}) {
-  switch (Abi.current()) {
-    case Abi.androidArm:
-    case Abi.androidArm64:
-    case Abi.androidX64:
-      return '$path/libpowersync.so';
-    case Abi.macosArm64:
-    case Abi.macosX64:
-      return '$path/libpowersync.dylib';
-    case Abi.linuxX64:
-    case Abi.linuxArm64:
-      return '$path/libpowersync.so';
-    case Abi.windowsX64:
-      return '$path/powersync.dll';
-    case Abi.androidIA32:
-      throw ArgumentError(
+  // Using an absolute path is required for macOS, where Dart can't dlopen
+  // relative paths due to being a "hardened program".
+  return p.normalize(p.absolute(switch (Abi.current()) {
+    Abi.androidArm ||
+    Abi.androidArm64 ||
+    Abi.androidX64 =>
+      '$path/libpowersync.so',
+    Abi.macosArm64 || Abi.macosX64 => '$path/libpowersync.dylib',
+    Abi.linuxX64 || Abi.linuxArm64 => '$path/libpowersync.so',
+    Abi.windowsX64 => '$path/powersync.dll',
+    Abi.androidIA32 => throw ArgumentError(
       'Unsupported processor architecture. X86 Android emulators are not '
       'supported. Please use an x86_64 emulator instead. All physical '
      'Android devices are supported including 32bit ARM.',
-      );
-    default:
-      throw ArgumentError(
+    ),
+    _ => throw ArgumentError(
       'Unsupported processor architecture "${Abi.current()}". '
       'Please open an issue on GitHub to request it.',
-      );
-  }
+    )
+  }));
 }

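For reference, the platform mapping the Dart helper performs, expressed as a standalone Rust sketch (not part of the commit) using compile-time cfg! checks:

// Rust analogue of the Dart helper's platform mapping for the
// compiled extension's file name.
fn library_for_platform(path: &str) -> Result<String, String> {
    let name = if cfg!(target_os = "windows") {
        "powersync.dll"
    } else if cfg!(target_os = "macos") {
        "libpowersync.dylib"
    } else if cfg!(any(target_os = "linux", target_os = "android")) {
        "libpowersync.so"
    } else {
        return Err(format!("Unsupported platform: {}", std::env::consts::OS));
    };
    // The Dart version also canonicalizes to an absolute path, which
    // macOS requires when dlopen-ing from a hardened runtime.
    Ok(format!("{}/{}", path, name))
}

fn main() {
    println!("{:?}", library_for_platform("."));
}
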