feat: enable createMany-related capabilities and tests for SQLite (#4779)

Co-authored-by: Flavian Desverne <[email protected]>
laplab and Weakky authored Mar 28, 2024
1 parent 446e407 commit 473ed31
Showing 26 changed files with 369 additions and 49 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -6,6 +6,7 @@ prisma-gpg-private.asc
.test_config
*.pending-snap
.pending.md
+dev.db

*.class
*.log
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

@@ -62,7 +62,8 @@ const CAPABILITIES: ConnectorCapabilities = enumflags2::make_bitflags!(Connector
RowIn |
DeleteReturning |
SupportsFiltersOnRelationsWithoutJoins |
-LateralJoin
+LateralJoin |
+SupportsDefaultInInsert
});

const SCALAR_TYPE_DEFAULTS: &[(ScalarType, CockroachType)] = &[
6 changes: 5 additions & 1 deletion psl/psl-core/src/builtin_connectors/mongodb.rs
@@ -31,7 +31,11 @@ const CAPABILITIES: ConnectorCapabilities = enumflags2::make_bitflags!(Connector
DefaultValueAuto |
TwoWayEmbeddedManyToManyRelation |
UndefinedType |
-DeleteReturning
+DeleteReturning |
+// MongoDB does not have a notion of default values for fields.
+// This capability is enabled as a performance optimisation to avoid issuing multiple queries
+// when using `createMany()` with MongoDB.
+SupportsDefaultInInsert
});

pub(crate) struct MongoDbDatamodelConnector;
@@ -51,7 +51,8 @@ const CAPABILITIES: ConnectorCapabilities = enumflags2::make_bitflags!(Connector
SupportsTxIsolationRepeatableRead |
SupportsTxIsolationSerializable |
SupportsTxIsolationSnapshot |
-SupportsFiltersOnRelationsWithoutJoins
+SupportsFiltersOnRelationsWithoutJoins |
+SupportsDefaultInInsert
// InsertReturning | DeleteReturning - unimplemented.
});

@@ -68,7 +68,8 @@ pub const CAPABILITIES: ConnectorCapabilities = enumflags2::make_bitflags!(Conne
SupportsTxIsolationSerializable |
RowIn |
SupportsFiltersOnRelationsWithoutJoins |
-CorrelatedSubqueries
+CorrelatedSubqueries |
+SupportsDefaultInInsert
});

const CONSTRAINT_SCOPES: &[ConstraintScope] = &[ConstraintScope::GlobalForeignKey, ConstraintScope::ModelKeyIndex];
@@ -71,7 +71,8 @@ pub const CAPABILITIES: ConnectorCapabilities = enumflags2::make_bitflags!(Conne
DistinctOn |
DeleteReturning |
SupportsFiltersOnRelationsWithoutJoins |
-LateralJoin
+LateralJoin |
+SupportsDefaultInInsert
});

pub struct PostgresDatamodelConnector;
@@ -28,7 +28,9 @@ pub const CAPABILITIES: ConnectorCapabilities = enumflags2::make_bitflags!(Conne
InsertReturning |
DeleteReturning |
UpdateReturning |
-SupportsFiltersOnRelationsWithoutJoins
+SupportsFiltersOnRelationsWithoutJoins |
+CreateMany |
+CreateManyWriteableAutoIncId
});

pub struct SqliteDatamodelConnector;
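For context (not part of the diff): the two capabilities added above gate the batched createMany() path for SQLite. A hypothetical test in the connector-test style used elsewhere in this diff illustrates what they enable; the model name, explicit ids, and snapshot text are assumptions for illustration only.

// Hypothetical example: with CreateMany, SQLite gets a single batched insert for
// createMany(), and with CreateManyWriteableAutoIncId that batch may also carry
// explicit values for an autoincrementing id column.
#[connector_test]
async fn create_many_with_explicit_ids(runner: Runner) -> TestResult<()> {
    insta::assert_snapshot!(
        run_query!(
            &runner,
            r#"mutation { createManyTestModel(data: [{ id: 1 }, { id: 2 }]) { count } }"#
        ),
        @r###"{"data":{"createManyTestModel":{"count":2}}}"###
    );

    Ok(())
}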
1 change: 1 addition & 0 deletions psl/psl-core/src/datamodel_connector/capabilities.rs
@@ -74,6 +74,7 @@ capabilities!(
InsensitiveFilters,
CreateMany,
CreateManyWriteableAutoIncId,
+SupportsDefaultInInsert, // This capability is set if connector supports using `DEFAULT` instead of a value in the list of `INSERT` arguments.
WritableAutoincField,
CreateSkipDuplicates,
UpdateableId,
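For context (not part of the diff): when a connector reports SupportsDefaultInInsert, the engine can keep heterogeneous createMany() rows in one multi-row INSERT by writing the SQL DEFAULT keyword wherever a row omits a field, instead of splitting the batch into several statements. A self-contained sketch of that idea, not query-engine code; the table name, columns, and naive quoting are chosen purely for illustration.

// Build one multi-row INSERT where missing values fall back to the SQL `DEFAULT`
// keyword rather than forcing a separate statement per distinct column set.
fn build_insert(table: &str, columns: &[&str], rows: &[Vec<Option<String>>]) -> String {
    let tuples: Vec<String> = rows
        .iter()
        .map(|row| {
            let values: Vec<String> = row
                .iter()
                .map(|v| match v {
                    Some(value) => format!("'{value}'"), // naive quoting, fine for a sketch
                    None => "DEFAULT".to_string(),       // let the database supply the column default
                })
                .collect();
            format!("({})", values.join(", "))
        })
        .collect();

    format!(
        "INSERT INTO {table} ({}) VALUES {}",
        columns.join(", "),
        tuples.join(", ")
    )
}

fn main() {
    let sql = build_insert(
        "User",
        &["name", "role"],
        &[
            vec![Some("Ada".into()), None], // `role` is left to its column default
            vec![Some("Grace".into()), Some("admin".into())],
        ],
    );
    assert_eq!(
        sql,
        "INSERT INTO User (name, role) VALUES ('Ada', DEFAULT), ('Grace', 'admin')"
    );
}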
@@ -27,3 +27,4 @@ paste = "1.0.14"

[dev-dependencies]
insta = "1.7.1"
+itertools.workspace = true
@@ -0,0 +1,23 @@
use serde_json::Value;

pub fn get_counter(json: &Value, name: &str) -> u64 {
let metric_value = get_metric_value(json, "counters", name);
metric_value.as_u64().unwrap()
}

pub fn get_gauge(json: &Value, name: &str) -> f64 {
let metric_value = get_metric_value(json, "gauges", name);
metric_value.as_f64().unwrap()
}

pub fn get_metric_value(json: &Value, metric_type: &str, name: &str) -> serde_json::Value {
let metrics = json.get(metric_type).unwrap().as_array().unwrap();
let metric = metrics
.iter()
.find(|metric| metric.get("key").unwrap().as_str() == Some(name))
.unwrap()
.as_object()
.unwrap();

metric.get("value").unwrap().clone()
}
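A short usage sketch for the helpers above, assuming they are in scope (in the test suite they are reached as utils::metrics::get_counter and utils::metrics::get_gauge, as the call-site changes further down show). The JSON layout matches what the helpers expect; the metric key names and values are illustrative.

use serde_json::json;

fn main() {
    // Hand-built metrics document in the `{"counters": [...], "gauges": [...]}` layout.
    let metrics = json!({
        "counters": [{ "key": "prisma_datasource_queries_total", "value": 2 }],
        "gauges": [{ "key": "prisma_client_queries_active", "value": 0.0 }]
    });

    assert_eq!(get_counter(&metrics, "prisma_datasource_queries_total"), 2);
    assert_eq!(get_gauge(&metrics, "prisma_client_queries_active"), 0.0);
}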
@@ -1,6 +1,7 @@
mod batch;
mod bytes;
mod json;
+pub mod metrics;
mod querying;
mod raw;
mod string;
@@ -64,7 +64,7 @@ mod cockroachdb {
mod single_col {
use query_engine_tests::run_query;

-#[connector_test(exclude(CockroachDb))]
+#[connector_test(exclude(CockroachDb, Sqlite("cfd1")))]
async fn foo(runner: Runner) -> TestResult<()> {
insta::assert_snapshot!(
run_query!(&runner, "mutation { createManyTestModel(data: [{},{}]) { count }}"),
@@ -14,7 +14,6 @@ mod metrics {
};
use query_engine_tests::ConnectorVersion::*;
use query_engine_tests::*;
-use serde_json::Value;

#[connector_test]
async fn metrics_are_recorded(runner: Runner) -> TestResult<()> {
@@ -30,8 +29,8 @@

let json = runner.get_metrics().to_json(Default::default());
// We cannot assert the full response it will be slightly different per database
-let total_queries = get_counter(&json, PRISMA_DATASOURCE_QUERIES_TOTAL);
-let total_operations = get_counter(&json, PRISMA_CLIENT_QUERIES_TOTAL);
+let total_queries = utils::metrics::get_counter(&json, PRISMA_DATASOURCE_QUERIES_TOTAL);
+let total_operations = utils::metrics::get_counter(&json, PRISMA_CLIENT_QUERIES_TOTAL);

match runner.connector_version() {
Sqlite(_) => assert_eq!(total_queries, 2),
@@ -63,7 +62,7 @@
let _ = runner.commit_tx(tx_id).await?;

let json = runner.get_metrics().to_json(Default::default());
-let active_transactions = get_gauge(&json, PRISMA_CLIENT_QUERIES_ACTIVE);
+let active_transactions = utils::metrics::get_gauge(&json, PRISMA_CLIENT_QUERIES_ACTIVE);
assert_eq!(active_transactions, 0.0);

let tx_id = runner.start_tx(5000, 5000, None).await?;
@@ -80,30 +79,8 @@
let _ = runner.rollback_tx(tx_id.clone()).await?;

let json = runner.get_metrics().to_json(Default::default());
-let active_transactions = get_gauge(&json, PRISMA_CLIENT_QUERIES_ACTIVE);
+let active_transactions = utils::metrics::get_gauge(&json, PRISMA_CLIENT_QUERIES_ACTIVE);
assert_eq!(active_transactions, 0.0);
Ok(())
}

-fn get_counter(json: &Value, name: &str) -> u64 {
-let metric_value = get_metric_value(json, "counters", name);
-metric_value.as_u64().unwrap()
-}
-
-fn get_gauge(json: &Value, name: &str) -> f64 {
-let metric_value = get_metric_value(json, "gauges", name);
-metric_value.as_f64().unwrap()
-}
-
-fn get_metric_value(json: &Value, metric_type: &str, name: &str) -> serde_json::Value {
-let metrics = json.get(metric_type).unwrap().as_array().unwrap();
-let metric = metrics
-.iter()
-.find(|metric| metric.get("key").unwrap().as_str() == Some(name))
-.unwrap()
-.as_object()
-.unwrap();
-
-metric.get("value").unwrap().clone()
-}
}
@@ -1,6 +1,6 @@
use query_engine_tests::*;

-#[test_suite(schema(schema), exclude(Sqlite))]
+#[test_suite(schema(schema))]
mod prisma_14001 {
fn schema() -> String {
r#"
@@ -4,7 +4,7 @@ use query_engine_tests::*;
mod not_in_chunking {
use query_engine_tests::Runner;

-#[connector_test(exclude(CockroachDb))]
+#[connector_test(exclude(CockroachDb, Sqlite("cfd1")))]
async fn not_in_batch_filter(runner: Runner) -> TestResult<()> {
assert_error!(
runner,
@@ -438,8 +438,7 @@ mod relation_load_strategy {
count
}
}
-"#,
-exclude(Sqlite)
+"#
);

relation_load_strategy_not_available_test!(
@@ -25,7 +25,7 @@ mod nested_create_many {
}

// "A basic createMany on a create top level" should "work"
-#[connector_test(exclude(Sqlite))]
+#[connector_test]
async fn create_many_on_create(runner: Runner) -> TestResult<()> {
insta::assert_snapshot!(
run_query!(&runner, r#"mutation {
@@ -53,7 +53,7 @@
}

// "A basic createMany on a create top level" should "work"
-#[connector_test(exclude(Sqlite))]
+#[connector_test]
async fn create_many_shorthand_on_create(runner: Runner) -> TestResult<()> {
insta::assert_snapshot!(
run_query!(&runner, r#"mutation {
@@ -78,7 +78,7 @@

// "Nested createMany" should "error on duplicates by default"
// TODO(dom): Not working for mongo
-#[connector_test(exclude(Sqlite, MongoDb))]
+#[connector_test(exclude(MongoDb))]
async fn nested_createmany_fail_dups(runner: Runner) -> TestResult<()> {
assert_error!(
&runner,
@@ -140,7 +140,7 @@
// Each DB allows a certain amount of params per single query, and a certain number of rows.
// We create 1000 nested records.
// "Nested createMany" should "allow creating a large number of records (horizontal partitioning check)"
-#[connector_test(exclude(Sqlite))]
+#[connector_test(exclude(Sqlite("cfd1")))]
async fn allow_create_large_number_records(runner: Runner) -> TestResult<()> {
let records: Vec<_> = (1..=1000).map(|i| format!(r#"{{ id: {i}, str1: "{i}" }}"#)).collect();

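For context on the horizontal-partitioning comment in the last test above: large createMany() payloads have to be split so that no single statement exceeds the database's bind-parameter budget. A standalone sketch of that splitting; the 999-parameter cap and the two-parameters-per-row figure are assumptions for the example, not values taken from the engine.

// Split `num_rows` rows into contiguous batches so that each batch binds at most
// `max_params` parameters when every row contributes `params_per_row` of them.
fn chunk_rows(num_rows: usize, params_per_row: usize, max_params: usize) -> Vec<std::ops::Range<usize>> {
    assert!(params_per_row > 0 && params_per_row <= max_params);
    let rows_per_batch = max_params / params_per_row;
    (0..num_rows)
        .step_by(rows_per_batch)
        .map(|start| start..(start + rows_per_batch).min(num_rows))
        .collect()
}

fn main() {
    // 1000 records with 2 bound values each, under an assumed 999-parameter cap:
    // batches of 499 rows, so three statements in total.
    let batches = chunk_rows(1000, 2, 999);
    assert_eq!(batches.len(), 3);
    assert_eq!(batches[0], 0..499);
    assert_eq!(batches[2], 998..1000);
}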