chore: merge main, fix conflicts
jkomyno committed Jan 23, 2024
2 parents 6cb33c0 + 6d02932 commit e595258
Showing 17 changed files with 225 additions and 91 deletions.
59 changes: 51 additions & 8 deletions .github/workflows/wasm-benchmarks.yml
@@ -24,14 +24,15 @@ jobs:

- name: "Setup Node.js"
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node_version }}

- name: "Setup pnpm"
uses: pnpm/action-setup@v2
with:
version: 8

- name: Install bc
run: sudo apt update && sudo apt-get install -y bc

- name: "Login to Docker Hub"
uses: docker/login-action@v3
continue-on-error: true
Expand All @@ -51,25 +52,67 @@ jobs:
- name: Run benchmarks
id: bench
run: |
make run-bench | tee results.txt
make run-bench | tee results.txt
# Extract the values from the benchmark output
regressed_values=$(grep "slower than Web Assembly: Latest" results.txt | cut -f1 -d'x')
improved_values=$(grep "faster than Web Assembly: Latest" results.txt | cut -f1 -d'x')
# Initialize sum variable and count
total_sum=0
total_count=0
# Add the inverted regressed values to the sum
for value in $regressed_values; do
inverted=$(echo "scale=4; 1/$value" | bc)
echo "Regressed value: $inverted"
total_sum=$(echo "$total_sum + $inverted" | bc)
total_count=$((total_count + 1))
done
# Add the improved values to the sum
for value in $improved_values; do
echo "Improved value: $value"
total_sum=$(echo "$total_sum + $value" | bc)
total_count=$((total_count + 1))
done
if [ $total_count -eq 0 ]; then
echo "💥 something was wrong running the benchmarks"
exit 1
fi
mean=$(echo "scale=4; $total_sum / $total_count" | bc)
regressed=$(grep "slower than Web Assembly: Latest" results.txt | cut -f1 -d'x' | awk '$1 > 1.02' | wc -l )
if [ "$regressed" -gt 0 ]; then
summary="🚨 WASM query-engine: $regressed benchmark(s) have regressed at least 2%"
echo "Extracted $total_count values from the benchmark output"
echo "Total sum: $total_sum"
echo "Total count: $total_count"
echo "Mean: $mean"
# Report improvement or worsening. Fails if >= 1.5% worsening.
if (( $(echo "$mean < 0.985" | bc -l) )); then
percent=$(echo "scale=4; ((1 / $mean) - 1) * 100" | bc)
summary="❌ WASM query-engine performance will worsen by $(printf %.2f "$percent")%"
status=failed
elif (( $(echo "$mean > 1.015" | bc -l) )); then
percent=$(echo "scale=4; ($mean - 1) * 100" | bc)
summary="🚀 WASM query-engine performance will improve by $(printf %.2f "$percent")%"
status=passed
else
summary="✅ WASM query-engine: no benchmarks have regressed"
delta=$(echo "scale=3; (1 / $mean)" | bc)
summary="✅ WASM query-engine performance won't change substantially ($(printf %.3f "$delta")x)"
status=passed
fi
echo "summary=$summary" >> "$GITHUB_OUTPUT"
echo "status=$status" >> "$GITHUB_OUTPUT"
# Save the output to a file so we can use it in the comment
{
echo 'bench_output<<EOF'
cat results.txt
echo EOF
} >> "$GITHUB_OUTPUT"
- name: Find past report comment
uses: peter-evans/find-comment@v2
id: findReportComment
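For readers skimming the workflow diff: the shell step above boils down to a single average. Each "slower" multiplier is inverted so every benchmark is expressed as a speedup relative to the latest published WASM engine, the arithmetic mean is taken, and that mean is compared against the 0.985 / 1.015 cut-offs (±1.5%). Below is a minimal Rust sketch of the same arithmetic, not part of the commit; the function and message strings are hypothetical and exist only to show the math.

```rust
// Hypothetical re-statement of the workflow's bc arithmetic: invert "slower"
// multipliers, average everything, then apply the +/-1.5% thresholds.
fn classify(regressed: &[f64], improved: &[f64]) -> String {
    let speedups: Vec<f64> = regressed
        .iter()
        .map(|x| 1.0 / x) // a "1.05x slower" entry becomes ~0.952
        .chain(improved.iter().copied())
        .collect();
    assert!(!speedups.is_empty(), "no benchmark values parsed");
    let mean = speedups.iter().sum::<f64>() / speedups.len() as f64;
    if mean < 0.985 {
        format!("performance worsens by {:.2}%", (1.0 / mean - 1.0) * 100.0)
    } else if mean > 1.015 {
        format!("performance improves by {:.2}%", (mean - 1.0) * 100.0)
    } else {
        format!("no substantial change ({:.3}x)", 1.0 / mean)
    }
}

fn main() {
    // One benchmark 1.05x slower, one 1.10x faster -> mean ~1.026 -> reported as an improvement.
    println!("{}", classify(&[1.05], &[1.10]));
}
```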
3 changes: 3 additions & 0 deletions Makefile
@@ -3,7 +3,10 @@ CONFIG_FILE = .test_config
SCHEMA_EXAMPLES_PATH = ./query-engine/example_schemas
DEV_SCHEMA_FILE = dev_datamodel.prisma
DRIVER_ADAPTERS_BRANCH ?= main

ifndef DISABLE_NIX
NIX := $(shell type nix 2> /dev/null)
endif

LIBRARY_EXT := $(shell \
case "$$(uname -s)" in \
50 changes: 50 additions & 0 deletions quaint/src/connector/connection_info.rs
@@ -298,6 +298,56 @@ impl SqlFamily {
}
}

/// Get the default max rows for a batch insert.
pub fn max_insert_rows(&self) -> Option<usize> {
match self {
#[cfg(feature = "postgresql")]
SqlFamily::Postgres => None,
#[cfg(feature = "mysql")]
SqlFamily::Mysql => None,
#[cfg(feature = "sqlite")]
SqlFamily::Sqlite => Some(999),
#[cfg(feature = "mssql")]
SqlFamily::Mssql => Some(1000),
}
}

/// Get the max number of bind parameters for a single query. On targets other
/// than Wasm, this can be overridden with the `QUERY_BATCH_SIZE` env var.
#[cfg(not(target_arch = "wasm32"))]
pub fn max_bind_values(&self) -> usize {
use std::sync::OnceLock;
static BATCH_SIZE_OVERRIDE: OnceLock<Option<usize>> = OnceLock::new();
BATCH_SIZE_OVERRIDE
.get_or_init(|| {
std::env::var("QUERY_BATCH_SIZE")
.ok()
.map(|size| size.parse().expect("QUERY_BATCH_SIZE: not a valid size"))
})
.unwrap_or(self.default_max_bind_values())
}

/// Get the max number of bind parameters for a single query. On Wasm there is no
/// process environment, so that knob is omitted.
#[cfg(target_arch = "wasm32")]
pub fn max_bind_values(&self) -> usize {
self.default_max_bind_values()
}

/// Get the default max number of bind parameters for a single query.
pub fn default_max_bind_values(&self) -> usize {
match self {
#[cfg(feature = "postgresql")]
SqlFamily::Postgres => 32766,
#[cfg(feature = "mysql")]
SqlFamily::Mysql => 65535,
#[cfg(feature = "sqlite")]
SqlFamily::Sqlite => 999,
#[cfg(feature = "mssql")]
SqlFamily::Mssql => 2099,
}
}

/// Check if a family exists for the given scheme.
pub fn scheme_is_supported(url_scheme: &str) -> bool {
Self::from_scheme(url_scheme).is_some()
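The per-family limits above exist to drive query chunking: splitting a statement whose parameter count exceeds what the database accepts into several smaller statements. A rough, hypothetical sketch of how a caller might apply `max_bind_values` follows; the function, table, and column names are made up for illustration and are not part of this commit.

```rust
// Hypothetical illustration: split an over-long IN list so that no single
// statement exceeds the per-family bind-parameter limit.
fn chunked_in_queries(max_bind_values: usize, ids: &[i64]) -> Vec<String> {
    ids.chunks(max_bind_values)
        .map(|chunk| {
            let placeholders = vec!["?"; chunk.len()].join(", ");
            format!("SELECT * FROM \"TestModel\" WHERE \"id\" IN ({placeholders})")
        })
        .collect()
}

fn main() {
    // With SQLite's default of 999 bind values, 2000 ids split into 3 statements.
    let ids: Vec<i64> = (0..2000).collect();
    assert_eq!(chunked_in_queries(999, &ids).len(), 3);
}
```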
@@ -115,3 +115,16 @@ macro_rules! retry {
}
}};
}

#[macro_export]
macro_rules! with_id_excess {
($runner:expr, $query_template:expr) => {{
let max_bind_values = $runner
.max_bind_values()
.expect("Test expected to run only for relational databases.");

let cycle = |argn: usize| (argn % 10 + 1).to_string();
let id_list = (0..=max_bind_values).map(cycle).collect::<Vec<_>>().join(",");
$query_template.replace(":id_list:", &id_list)
}};
}
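For reference, a standalone sketch of what `with_id_excess!` produces, assuming the runner's `max_bind_values()` reports SQLite's default of 999 (see `connection_info.rs` above) and using the same query template as the tests below; the constants here are illustrative, not part of the commit.

```rust
fn main() {
    // Pretend max_bind_values() returned SQLite's default of 999.
    let max_bind_values = 999usize;
    let cycle = |argn: usize| (argn % 10 + 1).to_string();
    // 0..=999 yields 1000 ids cycling through 1..10, one more than the limit,
    // which guarantees the query exceeds the bind-parameter budget.
    let id_list = (0..=max_bind_values).map(cycle).collect::<Vec<_>>().join(",");
    let query = "query { findManyTestModel(where: { id: { notIn: [:id_list:] } }) { id }}"
        .replace(":id_list:", &id_list);
    assert!(query.contains("notIn: [1,2,3"));
}
```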
@@ -1,42 +1,38 @@
use query_engine_tests::*;

#[test_suite(schema(autoinc_id), capabilities(CreateMany, AutoIncrement), exclude(CockroachDb))]
mod not_in_batching {
mod not_in_chunking {
use query_engine_tests::Runner;

#[connector_test(exclude(
CockroachDb,
Postgres("pg.js.wasm"),
Postgres("neon.js.wasm"),
Sqlite("libsql.js.wasm"),
Vitess("planetscale.js.wasm")
))]
#[connector_test(exclude(CockroachDb))]
async fn not_in_batch_filter(runner: Runner) -> TestResult<()> {
runner.query(r#"mutation { createManyTestModel(data: [{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]) { count }}"#).await?.assert_success();

assert_error!(
runner,
"query { findManyTestModel(where: { id: { notIn: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] } }) { id }}",
2029 // QueryParameterLimitExceeded
);
with_id_excess!(
runner,
"query { findManyTestModel(where: { id: { notIn: [:id_list:] } }) { id }}"
),
2029
); // QueryParameterLimitExceeded

Ok(())
}
}

#[test_suite(schema(autoinc_id_cockroachdb), only(CockroachDb))]
mod not_in_batching_cockroachdb {
mod not_in_chunking_cockroachdb {
use query_engine_tests::Runner;

#[connector_test]
async fn not_in_batch_filter(runner: Runner) -> TestResult<()> {
runner.query(r#"mutation { createManyTestModel(data: [{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]) { count }}"#).await?.assert_success();

assert_error!(
runner,
"query { findManyTestModel(where: { id: { notIn: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] } }) { id }}",
2029 // QueryParameterLimitExceeded
);
with_id_excess!(
runner,
"query { findManyTestModel(where: { id: { notIn: [:id_list:] } }) { id }}"
),
2029
); // QueryParameterLimitExceeded

Ok(())
}
@@ -1,4 +1,3 @@
mod in_selection_batching;
mod select_one_compound;
mod select_one_singular;
mod transactional_batch;
@@ -1,8 +1,16 @@
use query_engine_tests::*;

/// Port note: Batch size for testing is now 10 by default, not configurable (look at the direnv).
/// * QUERY_BATCH_SIZE for testing is 10, configured in direnv.
/// * It should be called QUERY_CHUNK_SIZE instead, because it's a knob that configures query
///   chunking, i.e. splitting queries that have more arguments than the database accepts into
///   multiple queries.
/// * WASM versions of the engine don't allow runtime configuration of this value, so they default
///   to the minimum supported by any database in a SQL family (e.g. Postgres, MySQL, SQLite,
///   SQL Server). As such, to guarantee chunking happens, a number of arguments larger than that
///   default needs to be used, so the chunking code gets real coverage while exercising the WASM
///   query engines.
#[test_suite(schema(schema))]
mod isb {
mod chunking {
use indoc::indoc;
use query_engine_tests::{assert_error, run_query};

@@ -34,8 +42,8 @@ mod isb {
schema.to_owned()
}

// "batching of IN queries" should "work when having more than the specified amount of items"
// TODO(joins): Excluded because we have no support for batched queries with joins. In practice, it should happen under much less circumstances
// "chunking of IN queries" should "work when having more than the specified amount of items"
// TODO(joins): Excluded because we have no support for chunked queries with joins. In practice, it should happen in far fewer circumstances
// TODO(joins): than with the query-based strategy, because we don't issue `WHERE IN (parent_ids)` queries anymore to resolve relations.
#[connector_test(exclude_features("relationJoins"))]
async fn in_more_items(runner: Runner) -> TestResult<()> {
@@ -52,8 +60,8 @@ mod isb {
Ok(())
}

// "ascending ordering of batched IN queries" should "work when having more than the specified amount of items"
// TODO(joins): Excluded because we have no support for batched queries with joins. In practice, it should happen under much less circumstances
// "ascending ordering of chunked IN queries" should "work when having more than the specified amount of items"
// TODO(joins): Excluded because we have no support for chunked queries with joins. In practice, it should happen in far fewer circumstances
// TODO(joins): than with the query-based strategy, because we don't issue `WHERE IN (parent_ids)` queries anymore to resolve relations.
#[connector_test(exclude_features("relationJoins"))]
async fn asc_in_ordering(runner: Runner) -> TestResult<()> {
@@ -70,8 +78,8 @@ mod isb {
Ok(())
}

// "ascending ordering of batched IN queries" should "work when having more than the specified amount of items"
// TODO(joins): Excluded because we have no support for batched queries with joins. In practice, it should happen under much less circumstances
// "ascending ordering of chunked IN queries" should "work when having more than the specified amount of items"
// TODO(joins): Excluded because we have no support for chunked queries with joins. In practice, it should happen in far fewer circumstances
// TODO(joins): than with the query-based strategy, because we don't issue `WHERE IN (parent_ids)` queries anymore to resolve relations.
#[connector_test(exclude_features("relationJoins"))]
async fn desc_in_ordering(runner: Runner) -> TestResult<()> {
@@ -88,45 +96,29 @@ mod isb {
Ok(())
}

#[connector_test(exclude(
MongoDb,
Postgres("pg.js.wasm"),
Postgres("neon.js.wasm"),
Sqlite("libsql.js.wasm"),
Vitess("planetscale.js.wasm")
))]
#[connector_test(exclude(MongoDb))]
async fn order_by_aggregation_should_fail(runner: Runner) -> TestResult<()> {
create_test_data(&runner).await?;

assert_error!(
runner,
r#"query {
findManyA(where: {id: { in: [5,4,3,2,1,1,1,2,3,4,5,6,7,6,5,4,3,2,1,2,3,4,5,6] }}, orderBy: { b: { as: { _count: asc } } }) { id }
}"#,
with_id_excess!(&runner, "query { findManyA(where: {id: { in: [:id_list:] }}, orderBy: { b: { as: { _count: asc } } } ) { id } }"),
2029 // QueryParameterLimitExceeded
);

Ok(())
}

#[connector_test(
capabilities(FullTextSearchWithoutIndex),
exclude(
MongoDb,
Postgres("pg.js.wasm"),
Postgres("neon.js.wasm"),
Sqlite("libsql.js.wasm"),
Vitess("planetscale.js.wasm")
)
)]
#[connector_test(capabilities(FullTextSearchWithoutIndex), exclude(MongoDb))]
async fn order_by_relevance_should_fail(runner: Runner) -> TestResult<()> {
create_test_data(&runner).await?;

assert_error!(
runner,
r#"query {
findManyA(where: {id: { in: [5,4,3,2,1,1,1,2,3,4,5,6,7,6,5,4,3,2,1,2,3,4,5,6] }}, orderBy: { _relevance: { fields: text, search: "something", sort: asc } }) { id }
}"#,
with_id_excess!(
&runner,
r#"query { findManyA(where: {id: { in: [:id_list:] }}, orderBy: { _relevance: { fields: text, search: "something", sort: asc } } ) { id } }"#
),
2029 // QueryParameterLimitExceeded
);

@@ -1,5 +1,6 @@
mod aggregation;
mod batch;
mod batching;
mod chunking;
mod data_types;
mod distinct;
mod filters;