Skip to content

Commit

Permalink
VM: Wrap Header in Arc to avoid cloning (#897)
Browse files Browse the repository at this point in the history
  • Loading branch information
gefjon authored Feb 27, 2024
1 parent 1904008 commit d24ead5
Show file tree
Hide file tree
Showing 14 changed files with 172 additions and 102 deletions.
4 changes: 2 additions & 2 deletions crates/core/src/client/messages.rs
Original file line number Diff line number Diff line change
Expand Up @@ -205,7 +205,7 @@ impl ServerMessage for OneOffQueryResponseMessage {
.results
.into_iter()
.map(|table| OneOffTableJson {
table_name: table.head.table_name,
table_name: table.head.table_name.clone(),
rows: table.data,
})
.collect(),
Expand All @@ -221,7 +221,7 @@ impl ServerMessage for OneOffQueryResponseMessage {
.results
.into_iter()
.map(|table| OneOffTable {
table_name: table.head.table_name,
table_name: table.head.table_name.clone(),
row: table
.data
.into_iter()
Expand Down
3 changes: 2 additions & 1 deletion crates/core/src/sql/compiler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ use spacetimedb_vm::dsl::{db_table, db_table_raw, query};
use spacetimedb_vm::expr::{ColumnOp, CrudExpr, DbType, Expr, QueryExpr, SourceExpr};
use spacetimedb_vm::operator::OpCmp;
use std::collections::HashMap;
use std::sync::Arc;

use super::ast::TableSchemaView;

Expand Down Expand Up @@ -182,7 +183,7 @@ fn compile_columns(table: &TableSchema, columns: Vec<FieldName>) -> DbTable {
}
}
DbTable::new(
Header::new(table.table_name.clone(), new, table.get_constraints()),
Arc::new(Header::new(table.table_name.clone(), new, table.get_constraints())),
table.table_id,
table.table_type,
table.table_access,
Expand Down
26 changes: 22 additions & 4 deletions crates/core/src/sql/execute.rs
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ pub(crate) mod tests {
use spacetimedb_sats::relation::Header;
use spacetimedb_sats::{product, AlgebraicType, ProductType};
use spacetimedb_vm::dsl::{mem_table, scalar};
use spacetimedb_vm::eval::create_game_data;
use spacetimedb_vm::eval::test_data::create_game_data;
use tempfile::TempDir;

/// Short-cut for simplify test execution
Expand Down Expand Up @@ -369,9 +369,27 @@ pub(crate) mod tests {
let (db, _tmp_dir) = make_test_db()?;

let mut tx = db.begin_mut_tx(IsolationLevel::Serializable);
create_table_with_rows(&db, &mut tx, "Inventory", data.inv.head.into(), &data.inv.data)?;
create_table_with_rows(&db, &mut tx, "Player", data.player.head.into(), &data.player.data)?;
create_table_with_rows(&db, &mut tx, "Location", data.location.head.into(), &data.location.data)?;
create_table_with_rows(
&db,
&mut tx,
"Inventory",
data.inv.head.to_product_type(),
&data.inv.data,
)?;
create_table_with_rows(
&db,
&mut tx,
"Player",
data.player.head.to_product_type(),
&data.player.data,
)?;
create_table_with_rows(
&db,
&mut tx,
"Location",
data.location.head.to_product_type(),
&data.location.data,
)?;
db.commit_tx(&ExecutionContext::default(), tx)?;

let result = &run_for_testing(
Expand Down
8 changes: 6 additions & 2 deletions crates/core/src/subscription/query.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
use std::sync::Arc;
use std::time::Instant;

use crate::db::db_metrics::{DB_METRICS, MAX_QUERY_COMPILE_TIME};
Expand Down Expand Up @@ -32,7 +33,7 @@ pub const OP_TYPE_FIELD_NAME: &str = "__op_type";
/// Create a virtual table from a sequence of table updates.
/// Add a special column __op_type to distinguish inserts and deletes.
#[tracing::instrument(skip_all)]
pub fn to_mem_table_with_op_type(head: Header, table_access: StAccess, data: &DatabaseTableUpdate) -> MemTable {
pub fn to_mem_table_with_op_type(head: Arc<Header>, table_access: StAccess, data: &DatabaseTableUpdate) -> MemTable {
let mut t = MemTable::new(head, table_access, vec![]);

if let Some(pos) = t.head.find_pos_by_name(OP_TYPE_FIELD_NAME) {
Expand All @@ -42,11 +43,14 @@ pub fn to_mem_table_with_op_type(head: Header, table_access: StAccess, data: &Da
new
}));
} else {
t.head.fields.push(Column::new(
// TODO(perf): Eliminate this `clone_for_error` call, as we're not in an error path.
let mut head = t.head.clone_for_error();
head.fields.push(Column::new(
FieldName::named(&t.head.table_name, OP_TYPE_FIELD_NAME),
AlgebraicType::U8,
t.head.fields.len().into(),
));
t.head = Arc::new(head);
for row in &data.ops {
let mut new = row.row.clone();
new.elements.push(row.op_type.into());
Expand Down
3 changes: 2 additions & 1 deletion crates/core/src/subscription/subscription.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ use anyhow::Context;
use derive_more::{Deref, DerefMut, From, IntoIterator};
use std::collections::{btree_set, BTreeSet, HashMap, HashSet};
use std::ops::Deref;
use std::sync::Arc;
use std::time::Instant;

use crate::db::db_metrics::{DB_METRICS, MAX_QUERY_CPU_TIME};
Expand Down Expand Up @@ -681,7 +682,7 @@ impl<'a> IncrementalJoin<'a> {
/// Replace an [IndexJoin]'s scan or fetch operation with a delta table.
/// A delta table consists purely of updates or changes to the base table.
fn with_delta_table(mut join: IndexJoin, index_side: bool, delta: DatabaseTableUpdate) -> IndexJoin {
fn to_mem_table(head: Header, table_access: StAccess, delta: DatabaseTableUpdate) -> MemTable {
fn to_mem_table(head: Arc<Header>, table_access: StAccess, delta: DatabaseTableUpdate) -> MemTable {
MemTable::new(
head,
table_access,
Expand Down
15 changes: 8 additions & 7 deletions crates/core/src/vm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ use spacetimedb_vm::iterators::RelIter;
use spacetimedb_vm::program::ProgramVm;
use spacetimedb_vm::rel_ops::RelOps;
use spacetimedb_vm::relation::{MemTable, RelValue, Table};
use std::sync::Arc;

pub enum TxMode<'a> {
MutTx(&'a mut MutTx),
Expand Down Expand Up @@ -155,7 +156,7 @@ fn join_inner<'a>(
let header = if semi {
col_lhs_header.clone()
} else {
col_lhs_header.extend(&col_rhs_header)
Arc::new(col_lhs_header.extend(&col_rhs_header))
};

lhs.join_inner(
Expand Down Expand Up @@ -221,7 +222,7 @@ pub struct IndexSemiJoin<'a, Rhs: RelOps<'a>> {
// The field whose value will be used to probe the index.
pub probe_field: FieldName,
// The header for the index side of the join.
pub index_header: Header,
pub index_header: Arc<Header>,
// An optional predicate to evaluate over the matching rows of the index.
pub index_select: Option<ColumnOp>,
// The table id on which the index is defined.
Expand Down Expand Up @@ -261,7 +262,7 @@ impl<'a, Rhs: RelOps<'a>> IndexSemiJoin<'a, Rhs> {
}

impl<'a, Rhs: RelOps<'a>> RelOps<'a> for IndexSemiJoin<'a, Rhs> {
fn head(&self) -> &Header {
fn head(&self) -> &Arc<Header> {
if self.return_index_rows {
&self.index_header
} else {
Expand Down Expand Up @@ -502,7 +503,7 @@ impl ProgramVm for DbProgram<'_, '_> {
}

impl<'a> RelOps<'a> for TableCursor<'a> {
fn head(&self) -> &Header {
fn head(&self) -> &Arc<Header> {
&self.table.head
}

Expand All @@ -516,7 +517,7 @@ impl<'a> RelOps<'a> for TableCursor<'a> {
}

impl<'a, R: RangeBounds<AlgebraicValue>> RelOps<'a> for IndexCursor<'a, R> {
fn head(&self) -> &Header {
fn head(&self) -> &Arc<Header> {
&self.table.head
}

Expand All @@ -533,7 +534,7 @@ impl<'a, I> RelOps<'a> for CatalogCursor<I>
where
I: Iterator<Item = ProductValue>,
{
fn head(&self) -> &Header {
fn head(&self) -> &Arc<Header> {
&self.table.head
}

Expand Down Expand Up @@ -663,7 +664,7 @@ pub(crate) mod tests {
let result = run_ast(p, q.into());

//The expected result
let input = mem_table(schema.head, vec![row]);
let input = mem_table(schema.head.clone_for_error(), vec![row]);

assert_eq!(result, Code::Table(input), "{}", name);
}
Expand Down
8 changes: 7 additions & 1 deletion crates/sats/src/db/def.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
use derive_more::Display;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;

use crate::db::auth::{StAccess, StTableType};
use crate::db::error::{DefType, SchemaError};
Expand Down Expand Up @@ -938,7 +939,12 @@ impl From<&TableSchema> for ProductType {

impl From<&TableSchema> for DbTable {
/// Builds a [`DbTable`] view of a [`TableSchema`], sharing the derived
/// [`Header`] behind an [`Arc`] so downstream clones are refcount bumps.
fn from(value: &TableSchema) -> Self {
    // Derive the header once and wrap it for cheap sharing.
    let head = Arc::new(value.into());
    DbTable::new(head, value.table_id, value.table_type, value.table_access)
}
}

Expand Down
53 changes: 40 additions & 13 deletions crates/sats/src/relation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,15 @@ use spacetimedb_primitives::{ColId, ColList, ColListBuilder, Constraints, TableI
use std::collections::hash_map::DefaultHasher;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::sync::Arc;

/// Hashes `t` with the standard library's [`DefaultHasher`] and returns
/// the resulting 64-bit digest.
///
/// Note: the digest is only stable within a single program run / toolchain;
/// it is not a portable fingerprint.
pub fn calculate_hash<T: Hash>(t: &T) -> u64 {
    let mut hasher = DefaultHasher::default();
    t.hash(&mut hasher);
    hasher.finish()
}

#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct TableField<'a> {
pub table: Option<&'a str>,
pub field: &'a str,
Expand All @@ -34,7 +35,7 @@ pub fn extract_table_field(ident: &str) -> Result<TableField, RelationError> {
}
}

#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub enum FieldOnly<'a> {
Name(&'a str),
Pos(usize),
Expand All @@ -53,6 +54,7 @@ impl fmt::Display for FieldOnly<'_> {
}
}

// TODO(perf): Remove `Clone` derivation.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub enum FieldName {
Name { table: String, field: String },
Expand Down Expand Up @@ -101,6 +103,7 @@ impl FieldName {
}
}

// TODO(perf): Remove `Clone` derivation.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, From)]
pub enum FieldExpr {
Name(FieldName),
Expand Down Expand Up @@ -135,12 +138,13 @@ impl fmt::Display for FieldExpr {
}
}

#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ColumnOnlyField<'a> {
pub field: FieldOnly<'a>,
pub algebraic_type: &'a AlgebraicType,
}

// TODO(perf): Remove `Clone` derivation.
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Column {
pub field: FieldName,
Expand All @@ -165,12 +169,13 @@ impl Column {
}
}

// TODO(perf): Remove `Clone` impl.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct HeaderOnlyField<'a> {
pub fields: Vec<ColumnOnlyField<'a>>,
}

#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Header {
pub table_name: String,
pub fields: Vec<Column>,
Expand All @@ -186,6 +191,20 @@ impl Header {
}
}

/// Performs a deep copy, exactly as a derived [`Clone::clone`] would.
///
/// `Header` deliberately omits a `Clone` impl because copying it is too
/// expensive for normal execution paths. Error reporting is the exception:
/// error objects need an owned `Header` to produce useful messages, and
/// performance there is irrelevant — hence this explicitly named method.
pub fn clone_for_error(&self) -> Self {
    // Copy every field; keep this in sync if `Header` gains fields.
    let table_name = self.table_name.clone();
    let fields = self.fields.clone();
    let constraints = self.constraints.clone();
    Header {
        table_name,
        fields,
        constraints,
    }
}

pub fn from_product_type(table_name: String, fields: ProductType) -> Self {
let cols = fields
.elements
Expand All @@ -209,6 +228,14 @@ impl Header {
Self::new(table_name, cols, Default::default())
}

/// Converts this header's columns into a [`ProductType`], preserving
/// column order.
///
/// Each column contributes one [`ProductTypeElement`] carrying a clone of
/// its algebraic type and, when the column is named, an owned copy of that
/// name.
pub fn to_product_type(&self) -> ProductType {
    let elements = self.fields.iter().map(|col| {
        let name = col.field.field_name().map(ToString::to_string);
        ProductTypeElement::new(col.algebraic_type.clone(), name)
    });
    ProductType::from_iter(elements)
}

pub fn for_mem_table(fields: ProductType) -> Self {
let table_name = format!("mem#{:x}", calculate_hash(&fields));
Self::from_product_type(table_name, fields)
Expand Down Expand Up @@ -246,7 +273,7 @@ impl Header {

pub fn column_pos_or_err<'a>(&'a self, col: &'a FieldName) -> Result<ColId, RelationError> {
self.column_pos(col)
.ok_or_else(|| RelationError::FieldNotFound(self.clone(), col.clone()))
.ok_or_else(|| RelationError::FieldNotFound(self.clone_for_error(), col.clone()))
}

/// Finds the position of a field with `name`.
Expand Down Expand Up @@ -419,7 +446,7 @@ impl RowCount {
/// A [Relation] is anything that could be represented as a [Header] of `[ColumnName:ColumnType]` that
/// generates rows/tuples of [AlgebraicValue] that exactly match that [Header].
pub trait Relation {
fn head(&self) -> &Header;
fn head(&self) -> &Arc<Header>;
/// Specify the size in rows of the [Relation].
///
/// Warning: It should at least be precise in the lower-bound estimate.
Expand All @@ -429,14 +456,14 @@ pub trait Relation {
/// A stored table from [RelationalDB]
#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
pub struct DbTable {
pub head: Header,
pub head: Arc<Header>,
pub table_id: TableId,
pub table_type: StTableType,
pub table_access: StAccess,
}

impl DbTable {
pub fn new(head: Header, table_id: TableId, table_type: StTableType, table_access: StAccess) -> Self {
pub fn new(head: Arc<Header>, table_id: TableId, table_type: StTableType, table_access: StAccess) -> Self {
Self {
head,
table_id,
Expand All @@ -447,7 +474,7 @@ impl DbTable {
}

impl Relation for DbTable {
fn head(&self) -> &Header {
fn head(&self) -> &Arc<Header> {
&self.head
}

Expand Down Expand Up @@ -489,28 +516,28 @@ mod tests {
let head = head("t1", ("a", "b"), 0);
let new = head.project(&[] as &[FieldName]).unwrap();

let mut empty = head.clone();
let mut empty = head.clone_for_error();
empty.fields.clear();
empty.constraints.clear();

assert_eq!(empty, new);

let all = head.clone();
let all = head.clone_for_error();
let new = head
.project(&[FieldName::named("t1", "a"), FieldName::named("t1", "b")])
.unwrap();

assert_eq!(all, new);

let mut first = head.clone();
let mut first = head.clone_for_error();
first.fields.pop();
first.constraints = first.retain_constraints(&0.into());

let new = head.project(&[FieldName::named("t1", "a")]).unwrap();

assert_eq!(first, new);

let mut second = head.clone();
let mut second = head.clone_for_error();
second.fields.remove(0);
second.constraints = second.retain_constraints(&1.into());

Expand Down
Loading

2 comments on commit d24ead5

@github-actions
Copy link

@github-actions github-actions bot commented on d24ead5 Feb 27, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Benchmark results

Benchmark Report

Legend:

  • load: number of rows pre-loaded into the database
  • count: number of rows touched by the transaction
  • index types:
    • unique: a single index on the id column
    • non_unique: no indexes
    • multi_index: non-unique index on every column
  • schemas:
    • person(id: u32, name: String, age: u64)
    • location(id: u32, x: u64, y: u64)

All throughputs are single-threaded.

Empty transaction

db on disk new latency old latency new throughput old throughput
sqlite 💿 412.9±2.36ns 417.7±1.42ns - -
sqlite 🧠 402.9±1.76ns 413.6±1.29ns - -
stdb_raw 💿 775.1±1.58ns 763.7±4.16ns - -
stdb_raw 🧠 724.4±1.59ns 713.9±0.91ns - -

Single-row insertions

db on disk schema index type load new latency old latency new throughput old throughput

Multi-row insertions

db on disk schema index type load count new latency old latency new throughput old throughput
sqlite 💿 u32_u64_str btree_each_column 2048 256 516.8±0.83µs 517.0±0.33µs 1934 tx/sec 1934 tx/sec
sqlite 💿 u32_u64_str unique_0 2048 256 141.2±0.42µs 137.1±0.96µs 6.9 Ktx/sec 7.1 Ktx/sec
sqlite 💿 u32_u64_u64 btree_each_column 2048 256 422.4±1.20µs 443.0±63.84µs 2.3 Ktx/sec 2.2 Ktx/sec
sqlite 💿 u32_u64_u64 unique_0 2048 256 130.8±6.70µs 127.2±0.71µs 7.5 Ktx/sec 7.7 Ktx/sec
sqlite 🧠 u32_u64_str btree_each_column 2048 256 449.1±0.78µs 449.2±0.99µs 2.2 Ktx/sec 2.2 Ktx/sec
sqlite 🧠 u32_u64_str unique_0 2048 256 120.7±0.26µs 121.4±0.44µs 8.1 Ktx/sec 8.0 Ktx/sec
sqlite 🧠 u32_u64_u64 btree_each_column 2048 256 368.5±0.28µs 367.9±0.34µs 2.7 Ktx/sec 2.7 Ktx/sec
sqlite 🧠 u32_u64_u64 unique_0 2048 256 109.9±0.90µs 107.8±0.65µs 8.9 Ktx/sec 9.1 Ktx/sec
stdb_raw 💿 u32_u64_str btree_each_column 2048 256 741.9±0.92µs 743.6±0.58µs 1347 tx/sec 1344 tx/sec
stdb_raw 💿 u32_u64_str unique_0 2048 256 644.2±1.16µs 642.7±2.19µs 1552 tx/sec 1555 tx/sec
stdb_raw 💿 u32_u64_u64 btree_each_column 2048 256 450.5±0.45µs 445.0±0.35µs 2.2 Ktx/sec 2.2 Ktx/sec
stdb_raw 💿 u32_u64_u64 unique_0 2048 256 405.7±0.94µs 402.3±0.30µs 2.4 Ktx/sec 2.4 Ktx/sec
stdb_raw 🧠 u32_u64_str btree_each_column 2048 256 513.6±0.19µs 517.1±0.25µs 1946 tx/sec 1933 tx/sec
stdb_raw 🧠 u32_u64_str unique_0 2048 256 426.0±1.24µs 422.6±0.53µs 2.3 Ktx/sec 2.3 Ktx/sec
stdb_raw 🧠 u32_u64_u64 btree_each_column 2048 256 345.4±0.22µs 340.6±0.56µs 2.8 Ktx/sec 2.9 Ktx/sec
stdb_raw 🧠 u32_u64_u64 unique_0 2048 256 306.3±0.90µs 305.1±0.60µs 3.2 Ktx/sec 3.2 Ktx/sec

Full table iterate

db on disk schema index type new latency old latency new throughput old throughput
sqlite 💿 u32_u64_str unique_0 21.2±0.37µs 20.6±0.16µs 46.0 Ktx/sec 47.3 Ktx/sec
sqlite 💿 u32_u64_u64 unique_0 20.1±0.12µs 19.2±0.07µs 48.6 Ktx/sec 50.9 Ktx/sec
sqlite 🧠 u32_u64_str unique_0 20.0±0.22µs 19.6±0.25µs 48.8 Ktx/sec 49.8 Ktx/sec
sqlite 🧠 u32_u64_u64 unique_0 18.7±0.07µs 18.6±0.08µs 52.3 Ktx/sec 52.5 Ktx/sec
stdb_raw 💿 u32_u64_str unique_0 18.8±0.05µs 17.8±0.00µs 52.0 Ktx/sec 54.9 Ktx/sec
stdb_raw 💿 u32_u64_u64 unique_0 16.0±0.00µs 15.0±0.00µs 61.2 Ktx/sec 65.3 Ktx/sec
stdb_raw 🧠 u32_u64_str unique_0 18.7±0.00µs 17.7±0.00µs 52.1 Ktx/sec 55.0 Ktx/sec
stdb_raw 🧠 u32_u64_u64 unique_0 15.9±0.00µs 14.9±0.00µs 61.4 Ktx/sec 65.6 Ktx/sec

Find unique key

db on disk key type load new latency old latency new throughput old throughput

Filter

db on disk key type index strategy load count new latency old latency new throughput old throughput
sqlite 💿 string index 2048 256 67.4±0.22µs 65.5±0.39µs 14.5 Ktx/sec 14.9 Ktx/sec
sqlite 💿 u64 index 2048 256 63.7±0.22µs 62.0±0.14µs 15.3 Ktx/sec 15.8 Ktx/sec
sqlite 🧠 string index 2048 256 65.4±0.41µs 63.2±0.24µs 14.9 Ktx/sec 15.5 Ktx/sec
sqlite 🧠 u64 index 2048 256 59.0±0.21µs 58.5±0.25µs 16.6 Ktx/sec 16.7 Ktx/sec
stdb_raw 💿 string index 2048 256 5.8±0.00µs 5.7±0.00µs 168.8 Ktx/sec 172.0 Ktx/sec
stdb_raw 💿 u64 index 2048 256 5.7±0.00µs 5.6±0.00µs 171.0 Ktx/sec 174.6 Ktx/sec
stdb_raw 🧠 string index 2048 256 5.7±0.01µs 5.6±0.00µs 170.4 Ktx/sec 173.8 Ktx/sec
stdb_raw 🧠 u64 index 2048 256 5.7±0.00µs 5.5±0.01µs 172.8 Ktx/sec 176.6 Ktx/sec

Serialize

schema format count new latency old latency new throughput old throughput
u32_u64_str bsatn 100 2.4±0.00µs 2.5±0.00µs 39.8 Mtx/sec 37.9 Mtx/sec
u32_u64_str json 100 5.0±0.07µs 5.7±0.03µs 19.0 Mtx/sec 16.7 Mtx/sec
u32_u64_str product_value 100 648.0±0.27ns 650.3±2.21ns 147.2 Mtx/sec 146.7 Mtx/sec
u32_u64_u64 bsatn 100 1712.6±63.76ns 1776.7±35.59ns 55.7 Mtx/sec 53.7 Mtx/sec
u32_u64_u64 json 100 3.4±0.03µs 3.8±0.01µs 28.1 Mtx/sec 24.8 Mtx/sec
u32_u64_u64 product_value 100 601.3±0.51ns 598.1±0.63ns 158.6 Mtx/sec 159.4 Mtx/sec

Module: invoke with large arguments

arg size new latency old latency new throughput old throughput
64KiB 60.6±3.71µs 65.8±4.88µs - -

Module: print bulk

line count new latency old latency new throughput old throughput
1 28.4±2.96µs 26.6±1.49µs - -
100 199.8±0.66µs 204.1±11.10µs - -
1000 1882.0±329.56µs 1834.1±20.45µs - -

Remaining benchmarks

name new latency old latency new throughput old throughput
sqlite/💿/update_bulk/u32_u64_str/unique_0/load=2048/count=256 47.5±0.25µs 45.1±0.09µs 20.5 Ktx/sec 21.7 Ktx/sec
sqlite/💿/update_bulk/u32_u64_u64/unique_0/load=2048/count=256 41.8±0.12µs 40.2±0.25µs 23.4 Ktx/sec 24.3 Ktx/sec
sqlite/🧠/update_bulk/u32_u64_str/unique_0/load=2048/count=256 40.1±0.16µs 37.7±0.05µs 24.4 Ktx/sec 25.9 Ktx/sec
sqlite/🧠/update_bulk/u32_u64_u64/unique_0/load=2048/count=256 36.3±0.12µs 34.7±0.29µs 26.9 Ktx/sec 28.1 Ktx/sec
stdb_module/💿/update_bulk/u32_u64_str/unique_0/load=2048/count=256 1940.5±1.47µs 1972.9±5.02µs 515 tx/sec 506 tx/sec
stdb_module/💿/update_bulk/u32_u64_u64/unique_0/load=2048/count=256 1425.0±1.19µs 1432.3±1.25µs 701 tx/sec 698 tx/sec
stdb_raw/💿/update_bulk/u32_u64_str/unique_0/load=2048/count=256 1159.5±1.24µs 1151.1±0.83µs 862 tx/sec 868 tx/sec
stdb_raw/💿/update_bulk/u32_u64_u64/unique_0/load=2048/count=256 797.3±1.31µs 790.1±0.29µs 1254 tx/sec 1265 tx/sec
stdb_raw/🧠/update_bulk/u32_u64_str/unique_0/load=2048/count=256 833.5±1.02µs 824.0±0.62µs 1199 tx/sec 1213 tx/sec
stdb_raw/🧠/update_bulk/u32_u64_u64/unique_0/load=2048/count=256 601.4±0.38µs 598.5±0.66µs 1662 tx/sec 1670 tx/sec

@github-actions
Copy link

@github-actions github-actions bot commented on d24ead5 Feb 27, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Benchmark results (page failed to load: 502 Bad Gateway)

502 Bad Gateway


nginx

Please sign in to comment.