diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a54e6b44a..c4db960b9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1277,6 +1277,7 @@ jobs: sed -i 's|default_storage_path = "./data/storage"|default_storage_path = "./test-data/storage"|g' server.toml sed -i 's|logs_path = "./logs"|logs_path = "./test-data/logs"|g' server.toml sed -i 's|jwt_secret = ".*"|jwt_secret = "pg-native-test-secret-key-minimum-32-characters-long"|g' server.toml + printf '\n[cluster]\ncluster_id = "pg-native-tests"\nnode_id = 1\nrpc_addr = "127.0.0.1:9188"\napi_addr = "http://127.0.0.1:8080"\nuser_shards = 1\nshared_shards = 1\n' >> server.toml - name: Start server shell: bash @@ -1285,7 +1286,7 @@ jobs: KALAMDB_ROOT_PASSWORD: "kalamdb123" KALAMDB_JWT_SECRET: "pg-native-test-secret-key-minimum-32-characters-long" KALAMDB_NODE_ID: "1" - KALAMDB_CLUSTER_RPC_ADDR: "0.0.0.0:9188" + KALAMDB_CLUSTER_RPC_ADDR: "127.0.0.1:9188" KALAMDB_CLUSTER_API_ADDR: "http://127.0.0.1:8080" run: | set -euo pipefail @@ -1319,20 +1320,41 @@ jobs: set -euo pipefail ./pg/scripts/pgrx-test-setup.sh 2>&1 | tee pg-pgrx-setup-output.txt - - name: Run native PG extension e2e tests + - name: Run native PG extension perf tests (informational) + continue-on-error: true shell: bash env: KALAMDB_SERVER_URL: "http://127.0.0.1:8080" KALAMDB_ROOT_PASSWORD: "kalamdb123" run: | set -euo pipefail + : > pg-native-test-output.txt cargo nextest run \ -p kalam-pg-extension \ --features e2e \ - -E 'test(e2e)' \ + --test e2e_perf \ --test-threads 1 \ + --no-fail-fast \ 2>&1 | tee pg-native-test-output.txt + - name: Run native PG extension e2e tests + shell: bash + env: + KALAMDB_SERVER_URL: "http://127.0.0.1:8080" + KALAMDB_ROOT_PASSWORD: "kalamdb123" + run: | + set -euo pipefail + cargo nextest run \ + -p kalam-pg-extension \ + --features e2e \ + --test e2e_ddl \ + --test e2e_dml \ + --test e2e_scenarios \ + --test extension_metadata \ + --test session_settings \ + --test-threads 1 \ + 2>&1 | tee -a pg-native-test-output.txt + - name: Stop pgrx PostgreSQL if: always() shell: bash @@ -1826,6 +1848,8 @@ jobs: push: true platforms: linux/amd64 build-args: | + PG_MAJOR=${{ env.PG_EXTENSION_MAJOR }} + POSTGRES_BASE_IMAGE=public.ecr.aws/docker/library/postgres:${{ env.PG_EXTENSION_MAJOR }}-bookworm OCI_IMAGE_DESCRIPTION=${{ steps.pg_image_description.outputs.value }} labels: | org.opencontainers.image.title=pg-kalam diff --git a/.gitignore b/.gitignore index f4d3a73bb..8a85f33fb 100644 --- a/.gitignore +++ b/.gitignore @@ -121,3 +121,4 @@ ts-sdk-repro/server.toml /benchv2/logs /benchv2/logs link/sdks/typescript/client/.npmrc +/target-pg-bench diff --git a/Cargo.lock b/Cargo.lock index aeda5fef0..85ce9b988 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -823,9 +823,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.16.1" +version = "1.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94bffc006df10ac2a68c83692d734a465f8ee6c5b384d8545a636f81d858f4bf" +checksum = "a054912289d18629dc78375ba2c3726a3afe3ff71b4edba9dedfca0e3446d1fc" dependencies = [ "aws-lc-sys", "untrusted 0.7.1", @@ -834,9 +834,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.38.0" +version = "0.39.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4321e568ed89bb5a7d291a7f37997c2c0df89809d7b6d12062c81ddb54aa782e" +checksum = 
"83a25cf98105baa966497416dbd42565ce3a8cf8dbfd59803ec9ad46f3126399" dependencies = [ "cc", "cmake", @@ -3871,6 +3871,7 @@ dependencies = [ "http-body-util", "hyper", "hyper-util", + "kalam-client", "kalam-pg-api", "kalam-pg-client", "kalam-pg-common", @@ -4818,6 +4819,7 @@ dependencies = [ name = "link-common" version = "0.4.2-rc2" dependencies = [ + "aws-lc-rs", "base64", "bytes", "futures-util", @@ -4826,8 +4828,10 @@ dependencies = [ "js-sys", "log", "miniz_oxide 0.9.1", + "quinn-proto", "reqwest 0.13.2", "rmp-serde", + "rustls-webpki", "serde", "serde_json", "tokio", @@ -5038,11 +5042,11 @@ checksum = "e94e1e6445d314f972ff7395df2de295fe51b71821694f0b0e1e79c4f12c8577" [[package]] name = "nanoid" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" +checksum = "8628de41fe064cc3f0cf07f3d299ee3e73521adaff72278731d5c8cae3797873" dependencies = [ - "rand 0.8.5", + "rand 0.9.2", ] [[package]] @@ -5247,16 +5251,18 @@ dependencies = [ [[package]] name = "object_store" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2858065e55c148d294a9f3aae3b0fa9458edadb41a108397094566f4e3c0dfb" +checksum = "622acbc9100d3c10e2ee15804b0caa40e55c933d5aa53814cd520805b7958a49" dependencies = [ "async-trait", "base64", "bytes", "chrono", "form_urlencoded", - "futures", + "futures-channel", + "futures-core", + "futures-util", "http 1.4.0", "http-body-util", "httparse", @@ -5267,7 +5273,7 @@ dependencies = [ "parking_lot", "percent-encoding", "quick-xml", - "rand 0.9.2", + "rand 0.10.1", "reqwest 0.12.28", "ring", "rustls-pki-types", @@ -5996,9 +6002,9 @@ dependencies = [ [[package]] name = "quick-xml" -version = "0.38.4" +version = "0.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b66c2058c55a409d601666cffe35f04333cf1013010882cec174a7467cd4e21c" +checksum = "958f21e8e7ceb5a1aa7fa87fab28e7c75976e0bfe7e23ff069e0a260f894067d" dependencies = [ "memchr", "serde", @@ -6026,9 +6032,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.13" +version = "0.11.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098" dependencies = [ "aws-lc-rs", "bytes", @@ -6657,9 +6663,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.103.9" +version = "0.103.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" +checksum = "8279bb85272c9f10811ae6a6c547ff594d6a7f3c6c6b02ee9726d1d0dcfcdd06" dependencies = [ "aws-lc-rs", "ring", diff --git a/Cargo.toml b/Cargo.toml index b643146b7..c8425e5ac 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,6 +85,11 @@ url = "2.5.8" # WebSocket tokio-tungstenite = { version = "0.29.0", features = ["rustls-tls-webpki-roots"] } +# Security floor pins for vulnerable transitive TLS/QUIC crates. 
+aws-lc-rs = { version = "1.16.2", default-features = false } +quinn-proto = { version = "0.11.14", default-features = false } +rustls-webpki = { version = "0.103.10", default-features = false } + # Time handling chrono = { version = "0.4.44", features = ["serde"] } @@ -118,7 +123,7 @@ actix-multipart = "0.7" uuid = { version = "1.23.0", features = ["v4", "v7", "serde"] } # NanoID generation (21-char URL-safe unique IDs) -nanoid = "0.4.0" +nanoid = "0.5.0" # ULID generation ulid = "1.1" @@ -195,7 +200,7 @@ quote = "1.0.44" syn = { version = "2.0.117", features = ["full", "extra-traits"] } # Object storage abstraction (S3/GCS/Azure/local) -object_store = { version = "0.13.1" } +object_store = { version = "0.13.2" } # Vector ANN engines usearch = "2.24.0" diff --git a/backend/crates/kalamdb-api/src/http/sql/execute.rs b/backend/crates/kalamdb-api/src/http/sql/execute.rs index 0afe932e4..3999b4bd8 100644 --- a/backend/crates/kalamdb-api/src/http/sql/execute.rs +++ b/backend/crates/kalamdb-api/src/http/sql/execute.rs @@ -136,10 +136,8 @@ pub async fn execute_sql_v1( // 4. Build execution context let default_namespace = namespace_id.clone().unwrap_or_else(|| NamespaceId::new("default")); let base_session = app_context.base_session_context(); - let mut exec_ctx = - ExecutionContext::from_session(session, Arc::clone(&base_session)).with_namespace_id( - default_namespace.clone(), - ); + let mut exec_ctx = ExecutionContext::from_session(session, Arc::clone(&base_session)) + .with_namespace_id(default_namespace.clone()); let is_meta_leader = app_context.executor().is_leader(GroupId::Meta).await; // 5. File uploads must go to the leader @@ -188,15 +186,11 @@ pub async fn execute_sql_v1( }; // 8. Split, parse, and classify SQL statements - let prepared_statements = match split_and_prepare_statements( - &sql, - &exec_ctx, - sql_executor.get_ref(), - start_time, - ) { - Ok(stmts) => stmts, - Err(resp) => return resp, - }; + let prepared_statements = + match split_and_prepare_statements(&sql, &exec_ctx, sql_executor.get_ref(), start_time) { + Ok(stmts) => stmts, + Err(resp) => return resp, + }; if exec_ctx.request_id().is_none() && batch_requires_request_id(&prepared_statements) { exec_ctx = exec_ctx.with_request_id(Uuid::now_v7().to_string()); diff --git a/backend/crates/kalamdb-api/src/http/sql/execution_paths.rs b/backend/crates/kalamdb-api/src/http/sql/execution_paths.rs index 04d8fb43c..323c0f4a0 100644 --- a/backend/crates/kalamdb-api/src/http/sql/execution_paths.rs +++ b/backend/crates/kalamdb-api/src/http/sql/execution_paths.rs @@ -5,8 +5,8 @@ use kalamdb_commons::schemas::TableType; use kalamdb_core::app_context::AppContext; use kalamdb_core::schema_registry::SchemaRegistry; use kalamdb_core::sql::context::ExecutionContext; -use kalamdb_core::sql::executor::{PreparedExecutionStatement, ScalarValue, SqlExecutor}; use kalamdb_core::sql::executor::request_transaction_state::RequestTransactionState; +use kalamdb_core::sql::executor::{PreparedExecutionStatement, ScalarValue, SqlExecutor}; use kalamdb_core::sql::SqlImpersonationService; use kalamdb_sql::classifier::SqlStatementKind; use kalamdb_system::FileSubfolderState; @@ -256,17 +256,17 @@ pub(super) async fn execute_batch_path( let mut total_updated = 0usize; let mut total_deleted = 0usize; let mut params_remaining = Some(params); - let mut request_transaction_state = match RequestTransactionState::from_execution_context(exec_ctx) - { - Ok(state) => state, - Err(err) => { - return HttpResponse::BadRequest().json(SqlResponse::error( - 
ErrorCode::SqlExecutionError, - &err.to_string(), - took_ms(start_time), - )); - }, - }; + let mut request_transaction_state = + match RequestTransactionState::from_execution_context(exec_ctx) { + Ok(state) => state, + Err(err) => { + return HttpResponse::BadRequest().json(SqlResponse::error( + ErrorCode::SqlExecutionError, + &err.to_string(), + took_ms(start_time), + )); + }, + }; if let Some(state) = request_transaction_state.as_mut() { state.sync_from_coordinator(app_context); } @@ -294,20 +294,18 @@ pub(super) async fn execute_batch_path( let batch_len = batch_end - idx; if batch_len > 1 { - let batch_stmts: Vec<&PreparedExecutionStatement> = - prepared_statements[idx..batch_end] - .iter() - .map(|s| &s.prepared_statement) - .collect(); + let batch_stmts: Vec<&PreparedExecutionStatement> = prepared_statements + [idx..batch_end] + .iter() + .map(|s| &s.prepared_statement) + .collect(); let batch_start = Instant::now(); - match sql_executor - .try_batch_insert_in_transaction( - &batch_stmts, - exec_ctx, - transaction_id, - ) - { + match sql_executor.try_batch_insert_in_transaction( + &batch_stmts, + exec_ctx, + transaction_id, + ) { Ok(Some(results)) => { let batch_rows: usize = results.iter().map(|r| r.affected_rows()).sum(); @@ -334,11 +332,7 @@ pub(super) async fn execute_batch_path( return HttpResponse::BadRequest().json( SqlResponse::error_with_details( ErrorCode::SqlExecutionError, - &format!( - "Statement {} failed: {}", - idx + 1, - err - ), + &format!("Statement {} failed: {}", idx + 1, err), &prepared_statements[idx].prepared_statement.sql, took_ms(start_time), ), @@ -595,10 +589,7 @@ fn is_batchable_insert(stmt: &PreparedApiExecutionStatement) -> bool { return false; } matches!( - stmt.prepared_statement - .classified_statement - .as_ref() - .map(|c| c.kind()), + stmt.prepared_statement.classified_statement.as_ref().map(|c| c.kind()), Some(SqlStatementKind::Insert(_)) ) } diff --git a/backend/crates/kalamdb-api/src/http/sql/forward.rs b/backend/crates/kalamdb-api/src/http/sql/forward.rs index ba8226a72..f394b28a4 100644 --- a/backend/crates/kalamdb-api/src/http/sql/forward.rs +++ b/backend/crates/kalamdb-api/src/http/sql/forward.rs @@ -214,13 +214,6 @@ pub async fn handle_not_leader_error( ); } - forward_sql_grpc( - ForwardTarget::Leader, - http_req, - req, - app_context, - request_id, - start_time, - ) - .await + forward_sql_grpc(ForwardTarget::Leader, http_req, req, app_context, request_id, start_time) + .await } diff --git a/backend/crates/kalamdb-api/src/http/sql/helpers/converter.rs b/backend/crates/kalamdb-api/src/http/sql/helpers/converter.rs index a7459e734..0d34bdf49 100644 --- a/backend/crates/kalamdb-api/src/http/sql/helpers/converter.rs +++ b/backend/crates/kalamdb-api/src/http/sql/helpers/converter.rs @@ -1,9 +1,7 @@ //! 
Arrow to JSON conversion helpers use arrow::record_batch::RecordBatch; -use kalamdb_commons::conversions::{ - mask_sensitive_rows_for_role, schema_fields_from_arrow_schema, -}; +use kalamdb_commons::conversions::{mask_sensitive_rows_for_role, schema_fields_from_arrow_schema}; use kalamdb_commons::models::Role; use kalamdb_commons::models::Username; use kalamdb_commons::schemas::SchemaField; diff --git a/backend/crates/kalamdb-api/src/http/sql/helpers/params.rs b/backend/crates/kalamdb-api/src/http/sql/helpers/params.rs index 448bd8467..178002e1e 100644 --- a/backend/crates/kalamdb-api/src/http/sql/helpers/params.rs +++ b/backend/crates/kalamdb-api/src/http/sql/helpers/params.rs @@ -77,22 +77,19 @@ mod tests { #[test] fn parse_forward_params_preserves_scalar_types() { - let params = Some(vec![json!(null), json!(true), json!(42), json!(3.5), json!("abc")]); + let params = Some(vec![ + json!(null), + json!(true), + json!(42), + json!(3.5), + json!("abc"), + ]); let parsed = parse_forward_params(¶ms).expect("convert forwarded params"); - assert!(matches!( - parsed[0].value, - Some(forward_sql_param::Value::NullValue(_)) - )); - assert!(matches!( - parsed[1].value, - Some(forward_sql_param::Value::BoolValue(true)) - )); - assert!(matches!( - parsed[2].value, - Some(forward_sql_param::Value::Int64Value(42)) - )); + assert!(matches!(parsed[0].value, Some(forward_sql_param::Value::NullValue(_)))); + assert!(matches!(parsed[1].value, Some(forward_sql_param::Value::BoolValue(true)))); + assert!(matches!(parsed[2].value, Some(forward_sql_param::Value::Int64Value(42)))); assert!(matches!( parsed[3].value, Some(forward_sql_param::Value::Float64Value(value)) if value == 3.5 diff --git a/backend/crates/kalamdb-api/src/http/sql/statements.rs b/backend/crates/kalamdb-api/src/http/sql/statements.rs index a7c1ecb43..b4f2156b9 100644 --- a/backend/crates/kalamdb-api/src/http/sql/statements.rs +++ b/backend/crates/kalamdb-api/src/http/sql/statements.rs @@ -151,8 +151,7 @@ fn prepare_api_statement( )) }, kalamdb_sql::classifier::StatementClassificationError::InvalidSql { - message, - .. + message, .. 
            } => HttpResponse::BadRequest().json(SqlResponse::error(
                ErrorCode::InvalidSql,
                &message,
@@ -200,12 +199,7 @@ pub(super) fn split_and_prepare_statements(
 
     let mut prepared = Vec::with_capacity(raw_statements.len());
     for raw_statement in &raw_statements {
-        prepared.push(prepare_api_statement(
-            raw_statement,
-            exec_ctx,
-            sql_executor,
-            start_time,
-        )?);
+        prepared.push(prepare_api_statement(raw_statement, exec_ctx, sql_executor, start_time)?);
     }
 
     Ok(prepared)
diff --git a/backend/crates/kalamdb-api/src/ui/mod.rs b/backend/crates/kalamdb-api/src/ui/mod.rs
index 2fca88c5b..7ab2b849f 100644
--- a/backend/crates/kalamdb-api/src/ui/mod.rs
+++ b/backend/crates/kalamdb-api/src/ui/mod.rs
@@ -8,21 +8,31 @@ use std::path::PathBuf;
 
 #[derive(Debug, Clone)]
 pub struct UiRuntimeConfig {
-    backend_origin: String,
+    backend_origin: Option<String>,
 }
 
 impl UiRuntimeConfig {
-    pub fn new(backend_origin: String) -> Self {
-        Self { backend_origin }
+    pub fn new(backend_origin: Option<String>) -> Self {
+        Self {
+            backend_origin: backend_origin
+                .as_deref()
+                .map(str::trim)
+                .filter(|value| !value.is_empty())
+                .map(|value| value.trim_end_matches('/').to_string()),
+        }
     }
 
     fn script_body(&self) -> String {
-        let payload = serde_json::json!({
-            "backendOrigin": self.backend_origin,
-        });
-        format!(
-            "window.__KALAMDB_RUNTIME_CONFIG__ = Object.freeze({payload});"
-        )
+        let mut payload = serde_json::Map::new();
+        if let Some(backend_origin) = &self.backend_origin {
+            payload.insert(
+                "backendOrigin".to_string(),
+                serde_json::Value::String(backend_origin.clone()),
+            );
+        }
+
+        let payload = serde_json::Value::Object(payload);
+        format!("window.__KALAMDB_RUNTIME_CONFIG__ = Object.freeze({payload});")
     }
 }
 
@@ -72,10 +82,29 @@ pub fn configure_filesystem_ui_routes(
             .default_handler(web::to(move |data: web::Data<String>| {
                 let content = data.get_ref().clone();
                 async move {
-                    HttpResponse::Ok()
-                        .content_type("text/html; charset=utf-8")
-                        .body(content)
+                    HttpResponse::Ok().content_type("text/html; charset=utf-8").body(content)
                 }
             })),
     );
 }
+
+#[cfg(test)]
+mod tests {
+    use super::UiRuntimeConfig;
+
+    #[test]
+    fn test_runtime_config_script_includes_backend_origin_when_configured() {
+        let script =
+            UiRuntimeConfig::new(Some("https://kalamdb.masky.app/".to_string())).script_body();
+
+        assert!(script.contains("backendOrigin"));
+        assert!(script.contains("https://kalamdb.masky.app"));
+    }
+
+    #[test]
+    fn test_runtime_config_script_omits_backend_origin_when_unset() {
+        let script = UiRuntimeConfig::new(Some(" ".to_string())).script_body();
+
+        assert_eq!(script, "window.__KALAMDB_RUNTIME_CONFIG__ = Object.freeze({});");
+    }
+}
diff --git a/backend/crates/kalamdb-commons/src/conversions/arrow_json_conversion.rs b/backend/crates/kalamdb-commons/src/conversions/arrow_json_conversion.rs
index 67eb7e38c..239966cc0 100644
--- a/backend/crates/kalamdb-commons/src/conversions/arrow_json_conversion.rs
+++ b/backend/crates/kalamdb-commons/src/conversions/arrow_json_conversion.rs
@@ -207,8 +207,7 @@ fn build_array_from_scalars(field: &Field, values: Vec<ScalarValue>) -> Result {
             build_embedding_array(child.clone(), *len, values)
         },
-        _ => ScalarValue::iter_to_array(values.into_iter())
-            .map_err(|e| format!("{}", e)),
+        _ => ScalarValue::iter_to_array(values.into_iter()).map_err(|e| format!("{}", e)),
     }
 }
 
@@ -286,7 +285,13 @@ fn numeric_array_to_vec(array: &dyn Array, dimensions: usize) -> Option<Vec<f32>>
         Some(
             (0..float_array.len())
-                .map(|idx| if float_array.is_null(idx) { 0.0 } else { float_array.value(idx) })
+                .map(|idx| {
+                    if float_array.is_null(idx) {
+                        0.0
+                    } else {
+                        float_array.value(idx)
+                    }
+                })
                 .collect(),
         )
     }
diff --git a/backend/crates/kalamdb-commons/src/conversions/mod.rs b/backend/crates/kalamdb-commons/src/conversions/mod.rs
index 86a873f7e..1a226fb77 100644
--- a/backend/crates/kalamdb-commons/src/conversions/mod.rs
+++ b/backend/crates/kalamdb-commons/src/conversions/mod.rs
@@ -66,11 +66,11 @@ pub use scalar_numeric::{as_f64, scalar_to_f64, scalar_to_i64};
 pub use scalar_size::estimate_scalar_value_size;
 #[cfg(feature = "conversions")]
 pub use scalar_string::{parse_string_as_scalar, scalar_to_pk_string};
+#[cfg(all(feature = "schema-metadata", feature = "arrow-conversion"))]
+pub use schema_metadata::{mask_sensitive_rows_for_role, schema_fields_from_arrow_schema};
 #[cfg(feature = "schema-metadata")]
 pub use schema_metadata::{
     read_kalam_column_flags_metadata, read_kalam_data_type_metadata,
     with_kalam_column_flags_metadata, with_kalam_data_type_metadata,
     KALAM_COLUMN_FLAGS_METADATA_KEY, KALAM_DATA_TYPE_METADATA_KEY,
 };
-#[cfg(all(feature = "schema-metadata", feature = "arrow-conversion"))]
-pub use schema_metadata::{mask_sensitive_rows_for_role, schema_fields_from_arrow_schema};
diff --git a/backend/crates/kalamdb-commons/src/models/ids/mod.rs b/backend/crates/kalamdb-commons/src/models/ids/mod.rs
index 242036d3b..aba05beb0 100644
--- a/backend/crates/kalamdb-commons/src/models/ids/mod.rs
+++ b/backend/crates/kalamdb-commons/src/models/ids/mod.rs
@@ -22,9 +22,9 @@ mod row_id;
 mod shard_id;
 mod storage_id;
 mod table_id;
-mod transaction_id;
 mod table_version_id;
 mod topic_id;
+mod transaction_id;
 mod user_id;
 mod user_row_id;
 
@@ -42,7 +42,7 @@ pub use shard_id::ShardId;
 pub use storage_id::StorageId;
 pub use table_id::TableId;
 pub use table_version_id::{TableVersionId, LATEST_MARKER, VERSION_MARKER};
-pub use transaction_id::TransactionId;
 pub use topic_id::TopicId;
+pub use transaction_id::TransactionId;
 pub use user_id::UserId;
 pub use user_row_id::UserRowId;
diff --git a/backend/crates/kalamdb-commons/src/models/ids/transaction_id.rs b/backend/crates/kalamdb-commons/src/models/ids/transaction_id.rs
index 87dfbcfde..cc3926610 100644
--- a/backend/crates/kalamdb-commons/src/models/ids/transaction_id.rs
+++ b/backend/crates/kalamdb-commons/src/models/ids/transaction_id.rs
@@ -88,9 +88,7 @@ fn validate_uuid_v7(id: &str) -> Result<(), TransactionIdValidationError> {
     }
 
     if bytes[14] != b'7' {
-        return Err(TransactionIdValidationError(
-            "transaction id must be a UUID v7".to_string(),
-        ));
+        return Err(TransactionIdValidationError("transaction id must be a UUID v7".to_string()));
     }
 
     let variant = bytes[19].to_ascii_lowercase();
@@ -219,4 +217,4 @@ mod tests {
         assert_eq!(restored, tx_id);
         assert!(serde_json::from_str::<TransactionId>("\"not-a-uuid\"").is_err());
     }
-}
\ No newline at end of file
+}
diff --git a/backend/crates/kalamdb-commons/src/models/mod.rs b/backend/crates/kalamdb-commons/src/models/mod.rs
index 0f3bab27e..20f73ecde 100644
--- a/backend/crates/kalamdb-commons/src/models/mod.rs
+++ b/backend/crates/kalamdb-commons/src/models/mod.rs
@@ -42,8 +42,8 @@ mod oauth_provider;
 mod payload_mode;
 mod read_context;
 mod role;
-mod transaction;
 mod topic_op;
+mod transaction;
 mod user_name;
 
 // Row types only available with full feature (datafusion dependency)
@@ -63,8 +63,8 @@ pub use payload_mode::PayloadMode;
 pub use read_context::ReadContext;
 pub use role::Role;
 pub use schemas::{TableAccess, TableName};
-pub use transaction::{OperationKind, TransactionOrigin, TransactionState};
 pub use topic_op::TopicOp;
+pub use transaction::{OperationKind,
TransactionOrigin, TransactionState};
 pub use user_name::UserName;
 pub use user_name::UserName as Username;
diff --git a/backend/crates/kalamdb-commons/src/models/pg_operations.rs b/backend/crates/kalamdb-commons/src/models/pg_operations.rs
index 5d6cccd6b..e6391dc88 100644
--- a/backend/crates/kalamdb-commons/src/models/pg_operations.rs
+++ b/backend/crates/kalamdb-commons/src/models/pg_operations.rs
@@ -18,6 +18,10 @@ pub struct ScanRequest {
     pub columns: Vec<String>,
     pub limit: Option<usize>,
     pub user_id: Option<UserId>,
+    /// Equality filters pushed down from the FDW WHERE clause.
+    /// Each pair is `(column_name, string_value)`. The server converts
+    /// string values to the correct Arrow type using the table schema.
+    pub filters: Vec<(String, String)>,
 }
 
 /// Domain-typed insert request.
diff --git a/backend/crates/kalamdb-commons/src/models/rows/k_table_row.rs b/backend/crates/kalamdb-commons/src/models/rows/k_table_row.rs
index 010e0b851..7b094b5f4 100644
--- a/backend/crates/kalamdb-commons/src/models/rows/k_table_row.rs
+++ b/backend/crates/kalamdb-commons/src/models/rows/k_table_row.rs
@@ -21,7 +21,13 @@ pub struct KTableRow {
 }
 
 impl KTableRow {
-    pub fn new(user_id: UserId, _seq: SeqId, _commit_seq: u64, fields: Row, _deleted: bool) -> Self {
+    pub fn new(
+        user_id: UserId,
+        _seq: SeqId,
+        _commit_seq: u64,
+        fields: Row,
+        _deleted: bool,
+    ) -> Self {
         Self {
             user_id,
             _seq,
diff --git a/backend/crates/kalamdb-commons/src/models/transaction.rs b/backend/crates/kalamdb-commons/src/models/transaction.rs
index 5c427e282..732ddfe09 100644
--- a/backend/crates/kalamdb-commons/src/models/transaction.rs
+++ b/backend/crates/kalamdb-commons/src/models/transaction.rs
@@ -136,4 +136,4 @@ mod tests {
         assert_eq!(TransactionOrigin::SqlBatch.as_str(), "SqlBatch");
         assert_eq!(TransactionOrigin::Internal.as_str(), "Internal");
     }
-}
\ No newline at end of file
+}
diff --git a/backend/crates/kalamdb-commons/src/serialization/generated/entity_envelope_generated.rs b/backend/crates/kalamdb-commons/src/serialization/generated/entity_envelope_generated.rs
index fb3292eb5..d2d239083 100644
--- a/backend/crates/kalamdb-commons/src/serialization/generated/entity_envelope_generated.rs
+++ b/backend/crates/kalamdb-commons/src/serialization/generated/entity_envelope_generated.rs
@@ -2,311 +2,364 @@
 // @generated
 
 extern crate alloc;
-
 #[allow(unused_imports, dead_code)]
 pub mod kalamdb {
-#[allow(unused_imports, dead_code)]
-pub mod serialization {
-
-
-#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
-pub const ENUM_MIN_CODEC_KIND: u8 = 0;
-#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")]
-pub const ENUM_MAX_CODEC_KIND: u8 = 1;
-#[deprecated(since = "2.0.0", note = "Use associated constants instead. 
This will no longer be generated in 2021.")] -#[allow(non_camel_case_types)] -pub const ENUM_VALUES_CODEC_KIND: [CodecKind; 2] = [ - CodecKind::FlatBuffers, - CodecKind::FlexBuffers, -]; - -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] -#[repr(transparent)] -pub struct CodecKind(pub u8); -#[allow(non_upper_case_globals)] -impl CodecKind { - pub const FlatBuffers: Self = Self(0); - pub const FlexBuffers: Self = Self(1); + #[allow(unused_imports, dead_code)] + pub mod serialization { - pub const ENUM_MIN: u8 = 0; - pub const ENUM_MAX: u8 = 1; - pub const ENUM_VALUES: &'static [Self] = &[ - Self::FlatBuffers, - Self::FlexBuffers, - ]; - /// Returns the variant's name or "" if unknown. - pub fn variant_name(self) -> Option<&'static str> { - match self { - Self::FlatBuffers => Some("FlatBuffers"), - Self::FlexBuffers => Some("FlexBuffers"), - _ => None, - } - } -} -impl ::core::fmt::Debug for CodecKind { - fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - if let Some(name) = self.variant_name() { - f.write_str(name) - } else { - f.write_fmt(format_args!("", self.0)) - } - } -} -impl<'a> ::flatbuffers::Follow<'a> for CodecKind { - type Inner = Self; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - let b = unsafe { ::flatbuffers::read_scalar_at::(buf, loc) }; - Self(b) - } -} + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MIN_CODEC_KIND: u8 = 0; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MAX_CODEC_KIND: u8 = 1; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + #[allow(non_camel_case_types)] + pub const ENUM_VALUES_CODEC_KIND: [CodecKind; 2] = + [CodecKind::FlatBuffers, CodecKind::FlexBuffers]; -impl ::flatbuffers::Push for CodecKind { - type Output = CodecKind; - #[inline] - unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { - unsafe { ::flatbuffers::emplace_scalar::(dst, self.0) }; - } -} + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] + #[repr(transparent)] + pub struct CodecKind(pub u8); + #[allow(non_upper_case_globals)] + impl CodecKind { + pub const FlatBuffers: Self = Self(0); + pub const FlexBuffers: Self = Self(1); -impl ::flatbuffers::EndianScalar for CodecKind { - type Scalar = u8; - #[inline] - fn to_little_endian(self) -> u8 { - self.0.to_le() - } - #[inline] - #[allow(clippy::wrong_self_convention)] - fn from_little_endian(v: u8) -> Self { - let b = u8::from_le(v); - Self(b) - } -} + pub const ENUM_MIN: u8 = 0; + pub const ENUM_MAX: u8 = 1; + pub const ENUM_VALUES: &'static [Self] = &[Self::FlatBuffers, Self::FlexBuffers]; + /// Returns the variant's name or "" if unknown. 
+ pub fn variant_name(self) -> Option<&'static str> { + match self { + Self::FlatBuffers => Some("FlatBuffers"), + Self::FlexBuffers => Some("FlexBuffers"), + _ => None, + } + } + } + impl ::core::fmt::Debug for CodecKind { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + if let Some(name) = self.variant_name() { + f.write_str(name) + } else { + f.write_fmt(format_args!("", self.0)) + } + } + } + impl<'a> ::flatbuffers::Follow<'a> for CodecKind { + type Inner = Self; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + let b = unsafe { ::flatbuffers::read_scalar_at::(buf, loc) }; + Self(b) + } + } -impl<'a> ::flatbuffers::Verifiable for CodecKind { - #[inline] - fn run_verifier( - v: &mut ::flatbuffers::Verifier, pos: usize - ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { - u8::run_verifier(v, pos) - } -} + impl ::flatbuffers::Push for CodecKind { + type Output = CodecKind; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + unsafe { ::flatbuffers::emplace_scalar::(dst, self.0) }; + } + } -impl ::flatbuffers::SimpleToVerifyInSlice for CodecKind {} -pub enum EntityEnvelopeOffset {} -#[derive(Copy, Clone, PartialEq)] + impl ::flatbuffers::EndianScalar for CodecKind { + type Scalar = u8; + #[inline] + fn to_little_endian(self) -> u8 { + self.0.to_le() + } + #[inline] + #[allow(clippy::wrong_self_convention)] + fn from_little_endian(v: u8) -> Self { + let b = u8::from_le(v); + Self(b) + } + } -pub struct EntityEnvelope<'a> { - pub _tab: ::flatbuffers::Table<'a>, -} + impl<'a> ::flatbuffers::Verifiable for CodecKind { + #[inline] + fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + u8::run_verifier(v, pos) + } + } -impl<'a> ::flatbuffers::Follow<'a> for EntityEnvelope<'a> { - type Inner = EntityEnvelope<'a>; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - Self { _tab: unsafe { ::flatbuffers::Table::new(buf, loc) } } - } -} + impl ::flatbuffers::SimpleToVerifyInSlice for CodecKind {} + pub enum EntityEnvelopeOffset {} + #[derive(Copy, Clone, PartialEq)] -impl<'a> EntityEnvelope<'a> { - pub const VT_CODEC_KIND: ::flatbuffers::VOffsetT = 4; - pub const VT_SCHEMA_VERSION: ::flatbuffers::VOffsetT = 6; - pub const VT_PAYLOAD: ::flatbuffers::VOffsetT = 8; + pub struct EntityEnvelope<'a> { + pub _tab: ::flatbuffers::Table<'a>, + } - #[inline] - pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { - EntityEnvelope { _tab: table } - } - #[allow(unused_mut)] - pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: ::flatbuffers::Allocator + 'bldr>( - _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, - args: &'args EntityEnvelopeArgs<'args> - ) -> ::flatbuffers::WIPOffset> { - let mut builder = EntityEnvelopeBuilder::new(_fbb); - if let Some(x) = args.payload { builder.add_payload(x); } - builder.add_schema_version(args.schema_version); - builder.add_codec_kind(args.codec_kind); - builder.finish() - } + impl<'a> ::flatbuffers::Follow<'a> for EntityEnvelope<'a> { + type Inner = EntityEnvelope<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: unsafe { ::flatbuffers::Table::new(buf, loc) }, + } + } + } + impl<'a> EntityEnvelope<'a> { + pub const VT_CODEC_KIND: ::flatbuffers::VOffsetT = 4; + pub const VT_SCHEMA_VERSION: ::flatbuffers::VOffsetT = 6; + pub const VT_PAYLOAD: ::flatbuffers::VOffsetT = 8; - #[inline] - pub fn codec_kind(&self) -> 
CodecKind { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(EntityEnvelope::VT_CODEC_KIND, Some(CodecKind::FlatBuffers)).unwrap()} - } - #[inline] - pub fn schema_version(&self) -> u16 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(EntityEnvelope::VT_SCHEMA_VERSION, Some(1)).unwrap()} - } - #[inline] - pub fn payload(&self) -> Option<::flatbuffers::Vector<'a, u8>> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'a, u8>>>(EntityEnvelope::VT_PAYLOAD, None)} - } -} + #[inline] + pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { + EntityEnvelope { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: ::flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args EntityEnvelopeArgs<'args>, + ) -> ::flatbuffers::WIPOffset> { + let mut builder = EntityEnvelopeBuilder::new(_fbb); + if let Some(x) = args.payload { + builder.add_payload(x); + } + builder.add_schema_version(args.schema_version); + builder.add_codec_kind(args.codec_kind); + builder.finish() + } -impl ::flatbuffers::Verifiable for EntityEnvelope<'_> { - #[inline] - fn run_verifier( - v: &mut ::flatbuffers::Verifier, pos: usize - ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { - v.visit_table(pos)? - .visit_field::("codec_kind", Self::VT_CODEC_KIND, false)? - .visit_field::("schema_version", Self::VT_SCHEMA_VERSION, false)? - .visit_field::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'_, u8>>>("payload", Self::VT_PAYLOAD, false)? 
- .finish(); - Ok(()) - } -} -pub struct EntityEnvelopeArgs<'a> { - pub codec_kind: CodecKind, - pub schema_version: u16, - pub payload: Option<::flatbuffers::WIPOffset<::flatbuffers::Vector<'a, u8>>>, -} -impl<'a> Default for EntityEnvelopeArgs<'a> { - #[inline] - fn default() -> Self { - EntityEnvelopeArgs { - codec_kind: CodecKind::FlatBuffers, - schema_version: 1, - payload: None, - } - } -} + #[inline] + pub fn codec_kind(&self) -> CodecKind { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::( + EntityEnvelope::VT_CODEC_KIND, + Some(CodecKind::FlatBuffers), + ) + .unwrap() + } + } + #[inline] + pub fn schema_version(&self) -> u16 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::(EntityEnvelope::VT_SCHEMA_VERSION, Some(1)).unwrap() } + } + #[inline] + pub fn payload(&self) -> Option<::flatbuffers::Vector<'a, u8>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'a, u8>>>( + EntityEnvelope::VT_PAYLOAD, + None, + ) + } + } + } -pub struct EntityEnvelopeBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { - fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, - start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, -} -impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> EntityEnvelopeBuilder<'a, 'b, A> { - #[inline] - pub fn add_codec_kind(&mut self, codec_kind: CodecKind) { - self.fbb_.push_slot::(EntityEnvelope::VT_CODEC_KIND, codec_kind, CodecKind::FlatBuffers); - } - #[inline] - pub fn add_schema_version(&mut self, schema_version: u16) { - self.fbb_.push_slot::(EntityEnvelope::VT_SCHEMA_VERSION, schema_version, 1); - } - #[inline] - pub fn add_payload(&mut self, payload: ::flatbuffers::WIPOffset<::flatbuffers::Vector<'b , u8>>) { - self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>(EntityEnvelope::VT_PAYLOAD, payload); - } - #[inline] - pub fn new(_fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>) -> EntityEnvelopeBuilder<'a, 'b, A> { - let start = _fbb.start_table(); - EntityEnvelopeBuilder { - fbb_: _fbb, - start_: start, - } - } - #[inline] - pub fn finish(self) -> ::flatbuffers::WIPOffset> { - let o = self.fbb_.end_table(self.start_); - ::flatbuffers::WIPOffset::new(o.value()) - } -} + impl ::flatbuffers::Verifiable for EntityEnvelope<'_> { + #[inline] + fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + v.visit_table(pos)? + .visit_field::("codec_kind", Self::VT_CODEC_KIND, false)? + .visit_field::("schema_version", Self::VT_SCHEMA_VERSION, false)? + .visit_field::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'_, u8>>>( + "payload", + Self::VT_PAYLOAD, + false, + )? 
+ .finish(); + Ok(()) + } + } + pub struct EntityEnvelopeArgs<'a> { + pub codec_kind: CodecKind, + pub schema_version: u16, + pub payload: Option<::flatbuffers::WIPOffset<::flatbuffers::Vector<'a, u8>>>, + } + impl<'a> Default for EntityEnvelopeArgs<'a> { + #[inline] + fn default() -> Self { + EntityEnvelopeArgs { + codec_kind: CodecKind::FlatBuffers, + schema_version: 1, + payload: None, + } + } + } -impl ::core::fmt::Debug for EntityEnvelope<'_> { - fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { - let mut ds = f.debug_struct("EntityEnvelope"); - ds.field("codec_kind", &self.codec_kind()); - ds.field("schema_version", &self.schema_version()); - ds.field("payload", &self.payload()); - ds.finish() - } -} -#[inline] -/// Verifies that a buffer of bytes contains a `EntityEnvelope` -/// and returns it. -/// Note that verification is still experimental and may not -/// catch every error, or be maximally performant. For the -/// previous, unchecked, behavior use -/// `root_as_entity_envelope_unchecked`. -pub fn root_as_entity_envelope(buf: &[u8]) -> Result, ::flatbuffers::InvalidFlatbuffer> { - ::flatbuffers::root::(buf) -} -#[inline] -/// Verifies that a buffer of bytes contains a size prefixed -/// `EntityEnvelope` and returns it. -/// Note that verification is still experimental and may not -/// catch every error, or be maximally performant. For the -/// previous, unchecked, behavior use -/// `size_prefixed_root_as_entity_envelope_unchecked`. -pub fn size_prefixed_root_as_entity_envelope(buf: &[u8]) -> Result, ::flatbuffers::InvalidFlatbuffer> { - ::flatbuffers::size_prefixed_root::(buf) -} -#[inline] -/// Verifies, with the given options, that a buffer of bytes -/// contains a `EntityEnvelope` and returns it. -/// Note that verification is still experimental and may not -/// catch every error, or be maximally performant. For the -/// previous, unchecked, behavior use -/// `root_as_entity_envelope_unchecked`. -pub fn root_as_entity_envelope_with_opts<'b, 'o>( - opts: &'o ::flatbuffers::VerifierOptions, - buf: &'b [u8], -) -> Result, ::flatbuffers::InvalidFlatbuffer> { - ::flatbuffers::root_with_opts::>(opts, buf) -} -#[inline] -/// Verifies, with the given verifier options, that a buffer of -/// bytes contains a size prefixed `EntityEnvelope` and returns -/// it. Note that verification is still experimental and may not -/// catch every error, or be maximally performant. For the -/// previous, unchecked, behavior use -/// `root_as_entity_envelope_unchecked`. -pub fn size_prefixed_root_as_entity_envelope_with_opts<'b, 'o>( - opts: &'o ::flatbuffers::VerifierOptions, - buf: &'b [u8], -) -> Result, ::flatbuffers::InvalidFlatbuffer> { - ::flatbuffers::size_prefixed_root_with_opts::>(opts, buf) -} -#[inline] -/// Assumes, without verification, that a buffer of bytes contains a EntityEnvelope and returns it. -/// # Safety -/// Callers must trust the given bytes do indeed contain a valid `EntityEnvelope`. -pub unsafe fn root_as_entity_envelope_unchecked(buf: &[u8]) -> EntityEnvelope<'_> { - unsafe { ::flatbuffers::root_unchecked::(buf) } -} -#[inline] -/// Assumes, without verification, that a buffer of bytes contains a size prefixed EntityEnvelope and returns it. -/// # Safety -/// Callers must trust the given bytes do indeed contain a valid size prefixed `EntityEnvelope`. 
-pub unsafe fn size_prefixed_root_as_entity_envelope_unchecked(buf: &[u8]) -> EntityEnvelope<'_> { - unsafe { ::flatbuffers::size_prefixed_root_unchecked::(buf) } -} -pub const ENTITY_ENVELOPE_IDENTIFIER: &str = "KENV"; + pub struct EntityEnvelopeBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { + fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, + } + impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> EntityEnvelopeBuilder<'a, 'b, A> { + #[inline] + pub fn add_codec_kind(&mut self, codec_kind: CodecKind) { + self.fbb_.push_slot::( + EntityEnvelope::VT_CODEC_KIND, + codec_kind, + CodecKind::FlatBuffers, + ); + } + #[inline] + pub fn add_schema_version(&mut self, schema_version: u16) { + self.fbb_.push_slot::(EntityEnvelope::VT_SCHEMA_VERSION, schema_version, 1); + } + #[inline] + pub fn add_payload( + &mut self, + payload: ::flatbuffers::WIPOffset<::flatbuffers::Vector<'b, u8>>, + ) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + EntityEnvelope::VT_PAYLOAD, + payload, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + ) -> EntityEnvelopeBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + EntityEnvelopeBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> ::flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + ::flatbuffers::WIPOffset::new(o.value()) + } + } -#[inline] -pub fn entity_envelope_buffer_has_identifier(buf: &[u8]) -> bool { - ::flatbuffers::buffer_has_identifier(buf, ENTITY_ENVELOPE_IDENTIFIER, false) -} + impl ::core::fmt::Debug for EntityEnvelope<'_> { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + let mut ds = f.debug_struct("EntityEnvelope"); + ds.field("codec_kind", &self.codec_kind()); + ds.field("schema_version", &self.schema_version()); + ds.field("payload", &self.payload()); + ds.finish() + } + } + #[inline] + /// Verifies that a buffer of bytes contains a `EntityEnvelope` + /// and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_entity_envelope_unchecked`. + pub fn root_as_entity_envelope( + buf: &[u8], + ) -> Result, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::root::(buf) + } + #[inline] + /// Verifies that a buffer of bytes contains a size prefixed + /// `EntityEnvelope` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `size_prefixed_root_as_entity_envelope_unchecked`. + pub fn size_prefixed_root_as_entity_envelope( + buf: &[u8], + ) -> Result, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::size_prefixed_root::(buf) + } + #[inline] + /// Verifies, with the given options, that a buffer of bytes + /// contains a `EntityEnvelope` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_entity_envelope_unchecked`. 
+ pub fn root_as_entity_envelope_with_opts<'b, 'o>( + opts: &'o ::flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::root_with_opts::>(opts, buf) + } + #[inline] + /// Verifies, with the given verifier options, that a buffer of + /// bytes contains a size prefixed `EntityEnvelope` and returns + /// it. Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_entity_envelope_unchecked`. + pub fn size_prefixed_root_as_entity_envelope_with_opts<'b, 'o>( + opts: &'o ::flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::size_prefixed_root_with_opts::>(opts, buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a EntityEnvelope and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid `EntityEnvelope`. + pub unsafe fn root_as_entity_envelope_unchecked(buf: &[u8]) -> EntityEnvelope<'_> { + unsafe { ::flatbuffers::root_unchecked::(buf) } + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a size prefixed EntityEnvelope and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid size prefixed `EntityEnvelope`. + pub unsafe fn size_prefixed_root_as_entity_envelope_unchecked( + buf: &[u8], + ) -> EntityEnvelope<'_> { + unsafe { ::flatbuffers::size_prefixed_root_unchecked::(buf) } + } + pub const ENTITY_ENVELOPE_IDENTIFIER: &str = "KENV"; -#[inline] -pub fn entity_envelope_size_prefixed_buffer_has_identifier(buf: &[u8]) -> bool { - ::flatbuffers::buffer_has_identifier(buf, ENTITY_ENVELOPE_IDENTIFIER, true) -} + #[inline] + pub fn entity_envelope_buffer_has_identifier(buf: &[u8]) -> bool { + ::flatbuffers::buffer_has_identifier(buf, ENTITY_ENVELOPE_IDENTIFIER, false) + } -#[inline] -pub fn finish_entity_envelope_buffer<'a, 'b, A: ::flatbuffers::Allocator + 'a>( - fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, - root: ::flatbuffers::WIPOffset>) { - fbb.finish(root, Some(ENTITY_ENVELOPE_IDENTIFIER)); -} + #[inline] + pub fn entity_envelope_size_prefixed_buffer_has_identifier(buf: &[u8]) -> bool { + ::flatbuffers::buffer_has_identifier(buf, ENTITY_ENVELOPE_IDENTIFIER, true) + } -#[inline] -pub fn finish_size_prefixed_entity_envelope_buffer<'a, 'b, A: ::flatbuffers::Allocator + 'a>(fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, root: ::flatbuffers::WIPOffset>) { - fbb.finish_size_prefixed(root, Some(ENTITY_ENVELOPE_IDENTIFIER)); -} -} // pub mod serialization -} // pub mod kalamdb + #[inline] + pub fn finish_entity_envelope_buffer<'a, 'b, A: ::flatbuffers::Allocator + 'a>( + fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + root: ::flatbuffers::WIPOffset>, + ) { + fbb.finish(root, Some(ENTITY_ENVELOPE_IDENTIFIER)); + } + #[inline] + pub fn finish_size_prefixed_entity_envelope_buffer< + 'a, + 'b, + A: ::flatbuffers::Allocator + 'a, + >( + fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + root: ::flatbuffers::WIPOffset>, + ) { + fbb.finish_size_prefixed(root, Some(ENTITY_ENVELOPE_IDENTIFIER)); + } + } // pub mod serialization +} // pub mod kalamdb diff --git a/backend/crates/kalamdb-commons/src/serialization/generated/row_models_generated.rs b/backend/crates/kalamdb-commons/src/serialization/generated/row_models_generated.rs index 0d52cb71d..d0cb78729 100644 --- 
a/backend/crates/kalamdb-commons/src/serialization/generated/row_models_generated.rs +++ b/backend/crates/kalamdb-commons/src/serialization/generated/row_models_generated.rs @@ -2,430 +2,525 @@ // @generated extern crate alloc; - #[allow(unused_imports, dead_code)] pub mod kalamdb { -#[allow(unused_imports, dead_code)] -pub mod serialization { - -#[allow(unused_imports, dead_code)] -pub mod row { - - -#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")] -pub const ENUM_MIN_SCALAR_TAG: u16 = 0; -#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")] -pub const ENUM_MAX_SCALAR_TAG: u16 = 24; -#[deprecated(since = "2.0.0", note = "Use associated constants instead. This will no longer be generated in 2021.")] -#[allow(non_camel_case_types)] -pub const ENUM_VALUES_SCALAR_TAG: [ScalarTag; 25] = [ - ScalarTag::Null, - ScalarTag::Boolean, - ScalarTag::Float32, - ScalarTag::Float64, - ScalarTag::Int8, - ScalarTag::Int16, - ScalarTag::Int32, - ScalarTag::Int64, - ScalarTag::UInt8, - ScalarTag::UInt16, - ScalarTag::UInt32, - ScalarTag::UInt64, - ScalarTag::Utf8, - ScalarTag::LargeUtf8, - ScalarTag::Binary, - ScalarTag::LargeBinary, - ScalarTag::FixedSizeBinary, - ScalarTag::Date32, - ScalarTag::Time64Microsecond, - ScalarTag::TimestampMillisecond, - ScalarTag::TimestampMicrosecond, - ScalarTag::TimestampNanosecond, - ScalarTag::Decimal128, - ScalarTag::Embedding, - ScalarTag::Fallback, -]; - -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] -#[repr(transparent)] -pub struct ScalarTag(pub u16); -#[allow(non_upper_case_globals)] -impl ScalarTag { - pub const Null: Self = Self(0); - pub const Boolean: Self = Self(1); - pub const Float32: Self = Self(2); - pub const Float64: Self = Self(3); - pub const Int8: Self = Self(4); - pub const Int16: Self = Self(5); - pub const Int32: Self = Self(6); - pub const Int64: Self = Self(7); - pub const UInt8: Self = Self(8); - pub const UInt16: Self = Self(9); - pub const UInt32: Self = Self(10); - pub const UInt64: Self = Self(11); - pub const Utf8: Self = Self(12); - pub const LargeUtf8: Self = Self(13); - pub const Binary: Self = Self(14); - pub const LargeBinary: Self = Self(15); - pub const FixedSizeBinary: Self = Self(16); - pub const Date32: Self = Self(17); - pub const Time64Microsecond: Self = Self(18); - pub const TimestampMillisecond: Self = Self(19); - pub const TimestampMicrosecond: Self = Self(20); - pub const TimestampNanosecond: Self = Self(21); - pub const Decimal128: Self = Self(22); - pub const Embedding: Self = Self(23); - pub const Fallback: Self = Self(24); - - pub const ENUM_MIN: u16 = 0; - pub const ENUM_MAX: u16 = 24; - pub const ENUM_VALUES: &'static [Self] = &[ - Self::Null, - Self::Boolean, - Self::Float32, - Self::Float64, - Self::Int8, - Self::Int16, - Self::Int32, - Self::Int64, - Self::UInt8, - Self::UInt16, - Self::UInt32, - Self::UInt64, - Self::Utf8, - Self::LargeUtf8, - Self::Binary, - Self::LargeBinary, - Self::FixedSizeBinary, - Self::Date32, - Self::Time64Microsecond, - Self::TimestampMillisecond, - Self::TimestampMicrosecond, - Self::TimestampNanosecond, - Self::Decimal128, - Self::Embedding, - Self::Fallback, - ]; - /// Returns the variant's name or "" if unknown. 
- pub fn variant_name(self) -> Option<&'static str> { - match self { - Self::Null => Some("Null"), - Self::Boolean => Some("Boolean"), - Self::Float32 => Some("Float32"), - Self::Float64 => Some("Float64"), - Self::Int8 => Some("Int8"), - Self::Int16 => Some("Int16"), - Self::Int32 => Some("Int32"), - Self::Int64 => Some("Int64"), - Self::UInt8 => Some("UInt8"), - Self::UInt16 => Some("UInt16"), - Self::UInt32 => Some("UInt32"), - Self::UInt64 => Some("UInt64"), - Self::Utf8 => Some("Utf8"), - Self::LargeUtf8 => Some("LargeUtf8"), - Self::Binary => Some("Binary"), - Self::LargeBinary => Some("LargeBinary"), - Self::FixedSizeBinary => Some("FixedSizeBinary"), - Self::Date32 => Some("Date32"), - Self::Time64Microsecond => Some("Time64Microsecond"), - Self::TimestampMillisecond => Some("TimestampMillisecond"), - Self::TimestampMicrosecond => Some("TimestampMicrosecond"), - Self::TimestampNanosecond => Some("TimestampNanosecond"), - Self::Decimal128 => Some("Decimal128"), - Self::Embedding => Some("Embedding"), - Self::Fallback => Some("Fallback"), - _ => None, - } - } -} -impl ::core::fmt::Debug for ScalarTag { - fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - if let Some(name) = self.variant_name() { - f.write_str(name) - } else { - f.write_fmt(format_args!("", self.0)) - } - } -} -impl<'a> ::flatbuffers::Follow<'a> for ScalarTag { - type Inner = Self; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - let b = unsafe { ::flatbuffers::read_scalar_at::(buf, loc) }; - Self(b) - } -} - -impl ::flatbuffers::Push for ScalarTag { - type Output = ScalarTag; - #[inline] - unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { - unsafe { ::flatbuffers::emplace_scalar::(dst, self.0) }; - } -} - -impl ::flatbuffers::EndianScalar for ScalarTag { - type Scalar = u16; - #[inline] - fn to_little_endian(self) -> u16 { - self.0.to_le() - } - #[inline] - #[allow(clippy::wrong_self_convention)] - fn from_little_endian(v: u16) -> Self { - let b = u16::from_le(v); - Self(b) - } -} - -impl<'a> ::flatbuffers::Verifiable for ScalarTag { - #[inline] - fn run_verifier( - v: &mut ::flatbuffers::Verifier, pos: usize - ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { - u16::run_verifier(v, pos) - } -} - -impl ::flatbuffers::SimpleToVerifyInSlice for ScalarTag {} -pub enum ScalarValuePayloadOffset {} -#[derive(Copy, Clone, PartialEq)] - -pub struct ScalarValuePayload<'a> { - pub _tab: ::flatbuffers::Table<'a>, -} - -impl<'a> ::flatbuffers::Follow<'a> for ScalarValuePayload<'a> { - type Inner = ScalarValuePayload<'a>; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - Self { _tab: unsafe { ::flatbuffers::Table::new(buf, loc) } } - } -} - -impl<'a> ScalarValuePayload<'a> { - pub const VT_TAG: ::flatbuffers::VOffsetT = 4; - pub const VT_IS_NULL: ::flatbuffers::VOffsetT = 6; - pub const VT_BOOL_VALUE: ::flatbuffers::VOffsetT = 8; - pub const VT_I8_VALUE: ::flatbuffers::VOffsetT = 10; - pub const VT_I16_VALUE: ::flatbuffers::VOffsetT = 12; - pub const VT_I32_VALUE: ::flatbuffers::VOffsetT = 14; - pub const VT_I64_VALUE: ::flatbuffers::VOffsetT = 16; - pub const VT_U8_VALUE: ::flatbuffers::VOffsetT = 18; - pub const VT_U16_VALUE: ::flatbuffers::VOffsetT = 20; - pub const VT_U32_VALUE: ::flatbuffers::VOffsetT = 22; - pub const VT_U64_VALUE: ::flatbuffers::VOffsetT = 24; - pub const VT_F32_VALUE: ::flatbuffers::VOffsetT = 26; - pub const VT_F64_VALUE: ::flatbuffers::VOffsetT = 28; - pub const VT_TEXT_VALUE: ::flatbuffers::VOffsetT = 30; - pub 
const VT_BYTES_VALUE: ::flatbuffers::VOffsetT = 32; - pub const VT_FIXED_SIZE: ::flatbuffers::VOffsetT = 34; - pub const VT_TIMEZONE: ::flatbuffers::VOffsetT = 36; - pub const VT_DECIMAL_PRECISION: ::flatbuffers::VOffsetT = 38; - pub const VT_DECIMAL_SCALE: ::flatbuffers::VOffsetT = 40; - pub const VT_EMBEDDING_SIZE: ::flatbuffers::VOffsetT = 42; - pub const VT_EMBEDDING_VALUES: ::flatbuffers::VOffsetT = 44; - pub const VT_EMBEDDING_VALID: ::flatbuffers::VOffsetT = 46; - - #[inline] - pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { - ScalarValuePayload { _tab: table } - } - #[allow(unused_mut)] - pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: ::flatbuffers::Allocator + 'bldr>( - _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, - args: &'args ScalarValuePayloadArgs<'args> - ) -> ::flatbuffers::WIPOffset> { - let mut builder = ScalarValuePayloadBuilder::new(_fbb); - builder.add_f64_value(args.f64_value); - builder.add_u64_value(args.u64_value); - builder.add_i64_value(args.i64_value); - if let Some(x) = args.embedding_valid { builder.add_embedding_valid(x); } - if let Some(x) = args.embedding_values { builder.add_embedding_values(x); } - builder.add_embedding_size(args.embedding_size); - if let Some(x) = args.timezone { builder.add_timezone(x); } - builder.add_fixed_size(args.fixed_size); - if let Some(x) = args.bytes_value { builder.add_bytes_value(x); } - if let Some(x) = args.text_value { builder.add_text_value(x); } - builder.add_f32_value(args.f32_value); - builder.add_u32_value(args.u32_value); - builder.add_i32_value(args.i32_value); - builder.add_u16_value(args.u16_value); - builder.add_i16_value(args.i16_value); - builder.add_tag(args.tag); - builder.add_decimal_scale(args.decimal_scale); - builder.add_decimal_precision(args.decimal_precision); - builder.add_u8_value(args.u8_value); - builder.add_i8_value(args.i8_value); - builder.add_bool_value(args.bool_value); - builder.add_is_null(args.is_null); - builder.finish() - } - - - #[inline] - pub fn tag(&self) -> ScalarTag { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_TAG, Some(ScalarTag::Null)).unwrap()} - } - #[inline] - pub fn is_null(&self) -> bool { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_IS_NULL, Some(false)).unwrap()} - } - #[inline] - pub fn bool_value(&self) -> bool { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_BOOL_VALUE, Some(false)).unwrap()} - } - #[inline] - pub fn i8_value(&self) -> i8 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_I8_VALUE, Some(0)).unwrap()} - } - #[inline] - pub fn i16_value(&self) -> i16 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_I16_VALUE, Some(0)).unwrap()} - } - #[inline] - pub fn i32_value(&self) -> i32 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_I32_VALUE, Some(0)).unwrap()} - } - #[inline] - pub fn i64_value(&self) -> i64 { - // Safety: - // Created from valid Table for this 
object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_I64_VALUE, Some(0)).unwrap()} - } - #[inline] - pub fn u8_value(&self) -> u8 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_U8_VALUE, Some(0)).unwrap()} - } - #[inline] - pub fn u16_value(&self) -> u16 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_U16_VALUE, Some(0)).unwrap()} - } - #[inline] - pub fn u32_value(&self) -> u32 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_U32_VALUE, Some(0)).unwrap()} - } - #[inline] - pub fn u64_value(&self) -> u64 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_U64_VALUE, Some(0)).unwrap()} - } - #[inline] - pub fn f32_value(&self) -> f32 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_F32_VALUE, Some(0.0)).unwrap()} - } - #[inline] - pub fn f64_value(&self) -> f64 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_F64_VALUE, Some(0.0)).unwrap()} - } - #[inline] - pub fn text_value(&self) -> Option<&'a str> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::<::flatbuffers::ForwardsUOffset<&str>>(ScalarValuePayload::VT_TEXT_VALUE, None)} - } - #[inline] - pub fn bytes_value(&self) -> Option<::flatbuffers::Vector<'a, u8>> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'a, u8>>>(ScalarValuePayload::VT_BYTES_VALUE, None)} - } - #[inline] - pub fn fixed_size(&self) -> i32 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_FIXED_SIZE, Some(0)).unwrap()} - } - #[inline] - pub fn timezone(&self) -> Option<&'a str> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::<::flatbuffers::ForwardsUOffset<&str>>(ScalarValuePayload::VT_TIMEZONE, None)} - } - #[inline] - pub fn decimal_precision(&self) -> u8 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_DECIMAL_PRECISION, Some(0)).unwrap()} - } - #[inline] - pub fn decimal_scale(&self) -> i8 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_DECIMAL_SCALE, Some(0)).unwrap()} - } - #[inline] - pub fn embedding_size(&self) -> i32 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(ScalarValuePayload::VT_EMBEDDING_SIZE, Some(0)).unwrap()} - } - #[inline] - pub fn embedding_values(&self) -> Option<::flatbuffers::Vector<'a, f32>> { - // Safety: - // Created from valid Table for 
this object - // which contains a valid value in this slot - unsafe { self._tab.get::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'a, f32>>>(ScalarValuePayload::VT_EMBEDDING_VALUES, None)} - } - #[inline] - pub fn embedding_valid(&self) -> Option<::flatbuffers::Vector<'a, bool>> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'a, bool>>>(ScalarValuePayload::VT_EMBEDDING_VALID, None)} - } -} - -impl ::flatbuffers::Verifiable for ScalarValuePayload<'_> { - #[inline] - fn run_verifier( - v: &mut ::flatbuffers::Verifier, pos: usize - ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { - v.visit_table(pos)? + #[allow(unused_imports, dead_code)] + pub mod serialization { + + #[allow(unused_imports, dead_code)] + pub mod row { + + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MIN_SCALAR_TAG: u16 = 0; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + pub const ENUM_MAX_SCALAR_TAG: u16 = 24; + #[deprecated( + since = "2.0.0", + note = "Use associated constants instead. This will no longer be generated in 2021." + )] + #[allow(non_camel_case_types)] + pub const ENUM_VALUES_SCALAR_TAG: [ScalarTag; 25] = [ + ScalarTag::Null, + ScalarTag::Boolean, + ScalarTag::Float32, + ScalarTag::Float64, + ScalarTag::Int8, + ScalarTag::Int16, + ScalarTag::Int32, + ScalarTag::Int64, + ScalarTag::UInt8, + ScalarTag::UInt16, + ScalarTag::UInt32, + ScalarTag::UInt64, + ScalarTag::Utf8, + ScalarTag::LargeUtf8, + ScalarTag::Binary, + ScalarTag::LargeBinary, + ScalarTag::FixedSizeBinary, + ScalarTag::Date32, + ScalarTag::Time64Microsecond, + ScalarTag::TimestampMillisecond, + ScalarTag::TimestampMicrosecond, + ScalarTag::TimestampNanosecond, + ScalarTag::Decimal128, + ScalarTag::Embedding, + ScalarTag::Fallback, + ]; + + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] + #[repr(transparent)] + pub struct ScalarTag(pub u16); + #[allow(non_upper_case_globals)] + impl ScalarTag { + pub const Null: Self = Self(0); + pub const Boolean: Self = Self(1); + pub const Float32: Self = Self(2); + pub const Float64: Self = Self(3); + pub const Int8: Self = Self(4); + pub const Int16: Self = Self(5); + pub const Int32: Self = Self(6); + pub const Int64: Self = Self(7); + pub const UInt8: Self = Self(8); + pub const UInt16: Self = Self(9); + pub const UInt32: Self = Self(10); + pub const UInt64: Self = Self(11); + pub const Utf8: Self = Self(12); + pub const LargeUtf8: Self = Self(13); + pub const Binary: Self = Self(14); + pub const LargeBinary: Self = Self(15); + pub const FixedSizeBinary: Self = Self(16); + pub const Date32: Self = Self(17); + pub const Time64Microsecond: Self = Self(18); + pub const TimestampMillisecond: Self = Self(19); + pub const TimestampMicrosecond: Self = Self(20); + pub const TimestampNanosecond: Self = Self(21); + pub const Decimal128: Self = Self(22); + pub const Embedding: Self = Self(23); + pub const Fallback: Self = Self(24); + + pub const ENUM_MIN: u16 = 0; + pub const ENUM_MAX: u16 = 24; + pub const ENUM_VALUES: &'static [Self] = &[ + Self::Null, + Self::Boolean, + Self::Float32, + Self::Float64, + Self::Int8, + Self::Int16, + Self::Int32, + Self::Int64, + Self::UInt8, + Self::UInt16, + Self::UInt32, + Self::UInt64, + Self::Utf8, + Self::LargeUtf8, + Self::Binary, 
+ Self::LargeBinary, + Self::FixedSizeBinary, + Self::Date32, + Self::Time64Microsecond, + Self::TimestampMillisecond, + Self::TimestampMicrosecond, + Self::TimestampNanosecond, + Self::Decimal128, + Self::Embedding, + Self::Fallback, + ]; + /// Returns the variant's name or "" if unknown. + pub fn variant_name(self) -> Option<&'static str> { + match self { + Self::Null => Some("Null"), + Self::Boolean => Some("Boolean"), + Self::Float32 => Some("Float32"), + Self::Float64 => Some("Float64"), + Self::Int8 => Some("Int8"), + Self::Int16 => Some("Int16"), + Self::Int32 => Some("Int32"), + Self::Int64 => Some("Int64"), + Self::UInt8 => Some("UInt8"), + Self::UInt16 => Some("UInt16"), + Self::UInt32 => Some("UInt32"), + Self::UInt64 => Some("UInt64"), + Self::Utf8 => Some("Utf8"), + Self::LargeUtf8 => Some("LargeUtf8"), + Self::Binary => Some("Binary"), + Self::LargeBinary => Some("LargeBinary"), + Self::FixedSizeBinary => Some("FixedSizeBinary"), + Self::Date32 => Some("Date32"), + Self::Time64Microsecond => Some("Time64Microsecond"), + Self::TimestampMillisecond => Some("TimestampMillisecond"), + Self::TimestampMicrosecond => Some("TimestampMicrosecond"), + Self::TimestampNanosecond => Some("TimestampNanosecond"), + Self::Decimal128 => Some("Decimal128"), + Self::Embedding => Some("Embedding"), + Self::Fallback => Some("Fallback"), + _ => None, + } + } + } + impl ::core::fmt::Debug for ScalarTag { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + if let Some(name) = self.variant_name() { + f.write_str(name) + } else { + f.write_fmt(format_args!("", self.0)) + } + } + } + impl<'a> ::flatbuffers::Follow<'a> for ScalarTag { + type Inner = Self; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + let b = unsafe { ::flatbuffers::read_scalar_at::(buf, loc) }; + Self(b) + } + } + + impl ::flatbuffers::Push for ScalarTag { + type Output = ScalarTag; + #[inline] + unsafe fn push(&self, dst: &mut [u8], _written_len: usize) { + unsafe { ::flatbuffers::emplace_scalar::(dst, self.0) }; + } + } + + impl ::flatbuffers::EndianScalar for ScalarTag { + type Scalar = u16; + #[inline] + fn to_little_endian(self) -> u16 { + self.0.to_le() + } + #[inline] + #[allow(clippy::wrong_self_convention)] + fn from_little_endian(v: u16) -> Self { + let b = u16::from_le(v); + Self(b) + } + } + + impl<'a> ::flatbuffers::Verifiable for ScalarTag { + #[inline] + fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + u16::run_verifier(v, pos) + } + } + + impl ::flatbuffers::SimpleToVerifyInSlice for ScalarTag {} + pub enum ScalarValuePayloadOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct ScalarValuePayload<'a> { + pub _tab: ::flatbuffers::Table<'a>, + } + + impl<'a> ::flatbuffers::Follow<'a> for ScalarValuePayload<'a> { + type Inner = ScalarValuePayload<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: unsafe { ::flatbuffers::Table::new(buf, loc) }, + } + } + } + + impl<'a> ScalarValuePayload<'a> { + pub const VT_TAG: ::flatbuffers::VOffsetT = 4; + pub const VT_IS_NULL: ::flatbuffers::VOffsetT = 6; + pub const VT_BOOL_VALUE: ::flatbuffers::VOffsetT = 8; + pub const VT_I8_VALUE: ::flatbuffers::VOffsetT = 10; + pub const VT_I16_VALUE: ::flatbuffers::VOffsetT = 12; + pub const VT_I32_VALUE: ::flatbuffers::VOffsetT = 14; + pub const VT_I64_VALUE: ::flatbuffers::VOffsetT = 16; + pub const VT_U8_VALUE: ::flatbuffers::VOffsetT = 18; + pub const VT_U16_VALUE: 
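// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated diff). ScalarTag above is the
// "open enum" pattern flatc emits: a transparent u16 newtype whose known
// values are associated constants, so tag values written by a newer schema
// still round-trip instead of failing to decode. Matching on it looks like
// this; the label strings are made up:
//
//     fn tag_label(tag: ScalarTag) -> &'static str {
//         match tag {
//             ScalarTag::Null => "null",
//             ScalarTag::Int32 => "int32",
//             ScalarTag::Embedding => "embedding",
//             other => other.variant_name().unwrap_or("unrecognized"),
//         }
//     }
// ---------------------------------------------------------------------------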
::flatbuffers::VOffsetT = 20; + pub const VT_U32_VALUE: ::flatbuffers::VOffsetT = 22; + pub const VT_U64_VALUE: ::flatbuffers::VOffsetT = 24; + pub const VT_F32_VALUE: ::flatbuffers::VOffsetT = 26; + pub const VT_F64_VALUE: ::flatbuffers::VOffsetT = 28; + pub const VT_TEXT_VALUE: ::flatbuffers::VOffsetT = 30; + pub const VT_BYTES_VALUE: ::flatbuffers::VOffsetT = 32; + pub const VT_FIXED_SIZE: ::flatbuffers::VOffsetT = 34; + pub const VT_TIMEZONE: ::flatbuffers::VOffsetT = 36; + pub const VT_DECIMAL_PRECISION: ::flatbuffers::VOffsetT = 38; + pub const VT_DECIMAL_SCALE: ::flatbuffers::VOffsetT = 40; + pub const VT_EMBEDDING_SIZE: ::flatbuffers::VOffsetT = 42; + pub const VT_EMBEDDING_VALUES: ::flatbuffers::VOffsetT = 44; + pub const VT_EMBEDDING_VALID: ::flatbuffers::VOffsetT = 46; + + #[inline] + pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { + ScalarValuePayload { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: ::flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args ScalarValuePayloadArgs<'args>, + ) -> ::flatbuffers::WIPOffset> { + let mut builder = ScalarValuePayloadBuilder::new(_fbb); + builder.add_f64_value(args.f64_value); + builder.add_u64_value(args.u64_value); + builder.add_i64_value(args.i64_value); + if let Some(x) = args.embedding_valid { + builder.add_embedding_valid(x); + } + if let Some(x) = args.embedding_values { + builder.add_embedding_values(x); + } + builder.add_embedding_size(args.embedding_size); + if let Some(x) = args.timezone { + builder.add_timezone(x); + } + builder.add_fixed_size(args.fixed_size); + if let Some(x) = args.bytes_value { + builder.add_bytes_value(x); + } + if let Some(x) = args.text_value { + builder.add_text_value(x); + } + builder.add_f32_value(args.f32_value); + builder.add_u32_value(args.u32_value); + builder.add_i32_value(args.i32_value); + builder.add_u16_value(args.u16_value); + builder.add_i16_value(args.i16_value); + builder.add_tag(args.tag); + builder.add_decimal_scale(args.decimal_scale); + builder.add_decimal_precision(args.decimal_precision); + builder.add_u8_value(args.u8_value); + builder.add_i8_value(args.i8_value); + builder.add_bool_value(args.bool_value); + builder.add_is_null(args.is_null); + builder.finish() + } + + #[inline] + pub fn tag(&self) -> ScalarTag { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(ScalarValuePayload::VT_TAG, Some(ScalarTag::Null)) + .unwrap() + } + } + #[inline] + pub fn is_null(&self) -> bool { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::(ScalarValuePayload::VT_IS_NULL, Some(false)).unwrap() + } + } + #[inline] + pub fn bool_value(&self) -> bool { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(ScalarValuePayload::VT_BOOL_VALUE, Some(false)) + .unwrap() + } + } + #[inline] + pub fn i8_value(&self) -> i8 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::(ScalarValuePayload::VT_I8_VALUE, Some(0)).unwrap() + } + } + #[inline] + pub fn i16_value(&self) -> i16 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + 
self._tab.get::(ScalarValuePayload::VT_I16_VALUE, Some(0)).unwrap() + } + } + #[inline] + pub fn i32_value(&self) -> i32 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::(ScalarValuePayload::VT_I32_VALUE, Some(0)).unwrap() + } + } + #[inline] + pub fn i64_value(&self) -> i64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::(ScalarValuePayload::VT_I64_VALUE, Some(0)).unwrap() + } + } + #[inline] + pub fn u8_value(&self) -> u8 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::(ScalarValuePayload::VT_U8_VALUE, Some(0)).unwrap() + } + } + #[inline] + pub fn u16_value(&self) -> u16 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::(ScalarValuePayload::VT_U16_VALUE, Some(0)).unwrap() + } + } + #[inline] + pub fn u32_value(&self) -> u32 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::(ScalarValuePayload::VT_U32_VALUE, Some(0)).unwrap() + } + } + #[inline] + pub fn u64_value(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::(ScalarValuePayload::VT_U64_VALUE, Some(0)).unwrap() + } + } + #[inline] + pub fn f32_value(&self) -> f32 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::(ScalarValuePayload::VT_F32_VALUE, Some(0.0)).unwrap() + } + } + #[inline] + pub fn f64_value(&self) -> f64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::(ScalarValuePayload::VT_F64_VALUE, Some(0.0)).unwrap() + } + } + #[inline] + pub fn text_value(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset<&str>>( + ScalarValuePayload::VT_TEXT_VALUE, + None, + ) + } + } + #[inline] + pub fn bytes_value(&self) -> Option<::flatbuffers::Vector<'a, u8>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'a, u8>>>( + ScalarValuePayload::VT_BYTES_VALUE, + None, + ) + } + } + #[inline] + pub fn fixed_size(&self) -> i32 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::(ScalarValuePayload::VT_FIXED_SIZE, Some(0)).unwrap() + } + } + #[inline] + pub fn timezone(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset<&str>>( + ScalarValuePayload::VT_TIMEZONE, + None, + ) + } + } + #[inline] + pub fn decimal_precision(&self) -> u8 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(ScalarValuePayload::VT_DECIMAL_PRECISION, Some(0)) + .unwrap() + } + } + #[inline] + pub fn decimal_scale(&self) -> i8 { + // Safety: + // Created from valid Table for this object + // 
which contains a valid value in this slot + unsafe { + self._tab.get::(ScalarValuePayload::VT_DECIMAL_SCALE, Some(0)).unwrap() + } + } + #[inline] + pub fn embedding_size(&self) -> i32 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::(ScalarValuePayload::VT_EMBEDDING_SIZE, Some(0)) + .unwrap() + } + } + #[inline] + pub fn embedding_values(&self) -> Option<::flatbuffers::Vector<'a, f32>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'a, f32>>>( + ScalarValuePayload::VT_EMBEDDING_VALUES, + None, + ) + } + } + #[inline] + pub fn embedding_valid(&self) -> Option<::flatbuffers::Vector<'a, bool>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'a, bool>>>( + ScalarValuePayload::VT_EMBEDDING_VALID, + None, + ) + } + } + } + + impl ::flatbuffers::Verifiable for ScalarValuePayload<'_> { + #[inline] + fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + v.visit_table(pos)? .visit_field::("tag", Self::VT_TAG, false)? .visit_field::("is_null", Self::VT_IS_NULL, false)? .visit_field::("bool_value", Self::VT_BOOL_VALUE, false)? @@ -449,899 +544,1120 @@ impl ::flatbuffers::Verifiable for ScalarValuePayload<'_> { .visit_field::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'_, f32>>>("embedding_values", Self::VT_EMBEDDING_VALUES, false)? .visit_field::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'_, bool>>>("embedding_valid", Self::VT_EMBEDDING_VALID, false)? 
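// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated diff): a round trip through the
// create()/accessor pair above. FlatBufferBuilder, finished_data(), and
// root::<T>() are the standard flatbuffers-crate APIs; the i32 value 42 is
// arbitrary. Any Args field left unset falls back to the Default impl
// generated further down.
//
//     let mut fbb = ::flatbuffers::FlatBufferBuilder::new();
//     let off = ScalarValuePayload::create(&mut fbb, &ScalarValuePayloadArgs {
//         tag: ScalarTag::Int32,
//         i32_value: 42,
//         ..Default::default()
//     });
//     fbb.finish(off, None);
//     let sv = ::flatbuffers::root::<ScalarValuePayload>(fbb.finished_data())
//         .expect("verifier accepts the buffer we just built");
//     assert_eq!(sv.tag(), ScalarTag::Int32);
//     assert_eq!(sv.i32_value(), 42);
// ---------------------------------------------------------------------------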
.finish(); - Ok(()) - } -} -pub struct ScalarValuePayloadArgs<'a> { - pub tag: ScalarTag, - pub is_null: bool, - pub bool_value: bool, - pub i8_value: i8, - pub i16_value: i16, - pub i32_value: i32, - pub i64_value: i64, - pub u8_value: u8, - pub u16_value: u16, - pub u32_value: u32, - pub u64_value: u64, - pub f32_value: f32, - pub f64_value: f64, - pub text_value: Option<::flatbuffers::WIPOffset<&'a str>>, - pub bytes_value: Option<::flatbuffers::WIPOffset<::flatbuffers::Vector<'a, u8>>>, - pub fixed_size: i32, - pub timezone: Option<::flatbuffers::WIPOffset<&'a str>>, - pub decimal_precision: u8, - pub decimal_scale: i8, - pub embedding_size: i32, - pub embedding_values: Option<::flatbuffers::WIPOffset<::flatbuffers::Vector<'a, f32>>>, - pub embedding_valid: Option<::flatbuffers::WIPOffset<::flatbuffers::Vector<'a, bool>>>, -} -impl<'a> Default for ScalarValuePayloadArgs<'a> { - #[inline] - fn default() -> Self { - ScalarValuePayloadArgs { - tag: ScalarTag::Null, - is_null: false, - bool_value: false, - i8_value: 0, - i16_value: 0, - i32_value: 0, - i64_value: 0, - u8_value: 0, - u16_value: 0, - u32_value: 0, - u64_value: 0, - f32_value: 0.0, - f64_value: 0.0, - text_value: None, - bytes_value: None, - fixed_size: 0, - timezone: None, - decimal_precision: 0, - decimal_scale: 0, - embedding_size: 0, - embedding_values: None, - embedding_valid: None, - } - } -} - -pub struct ScalarValuePayloadBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { - fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, - start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, -} -impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> ScalarValuePayloadBuilder<'a, 'b, A> { - #[inline] - pub fn add_tag(&mut self, tag: ScalarTag) { - self.fbb_.push_slot::(ScalarValuePayload::VT_TAG, tag, ScalarTag::Null); - } - #[inline] - pub fn add_is_null(&mut self, is_null: bool) { - self.fbb_.push_slot::(ScalarValuePayload::VT_IS_NULL, is_null, false); - } - #[inline] - pub fn add_bool_value(&mut self, bool_value: bool) { - self.fbb_.push_slot::(ScalarValuePayload::VT_BOOL_VALUE, bool_value, false); - } - #[inline] - pub fn add_i8_value(&mut self, i8_value: i8) { - self.fbb_.push_slot::(ScalarValuePayload::VT_I8_VALUE, i8_value, 0); - } - #[inline] - pub fn add_i16_value(&mut self, i16_value: i16) { - self.fbb_.push_slot::(ScalarValuePayload::VT_I16_VALUE, i16_value, 0); - } - #[inline] - pub fn add_i32_value(&mut self, i32_value: i32) { - self.fbb_.push_slot::(ScalarValuePayload::VT_I32_VALUE, i32_value, 0); - } - #[inline] - pub fn add_i64_value(&mut self, i64_value: i64) { - self.fbb_.push_slot::(ScalarValuePayload::VT_I64_VALUE, i64_value, 0); - } - #[inline] - pub fn add_u8_value(&mut self, u8_value: u8) { - self.fbb_.push_slot::(ScalarValuePayload::VT_U8_VALUE, u8_value, 0); - } - #[inline] - pub fn add_u16_value(&mut self, u16_value: u16) { - self.fbb_.push_slot::(ScalarValuePayload::VT_U16_VALUE, u16_value, 0); - } - #[inline] - pub fn add_u32_value(&mut self, u32_value: u32) { - self.fbb_.push_slot::(ScalarValuePayload::VT_U32_VALUE, u32_value, 0); - } - #[inline] - pub fn add_u64_value(&mut self, u64_value: u64) { - self.fbb_.push_slot::(ScalarValuePayload::VT_U64_VALUE, u64_value, 0); - } - #[inline] - pub fn add_f32_value(&mut self, f32_value: f32) { - self.fbb_.push_slot::(ScalarValuePayload::VT_F32_VALUE, f32_value, 0.0); - } - #[inline] - pub fn add_f64_value(&mut self, f64_value: f64) { - self.fbb_.push_slot::(ScalarValuePayload::VT_F64_VALUE, f64_value, 0.0); - } - #[inline] - pub fn 
add_text_value(&mut self, text_value: ::flatbuffers::WIPOffset<&'b str>) { - self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>(ScalarValuePayload::VT_TEXT_VALUE, text_value); - } - #[inline] - pub fn add_bytes_value(&mut self, bytes_value: ::flatbuffers::WIPOffset<::flatbuffers::Vector<'b , u8>>) { - self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>(ScalarValuePayload::VT_BYTES_VALUE, bytes_value); - } - #[inline] - pub fn add_fixed_size(&mut self, fixed_size: i32) { - self.fbb_.push_slot::(ScalarValuePayload::VT_FIXED_SIZE, fixed_size, 0); - } - #[inline] - pub fn add_timezone(&mut self, timezone: ::flatbuffers::WIPOffset<&'b str>) { - self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>(ScalarValuePayload::VT_TIMEZONE, timezone); - } - #[inline] - pub fn add_decimal_precision(&mut self, decimal_precision: u8) { - self.fbb_.push_slot::(ScalarValuePayload::VT_DECIMAL_PRECISION, decimal_precision, 0); - } - #[inline] - pub fn add_decimal_scale(&mut self, decimal_scale: i8) { - self.fbb_.push_slot::(ScalarValuePayload::VT_DECIMAL_SCALE, decimal_scale, 0); - } - #[inline] - pub fn add_embedding_size(&mut self, embedding_size: i32) { - self.fbb_.push_slot::(ScalarValuePayload::VT_EMBEDDING_SIZE, embedding_size, 0); - } - #[inline] - pub fn add_embedding_values(&mut self, embedding_values: ::flatbuffers::WIPOffset<::flatbuffers::Vector<'b , f32>>) { - self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>(ScalarValuePayload::VT_EMBEDDING_VALUES, embedding_values); - } - #[inline] - pub fn add_embedding_valid(&mut self, embedding_valid: ::flatbuffers::WIPOffset<::flatbuffers::Vector<'b , bool>>) { - self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>(ScalarValuePayload::VT_EMBEDDING_VALID, embedding_valid); - } - #[inline] - pub fn new(_fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>) -> ScalarValuePayloadBuilder<'a, 'b, A> { - let start = _fbb.start_table(); - ScalarValuePayloadBuilder { - fbb_: _fbb, - start_: start, - } - } - #[inline] - pub fn finish(self) -> ::flatbuffers::WIPOffset> { - let o = self.fbb_.end_table(self.start_); - ::flatbuffers::WIPOffset::new(o.value()) - } -} - -impl ::core::fmt::Debug for ScalarValuePayload<'_> { - fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { - let mut ds = f.debug_struct("ScalarValuePayload"); - ds.field("tag", &self.tag()); - ds.field("is_null", &self.is_null()); - ds.field("bool_value", &self.bool_value()); - ds.field("i8_value", &self.i8_value()); - ds.field("i16_value", &self.i16_value()); - ds.field("i32_value", &self.i32_value()); - ds.field("i64_value", &self.i64_value()); - ds.field("u8_value", &self.u8_value()); - ds.field("u16_value", &self.u16_value()); - ds.field("u32_value", &self.u32_value()); - ds.field("u64_value", &self.u64_value()); - ds.field("f32_value", &self.f32_value()); - ds.field("f64_value", &self.f64_value()); - ds.field("text_value", &self.text_value()); - ds.field("bytes_value", &self.bytes_value()); - ds.field("fixed_size", &self.fixed_size()); - ds.field("timezone", &self.timezone()); - ds.field("decimal_precision", &self.decimal_precision()); - ds.field("decimal_scale", &self.decimal_scale()); - ds.field("embedding_size", &self.embedding_size()); - ds.field("embedding_values", &self.embedding_values()); - ds.field("embedding_valid", &self.embedding_valid()); - ds.finish() - } -} -pub enum ColumnValueOffset {} -#[derive(Copy, Clone, PartialEq)] - -pub struct ColumnValue<'a> { - pub _tab: ::flatbuffers::Table<'a>, -} - -impl<'a> ::flatbuffers::Follow<'a> for 
ColumnValue<'a> { - type Inner = ColumnValue<'a>; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - Self { _tab: unsafe { ::flatbuffers::Table::new(buf, loc) } } - } -} - -impl<'a> ColumnValue<'a> { - pub const VT_NAME: ::flatbuffers::VOffsetT = 4; - pub const VT_VALUE: ::flatbuffers::VOffsetT = 6; - - #[inline] - pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { - ColumnValue { _tab: table } - } - #[allow(unused_mut)] - pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: ::flatbuffers::Allocator + 'bldr>( - _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, - args: &'args ColumnValueArgs<'args> - ) -> ::flatbuffers::WIPOffset> { - let mut builder = ColumnValueBuilder::new(_fbb); - if let Some(x) = args.value { builder.add_value(x); } - if let Some(x) = args.name { builder.add_name(x); } - builder.finish() - } - - - #[inline] - pub fn name(&self) -> Option<&'a str> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::<::flatbuffers::ForwardsUOffset<&str>>(ColumnValue::VT_NAME, None)} - } - #[inline] - pub fn value(&self) -> Option> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::<::flatbuffers::ForwardsUOffset>(ColumnValue::VT_VALUE, None)} - } -} - -impl ::flatbuffers::Verifiable for ColumnValue<'_> { - #[inline] - fn run_verifier( - v: &mut ::flatbuffers::Verifier, pos: usize - ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { - v.visit_table(pos)? - .visit_field::<::flatbuffers::ForwardsUOffset<&str>>("name", Self::VT_NAME, false)? - .visit_field::<::flatbuffers::ForwardsUOffset>("value", Self::VT_VALUE, false)? 
- .finish(); - Ok(()) - } -} -pub struct ColumnValueArgs<'a> { - pub name: Option<::flatbuffers::WIPOffset<&'a str>>, - pub value: Option<::flatbuffers::WIPOffset>>, -} -impl<'a> Default for ColumnValueArgs<'a> { - #[inline] - fn default() -> Self { - ColumnValueArgs { - name: None, - value: None, - } - } -} - -pub struct ColumnValueBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { - fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, - start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, -} -impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> ColumnValueBuilder<'a, 'b, A> { - #[inline] - pub fn add_name(&mut self, name: ::flatbuffers::WIPOffset<&'b str>) { - self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>(ColumnValue::VT_NAME, name); - } - #[inline] - pub fn add_value(&mut self, value: ::flatbuffers::WIPOffset>) { - self.fbb_.push_slot_always::<::flatbuffers::WIPOffset>(ColumnValue::VT_VALUE, value); - } - #[inline] - pub fn new(_fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>) -> ColumnValueBuilder<'a, 'b, A> { - let start = _fbb.start_table(); - ColumnValueBuilder { - fbb_: _fbb, - start_: start, - } - } - #[inline] - pub fn finish(self) -> ::flatbuffers::WIPOffset> { - let o = self.fbb_.end_table(self.start_); - ::flatbuffers::WIPOffset::new(o.value()) - } -} - -impl ::core::fmt::Debug for ColumnValue<'_> { - fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { - let mut ds = f.debug_struct("ColumnValue"); - ds.field("name", &self.name()); - ds.field("value", &self.value()); - ds.finish() - } -} -pub enum RowPayloadOffset {} -#[derive(Copy, Clone, PartialEq)] - -pub struct RowPayload<'a> { - pub _tab: ::flatbuffers::Table<'a>, -} - -impl<'a> ::flatbuffers::Follow<'a> for RowPayload<'a> { - type Inner = RowPayload<'a>; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - Self { _tab: unsafe { ::flatbuffers::Table::new(buf, loc) } } - } -} - -impl<'a> RowPayload<'a> { - pub const VT_COLUMNS: ::flatbuffers::VOffsetT = 4; - - #[inline] - pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { - RowPayload { _tab: table } - } - #[allow(unused_mut)] - pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: ::flatbuffers::Allocator + 'bldr>( - _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, - args: &'args RowPayloadArgs<'args> - ) -> ::flatbuffers::WIPOffset> { - let mut builder = RowPayloadBuilder::new(_fbb); - if let Some(x) = args.columns { builder.add_columns(x); } - builder.finish() - } - - - #[inline] - pub fn columns(&self) -> Option<::flatbuffers::Vector<'a, ::flatbuffers::ForwardsUOffset>>> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'a, ::flatbuffers::ForwardsUOffset>>>(RowPayload::VT_COLUMNS, None)} - } -} - -impl ::flatbuffers::Verifiable for RowPayload<'_> { - #[inline] - fn run_verifier( - v: &mut ::flatbuffers::Verifier, pos: usize - ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { - v.visit_table(pos)? - .visit_field::<::flatbuffers::ForwardsUOffset<::flatbuffers::Vector<'_, ::flatbuffers::ForwardsUOffset>>>("columns", Self::VT_COLUMNS, false)? 
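// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated diff): assembling a one-column
// row from the pieces defined above. create_string/create_vector are standard
// FlatBufferBuilder helpers; the column name "age" and its value are made up.
//
//     let mut fbb = ::flatbuffers::FlatBufferBuilder::new();
//     let name = fbb.create_string("age");
//     let value = ScalarValuePayload::create(&mut fbb, &ScalarValuePayloadArgs {
//         tag: ScalarTag::Int64,
//         i64_value: 33,
//         ..Default::default()
//     });
//     let col = ColumnValue::create(&mut fbb, &ColumnValueArgs {
//         name: Some(name),
//         value: Some(value),
//     });
//     let cols = fbb.create_vector(&[col]);
//     let row = RowPayload::create(&mut fbb, &RowPayloadArgs { columns: Some(cols) });
// ---------------------------------------------------------------------------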
- .finish(); - Ok(()) - } -} -pub struct RowPayloadArgs<'a> { - pub columns: Option<::flatbuffers::WIPOffset<::flatbuffers::Vector<'a, ::flatbuffers::ForwardsUOffset>>>>, -} -impl<'a> Default for RowPayloadArgs<'a> { - #[inline] - fn default() -> Self { - RowPayloadArgs { - columns: None, - } - } -} - -pub struct RowPayloadBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { - fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, - start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, -} -impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> RowPayloadBuilder<'a, 'b, A> { - #[inline] - pub fn add_columns(&mut self, columns: ::flatbuffers::WIPOffset<::flatbuffers::Vector<'b , ::flatbuffers::ForwardsUOffset>>>) { - self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>(RowPayload::VT_COLUMNS, columns); - } - #[inline] - pub fn new(_fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>) -> RowPayloadBuilder<'a, 'b, A> { - let start = _fbb.start_table(); - RowPayloadBuilder { - fbb_: _fbb, - start_: start, - } - } - #[inline] - pub fn finish(self) -> ::flatbuffers::WIPOffset> { - let o = self.fbb_.end_table(self.start_); - ::flatbuffers::WIPOffset::new(o.value()) - } -} - -impl ::core::fmt::Debug for RowPayload<'_> { - fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { - let mut ds = f.debug_struct("RowPayload"); - ds.field("columns", &self.columns()); - ds.finish() - } -} -pub enum UserTableRowPayloadOffset {} -#[derive(Copy, Clone, PartialEq)] - -pub struct UserTableRowPayload<'a> { - pub _tab: ::flatbuffers::Table<'a>, -} - -impl<'a> ::flatbuffers::Follow<'a> for UserTableRowPayload<'a> { - type Inner = UserTableRowPayload<'a>; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - Self { _tab: unsafe { ::flatbuffers::Table::new(buf, loc) } } - } -} - -impl<'a> UserTableRowPayload<'a> { - pub const VT_USER_ID: ::flatbuffers::VOffsetT = 4; - pub const VT_SEQ: ::flatbuffers::VOffsetT = 6; - pub const VT_COMMIT_SEQ: ::flatbuffers::VOffsetT = 8; - pub const VT_DELETED: ::flatbuffers::VOffsetT = 10; - pub const VT_FIELDS: ::flatbuffers::VOffsetT = 12; - - #[inline] - pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { - UserTableRowPayload { _tab: table } - } - #[allow(unused_mut)] - pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: ::flatbuffers::Allocator + 'bldr>( - _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, - args: &'args UserTableRowPayloadArgs<'args> - ) -> ::flatbuffers::WIPOffset> { - let mut builder = UserTableRowPayloadBuilder::new(_fbb); - builder.add_commit_seq(args.commit_seq); - builder.add_seq(args.seq); - if let Some(x) = args.fields { builder.add_fields(x); } - if let Some(x) = args.user_id { builder.add_user_id(x); } - builder.add_deleted(args.deleted); - builder.finish() - } - - - #[inline] - pub fn user_id(&self) -> Option<&'a str> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::<::flatbuffers::ForwardsUOffset<&str>>(UserTableRowPayload::VT_USER_ID, None)} - } - #[inline] - pub fn seq(&self) -> i64 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(UserTableRowPayload::VT_SEQ, Some(0)).unwrap()} - } - #[inline] - pub fn commit_seq(&self) -> u64 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { 
self._tab.get::(UserTableRowPayload::VT_COMMIT_SEQ, Some(0)).unwrap()} - } - #[inline] - pub fn deleted(&self) -> bool { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(UserTableRowPayload::VT_DELETED, Some(false)).unwrap()} - } - #[inline] - pub fn fields(&self) -> Option> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::<::flatbuffers::ForwardsUOffset>(UserTableRowPayload::VT_FIELDS, None)} - } -} - -impl ::flatbuffers::Verifiable for UserTableRowPayload<'_> { - #[inline] - fn run_verifier( - v: &mut ::flatbuffers::Verifier, pos: usize - ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { - v.visit_table(pos)? - .visit_field::<::flatbuffers::ForwardsUOffset<&str>>("user_id", Self::VT_USER_ID, false)? - .visit_field::("seq", Self::VT_SEQ, false)? - .visit_field::("commit_seq", Self::VT_COMMIT_SEQ, false)? - .visit_field::("deleted", Self::VT_DELETED, false)? - .visit_field::<::flatbuffers::ForwardsUOffset>("fields", Self::VT_FIELDS, false)? - .finish(); - Ok(()) - } -} -pub struct UserTableRowPayloadArgs<'a> { - pub user_id: Option<::flatbuffers::WIPOffset<&'a str>>, - pub seq: i64, - pub commit_seq: u64, - pub deleted: bool, - pub fields: Option<::flatbuffers::WIPOffset>>, -} -impl<'a> Default for UserTableRowPayloadArgs<'a> { - #[inline] - fn default() -> Self { - UserTableRowPayloadArgs { - user_id: None, - seq: 0, - commit_seq: 0, - deleted: false, - fields: None, - } - } -} - -pub struct UserTableRowPayloadBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { - fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, - start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, -} -impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> UserTableRowPayloadBuilder<'a, 'b, A> { - #[inline] - pub fn add_user_id(&mut self, user_id: ::flatbuffers::WIPOffset<&'b str>) { - self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>(UserTableRowPayload::VT_USER_ID, user_id); - } - #[inline] - pub fn add_seq(&mut self, seq: i64) { - self.fbb_.push_slot::(UserTableRowPayload::VT_SEQ, seq, 0); - } - #[inline] - pub fn add_commit_seq(&mut self, commit_seq: u64) { - self.fbb_.push_slot::(UserTableRowPayload::VT_COMMIT_SEQ, commit_seq, 0); - } - #[inline] - pub fn add_deleted(&mut self, deleted: bool) { - self.fbb_.push_slot::(UserTableRowPayload::VT_DELETED, deleted, false); - } - #[inline] - pub fn add_fields(&mut self, fields: ::flatbuffers::WIPOffset>) { - self.fbb_.push_slot_always::<::flatbuffers::WIPOffset>(UserTableRowPayload::VT_FIELDS, fields); - } - #[inline] - pub fn new(_fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>) -> UserTableRowPayloadBuilder<'a, 'b, A> { - let start = _fbb.start_table(); - UserTableRowPayloadBuilder { - fbb_: _fbb, - start_: start, - } - } - #[inline] - pub fn finish(self) -> ::flatbuffers::WIPOffset> { - let o = self.fbb_.end_table(self.start_); - ::flatbuffers::WIPOffset::new(o.value()) - } -} - -impl ::core::fmt::Debug for UserTableRowPayload<'_> { - fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { - let mut ds = f.debug_struct("UserTableRowPayload"); - ds.field("user_id", &self.user_id()); - ds.field("seq", &self.seq()); - ds.field("commit_seq", &self.commit_seq()); - ds.field("deleted", &self.deleted()); - ds.field("fields", &self.fields()); - ds.finish() - } -} -pub enum SharedTableRowPayloadOffset {} -#[derive(Copy, Clone, PartialEq)] - -pub struct 
SharedTableRowPayload<'a> { - pub _tab: ::flatbuffers::Table<'a>, -} - -impl<'a> ::flatbuffers::Follow<'a> for SharedTableRowPayload<'a> { - type Inner = SharedTableRowPayload<'a>; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - Self { _tab: unsafe { ::flatbuffers::Table::new(buf, loc) } } - } -} - -impl<'a> SharedTableRowPayload<'a> { - pub const VT_SEQ: ::flatbuffers::VOffsetT = 4; - pub const VT_COMMIT_SEQ: ::flatbuffers::VOffsetT = 6; - pub const VT_DELETED: ::flatbuffers::VOffsetT = 8; - pub const VT_FIELDS: ::flatbuffers::VOffsetT = 10; - - #[inline] - pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { - SharedTableRowPayload { _tab: table } - } - #[allow(unused_mut)] - pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: ::flatbuffers::Allocator + 'bldr>( - _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, - args: &'args SharedTableRowPayloadArgs<'args> - ) -> ::flatbuffers::WIPOffset> { - let mut builder = SharedTableRowPayloadBuilder::new(_fbb); - builder.add_commit_seq(args.commit_seq); - builder.add_seq(args.seq); - if let Some(x) = args.fields { builder.add_fields(x); } - builder.add_deleted(args.deleted); - builder.finish() - } - - - #[inline] - pub fn seq(&self) -> i64 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(SharedTableRowPayload::VT_SEQ, Some(0)).unwrap()} - } - #[inline] - pub fn commit_seq(&self) -> u64 { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(SharedTableRowPayload::VT_COMMIT_SEQ, Some(0)).unwrap()} - } - #[inline] - pub fn deleted(&self) -> bool { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::(SharedTableRowPayload::VT_DELETED, Some(false)).unwrap()} - } - #[inline] - pub fn fields(&self) -> Option> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::<::flatbuffers::ForwardsUOffset>(SharedTableRowPayload::VT_FIELDS, None)} - } -} - -impl ::flatbuffers::Verifiable for SharedTableRowPayload<'_> { - #[inline] - fn run_verifier( - v: &mut ::flatbuffers::Verifier, pos: usize - ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { - v.visit_table(pos)? - .visit_field::("seq", Self::VT_SEQ, false)? - .visit_field::("commit_seq", Self::VT_COMMIT_SEQ, false)? - .visit_field::("deleted", Self::VT_DELETED, false)? - .visit_field::<::flatbuffers::ForwardsUOffset>("fields", Self::VT_FIELDS, false)? 
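// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated diff): the per-user and shared
// wrappers carry the same seq/commit_seq/deleted metadata around a RowPayload;
// SharedTableRowPayload simply drops user_id. This reuses `fbb` and `row` from
// the previous sketch; the id and sequence numbers are made up.
//
//     let user_id = fbb.create_string("user-123");
//     let per_user = UserTableRowPayload::create(&mut fbb, &UserTableRowPayloadArgs {
//         user_id: Some(user_id),
//         seq: 7,
//         commit_seq: 42,
//         deleted: false,
//         fields: Some(row),
//     });
// ---------------------------------------------------------------------------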
- .finish(); - Ok(()) - } -} -pub struct SharedTableRowPayloadArgs<'a> { - pub seq: i64, - pub commit_seq: u64, - pub deleted: bool, - pub fields: Option<::flatbuffers::WIPOffset>>, -} -impl<'a> Default for SharedTableRowPayloadArgs<'a> { - #[inline] - fn default() -> Self { - SharedTableRowPayloadArgs { - seq: 0, - commit_seq: 0, - deleted: false, - fields: None, - } - } -} - -pub struct SharedTableRowPayloadBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { - fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, - start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, -} -impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> SharedTableRowPayloadBuilder<'a, 'b, A> { - #[inline] - pub fn add_seq(&mut self, seq: i64) { - self.fbb_.push_slot::(SharedTableRowPayload::VT_SEQ, seq, 0); - } - #[inline] - pub fn add_commit_seq(&mut self, commit_seq: u64) { - self.fbb_.push_slot::(SharedTableRowPayload::VT_COMMIT_SEQ, commit_seq, 0); - } - #[inline] - pub fn add_deleted(&mut self, deleted: bool) { - self.fbb_.push_slot::(SharedTableRowPayload::VT_DELETED, deleted, false); - } - #[inline] - pub fn add_fields(&mut self, fields: ::flatbuffers::WIPOffset>) { - self.fbb_.push_slot_always::<::flatbuffers::WIPOffset>(SharedTableRowPayload::VT_FIELDS, fields); - } - #[inline] - pub fn new(_fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>) -> SharedTableRowPayloadBuilder<'a, 'b, A> { - let start = _fbb.start_table(); - SharedTableRowPayloadBuilder { - fbb_: _fbb, - start_: start, - } - } - #[inline] - pub fn finish(self) -> ::flatbuffers::WIPOffset> { - let o = self.fbb_.end_table(self.start_); - ::flatbuffers::WIPOffset::new(o.value()) - } -} - -impl ::core::fmt::Debug for SharedTableRowPayload<'_> { - fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { - let mut ds = f.debug_struct("SharedTableRowPayload"); - ds.field("seq", &self.seq()); - ds.field("commit_seq", &self.commit_seq()); - ds.field("deleted", &self.deleted()); - ds.field("fields", &self.fields()); - ds.finish() - } -} -pub enum SystemTableRowPayloadOffset {} -#[derive(Copy, Clone, PartialEq)] - -pub struct SystemTableRowPayload<'a> { - pub _tab: ::flatbuffers::Table<'a>, -} - -impl<'a> ::flatbuffers::Follow<'a> for SystemTableRowPayload<'a> { - type Inner = SystemTableRowPayload<'a>; - #[inline] - unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { - Self { _tab: unsafe { ::flatbuffers::Table::new(buf, loc) } } - } -} - -impl<'a> SystemTableRowPayload<'a> { - pub const VT_FIELDS: ::flatbuffers::VOffsetT = 4; - - #[inline] - pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { - SystemTableRowPayload { _tab: table } - } - #[allow(unused_mut)] - pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr, A: ::flatbuffers::Allocator + 'bldr>( - _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, - args: &'args SystemTableRowPayloadArgs<'args> - ) -> ::flatbuffers::WIPOffset> { - let mut builder = SystemTableRowPayloadBuilder::new(_fbb); - if let Some(x) = args.fields { builder.add_fields(x); } - builder.finish() - } - - - #[inline] - pub fn fields(&self) -> Option> { - // Safety: - // Created from valid Table for this object - // which contains a valid value in this slot - unsafe { self._tab.get::<::flatbuffers::ForwardsUOffset>(SystemTableRowPayload::VT_FIELDS, None)} - } -} - -impl ::flatbuffers::Verifiable for SystemTableRowPayload<'_> { - #[inline] - fn run_verifier( - v: &mut ::flatbuffers::Verifier, pos: usize - ) -> Result<(), 
::flatbuffers::InvalidFlatbuffer> {
-    v.visit_table(pos)?
-     .visit_field::<::flatbuffers::ForwardsUOffset<RowPayload>>("fields", Self::VT_FIELDS, false)?
-     .finish();
-    Ok(())
-  }
-}
-pub struct SystemTableRowPayloadArgs<'a> {
-    pub fields: Option<::flatbuffers::WIPOffset<RowPayload<'a>>>,
-}
-impl<'a> Default for SystemTableRowPayloadArgs<'a> {
-  #[inline]
-  fn default() -> Self {
-    SystemTableRowPayloadArgs {
-      fields: None,
-    }
-  }
-}
-
-pub struct SystemTableRowPayloadBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> {
-  fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>,
-  start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>,
-}
-impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> SystemTableRowPayloadBuilder<'a, 'b, A> {
-  #[inline]
-  pub fn add_fields(&mut self, fields: ::flatbuffers::WIPOffset<RowPayload<'b>>) {
-    self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<RowPayload>>(SystemTableRowPayload::VT_FIELDS, fields);
-  }
-  #[inline]
-  pub fn new(_fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>) -> SystemTableRowPayloadBuilder<'a, 'b, A> {
-    let start = _fbb.start_table();
-    SystemTableRowPayloadBuilder {
-      fbb_: _fbb,
-      start_: start,
-    }
-  }
-  #[inline]
-  pub fn finish(self) -> ::flatbuffers::WIPOffset<SystemTableRowPayload<'a>> {
-    let o = self.fbb_.end_table(self.start_);
-    ::flatbuffers::WIPOffset::new(o.value())
-  }
-}
-
-impl ::core::fmt::Debug for SystemTableRowPayload<'_> {
-  fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
-    let mut ds = f.debug_struct("SystemTableRowPayload");
-    ds.field("fields", &self.fields());
-    ds.finish()
-  }
-}
-#[inline]
-/// Verifies that a buffer of bytes contains a `RowPayload`
-/// and returns it.
-/// Note that verification is still experimental and may not
-/// catch every error, or be maximally performant. For the
-/// previous, unchecked, behavior use
-/// `root_as_row_payload_unchecked`.
-pub fn root_as_row_payload(buf: &[u8]) -> Result<RowPayload<'_>, ::flatbuffers::InvalidFlatbuffer> {
-  ::flatbuffers::root::<RowPayload>(buf)
-}
-#[inline]
-/// Verifies that a buffer of bytes contains a size prefixed
-/// `RowPayload` and returns it.
-/// Note that verification is still experimental and may not
-/// catch every error, or be maximally performant. For the
-/// previous, unchecked, behavior use
-/// `size_prefixed_root_as_row_payload_unchecked`.
-pub fn size_prefixed_root_as_row_payload(buf: &[u8]) -> Result<RowPayload<'_>, ::flatbuffers::InvalidFlatbuffer> {
-  ::flatbuffers::size_prefixed_root::<RowPayload>(buf)
-}
-#[inline]
-/// Verifies, with the given options, that a buffer of bytes
-/// contains a `RowPayload` and returns it.
-/// Note that verification is still experimental and may not
-/// catch every error, or be maximally performant. For the
-/// previous, unchecked, behavior use
-/// `root_as_row_payload_unchecked`.
-pub fn root_as_row_payload_with_opts<'b, 'o>(
-  opts: &'o ::flatbuffers::VerifierOptions,
-  buf: &'b [u8],
-) -> Result<RowPayload<'b>, ::flatbuffers::InvalidFlatbuffer> {
-  ::flatbuffers::root_with_opts::<RowPayload<'b>>(opts, buf)
-}
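// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated diff): a defensive decode path
// using the root/identifier helpers in this region — the 4-byte "KROW" check
// is a cheap pre-filter before the (still experimental) verifier runs. It
// assumes the buffer was finished via finish_row_payload_buffer, which stamps
// the identifier.
//
//     fn decode_row(buf: &[u8]) -> Option<RowPayload<'_>> {
//         if !row_payload_buffer_has_identifier(buf) {
//             return None; // not even tagged as a row payload
//         }
//         root_as_row_payload(buf).ok()
//     }
// ---------------------------------------------------------------------------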
-#[inline]
-/// Verifies, with the given verifier options, that a buffer of
-/// bytes contains a size prefixed `RowPayload` and returns
-/// it. Note that verification is still experimental and may not
-/// catch every error, or be maximally performant. For the
-/// previous, unchecked, behavior use
-/// `root_as_row_payload_unchecked`.
-pub fn size_prefixed_root_as_row_payload_with_opts<'b, 'o>(
-  opts: &'o ::flatbuffers::VerifierOptions,
-  buf: &'b [u8],
-) -> Result<RowPayload<'b>, ::flatbuffers::InvalidFlatbuffer> {
-  ::flatbuffers::size_prefixed_root_with_opts::<RowPayload<'b>>(opts, buf)
-}
-#[inline]
-/// Assumes, without verification, that a buffer of bytes contains a RowPayload and returns it.
-/// # Safety
-/// Callers must trust the given bytes do indeed contain a valid `RowPayload`.
-pub unsafe fn root_as_row_payload_unchecked(buf: &[u8]) -> RowPayload<'_> {
-  unsafe { ::flatbuffers::root_unchecked::<RowPayload>(buf) }
-}
-#[inline]
-/// Assumes, without verification, that a buffer of bytes contains a size prefixed RowPayload and returns it.
-/// # Safety
-/// Callers must trust the given bytes do indeed contain a valid size prefixed `RowPayload`.
-pub unsafe fn size_prefixed_root_as_row_payload_unchecked(buf: &[u8]) -> RowPayload<'_> {
-  unsafe { ::flatbuffers::size_prefixed_root_unchecked::<RowPayload>(buf) }
-}
-pub const ROW_PAYLOAD_IDENTIFIER: &str = "KROW";
-
-#[inline]
-pub fn row_payload_buffer_has_identifier(buf: &[u8]) -> bool {
-  ::flatbuffers::buffer_has_identifier(buf, ROW_PAYLOAD_IDENTIFIER, false)
-}
-
-#[inline]
-pub fn row_payload_size_prefixed_buffer_has_identifier(buf: &[u8]) -> bool {
-  ::flatbuffers::buffer_has_identifier(buf, ROW_PAYLOAD_IDENTIFIER, true)
-}
-
-#[inline]
-pub fn finish_row_payload_buffer<'a, 'b, A: ::flatbuffers::Allocator + 'a>(
-    fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>,
-    root: ::flatbuffers::WIPOffset<RowPayload<'a>>) {
-  fbb.finish(root, Some(ROW_PAYLOAD_IDENTIFIER));
-}
-
-#[inline]
-pub fn finish_size_prefixed_row_payload_buffer<'a, 'b, A: ::flatbuffers::Allocator + 'a>(fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, root: ::flatbuffers::WIPOffset<RowPayload<'a>>) {
-  fbb.finish_size_prefixed(root, Some(ROW_PAYLOAD_IDENTIFIER));
-}
-} // pub mod row
-} // pub mod serialization
-} // pub mod kalamdb
-
+                    Ok(())
+                }
+            }
+            pub struct ScalarValuePayloadArgs<'a> {
+                pub tag: ScalarTag,
+                pub is_null: bool,
+                pub bool_value: bool,
+                pub i8_value: i8,
+                pub i16_value: i16,
+                pub i32_value: i32,
+                pub i64_value: i64,
+                pub u8_value: u8,
+                pub u16_value: u16,
+                pub u32_value: u32,
+                pub u64_value: u64,
+                pub f32_value: f32,
+                pub f64_value: f64,
+                pub text_value: Option<::flatbuffers::WIPOffset<&'a str>>,
+                pub bytes_value: Option<::flatbuffers::WIPOffset<::flatbuffers::Vector<'a, u8>>>,
+                pub fixed_size: i32,
+                pub timezone: Option<::flatbuffers::WIPOffset<&'a str>>,
+                pub decimal_precision: u8,
+                pub decimal_scale: i8,
+                pub embedding_size: i32,
+                pub embedding_values:
+                    Option<::flatbuffers::WIPOffset<::flatbuffers::Vector<'a, f32>>>,
+                pub embedding_valid:
+                    Option<::flatbuffers::WIPOffset<::flatbuffers::Vector<'a, bool>>>,
+            }
+            impl<'a> Default for ScalarValuePayloadArgs<'a> {
+                #[inline]
+                fn default() -> Self {
+                    ScalarValuePayloadArgs {
+                        tag: ScalarTag::Null,
+                        is_null: false,
+                        bool_value: false,
+                        i8_value: 0,
+                        i16_value: 0,
+                        i32_value: 0,
+                        i64_value: 0,
+                        u8_value: 0,
+                        u16_value: 0,
+                        u32_value: 0,
+                        u64_value: 0,
+                        f32_value: 0.0,
+                        f64_value: 0.0,
+                        text_value: None,
+                        bytes_value: None,
+                        fixed_size: 0,
+                        timezone: None,
+                        decimal_precision: 0,
+                        decimal_scale: 0,
+                        embedding_size: 0,
+                        embedding_values: None,
+                        embedding_valid: None,
+                    }
+                }
+            }
+
+            pub struct ScalarValuePayloadBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> {
+                fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>,
+                start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>,
+            }
+            impl<'a: 'b, 'b, A: ::flatbuffers::Allocator +
'a> ScalarValuePayloadBuilder<'a, 'b, A> { + #[inline] + pub fn add_tag(&mut self, tag: ScalarTag) { + self.fbb_.push_slot::( + ScalarValuePayload::VT_TAG, + tag, + ScalarTag::Null, + ); + } + #[inline] + pub fn add_is_null(&mut self, is_null: bool) { + self.fbb_.push_slot::(ScalarValuePayload::VT_IS_NULL, is_null, false); + } + #[inline] + pub fn add_bool_value(&mut self, bool_value: bool) { + self.fbb_.push_slot::( + ScalarValuePayload::VT_BOOL_VALUE, + bool_value, + false, + ); + } + #[inline] + pub fn add_i8_value(&mut self, i8_value: i8) { + self.fbb_.push_slot::(ScalarValuePayload::VT_I8_VALUE, i8_value, 0); + } + #[inline] + pub fn add_i16_value(&mut self, i16_value: i16) { + self.fbb_.push_slot::(ScalarValuePayload::VT_I16_VALUE, i16_value, 0); + } + #[inline] + pub fn add_i32_value(&mut self, i32_value: i32) { + self.fbb_.push_slot::(ScalarValuePayload::VT_I32_VALUE, i32_value, 0); + } + #[inline] + pub fn add_i64_value(&mut self, i64_value: i64) { + self.fbb_.push_slot::(ScalarValuePayload::VT_I64_VALUE, i64_value, 0); + } + #[inline] + pub fn add_u8_value(&mut self, u8_value: u8) { + self.fbb_.push_slot::(ScalarValuePayload::VT_U8_VALUE, u8_value, 0); + } + #[inline] + pub fn add_u16_value(&mut self, u16_value: u16) { + self.fbb_.push_slot::(ScalarValuePayload::VT_U16_VALUE, u16_value, 0); + } + #[inline] + pub fn add_u32_value(&mut self, u32_value: u32) { + self.fbb_.push_slot::(ScalarValuePayload::VT_U32_VALUE, u32_value, 0); + } + #[inline] + pub fn add_u64_value(&mut self, u64_value: u64) { + self.fbb_.push_slot::(ScalarValuePayload::VT_U64_VALUE, u64_value, 0); + } + #[inline] + pub fn add_f32_value(&mut self, f32_value: f32) { + self.fbb_.push_slot::(ScalarValuePayload::VT_F32_VALUE, f32_value, 0.0); + } + #[inline] + pub fn add_f64_value(&mut self, f64_value: f64) { + self.fbb_.push_slot::(ScalarValuePayload::VT_F64_VALUE, f64_value, 0.0); + } + #[inline] + pub fn add_text_value(&mut self, text_value: ::flatbuffers::WIPOffset<&'b str>) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + ScalarValuePayload::VT_TEXT_VALUE, + text_value, + ); + } + #[inline] + pub fn add_bytes_value( + &mut self, + bytes_value: ::flatbuffers::WIPOffset<::flatbuffers::Vector<'b, u8>>, + ) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + ScalarValuePayload::VT_BYTES_VALUE, + bytes_value, + ); + } + #[inline] + pub fn add_fixed_size(&mut self, fixed_size: i32) { + self.fbb_.push_slot::(ScalarValuePayload::VT_FIXED_SIZE, fixed_size, 0); + } + #[inline] + pub fn add_timezone(&mut self, timezone: ::flatbuffers::WIPOffset<&'b str>) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + ScalarValuePayload::VT_TIMEZONE, + timezone, + ); + } + #[inline] + pub fn add_decimal_precision(&mut self, decimal_precision: u8) { + self.fbb_.push_slot::( + ScalarValuePayload::VT_DECIMAL_PRECISION, + decimal_precision, + 0, + ); + } + #[inline] + pub fn add_decimal_scale(&mut self, decimal_scale: i8) { + self.fbb_.push_slot::( + ScalarValuePayload::VT_DECIMAL_SCALE, + decimal_scale, + 0, + ); + } + #[inline] + pub fn add_embedding_size(&mut self, embedding_size: i32) { + self.fbb_.push_slot::( + ScalarValuePayload::VT_EMBEDDING_SIZE, + embedding_size, + 0, + ); + } + #[inline] + pub fn add_embedding_values( + &mut self, + embedding_values: ::flatbuffers::WIPOffset<::flatbuffers::Vector<'b, f32>>, + ) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + ScalarValuePayload::VT_EMBEDDING_VALUES, + embedding_values, + ); + } + #[inline] + pub fn add_embedding_valid( 
+ &mut self, + embedding_valid: ::flatbuffers::WIPOffset<::flatbuffers::Vector<'b, bool>>, + ) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + ScalarValuePayload::VT_EMBEDDING_VALID, + embedding_valid, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + ) -> ScalarValuePayloadBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + ScalarValuePayloadBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> ::flatbuffers::WIPOffset> { + let o = self.fbb_.end_table(self.start_); + ::flatbuffers::WIPOffset::new(o.value()) + } + } + + impl ::core::fmt::Debug for ScalarValuePayload<'_> { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + let mut ds = f.debug_struct("ScalarValuePayload"); + ds.field("tag", &self.tag()); + ds.field("is_null", &self.is_null()); + ds.field("bool_value", &self.bool_value()); + ds.field("i8_value", &self.i8_value()); + ds.field("i16_value", &self.i16_value()); + ds.field("i32_value", &self.i32_value()); + ds.field("i64_value", &self.i64_value()); + ds.field("u8_value", &self.u8_value()); + ds.field("u16_value", &self.u16_value()); + ds.field("u32_value", &self.u32_value()); + ds.field("u64_value", &self.u64_value()); + ds.field("f32_value", &self.f32_value()); + ds.field("f64_value", &self.f64_value()); + ds.field("text_value", &self.text_value()); + ds.field("bytes_value", &self.bytes_value()); + ds.field("fixed_size", &self.fixed_size()); + ds.field("timezone", &self.timezone()); + ds.field("decimal_precision", &self.decimal_precision()); + ds.field("decimal_scale", &self.decimal_scale()); + ds.field("embedding_size", &self.embedding_size()); + ds.field("embedding_values", &self.embedding_values()); + ds.field("embedding_valid", &self.embedding_valid()); + ds.finish() + } + } + pub enum ColumnValueOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct ColumnValue<'a> { + pub _tab: ::flatbuffers::Table<'a>, + } + + impl<'a> ::flatbuffers::Follow<'a> for ColumnValue<'a> { + type Inner = ColumnValue<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: unsafe { ::flatbuffers::Table::new(buf, loc) }, + } + } + } + + impl<'a> ColumnValue<'a> { + pub const VT_NAME: ::flatbuffers::VOffsetT = 4; + pub const VT_VALUE: ::flatbuffers::VOffsetT = 6; + + #[inline] + pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { + ColumnValue { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: ::flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args ColumnValueArgs<'args>, + ) -> ::flatbuffers::WIPOffset> { + let mut builder = ColumnValueBuilder::new(_fbb); + if let Some(x) = args.value { + builder.add_value(x); + } + if let Some(x) = args.name { + builder.add_name(x); + } + builder.finish() + } + + #[inline] + pub fn name(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::<::flatbuffers::ForwardsUOffset<&str>>(ColumnValue::VT_NAME, None) + } + } + #[inline] + pub fn value(&self) -> Option> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset>( + ColumnValue::VT_VALUE, + None, + ) + } + } + } + + impl ::flatbuffers::Verifiable for ColumnValue<'_> { + #[inline] 
+ fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + v.visit_table(pos)? + .visit_field::<::flatbuffers::ForwardsUOffset<&str>>( + "name", + Self::VT_NAME, + false, + )? + .visit_field::<::flatbuffers::ForwardsUOffset<ScalarValuePayload>>( + "value", + Self::VT_VALUE, + false, + )? + .finish(); + Ok(()) + } + } + pub struct ColumnValueArgs<'a> { + pub name: Option<::flatbuffers::WIPOffset<&'a str>>, + pub value: Option<::flatbuffers::WIPOffset<ScalarValuePayload<'a>>>, + } + impl<'a> Default for ColumnValueArgs<'a> { + #[inline] + fn default() -> Self { + ColumnValueArgs { + name: None, + value: None, + } + } + } + + pub struct ColumnValueBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { + fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, + } + impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> ColumnValueBuilder<'a, 'b, A> { + #[inline] + pub fn add_name(&mut self, name: ::flatbuffers::WIPOffset<&'b str>) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + ColumnValue::VT_NAME, + name, + ); + } + #[inline] + pub fn add_value( + &mut self, + value: ::flatbuffers::WIPOffset<ScalarValuePayload<'b>>, + ) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<ScalarValuePayload>>( + ColumnValue::VT_VALUE, + value, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + ) -> ColumnValueBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + ColumnValueBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> ::flatbuffers::WIPOffset<ColumnValue<'a>> { + let o = self.fbb_.end_table(self.start_); + ::flatbuffers::WIPOffset::new(o.value()) + } + } + + impl ::core::fmt::Debug for ColumnValue<'_> { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + let mut ds = f.debug_struct("ColumnValue"); + ds.field("name", &self.name()); + ds.field("value", &self.value()); + ds.finish() + } + } + pub enum RowPayloadOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct RowPayload<'a> { + pub _tab: ::flatbuffers::Table<'a>, + } + + impl<'a> ::flatbuffers::Follow<'a> for RowPayload<'a> { + type Inner = RowPayload<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: unsafe { ::flatbuffers::Table::new(buf, loc) }, + } + } + } + + impl<'a> RowPayload<'a> { + pub const VT_COLUMNS: ::flatbuffers::VOffsetT = 4; + + #[inline] + pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { + RowPayload { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: ::flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args RowPayloadArgs<'args>, + ) -> ::flatbuffers::WIPOffset<RowPayload<'bldr>> { + let mut builder = RowPayloadBuilder::new(_fbb); + if let Some(x) = args.columns { + builder.add_columns(x); + } + builder.finish() + } + + #[inline] + pub fn columns( + &self, + ) -> Option< + ::flatbuffers::Vector<'a, ::flatbuffers::ForwardsUOffset<ColumnValue<'a>>>, + > { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset< + ::flatbuffers::Vector<'a, ::flatbuffers::ForwardsUOffset<ColumnValue>>, + >>(RowPayload::VT_COLUMNS, None) + } + } + } + + impl ::flatbuffers::Verifiable for RowPayload<'_> { + #[inline] + fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(),
::flatbuffers::InvalidFlatbuffer> { + v.visit_table(pos)? + .visit_field::<::flatbuffers::ForwardsUOffset< + ::flatbuffers::Vector<'_, ::flatbuffers::ForwardsUOffset<ColumnValue>>, + >>("columns", Self::VT_COLUMNS, false)? + .finish(); + Ok(()) + } + } + pub struct RowPayloadArgs<'a> { + pub columns: Option< + ::flatbuffers::WIPOffset< + ::flatbuffers::Vector<'a, ::flatbuffers::ForwardsUOffset<ColumnValue<'a>>>, + >, + >, + } + impl<'a> Default for RowPayloadArgs<'a> { + #[inline] + fn default() -> Self { + RowPayloadArgs { columns: None } + } + } + + pub struct RowPayloadBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { + fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, + } + impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> RowPayloadBuilder<'a, 'b, A> { + #[inline] + pub fn add_columns( + &mut self, + columns: ::flatbuffers::WIPOffset< + ::flatbuffers::Vector<'b, ::flatbuffers::ForwardsUOffset<ColumnValue<'b>>>, + >, + ) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + RowPayload::VT_COLUMNS, + columns, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + ) -> RowPayloadBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + RowPayloadBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> ::flatbuffers::WIPOffset<RowPayload<'a>> { + let o = self.fbb_.end_table(self.start_); + ::flatbuffers::WIPOffset::new(o.value()) + } + } + + impl ::core::fmt::Debug for RowPayload<'_> { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + let mut ds = f.debug_struct("RowPayload"); + ds.field("columns", &self.columns()); + ds.finish() + } + } + pub enum UserTableRowPayloadOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct UserTableRowPayload<'a> { + pub _tab: ::flatbuffers::Table<'a>, + } + + impl<'a> ::flatbuffers::Follow<'a> for UserTableRowPayload<'a> { + type Inner = UserTableRowPayload<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: unsafe { ::flatbuffers::Table::new(buf, loc) }, + } + } + } + + impl<'a> UserTableRowPayload<'a> { + pub const VT_USER_ID: ::flatbuffers::VOffsetT = 4; + pub const VT_SEQ: ::flatbuffers::VOffsetT = 6; + pub const VT_COMMIT_SEQ: ::flatbuffers::VOffsetT = 8; + pub const VT_DELETED: ::flatbuffers::VOffsetT = 10; + pub const VT_FIELDS: ::flatbuffers::VOffsetT = 12; + + #[inline] + pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { + UserTableRowPayload { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: ::flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args UserTableRowPayloadArgs<'args>, + ) -> ::flatbuffers::WIPOffset<UserTableRowPayload<'bldr>> { + let mut builder = UserTableRowPayloadBuilder::new(_fbb); + builder.add_commit_seq(args.commit_seq); + builder.add_seq(args.seq); + if let Some(x) = args.fields { + builder.add_fields(x); + } + if let Some(x) = args.user_id { + builder.add_user_id(x); + } + builder.add_deleted(args.deleted); + builder.finish() + } + + #[inline] + pub fn user_id(&self) -> Option<&'a str> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset<&str>>( + UserTableRowPayload::VT_USER_ID, + None, + ) + } + } + #[inline] + pub fn seq(&self) -> i64 { + // Safety: + // Created from valid Table for this object + // which
contains a valid value in this slot + unsafe { self._tab.get::<i64>(UserTableRowPayload::VT_SEQ, Some(0)).unwrap() } + } + #[inline] + pub fn commit_seq(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<u64>(UserTableRowPayload::VT_COMMIT_SEQ, Some(0)).unwrap() + } + } + #[inline] + pub fn deleted(&self) -> bool { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<bool>(UserTableRowPayload::VT_DELETED, Some(false)).unwrap() + } + } + #[inline] + pub fn fields(&self) -> Option<RowPayload<'a>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset<RowPayload>>( + UserTableRowPayload::VT_FIELDS, + None, + ) + } + } + } + + impl ::flatbuffers::Verifiable for UserTableRowPayload<'_> { + #[inline] + fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + v.visit_table(pos)? + .visit_field::<::flatbuffers::ForwardsUOffset<&str>>( + "user_id", + Self::VT_USER_ID, + false, + )? + .visit_field::<i64>("seq", Self::VT_SEQ, false)? + .visit_field::<u64>("commit_seq", Self::VT_COMMIT_SEQ, false)? + .visit_field::<bool>("deleted", Self::VT_DELETED, false)? + .visit_field::<::flatbuffers::ForwardsUOffset<RowPayload>>( + "fields", + Self::VT_FIELDS, + false, + )? + .finish(); + Ok(()) + } + } + pub struct UserTableRowPayloadArgs<'a> { + pub user_id: Option<::flatbuffers::WIPOffset<&'a str>>, + pub seq: i64, + pub commit_seq: u64, + pub deleted: bool, + pub fields: Option<::flatbuffers::WIPOffset<RowPayload<'a>>>, + } + impl<'a> Default for UserTableRowPayloadArgs<'a> { + #[inline] + fn default() -> Self { + UserTableRowPayloadArgs { + user_id: None, + seq: 0, + commit_seq: 0, + deleted: false, + fields: None, + } + } + } + + pub struct UserTableRowPayloadBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { + fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, + } + impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> UserTableRowPayloadBuilder<'a, 'b, A> { + #[inline] + pub fn add_user_id(&mut self, user_id: ::flatbuffers::WIPOffset<&'b str>) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<_>>( + UserTableRowPayload::VT_USER_ID, + user_id, + ); + } + #[inline] + pub fn add_seq(&mut self, seq: i64) { + self.fbb_.push_slot::<i64>(UserTableRowPayload::VT_SEQ, seq, 0); + } + #[inline] + pub fn add_commit_seq(&mut self, commit_seq: u64) { + self.fbb_.push_slot::<u64>(UserTableRowPayload::VT_COMMIT_SEQ, commit_seq, 0); + } + #[inline] + pub fn add_deleted(&mut self, deleted: bool) { + self.fbb_.push_slot::<bool>(UserTableRowPayload::VT_DELETED, deleted, false); + } + #[inline] + pub fn add_fields(&mut self, fields: ::flatbuffers::WIPOffset<RowPayload<'b>>) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<RowPayload>>( + UserTableRowPayload::VT_FIELDS, + fields, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + ) -> UserTableRowPayloadBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + UserTableRowPayloadBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> ::flatbuffers::WIPOffset<UserTableRowPayload<'a>> { + let o = self.fbb_.end_table(self.start_); + ::flatbuffers::WIPOffset::new(o.value()) + } + } + + impl ::core::fmt::Debug for UserTableRowPayload<'_> { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) ->
::core::fmt::Result { + let mut ds = f.debug_struct("UserTableRowPayload"); + ds.field("user_id", &self.user_id()); + ds.field("seq", &self.seq()); + ds.field("commit_seq", &self.commit_seq()); + ds.field("deleted", &self.deleted()); + ds.field("fields", &self.fields()); + ds.finish() + } + } + pub enum SharedTableRowPayloadOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct SharedTableRowPayload<'a> { + pub _tab: ::flatbuffers::Table<'a>, + } + + impl<'a> ::flatbuffers::Follow<'a> for SharedTableRowPayload<'a> { + type Inner = SharedTableRowPayload<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: unsafe { ::flatbuffers::Table::new(buf, loc) }, + } + } + } + + impl<'a> SharedTableRowPayload<'a> { + pub const VT_SEQ: ::flatbuffers::VOffsetT = 4; + pub const VT_COMMIT_SEQ: ::flatbuffers::VOffsetT = 6; + pub const VT_DELETED: ::flatbuffers::VOffsetT = 8; + pub const VT_FIELDS: ::flatbuffers::VOffsetT = 10; + + #[inline] + pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { + SharedTableRowPayload { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: ::flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args SharedTableRowPayloadArgs<'args>, + ) -> ::flatbuffers::WIPOffset<SharedTableRowPayload<'bldr>> { + let mut builder = SharedTableRowPayloadBuilder::new(_fbb); + builder.add_commit_seq(args.commit_seq); + builder.add_seq(args.seq); + if let Some(x) = args.fields { + builder.add_fields(x); + } + builder.add_deleted(args.deleted); + builder.finish() + } + + #[inline] + pub fn seq(&self) -> i64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { self._tab.get::<i64>(SharedTableRowPayload::VT_SEQ, Some(0)).unwrap() } + } + #[inline] + pub fn commit_seq(&self) -> u64 { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<u64>(SharedTableRowPayload::VT_COMMIT_SEQ, Some(0)).unwrap() + } + } + #[inline] + pub fn deleted(&self) -> bool { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab + .get::<bool>(SharedTableRowPayload::VT_DELETED, Some(false)) + .unwrap() + } + } + #[inline] + pub fn fields(&self) -> Option<RowPayload<'a>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset<RowPayload>>( + SharedTableRowPayload::VT_FIELDS, + None, + ) + } + } + } + + impl ::flatbuffers::Verifiable for SharedTableRowPayload<'_> { + #[inline] + fn run_verifier( + v: &mut ::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + v.visit_table(pos)? + .visit_field::<i64>("seq", Self::VT_SEQ, false)? + .visit_field::<u64>("commit_seq", Self::VT_COMMIT_SEQ, false)? + .visit_field::<bool>("deleted", Self::VT_DELETED, false)? + .visit_field::<::flatbuffers::ForwardsUOffset<RowPayload>>( + "fields", + Self::VT_FIELDS, + false, + )?
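+            // All declared fields checked; finish() completes verification of this table.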
+ .finish(); + Ok(()) + } + } + pub struct SharedTableRowPayloadArgs<'a> { + pub seq: i64, + pub commit_seq: u64, + pub deleted: bool, + pub fields: Option<::flatbuffers::WIPOffset<RowPayload<'a>>>, + } + impl<'a> Default for SharedTableRowPayloadArgs<'a> { + #[inline] + fn default() -> Self { + SharedTableRowPayloadArgs { + seq: 0, + commit_seq: 0, + deleted: false, + fields: None, + } + } + } + + pub struct SharedTableRowPayloadBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { + fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, + } + impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> SharedTableRowPayloadBuilder<'a, 'b, A> { + #[inline] + pub fn add_seq(&mut self, seq: i64) { + self.fbb_.push_slot::<i64>(SharedTableRowPayload::VT_SEQ, seq, 0); + } + #[inline] + pub fn add_commit_seq(&mut self, commit_seq: u64) { + self.fbb_.push_slot::<u64>(SharedTableRowPayload::VT_COMMIT_SEQ, commit_seq, 0); + } + #[inline] + pub fn add_deleted(&mut self, deleted: bool) { + self.fbb_.push_slot::<bool>(SharedTableRowPayload::VT_DELETED, deleted, false); + } + #[inline] + pub fn add_fields(&mut self, fields: ::flatbuffers::WIPOffset<RowPayload<'b>>) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<RowPayload>>( + SharedTableRowPayload::VT_FIELDS, + fields, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + ) -> SharedTableRowPayloadBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + SharedTableRowPayloadBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> ::flatbuffers::WIPOffset<SharedTableRowPayload<'a>> { + let o = self.fbb_.end_table(self.start_); + ::flatbuffers::WIPOffset::new(o.value()) + } + } + + impl ::core::fmt::Debug for SharedTableRowPayload<'_> { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + let mut ds = f.debug_struct("SharedTableRowPayload"); + ds.field("seq", &self.seq()); + ds.field("commit_seq", &self.commit_seq()); + ds.field("deleted", &self.deleted()); + ds.field("fields", &self.fields()); + ds.finish() + } + } + pub enum SystemTableRowPayloadOffset {} + #[derive(Copy, Clone, PartialEq)] + + pub struct SystemTableRowPayload<'a> { + pub _tab: ::flatbuffers::Table<'a>, + } + + impl<'a> ::flatbuffers::Follow<'a> for SystemTableRowPayload<'a> { + type Inner = SystemTableRowPayload<'a>; + #[inline] + unsafe fn follow(buf: &'a [u8], loc: usize) -> Self::Inner { + Self { + _tab: unsafe { ::flatbuffers::Table::new(buf, loc) }, + } + } + } + + impl<'a> SystemTableRowPayload<'a> { + pub const VT_FIELDS: ::flatbuffers::VOffsetT = 4; + + #[inline] + pub unsafe fn init_from_table(table: ::flatbuffers::Table<'a>) -> Self { + SystemTableRowPayload { _tab: table } + } + #[allow(unused_mut)] + pub fn create< + 'bldr: 'args, + 'args: 'mut_bldr, + 'mut_bldr, + A: ::flatbuffers::Allocator + 'bldr, + >( + _fbb: &'mut_bldr mut ::flatbuffers::FlatBufferBuilder<'bldr, A>, + args: &'args SystemTableRowPayloadArgs<'args>, + ) -> ::flatbuffers::WIPOffset<SystemTableRowPayload<'bldr>> { + let mut builder = SystemTableRowPayloadBuilder::new(_fbb); + if let Some(x) = args.fields { + builder.add_fields(x); + } + builder.finish() + } + + #[inline] + pub fn fields(&self) -> Option<RowPayload<'a>> { + // Safety: + // Created from valid Table for this object + // which contains a valid value in this slot + unsafe { + self._tab.get::<::flatbuffers::ForwardsUOffset<RowPayload>>( + SystemTableRowPayload::VT_FIELDS, + None, + ) + } + } + } + + impl ::flatbuffers::Verifiable for SystemTableRowPayload<'_> { + #[inline] + fn run_verifier( + v: &mut
::flatbuffers::Verifier, + pos: usize, + ) -> Result<(), ::flatbuffers::InvalidFlatbuffer> { + v.visit_table(pos)? + .visit_field::<::flatbuffers::ForwardsUOffset<RowPayload>>( + "fields", + Self::VT_FIELDS, + false, + )? + .finish(); + Ok(()) + } + } + pub struct SystemTableRowPayloadArgs<'a> { + pub fields: Option<::flatbuffers::WIPOffset<RowPayload<'a>>>, + } + impl<'a> Default for SystemTableRowPayloadArgs<'a> { + #[inline] + fn default() -> Self { + SystemTableRowPayloadArgs { fields: None } + } + } + + pub struct SystemTableRowPayloadBuilder<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> { + fbb_: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + start_: ::flatbuffers::WIPOffset<::flatbuffers::TableUnfinishedWIPOffset>, + } + impl<'a: 'b, 'b, A: ::flatbuffers::Allocator + 'a> SystemTableRowPayloadBuilder<'a, 'b, A> { + #[inline] + pub fn add_fields(&mut self, fields: ::flatbuffers::WIPOffset<RowPayload<'b>>) { + self.fbb_.push_slot_always::<::flatbuffers::WIPOffset<RowPayload>>( + SystemTableRowPayload::VT_FIELDS, + fields, + ); + } + #[inline] + pub fn new( + _fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + ) -> SystemTableRowPayloadBuilder<'a, 'b, A> { + let start = _fbb.start_table(); + SystemTableRowPayloadBuilder { + fbb_: _fbb, + start_: start, + } + } + #[inline] + pub fn finish(self) -> ::flatbuffers::WIPOffset<SystemTableRowPayload<'a>> { + let o = self.fbb_.end_table(self.start_); + ::flatbuffers::WIPOffset::new(o.value()) + } + } + + impl ::core::fmt::Debug for SystemTableRowPayload<'_> { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + let mut ds = f.debug_struct("SystemTableRowPayload"); + ds.field("fields", &self.fields()); + ds.finish() + } + } + #[inline] + /// Verifies that a buffer of bytes contains a `RowPayload` + /// and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_row_payload_unchecked`. + pub fn root_as_row_payload( + buf: &[u8], + ) -> Result<RowPayload<'_>, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::root::<RowPayload>(buf) + } + #[inline] + /// Verifies that a buffer of bytes contains a size prefixed + /// `RowPayload` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `size_prefixed_root_as_row_payload_unchecked`. + pub fn size_prefixed_root_as_row_payload( + buf: &[u8], + ) -> Result<RowPayload<'_>, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::size_prefixed_root::<RowPayload>(buf) + } + #[inline] + /// Verifies, with the given options, that a buffer of bytes + /// contains a `RowPayload` and returns it. + /// Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_row_payload_unchecked`. + pub fn root_as_row_payload_with_opts<'b, 'o>( + opts: &'o ::flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result<RowPayload<'b>, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::root_with_opts::<RowPayload<'b>>(opts, buf) + } + #[inline] + /// Verifies, with the given verifier options, that a buffer of + /// bytes contains a size prefixed `RowPayload` and returns + /// it. Note that verification is still experimental and may not + /// catch every error, or be maximally performant. For the + /// previous, unchecked, behavior use + /// `root_as_row_payload_unchecked`.
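+    /// `VerifierOptions` can tighten or relax limits such as `max_depth`, `max_tables`, and `max_apparent_size`.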
+ pub fn size_prefixed_root_as_row_payload_with_opts<'b, 'o>( + opts: &'o ::flatbuffers::VerifierOptions, + buf: &'b [u8], + ) -> Result<RowPayload<'b>, ::flatbuffers::InvalidFlatbuffer> { + ::flatbuffers::size_prefixed_root_with_opts::<RowPayload<'b>>(opts, buf) + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a RowPayload and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid `RowPayload`. + pub unsafe fn root_as_row_payload_unchecked(buf: &[u8]) -> RowPayload<'_> { + unsafe { ::flatbuffers::root_unchecked::<RowPayload>(buf) } + } + #[inline] + /// Assumes, without verification, that a buffer of bytes contains a size prefixed RowPayload and returns it. + /// # Safety + /// Callers must trust the given bytes do indeed contain a valid size prefixed `RowPayload`. + pub unsafe fn size_prefixed_root_as_row_payload_unchecked( + buf: &[u8], + ) -> RowPayload<'_> { + unsafe { ::flatbuffers::size_prefixed_root_unchecked::<RowPayload>(buf) } + } + pub const ROW_PAYLOAD_IDENTIFIER: &str = "KROW"; + + #[inline] + pub fn row_payload_buffer_has_identifier(buf: &[u8]) -> bool { + ::flatbuffers::buffer_has_identifier(buf, ROW_PAYLOAD_IDENTIFIER, false) + } + + #[inline] + pub fn row_payload_size_prefixed_buffer_has_identifier(buf: &[u8]) -> bool { + ::flatbuffers::buffer_has_identifier(buf, ROW_PAYLOAD_IDENTIFIER, true) + } + + #[inline] + pub fn finish_row_payload_buffer<'a, 'b, A: ::flatbuffers::Allocator + 'a>( + fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + root: ::flatbuffers::WIPOffset<RowPayload<'a>>, + ) { + fbb.finish(root, Some(ROW_PAYLOAD_IDENTIFIER)); + } + + #[inline] + pub fn finish_size_prefixed_row_payload_buffer< + 'a, + 'b, + A: ::flatbuffers::Allocator + 'a, + >( + fbb: &'b mut ::flatbuffers::FlatBufferBuilder<'a, A>, + root: ::flatbuffers::WIPOffset<RowPayload<'a>>, + ) { + fbb.finish_size_prefixed(root, Some(ROW_PAYLOAD_IDENTIFIER)); + } + } // pub mod row + } // pub mod serialization +} // pub mod kalamdb
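For reviewers: a minimal sketch of how this generated API is driven end to end — build a `RowPayload`, finish the buffer with the `KROW` identifier, then re-read it through the verifier. The `kalamdb::serialization::row` path follows the closing module comments above; where that module is re-exported from is an assumption here.

```rust
use kalamdb::serialization::row::{
    finish_row_payload_buffer, root_as_row_payload, row_payload_buffer_has_identifier,
    ColumnValue, ColumnValueArgs, RowPayload, RowPayloadArgs,
};

fn roundtrip() {
    let mut fbb = flatbuffers::FlatBufferBuilder::new();
    // Strings and child tables must be created before the parent table starts.
    let name = fbb.create_string("id");
    let column = ColumnValue::create(&mut fbb, &ColumnValueArgs { name: Some(name), value: None });
    let columns = fbb.create_vector(&[column]);
    let row = RowPayload::create(&mut fbb, &RowPayloadArgs { columns: Some(columns) });
    // Stamps the 4-byte "KROW" identifier so readers can cheaply reject foreign buffers.
    finish_row_payload_buffer(&mut fbb, row);

    let buf = fbb.finished_data();
    assert!(row_payload_buffer_has_identifier(buf));
    // root_as_row_payload runs the flatbuffers verifier before exposing the table view.
    let decoded = root_as_row_payload(buf).expect("valid RowPayload buffer");
    assert_eq!(decoded.columns().map(|c| c.len()), Some(1));
}
```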
diff --git a/backend/crates/kalamdb-commons/src/serialization/row_codec.rs b/backend/crates/kalamdb-commons/src/serialization/row_codec.rs index 903df8aec..d6ae36b40 100644 --- a/backend/crates/kalamdb-commons/src/serialization/row_codec.rs +++ b/backend/crates/kalamdb-commons/src/serialization/row_codec.rs @@ -850,8 +850,8 @@ mod tests { values.insert("name".to_string(), ScalarValue::Utf8(Some("shared".to_string()))); let fields = Row { values }; - let encoded = encode_shared_table_row(SeqId::new(456), 77, true, &fields) - .expect("encode shared row"); + let encoded = + encode_shared_table_row(SeqId::new(456), 77, true, &fields).expect("encode shared row"); let (seq, commit_seq, deleted, decoded_fields) = decode_shared_table_row(&encoded).expect("decode shared row"); assert_eq!(seq, SeqId::new(456)); diff --git a/backend/crates/kalamdb-configs/src/config/loader.rs b/backend/crates/kalamdb-configs/src/config/loader.rs index 9df5de94d..4c26acf3c 100644 --- a/backend/crates/kalamdb-configs/src/config/loader.rs +++ b/backend/crates/kalamdb-configs/src/config/loader.rs @@ -55,8 +55,8 @@ impl ServerConfig { .map(str::trim) .filter(|value| !value.is_empty()) { - let valid_scheme = public_origin.starts_with("http://") - || public_origin.starts_with("https://"); + let valid_scheme = + public_origin.starts_with("http://") || public_origin.starts_with("https://"); if !valid_scheme { return Err(anyhow::anyhow!( "server.public_origin must start with http:// or https://" @@ -153,16 +153,26 @@ mod tests { let mut config = ServerConfig::default(); config.server.port = 9090; - assert_eq!( -
config.server.effective_public_origin(), - "http://localhost:9090" - ); + assert_eq!(config.server.effective_public_origin(), "http://localhost:9090"); + + config.server.public_origin = Some("https://db.example.com/".to_string()); + + assert_eq!(config.server.effective_public_origin(), "https://db.example.com"); + } + + #[test] + fn test_configured_public_origin_treats_blank_as_unset() { + let mut config = ServerConfig::default(); + + config.server.public_origin = Some(" ".to_string()); + + assert_eq!(config.server.configured_public_origin(), None); config.server.public_origin = Some("https://db.example.com/".to_string()); assert_eq!( - config.server.effective_public_origin(), - "https://db.example.com" + config.server.configured_public_origin().as_deref(), + Some("https://db.example.com") ); } diff --git a/backend/crates/kalamdb-configs/src/config/override.rs b/backend/crates/kalamdb-configs/src/config/override.rs index ad3debf71..139235bbe 100644 --- a/backend/crates/kalamdb-configs/src/config/override.rs +++ b/backend/crates/kalamdb-configs/src/config/override.rs @@ -8,7 +8,7 @@ impl ServerConfig { /// Supported environment variables (T030): /// - KALAMDB_SERVER_HOST: Override server.host /// - KALAMDB_SERVER_PORT: Override server.port - /// - KALAMDB_SERVER_PUBLIC_ORIGIN: Override server.public_origin + /// - KALAMDB_SERVER_PUBLIC_ORIGIN: Override server.public_origin (empty keeps Admin UI browser-origin fallback) /// - KALAMDB_LOG_LEVEL: Override logging.level /// - KALAMDB_LOGS_DIR: Override logging.logs_path /// - KALAMDB_LOG_TO_CONSOLE: Override logging.log_to_console @@ -345,6 +345,20 @@ mod tests { env::remove_var("KALAMDB_SERVER_PORT"); } + #[test] + fn test_env_override_public_origin_allows_blank_browser_fallback() { + let _guard = acquire_env_lock(); + env::set_var("KALAMDB_SERVER_PUBLIC_ORIGIN", ""); + + let mut config = ServerConfig::default(); + config.apply_env_overrides().unwrap(); + + assert_eq!(config.server.public_origin.as_deref(), Some("")); + assert_eq!(config.server.configured_public_origin(), None); + + env::remove_var("KALAMDB_SERVER_PUBLIC_ORIGIN"); + } + #[test] fn test_env_override_log_level() { let _guard = acquire_env_lock(); diff --git a/backend/crates/kalamdb-configs/src/config/types.rs b/backend/crates/kalamdb-configs/src/config/types.rs index f3a21e02b..61969fe0a 100644 --- a/backend/crates/kalamdb-configs/src/config/types.rs +++ b/backend/crates/kalamdb-configs/src/config/types.rs @@ -244,12 +244,16 @@ pub struct ServerSettings { } impl ServerSettings { - pub fn effective_public_origin(&self) -> String { + pub fn configured_public_origin(&self) -> Option<String> { self.public_origin .as_deref() .map(str::trim) .filter(|value| !value.is_empty()) .map(|value| value.trim_end_matches('/').to_string()) + } + + pub fn effective_public_origin(&self) -> String { + self.configured_public_origin() .unwrap_or_else(|| format!("http://localhost:{}", self.port)) } } diff --git a/backend/crates/kalamdb-core/src/app_context.rs b/backend/crates/kalamdb-core/src/app_context.rs index 5e34c8966..5c182224e 100644 --- a/backend/crates/kalamdb-core/src/app_context.rs +++ b/backend/crates/kalamdb-core/src/app_context.rs @@ -20,7 +20,9 @@ use kalamdb_commons::models::{NamespaceId, TransactionOrigin, UserId}; use kalamdb_commons::{constants::ColumnFamilyNames, NodeId}; use kalamdb_configs::ServerConfig; use kalamdb_filestore::StorageRegistry; -use kalamdb_live::{ConnectionsManager, LiveQueryManager, NotificationService, TopicPublisherService}; +use kalamdb_live::{ + ConnectionsManager,
LiveQueryManager, NotificationService, TopicPublisherService, +}; use kalamdb_pg::{KalamPgService, LivePgTransaction}; use kalamdb_raft::CommandExecutor; use kalamdb_sharding::{GroupId, ShardRouter}; @@ -434,8 +436,7 @@ impl AppContext { let notification_service = NotificationService::new(Arc::clone(&connection_registry)); // Create unified topic publisher service for pub/sub infrastructure - let visibility_timeout = - Duration::from_secs(config.topics.visibility_timeout_secs); + let visibility_timeout = Duration::from_secs(config.topics.visibility_timeout_secs); let topic_publisher = Arc::new(TopicPublisherService::with_visibility_timeout( storage_backend.clone(), visibility_timeout, @@ -586,27 +587,28 @@ impl AppContext { app_ctx.transaction_coordinator().start_timeout_sweeper(); let app_ctx_for_transactions = Arc::clone(&app_ctx); - let transactions_snapshot_callback: TransactionsSnapshotCallback = Arc::new(move || { - app_ctx_for_transactions - .transaction_coordinator() - .active_metrics() - .into_iter() - .map(|metric| TransactionSnapshot { - transaction_id: metric.transaction_id.to_string(), - owner_id: metric.owner_id.to_string(), - origin: metric.origin.as_str().to_string(), - state: metric.state.lifecycle_str().to_string(), - age_ms: metric.age_ms.min(i64::MAX as u64) as i64, - idle_ms: metric.idle_ms.min(i64::MAX as u64) as i64, - write_count: metric.write_count.min(i64::MAX as usize) as i64, - write_bytes: metric.write_bytes.min(i64::MAX as usize) as i64, - touched_tables_count: metric.touched_tables_count.min(i64::MAX as usize) - as i64, - snapshot_commit_seq: metric.snapshot_commit_seq.min(i64::MAX as u64) - as i64, - }) - .collect() - }); + let transactions_snapshot_callback: TransactionsSnapshotCallback = + Arc::new(move || { + app_ctx_for_transactions + .transaction_coordinator() + .active_metrics() + .into_iter() + .map(|metric| TransactionSnapshot { + transaction_id: metric.transaction_id.to_string(), + owner_id: metric.owner_id.to_string(), + origin: metric.origin.as_str().to_string(), + state: metric.state.lifecycle_str().to_string(), + age_ms: metric.age_ms.min(i64::MAX as u64) as i64, + idle_ms: metric.idle_ms.min(i64::MAX as u64) as i64, + write_count: metric.write_count.min(i64::MAX as usize) as i64, + write_bytes: metric.write_bytes.min(i64::MAX as usize) as i64, + touched_tables_count: metric.touched_tables_count.min(i64::MAX as usize) + as i64, + snapshot_commit_seq: metric.snapshot_commit_seq.min(i64::MAX as u64) + as i64, + }) + .collect() + }); transactions_view.set_snapshot_callback(transactions_snapshot_callback); let live_query_manager = Arc::new(LiveQueryManager::new( @@ -825,8 +827,7 @@ impl AppContext { let notification_service = NotificationService::new(Arc::clone(&connection_registry)); // Create unified topic publisher service for tests - let visibility_timeout = - Duration::from_secs(config.topics.visibility_timeout_secs); + let visibility_timeout = Duration::from_secs(config.topics.visibility_timeout_secs); let topic_publisher = Arc::new(TopicPublisherService::with_visibility_timeout( storage_backend.clone(), visibility_timeout, diff --git a/backend/crates/kalamdb-core/src/applier/applier.rs b/backend/crates/kalamdb-core/src/applier/applier.rs index 117e5c577..f0d8d0e1a 100644 --- a/backend/crates/kalamdb-core/src/applier/applier.rs +++ b/backend/crates/kalamdb-core/src/applier/applier.rs @@ -496,31 +496,29 @@ impl UnifiedApplier for RaftApplier { // ========================================================================= - async fn 
commit_transaction( - &self, - transaction_id: TransactionId, - mutations: Vec<StagedMutation>, - ) -> Result<DataResponse, ApplierError> { - let app_ctx = self.executor().app_context(); - let executor = app_ctx.executor(); - let raft_exec = executor - .as_any() - .downcast_ref::<RaftExecutor>() - .ok_or(ApplierError::NoLeader)?; - let raft_mgr = raft_exec.manager(); - - let group_id = self.transaction_group_id(raft_mgr, &mutations)?; - let response = raft_mgr - .propose_transaction_commit(group_id, transaction_id, mutations) - .await - .map_err(|e| ApplierError::Raft(e.to_string()))?; - - if let DataResponse::Error { message } = &response { - return Err(ApplierError::Raft(message.clone())); - } + async fn commit_transaction( + &self, + transaction_id: TransactionId, + mutations: Vec<StagedMutation>, + ) -> Result<DataResponse, ApplierError> { + let app_ctx = self.executor().app_context(); + let executor = app_ctx.executor(); + let raft_exec = + executor.as_any().downcast_ref::<RaftExecutor>().ok_or(ApplierError::NoLeader)?; + let raft_mgr = raft_exec.manager(); + + let group_id = self.transaction_group_id(raft_mgr, &mutations)?; + let response = raft_mgr + .propose_transaction_commit(group_id, transaction_id, mutations) + .await + .map_err(|e| ApplierError::Raft(e.to_string()))?; - Ok(response) + if let DataResponse::Error { message } = &response { + return Err(ApplierError::Raft(message.clone())); } + + Ok(response) + } // Status Methods // ========================================================================= diff --git a/backend/crates/kalamdb-core/src/applier/executor/dml.rs b/backend/crates/kalamdb-core/src/applier/executor/dml.rs index e6debee6b..b6342ba5d 100644 --- a/backend/crates/kalamdb-core/src/applier/executor/dml.rs +++ b/backend/crates/kalamdb-core/src/applier/executor/dml.rs @@ -75,8 +75,7 @@ impl DmlExecutor { fn table_has_file_columns(&self, table_id: &TableId) -> bool { match self.app_context.schema_registry().get_table_if_exists(table_id) { Ok(Some(table_def)) => table_def.columns.iter().any(|column| { - column.data_type - == kalamdb_commons::models::datatypes::KalamDataType::File + column.data_type == kalamdb_commons::models::datatypes::KalamDataType::File }), Ok(None) => false, Err(error) => { @@ -103,14 +102,14 @@ impl DmlExecutor { let scoped_user = user_id.cloned(); let table_id = notification.table_id.clone(); - let _ = self - .publish_transaction_notification(user_id, &notification) - .await; + let _ = self.publish_transaction_notification(user_id, &notification).await; if has_live_subscribers { - self.app_context - .notification_service() - .notify_table_change(scoped_user, table_id, notification); + self.app_context.notification_service().notify_table_change( + scoped_user, + table_id, + notification, + ); } } @@ -219,8 +218,7 @@ impl DmlExecutor { if let Some((_row_key, notification)) = updated { if let Some(notification) = notification { - self.emit_autocommit_notification(Some(user_id), notification) - .await; + self.emit_autocommit_notification(Some(user_id), notification).await; } delete_file_refs_best_effort( self.app_context.as_ref(), @@ -293,8 +291,7 @@ impl DmlExecutor { .map_err(|e| ApplierError::Execution(format!("Failed to delete row: {}", e)))?
{ if let Some(notification) = notification { - self.emit_autocommit_notification(Some(user_id), notification) - .await; + self.emit_autocommit_notification(Some(user_id), notification).await; } deleted_count += 1; delete_file_refs_best_effort( @@ -461,8 +458,7 @@ impl DmlExecutor { pk_values: Option<&[String]>, ) -> Result { let commit_seq = self.app_context.commit_sequence_tracker().allocate_next(); - self.delete_shared_data_with_commit_seq(table_id, pk_values, commit_seq) - .await + self.delete_shared_data_with_commit_seq(table_id, pk_values, commit_seq).await } pub async fn delete_shared_data_with_commit_seq( @@ -540,7 +536,8 @@ impl DmlExecutor { }); if all_inserts_same_table_and_user { - let provider_arc = self.load_provider(&first_mutation.table_id, "Table provider").await?; + let provider_arc = + self.load_provider(&first_mutation.table_id, "Table provider").await?; let provider = provider_arc .as_any() .downcast_ref::() @@ -604,22 +601,16 @@ impl DmlExecutor { cached_provider = Some((mutation.table_id.clone(), Arc::clone(&provider_arc))); provider_arc }; - let provider = provider_arc - .as_any() - .downcast_ref::() - .ok_or_else(|| { + let provider = + provider_arc.as_any().downcast_ref::().ok_or_else(|| { ApplierError::Execution(format!( "Provider type mismatch for user table {}", mutation.table_id )) })?; - let batch_key = format!( - "{}|{}|{}", - mutation.table_id, - user_id.as_str(), - mutation.primary_key - ); + let batch_key = + format!("{}|{}|{}", mutation.table_id, user_id.as_str(), mutation.primary_key); if !seen_insert_keys.insert(batch_key) { return Err(ApplierError::Execution(format!( "Failed to insert batch row: Already exists: Primary key violation: value '{}' appears multiple times in the transaction batch for column '{}'", @@ -631,10 +622,7 @@ impl DmlExecutor { table_base::ensure_unique_pk_value(provider, Some(&user_id), &mutation.payload) .await .map_err(|error| { - ApplierError::Execution(format!( - "Failed to insert batch row: {}", - error - )) + ApplierError::Execution(format!("Failed to insert batch row: {}", error)) })?; } @@ -671,16 +659,13 @@ impl DmlExecutor { { Arc::clone(&cached_provider.as_ref().expect("cached provider").1) } else { - let provider_arc = self - .load_provider(&mutation.table_id, "Shared table provider") - .await?; + let provider_arc = + self.load_provider(&mutation.table_id, "Shared table provider").await?; cached_provider = Some((mutation.table_id.clone(), Arc::clone(&provider_arc))); provider_arc }; - let provider = provider_arc - .as_any() - .downcast_ref::() - .ok_or_else(|| { + let provider = + provider_arc.as_any().downcast_ref::().ok_or_else(|| { ApplierError::Execution(format!( "Provider type mismatch for shared table {}", mutation.table_id @@ -699,10 +684,7 @@ impl DmlExecutor { table_base::ensure_unique_pk_value(provider, None, &mutation.payload) .await .map_err(|error| { - ApplierError::Execution(format!( - "Failed to insert batch row: {}", - error - )) + ApplierError::Execution(format!("Failed to insert batch row: {}", error)) })?; } @@ -714,8 +696,7 @@ impl DmlExecutor { transaction_id: &TransactionId, mutations: &[StagedMutation], ) -> Result { - self.prevalidate_user_transaction_batch(transaction_id, mutations) - .await?; + self.prevalidate_user_transaction_batch(transaction_id, mutations).await?; let commit_seq = self.app_context.commit_sequence_tracker().allocate_next(); let mut affected_rows = 0; @@ -731,9 +712,8 @@ impl DmlExecutor { }); if all_inserts_same_table_and_user { - let provider_arc = self - 
.load_provider(&first_mutation.table_id, "Table provider") - .await?; + let provider_arc = + self.load_provider(&first_mutation.table_id, "Table provider").await?; let provider = provider_arc .as_any() .downcast_ref::() @@ -747,10 +727,7 @@ impl DmlExecutor { let applied = provider .insert_batch_deferred_prevalidated_with_commit_seq( &user_id, - mutations - .iter() - .map(|mutation| mutation.payload.clone()) - .collect(), + mutations.iter().map(|mutation| mutation.payload.clone()).collect(), commit_seq, ) .await @@ -823,7 +800,9 @@ impl DmlExecutor { } let user_id = mutation.user_id.clone().ok_or_else(|| { - ApplierError::Validation("user transaction batch mutation missing user_id".to_string()) + ApplierError::Validation( + "user transaction batch mutation missing user_id".to_string(), + ) })?; let provider_arc = if cached_provider @@ -837,10 +816,8 @@ impl DmlExecutor { cached_provider = Some((mutation.table_id.clone(), Arc::clone(&provider_arc))); provider_arc }; - let provider = provider_arc - .as_any() - .downcast_ref::() - .ok_or_else(|| { + let provider = + provider_arc.as_any().downcast_ref::().ok_or_else(|| { ApplierError::Execution(format!( "Provider type mismatch for user table {}", mutation.table_id @@ -860,31 +837,23 @@ impl DmlExecutor { )?; Some((row_key, notification)) }, - OperationKind::Update => { - provider - .update_by_pk_value_deferred( - &user_id, - mutation.primary_key.as_str(), - mutation.payload.clone(), - commit_seq, - ) - .await - .map_err(|e| { - ApplierError::Execution(format!("Failed to update row: {}", e)) - })? - }, - OperationKind::Delete => { - provider - .delete_by_pk_value_deferred( - &user_id, - mutation.primary_key.as_str(), - commit_seq, - ) - .await - .map_err(|e| { - ApplierError::Execution(format!("Failed to delete row: {}", e)) - })? 
- }, + OperationKind::Update => provider + .update_by_pk_value_deferred( + &user_id, + mutation.primary_key.as_str(), + mutation.payload.clone(), + commit_seq, + ) + .await + .map_err(|e| ApplierError::Execution(format!("Failed to update row: {}", e)))?, + OperationKind::Delete => provider + .delete_by_pk_value_deferred( + &user_id, + mutation.primary_key.as_str(), + commit_seq, + ) + .await + .map_err(|e| ApplierError::Execution(format!("Failed to delete row: {}", e)))?, }; let Some((_row_key, notification)) = applied else { @@ -895,10 +864,7 @@ side_effect_plan.record_manifest_update(); if let Some(notification) = notification { - if self - .publish_transaction_notification(Some(&user_id), &notification) - .await - { + if self.publish_transaction_notification(Some(&user_id), &notification).await { side_effect_plan.record_publisher_event(); } @@ -907,18 +873,14 @@ Some(&user_id), &mutation.table_id, ) { - side_effect_plan.push_notification( - FanoutOwnerScope::User(user_id.clone()), - notification, - ); + side_effect_plan + .push_notification(FanoutOwnerScope::User(user_id.clone()), notification); } } } - let notifications_sent = self - .app_context - .notification_service() - .dispatch_commit_plan(&side_effect_plan); + let notifications_sent = + self.app_context.notification_service().dispatch_commit_plan(&side_effect_plan); Ok(TransactionApplyResult { rows_affected: affected_rows, @@ -934,8 +896,7 @@ transaction_id: &TransactionId, mutations: &[StagedMutation], ) -> Result { - self.prevalidate_shared_transaction_batch(transaction_id, mutations) - .await?; + self.prevalidate_shared_transaction_batch(transaction_id, mutations).await?; let commit_seq = self.app_context.commit_sequence_tracker().allocate_next(); let mut affected_rows = 0; @@ -949,9 +910,8 @@ }); if all_inserts_same_table { - let provider_arc = self - .load_provider(&first_mutation.table_id, "Shared table provider") - .await?; + let provider_arc = + self.load_provider(&first_mutation.table_id, "Shared table provider").await?; let provider = provider_arc .as_any() .downcast_ref::() @@ -980,7 +940,9 @@ ))); } - for (_mutation, (_row_key, notification)) in mutations.iter().zip(applied.into_iter()) { + for (_mutation, (_row_key, notification)) in + mutations.iter().zip(applied.into_iter()) + { affected_rows += 1; side_effect_plan.record_manifest_update(); @@ -994,15 +956,14 @@ None, &notification.table_id, ) { - side_effect_plan.push_notification(FanoutOwnerScope::Shared, notification); + side_effect_plan + .push_notification(FanoutOwnerScope::Shared, notification); } } } - let notifications_sent = self - .app_context - .notification_service() - .dispatch_commit_plan(&side_effect_plan); + let notifications_sent = + self.app_context.notification_service().dispatch_commit_plan(&side_effect_plan); return Ok(TransactionApplyResult { rows_affected: affected_rows, @@ -1034,16 +995,13 @@ { Arc::clone(&cached_provider.as_ref().expect("cached provider").1) } else { - let provider_arc = self - .load_provider(&mutation.table_id, "Shared table provider") - .await?; + let provider_arc = + self.load_provider(&mutation.table_id, "Shared table provider").await?; cached_provider = Some((mutation.table_id.clone(), Arc::clone(&provider_arc))); provider_arc }; - let provider = provider_arc - .as_any() - .downcast_ref::() - .ok_or_else(|| { + let provider = + provider_arc.as_any().downcast_ref::().ok_or_else(|| {
ApplierError::Execution(format!( "Provider type mismatch for shared table {}", mutation.table_id @@ -1063,26 +1021,18 @@ )?; Some((row_key, notification)) }, - OperationKind::Update => { - provider - .update_by_pk_value_deferred( - mutation.primary_key.as_str(), - mutation.payload.clone(), - commit_seq, - ) - .await - .map_err(|e| { - ApplierError::Execution(format!("Failed to update row: {}", e)) - })? - }, - OperationKind::Delete => { - provider - .delete_by_pk_value_deferred(mutation.primary_key.as_str(), commit_seq) - .await - .map_err(|e| { - ApplierError::Execution(format!("Failed to delete row: {}", e)) - })? - }, + OperationKind::Update => provider + .update_by_pk_value_deferred( + mutation.primary_key.as_str(), + mutation.payload.clone(), + commit_seq, + ) + .await + .map_err(|e| ApplierError::Execution(format!("Failed to update row: {}", e)))?, + OperationKind::Delete => provider + .delete_by_pk_value_deferred(mutation.primary_key.as_str(), commit_seq) + .await + .map_err(|e| ApplierError::Execution(format!("Failed to delete row: {}", e)))?, }; let Some((_row_key, notification)) = applied else { @@ -1107,10 +1057,8 @@ } } - let notifications_sent = self - .app_context - .notification_service() - .dispatch_commit_plan(&side_effect_plan); + let notifications_sent = + self.app_context.notification_service().dispatch_commit_plan(&side_effect_plan); Ok(TransactionApplyResult { rows_affected: affected_rows, @@ -1153,9 +1101,12 @@ } let op = Self::topic_op_for_change(&notification.change_type); - if let Err(error) = - topic_publisher.publish_for_table(&notification.table_id, op, &notification.row_data, user_id) - { + if let Err(error) = topic_publisher.publish_for_table( + &notification.table_id, + op, + &notification.row_data, + user_id, + ) { log::warn!( "Topic publish failed for transaction change on table {}: {}", notification.table_id, @@ -1317,9 +1268,7 @@ StreamTableRow, >>::update(provider, user_id, &key, updates) .await - .map_err(|e| { - ApplierError::Execution(format!("Failed to update row: {}", e)) - })?; + .map_err(|e| ApplierError::Execution(format!("Failed to update row: {}", e)))?; Ok(usize::from(updated.is_some())) } else { Ok(0) diff --git a/backend/crates/kalamdb-core/src/applier/raft/provider_shared_data_applier.rs b/backend/crates/kalamdb-core/src/applier/raft/provider_shared_data_applier.rs index 91503751b..ed5392a30 100644 --- a/backend/crates/kalamdb-core/src/applier/raft/provider_shared_data_applier.rs +++ b/backend/crates/kalamdb-core/src/applier/raft/provider_shared_data_applier.rs @@ -13,8 +13,8 @@ use crate::applier::executor::CommandExecutorImpl; use kalamdb_commons::models::rows::Row; use kalamdb_commons::models::TransactionId; use kalamdb_commons::TableId; -use kalamdb_transactions::StagedMutation; use kalamdb_raft::{RaftError, SharedDataApplier, TransactionApplyResult}; +use kalamdb_transactions::StagedMutation; /// SharedDataApplier implementation using Unified Command Executor /// diff --git a/backend/crates/kalamdb-core/src/applier/raft/provider_user_data_applier.rs b/backend/crates/kalamdb-core/src/applier/raft/provider_user_data_applier.rs index 30cf41fb3..876600674 100644 --- a/backend/crates/kalamdb-core/src/applier/raft/provider_user_data_applier.rs +++ b/backend/crates/kalamdb-core/src/applier/raft/provider_user_data_applier.rs @@ -13,8 +13,8 @@ use crate::applier::executor::CommandExecutorImpl; use kalamdb_commons::models::rows::Row; use kalamdb_commons::models::{TransactionId,
UserId}; use kalamdb_commons::TableId; -use kalamdb_transactions::StagedMutation; use kalamdb_raft::{RaftError, TransactionApplyResult, UserDataApplier}; +use kalamdb_transactions::StagedMutation; /// UserDataApplier implementation using Unified Command Executor /// diff --git a/backend/crates/kalamdb-core/src/cluster_handler.rs b/backend/crates/kalamdb-core/src/cluster_handler.rs index a3c349863..7cd80c1c4 100644 --- a/backend/crates/kalamdb-core/src/cluster_handler.rs +++ b/backend/crates/kalamdb-core/src/cluster_handler.rs @@ -14,8 +14,7 @@ use kalamdb_commons::schemas::SchemaField; use kalamdb_commons::Role; use kalamdb_raft::{ forward_sql_param, ClusterMessageHandler, ForwardSqlParam, ForwardSqlRequest, - ForwardSqlResponsePayload, GetNodeInfoRequest, GetNodeInfoResponse, PingRequest, - RaftExecutor, + ForwardSqlResponsePayload, GetNodeInfoRequest, GetNodeInfoResponse, PingRequest, RaftExecutor, }; use kalamdb_session::{AuthMethod, AuthSession}; use serde::Serialize; @@ -23,8 +22,8 @@ use serde::Serialize; use crate::app_context::AppContext; use crate::sql::context::ExecutionContext; use crate::sql::executor::PreparedExecutionStatement; -use crate::sql::SqlImpersonationService; use crate::sql::ExecutionResult; +use crate::sql::SqlImpersonationService; // ── Response types (match SqlResponse JSON shape, no intermediate Value tree) ── @@ -75,7 +74,10 @@ impl CoreClusterHandler { status: "error", results: &[], took: started_at.elapsed().as_secs_f64() * 1000.0, - error: Some(ForwardedError { code: error_code, message }), + error: Some(ForwardedError { + code: error_code, + message, + }), }; let body = serde_json::to_vec(&resp).unwrap_or_else(|_| { b"{\"status\":\"error\",\"results\":[],\"took\":0,\"error\":{\"code\":\"INTERNAL_ERROR\",\"message\":\"Failed to serialize error payload\"}}".to_vec() @@ -87,11 +89,21 @@ impl CoreClusterHandler { param: &ForwardSqlParam, ) -> Result { match param.value.as_ref() { - Some(forward_sql_param::Value::NullValue(_)) => Ok(datafusion::scalar::ScalarValue::Utf8(None)), - Some(forward_sql_param::Value::BoolValue(v)) => Ok(datafusion::scalar::ScalarValue::Boolean(Some(*v))), - Some(forward_sql_param::Value::Int64Value(v)) => Ok(datafusion::scalar::ScalarValue::Int64(Some(*v))), - Some(forward_sql_param::Value::Float64Value(v)) => Ok(datafusion::scalar::ScalarValue::Float64(Some(*v))), - Some(forward_sql_param::Value::StringValue(v)) => Ok(datafusion::scalar::ScalarValue::Utf8(Some(v.clone()))), + Some(forward_sql_param::Value::NullValue(_)) => { + Ok(datafusion::scalar::ScalarValue::Utf8(None)) + }, + Some(forward_sql_param::Value::BoolValue(v)) => { + Ok(datafusion::scalar::ScalarValue::Boolean(Some(*v))) + }, + Some(forward_sql_param::Value::Int64Value(v)) => { + Ok(datafusion::scalar::ScalarValue::Int64(Some(*v))) + }, + Some(forward_sql_param::Value::Float64Value(v)) => { + Ok(datafusion::scalar::ScalarValue::Float64(Some(*v))) + }, + Some(forward_sql_param::Value::StringValue(v)) => { + Ok(datafusion::scalar::ScalarValue::Utf8(Some(v.clone()))) + }, None => Err("Missing parameter value".to_string()), } } @@ -109,12 +121,14 @@ impl CoreClusterHandler { message: Some(message), as_user: as_user.clone(), }), - ExecutionResult::Rows { batches, row_count, schema } => { + ExecutionResult::Rows { + batches, + row_count, + schema, + } => { let arrow_schema = batches.first().map(|b| b.schema()).or(schema); - let schema_fields = arrow_schema - .as_ref() - .map(schema_fields_from_arrow_schema) - .unwrap_or_default(); + let schema_fields = + 
arrow_schema.as_ref().map(schema_fields_from_arrow_schema).unwrap_or_default(); let mut rows = Vec::new(); for batch in &batches { @@ -154,18 +168,32 @@ impl CoreClusterHandler { message: Some(format!("Deleted {} row(s)", rows_affected)), as_user: as_user.clone(), }), - ExecutionResult::Flushed { tables, bytes_written } => Ok(ForwardedResult { + ExecutionResult::Flushed { + tables, + bytes_written, + } => Ok(ForwardedResult { schema: Vec::new(), rows: None, row_count: tables.len(), - message: Some(format!("Flushed {} table(s), {} bytes written", tables.len(), bytes_written)), + message: Some(format!( + "Flushed {} table(s), {} bytes written", + tables.len(), + bytes_written + )), as_user: as_user.clone(), }), - ExecutionResult::Subscription { subscription_id, channel, select_query } => Ok(ForwardedResult { + ExecutionResult::Subscription { + subscription_id, + channel, + select_query, + } => Ok(ForwardedResult { schema: Vec::new(), rows: None, row_count: 1, - message: Some(format!("Subscription {} on channel {} for query: {}", subscription_id, channel, select_query)), + message: Some(format!( + "Subscription {} on channel {} for query: {}", + subscription_id, channel, select_query + )), as_user: as_user.clone(), }), ExecutionResult::JobKilled { job_id, status } => Ok(ForwardedResult { @@ -182,9 +210,7 @@ impl CoreClusterHandler { authenticated_username: &Username, execute_as_username: Option<&Username>, ) -> Username { - execute_as_username - .cloned() - .unwrap_or_else(|| authenticated_username.clone()) + execute_as_username.cloned().unwrap_or_else(|| authenticated_username.clone()) } fn prepare_forwarded_statement( @@ -198,7 +224,8 @@ impl CoreClusterHandler { return Err("Empty SQL statement".to_string()); } - let (sql, execute_as_username) = match kalamdb_sql::execute_as::parse_execute_as(statement)? { + let (sql, execute_as_username) = match kalamdb_sql::execute_as::parse_execute_as(statement)? 
+ { Some(envelope) => { let execute_as_username = Username::try_new(&envelope.username) .map_err(|e| format!("Invalid execute-as username: {}", e))?; @@ -213,10 +240,7 @@ impl CoreClusterHandler { .prepare_statement_metadata_for_role(&sql, default_namespace, actor_role) .map_err(|err| err.to_string())?; - Ok(( - prepared_statement, - execute_as_username, - )) + Ok((prepared_statement, execute_as_username)) } } @@ -395,7 +419,8 @@ impl ClusterMessageHandler for CoreClusterHandler { }; if execute_as_user.is_some() - && prepared_statement.table_type == Some(kalamdb_commons::schemas::TableType::Shared) + && prepared_statement.table_type + == Some(kalamdb_commons::schemas::TableType::Shared) { let table_name = prepared_statement .table_id @@ -433,8 +458,10 @@ impl ClusterMessageHandler for CoreClusterHandler { )); }, }; - let effective_username = - Self::resolve_result_username(&authenticated_username, execute_as_username.as_ref()); + let effective_username = Self::resolve_result_username( + &authenticated_username, + execute_as_username.as_ref(), + ); let effective_role = if execute_as_user.is_some() { Role::User } else { diff --git a/backend/crates/kalamdb-core/src/error_extensions.rs b/backend/crates/kalamdb-core/src/error_extensions.rs index 7ac0bd197..738b7828f 100644 --- a/backend/crates/kalamdb-core/src/error_extensions.rs +++ b/backend/crates/kalamdb-core/src/error_extensions.rs @@ -94,7 +94,6 @@ pub trait KalamDbResultExt { /// .into_invalid_operation("Invalid table configuration")?; /// ``` fn into_invalid_operation(self, context: &str) -> Result; - } impl KalamDbResultExt for Result { @@ -132,7 +131,6 @@ impl KalamDbResultExt for Result { fn into_invalid_operation(self, context: &str) -> Result { self.map_err(|e| KalamDbError::InvalidOperation(format!("{}: {}", context, e))) } - } /// Specialized extension methods for commonly-used types. 
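Side note for reviewers: `into_invalid_operation` follows the usual extension-trait shape — wrap any displayable error in a domain variant with added context. A self-contained sketch of that shape (the real trait's generic parameters are not visible in this hunk, so the bounds below are assumptions; only the `map_err` body is taken verbatim from the impl above):

```rust
use std::fmt::Display;

#[derive(Debug)]
pub enum KalamDbError {
    InvalidOperation(String),
}

pub trait KalamDbResultExt<T> {
    fn into_invalid_operation(self, context: &str) -> Result<T, KalamDbError>;
}

impl<T, E: Display> KalamDbResultExt<T> for Result<T, E> {
    fn into_invalid_operation(self, context: &str) -> Result<T, KalamDbError> {
        // Same body as the impl in the diff: prefix the context, stringify the cause.
        self.map_err(|e| KalamDbError::InvalidOperation(format!("{}: {}", context, e)))
    }
}

fn main() {
    let parsed: Result<u16, KalamDbError> =
        "not-a-port".parse::<u16>().into_invalid_operation("Invalid table configuration");
    // Prints: Err(InvalidOperation("Invalid table configuration: invalid digit found in string"))
    println!("{:?}", parsed);
}
```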
diff --git a/backend/crates/kalamdb-core/src/live_adapters.rs b/backend/crates/kalamdb-core/src/live_adapters.rs index 47add130b..02a754c0a 100644 --- a/backend/crates/kalamdb-core/src/live_adapters.rs +++ b/backend/crates/kalamdb-core/src/live_adapters.rs @@ -64,12 +64,8 @@ impl LiveSqlExecutor for SqlExecutorAdapter { role: Role, read_context: ReadContext, ) -> Result, LiveError> { - let exec_ctx = ExecutionContext::new( - user_id, - role, - Arc::clone(&self.base_session_context), - ) - .with_read_context(read_context); + let exec_ctx = ExecutionContext::new(user_id, role, Arc::clone(&self.base_session_context)) + .with_read_context(read_context); let result = self .executor diff --git a/backend/crates/kalamdb-core/src/manifest/flush/shared.rs b/backend/crates/kalamdb-core/src/manifest/flush/shared.rs index 0c38ba370..f84c0baed 100644 --- a/backend/crates/kalamdb-core/src/manifest/flush/shared.rs +++ b/backend/crates/kalamdb-core/src/manifest/flush/shared.rs @@ -18,6 +18,7 @@ use kalamdb_commons::models::rows::Row; use kalamdb_commons::models::TableId; use kalamdb_commons::schemas::TableType; use kalamdb_commons::StorageKey; +use kalamdb_store::EntityStore; use kalamdb_tables::{SharedTableIndexedStore, SharedTableRow}; use std::sync::Arc; @@ -342,16 +343,9 @@ impl TableFlush for SharedTableFlushJob { ); self.delete_flushed_rows(&all_keys_to_delete)?; - // Compact RocksDB column family after flush to free space and optimize reads - use kalamdb_store::entity_store::EntityStore; - log::debug!( - "🔧 Compacting RocksDB column family after flush: {}", - self.store.partition().name() - ); - if let Err(e) = self.store.compact() { - log::warn!("⚠️ Failed to compact partition after flush: {}", e); - // Non-fatal: flush succeeded, compaction is optimization - } + // Note: RocksDB compaction is handled by the FlushExecutor (fire-and-forget) + // to avoid blocking job completion. Removing the inline compact() call here + // eliminates a redundant double-compaction. let parquet_path = destination_path; diff --git a/backend/crates/kalamdb-core/src/operations/mod.rs b/backend/crates/kalamdb-core/src/operations/mod.rs index f07dd2c42..f800db266 100644 --- a/backend/crates/kalamdb-core/src/operations/mod.rs +++ b/backend/crates/kalamdb-core/src/operations/mod.rs @@ -3,7 +3,7 @@ pub mod scan; pub mod service; pub mod table_cleanup; -pub use service::OperationService; pub use kalamdb_commons::models::pg_operations::{ DeleteRequest, InsertRequest, MutationResult, ScanRequest, ScanResult, UpdateRequest, }; +pub use service::OperationService; diff --git a/backend/crates/kalamdb-core/src/operations/scan.rs b/backend/crates/kalamdb-core/src/operations/scan.rs index 26c92f2cb..6108eb856 100644 --- a/backend/crates/kalamdb-core/src/operations/scan.rs +++ b/backend/crates/kalamdb-core/src/operations/scan.rs @@ -1,13 +1,34 @@ use std::sync::Arc; +use arrow::datatypes::DataType; use arrow::record_batch::RecordBatch; use datafusion::physical_plan::{collect, ExecutionPlan}; -use datafusion::prelude::SessionContext; +use datafusion::prelude::{col, lit, SessionContext}; use super::error::OperationError; use crate::schema_registry::SchemaRegistry; use kalamdb_commons::TableId; +/// Convert a string filter value to a typed DataFusion `Expr` literal +/// based on the Arrow column type. Falls back to string literal for unknown types. 
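+/// For example, `typed_lit("42", &DataType::Int64)` yields `lit(42i64)`, while an unparseable value such as `typed_lit("abc", &DataType::Int64)` falls back to `lit("abc")`.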
+#[inline] +fn typed_lit(value: &str, data_type: &DataType) -> datafusion::prelude::Expr { + match data_type { + DataType::Int8 => value.parse::<i8>().map_or_else(|_| lit(value), lit), + DataType::Int16 => value.parse::<i16>().map_or_else(|_| lit(value), lit), + DataType::Int32 => value.parse::<i32>().map_or_else(|_| lit(value), lit), + DataType::Int64 => value.parse::<i64>().map_or_else(|_| lit(value), lit), + DataType::UInt8 => value.parse::<u8>().map_or_else(|_| lit(value), lit), + DataType::UInt16 => value.parse::<u16>().map_or_else(|_| lit(value), lit), + DataType::UInt32 => value.parse::<u32>().map_or_else(|_| lit(value), lit), + DataType::UInt64 => value.parse::<u64>().map_or_else(|_| lit(value), lit), + DataType::Float32 => value.parse::<f32>().map_or_else(|_| lit(value), lit), + DataType::Float64 => value.parse::<f64>().map_or_else(|_| lit(value), lit), + DataType::Boolean => value.parse::<bool>().map_or_else(|_| lit(value), lit), + _ => lit(value), + } +} + /// Execute a direct provider scan without SQL reconstruction or a per-request SessionContext. /// /// 1. Resolve the table provider from the schema registry. @@ -15,15 +36,19 @@ use kalamdb_commons::TableId; /// 3. Call `TableProvider::scan(session_state, projection, filters=[], limit)` → `ExecutionPlan`. /// 4. Call `collect(plan, task_ctx)` → `Vec<RecordBatch>`. /// +/// When `filters` are non-empty, uses DataFusion's DataFrame API to apply predicates +/// so that the optimizer can push them into the provider scan. Filter values are +/// cast to the column's Arrow type using the table schema for type-correct comparisons. +/// /// The `base_session` is used to extract a `SessionState` (implements `Session` trait) /// and a `TaskContext` needed by the DataFusion physical execution layer. -/// No logical plan, no SQL parsing. pub async fn execute_scan( schema_registry: &SchemaRegistry, base_session: &SessionContext, table_id: &TableId, columns: &[String], limit: Option<usize>, + filters: &[(String, String)], ) -> Result<Vec<RecordBatch>, OperationError> { // 1. Resolve table provider let cached = schema_registry @@ -34,17 +59,46 @@ pub async fn execute_scan( .get_provider() .ok_or_else(|| OperationError::ProviderNotAvailable(table_id.full_name()))?; - // 2. Build projection from column names → indices + // 2. Filtered path: use DataFrame API for predicate pushdown + if !filters.is_empty() { + let schema = provider.schema(); + let mut df = base_session + .read_table(provider) + .map_err(OperationError::DataFusion)?; + + for (column, value) in filters { + // Look up the column type for type-correct literal casting + let expr = match schema.field_with_name(column) { + Ok(field) => col(column).eq(typed_lit(value, field.data_type())), + Err(_) => col(column).eq(lit(value.as_str())), + }; + df = df.filter(expr).map_err(OperationError::DataFusion)?; + } + + if !columns.is_empty() { + let col_exprs: Vec<datafusion::prelude::Expr> = + columns.iter().map(|c| col(c)).collect(); + df = df.select(col_exprs).map_err(OperationError::DataFusion)?; + } + + if let Some(lim) = limit { + df = df.limit(0, Some(lim)).map_err(OperationError::DataFusion)?; + } + + return df.collect().await.map_err(OperationError::DataFusion); + }
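To make the filtered branch above concrete, here is a small self-contained sketch (not part of the diff) of the same DataFrame-based pushdown against an in-memory table. Table contents and column names are invented for illustration; it assumes the `datafusion` and `tokio` crates as dependencies:

```rust
use std::sync::Arc;

use datafusion::arrow::array::{Int64Array, StringArray};
use datafusion::arrow::datatypes::{DataType, Field, Schema};
use datafusion::arrow::record_batch::RecordBatch;
use datafusion::datasource::MemTable;
use datafusion::prelude::{col, lit, SessionContext};

#[tokio::main]
async fn main() -> datafusion::error::Result<()> {
    let schema = Arc::new(Schema::new(vec![
        Field::new("id", DataType::Int64, false),
        Field::new("name", DataType::Utf8, false),
    ]));
    let batch = RecordBatch::try_new(
        Arc::clone(&schema),
        vec![
            Arc::new(Int64Array::from(vec![1, 2, 3])),
            Arc::new(StringArray::from(vec!["alpha", "beta", "gamma"])),
        ],
    )?;
    let table = MemTable::try_new(schema, vec![vec![batch]])?;

    // read_table + filter hands the predicate to the optimizer, which can
    // push it into the provider scan. The literal is a typed Int64 — what
    // typed_lit produces for the string "2" against an Int64 column.
    let ctx = SessionContext::new();
    let batches = ctx
        .read_table(Arc::new(table))?
        .filter(col("id").eq(lit(2_i64)))?
        .select(vec![col("name")])?
        .limit(0, Some(10))?
        .collect()
        .await?;
    println!("{batches:?}");
    Ok(())
}
```

The typed literal matters: comparing an `Int64` column against a string literal would force a cast or defeat the pushdown entirely, which is exactly what `typed_lit` avoids.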
+ + // 3. Unfiltered path: direct provider scan (no LogicalPlan, no SQL) let projection = if columns.is_empty() { None } else { let schema = provider.schema(); let mut indices = Vec::with_capacity(columns.len()); - for col in columns { - let idx = schema.index_of(col).map_err(|_| { + for col_name in columns { + let idx = schema.index_of(col_name).map_err(|_| { OperationError::InvalidArgument(format!( "column '{}' not found in table '{}'", - col, + col_name, table_id.full_name() )) })?; @@ -53,15 +107,12 @@ Some(indices) }; - // 3. Execute physical scan — no LogicalPlan, no SQL - // SessionState implements the Session trait; SessionContext does not. let state = base_session.state(); let plan: Arc<dyn ExecutionPlan> = provider .scan(&state, projection.as_ref(), &[], limit) .await .map_err(OperationError::DataFusion)?; - // 4. Collect results using TaskContext only let task_ctx = state.task_ctx(); let batches = collect(plan, task_ctx).await.map_err(OperationError::DataFusion)?; diff --git a/backend/crates/kalamdb-core/src/operations/service.rs b/backend/crates/kalamdb-core/src/operations/service.rs index 2d806821e..581abaccd 100644 --- a/backend/crates/kalamdb-core/src/operations/service.rs +++ b/backend/crates/kalamdb-core/src/operations/service.rs @@ -2,11 +2,13 @@ use std::sync::Arc; use async_trait::async_trait; use datafusion::prelude::SessionContext; -use kalamdb_commons::models::rows::Row; -use kalamdb_commons::models::{OperationKind, ReadContext, Role, TransactionId, TransactionOrigin, UserId}; use kalamdb_commons::models::pg_operations::{ DeleteRequest, InsertRequest, MutationResult, ScanRequest, ScanResult, UpdateRequest, }; +use kalamdb_commons::models::rows::Row; +use kalamdb_commons::models::{ + OperationKind, ReadContext, Role, TransactionId, TransactionOrigin, UserId, +}; use kalamdb_commons::{NamespaceId, TableType}; use kalamdb_pg::OperationExecutor; use kalamdb_session_datafusion::SessionUserContext; @@ -75,11 +77,16 @@ impl OperationService { } } - fn active_transaction_for_session(&self, session_id: Option<&str>) -> Result<Option<TransactionId>, Status> { + fn active_transaction_for_session( + &self, + session_id: Option<&str>, + ) -> Result<Option<TransactionId>, Status> { // Autocommit typed DML stays on the hot path here: no transaction handle means // one session-id parse plus one coordinator owner-key lookup, with no overlay, // query-context, or staged-write allocation. - let Some(session_id) = session_id.map(str::trim).filter(|session_id| !session_id.is_empty()) else { + let Some(session_id) = + session_id.map(str::trim).filter(|session_id| !session_id.is_empty()) + else { return Ok(None); }; @@ -94,7 +101,9 @@ impl OperationService { ) -> Result<Option<TransactionQueryContext>, Status> { // Autocommit reads return from this helper without constructing an overlay view // or mutation sink unless an active transaction handle is actually present.
- let Some(session_id) = session_id.map(str::trim).filter(|session_id| !session_id.is_empty()) else { + let Some(session_id) = + session_id.map(str::trim).filter(|session_id| !session_id.is_empty()) + else { return Ok(None); }; @@ -105,12 +114,12 @@ impl OperationService { return Ok(None); }; - let handle = coordinator - .get_handle(&transaction_id) - .ok_or_else(|| Status::failed_precondition(format!( + let handle = coordinator.get_handle(&transaction_id).ok_or_else(|| { + Status::failed_precondition(format!( "active transaction '{}' has no handle", transaction_id - )))?; + )) + })?; if !handle.state.is_open() { return Err(Status::failed_precondition(format!( @@ -122,14 +131,9 @@ impl OperationService { Ok(Some(TransactionQueryContext::new( transaction_id.clone(), handle.snapshot_commit_seq, - Arc::new(CoordinatorOverlayView::new( - Arc::clone(&coordinator), - transaction_id.clone(), - )), + Arc::new(CoordinatorOverlayView::new(Arc::clone(&coordinator), transaction_id.clone())), Arc::new(crate::transactions::CoordinatorMutationSink::new(coordinator)), - Arc::new(CoordinatorAccessValidator::new( - self.app_context.transaction_coordinator(), - )), + Arc::new(CoordinatorAccessValidator::new(self.app_context.transaction_coordinator())), ))) } @@ -164,11 +168,8 @@ impl OperationService { request: UpdateRequest, ) -> Result { let coordinator = self.app_context.transaction_coordinator(); - let payload = request - .updates - .into_iter() - .next() - .unwrap_or_else(|| Row::new(BTreeMap::new())); + let payload = + request.updates.into_iter().next().unwrap_or_else(|| Row::new(BTreeMap::new())); let mutation = StagedMutation::new( transaction_id.clone(), request.table_id, @@ -272,6 +273,7 @@ impl OperationExecutor for OperationService { &request.table_id, &request.columns, request.limit, + &request.filters, ) .await .map_err(|e| -> Status { e.into() })?; @@ -450,8 +452,8 @@ mod tests { use arrow::array::{Int64Array, StringArray}; use arrow::datatypes::{DataType, Field, Schema}; use arrow::record_batch::RecordBatch; - use datafusion_common::ScalarValue; use datafusion::datasource::MemTable; + use datafusion_common::ScalarValue; use kalamdb_commons::datatypes::KalamDataType; use kalamdb_commons::models::rows::Row; use kalamdb_commons::models::schemas::{ColumnDefinition, TableDefinition, TableOptions}; @@ -525,6 +527,7 @@ mod tests { columns: vec![], limit: None, user_id: None, + filters: vec![], }; let err = svc.execute_scan(req).await.unwrap_err(); assert_eq!(err.code(), tonic::Code::NotFound); @@ -544,6 +547,7 @@ mod tests { columns: vec![], limit: None, user_id: None, + filters: vec![], }) .await .expect("scan should succeed"); @@ -579,6 +583,7 @@ mod tests { columns: vec![], limit: None, user_id: None, + filters: vec![], }) .await .expect("scan should succeed"); @@ -616,6 +621,7 @@ mod tests { columns: vec!["name".to_string()], limit: None, user_id: None, + filters: vec![], }) .await .expect("scan with projection"); @@ -637,6 +643,7 @@ mod tests { columns: vec!["nonexistent_col".to_string()], limit: None, user_id: None, + filters: vec![], }) .await .unwrap_err(); @@ -674,6 +681,7 @@ mod tests { columns: vec![], limit: Some(2), user_id: None, + filters: vec![], }) .await .expect("scan with limit should succeed"); @@ -820,10 +828,7 @@ mod tests { let mut values = BTreeMap::new(); values.insert("id".to_string(), ScalarValue::Int64(Some(42))); - values.insert( - "name".to_string(), - ScalarValue::Utf8(Some("staged item".to_string())), - ); + values.insert("name".to_string(), 
ScalarValue::Utf8(Some("staged item".to_string()))); let result = svc .execute_insert(InsertRequest { diff --git a/backend/crates/kalamdb-core/src/schema_registry/registry/core.rs b/backend/crates/kalamdb-core/src/schema_registry/registry/core.rs index 115dbf9c0..bfd6de40a 100644 --- a/backend/crates/kalamdb-core/src/schema_registry/registry/core.rs +++ b/backend/crates/kalamdb-core/src/schema_registry/registry/core.rs @@ -10,7 +10,6 @@ use datafusion::arrow::datatypes::SchemaRef; use datafusion::datasource::TableProvider; use datafusion::logical_expr::expr::ScalarFunction as ScalarFunctionExpr; use datafusion::logical_expr::Expr; -use kalamdb_live::models::ChangeNotification; use kalamdb_commons::constants::SystemColumnNames; use kalamdb_commons::conversions::json_value_to_scalar; use kalamdb_commons::datatypes::KalamDataType; @@ -18,6 +17,7 @@ use kalamdb_commons::models::schemas::TableDefinition; use kalamdb_commons::models::{StorageId, TableId, TableVersionId}; use kalamdb_commons::schemas::{ColumnDefault, ColumnDefinition, TableType}; use kalamdb_commons::SystemTable; +use kalamdb_live::models::ChangeNotification; use kalamdb_system::{NotificationService, SchemaRegistry as SchemaRegistryTrait}; // use kalamdb_system::NotificationService as NotificationServiceTrait; use std::collections::{HashMap, HashSet}; diff --git a/backend/crates/kalamdb-core/src/sql/context/execution_context.rs b/backend/crates/kalamdb-core/src/sql/context/execution_context.rs index 5f4376f99..d5f61e76d 100644 --- a/backend/crates/kalamdb-core/src/sql/context/execution_context.rs +++ b/backend/crates/kalamdb-core/src/sql/context/execution_context.rs @@ -109,7 +109,11 @@ impl ExecutionContext { } #[inline] pub fn username(&self) -> Option<&str> { - self.auth_session.user_context().username.as_ref().map(|username| username.as_str()) + self.auth_session + .user_context() + .username + .as_ref() + .map(|username| username.as_str()) } #[inline] pub fn request_id(&self) -> Option<&str> { diff --git a/backend/crates/kalamdb-core/src/sql/executor/helpers/ast_parsing.rs b/backend/crates/kalamdb-core/src/sql/executor/helpers/ast_parsing.rs index b92270d94..e8c338fb9 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/helpers/ast_parsing.rs +++ b/backend/crates/kalamdb-core/src/sql/executor/helpers/ast_parsing.rs @@ -65,4 +65,3 @@ pub fn strip_nested_expr(expr: &Expr) -> &Expr { _ => expr, } } - diff --git a/backend/crates/kalamdb-core/src/sql/executor/mod.rs b/backend/crates/kalamdb-core/src/sql/executor/mod.rs index 6702c90ae..a8645413c 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/mod.rs +++ b/backend/crates/kalamdb-core/src/sql/executor/mod.rs @@ -12,8 +12,8 @@ pub mod handlers; pub mod helpers; pub mod parameter_binding; pub mod request_transaction_state; -mod transaction_batch_insert; mod sql_executor; +mod transaction_batch_insert; use crate::sql::executor::handler_registry::HandlerRegistry; use crate::sql::plan_cache::SqlCacheRegistry; diff --git a/backend/crates/kalamdb-core/src/sql/executor/request_transaction_state.rs b/backend/crates/kalamdb-core/src/sql/executor/request_transaction_state.rs index b72f11af1..c92a43a33 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/request_transaction_state.rs +++ b/backend/crates/kalamdb-core/src/sql/executor/request_transaction_state.rs @@ -53,9 +53,8 @@ impl RequestTransactionState { } pub fn sync_from_coordinator(&mut self, app_context: &AppContext) { - self.active_transaction_id = app_context - .transaction_coordinator() - 
.active_for_owner(&self.owner_key); + self.active_transaction_id = + app_context.transaction_coordinator().active_for_owner(&self.owner_key); } pub fn begin(&mut self, app_context: &AppContext) -> Result<TransactionId, KalamDbError> { @@ -75,17 +74,17 @@ Ok(transaction_id) } - pub async fn commit(&mut self, app_context: &AppContext) -> Result<TransactionId, KalamDbError> { + pub async fn commit( + &mut self, + app_context: &AppContext, + ) -> Result<TransactionId, KalamDbError> { let transaction_id = self.active_transaction_id.clone().ok_or_else(|| { KalamDbError::InvalidOperation( "COMMIT requires an active explicit SQL transaction".to_string(), ) })?; - let committed = app_context - .transaction_coordinator() - .commit(&transaction_id) - .await?; + let committed = app_context.transaction_coordinator().commit(&transaction_id).await?; self.active_transaction_id = None; Ok(committed.transaction_id) } @@ -97,9 +96,7 @@ ) })?; - app_context - .transaction_coordinator() - .rollback(&transaction_id)?; + app_context.transaction_coordinator().rollback(&transaction_id)?; self.active_transaction_id = None; Ok(transaction_id) } @@ -114,4 +111,4 @@ self.rollback(app_context).map(Some) } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/src/sql/executor/sql_executor.rs b/backend/crates/kalamdb-core/src/sql/executor/sql_executor.rs index 3ab7b50d5..833e48dc5 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/sql_executor.rs +++ b/backend/crates/kalamdb-core/src/sql/executor/sql_executor.rs @@ -276,9 +276,7 @@ impl SqlExecutor { transaction_id.clone(), )), Arc::new(crate::transactions::CoordinatorMutationSink::new(coordinator)), - Arc::new(CoordinatorAccessValidator::new( - self.app_context.transaction_coordinator(), - )), + Arc::new(CoordinatorAccessValidator::new(self.app_context.transaction_coordinator())), ))) } @@ -287,7 +285,8 @@ impl SqlExecutor { exec_ctx: &ExecutionContext, ) -> Result { let session = exec_ctx.create_session_with_user(); - let Some(transaction_query_context) = self.transaction_query_context_for_request(exec_ctx)? + let Some(transaction_query_context) = + self.transaction_query_context_for_request(exec_ctx)? else { return Ok(session); }; @@ -305,13 +304,12 @@ impl SqlExecutor { &self, exec_ctx: &ExecutionContext, ) -> Result<ExecutionResult, KalamDbError> { - let mut request_state = RequestTransactionState::from_execution_context(exec_ctx)?.ok_or_else( - || { + let mut request_state = RequestTransactionState::from_execution_context(exec_ctx)? + .ok_or_else(|| { KalamDbError::InvalidOperation( "BEGIN requires a request-scoped execution context".to_string(), ) - }, - )?; + })?; request_state.sync_from_coordinator(&self.app_context); let transaction_id = request_state.begin(&self.app_context)?; Ok(ExecutionResult::Success { @@ -323,13 +321,12 @@ &self, exec_ctx: &ExecutionContext, ) -> Result<ExecutionResult, KalamDbError> { - let mut request_state = RequestTransactionState::from_execution_context(exec_ctx)?.ok_or_else( - || { + let mut request_state = RequestTransactionState::from_execution_context(exec_ctx)?
+ .ok_or_else(|| { KalamDbError::InvalidOperation( "COMMIT requires a request-scoped execution context".to_string(), ) - }, - )?; + })?; request_state.sync_from_coordinator(&self.app_context); let transaction_id = request_state.commit(&self.app_context).await?; Ok(ExecutionResult::Success { @@ -341,13 +338,12 @@ &self, exec_ctx: &ExecutionContext, ) -> Result<ExecutionResult, KalamDbError> { - let mut request_state = RequestTransactionState::from_execution_context(exec_ctx)?.ok_or_else( - || { + let mut request_state = RequestTransactionState::from_execution_context(exec_ctx)? + .ok_or_else(|| { KalamDbError::InvalidOperation( "ROLLBACK requires a request-scoped execution context".to_string(), ) - }, - )?; + })?; request_state.sync_from_coordinator(&self.app_context); let transaction_id = request_state.rollback(&self.app_context)?; Ok(ExecutionResult::Success { @@ -439,8 +435,7 @@ exec_ctx, table_id, transaction_id, - )? - { + )? { Some(counts) => Ok(Some( counts .into_iter() @@ -553,7 +548,9 @@ // Step 2: Route based on statement type let result = match classified.kind() { - SqlStatementKind::BeginTransaction => self.execute_begin_transaction(exec_ctx).await, + SqlStatementKind::BeginTransaction => { + self.execute_begin_transaction(exec_ctx).await + }, SqlStatementKind::CommitTransaction => { self.execute_commit_transaction(exec_ctx).await }, @@ -717,7 +714,8 @@ load_err ); } - let retry_session = self.create_session_with_transaction_context(exec_ctx)?; + let retry_session = + self.create_session_with_transaction_context(exec_ctx)?; retry_session .sql(execution_sql) .await @@ -1005,9 +1003,7 @@ let ordered_template = apply_default_order_by(planned_df.logical_plan().clone(), &self.app_context) .await?; - self.sql_cache_registry - .plan_cache() - .insert(cache_key, ordered_template.clone()); + self.sql_cache_registry.plan_cache().insert(cache_key, ordered_template.clone()); let executable_plan = if params.is_empty() { ordered_template diff --git a/backend/crates/kalamdb-core/src/sql/executor/transaction_batch_insert.rs b/backend/crates/kalamdb-core/src/sql/executor/transaction_batch_insert.rs index 23d44dfc0..b2f894493 100644 --- a/backend/crates/kalamdb-core/src/sql/executor/transaction_batch_insert.rs +++ b/backend/crates/kalamdb-core/src/sql/executor/transaction_batch_insert.rs @@ -2,13 +2,13 @@ use crate::app_context::AppContext; use crate::error::KalamDbError; use crate::schema_registry::CachedTableData; use crate::sql::plan_cache::{ - FastInsertDefaultEntry, FastInsertDefaultTemplate, FastInsertMetadata, - InsertMetadataCacheKey, SqlCacheRegistry, + FastInsertDefaultEntry, FastInsertDefaultTemplate, FastInsertMetadata, InsertMetadataCacheKey, + SqlCacheRegistry, }; use crate::sql::ExecutionContext; use chrono::Utc; -use kalamdb_commons::conversions::arrow_json_conversion::coerce_rows; use datafusion::scalar::ScalarValue; +use kalamdb_commons::conversions::arrow_json_conversion::coerce_rows; use kalamdb_commons::conversions::json_value_to_scalar; use kalamdb_commons::ids::SnowflakeGenerator; use kalamdb_commons::models::rows::row::Row; @@ -43,16 +43,10 @@ fn build_insert_metadata( .collect(); let column_names = if requested_columns.is_empty() { - available_columns - .iter() - .map(|column| (*column).to_string()) - .collect() + available_columns.iter().map(|column| (*column).to_string()).collect() } else { for column_name in requested_columns { - if !available_columns - .iter() - .any(|candidate| *candidate ==
column_name.as_str()) - { + if !available_columns.iter().any(|candidate| *candidate == column_name.as_str()) { return Err(KalamDbError::InvalidOperation(format!( "Column '{}' does not exist", column_name @@ -217,12 +211,14 @@ pub(crate) fn try_build_literal_insert_rows( let requested_columns: Vec<String> = insert.columns.iter().map(|ident| ident.value.clone()).collect(); - let metadata_cache_key = InsertMetadataCacheKey::new(table_id.clone(), requested_columns.clone()); + let metadata_cache_key = + InsertMetadataCacheKey::new(table_id.clone(), requested_columns.clone()); let insert_metadata = match sql_cache_registry.insert_metadata_cache().get(&metadata_cache_key) { Some(metadata) => metadata, None => { - let metadata = Arc::new(build_insert_metadata(&requested_columns, cached_table.as_ref())?); + let metadata = + Arc::new(build_insert_metadata(&requested_columns, cached_table.as_ref())?); sql_cache_registry .insert_metadata_cache() .insert_arc(metadata_cache_key, Arc::clone(&metadata)); @@ -299,9 +295,7 @@ fn prepare_statement_default( "CURRENT_USER() default requires an authenticated username".to_string(), ) })?; - Ok(PreparedDefaultValue::Constant(ScalarValue::Utf8(Some( - username.to_string(), - )))) + Ok(PreparedDefaultValue::Constant(ScalarValue::Utf8(Some(username.to_string())))) }, FastInsertDefaultTemplate::SnowflakeId => { Ok(PreparedDefaultValue::Volatile(VolatileDefaultFunction::SnowflakeId)) }, @@ -385,12 +379,14 @@ pub(crate) fn try_batch_inserts_in_transaction( let requested_columns: Vec<String> = first_insert.columns.iter().map(|ident| ident.value.clone()).collect(); - let metadata_cache_key = InsertMetadataCacheKey::new(table_id.clone(), requested_columns.clone()); + let metadata_cache_key = + InsertMetadataCacheKey::new(table_id.clone(), requested_columns.clone()); let insert_metadata = match sql_cache_registry.insert_metadata_cache().get(&metadata_cache_key) { Some(metadata) => metadata, None => { - let metadata = Arc::new(build_insert_metadata(&requested_columns, cached_table.as_ref())?); + let metadata = + Arc::new(build_insert_metadata(&requested_columns, cached_table.as_ref())?); sql_cache_registry .insert_metadata_cache() .insert_arc(metadata_cache_key, Arc::clone(&metadata)); @@ -469,4 +465,4 @@ ); Ok(Some(per_statement_counts)) -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/src/sql/plan_cache.rs b/backend/crates/kalamdb-core/src/sql/plan_cache.rs index fea8b1a1f..a13459fad 100644 --- a/backend/crates/kalamdb-core/src/sql/plan_cache.rs +++ b/backend/crates/kalamdb-core/src/sql/plan_cache.rs @@ -1,5 +1,4 @@ pub use kalamdb_plan_cache::{ - FastInsertDefaultEntry, FastInsertDefaultTemplate, FastInsertMetadata, - InsertMetadataCacheKey, PlanCache, PlanCacheKey, SqlCacheRegistry, - SqlCacheRegistryConfig, + FastInsertDefaultEntry, FastInsertDefaultTemplate, FastInsertMetadata, InsertMetadataCacheKey, + PlanCache, PlanCacheKey, SqlCacheRegistry, SqlCacheRegistryConfig, }; diff --git a/backend/crates/kalamdb-core/src/transactions/binding.rs b/backend/crates/kalamdb-core/src/transactions/binding.rs index 5a6663d1d..1e02b7c99 100644 --- a/backend/crates/kalamdb-core/src/transactions/binding.rs +++ b/backend/crates/kalamdb-core/src/transactions/binding.rs @@ -9,4 +9,4 @@ pub enum TransactionRaftBinding { group_id: GroupId, leader_node_id: NodeId, }, -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/src/transactions/commit_result.rs
b/backend/crates/kalamdb-core/src/transactions/commit_result.rs index ada4c2c60..482084a48 100644 --- a/backend/crates/kalamdb-core/src/transactions/commit_result.rs +++ b/backend/crates/kalamdb-core/src/transactions/commit_result.rs @@ -99,4 +99,4 @@ impl TransactionCommitResult { emitted_side_effects: TransactionSideEffects::default(), } } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/src/transactions/commit_sequence.rs b/backend/crates/kalamdb-core/src/transactions/commit_sequence.rs index 86fe47a4e..49df61bfb 100644 --- a/backend/crates/kalamdb-core/src/transactions/commit_sequence.rs +++ b/backend/crates/kalamdb-core/src/transactions/commit_sequence.rs @@ -65,4 +65,4 @@ impl CommitSequenceSource for CommitSequenceTracker { fn allocate_next(&self) -> u64 { self.allocate_next() } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/src/transactions/coordinator.rs b/backend/crates/kalamdb-core/src/transactions/coordinator.rs index 3a939503d..52bbcde00 100644 --- a/backend/crates/kalamdb-core/src/transactions/coordinator.rs +++ b/backend/crates/kalamdb-core/src/transactions/coordinator.rs @@ -7,7 +7,9 @@ use tokio::runtime::Handle; use tokio::time::MissedTickBehavior; use uuid::Uuid; -use kalamdb_commons::models::{NodeId, TableId, TransactionId, TransactionOrigin, TransactionState, UserId}; +use kalamdb_commons::models::{ + NodeId, TableId, TransactionId, TransactionOrigin, TransactionState, UserId, +}; use kalamdb_commons::TableType; use kalamdb_raft::RaftExecutor; use kalamdb_sharding::{GroupId, ShardRouter}; @@ -129,10 +131,9 @@ impl TransactionCoordinator { mutation.user_id.as_ref(), )?; - let mut handle = self - .active_by_id - .get_mut(transaction_id) - .ok_or_else(|| KalamDbError::NotFound(format!("transaction '{}' not found", transaction_id)))?; + let mut handle = self.active_by_id.get_mut(transaction_id).ok_or_else(|| { + KalamDbError::NotFound(format!("transaction '{}' not found", transaction_id)) + })?; if !handle.state.is_open() { return Err(Self::state_error(transaction_id, handle.state, "stage writes in")); @@ -213,11 +214,7 @@ impl TransactionCoordinator { total_estimated_bytes = total_estimated_bytes.saturating_add(estimated_bytes); touched_tables.insert(mutation.table_id.clone()); - let target = ( - mutation.table_id.clone(), - mutation.table_type, - mutation.user_id.clone(), - ); + let target = (mutation.table_id.clone(), mutation.table_type, mutation.user_id.clone()); if validated_targets.insert(target) { self.validate_table_access( transaction_id, @@ -228,10 +225,9 @@ impl TransactionCoordinator { } } - let mut handle = self - .active_by_id - .get_mut(transaction_id) - .ok_or_else(|| KalamDbError::NotFound(format!("transaction '{}' not found", transaction_id)))?; + let mut handle = self.active_by_id.get_mut(transaction_id).ok_or_else(|| { + KalamDbError::NotFound(format!("transaction '{}' not found", transaction_id)) + })?; if !handle.state.is_open() { return Err(Self::state_error(transaction_id, handle.state, "stage writes in")); @@ -285,18 +281,16 @@ impl TransactionCoordinator { transaction_id: &TransactionId, ) -> Result { let owner_key = { - let mut handle = self - .active_by_id - .get_mut(transaction_id) - .ok_or_else(|| { - KalamDbError::NotFound(format!("transaction '{}' not found", transaction_id)) - })?; + let mut handle = self.active_by_id.get_mut(transaction_id).ok_or_else(|| { + KalamDbError::NotFound(format!("transaction '{}' not found", transaction_id)) + })?; if !handle.state.is_open() { return 
Err(Self::state_error(transaction_id, handle.state, "commit")); } - if let Err(error) = self.ensure_bound_leadership_is_current(transaction_id, &mut handle) { + if let Err(error) = self.ensure_bound_leadership_is_current(transaction_id, &mut handle) + { drop(handle); self.mark_transaction_aborted(transaction_id); return Err(error); @@ -355,9 +349,8 @@ impl TransactionCoordinator { transaction_id )) })?; - let (notifications_sent, manifest_updates, publisher_events) = response - .committed_side_effect_counts() - .unwrap_or((0, 0, 0)); + let (notifications_sent, manifest_updates, publisher_events) = + response.committed_side_effect_counts().unwrap_or((0, 0, 0)); self.commit_sequence_tracker.observe_committed(committed_commit_seq); @@ -385,16 +378,15 @@ impl TransactionCoordinator { pub fn rollback(&self, transaction_id: &TransactionId) -> Result<(), KalamDbError> { let owner_key = { - let mut handle = self - .active_by_id - .get_mut(transaction_id) - .ok_or_else(|| { - KalamDbError::NotFound(format!("transaction '{}' not found", transaction_id)) - })?; + let mut handle = self.active_by_id.get_mut(transaction_id).ok_or_else(|| { + KalamDbError::NotFound(format!("transaction '{}' not found", transaction_id)) + })?; if matches!( handle.state, - TransactionState::TimedOut | TransactionState::Aborted | TransactionState::RolledBack + TransactionState::TimedOut + | TransactionState::Aborted + | TransactionState::RolledBack ) { let owner_key = handle.owner_key; drop(handle); @@ -439,10 +431,9 @@ impl TransactionCoordinator { transaction_id: &TransactionId, raft_binding: TransactionRaftBinding, ) -> Result<(), KalamDbError> { - let mut handle = self - .active_by_id - .get_mut(transaction_id) - .ok_or_else(|| KalamDbError::NotFound(format!("transaction '{}' not found", transaction_id)))?; + let mut handle = self.active_by_id.get_mut(transaction_id).ok_or_else(|| { + KalamDbError::NotFound(format!("transaction '{}' not found", transaction_id)) + })?; handle.raft_binding = raft_binding; Ok(()) } @@ -454,10 +445,9 @@ impl TransactionCoordinator { table_type: TableType, user_id: Option<&UserId>, ) -> Result<(), KalamDbError> { - let mut handle = self - .active_by_id - .get_mut(transaction_id) - .ok_or_else(|| KalamDbError::NotFound(format!("transaction '{}' not found", transaction_id)))?; + let mut handle = self.active_by_id.get_mut(transaction_id).ok_or_else(|| { + KalamDbError::NotFound(format!("transaction '{}' not found", transaction_id)) + })?; if !handle.state.is_open() { return Err(Self::state_error(transaction_id, handle.state, "access")); @@ -581,10 +571,7 @@ impl TransactionCoordinator { } } - async fn run_timeout_sweeper( - coordinator: Weak, - sweep_interval: Duration, - ) { + async fn run_timeout_sweeper(coordinator: Weak, sweep_interval: Duration) { let mut interval = tokio::time::interval(sweep_interval); interval.set_missed_tick_behavior(MissedTickBehavior::Delay); @@ -607,7 +594,8 @@ impl TransactionCoordinator { .iter() .filter_map(|entry| { let handle = entry.value(); - if handle.state.is_open() && now.duration_since(handle.last_activity_at) >= timeout { + if handle.state.is_open() && now.duration_since(handle.last_activity_at) >= timeout + { Some(( handle.transaction_id.clone(), now.duration_since(handle.started_at), @@ -624,12 +612,7 @@ impl TransactionCoordinator { } } - fn timeout_transaction( - &self, - transaction_id: &TransactionId, - age: Duration, - idle: Duration, - ) { + fn timeout_transaction(&self, transaction_id: &TransactionId, age: Duration, idle: Duration) { let 
Some(mut handle) = self.active_by_id.get_mut(transaction_id) else { return; }; @@ -689,7 +672,8 @@ impl TransactionCoordinator { return Ok(None); } - let router = ShardRouter::from_optional_cluster_config(self.app_context.config().cluster.as_ref()); + let router = + ShardRouter::from_optional_cluster_config(self.app_context.config().cluster.as_ref()); let group_id = match table_type { TableType::User => router.user_group_id(user_id.ok_or_else(|| { KalamDbError::InvalidOperation(format!( @@ -737,7 +721,8 @@ impl TransactionCoordinator { let TransactionRaftBinding::BoundCluster { group_id, leader_node_id, - } = handle.raft_binding else { + } = handle.raft_binding + else { return Ok(()); }; @@ -769,4 +754,4 @@ impl TransactionCoordinator { transaction_id, group_id, prior_leader_node_id, current_leader )) } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/src/transactions/handle.rs b/backend/crates/kalamdb-core/src/transactions/handle.rs index b14f0c799..3130e886b 100644 --- a/backend/crates/kalamdb-core/src/transactions/handle.rs +++ b/backend/crates/kalamdb-core/src/transactions/handle.rs @@ -76,8 +76,7 @@ impl TransactionHandle { table_ids: I, write_count: usize, write_bytes: usize, - ) - where + ) where I: IntoIterator, { self.last_activity_at = Instant::now(); @@ -94,4 +93,4 @@ impl TransactionHandle { self.state = state; self.last_activity_at = Instant::now(); } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/src/transactions/mod.rs b/backend/crates/kalamdb-core/src/transactions/mod.rs index f5595fb9a..d05220e56 100644 --- a/backend/crates/kalamdb-core/src/transactions/mod.rs +++ b/backend/crates/kalamdb-core/src/transactions/mod.rs @@ -9,11 +9,11 @@ pub mod owner; pub mod staged_mutation; pub mod write_set; +pub use binding::TransactionRaftBinding; pub use commit_result::{ commit_side_effect_plan_from_write_set, CommitSideEffectPlan, FanoutDispatchPlan, FanoutOwnerScope, TransactionCommitOutcome, TransactionCommitResult, TransactionSideEffects, }; -pub use binding::TransactionRaftBinding; pub use commit_sequence::CommitSequenceTracker; pub use coordinator::TransactionCoordinator; pub use handle::TransactionHandle; @@ -25,4 +25,4 @@ pub use owner::ExecutionOwnerKey; pub use staged_mutation::StagedMutation; pub use write_set::TransactionWriteSet; -pub use kalamdb_transactions::{TransactionOverlay, TransactionOverlayEntry}; \ No newline at end of file +pub use kalamdb_transactions::{TransactionOverlay, TransactionOverlayEntry}; diff --git a/backend/crates/kalamdb-core/src/transactions/overlay_view.rs b/backend/crates/kalamdb-core/src/transactions/overlay_view.rs index 0bc26f010..f82eb0c2d 100644 --- a/backend/crates/kalamdb-core/src/transactions/overlay_view.rs +++ b/backend/crates/kalamdb-core/src/transactions/overlay_view.rs @@ -124,4 +124,4 @@ fn map_transaction_access_error(error: crate::error::KalamDbError) -> Transactio }, other => TransactionAccessError::invalid_operation(other.to_string()), } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/src/transactions/owner.rs b/backend/crates/kalamdb-core/src/transactions/owner.rs index 91347c789..9972a4128 100644 --- a/backend/crates/kalamdb-core/src/transactions/owner.rs +++ b/backend/crates/kalamdb-core/src/transactions/owner.rs @@ -14,10 +14,7 @@ fn parse_u32_decimal(value: &[u8], session_id: &str) -> Result Result { b'a'..=b'f' => (byte - b'a' + 10) as u64, b'A'..=b'F' => (byte - b'A' + 10) as u64, _ => { - return Err(invalid_pg_session_id( - session_id, - 
"config hash must be hexadecimal", - )); + return Err(invalid_pg_session_id(session_id, "config hash must be hexadecimal")); }, }; @@ -81,10 +75,7 @@ impl ExecutionOwnerKey { pub fn from_pg_session_id(session_id: &str) -> Result { let bytes = session_id.as_bytes(); if !bytes.starts_with(b"pg-") { - return Err(invalid_pg_session_id( - session_id, - "expected pg--", - )); + return Err(invalid_pg_session_id(session_id, "expected pg--")); } let rest = &bytes[3..]; @@ -149,8 +140,6 @@ mod tests { #[test] fn rejects_non_numeric_backend_pid() { let error = ExecutionOwnerKey::from_pg_session_id("pg-abc-deadbeef").unwrap_err(); - assert!(error - .to_string() - .contains("backend pid must be numeric")); + assert!(error.to_string().contains("backend pid must be numeric")); } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/src/transactions/staged_mutation.rs b/backend/crates/kalamdb-core/src/transactions/staged_mutation.rs index e6ad78737..2cbfe25ff 100644 --- a/backend/crates/kalamdb-core/src/transactions/staged_mutation.rs +++ b/backend/crates/kalamdb-core/src/transactions/staged_mutation.rs @@ -1 +1 @@ -pub use kalamdb_transactions::StagedMutation; \ No newline at end of file +pub use kalamdb_transactions::StagedMutation; diff --git a/backend/crates/kalamdb-core/src/transactions/write_set.rs b/backend/crates/kalamdb-core/src/transactions/write_set.rs index e7a0da3d5..7ee23c55d 100644 --- a/backend/crates/kalamdb-core/src/transactions/write_set.rs +++ b/backend/crates/kalamdb-core/src/transactions/write_set.rs @@ -6,7 +6,9 @@ use super::{StagedMutation, TransactionOverlay}; fn scoped_table_key(user_id: Option<&UserId>, primary_key: &str) -> String { match user_id { - Some(user_id) => format!("u{}:{}:{}", user_id.as_str().len(), user_id.as_str(), primary_key), + Some(user_id) => { + format!("u{}:{}:{}", user_id.as_str().len(), user_id.as_str(), primary_key) + }, None => format!("s:{}", primary_key), } } @@ -79,7 +81,11 @@ impl TransactionWriteSet { &self.ordered_mutations } - pub fn latest_mutation(&self, table_id: &TableId, primary_key: &str) -> Option<&StagedMutation> { + pub fn latest_mutation( + &self, + table_id: &TableId, + primary_key: &str, + ) -> Option<&StagedMutation> { self.latest_mutation_for_scope(table_id, None, primary_key) } @@ -114,8 +120,8 @@ mod tests { use super::*; use kalamdb_commons::models::rows::Row; - use kalamdb_commons::models::{OperationKind, TableName}; use kalamdb_commons::models::{NamespaceId, TableId}; + use kalamdb_commons::models::{OperationKind, TableName}; use kalamdb_commons::TableType; fn row(values: &[(&'static str, ScalarValue)]) -> Row { @@ -178,14 +184,8 @@ mod tests { let mut inserted_values = BTreeMap::new(); inserted_values.insert("id".to_string(), ScalarValue::Int64(Some(1))); - inserted_values.insert( - "name".to_string(), - ScalarValue::Utf8(Some("before".to_string())), - ); - inserted_values.insert( - "color".to_string(), - ScalarValue::Utf8(Some("red".to_string())), - ); + inserted_values.insert("name".to_string(), ScalarValue::Utf8(Some("before".to_string()))); + inserted_values.insert("color".to_string(), ScalarValue::Utf8(Some("red".to_string()))); write_set.stage(StagedMutation::new( transaction_id.clone(), table_id.clone(), @@ -198,10 +198,7 @@ mod tests { )); let mut updated_values = BTreeMap::new(); - updated_values.insert( - "name".to_string(), - ScalarValue::Utf8(Some("after".to_string())), - ); + updated_values.insert("name".to_string(), ScalarValue::Utf8(Some("after".to_string()))); 
write_set.stage(StagedMutation::new( transaction_id, table_id.clone(), @@ -242,7 +239,10 @@ Some(first_user.clone()), OperationKind::Insert, "1", - row(&[("id", ScalarValue::Int64(Some(1))), ("name", ScalarValue::Utf8(Some("alice".to_string())))]), + row(&[ + ("id", ScalarValue::Int64(Some(1))), + ("name", ScalarValue::Utf8(Some("alice".to_string()))), + ]), false, )); write_set.stage(StagedMutation::new( @@ -252,7 +252,10 @@ Some(second_user.clone()), OperationKind::Insert, "1", - row(&[("id", ScalarValue::Int64(Some(1))), ("name", ScalarValue::Utf8(Some("bob".to_string())))]), + row(&[ + ("id", ScalarValue::Int64(Some(1))), + ("name", ScalarValue::Utf8(Some("bob".to_string()))), + ]), false, )); @@ -276,11 +279,9 @@ .get("name"), Some(&ScalarValue::Utf8(Some("bob".to_string()))) ); - assert!(write_set - .latest_mutation_for_scope(&table_id, Some(&first_user), "1") - .is_some()); + assert!(write_set.latest_mutation_for_scope(&table_id, Some(&first_user), "1").is_some()); assert!(write_set .latest_mutation_for_scope(&table_id, Some(&second_user), "1") .is_some()); } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/src/views/system_schema_provider.rs b/backend/crates/kalamdb-core/src/views/system_schema_provider.rs index 2ee604edf..a36f78e05 100644 --- a/backend/crates/kalamdb-core/src/views/system_schema_provider.rs +++ b/backend/crates/kalamdb-core/src/views/system_schema_provider.rs @@ -26,8 +26,8 @@ use kalamdb_views::columns_view::create_columns_view_provider; use kalamdb_views::datatypes::{DatatypesTableProvider, DatatypesView}; use kalamdb_views::describe::DescribeView; use kalamdb_views::live::{LiveTableProvider, LiveView}; -use kalamdb_views::sessions::{SessionsTableProvider, SessionsView}; use kalamdb_views::server_logs::create_server_logs_provider; +use kalamdb_views::sessions::{SessionsTableProvider, SessionsView}; use kalamdb_views::settings::{SettingsTableProvider, SettingsView}; use kalamdb_views::stats::{StatsTableProvider, StatsView}; use kalamdb_views::tables_view::create_tables_view_provider; diff --git a/backend/crates/kalamdb-core/tests/autocommit_perf_regression.rs b/backend/crates/kalamdb-core/tests/autocommit_perf_regression.rs index c6f46faf5..7d019606a 100644 --- a/backend/crates/kalamdb-core/tests/autocommit_perf_regression.rs +++ b/backend/crates/kalamdb-core/tests/autocommit_perf_regression.rs @@ -167,7 +167,11 @@ fn create_simple_app_context() -> (Arc<AppContext>, TestDb) { (app_ctx, test_db) } -fn make_shared_insert_request(table_id: &TableId, session_id: Option<&str>, id: i64) -> InsertRequest { +fn make_shared_insert_request( + table_id: &TableId, + session_id: Option<&str>, + id: i64, +) -> InsertRequest { InsertRequest { table_id: table_id.clone(), table_type: TableType::Shared, @@ -195,6 +199,7 @@ fn make_scan_request(table_id: &TableId, session_id: Option<&str>) -> ScanReques columns: vec![], limit: None, user_id: None, + filters: vec![], } } @@ -238,10 +243,7 @@ async fn measure_insert_round( .collect::<Vec<_>>(); let start = Instant::now(); for request in requests { - service - .execute_insert(request) - .await - .expect("autocommit insert succeeds"); + service.execute_insert(request).await.expect("autocommit insert succeeds"); } start.elapsed().as_nanos() / ops as u128 } @@ -252,15 +254,10 @@ async fn measure_scan_round( session_id: Option<&str>, ops: usize, ) -> u128 { - let requests = (0..ops) - .map(|_| make_scan_request(table_id, session_id)) - .collect::<Vec<_>>(); + let requests = (0..ops).map(|_|
make_scan_request(table_id, session_id)).collect::<Vec<_>>(); let start = Instant::now(); for request in requests { - service - .execute_scan(request) - .await - .expect("autocommit scan succeeds"); + service.execute_scan(request).await.expect("autocommit scan succeeds"); } start.elapsed().as_nanos() / ops as u128 } @@ -342,8 +339,8 @@ async fn measure_allocations_for_rejected_write( async fn idle_autocommit_transaction_checks_add_no_extra_allocations() { let (app_ctx, _test_db) = create_simple_app_context(); let service = OperationService::new(Arc::clone(&app_ctx)); - let scan_table = create_shared_table(&app_ctx, &unique_namespace("autocommit_alloc"), "items") - .await; + let scan_table = + create_shared_table(&app_ctx, &unique_namespace("autocommit_alloc"), "items").await; service .execute_scan(make_scan_request(&scan_table, None)) @@ -394,20 +391,13 @@ async fn autocommit_read_write_latency_regression_stays_within_five_percent() { let (app_ctx, _test_db) = create_cluster_app_context().await; let service = OperationService::new(Arc::clone(&app_ctx)); - let write_baseline_table = create_shared_table( - &app_ctx, - &unique_namespace("autocommit_write_base"), - "items", - ) - .await; - let write_candidate_table = create_shared_table( - &app_ctx, - &unique_namespace("autocommit_write_candidate"), - "items", - ) - .await; - let read_table = create_shared_table(&app_ctx, &unique_namespace("autocommit_read"), "items") - .await; + let write_baseline_table = + create_shared_table(&app_ctx, &unique_namespace("autocommit_write_base"), "items").await; + let write_candidate_table = + create_shared_table(&app_ctx, &unique_namespace("autocommit_write_candidate"), "items") + .await; + let read_table = + create_shared_table(&app_ctx, &unique_namespace("autocommit_read"), "items").await; seed_shared_table(&service, &read_table, 10_000, READ_SEED_ROWS).await; @@ -478,7 +468,8 @@ async fn autocommit_read_write_latency_regression_stays_within_five_percent() { for round in 0..READ_ROUNDS { if round % 2 == 0 { - read_baseline_samples.push(measure_scan_round(&service, &read_table, None, READ_OPS_PER_ROUND).await); + read_baseline_samples + .push(measure_scan_round(&service, &read_table, None, READ_OPS_PER_ROUND).await); read_candidate_samples.push( measure_scan_round( &service, @@ -498,7 +489,8 @@ ) .await, ); - read_baseline_samples.push(measure_scan_round(&service, &read_table, None, READ_OPS_PER_ROUND).await); + read_baseline_samples + .push(measure_scan_round(&service, &read_table, None, READ_OPS_PER_ROUND).await); } } @@ -518,4 +510,4 @@ assert!(app_ctx.transaction_coordinator().active_metrics().is_empty()); assert_regression("write", write_baseline_ns, write_candidate_ns); assert_regression("read", read_baseline_ns, read_candidate_ns); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/tests/snapshot_isolation.rs b/backend/crates/kalamdb-core/tests/snapshot_isolation.rs index ce5d5e51d..9288832af 100644 --- a/backend/crates/kalamdb-core/tests/snapshot_isolation.rs +++ b/backend/crates/kalamdb-core/tests/snapshot_isolation.rs @@ -87,6 +87,7 @@ async fn scan_names( columns: vec![], limit: None, user_id: None, + filters: vec![], }) .await .expect("scan succeeds"); @@ -143,4 +144,4 @@ async fn snapshot_isolation_hides_later_commits_from_open_transaction() { .rollback_transaction(session_b, &transaction_id) .await .expect("rollback
succeeds"); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/tests/sql_autocommit_regression.rs b/backend/crates/kalamdb-core/tests/sql_autocommit_regression.rs index e9b5d6f82..8d43b4fed 100644 --- a/backend/crates/kalamdb-core/tests/sql_autocommit_regression.rs +++ b/backend/crates/kalamdb-core/tests/sql_autocommit_regression.rs @@ -15,4 +15,4 @@ async fn sql_autocommit_still_writes_without_explicit_transaction() { select_names(&executor, &observer_ctx, &table_id).await, vec!["plain".to_string()] ); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/tests/sql_insert_transaction_semantics.rs b/backend/crates/kalamdb-core/tests/sql_insert_transaction_semantics.rs index 455085802..754d63356 100644 --- a/backend/crates/kalamdb-core/tests/sql_insert_transaction_semantics.rs +++ b/backend/crates/kalamdb-core/tests/sql_insert_transaction_semantics.rs @@ -18,10 +18,7 @@ async fn load_user_rows( user_id: &UserId, first_id: i64, second_id: i64, -) -> ( - kalamdb_tables::UserTableRow, - kalamdb_tables::UserTableRow, -) { +) -> (kalamdb_tables::UserTableRow, kalamdb_tables::UserTableRow) { let provider_arc = app_ctx .schema_registry() .get_provider(table_id) @@ -50,9 +47,11 @@ async fn load_user_rows( async fn multi_row_insert_statement_uses_one_internal_commit() { let (app_ctx, _test_db) = create_cluster_app_context().await; let user_id = UserId::from("sql-insert-batch-user"); - let table_id = create_user_table(&app_ctx, &unique_namespace("sql_insert_batch"), "items").await; + let table_id = + create_user_table(&app_ctx, &unique_namespace("sql_insert_batch"), "items").await; let executor = create_executor(app_ctx.clone()); - let exec_ctx = ExecutionContext::new(user_id.clone(), Role::User, app_ctx.base_session_context()); + let exec_ctx = + ExecutionContext::new(user_id.clone(), Role::User, app_ctx.base_session_context()); let sql = format!( "INSERT INTO {}.{} (id, name) VALUES (1, 'alpha'), (2, 'beta')", @@ -74,7 +73,8 @@ async fn separate_insert_statements_commit_independently() { let table_id = create_user_table(&app_ctx, &unique_namespace("sql_insert_separate"), "items").await; let executor = create_executor(app_ctx.clone()); - let exec_ctx = ExecutionContext::new(user_id.clone(), Role::User, app_ctx.base_session_context()); + let exec_ctx = + ExecutionContext::new(user_id.clone(), Role::User, app_ctx.base_session_context()); let first_insert = format!( "INSERT INTO {}.{} (id, name) VALUES (1, 'alpha')", @@ -122,4 +122,4 @@ async fn explicit_transaction_keeps_multiple_inserts_in_one_commit() { let (first_row, second_row) = load_user_rows(&app_ctx, &table_id, &user_id, 1, 2).await; assert_eq!(first_row._commit_seq, second_row._commit_seq); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/tests/sql_transaction_commit.rs b/backend/crates/kalamdb-core/tests/sql_transaction_commit.rs index 554f574cc..fc9454bdc 100644 --- a/backend/crates/kalamdb-core/tests/sql_transaction_commit.rs +++ b/backend/crates/kalamdb-core/tests/sql_transaction_commit.rs @@ -1,6 +1,8 @@ mod support; -use support::{execute_ok, insert_sql, observer_exec_ctx, request_exec_ctx, select_names, setup_shared_table}; +use support::{ + execute_ok, insert_sql, observer_exec_ctx, request_exec_ctx, select_names, setup_shared_table, +}; #[tokio::test] #[ntest::timeout(15000)] @@ -19,4 +21,4 @@ async fn sql_request_transaction_commit_persists_rows() { select_names(&executor, &observer_ctx, &table_id).await, vec!["alpha".to_string(), "beta".to_string()] ); -} \ No 
newline at end of file +} diff --git a/backend/crates/kalamdb-core/tests/sql_transaction_multi_block.rs b/backend/crates/kalamdb-core/tests/sql_transaction_multi_block.rs index b891ce73f..c01deec9f 100644 --- a/backend/crates/kalamdb-core/tests/sql_transaction_multi_block.rs +++ b/backend/crates/kalamdb-core/tests/sql_transaction_multi_block.rs @@ -1,12 +1,13 @@ mod support; -use support::{execute_ok, insert_sql, observer_exec_ctx, request_exec_ctx, select_names, setup_shared_table}; +use support::{ + execute_ok, insert_sql, observer_exec_ctx, request_exec_ctx, select_names, setup_shared_table, +}; #[tokio::test] #[ntest::timeout(15000)] async fn sql_request_supports_multiple_sequential_transaction_blocks() { - let (app_ctx, executor, table_id, _test_db) = - setup_shared_table("sql_tx_multi", "items").await; + let (app_ctx, executor, table_id, _test_db) = setup_shared_table("sql_tx_multi", "items").await; let request_ctx = request_exec_ctx(&app_ctx, "sql-request-multi"); execute_ok(&executor, &request_ctx, "BEGIN").await; @@ -22,4 +23,4 @@ async fn sql_request_supports_multiple_sequential_transaction_blocks() { select_names(&executor, &observer_ctx, &table_id).await, vec!["first".to_string()] ); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/tests/sql_transaction_nested_begin.rs b/backend/crates/kalamdb-core/tests/sql_transaction_nested_begin.rs index 8a570038b..3731d9f72 100644 --- a/backend/crates/kalamdb-core/tests/sql_transaction_nested_begin.rs +++ b/backend/crates/kalamdb-core/tests/sql_transaction_nested_begin.rs @@ -13,4 +13,4 @@ async fn sql_request_rejects_nested_begin() { let error = execute_err(&executor, &request_ctx, "BEGIN").await; assert!(error.contains("already has an active transaction")); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/tests/sql_transaction_rollback.rs b/backend/crates/kalamdb-core/tests/sql_transaction_rollback.rs index 3230fc578..42ebd9bf6 100644 --- a/backend/crates/kalamdb-core/tests/sql_transaction_rollback.rs +++ b/backend/crates/kalamdb-core/tests/sql_transaction_rollback.rs @@ -1,6 +1,8 @@ mod support; -use support::{execute_ok, insert_sql, observer_exec_ctx, request_exec_ctx, select_names, setup_shared_table}; +use support::{ + execute_ok, insert_sql, observer_exec_ctx, request_exec_ctx, select_names, setup_shared_table, +}; #[tokio::test] #[ntest::timeout(15000)] @@ -16,4 +18,4 @@ async fn sql_request_transaction_rollback_discards_rows() { let observer_ctx = observer_exec_ctx(&app_ctx); assert!(select_names(&executor, &observer_ctx, &table_id).await.is_empty()); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/tests/sql_transaction_unclosed.rs b/backend/crates/kalamdb-core/tests/sql_transaction_unclosed.rs index 7a37ea602..deda185d4 100644 --- a/backend/crates/kalamdb-core/tests/sql_transaction_unclosed.rs +++ b/backend/crates/kalamdb-core/tests/sql_transaction_unclosed.rs @@ -1,6 +1,9 @@ mod support; -use support::{execute_ok, insert_sql, observer_exec_ctx, request_exec_ctx, request_transaction_state, select_names, setup_shared_table}; +use support::{ + execute_ok, insert_sql, observer_exec_ctx, request_exec_ctx, request_transaction_state, + select_names, setup_shared_table, +}; #[tokio::test] #[ntest::timeout(15000)] @@ -18,4 +21,4 @@ async fn request_cleanup_rolls_back_unclosed_sql_transaction() { let observer_ctx = observer_exec_ctx(&app_ctx); assert!(select_names(&executor, &observer_ctx, &table_id).await.is_empty()); -} \ No newline at end of file +} diff 
--git a/backend/crates/kalamdb-core/tests/support/mod.rs b/backend/crates/kalamdb-core/tests/support/mod.rs index 1bbf4d590..2564aaf9b 100644 --- a/backend/crates/kalamdb-core/tests/support/mod.rs +++ b/backend/crates/kalamdb-core/tests/support/mod.rs @@ -1,8 +1,8 @@ #![allow(dead_code)] use std::collections::BTreeMap; -use std::sync::Arc; use std::sync::atomic::{AtomicU16, Ordering}; +use std::sync::Arc; use chrono::Utc; use datafusion_common::ScalarValue; @@ -75,11 +75,7 @@ pub async fn create_cluster_app_context_with_config( config, ); app_ctx.executor().start().await.expect("start raft"); - app_ctx - .executor() - .initialize_cluster() - .await - .expect("initialize raft cluster"); + app_ctx.executor().initialize_cluster().await.expect("initialize raft cluster"); app_ctx.wire_raft_appliers(); let storages = app_ctx.system_tables().storages(); @@ -282,10 +278,8 @@ pub fn row(id: i64, name: &str) -> Row { ])) } -pub fn request_transaction_state( - exec_ctx: &ExecutionContext, -) -> RequestTransactionState { +pub fn request_transaction_state(exec_ctx: &ExecutionContext) -> RequestTransactionState { RequestTransactionState::from_execution_context(exec_ctx) .expect("request transaction state") .expect("request transaction state present") -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/tests/system_transactions_view.rs b/backend/crates/kalamdb-core/tests/system_transactions_view.rs index a8961f80b..38a273519 100644 --- a/backend/crates/kalamdb-core/tests/system_transactions_view.rs +++ b/backend/crates/kalamdb-core/tests/system_transactions_view.rs @@ -6,9 +6,9 @@ use std::time::Duration; use datafusion_common::ScalarValue; use kalamdb_commons::conversions::arrow_json_conversion::record_batch_to_json_rows; -use kalamdb_commons::models::KalamCellValue; -use kalamdb_commons::models::rows::Row; use kalamdb_commons::models::pg_operations::InsertRequest; +use kalamdb_commons::models::rows::Row; +use kalamdb_commons::models::KalamCellValue; use kalamdb_commons::models::TransactionId; use kalamdb_commons::TableType; use kalamdb_configs::ServerConfig; @@ -48,14 +48,13 @@ fn string_field(row: &HashMap, field: &str) -> String { fn i64_field(row: &HashMap, field: &str) -> i64 { row.get(field) - .and_then(|value| value.as_i64().or_else(|| value.as_str().and_then(|raw| raw.parse().ok()))) + .and_then(|value| { + value.as_i64().or_else(|| value.as_str().and_then(|raw| raw.parse().ok())) + }) .unwrap_or_else(|| panic!("missing i64 field {field}: {row:?}")) } -fn optional_string_field( - row: &HashMap, - field: &str, -) -> Option<String> { +fn optional_string_field(row: &HashMap, field: &str) -> Option<String> { row.get(field).and_then(|value| value.as_str()).map(ToString::to_string) } @@ -116,8 +115,8 @@ async fn rollback_transaction(service: &KalamPgService, session_id: &str, transa #[ntest::timeout(10000)] async fn system_transactions_shows_active_pg_and_sql_transactions_while_sessions_remain_pg_only() { let (app_ctx, _test_db) = create_cluster_app_context().await; - let table_id = create_shared_table(&app_ctx, &unique_namespace("system_transactions"), "items") - .await; + let table_id = + create_shared_table(&app_ctx, &unique_namespace("system_transactions"), "items").await; let executor = create_executor(Arc::clone(&app_ctx)); let operation_service = OperationService::new(Arc::clone(&app_ctx)); let observer_ctx = observer_exec_ctx(&app_ctx); @@ -200,12 +199,8 @@ async fn system_transactions_shows_active_pg_and_sql_transactions_while_sessions execute_ok(&executor, &request_ctx,
"ROLLBACK").await; let cleared_rows = json_rows( - execute_ok( - &executor, - &observer_ctx, - "SELECT transaction_id FROM system.transactions", - ) - .await, + execute_ok(&executor, &observer_ctx, "SELECT transaction_id FROM system.transactions") + .await, ); assert!(cleared_rows.is_empty()); } @@ -247,9 +242,7 @@ async fn stale_idle_pg_sessions_drop_out_of_sessions_view() { execute_ok( &executor, &observer_ctx, - &format!( - "SELECT session_id FROM system.sessions WHERE session_id = '{session_id}'" - ), + &format!("SELECT session_id FROM system.sessions WHERE session_id = '{session_id}'"), ) .await, ); @@ -313,10 +306,7 @@ async fn pg_passive_timeout_hides_stale_transaction_fields_from_sessions_view() assert_eq!(timed_out_session_rows.len(), 1); assert_eq!(string_field(&timed_out_session_rows[0], "state"), "idle"); assert_eq!(optional_string_field(&timed_out_session_rows[0], "transaction_id"), None); - assert_eq!( - optional_string_field(&timed_out_session_rows[0], "transaction_state"), - None - ); + assert_eq!(optional_string_field(&timed_out_session_rows[0], "transaction_state"), None); let timed_out_transaction_rows = json_rows( execute_ok( @@ -342,8 +332,8 @@ async fn pg_timeout_after_write_clears_sessions_and_transactions_views() { config.transaction_timeout_secs = 1; let (app_ctx, _test_db) = create_cluster_app_context_with_config(config).await; - let table_id = create_shared_table(&app_ctx, &unique_namespace("pg_timeout_write"), "items") - .await; + let table_id = + create_shared_table(&app_ctx, &unique_namespace("pg_timeout_write"), "items").await; let executor = create_executor(Arc::clone(&app_ctx)); let observer_ctx = observer_exec_ctx(&app_ctx); let session_id = "pg-4301-deadbeef"; @@ -359,12 +349,7 @@ async fn pg_timeout_after_write_clears_sessions_and_transactions_views() { let transaction_id = begin_transaction(&pg_service, session_id).await; pg_service - .insert(Request::new(shared_insert_request( - &table_id, - session_id, - 1, - "pending", - ))) + .insert(Request::new(shared_insert_request(&table_id, session_id, 1, "pending"))) .await .expect("initial staged write succeeds"); @@ -406,12 +391,7 @@ async fn pg_timeout_after_write_clears_sessions_and_transactions_views() { tokio::time::sleep(Duration::from_millis(2200)).await; let timeout_error = pg_service - .insert(Request::new(shared_insert_request( - &table_id, - session_id, - 2, - "late", - ))) + .insert(Request::new(shared_insert_request(&table_id, session_id, 2, "late"))) .await .expect_err("follow-up write should fail after timeout"); assert_eq!(timeout_error.code(), tonic::Code::FailedPrecondition); @@ -423,17 +403,11 @@ async fn pg_timeout_after_write_clears_sessions_and_transactions_views() { TransactionId::try_new(transaction_id.clone()).expect("transaction id should parse"); assert!(app_ctx.transaction_coordinator().active_for_owner(&owner_key).is_none()); assert!( - app_ctx - .transaction_coordinator() - .get_handle(&parsed_transaction_id) - .is_none(), + app_ctx.transaction_coordinator().get_handle(&parsed_transaction_id).is_none(), "timed out PG write should not leave a handle behind" ); assert!( - app_ctx - .transaction_coordinator() - .get_overlay(&parsed_transaction_id) - .is_none(), + app_ctx.transaction_coordinator().get_overlay(&parsed_transaction_id).is_none(), "timed out PG write should not leave staged rows behind" ); @@ -450,10 +424,7 @@ async fn pg_timeout_after_write_clears_sessions_and_transactions_views() { assert_eq!(cleared_session_rows.len(), 1); 
assert_eq!(string_field(&cleared_session_rows[0], "state"), "idle"); assert_eq!(optional_string_field(&cleared_session_rows[0], "transaction_id"), None); - assert_eq!( - optional_string_field(&cleared_session_rows[0], "transaction_state"), - None - ); + assert_eq!(optional_string_field(&cleared_session_rows[0], "transaction_state"), None); let cleared_transaction_rows = json_rows( execute_ok( @@ -479,8 +450,8 @@ async fn pg_timeout_after_read_clears_sessions_and_transactions_views() { config.transaction_timeout_secs = 1; let (app_ctx, _test_db) = create_cluster_app_context_with_config(config).await; - let table_id = create_shared_table(&app_ctx, &unique_namespace("pg_timeout_read"), "items") - .await; + let table_id = + create_shared_table(&app_ctx, &unique_namespace("pg_timeout_read"), "items").await; let executor = create_executor(Arc::clone(&app_ctx)); let observer_ctx = observer_exec_ctx(&app_ctx); let session_id = "pg-4302-cafef00d"; @@ -519,6 +490,7 @@ async fn pg_timeout_after_read_clears_sessions_and_transactions_views() { session_id: session_id.to_string(), user_id: None, columns: vec![], + filters: vec![], limit: None, })) .await @@ -536,10 +508,7 @@ async fn pg_timeout_after_read_clears_sessions_and_transactions_views() { TransactionId::try_new(transaction_id.clone()).expect("transaction id should parse"); assert!(app_ctx.transaction_coordinator().active_for_owner(&owner_key).is_none()); assert!( - app_ctx - .transaction_coordinator() - .get_handle(&parsed_transaction_id) - .is_none(), + app_ctx.transaction_coordinator().get_handle(&parsed_transaction_id).is_none(), "timed out PG read should not leave a handle behind" ); @@ -556,10 +525,7 @@ async fn pg_timeout_after_read_clears_sessions_and_transactions_views() { assert_eq!(cleared_session_rows.len(), 1); assert_eq!(string_field(&cleared_session_rows[0], "state"), "idle"); assert_eq!(optional_string_field(&cleared_session_rows[0], "transaction_id"), None); - assert_eq!( - optional_string_field(&cleared_session_rows[0], "transaction_state"), - None - ); + assert_eq!(optional_string_field(&cleared_session_rows[0], "transaction_state"), None); let cleared_transaction_rows = json_rows( execute_ok( diff --git a/backend/crates/kalamdb-core/tests/transaction_buffer_limit.rs b/backend/crates/kalamdb-core/tests/transaction_buffer_limit.rs index 7eaa93d86..84759d6e0 100644 --- a/backend/crates/kalamdb-core/tests/transaction_buffer_limit.rs +++ b/backend/crates/kalamdb-core/tests/transaction_buffer_limit.rs @@ -8,8 +8,8 @@ use kalamdb_configs::ServerConfig; use kalamdb_core::operations::service::OperationService; use kalamdb_pg::OperationExecutor; use support::{ - create_cluster_app_context_with_config, create_executor, create_shared_table, observer_exec_ctx, - row, select_names, unique_namespace, + create_cluster_app_context_with_config, create_executor, create_shared_table, + observer_exec_ctx, row, select_names, unique_namespace, }; #[tokio::test] @@ -19,7 +19,8 @@ async fn transaction_buffer_limit_aborts_transaction_and_rejects_follow_up_write config.max_transaction_buffer_bytes = 256; let (app_ctx, _test_db) = create_cluster_app_context_with_config(config).await; - let table_id = create_shared_table(&app_ctx, &unique_namespace("tx_buffer_limit"), "items").await; + let table_id = + create_shared_table(&app_ctx, &unique_namespace("tx_buffer_limit"), "items").await; let service = OperationService::new(Arc::clone(&app_ctx)); let executor = create_executor(Arc::clone(&app_ctx)); let observer_ctx = observer_exec_ctx(&app_ctx); diff --git 
a/backend/crates/kalamdb-core/tests/transaction_cluster_failover.rs b/backend/crates/kalamdb-core/tests/transaction_cluster_failover.rs index 17d830786..dd97458cf 100644 --- a/backend/crates/kalamdb-core/tests/transaction_cluster_failover.rs +++ b/backend/crates/kalamdb-core/tests/transaction_cluster_failover.rs @@ -70,9 +70,6 @@ async fn sql_request_transaction_aborts_when_bound_leader_changes_before_commit( let mut cleaned_state = request_transaction_state(&request_ctx); cleaned_state.sync_from_coordinator(&app_ctx); assert!(cleaned_state.active_transaction_id().is_none()); - assert!(app_ctx - .transaction_coordinator() - .get_handle(&transaction_id) - .is_none()); + assert!(app_ctx.transaction_coordinator().get_handle(&transaction_id).is_none()); assert!(select_names(&executor, &observer_ctx, &table_id).await.is_empty()); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/tests/transaction_cluster_group_rejection.rs b/backend/crates/kalamdb-core/tests/transaction_cluster_group_rejection.rs index 69004afd4..1900f05fa 100644 --- a/backend/crates/kalamdb-core/tests/transaction_cluster_group_rejection.rs +++ b/backend/crates/kalamdb-core/tests/transaction_cluster_group_rejection.rs @@ -5,8 +5,8 @@ use ntest::timeout; use support::{ create_cluster_app_context, create_executor, create_shared_table, create_user_table, - execute_err, execute_ok, insert_sql, request_exec_ctx, request_transaction_state, - select_names, unique_namespace, + execute_err, execute_ok, insert_sql, request_exec_ctx, request_transaction_state, select_names, + unique_namespace, }; #[tokio::test] @@ -44,7 +44,10 @@ async fn sql_request_transaction_rejects_cross_group_access_without_aborting_bou error.contains("already bound to data raft group"), "expected cross-group rejection, got: {error}" ); - assert!(error.contains(&bound_group.to_string()), "expected bound group in error: {error}"); + assert!( + error.contains(&bound_group.to_string()), + "expected bound group in error: {error}" + ); assert!( error.contains("data:shared:00"), "expected requested shared group in error: {error}" @@ -61,4 +64,4 @@ async fn sql_request_transaction_rejects_cross_group_access_without_aborting_bou assert!(select_names(&executor, &observer_ctx, &user_table).await.is_empty()); assert!(select_names(&executor, &observer_ctx, &shared_table).await.is_empty()); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/tests/transaction_commit_live_fanout.rs b/backend/crates/kalamdb-core/tests/transaction_commit_live_fanout.rs index 7592bb547..abe29125f 100644 --- a/backend/crates/kalamdb-core/tests/transaction_commit_live_fanout.rs +++ b/backend/crates/kalamdb-core/tests/transaction_commit_live_fanout.rs @@ -4,13 +4,15 @@ use std::sync::Arc; use std::time::Duration; use kalamdb_commons::models::rows::Row; -use kalamdb_commons::models::{ConnectionId, LiveQueryId, OperationKind, TransactionOrigin, UserId}; +use kalamdb_commons::models::{ + ConnectionId, LiveQueryId, OperationKind, TransactionOrigin, UserId, +}; use kalamdb_commons::websocket::ChangeType; use kalamdb_commons::TableType; +use kalamdb_core::transactions::{ExecutionOwnerKey, StagedMutation}; use kalamdb_live::models::{ NotificationSender, SubscriptionFlowControl, SubscriptionHandle, SubscriptionRuntimeMetadata, }; -use kalamdb_core::transactions::{ExecutionOwnerKey, StagedMutation}; use tokio::sync::mpsc; use support::{create_cluster_app_context, create_shared_table, row, unique_namespace}; @@ -56,16 +58,14 @@ fn insert_mutation( 
#[ntest::timeout(8000)] async fn explicit_commit_releases_live_notification_after_commit() { let (app_ctx, _test_db) = create_cluster_app_context().await; - let table_id = create_shared_table(&app_ctx, &unique_namespace("tx_live_commit"), "items").await; + let table_id = + create_shared_table(&app_ctx, &unique_namespace("tx_live_commit"), "items").await; let registry = app_ctx.connection_registry(); let connection_id = ConnectionId::new("conn-live-commit"); let subscriber_user = UserId::new("watcher-commit"); - let live_id = LiveQueryId::new( - subscriber_user, - connection_id.clone(), - "sub-live-commit".to_string(), - ); + let live_id = + LiveQueryId::new(subscriber_user, connection_id.clone(), "sub-live-commit".to_string()); let (tx, mut rx) = mpsc::channel(8); let flow_control = Arc::new(SubscriptionFlowControl::new()); flow_control.mark_initial_complete(); @@ -93,16 +93,11 @@ async fn explicit_commit_releases_live_notification_after_commit() { .expect("stage succeeds"); assert!( - tokio::time::timeout(Duration::from_millis(150), rx.recv()) - .await - .is_err(), + tokio::time::timeout(Duration::from_millis(150), rx.recv()).await.is_err(), "staged transaction should not fan out before commit" ); - coordinator - .commit(&transaction_id) - .await - .expect("commit succeeds"); + coordinator.commit(&transaction_id).await.expect("commit succeeds"); let delivered = tokio::time::timeout(Duration::from_secs(1), rx.recv()) .await @@ -121,16 +116,14 @@ async fn explicit_commit_releases_live_notification_after_commit() { #[ntest::timeout(8000)] async fn explicit_rollback_emits_no_live_notification() { let (app_ctx, _test_db) = create_cluster_app_context().await; - let table_id = create_shared_table(&app_ctx, &unique_namespace("tx_live_rollback"), "items").await; + let table_id = + create_shared_table(&app_ctx, &unique_namespace("tx_live_rollback"), "items").await; let registry = app_ctx.connection_registry(); let connection_id = ConnectionId::new("conn-live-rollback"); let subscriber_user = UserId::new("watcher-rollback"); - let live_id = LiveQueryId::new( - subscriber_user, - connection_id.clone(), - "sub-live-rollback".to_string(), - ); + let live_id = + LiveQueryId::new(subscriber_user, connection_id.clone(), "sub-live-rollback".to_string()); let (tx, mut rx) = mpsc::channel(8); let flow_control = Arc::new(SubscriptionFlowControl::new()); flow_control.mark_initial_complete(); @@ -160,9 +153,7 @@ async fn explicit_rollback_emits_no_live_notification() { coordinator.rollback(&transaction_id).expect("rollback succeeds"); assert!( - tokio::time::timeout(Duration::from_millis(250), rx.recv()) - .await - .is_err(), + tokio::time::timeout(Duration::from_millis(250), rx.recv()).await.is_err(), "rollback should not fan out any live notification" ); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/tests/transaction_races.rs b/backend/crates/kalamdb-core/tests/transaction_races.rs index c60ed43a9..28e3a2349 100644 --- a/backend/crates/kalamdb-core/tests/transaction_races.rs +++ b/backend/crates/kalamdb-core/tests/transaction_races.rs @@ -24,7 +24,8 @@ fn parse_transaction_id(transaction_id: &str) -> TransactionId { #[ntest::timeout(6000)] async fn repeated_commit_vs_rollback_clears_coordinator_state() { let (app_ctx, _test_db) = create_cluster_app_context().await; - let table_id = create_shared_table(&app_ctx, &unique_namespace("tx_race_commit_rb"), "items").await; + let table_id = + create_shared_table(&app_ctx, &unique_namespace("tx_race_commit_rb"), "items").await; let service 
= Arc::new(OperationService::new(Arc::clone(&app_ctx))); let session_id = "pg-7101-deadbeef"; let owner_key = @@ -56,10 +57,10 @@ async fn repeated_commit_vs_rollback_clears_coordinator_state() { let commit_tx = parsed_transaction_id.clone(); let rollback_tx = parsed_transaction_id.clone(); - let (commit_result, rollback_result) = tokio::join!( - async move { commit_coordinator.commit(&commit_tx).await }, - async move { rollback_coordinator.rollback(&rollback_tx) }, - ); + let (commit_result, rollback_result) = + tokio::join!(async move { commit_coordinator.commit(&commit_tx).await }, async move { + rollback_coordinator.rollback(&rollback_tx) + },); let commit_ok = commit_result.is_ok(); let rollback_ok = rollback_result.is_ok(); @@ -70,24 +71,15 @@ async fn repeated_commit_vs_rollback_clears_coordinator_state() { assert!(app_ctx.transaction_coordinator().active_metrics().is_empty()); assert!( - app_ctx - .transaction_coordinator() - .active_for_owner(&owner_key) - .is_none(), + app_ctx.transaction_coordinator().active_for_owner(&owner_key).is_none(), "owner mapping should be cleared after commit/rollback race" ); assert!( - app_ctx - .transaction_coordinator() - .get_handle(&parsed_transaction_id) - .is_none(), + app_ctx.transaction_coordinator().get_handle(&parsed_transaction_id).is_none(), "terminal race should not leave an active handle behind" ); assert!( - app_ctx - .transaction_coordinator() - .get_overlay(&parsed_transaction_id) - .is_none(), + app_ctx.transaction_coordinator().get_overlay(&parsed_transaction_id).is_none(), "terminal race should not leave an orphaned staged overlay" ); } @@ -100,7 +92,8 @@ async fn repeated_timeout_cleanup_drops_staged_state() { config.transaction_timeout_secs = 1; let (app_ctx, _test_db) = create_cluster_app_context_with_config(config).await; - let table_id = create_shared_table(&app_ctx, &unique_namespace("tx_race_timeout"), "items").await; + let table_id = + create_shared_table(&app_ctx, &unique_namespace("tx_race_timeout"), "items").await; let service = Arc::new(OperationService::new(Arc::clone(&app_ctx))); let executor = create_executor(Arc::clone(&app_ctx)); let observer_ctx = observer_exec_ctx(&app_ctx); @@ -143,10 +136,7 @@ async fn repeated_timeout_cleanup_drops_staged_state() { let parsed_transaction_id = parse_transaction_id(&transaction_id); assert!(app_ctx.transaction_coordinator().active_metrics().is_empty()); assert!( - app_ctx - .transaction_coordinator() - .get_overlay(&parsed_transaction_id) - .is_none(), + app_ctx.transaction_coordinator().get_overlay(&parsed_transaction_id).is_none(), "timed out transaction should not retain staged writes" ); @@ -157,17 +147,11 @@ async fn repeated_timeout_cleanup_drops_staged_state() { .expect("rollback returns transaction id"); assert!( - app_ctx - .transaction_coordinator() - .active_for_owner(&owner_key) - .is_none(), + app_ctx.transaction_coordinator().active_for_owner(&owner_key).is_none(), "timeout cleanup should clear owner mapping" ); assert!( - app_ctx - .transaction_coordinator() - .get_handle(&parsed_transaction_id) - .is_none(), + app_ctx.transaction_coordinator().get_handle(&parsed_transaction_id).is_none(), "timeout cleanup should clear the terminal transaction handle" ); } @@ -214,20 +198,14 @@ async fn repeated_request_cleanup_rolls_back_without_leaking_state() { "request cleanup should clear owner mapping" ); assert!( - app_ctx - .transaction_coordinator() - .get_handle(&transaction_id) - .is_none(), + app_ctx.transaction_coordinator().get_handle(&transaction_id).is_none(), 
"request cleanup should clear the transaction handle" ); assert!( - app_ctx - .transaction_coordinator() - .get_overlay(&transaction_id) - .is_none(), + app_ctx.transaction_coordinator().get_overlay(&transaction_id).is_none(), "request cleanup should not leave staged overlays behind" ); } assert!(select_names(&executor, &observer_ctx, &table_id).await.is_empty()); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-core/tests/transaction_stream_table_rejection.rs b/backend/crates/kalamdb-core/tests/transaction_stream_table_rejection.rs index be3b64224..46f0a4d64 100644 --- a/backend/crates/kalamdb-core/tests/transaction_stream_table_rejection.rs +++ b/backend/crates/kalamdb-core/tests/transaction_stream_table_rejection.rs @@ -34,7 +34,9 @@ async fn explicit_transaction_rejects_stream_table_writes() { .await .expect_err("stream-table write should be rejected"); assert!( - error.message().contains("stream tables are not supported inside explicit transactions"), + error + .message() + .contains("stream tables are not supported inside explicit transactions"), "{error}" ); diff --git a/backend/crates/kalamdb-core/tests/transaction_timeout.rs b/backend/crates/kalamdb-core/tests/transaction_timeout.rs index 4a89e9987..3b9dcc22c 100644 --- a/backend/crates/kalamdb-core/tests/transaction_timeout.rs +++ b/backend/crates/kalamdb-core/tests/transaction_timeout.rs @@ -9,8 +9,8 @@ use kalamdb_configs::ServerConfig; use kalamdb_core::operations::service::OperationService; use kalamdb_pg::OperationExecutor; use support::{ - create_cluster_app_context_with_config, create_executor, create_shared_table, observer_exec_ctx, - row, select_names, unique_namespace, + create_cluster_app_context_with_config, create_executor, create_shared_table, + observer_exec_ctx, row, select_names, unique_namespace, }; #[tokio::test] diff --git a/backend/crates/kalamdb-core/tests/transaction_user_batch_commit.rs b/backend/crates/kalamdb-core/tests/transaction_user_batch_commit.rs index ecfb2468e..7face1c7f 100644 --- a/backend/crates/kalamdb-core/tests/transaction_user_batch_commit.rs +++ b/backend/crates/kalamdb-core/tests/transaction_user_batch_commit.rs @@ -3,8 +3,8 @@ mod support; use std::sync::Arc; use datafusion_common::ScalarValue; -use kalamdb_commons::models::rows::Row; use kalamdb_commons::models::pg_operations::InsertRequest; +use kalamdb_commons::models::rows::Row; use kalamdb_commons::models::{OperationKind, TransactionOrigin, UserId}; use kalamdb_commons::TableType; use kalamdb_core::operations::service::OperationService; @@ -91,10 +91,7 @@ async fn explicit_commit_persists_same_table_same_user_user_inserts() { ) .expect("second stage succeeds"); - let commit_result = coordinator - .commit(&transaction_id) - .await - .expect("commit succeeds"); + let commit_result = coordinator.commit(&transaction_id).await.expect("commit succeeds"); let commit_seq = commit_result .committed_commit_seq .expect("commit should stamp a commit sequence"); @@ -217,7 +214,8 @@ async fn explicit_commit_preserves_user_scope_for_same_primary_keys() { async fn operation_service_preserves_user_scope_for_same_primary_keys() { let (app_ctx, _test_db) = create_cluster_app_context().await; let (first_user_id, second_user_id) = same_user_shard_pair(); - let table_id = create_user_table(&app_ctx, &unique_namespace("tx_user_scope_service"), "items").await; + let table_id = + create_user_table(&app_ctx, &unique_namespace("tx_user_scope_service"), "items").await; let service = Arc::new(OperationService::new(Arc::clone(&app_ctx))); 
let session_id = "pg-7103-deadbeef"; @@ -286,4 +284,4 @@ async fn operation_service_preserves_user_scope_for_same_primary_keys() { second_user_second_row.fields.get("name"), Some(&ScalarValue::Utf8(Some("beta-b".to_string()))) ); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-dialect/src/batch_execution.rs b/backend/crates/kalamdb-dialect/src/batch_execution.rs index dd9b3eb96..58d84de8f 100644 --- a/backend/crates/kalamdb-dialect/src/batch_execution.rs +++ b/backend/crates/kalamdb-dialect/src/batch_execution.rs @@ -9,8 +9,8 @@ use crate::execute_as::parse_execute_as; use crate::parser::utils::parse_single_statement; use crate::parser::utils::parse_sql_statements; use kalamdb_commons::models::Username; -use sqlparser::ast::Statement; use sqlparser::ast::Spanned; +use sqlparser::ast::Statement; use sqlparser::tokenizer::{Location, Span}; /// Error produced when parsing a batch SQL string fails. @@ -53,7 +53,10 @@ pub struct PreparedExecutionBatchStatement { #[derive(Debug, Clone, PartialEq, Eq)] pub enum ExecutionBatchParseError { Batch(BatchParseError), - Statement { statement_index: usize, message: String }, + Statement { + statement_index: usize, + message: String, + }, } impl std::fmt::Display for ExecutionBatchParseError { @@ -146,12 +149,11 @@ where let mut prepared = Vec::with_capacity(parsed_statements.len()); for (idx, statement) in parsed_statements.into_iter().enumerate() { - let prepared_statement = prepare_statement(&statement).map_err(|error| { - ExecutionBatchPrepareError::Prepare { + let prepared_statement = + prepare_statement(&statement).map_err(|error| ExecutionBatchPrepareError::Prepare { statement_index: idx + 1, error, - } - })?; + })?; prepared.push(PreparedExecutionBatchStatement { execute_as_username: statement.execute_as_username, @@ -173,11 +175,7 @@ fn line_start_offsets(sql: &str) -> Vec { starts } -fn byte_index_for_location( - sql: &str, - line_starts: &[usize], - location: Location, -) -> Option { +fn byte_index_for_location(sql: &str, line_starts: &[usize], location: Location) -> Option { if location.line == 0 || location.column == 0 { return None; } @@ -255,8 +253,8 @@ fn tail_has_separator_or_eof(mut tail: &str) -> bool { fn parse_batch_with_sqlparser(sql: &str) -> Result, BatchParseError> { let dialect = KalamDbDialect::default(); - let statements = parse_sql_statements(sql, &dialect) - .map_err(|err| BatchParseError::new(err.to_string()))?; + let statements = + parse_sql_statements(sql, &dialect).map_err(|err| BatchParseError::new(err.to_string()))?; if statements.is_empty() { return Ok(Vec::new()); @@ -567,11 +565,11 @@ mod tests { #[test] fn prepare_execution_batch_preserves_execute_as_username() { - let prepared = prepare_execution_batch( - "SELECT 1; EXECUTE AS USER alice (SELECT 2)", - |statement| Ok::<_, String>(statement.sql.clone()), - ) - .unwrap(); + let prepared = + prepare_execution_batch("SELECT 1; EXECUTE AS USER alice (SELECT 2)", |statement| { + Ok::<_, String>(statement.sql.clone()) + }) + .unwrap(); assert_eq!(prepared.len(), 2); assert_eq!(prepared[0].prepared_statement, "SELECT 1".to_string()); diff --git a/backend/crates/kalamdb-dialect/src/lib.rs b/backend/crates/kalamdb-dialect/src/lib.rs index ac6d019c5..3128a66ff 100644 --- a/backend/crates/kalamdb-dialect/src/lib.rs +++ b/backend/crates/kalamdb-dialect/src/lib.rs @@ -17,9 +17,8 @@ pub mod validation; pub use batch_execution::{ parse_batch_statements, parse_execution_batch, parse_execution_statement, - prepare_execution_batch, BatchParseError, 
ExecutionBatchParseError, + prepare_execution_batch, split_statements, BatchParseError, ExecutionBatchParseError, ExecutionBatchPrepareError, ParsedExecutionStatement, PreparedExecutionBatchStatement, - split_statements, }; pub use classifier::{SqlStatement, SqlStatementKind, StatementClassificationError}; pub use compatibility::{ diff --git a/backend/crates/kalamdb-dialect/src/parser/utils.rs b/backend/crates/kalamdb-dialect/src/parser/utils.rs index 554c48f60..8546318e2 100644 --- a/backend/crates/kalamdb-dialect/src/parser/utils.rs +++ b/backend/crates/kalamdb-dialect/src/parser/utils.rs @@ -210,15 +210,14 @@ fn table_id_from_parts(parts: &[String], default_namespace: &str) -> Option Option { - let compact_tokens: Vec<&Token> = tokens - .iter() - .filter(|token| !matches!(token, Token::Whitespace(_))) - .collect(); + let compact_tokens: Vec<&Token> = + tokens.iter().filter(|token| !matches!(token, Token::Whitespace(_))).collect(); match *(compact_tokens.first()?) { Token::Word(ref word) if word.value.eq_ignore_ascii_case("INSERT") => { let into_token = compact_tokens.get(1)?; - if !matches!(**into_token, Token::Word(ref word) if word.value.eq_ignore_ascii_case("INTO")) { + if !matches!(**into_token, Token::Word(ref word) if word.value.eq_ignore_ascii_case("INTO")) + { return None; } let (parts, _) = extract_object_name_parts(&compact_tokens, 2)?; @@ -230,7 +229,8 @@ fn extract_dml_table_id_from_tokens(tokens: &[Token], default_namespace: &str) - }, Token::Word(ref word) if word.value.eq_ignore_ascii_case("DELETE") => { let from_token = compact_tokens.get(1)?; - if !matches!(**from_token, Token::Word(ref word) if word.value.eq_ignore_ascii_case("FROM")) { + if !matches!(**from_token, Token::Word(ref word) if word.value.eq_ignore_ascii_case("FROM")) + { return None; } let (parts, _) = extract_object_name_parts(&compact_tokens, 2)?; @@ -594,23 +594,17 @@ mod tests { #[test] fn test_extract_dml_table_id_fast_insert_update_delete() { - let insert = extract_dml_table_id_fast( - "INSERT INTO chat.messages (id) VALUES (1)", - "default", - ) - .expect("fast insert table id"); + let insert = + extract_dml_table_id_fast("INSERT INTO chat.messages (id) VALUES (1)", "default") + .expect("fast insert table id"); assert_eq!(insert.full_name(), "chat.messages"); - let update = - extract_dml_table_id_fast("UPDATE messages SET id = 2 WHERE id = 1", "chat") - .expect("fast update table id"); + let update = extract_dml_table_id_fast("UPDATE messages SET id = 2 WHERE id = 1", "chat") + .expect("fast update table id"); assert_eq!(update.full_name(), "chat.messages"); - let delete = extract_dml_table_id_fast( - "DELETE FROM chat.messages WHERE id = 1", - "default", - ) - .expect("fast delete table id"); + let delete = extract_dml_table_id_fast("DELETE FROM chat.messages WHERE id = 1", "default") + .expect("fast delete table id"); assert_eq!(delete.full_name(), "chat.messages"); } diff --git a/backend/crates/kalamdb-dialect/src/validation.rs b/backend/crates/kalamdb-dialect/src/validation.rs index 6af34356a..b1f21bcc2 100644 --- a/backend/crates/kalamdb-dialect/src/validation.rs +++ b/backend/crates/kalamdb-dialect/src/validation.rs @@ -266,4 +266,4 @@ mod tests { Err(ValidationError::ReservedSqlKeyword("WHERE".to_string())) ); } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-dialect/tests/test_create_table_constraints.rs b/backend/crates/kalamdb-dialect/tests/test_create_table_constraints.rs index d5984a021..6d6e6dc77 100644 --- 
a/backend/crates/kalamdb-dialect/tests/test_create_table_constraints.rs +++ b/backend/crates/kalamdb-dialect/tests/test_create_table_constraints.rs @@ -181,4 +181,4 @@ fn test_validation_primary_key_must_exist() { assert!(result.is_err()); assert!(result.unwrap_err().contains("not found")); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-dialect/tests/test_statement_classification.rs b/backend/crates/kalamdb-dialect/tests/test_statement_classification.rs index ea9414c58..3d04534c8 100644 --- a/backend/crates/kalamdb-dialect/tests/test_statement_classification.rs +++ b/backend/crates/kalamdb-dialect/tests/test_statement_classification.rs @@ -105,4 +105,4 @@ fn test_classify_whitespace_before_with() { assert!(result.is_ok()); let stmt = result.unwrap(); assert!(matches!(stmt.kind(), SqlStatementKind::Select)); -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-jobs/src/executors/flush.rs b/backend/crates/kalamdb-jobs/src/executors/flush.rs index cdeeff6b0..cb2e1c110 100644 --- a/backend/crates/kalamdb-jobs/src/executors/flush.rs +++ b/backend/crates/kalamdb-jobs/src/executors/flush.rs @@ -241,62 +241,83 @@ impl FlushExecutor { result.parquet_files.len() ); - // Compact RocksDB partition after flush to reclaim space from tombstones - ctx.log_trace("Running RocksDB compaction to clean up tombstones..."); - let backend = app_ctx.storage_backend(); - let partition_name = match table_type { - TableType::User => { - use kalamdb_commons::constants::ColumnFamilyNames; - format!( - "{}{}", - ColumnFamilyNames::USER_TABLE_PREFIX, - table_id // TableId Display: "namespace:table" - ) - }, - TableType::Shared => { - use kalamdb_commons::constants::ColumnFamilyNames; - format!( - "{}{}", - ColumnFamilyNames::SHARED_TABLE_PREFIX, - table_id // TableId Display: "namespace:table" - ) - }, - _ => { - // For Stream/System tables, skip compaction - return Ok(JobDecision::Completed { - message: Some(format!( - "Flushed {} successfully ({} rows, {} files)", - table_id, - result.rows_flushed, - result.parquet_files.len() - )), - }); - }, - }; - - use kalamdb_store::storage_trait::Partition; - let partition = Partition::new(partition_name); - - // Run RocksDB compaction in blocking thread pool to avoid blocking async runtime - let compact_result = - tokio::task::spawn_blocking(move || backend.compact_partition(&partition)) + // Fire-and-forget: compact RocksDB partition after flush to reclaim + // space from tombstones. Compaction is an optimisation, not a + // correctness requirement, so we must not block the job from being + // marked "completed". With max_background_jobs=2, synchronous + // compaction under concurrent flush load was the root cause of + // 90-120 s stalls observed in smoke tests. 
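+ // The spawned task is fully detached: compaction errors, and panics in the
+ // blocking task, are logged at warn level and never affect the job outcome.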
+ let compact_table_type = table_type; + let compact_table_id = table_id.clone(); + let compact_backend = app_ctx.storage_backend(); + if matches!(compact_table_type, TableType::User | TableType::Shared) { + tokio::task::spawn(async move { + let partition_name = match compact_table_type { + TableType::User => { + use kalamdb_commons::constants::ColumnFamilyNames; + format!( + "{}{}", + ColumnFamilyNames::USER_TABLE_PREFIX, + compact_table_id + ) + }, + TableType::Shared => { + use kalamdb_commons::constants::ColumnFamilyNames; + format!( + "{}{}", + ColumnFamilyNames::SHARED_TABLE_PREFIX, + compact_table_id + ) + }, + _ => return, + }; + use kalamdb_store::storage_trait::Partition; + let partition = Partition::new(partition_name); + match tokio::task::spawn_blocking(move || { + compact_backend.compact_partition(&partition) + }) .await - .map_err(|e| { - KalamDbError::InvalidOperation(format!("Compaction task panicked: {}", e)) - })?; - - match compact_result { - Ok(()) => { - ctx.log_trace("RocksDB compaction completed successfully"); - }, - Err(e) => { - // Log compaction failure but don't fail the flush job - ctx.log_warn(&format!("RocksDB compaction failed (non-critical): {}", e)); - }, + { + Ok(Ok(())) => { + log::trace!( + "Post-flush compaction completed for {}", + compact_table_id + ); + }, + Ok(Err(e)) => { + log::warn!("Post-flush compaction failed (non-critical): {}", e); + }, + Err(e) => { + log::warn!("Post-flush compaction task panicked: {}", e); + }, + } + }); } + // Fire-and-forget: check if the shared table scope is empty and clean + // up cold segments if so. Also non-blocking to avoid stalling. if matches!(table_type, TableType::Shared) { - cleanup_empty_shared_scope_if_needed(ctx, table_id.as_ref()).await?; + let cleanup_app_ctx = app_ctx.clone(); + let cleanup_table_id = (*table_id).clone(); + let cleanup_job_id = ctx.job_id.clone(); + tokio::task::spawn(async move { + // Build a minimal JobContext just for the helper + let params = FlushParams { + table_id: cleanup_table_id.clone(), + table_type: TableType::Shared, + flush_threshold: None, + }; + let ctx = + crate::executors::JobContext::new(cleanup_app_ctx, cleanup_job_id, params); + if let Err(e) = + cleanup_empty_shared_scope_if_needed(&ctx, &cleanup_table_id).await + { + log::warn!( + "Post-flush shared scope cleanup failed (non-critical): {}", + e + ); + } + }); } Ok(JobDecision::Completed { diff --git a/backend/crates/kalamdb-live/src/fanout.rs b/backend/crates/kalamdb-live/src/fanout.rs index 7d15eb0cc..1e467ea17 100644 --- a/backend/crates/kalamdb-live/src/fanout.rs +++ b/backend/crates/kalamdb-live/src/fanout.rs @@ -75,15 +75,13 @@ impl FanoutDispatchPlan { } fn observe_notification_seq(&mut self, notification: &ChangeNotification) { - let seq_value = notification - .row_data - .values - .get(SystemColumnNames::SEQ) - .and_then(|value| match value { + let seq_value = notification.row_data.values.get(SystemColumnNames::SEQ).and_then( + |value| match value { ScalarValue::Int64(Some(seq)) => Some(SeqId::from(*seq)), ScalarValue::UInt64(Some(seq)) => Some(SeqId::from(*seq as i64)), _ => None, - }); + }, + ); if let Some(seq_value) = seq_value { match self.seq_upper_bound { @@ -126,14 +124,9 @@ impl CommitSideEffectPlan { owner_scope: FanoutOwnerScope, notification: ChangeNotification, ) { - if let Some(dispatch) = self - .notifications - .iter_mut() - .find(|dispatch| { - dispatch.table_id == notification.table_id - && dispatch.owner_scope == owner_scope - }) - { + if let Some(dispatch) = 
self.notifications.iter_mut().find(|dispatch| { + dispatch.table_id == notification.table_id && dispatch.owner_scope == owner_scope + }) { dispatch.push_notification(notification); return; } @@ -153,11 +146,7 @@ impl CommitSideEffectPlan { self.manifest_updates += 1; } - pub fn record_notification_group( - &mut self, - table_id: TableId, - owner_scope: FanoutOwnerScope, - ) { + pub fn record_notification_group(&mut self, table_id: TableId, owner_scope: FanoutOwnerScope) { if let Some(dispatch) = self .notifications .iter_mut() @@ -174,9 +163,7 @@ impl CommitSideEffectPlan { #[inline] pub fn is_empty(&self) -> bool { - self.notifications.is_empty() - && self.publisher_events == 0 - && self.manifest_updates == 0 + self.notifications.is_empty() && self.publisher_events == 0 && self.manifest_updates == 0 } #[inline] diff --git a/backend/crates/kalamdb-live/src/helpers/filter_eval.rs b/backend/crates/kalamdb-live/src/helpers/filter_eval.rs index 047ca4390..571524770 100644 --- a/backend/crates/kalamdb-live/src/helpers/filter_eval.rs +++ b/backend/crates/kalamdb-live/src/helpers/filter_eval.rs @@ -30,9 +30,8 @@ pub fn parse_where_clause(where_clause: &str) -> Result { let sql = format!("SELECT * FROM t WHERE {}", where_clause); let dialect = PostgreSqlDialect {}; - let statements = Parser::parse_sql(&dialect, &sql).map_err(|e| { - LiveError::InvalidOperation(format!("Failed to parse WHERE clause: {}", e)) - })?; + let statements = Parser::parse_sql(&dialect, &sql) + .map_err(|e| LiveError::InvalidOperation(format!("Failed to parse WHERE clause: {}", e)))?; if statements.is_empty() { return Err(LiveError::InvalidOperation("Empty WHERE clause".to_string())); @@ -132,9 +131,7 @@ fn evaluate_expr(expr: &Expr, row_data: &Row, depth: usize) -> Result { - Err(LiveError::InvalidOperation(format!("Unsupported unary operator: {:?}", op))) - }, + _ => Err(LiveError::InvalidOperation(format!("Unsupported unary operator: {:?}", op))), }, Expr::Like { @@ -169,10 +166,7 @@ fn evaluate_expr(expr: &Expr, row_data: &Row, depth: usize) -> Result Err(LiveError::InvalidOperation(format!( - "Unsupported expression type: {:?}", - expr - ))), + _ => Err(LiveError::InvalidOperation(format!("Unsupported expression type: {:?}", expr))), } } @@ -225,11 +219,7 @@ fn as_str(v: &ScalarValue) -> Option<&str> { } /// Helper to compare two ScalarValues for numeric comparisons -fn compare_numeric( - left: &ScalarValue, - right: &ScalarValue, - op: &str, -) -> Result { +fn compare_numeric(left: &ScalarValue, right: &ScalarValue, op: &str) -> Result { let left_num = as_f64(left).ok_or_else(|| { LiveError::InvalidOperation(format!("Cannot convert {:?} to number", left)) })?; @@ -293,10 +283,7 @@ fn evaluate_like_pattern( Ok(if negated { !matches } else { matches }) } -fn build_like_regex_pattern( - pattern: &str, - escape_char: Option, -) -> Result { +fn build_like_regex_pattern(pattern: &str, escape_char: Option) -> Result { let mut regex_pattern = String::with_capacity(pattern.len() + 2); regex_pattern.push('^'); diff --git a/backend/crates/kalamdb-live/src/helpers/initial_data.rs b/backend/crates/kalamdb-live/src/helpers/initial_data.rs index 06a6df144..c008ca3f7 100644 --- a/backend/crates/kalamdb-live/src/helpers/initial_data.rs +++ b/backend/crates/kalamdb-live/src/helpers/initial_data.rs @@ -130,9 +130,7 @@ impl InitialDataFetcher { /// /// The SQL executor is set later via `set_sql_executor` because of /// bootstrap ordering (LiveQueryManager is created before SqlExecutor). 
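+ /// The executor slot is a `OnceCell`, so it can be installed at most once.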
- pub fn new( - schema_lookup: Arc, - ) -> Self { + pub fn new(schema_lookup: Arc) -> Self { Self { schema_lookup, sql_executor: Arc::new(OnceCell::new()), @@ -377,11 +375,7 @@ impl InitialDataFetcher { Ok(where_clauses) } - fn table_has_column( - &self, - table_id: &TableId, - column_name: &str, - ) -> Result { + fn table_has_column(&self, table_id: &TableId, column_name: &str) -> Result { let schema = self.schema_lookup.get_arrow_schema(table_id)?; Ok(schema.field_with_name(column_name).is_ok()) } @@ -429,4 +423,3 @@ mod tests { assert!(options.include_deleted); } } - diff --git a/backend/crates/kalamdb-live/src/manager/connections_manager.rs b/backend/crates/kalamdb-live/src/manager/connections_manager.rs index 405330f2c..2e88fca94 100644 --- a/backend/crates/kalamdb-live/src/manager/connections_manager.rs +++ b/backend/crates/kalamdb-live/src/manager/connections_manager.rs @@ -199,12 +199,8 @@ impl ConnectionsManager { let (event_tx, event_rx) = mpsc::channel(EVENT_CHANNEL_CAPACITY); let (notification_tx, notification_rx) = mpsc::channel(NOTIFICATION_CHANNEL_CAPACITY); - let state = ConnectionState::new( - connection_id.clone(), - client_ip, - notification_tx, - event_tx, - ); + let state = + ConnectionState::new(connection_id.clone(), client_ip, notification_tx, event_tx); let shared_state = Arc::new(state); self.connections.insert(connection_id.clone(), Arc::clone(&shared_state)); @@ -427,7 +423,6 @@ impl ConnectionsManager { .unwrap_or_else(|| Arc::clone(&self.empty_subscriptions)) } - /// Send a notification directly to all authenticated connections for a user. /// /// This bypasses subscription indexing and is intended only for transport-layer @@ -915,11 +910,8 @@ mod tests { ) -> SubscriptionHandle { let flow_control = Arc::new(SubscriptionFlowControl::new()); flow_control.mark_initial_complete(); - let runtime_metadata = Arc::new(SubscriptionRuntimeMetadata::new( - "SELECT * FROM shared.test", - None, - 1, - )); + let runtime_metadata = + Arc::new(SubscriptionRuntimeMetadata::new("SELECT * FROM shared.test", None, 1)); SubscriptionHandle { subscription_id: Arc::from("test-subscription"), filter_expr: None, @@ -1075,7 +1067,7 @@ mod tests { // Send a notification to all shared table subscribers let notification = make_wire_notification("test"); - notify_shared_table_for_test(&registry, &table_id, notification); + notify_shared_table_for_test(&registry, &table_id, notification); // All subscribers should receive it for rx in &mut receivers { @@ -1119,7 +1111,7 @@ mod tests { let notification = make_wire_notification("perf"); let start = std::time::Instant::now(); - notify_shared_table_for_test(&registry, &table_id, notification); + notify_shared_table_for_test(&registry, &table_id, notification); let elapsed = start.elapsed(); // Should complete in well under 100ms for 1000 subscribers @@ -1186,7 +1178,7 @@ mod tests { ); let notification = make_wire_notification("parallel"); - notify_shared_table_for_test(&registry, &table_id, notification); + notify_shared_table_for_test(&registry, &table_id, notification); for mut rx in receivers { let msg = tokio::time::timeout(Duration::from_secs(1), rx.recv()).await; diff --git a/backend/crates/kalamdb-live/src/manager/queries_manager.rs b/backend/crates/kalamdb-live/src/manager/queries_manager.rs index 18e585d8b..d848210ac 100644 --- a/backend/crates/kalamdb-live/src/manager/queries_manager.rs +++ b/backend/crates/kalamdb-live/src/manager/queries_manager.rs @@ -13,9 +13,7 @@ use crate::error::LiveError; use crate::helpers::filter_eval::parse_where_clause; 
-use crate::helpers::initial_data::{ - InitialDataFetcher, InitialDataOptions, InitialDataResult, -}; +use crate::helpers::initial_data::{InitialDataFetcher, InitialDataOptions, InitialDataResult}; use crate::manager::ConnectionsManager; use crate::models::{SharedConnectionState, SubscriptionResult}; use crate::subscription::SubscriptionService; @@ -115,8 +113,7 @@ impl LiveQueryManager { ) -> Self { let node_id = *registry.node_id(); let subscription_service = Arc::new(SubscriptionService::new(registry.clone())); - let initial_data_fetcher = - Arc::new(InitialDataFetcher::new(Arc::clone(&schema_lookup))); + let initial_data_fetcher = Arc::new(InitialDataFetcher::new(Arc::clone(&schema_lookup))); Self { registry, @@ -380,15 +377,11 @@ impl LiveQueryManager { })?; let user_role = connection_state.user_role().ok_or_else(|| { - LiveError::InvalidOperation( - "Connection authenticated without role context".to_string(), - ) + LiveError::InvalidOperation("Connection authenticated without role context".to_string()) })?; - let table_def = self - .schema_lookup - .get_table_definition(&sub_state.table_id) - .ok_or_else(|| { + let table_def = + self.schema_lookup.get_table_definition(&sub_state.table_id).ok_or_else(|| { LiveError::NotFound(format!( "Table {} not found for batch fetch", sub_state.table_id diff --git a/backend/crates/kalamdb-live/src/models/connection.rs b/backend/crates/kalamdb-live/src/models/connection.rs index 24bf7d03a..ed7dfeaf4 100644 --- a/backend/crates/kalamdb-live/src/models/connection.rs +++ b/backend/crates/kalamdb-live/src/models/connection.rs @@ -316,9 +316,9 @@ pub struct SubscriptionState { /// /// Identity and channel fields are immutable after construction. /// Auth fields use set-once primitives (OnceLock + AtomicBool). - /// Subscriptions use a compact per-connection map. - /// Notification fanout reads from manager-level indices, so connection-local - /// subscription access is low-contention and does not need a sharded map. +/// Subscriptions use a compact per-connection map. +/// Notification fanout reads from manager-level indices, so connection-local +/// subscription access is low-contention and does not need a sharded map. /// Heartbeat uses AtomicU64 for zero-contention updates. /// /// Used for both WebSocket live query connections and topic consumer connections. @@ -500,9 +500,9 @@ impl ConnectionState { F: FnOnce() -> Option, { let mut subscriptions = self.subscriptions.write(); - subscriptions.remove_entry(primary_key).or_else(|| { - fallback_fn().and_then(|key| subscriptions.remove_entry(key.as_str())) - }) + subscriptions + .remove_entry(primary_key) + .or_else(|| fallback_fn().and_then(|key| subscriptions.remove_entry(key.as_str()))) } /// Get a subscription by ID (cloned out of the connection map). 
diff --git a/backend/crates/kalamdb-live/src/notification.rs b/backend/crates/kalamdb-live/src/notification.rs index b4ea5734e..5bf1fbd38 100644 --- a/backend/crates/kalamdb-live/src/notification.rs +++ b/backend/crates/kalamdb-live/src/notification.rs @@ -554,9 +554,7 @@ impl NotificationServiceTrait for NotificationService { mod tests { use super::*; use crate::helpers::filter_eval::parse_where_clause; - use crate::models::{ - SubscriptionFlowControl, SubscriptionHandle, SubscriptionRuntimeMetadata, - }; + use crate::models::{SubscriptionFlowControl, SubscriptionHandle, SubscriptionRuntimeMetadata}; use datafusion::scalar::ScalarValue; use kalamdb_commons::models::rows::Row; use kalamdb_commons::models::{ConnectionId, NamespaceId, TableName}; @@ -793,24 +791,14 @@ mod tests { let (tx_a, mut rx_a) = mpsc::channel(8); let flow_a = Arc::new(SubscriptionFlowControl::new()); flow_a.mark_initial_complete(); - let handle_a = make_shared_handle( - "sub_proj_a", - tx_a, - Arc::clone(&flow_a), - None, - Some(vec!["id"]), - ); + let handle_a = + make_shared_handle("sub_proj_a", tx_a, Arc::clone(&flow_a), None, Some(vec!["id"])); let (tx_b, mut rx_b) = mpsc::channel(8); let flow_b = Arc::new(SubscriptionFlowControl::new()); flow_b.mark_initial_complete(); - let handle_b = make_shared_handle( - "sub_proj_b", - tx_b, - Arc::clone(&flow_b), - None, - Some(vec!["id"]), - ); + let handle_b = + make_shared_handle("sub_proj_b", tx_b, Arc::clone(&flow_b), None, Some(vec!["id"])); let row = make_row(9, "shared", 9); let delivered = dispatch_chunk( @@ -843,13 +831,8 @@ mod tests { let (tx_id, mut rx_id) = mpsc::channel(8); let flow_id = Arc::new(SubscriptionFlowControl::new()); flow_id.mark_initial_complete(); - let handle_id = make_shared_handle( - "sub_only_id", - tx_id, - Arc::clone(&flow_id), - None, - Some(vec!["id"]), - ); + let handle_id = + make_shared_handle("sub_only_id", tx_id, Arc::clone(&flow_id), None, Some(vec!["id"])); let (tx_body, mut rx_body) = mpsc::channel(8); let flow_body = Arc::new(SubscriptionFlowControl::new()); diff --git a/backend/crates/kalamdb-observability/src/activity.rs b/backend/crates/kalamdb-observability/src/activity.rs index 0b9a4e691..f5bb8626e 100644 --- a/backend/crates/kalamdb-observability/src/activity.rs +++ b/backend/crates/kalamdb-observability/src/activity.rs @@ -39,4 +39,4 @@ mod tests { let idle = idle_duration().expect("idle duration should be available after activity"); assert!(idle < Duration::from_secs(1)); } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-observability/src/allocator_metrics.rs b/backend/crates/kalamdb-observability/src/allocator_metrics.rs index e770e53e3..cfe2acdc7 100644 --- a/backend/crates/kalamdb-observability/src/allocator_metrics.rs +++ b/backend/crates/kalamdb-observability/src/allocator_metrics.rs @@ -43,63 +43,23 @@ impl AllocatorMetrics { push_u64(&mut pairs, "mimalloc_user_ms", self.user_ms); push_u64(&mut pairs, "mimalloc_system_ms", self.system_ms); push_u64(&mut pairs, "mimalloc_process_rss_bytes", self.process_rss_bytes); - push_u64( - &mut pairs, - "mimalloc_process_peak_rss_bytes", - self.process_peak_rss_bytes, - ); - push_u64( - &mut pairs, - "mimalloc_process_commit_bytes", - self.process_commit_bytes, - ); - push_u64( - &mut pairs, - "mimalloc_process_peak_commit_bytes", - self.process_peak_commit_bytes, - ); + push_u64(&mut pairs, "mimalloc_process_peak_rss_bytes", self.process_peak_rss_bytes); + push_u64(&mut pairs, "mimalloc_process_commit_bytes", self.process_commit_bytes); + push_u64(&mut 
pairs, "mimalloc_process_peak_commit_bytes", self.process_peak_commit_bytes); push_u64(&mut pairs, "mimalloc_page_faults", self.page_faults); - push_u64( - &mut pairs, - "mimalloc_reserved_current_bytes", - self.reserved_current_bytes, - ); - push_u64( - &mut pairs, - "mimalloc_reserved_peak_bytes", - self.reserved_peak_bytes, - ); - push_u64( - &mut pairs, - "mimalloc_committed_current_bytes", - self.committed_current_bytes, - ); - push_u64( - &mut pairs, - "mimalloc_committed_peak_bytes", - self.committed_peak_bytes, - ); - push_u64( - &mut pairs, - "mimalloc_reset_current_bytes", - self.reset_current_bytes, - ); - push_u64( - &mut pairs, - "mimalloc_purged_current_bytes", - self.purged_current_bytes, - ); + push_u64(&mut pairs, "mimalloc_reserved_current_bytes", self.reserved_current_bytes); + push_u64(&mut pairs, "mimalloc_reserved_peak_bytes", self.reserved_peak_bytes); + push_u64(&mut pairs, "mimalloc_committed_current_bytes", self.committed_current_bytes); + push_u64(&mut pairs, "mimalloc_committed_peak_bytes", self.committed_peak_bytes); + push_u64(&mut pairs, "mimalloc_reset_current_bytes", self.reset_current_bytes); + push_u64(&mut pairs, "mimalloc_purged_current_bytes", self.purged_current_bytes); push_u64( &mut pairs, "mimalloc_page_committed_current_bytes", self.page_committed_current_bytes, ); push_u64(&mut pairs, "mimalloc_pages_current", self.pages_current); - push_u64( - &mut pairs, - "mimalloc_pages_abandoned_current", - self.pages_abandoned_current, - ); + push_u64(&mut pairs, "mimalloc_pages_abandoned_current", self.pages_abandoned_current); push_u64(&mut pairs, "mimalloc_threads_current", self.threads_current); push_u64( &mut pairs, @@ -116,29 +76,17 @@ impl AllocatorMetrics { "mimalloc_malloc_normal_current_bytes", self.malloc_normal_current_bytes, ); - push_u64( - &mut pairs, - "mimalloc_malloc_huge_current_bytes", - self.malloc_huge_current_bytes, - ); + push_u64(&mut pairs, "mimalloc_malloc_huge_current_bytes", self.malloc_huge_current_bytes); push_u64(&mut pairs, "mimalloc_segments_current", self.segments_current); push_u64( &mut pairs, "mimalloc_segments_abandoned_current", self.segments_abandoned_current, ); - push_u64( - &mut pairs, - "mimalloc_segments_cache_current", - self.segments_cache_current, - ); + push_u64(&mut pairs, "mimalloc_segments_cache_current", self.segments_cache_current); push_u64(&mut pairs, "mimalloc_arena_count", self.arena_count); push_i64(&mut pairs, "mimalloc_purge_delay_ms", self.purge_delay_ms); - push_bool( - &mut pairs, - "mimalloc_purge_decommits_enabled", - self.purge_decommits, - ); + push_bool(&mut pairs, "mimalloc_purge_decommits_enabled", self.purge_decommits); push_u64( &mut pairs, "mimalloc_target_segments_per_thread", @@ -171,9 +119,8 @@ fn push_bool(pairs: &mut Vec<(String, String)>, key: &str, value: Option) mod imp { use super::AllocatorMetrics; use libmimalloc_sys::{ - mi_collect, mi_option_get, mi_option_is_enabled, mi_process_info, mi_stats_merge, - mi_version, - mi_option_t, + mi_collect, mi_option_get, mi_option_is_enabled, mi_option_t, mi_process_info, + mi_stats_merge, mi_version, }; use std::mem::MaybeUninit; @@ -303,9 +250,9 @@ mod imp { arena_count: clamp_counter(stats.arena_count.total), purge_delay_ms: Some(unsafe { mi_option_get(MI_OPTION_PURGE_DELAY) } as i64), purge_decommits: Some(unsafe { mi_option_is_enabled(MI_OPTION_PURGE_DECOMMITS) }), - target_segments_per_thread: Some( - unsafe { mi_option_get(MI_OPTION_TARGET_SEGMENTS_PER_THREAD) as u64 }, - ), + target_segments_per_thread: Some(unsafe { + 
mi_option_get(MI_OPTION_TARGET_SEGMENTS_PER_THREAD) as u64 + }), }) } @@ -335,4 +282,4 @@ mod imp { pub fn force_allocator_collection(_force: bool) {} } -pub use imp::{collect_allocator_metrics, force_allocator_collection}; \ No newline at end of file +pub use imp::{collect_allocator_metrics, force_allocator_collection}; diff --git a/backend/crates/kalamdb-observability/src/health_monitor.rs b/backend/crates/kalamdb-observability/src/health_monitor.rs index a6c960a2b..6a53e6a84 100644 --- a/backend/crates/kalamdb-observability/src/health_monitor.rs +++ b/backend/crates/kalamdb-observability/src/health_monitor.rs @@ -217,24 +217,14 @@ impl HealthMonitor { let namespaces = metric_map.get("total_namespaces").copied().unwrap_or("n/a"); let tables = metric_map.get("total_tables").copied().unwrap_or("n/a"); let subscriptions = metric_map.get("active_subscriptions").copied().unwrap_or("n/a"); - let subscriptions_peak = metric_map - .get("active_subscriptions_peak") - .copied() - .unwrap_or("n/a"); + let subscriptions_peak = + metric_map.get("active_subscriptions_peak").copied().unwrap_or("n/a"); let connections = metric_map.get("active_connections").copied().unwrap_or("n/a"); - let connections_peak = metric_map - .get("active_connections_peak") - .copied() - .unwrap_or("n/a"); - let connection_limit = metric_map - .get("max_connections_configured") - .copied() - .unwrap_or("n/a"); + let connections_peak = metric_map.get("active_connections_peak").copied().unwrap_or("n/a"); + let connection_limit = + metric_map.get("max_connections_configured").copied().unwrap_or("n/a"); let ws_sessions = metric_map.get("websocket_sessions").copied().unwrap_or("n/a"); - let ws_sessions_peak = metric_map - .get("websocket_sessions_peak") - .copied() - .unwrap_or("n/a"); + let ws_sessions_peak = metric_map.get("websocket_sessions_peak").copied().unwrap_or("n/a"); let jobs_running = metric_map.get("jobs_running").copied().unwrap_or("n/a"); let jobs_queued = metric_map.get("jobs_queued").copied().unwrap_or("n/a"); let jobs_failed = metric_map.get("jobs_failed").copied().unwrap_or("n/a"); @@ -483,10 +473,7 @@ mod tests { fn format_log_from_pairs_includes_memory_source_details_when_present() { let metrics = vec![ ("memory_usage_mb".to_string(), "109".to_string()), - ( - "memory_usage_source".to_string(), - "physical_footprint".to_string(), - ), + ("memory_usage_source".to_string(), "physical_footprint".to_string()), ("memory_rss_mb".to_string(), "141".to_string()), ("memory_rss_gap_mb".to_string(), "32".to_string()), ]; diff --git a/backend/crates/kalamdb-observability/src/lib.rs b/backend/crates/kalamdb-observability/src/lib.rs index 649b85fe0..6894d97f9 100644 --- a/backend/crates/kalamdb-observability/src/lib.rs +++ b/backend/crates/kalamdb-observability/src/lib.rs @@ -22,9 +22,8 @@ pub use allocator_metrics::{ }; pub use cpu::{get_cpu_count, get_physical_cpu_count}; pub use health_monitor::{ - decrement_websocket_sessions, get_websocket_session_count, - get_websocket_session_peak_count, increment_websocket_sessions, HealthCounts, - HealthMetrics, HealthMonitor, + decrement_websocket_sessions, get_websocket_session_count, get_websocket_session_peak_count, + increment_websocket_sessions, HealthCounts, HealthMetrics, HealthMonitor, }; pub use runtime_metrics::{ collect_runtime_metrics, RuntimeMetrics, BUILD_DATE, GIT_BRANCH, GIT_COMMIT_HASH, diff --git a/backend/crates/kalamdb-observability/src/runtime_metrics.rs b/backend/crates/kalamdb-observability/src/runtime_metrics.rs index 2612a2fda..4da9b1d8d 100644 --- 
a/backend/crates/kalamdb-observability/src/runtime_metrics.rs +++ b/backend/crates/kalamdb-observability/src/runtime_metrics.rs @@ -47,10 +47,7 @@ impl RuntimeMetrics { if let Some(mb) = self.memory_mb { pairs.push(("memory_usage_mb".to_string(), mb.to_string())); } - pairs.push(( - "memory_usage_source".to_string(), - self.memory_usage_source.to_string(), - )); + pairs.push(("memory_usage_source".to_string(), self.memory_usage_source.to_string())); if let Some(bytes) = self.memory_rss_bytes { pairs.push(("memory_rss_bytes".to_string(), bytes.to_string())); } @@ -70,16 +67,10 @@ impl RuntimeMetrics { pairs.push(("memory_rss_gap_mb".to_string(), mb.to_string())); } if let Some(bytes) = self.memory_physical_footprint_bytes { - pairs.push(( - "memory_physical_footprint_bytes".to_string(), - bytes.to_string(), - )); + pairs.push(("memory_physical_footprint_bytes".to_string(), bytes.to_string())); } if let Some(mb) = self.memory_physical_footprint_mb { - pairs.push(( - "memory_physical_footprint_mb".to_string(), - mb.to_string(), - )); + pairs.push(("memory_physical_footprint_mb".to_string(), mb.to_string())); } if let Some(cpu) = self.cpu_usage_percent { pairs.push(("cpu_usage_percent".to_string(), format!("{:.2}", cpu))); diff --git a/backend/crates/kalamdb-pg/src/lib.rs b/backend/crates/kalamdb-pg/src/lib.rs index e1d8817cf..c82f501c9 100644 --- a/backend/crates/kalamdb-pg/src/lib.rs +++ b/backend/crates/kalamdb-pg/src/lib.rs @@ -27,7 +27,7 @@ pub use service::{ ExecuteQueryRpcRequest, ExecuteQueryRpcResponse, ExecuteSqlRpcRequest, ExecuteSqlRpcResponse, InsertRpcRequest, InsertRpcResponse, OpenSessionRequest, OpenSessionResponse, PgServiceClient, PingRequest, PingResponse, RollbackTransactionRequest, RollbackTransactionResponse, - ScanRpcRequest, ScanRpcResponse, UpdateRpcRequest, UpdateRpcResponse, + ScanFilterExpression, ScanRpcRequest, ScanRpcResponse, UpdateRpcRequest, UpdateRpcResponse, }; #[cfg(feature = "server")] pub use session_registry::{LivePgTransaction, RemotePgSession, SessionRegistry}; diff --git a/backend/crates/kalamdb-pg/src/operation_executor.rs b/backend/crates/kalamdb-pg/src/operation_executor.rs index 4a39e5b6f..dc3efdc7f 100644 --- a/backend/crates/kalamdb-pg/src/operation_executor.rs +++ b/backend/crates/kalamdb-pg/src/operation_executor.rs @@ -115,6 +115,12 @@ pub fn encode_batches( pub fn scan_request_from_rpc(rpc: &ScanRpcRequest) -> Result { let table_id = parse_table_id(&rpc.namespace, &rpc.table_name)?; let table_type = parse_table_type(&rpc.table_type)?; + let filters = rpc + .filters + .iter() + .filter(|f| f.op == "eq") + .map(|f| (f.column.clone(), f.value.clone())) + .collect(); Ok(ScanRequest { table_id, table_type, @@ -122,6 +128,7 @@ pub fn scan_request_from_rpc(rpc: &ScanRpcRequest) -> Result, #[prost(uint64, optional, tag = "7")] pub limit: Option, + /// Equality filters pushed down from PostgreSQL WHERE clauses. 
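+ /// Only entries with `op == "eq"` are applied by `scan_request_from_rpc`;
+ /// any other operator is dropped before the scan executes.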
+ #[prost(message, repeated, tag = "8")] + pub filters: Vec<ScanFilterExpression>, } #[derive(Clone, PartialEq, prost::Message)] @@ -937,10 +952,7 @@ impl KalamPgService { Arc::clone(&self.session_registry) } - pub fn snapshot_with_live_transactions( - &self, - active_transactions: I, - ) -> Vec + pub fn snapshot_with_live_transactions(&self, active_transactions: I) -> Vec where I: IntoIterator, { @@ -1081,9 +1093,7 @@ impl KalamPgService { Ok(value) => Ok(value), Err(status) if Self::should_reconcile_local_transaction_state(&status) => { self.cleanup_current_transaction_after_terminal_error( - session_id, - rpc_name, - &status, + session_id, rpc_name, &status, ) .await; Err(status) diff --git a/backend/crates/kalamdb-pg/src/session_registry.rs b/backend/crates/kalamdb-pg/src/session_registry.rs index e27341025..9f75544ce 100644 --- a/backend/crates/kalamdb-pg/src/session_registry.rs +++ b/backend/crates/kalamdb-pg/src/session_registry.rs @@ -143,11 +143,11 @@ impl RemotePgSession { } fn with_live_transaction(mut self, live_transaction: Option<&LivePgTransaction>) -> Self { - self.transaction_id = live_transaction.map(|transaction| transaction.transaction_id().to_owned()); + self.transaction_id = + live_transaction.map(|transaction| transaction.transaction_id().to_owned()); self.transaction_state = live_transaction.map(LivePgTransaction::transaction_state); - self.transaction_has_writes = live_transaction - .map(LivePgTransaction::transaction_has_writes) - .unwrap_or(false); + self.transaction_has_writes = + live_transaction.map(LivePgTransaction::transaction_has_writes).unwrap_or(false); self } @@ -235,12 +235,7 @@ impl SessionRegistry { if self .last_pruned_at_ms - .compare_exchange( - last_pruned_at_ms, - now_ms, - Ordering::Relaxed, - Ordering::Relaxed, - ) + .compare_exchange(last_pruned_at_ms, now_ms, Ordering::Relaxed, Ordering::Relaxed) .is_err() { return; } @@ -263,10 +258,7 @@ } if pruned_count > 0 { - log::debug!( - "PG session registry pruned {} stale idle session(s)", - pruned_count - ); + log::debug!("PG session registry pruned {} stale idle session(s)", pruned_count); } } @@ -508,10 +500,7 @@ impl SessionRegistry { /// Return a point-in-time snapshot of tracked sessions with transaction state /// reconciled against the live coordinator view for pg-owned transactions. 
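+ /// Sessions with no matching live transaction report cleared transaction fields.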
- pub fn snapshot_with_live_transactions( - &self, - active_transactions: I, - ) -> Vec + pub fn snapshot_with_live_transactions(&self, active_transactions: I) -> Vec where I: IntoIterator, { @@ -730,10 +719,7 @@ mod tests { registry.mark_transaction_writes("pg-1"); let snapshot = registry.snapshot_with_live_transactions(Vec::<LivePgTransaction>::new()); - let session = snapshot - .into_iter() - .find(|session| session.session_id() == "pg-1") - .unwrap(); + let session = snapshot.into_iter().find(|session| session.session_id() == "pg-1").unwrap(); assert_eq!(session.transaction_id(), None); assert_eq!(session.transaction_state(), None); @@ -752,10 +738,7 @@ TransactionState::OpenWrite, true, )]); - let session = snapshot - .into_iter() - .find(|session| session.session_id() == "pg-1") - .unwrap(); + let session = snapshot.into_iter().find(|session| session.session_id() == "pg-1").unwrap(); assert_eq!(session.transaction_id(), Some("live-tx")); assert_eq!(session.transaction_state(), Some(TransactionState::OpenWrite)); @@ -814,10 +797,8 @@ false, )]); - let session = snapshot - .into_iter() - .find(|session| session.session_id() == "pg-live") - .unwrap(); + let session = + snapshot.into_iter().find(|session| session.session_id() == "pg-live").unwrap(); assert_eq!(session.transaction_state(), Some(TransactionState::OpenRead)); assert!(registry.get("pg-live").is_some()); diff --git a/backend/crates/kalamdb-pg/tests/support/mod.rs b/backend/crates/kalamdb-pg/tests/support/mod.rs index 4dd4da923..3d36f2c95 100644 --- a/backend/crates/kalamdb-pg/tests/support/mod.rs +++ b/backend/crates/kalamdb-pg/tests/support/mod.rs @@ -293,6 +293,7 @@ pub async fn scan_shared_rows( session_id: session_id.to_string(), user_id: None, columns: vec![], + filters: vec![], limit: None, })) .await @@ -343,6 +344,7 @@ pub async fn scan_user_rows( session_id: session_id.to_string(), user_id: Some(user_id.to_string()), columns: vec![], + filters: vec![], limit: None, })) .await diff --git a/backend/crates/kalamdb-pg/tests/transaction_commit.rs b/backend/crates/kalamdb-pg/tests/transaction_commit.rs index 1dba016d5..9745ebc95 100644 --- a/backend/crates/kalamdb-pg/tests/transaction_commit.rs +++ b/backend/crates/kalamdb-pg/tests/transaction_commit.rs @@ -92,14 +92,18 @@ async fn transaction_commit_preserves_user_scope_for_same_primary_keys() { assert_eq!( table_entries .values() - .filter(|entry| entry.user_id.as_ref().map(UserId::as_str) == Some(first_user_id.as_str())) + .filter( + |entry| entry.user_id.as_ref().map(UserId::as_str) == Some(first_user_id.as_str()) + ) .count(), 2 ); assert_eq!( table_entries .values() - .filter(|entry| entry.user_id.as_ref().map(UserId::as_str) == Some(second_user_id.as_str())) + .filter( + |entry| entry.user_id.as_ref().map(UserId::as_str) == Some(second_user_id.as_str()) + ) .count(), 2 ); diff --git a/backend/crates/kalamdb-plan-cache/src/lib.rs b/backend/crates/kalamdb-plan-cache/src/lib.rs index dada12548..ab42f4090 100644 --- a/backend/crates/kalamdb-plan-cache/src/lib.rs +++ b/backend/crates/kalamdb-plan-cache/src/lib.rs @@ -63,10 +63,7 @@ pub struct PlanCache { impl PlanCache { pub fn new() -> Self { - Self::with_config( - DEFAULT_PLAN_MAX_ENTRIES, - Duration::from_secs(DEFAULT_IDLE_TTL_SECS), - ) + Self::with_config(DEFAULT_PLAN_MAX_ENTRIES, Duration::from_secs(DEFAULT_IDLE_TTL_SECS)) } pub fn with_config(max_entries: u64, idle_ttl: Duration) -> Self { @@ -166,11 +163,7 @@ impl InsertMetadataCache { self.cache.get(cache_key) } - pub fn insert_arc( - &self, - cache_key: 
InsertMetadataCacheKey, - metadata: Arc, - ) { + pub fn insert_arc(&self, cache_key: InsertMetadataCacheKey, metadata: Arc) { self.cache.insert(cache_key, metadata); } @@ -180,7 +173,6 @@ } } - pub struct SqlCacheRegistry { plan_cache: PlanCache, insert_metadata_cache: InsertMetadataCache, @@ -252,4 +244,4 @@ mod tests { assert!(registry.plan_cache().is_empty()); assert!(registry.insert_metadata_cache().get(&insert_key).is_none()); } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-raft/src/codec/command_codec.rs b/backend/crates/kalamdb-raft/src/codec/command_codec.rs index dfdecbae0..7353a6c59 100644 --- a/backend/crates/kalamdb-raft/src/codec/command_codec.rs +++ b/backend/crates/kalamdb-raft/src/codec/command_codec.rs @@ -198,18 +198,20 @@ mod tests { .expect("encode retired user data command"); let err = decode_user_data_command(&bytes).expect_err("retired command must be rejected"); - assert!(err.to_string().contains("Unknown variant index") || err.to_string().contains("unknown variant")); + assert!( + err.to_string().contains("Unknown variant index") + || err.to_string().contains("unknown variant") + ); } #[test] fn option_none_serializes_compactly() { - let none_bytes = flexbuffers::to_vec(&Option::<kalamdb_commons::models::TransactionId>::None) - .expect("encode none option"); - let some_bytes = flexbuffers::to_vec(&Some( - kalamdb_commons::models::TransactionId::new( - "01960f7b-3d15-7d6d-b26c-7e4db6f25f8d", - ), - )) + let none_bytes = + flexbuffers::to_vec(&Option::<kalamdb_commons::models::TransactionId>::None) + .expect("encode none option"); + let some_bytes = flexbuffers::to_vec(&Some(kalamdb_commons::models::TransactionId::new( "01960f7b-3d15-7d6d-b26c-7e4db6f25f8d", ))) .expect("encode some option"); assert!(none_bytes.len() <= 3); @@ -276,9 +278,7 @@ "01960f7b-3d15-7d6d-b26c-7e4db6f25f8d", ), mutations: vec![StagedMutation::new( - kalamdb_commons::models::TransactionId::new( - "01960f7b-3d15-7d6d-b26c-7e4db6f25f8d", - ), + kalamdb_commons::models::TransactionId::new("01960f7b-3d15-7d6d-b26c-7e4db6f25f8d"), TableId::new(NamespaceId::from("ns"), TableName::from("items")), TableType::Shared, None,
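
The option_none_serializes_compactly test above pins the encoded size of a flexbuffer null root. The same check can be reproduced with plain serde types; this sketch assumes only the flexbuffers crate the codec already uses, substituting String for the TransactionId newtype:

fn main() {
    let none: Option<String> = None;
    let some = Some("01960f7b-3d15-7d6d-b26c-7e4db6f25f8d".to_string());
    let none_bytes = flexbuffers::to_vec(&none).expect("encode none");
    let some_bytes = flexbuffers::to_vec(&some).expect("encode some");
    // A flexbuffer null root encodes in just a few bytes, which is what the
    // <= 3 bound in the test pins down.
    assert!(none_bytes.len() <= 3);
    assert!(some_bytes.len() > none_bytes.len());
}

diff --git a/backend/crates/kalamdb-raft/src/commands/data_response.rs b/backend/crates/kalamdb-raft/src/commands/data_response.rs index 66a19ecd8..78843e8fc 100644 --- a/backend/crates/kalamdb-raft/src/commands/data_response.rs +++ b/backend/crates/kalamdb-raft/src/commands/data_response.rs @@ -62,11 +62,9 @@ impl DataResponse { pub fn committed_side_effect_counts(&self) -> Option<(usize, usize, usize)> { match self { - DataResponse::TransactionCommitted(result) => Some(( - result.notifications_sent, - result.manifest_updates, - result.publisher_events, - )), + DataResponse::TransactionCommitted(result) => { + Some((result.notifications_sent, result.manifest_updates, result.publisher_events)) + }, _ => None, } } diff --git a/backend/crates/kalamdb-raft/src/lib.rs b/backend/crates/kalamdb-raft/src/lib.rs index 621a0231f..087db122a 100644 --- a/backend/crates/kalamdb-raft/src/lib.rs +++ b/backend/crates/kalamdb-raft/src/lib.rs @@ -67,11 +67,11 @@ pub use manager::{ PeerNode, RaftGroup, RaftManager, RaftManagerConfig, SnapshotInfo, SnapshotsSummary, DEFAULT_SHARED_DATA_SHARDS, DEFAULT_USER_DATA_SHARDS, }; -pub use network::{start_rpc_server, RaftNetwork, RaftNetworkFactory, RaftService}; pub use network::{ - forward_sql_param, ClusterClient, ClusterMessageHandler, ClusterServiceImpl, - ForwardSqlParam, ForwardSqlRequest, ForwardSqlResponse, ForwardSqlResponsePayload, - GetNodeInfoRequest,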
GetNodeInfoResponse, NoOpClusterHandler, PingRequest, PingResponse, + forward_sql_param, ClusterClient, ClusterMessageHandler, ClusterServiceImpl, ForwardSqlParam, + ForwardSqlRequest, ForwardSqlResponse, ForwardSqlResponsePayload, GetNodeInfoRequest, + GetNodeInfoResponse, NoOpClusterHandler, PingRequest, PingResponse, }; +pub use network::{start_rpc_server, RaftNetwork, RaftNetworkFactory, RaftService}; pub use state_machine::{serde_helpers, ApplyResult, KalamStateMachine, StateMachineSnapshot}; pub use storage::{KalamNode, KalamRaftStorage, KalamTypeConfig}; diff --git a/backend/crates/kalamdb-raft/src/network/cluster_client.rs b/backend/crates/kalamdb-raft/src/network/cluster_client.rs index a9135356c..37d9b3500 100644 --- a/backend/crates/kalamdb-raft/src/network/cluster_client.rs +++ b/backend/crates/kalamdb-raft/src/network/cluster_client.rs @@ -9,8 +9,8 @@ use kalamdb_commons::models::NodeId; use super::cluster_service::cluster_client::ClusterServiceClient; use super::models::{ - ForwardSqlRequest, ForwardSqlResponse, GetNodeInfoRequest, GetNodeInfoResponse, - PingRequest, PingResponse, + ForwardSqlRequest, ForwardSqlResponse, GetNodeInfoRequest, GetNodeInfoResponse, PingRequest, + PingResponse, }; use crate::manager::RaftManager; use crate::{GroupId, RaftError}; diff --git a/backend/crates/kalamdb-raft/src/network/mod.rs b/backend/crates/kalamdb-raft/src/network/mod.rs index 4dccb3b8b..d129e7b24 100644 --- a/backend/crates/kalamdb-raft/src/network/mod.rs +++ b/backend/crates/kalamdb-raft/src/network/mod.rs @@ -38,6 +38,5 @@ pub use cluster_client::ClusterClient; pub use cluster_handler::{ClusterMessageHandler, ClusterServiceImpl, NoOpClusterHandler}; pub use models::{ forward_sql_param, ForwardSqlParam, ForwardSqlRequest, ForwardSqlResponse, - ForwardSqlResponsePayload, GetNodeInfoRequest, GetNodeInfoResponse, PingRequest, - PingResponse, + ForwardSqlResponsePayload, GetNodeInfoRequest, GetNodeInfoResponse, PingRequest, PingResponse, }; diff --git a/backend/crates/kalamdb-raft/src/network/models/mod.rs b/backend/crates/kalamdb-raft/src/network/models/mod.rs index 55f5fdbd8..8d35d4ed2 100644 --- a/backend/crates/kalamdb-raft/src/network/models/mod.rs +++ b/backend/crates/kalamdb-raft/src/network/models/mod.rs @@ -9,8 +9,8 @@ mod node_info; mod ping; pub use forward::{ - forward_sql_param, ForwardSqlParam, ForwardSqlRequest, ForwardSqlResponse, - ForwardSqlResponsePayload, + forward_sql_param, ForwardSqlParam, ForwardSqlRequest, ForwardSqlResponse, + ForwardSqlResponsePayload, }; pub use node_info::{GetNodeInfoRequest, GetNodeInfoResponse}; pub use ping::{PingRequest, PingResponse}; diff --git a/backend/crates/kalamdb-raft/src/state_machine/shared_data.rs b/backend/crates/kalamdb-raft/src/state_machine/shared_data.rs index e907763e6..048145cd3 100644 --- a/backend/crates/kalamdb-raft/src/state_machine/shared_data.rs +++ b/backend/crates/kalamdb-raft/src/state_machine/shared_data.rs @@ -395,9 +395,7 @@ impl SharedDataStateMachine { }; let Some(applier) = applier else { - return Ok(DataResponse::error( - "No applier set, transaction commit not persisted", - )); + return Ok(DataResponse::error("No applier set, transaction commit not persisted")); }; match applier.apply_transaction_batch(&transaction_id, &mutations).await { @@ -546,11 +544,11 @@ impl KalamStateMachine for SharedDataStateMachine { mod tests { use super::*; use async_trait::async_trait; - use std::collections::BTreeMap; + use kalamdb_commons::models::rows::Row; use kalamdb_commons::models::NamespaceId; use 
kalamdb_commons::models::OperationKind; - use kalamdb_commons::models::rows::Row; use kalamdb_commons::TableId; + use std::collections::BTreeMap; struct TransactionBatchSharedApplier; diff --git a/backend/crates/kalamdb-raft/src/state_machine/user_data.rs b/backend/crates/kalamdb-raft/src/state_machine/user_data.rs index aafcecf76..20d3bc09b 100644 --- a/backend/crates/kalamdb-raft/src/state_machine/user_data.rs +++ b/backend/crates/kalamdb-raft/src/state_machine/user_data.rs @@ -313,7 +313,10 @@ impl UserDataStateMachine { } } - async fn apply_decoded_command(&self, cmd: UserApplyCommand) -> Result { + async fn apply_decoded_command( + &self, + cmd: UserApplyCommand, + ) -> Result { match cmd { UserApplyCommand::User(command) => self.apply_command(command).await, UserApplyCommand::TransactionCommit { @@ -343,9 +346,7 @@ impl UserDataStateMachine { }; let Some(applier) = applier else { - return Ok(DataResponse::error( - "No applier set, transaction commit not persisted", - )); + return Ok(DataResponse::error("No applier set, transaction commit not persisted")); }; match applier.apply_transaction_batch(&transaction_id, &mutations).await { @@ -489,11 +490,11 @@ impl KalamStateMachine for UserDataStateMachine { mod tests { use super::*; use async_trait::async_trait; - use std::collections::BTreeMap; + use kalamdb_commons::models::rows::Row; use kalamdb_commons::models::NamespaceId; use kalamdb_commons::models::OperationKind; - use kalamdb_commons::models::rows::Row; use kalamdb_commons::{TableId, UserId}; + use std::collections::BTreeMap; struct TransactionBatchUserApplier; diff --git a/backend/crates/kalamdb-streams/src/file_store.rs b/backend/crates/kalamdb-streams/src/file_store.rs index 0ab267fc8..85b8ca87a 100644 --- a/backend/crates/kalamdb-streams/src/file_store.rs +++ b/backend/crates/kalamdb-streams/src/file_store.rs @@ -335,12 +335,7 @@ impl FileStreamLogStore { // Use `entry` API so a concurrent creation by another thread is // handled correctly — whichever was inserted first wins. - Ok(self - .segments - .entry(path.to_path_buf()) - .or_insert(writer) - .value() - .clone()) + Ok(self.segments.entry(path.to_path_buf()).or_insert(writer).value().clone()) } /// Serialise `record` and write the length-prefixed frame to `writer`. @@ -352,9 +347,7 @@ impl FileStreamLogStore { writer .write_all(&len.to_le_bytes()) .map_err(|e| StreamLogError::Io(e.to_string()))?; - writer - .write_all(&payload) - .map_err(|e| StreamLogError::Io(e.to_string()))?; + writer.write_all(&payload).map_err(|e| StreamLogError::Io(e.to_string()))?; Ok(()) } @@ -536,10 +529,7 @@ impl StreamLogStore for FileStreamLogStore { let ts = row_id.seq().timestamp_millis(); let window_start = self.window_start_ms(ts); let path = self.log_path(user_id, window_start); - by_segment - .entry(path) - .or_default() - .push(StreamLogRecord::Put { row_id, row }); + by_segment.entry(path).or_default().push(StreamLogRecord::Put { row_id, row }); } for (path, records) in by_segment { @@ -794,9 +784,7 @@ mod tests { for i in 0..num_users { let user_id = UserId::new(format!("user-{}", i)); - let rows = store - .read_with_limit(&table_id, &user_id, writes_per_user + 1) - .unwrap(); + let rows = store.read_with_limit(&table_id, &user_id, writes_per_user + 1).unwrap(); assert_eq!( rows.len(), writes_per_user, @@ -841,9 +829,7 @@ mod tests { assert_eq!(store.open_segment_count(), 0); // Data should still be readable from disk. 
- let read = store - .read_with_limit(&table_id, &user_id, 10) - .unwrap(); + let read = store.read_with_limit(&table_id, &user_id, 10).unwrap(); assert_eq!(read.len(), 1); let _ = fs::remove_dir_all(&base_dir); diff --git a/backend/crates/kalamdb-system/src/providers/live/mod.rs b/backend/crates/kalamdb-system/src/providers/live/mod.rs index 6506028ab..863f154c2 100644 --- a/backend/crates/kalamdb-system/src/providers/live/mod.rs +++ b/backend/crates/kalamdb-system/src/providers/live/mod.rs @@ -4,4 +4,4 @@ pub mod models; -pub use models::{LiveQuery, LiveQueryStatus}; \ No newline at end of file +pub use models::{LiveQuery, LiveQueryStatus}; diff --git a/backend/crates/kalamdb-system/src/providers/live/models/live_query.rs b/backend/crates/kalamdb-system/src/providers/live/models/live_query.rs index 9783f28f3..2ddbbcf7d 100644 --- a/backend/crates/kalamdb-system/src/providers/live/models/live_query.rs +++ b/backend/crates/kalamdb-system/src/providers/live/models/live_query.rs @@ -239,4 +239,4 @@ mod tests { let deserialized: LiveQuery = serde_json::from_slice(&bytes).unwrap(); assert_eq!(live_query, deserialized); } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-system/src/providers/live/models/live_query_status.rs b/backend/crates/kalamdb-system/src/providers/live/models/live_query_status.rs index 2e2890c18..5001e1da3 100644 --- a/backend/crates/kalamdb-system/src/providers/live/models/live_query_status.rs +++ b/backend/crates/kalamdb-system/src/providers/live/models/live_query_status.rs @@ -86,10 +86,7 @@ mod tests { fn test_status_from_str() { assert_eq!("active".parse::<LiveQueryStatus>().unwrap(), LiveQueryStatus::Active); assert_eq!("paused".parse::<LiveQueryStatus>().unwrap(), LiveQueryStatus::Paused); - assert_eq!( - "completed".parse::<LiveQueryStatus>().unwrap(), - LiveQueryStatus::Completed - ); + assert_eq!("completed".parse::<LiveQueryStatus>().unwrap(), LiveQueryStatus::Completed); assert_eq!("error".parse::<LiveQueryStatus>().unwrap(), LiveQueryStatus::Error); assert_eq!("ACTIVE".parse::<LiveQueryStatus>().unwrap(), LiveQueryStatus::Active); @@ -105,4 +102,4 @@ assert_eq!(String::from(&LiveQueryStatus::Completed), "completed"); assert_eq!(String::from(&LiveQueryStatus::Error), "error"); } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-system/src/providers/live/models/mod.rs b/backend/crates/kalamdb-system/src/providers/live/models/mod.rs index 20a78d138..c87d18d31 100644 --- a/backend/crates/kalamdb-system/src/providers/live/models/mod.rs +++ b/backend/crates/kalamdb-system/src/providers/live/models/mod.rs @@ -6,4 +6,4 @@ mod live_query; mod live_query_status; pub use live_query::LiveQuery; -pub use live_query_status::LiveQueryStatus; \ No newline at end of file +pub use live_query_status::LiveQueryStatus;
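
The assertions above (including the uppercase "ACTIVE" case) imply a case-insensitive FromStr on the status enum. A self-contained sketch of that shape, which may differ from the crate's real error type:

use std::str::FromStr;

#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum LiveQueryStatus { Active, Paused, Completed, Error }

impl FromStr for LiveQueryStatus {
    type Err = String;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Lowercasing first is what makes "ACTIVE" and "active" equivalent.
        match s.to_ascii_lowercase().as_str() {
            "active" => Ok(Self::Active),
            "paused" => Ok(Self::Paused),
            "completed" => Ok(Self::Completed),
            "error" => Ok(Self::Error),
            other => Err(format!("unknown live query status: {other}")),
        }
    }
}

fn main() {
    assert_eq!("ACTIVE".parse::<LiveQueryStatus>().unwrap(), LiveQueryStatus::Active);
    assert!("bogus".parse::<LiveQueryStatus>().is_err());
}

diff --git a/backend/crates/kalamdb-tables/src/shared_tables/shared_table_provider.rs b/backend/crates/kalamdb-tables/src/shared_tables/shared_table_provider.rs index 7ff8f5447..4facec162 100644 --- a/backend/crates/kalamdb-tables/src/shared_tables/shared_table_provider.rs +++ b/backend/crates/kalamdb-tables/src/shared_tables/shared_table_provider.rs @@ -32,8 +32,8 @@ use datafusion::scalar::ScalarValue; use kalamdb_commons::conversions::arrow_json_conversion::{coerce_rows, coerce_updates}; use kalamdb_commons::ids::SharedTableRowId; use kalamdb_commons::models::datatypes::KalamDataType; -use kalamdb_commons::models::OperationKind; use kalamdb_commons::models::rows::Row; +use kalamdb_commons::models::OperationKind; use kalamdb_commons::models::UserId; use kalamdb_commons::websocket::ChangeNotification; use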
kalamdb_commons::NotLeaderError; @@ -200,7 +200,8 @@ impl SharedTableProvider { if self.core.services.cluster_coordinator.is_cluster_mode().await { let is_leader = self.core.services.cluster_coordinator.is_leader_for_shared().await; if !is_leader { - let leader_addr = self.core.services.cluster_coordinator.leader_addr_for_shared().await; + let leader_addr = + self.core.services.cluster_coordinator.leader_addr_for_shared().await; return Err(KalamDbError::NotLeader { leader_addr }); } } @@ -214,14 +215,12 @@ impl SharedTableProvider { return Ok(()); } - self.ensure_shared_write_leader() - .await - .map_err(|error| match error { - KalamDbError::NotLeader { leader_addr } => { - DataFusionError::External(Box::new(NotLeaderError::new(leader_addr))) - }, - other => DataFusionError::Execution(other.to_string()), - }) + self.ensure_shared_write_leader().await.map_err(|error| match error { + KalamDbError::NotLeader { leader_addr } => { + DataFusionError::External(Box::new(NotLeaderError::new(leader_addr))) + }, + other => DataFusionError::Execution(other.to_string()), + }) } async fn stage_vector_upsert( @@ -371,10 +370,9 @@ impl SharedTableProvider { )) })?; row._commit_seq = commit_seq; - self.store - .insert_async(*row_key, row) - .await - .map_err(|e| KalamDbError::InvalidOperation(format!("Failed to patch commit_seq: {}", e))) + self.store.insert_async(*row_key, row).await.map_err(|e| { + KalamDbError::InvalidOperation(format!("Failed to patch commit_seq: {}", e)) + }) } pub async fn patch_latest_commit_seq_by_pk( @@ -383,14 +381,12 @@ impl SharedTableProvider { commit_seq: u64, ) -> Result { let schema = self.schema_ref(); - let pk_field = schema - .field_with_name(self.primary_key_field_name()) - .map_err(|e| KalamDbError::InvalidOperation(format!("PK column lookup failed: {}", e)))?; - let pk_scalar = kalamdb_commons::conversions::parse_string_as_scalar( - pk_value, - pk_field.data_type(), - ) - .map_err(KalamDbError::InvalidOperation)?; + let pk_field = schema.field_with_name(self.primary_key_field_name()).map_err(|e| { + KalamDbError::InvalidOperation(format!("PK column lookup failed: {}", e)) + })?; + let pk_scalar = + kalamdb_commons::conversions::parse_string_as_scalar(pk_value, pk_field.data_type()) + .map_err(KalamDbError::InvalidOperation)?; let Some((row_key, _)) = self.latest_hot_pk_entry(&pk_scalar).await? 
else { return Ok(false); @@ -1163,78 +1159,79 @@ impl BaseTableProvider for SharedTableProvider if snapshot_commit_seq.is_none() { if let Some(expr) = filter { if let Some(pk_literal) = base::extract_pk_equality_literal(expr, pk_name) { - // Coerce the literal to the PK column's Arrow data type - let pk_field = schema.field_with_name(pk_name).ok(); - let pk_scalar = if let Some(field) = pk_field { - kalamdb_commons::conversions::parse_string_as_scalar( - &pk_literal.to_string(), - field.data_type(), - ) - .ok() - .unwrap_or(pk_literal) - } else { - pk_literal - }; + // Coerce the literal to the PK column's Arrow data type + let pk_field = schema.field_with_name(pk_name).ok(); + let pk_scalar = if let Some(field) = pk_field { + kalamdb_commons::conversions::parse_string_as_scalar( + &pk_literal.to_string(), + field.data_type(), + ) + .ok() + .unwrap_or(pk_literal) + } else { + pk_literal + }; - // Try hot storage PK index (O(1)) - let found = self.find_by_pk(&pk_scalar).await?; - if let Some((row_id, row)) = found { - log::debug!( - "[SharedProvider] PK fast-path hit for {}={}, _seq={}", - pk_name, - pk_scalar, - row_id.as_i64() - ); - return crate::utils::base::rows_to_arrow_batch( - &schema, - vec![(row_id, row)], - projection, - |_, _| {}, - ); - } + // Try hot storage PK index (O(1)) + let found = self.find_by_pk(&pk_scalar).await?; + if let Some((row_id, row)) = found { + log::debug!( + "[SharedProvider] PK fast-path hit for {}={}, _seq={}", + pk_name, + pk_scalar, + row_id.as_i64() + ); + return crate::utils::base::rows_to_arrow_batch( + &schema, + vec![(row_id, row)], + projection, + |_, _| {}, + ); + } - // Not in hot storage — check if it is tombstoned before trying cold storage. - // A tombstone in hot storage means the row was deleted; falling back to Parquet - // would surface a stale version and violate MVCC visibility rules. - if self.pk_tombstoned_in_hot(&pk_scalar).await? { - log::debug!( - "[SharedProvider] PK fast-path tombstone for {}={}", - pk_name, - pk_scalar - ); - return crate::utils::base::rows_to_arrow_batch( - &schema, - Vec::<(SharedTableRowId, SharedTableRow)>::new(), - projection, - |_, _| {}, - ); - } + // Not in hot storage — check if it is tombstoned before trying cold storage. + // A tombstone in hot storage means the row was deleted; falling back to Parquet + // would surface a stale version and violate MVCC visibility rules. + if self.pk_tombstoned_in_hot(&pk_scalar).await? 
{ + log::debug!( + "[SharedProvider] PK fast-path tombstone for {}={}", + pk_name, + pk_scalar + ); + return crate::utils::base::rows_to_arrow_batch( + &schema, + Vec::<(SharedTableRowId, SharedTableRow)>::new(), + projection, + |_, _| {}, + ); + } - // Not in hot storage — check cold storage via manifest-based lookup - let cold_found = base::find_row_by_pk(self, None, &pk_scalar.to_string()).await?; - if let Some((row_id, row)) = cold_found { - log::debug!( - "[SharedProvider] PK fast-path cold hit for {}={}", - pk_name, - pk_scalar - ); + // Not in hot storage — check cold storage via manifest-based lookup + let cold_found = + base::find_row_by_pk(self, None, &pk_scalar.to_string()).await?; + if let Some((row_id, row)) = cold_found { + log::debug!( + "[SharedProvider] PK fast-path cold hit for {}={}", + pk_name, + pk_scalar + ); + return crate::utils::base::rows_to_arrow_batch( + &schema, + vec![(row_id, row)], + projection, + |_, _| {}, + ); + } + + // PK not found anywhere — return empty batch + log::debug!("[SharedProvider] PK fast-path miss for {}={}", pk_name, pk_scalar); return crate::utils::base::rows_to_arrow_batch( &schema, - vec![(row_id, row)], + Vec::<(SharedTableRowId, SharedTableRow)>::new(), projection, |_, _| {}, ); } - - // PK not found anywhere — return empty batch - log::debug!("[SharedProvider] PK fast-path miss for {}={}", pk_name, pk_scalar); - return crate::utils::base::rows_to_arrow_batch( - &schema, - Vec::<(SharedTableRowId, SharedTableRow)>::new(), - projection, - |_, _| {}, - ); - } } } @@ -1492,10 +1489,7 @@ impl SharedTableProvider { let has_topics = self.core.has_topic_routes(&table_id); let has_live_subs = notification_service.has_subscribers(None, &table_id); let notification = if has_topics || has_live_subs { - Some(ChangeNotification::insert( - table_id, - Self::build_notification_row(&entity), - )) + Some(ChangeNotification::insert(table_id, Self::build_notification_row(&entity))) } else { None }; @@ -1573,12 +1567,11 @@ impl SharedTableProvider { .collect(); let store = self.store.clone(); - let hot_duplicate = tokio::task::spawn_blocking( - move || -> Result, KalamDbError> { + let hot_duplicate = + tokio::task::spawn_blocking(move || -> Result, KalamDbError> { for (pk_str, prefix) in &pk_prefixes { - if let Some((_row_id, row)) = store - .get_latest_by_index_prefix(0, prefix) - .map_err(|e| { + if let Some((_row_id, row)) = + store.get_latest_by_index_prefix(0, prefix).map_err(|e| { KalamDbError::InvalidOperation(format!( "PK index scan failed: {}", e @@ -1591,10 +1584,11 @@ impl SharedTableProvider { } } Ok(None) - }, - ) - .await - .map_err(|e| KalamDbError::InvalidOperation(format!("spawn_blocking error: {}", e)))??; + }) + .await + .map_err(|e| { + KalamDbError::InvalidOperation(format!("spawn_blocking error: {}", e)) + })??; if let Some(dup_pk) = hot_duplicate { return Err(KalamDbError::AlreadyExists(format!( @@ -1769,8 +1763,7 @@ impl SharedTableProvider { rows: Vec, ) -> Result)>, KalamDbError> { let commit_seq = self.core.services.commit_sequence_source.allocate_next(); - self.insert_batch_deferred_prevalidated_with_commit_seq(rows, commit_seq) - .await + self.insert_batch_deferred_prevalidated_with_commit_seq(rows, commit_seq).await } pub async fn insert_batch_deferred_prevalidated_with_commit_seq( @@ -1787,9 +1780,7 @@ impl SharedTableProvider { deferred_side_effects = true ); async move { - let entries = self - .persist_insert_batch_rows(rows, false, commit_seq) - .await?; + let entries = self.persist_insert_batch_rows(rows, false, 
commit_seq).await?; let notification_service = self.core.services.notification_service.clone(); let table_id = self.core.table_id().clone(); @@ -1830,8 +1821,9 @@ ); async move { let schema = self.schema(); - let updates = coerce_updates(updates, &schema) - .map_err(|e| KalamDbError::InvalidOperation(format!("Schema coercion failed: {}", e)))?; + let updates = coerce_updates(updates, &schema).map_err(|e| { + KalamDbError::InvalidOperation(format!("Schema coercion failed: {}", e)) + })?; let pk_name = self.primary_key_field_name().to_string(); let pk_field = schema.field_with_name(&pk_name).map_err(|e| { @@ -2101,9 +2093,8 @@ impl TableProvider for SharedTableProvider { let Some(transaction_query_context) = extract_transaction_query_context(state) else { return self.base_scan(state, projection, filters, limit).await; }; - let Some(table_overlay) = transaction_query_context - .overlay_view - .overlay_for_table(self.core.table_id()) + let Some(table_overlay) = + transaction_query_context.overlay_view.overlay_for_table(self.core.table_id()) else { return self.base_scan(state, projection, filters, limit).await; }; @@ -2114,12 +2105,7 @@ self.primary_key_field_name(), )?; let base_plan = self - .base_scan( - state, - overlay_projection.effective_projection.as_ref(), - filters, - limit, - ) + .base_scan(state, overlay_projection.effective_projection.as_ref(), filters, limit) .await?; Ok(Arc::new(TransactionOverlayExec::try_new( @@ -2209,7 +2195,8 @@ let commit_seq = transaction_query_context .is_none() .then(|| self.core.services.commit_sequence_source.allocate_next()); - let mut staged_mutations = transaction_query_context.map(|_| Vec::with_capacity(rows.len())); + let mut staged_mutations = + transaction_query_context.map(|_| Vec::with_capacity(rows.len())); for row in rows { let pk_value = crate::utils::datafusion_dml::extract_pk_value(&row, &pk_column)?; @@ -2306,7 +2293,8 @@ let commit_seq = transaction_query_context .is_none() .then(|| self.core.services.commit_sequence_source.allocate_next()); - let mut staged_mutations = transaction_query_context.map(|_| Vec::with_capacity(rows.len())); + let mut staged_mutations = + transaction_query_context.map(|_| Vec::with_capacity(rows.len())); for row in rows { let pk_value = crate::utils::datafusion_dml::extract_pk_value(&row, &pk_column)?; @@ -2340,11 +2328,7 @@ } let result = self - .update_by_pk_value( - base::system_user_id(), - &pk_value, - evaluated_updates, - ) + .update_by_pk_value(base::system_user_id(), &pk_value, evaluated_updates) .await .map_err(|e| DataFusionError::Execution(e.to_string()))?; if let Some(row_key) = result {
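
The reindented PK fast-path earlier in this file encodes a strict lookup order: hot storage first, then a tombstone check that must short-circuit before any cold (Parquet) fallback, exactly as the MVCC comment in the hunk explains. A toy model of that visibility rule, using hypothetical in-memory maps in place of the real hot and cold stores:

use std::collections::HashMap;

struct Versioned { value: String, deleted: bool }

struct Table {
    hot: HashMap<String, Versioned>, // latest unflushed version per PK
    cold: HashMap<String, String>,   // older versions persisted to cold storage
}

impl Table {
    fn find(&self, pk: &str) -> Option<&str> {
        if let Some(v) = self.hot.get(pk) {
            // A hot tombstone must shadow cold data: falling through to the
            // cold store would resurrect a stale committed version.
            return if v.deleted { None } else { Some(v.value.as_str()) };
        }
        self.cold.get(pk).map(String::as_str)
    }
}

fn main() {
    let mut t = Table { hot: HashMap::new(), cold: HashMap::new() };
    t.cold.insert("1".into(), "old".into());
    assert_eq!(t.find("1"), Some("old"));
    t.hot.insert("1".into(), Versioned { value: String::new(), deleted: true });
    assert_eq!(t.find("1"), None); // tombstone wins over the cold hit
}

diff --git a/backend/crates/kalamdb-tables/src/user_tables/user_table_provider.rs b/backend/crates/kalamdb-tables/src/user_tables/user_table_provider.rs index 9be641481..93dbaa12d 100644 --- a/backend/crates/kalamdb-tables/src/user_tables/user_table_provider.rs +++ b/backend/crates/kalamdb-tables/src/user_tables/user_table_provider.rs @@ -44,9 +44,7 @@ use kalamdb_store::EntityStore; use kalamdb_transactions::{ extract_transaction_query_context, StagedMutation, TransactionOverlayExec, }; -use kalamdb_vector::{ - new_indexed_user_vector_hot_store, UserVectorHotOpId, UserVectorHotStore, -}; +use kalamdb_vector::{new_indexed_user_vector_hot_store, UserVectorHotOpId, UserVectorHotStore}; use std::any::Any; use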
std::collections::{HashMap, HashSet}; use std::sync::Arc; @@ -246,9 +244,7 @@ impl UserTableProvider { let entries = vec![(row_key, entity)]; store .insert_batch_preencoded(&entries, encoded_values) - .map_err(|e| { - KalamDbError::InvalidOperation(format!("{}: {}", error_context, e)) - }) + .map_err(|e| KalamDbError::InvalidOperation(format!("{}: {}", error_context, e))) }) .await .map_err(|e| KalamDbError::InvalidOperation(format!("spawn_blocking error: {}", e)))??; @@ -271,7 +267,8 @@ for row_data in rows { if let Some(pk_value) = row_data.get(pk_name) { if !matches!(pk_value, ScalarValue::Null) { - let pk_str = crate::utils::unified_dml::extract_user_pk_value(row_data, pk_name)?; + let pk_str = + crate::utils::unified_dml::extract_user_pk_value(row_data, pk_name)?; if !seen_batch_pks.insert(pk_str.clone()) { return Err(KalamDbError::AlreadyExists(format!( "Primary key violation: value '{}' appears multiple times in the insert batch for column '{}'", @@ -295,23 +292,25 @@ .collect(); let store = self.store.clone(); - let hot_duplicate = tokio::task::spawn_blocking(move || -> Result<Option<String>, KalamDbError> { - for (pk_str, prefix) in &pk_prefixes { - if let Some((_row_id, row)) = store - .get_latest_by_index_prefix(0, prefix) - .map_err(|e| { - KalamDbError::InvalidOperation(format!("PK index scan failed: {}", e)) - })? - { - if !row._deleted { - return Ok(Some(pk_str.clone())); + let hot_duplicate = + tokio::task::spawn_blocking(move || -> Result<Option<String>, KalamDbError> { + for (pk_str, prefix) in &pk_prefixes { + if let Some((_row_id, row)) = + store.get_latest_by_index_prefix(0, prefix).map_err(|e| { + KalamDbError::InvalidOperation(format!("PK index scan failed: {}", e)) + })? + { + if !row._deleted { + return Ok(Some(pk_str.clone())); + } } } - } - Ok(None) - }) - .await - .map_err(|e| KalamDbError::InvalidOperation(format!("spawn_blocking error: {}", e)))??; + Ok(None) + }) + .await + .map_err(|e| { + KalamDbError::InvalidOperation(format!("spawn_blocking error: {}", e)) + })??; if let Some(dup_pk) = hot_duplicate { return Err(KalamDbError::AlreadyExists(format!( @@ -401,8 +400,7 @@ let entries = tokio::task::spawn_blocking( move || -> Result, KalamDbError> { - let encode_input: Vec<&UserTableRow> = - entries.iter().map(|(_, row)| row).collect(); + let encode_input: Vec<&UserTableRow> = entries.iter().map(|(_, row)| row).collect(); let encoded_values = kalamdb_commons::serialization::row_codec::batch_encode_user_table_row_refs( &encode_input, @@ -520,10 +518,9 @@ )) })?; row._commit_seq = commit_seq; - self.store - .insert_async(row_key.clone(), row) - .await - .map_err(|e| KalamDbError::InvalidOperation(format!("Failed to patch commit_seq: {}", e))) + self.store.insert_async(row_key.clone(), row).await.map_err(|e| { + KalamDbError::InvalidOperation(format!("Failed to patch commit_seq: {}", e)) + }) } pub async fn patch_latest_commit_seq_by_pk( @@ -533,14 +530,12 @@ commit_seq: u64, ) -> Result<bool, KalamDbError> { let schema = self.schema_ref(); - let pk_field = schema - .field_with_name(self.primary_key_field_name()) - .map_err(|e| KalamDbError::InvalidOperation(format!("PK column lookup failed: {}", e)))?; - let pk_scalar = kalamdb_commons::conversions::parse_string_as_scalar( - pk_value, - pk_field.data_type(), - ) - .map_err(KalamDbError::InvalidOperation)?; + let pk_field = schema.field_with_name(self.primary_key_field_name()).map_err(|e| { + KalamDbError::InvalidOperation(format!("PK
column lookup failed: {}", e)) + })?; + let pk_scalar = + kalamdb_commons::conversions::parse_string_as_scalar(pk_value, pk_field.data_type()) + .map_err(KalamDbError::InvalidOperation)?; let Some((row_key, _)) = self.latest_hot_pk_entry(user_id, &pk_scalar).await? else { return Ok(false); @@ -804,7 +799,8 @@ impl BaseTableProvider for UserTableProvider { // Use the same preencoded append path as batch inserts so single-row // MVCC writes and batch writes stay consistent. - self.append_hot_row(&row_key, &entity, "Failed to insert user table row").await?; + self.append_hot_row(&row_key, &entity, "Failed to insert user table row") + .await?; log::debug!( "Inserted user table row for user {} with _seq {}", @@ -887,8 +883,7 @@ impl BaseTableProvider for UserTableProvider { rows: Vec, ) -> Result, KalamDbError> { let commit_seq = self.core.services.commit_sequence_source.allocate_next(); - self.insert_batch_with_commit_seq(user_id, rows, commit_seq) - .await + self.insert_batch_with_commit_seq(user_id, rows, commit_seq).await } async fn update( @@ -1036,7 +1031,8 @@ impl BaseTableProvider for UserTableProvider { fields: new_fields, }; let row_key = UserTableRowId::new(user_id.clone(), seq_id); - self.append_hot_row(&row_key, &entity, "Failed to update user table row").await?; + self.append_hot_row(&row_key, &entity, "Failed to update user table row") + .await?; if let Err(e) = self.stage_vector_upsert(user_id, seq_id, &entity.fields).await { log::warn!( @@ -1204,7 +1200,8 @@ impl BaseTableProvider for UserTableProvider { pk_value, seq_id.as_i64() ); - self.append_hot_row(&row_key, &entity, "Failed to delete user table row").await?; + self.append_hot_row(&row_key, &entity, "Failed to delete user table row") + .await?; if let Err(e) = self.stage_vector_delete(user_id, seq_id, pk_value).await { log::warn!( @@ -1369,9 +1366,8 @@ impl BaseTableProvider for UserTableProvider { if !allow_all_users { if let Some(proj) = projection { if proj.is_empty() && filter.is_none() { - let count = self - .count_resolved_rows_async(user_id, snapshot_commit_seq) - .await?; + let count = + self.count_resolved_rows_async(user_id, snapshot_commit_seq).await?; return base::build_count_only_batch(count); } } @@ -1645,7 +1641,8 @@ impl UserTableProvider { }; let row_key = UserTableRowId::new(user_id.clone(), seq_id); - self.append_hot_row(&row_key, &entity, "Failed to insert user table row").await?; + self.append_hot_row(&row_key, &entity, "Failed to insert user table row") + .await?; if let Err(e) = self.stage_vector_upsert(user_id, seq_id, &entity.fields).await { log::warn!( @@ -1672,10 +1669,7 @@ impl UserTableProvider { let has_topics = self.core.has_topic_routes(&table_id); let has_live_subs = notification_service.has_subscribers(Some(user_id), &table_id); let notification = if has_topics || has_live_subs { - Some(ChangeNotification::insert( - table_id, - Self::build_notification_row(&entity), - )) + Some(ChangeNotification::insert(table_id, Self::build_notification_row(&entity))) } else { None }; @@ -1716,9 +1710,7 @@ impl UserTableProvider { row_count ); async move { - let entries = self - .persist_insert_batch_rows(user_id, rows, true, commit_seq) - .await?; + let entries = self.persist_insert_batch_rows(user_id, rows, true, commit_seq).await?; let row_keys: Vec = entries.iter().map(|(row_key, _)| row_key.clone()).collect(); @@ -1786,9 +1778,7 @@ impl UserTableProvider { deferred_side_effects = true ); async move { - let entries = self - .persist_insert_batch_rows(user_id, rows, false, commit_seq) - .await?; + 
let entries = self.persist_insert_batch_rows(user_id, rows, false, commit_seq).await?; let notification_service = self.core.services.notification_service.clone(); let table_id = self.core.table_id().clone(); @@ -1830,8 +1820,9 @@ impl UserTableProvider { ); async move { let schema = self.schema(); - let updates = coerce_updates(updates, &schema) - .map_err(|e| KalamDbError::InvalidOperation(format!("Schema coercion failed: {}", e)))?; + let updates = coerce_updates(updates, &schema).map_err(|e| { + KalamDbError::InvalidOperation(format!("Schema coercion failed: {}", e)) + })?; let pk_name = self.primary_key_field_name().to_string(); let pk_field = schema.field_with_name(&pk_name).map_err(|e| { @@ -1896,7 +1887,8 @@ impl UserTableProvider { fields: new_fields, }; let row_key = UserTableRowId::new(user_id.clone(), seq_id); - self.append_hot_row(&row_key, &entity, "Failed to update user table row").await?; + self.append_hot_row(&row_key, &entity, "Failed to update user table row") + .await?; if let Err(e) = self.stage_vector_upsert(user_id, seq_id, &entity.fields).await { log::warn!( @@ -1994,7 +1986,8 @@ impl UserTableProvider { fields: Row::new(values), }; let row_key = UserTableRowId::new(user_id.clone(), seq_id); - self.append_hot_row(&row_key, &entity, "Failed to delete user table row").await?; + self.append_hot_row(&row_key, &entity, "Failed to delete user table row") + .await?; if let Err(e) = self.stage_vector_delete(user_id, seq_id, pk_value).await { log::warn!( @@ -2092,9 +2085,8 @@ impl TableProvider for UserTableProvider { let Some(transaction_query_context) = extract_transaction_query_context(state) else { return self.base_scan(state, projection, filters, limit).await; }; - let Some(table_overlay) = transaction_query_context - .overlay_view - .overlay_for_table(self.core.table_id()) + let Some(table_overlay) = + transaction_query_context.overlay_view.overlay_for_table(self.core.table_id()) else { return self.base_scan(state, projection, filters, limit).await; }; @@ -2107,12 +2099,7 @@ impl TableProvider for UserTableProvider { self.primary_key_field_name(), )?; let base_plan = self - .base_scan( - state, - overlay_projection.effective_projection.as_ref(), - filters, - limit, - ) + .base_scan(state, overlay_projection.effective_projection.as_ref(), filters, limit) .await?; Ok(Arc::new(TransactionOverlayExec::try_new( @@ -2202,7 +2189,8 @@ impl TableProvider for UserTableProvider { let commit_seq = transaction_query_context .is_none() .then(|| self.core.services.commit_sequence_source.allocate_next()); - let mut staged_mutations = transaction_query_context.map(|_| Vec::with_capacity(rows.len())); + let mut staged_mutations = + transaction_query_context.map(|_| Vec::with_capacity(rows.len())); for row in rows { let pk_value = crate::utils::datafusion_dml::extract_pk_value(&row, &pk_column)?; @@ -2297,7 +2285,8 @@ impl TableProvider for UserTableProvider { let commit_seq = transaction_query_context .is_none() .then(|| self.core.services.commit_sequence_source.allocate_next()); - let mut staged_mutations = transaction_query_context.map(|_| Vec::with_capacity(rows.len())); + let mut staged_mutations = + transaction_query_context.map(|_| Vec::with_capacity(rows.len())); for row in rows { let pk_value = crate::utils::datafusion_dml::extract_pk_value(&row, &pk_column)?; @@ -2331,11 +2320,7 @@ impl TableProvider for UserTableProvider { } let result = self - .update_by_pk_value( - user_id, - &pk_value, - evaluated_updates, - ) + .update_by_pk_value(user_id, &pk_value, evaluated_updates) 
.await .map_err(|e| DataFusionError::Execution(e.to_string()))?; if let Some(row_key) = result { diff --git a/backend/crates/kalamdb-tables/src/utils/base.rs b/backend/crates/kalamdb-tables/src/utils/base.rs index b4d71a6f0..d1cbd6cd4 100644 --- a/backend/crates/kalamdb-tables/src/utils/base.rs +++ b/backend/crates/kalamdb-tables/src/utils/base.rs @@ -79,9 +79,9 @@ use kalamdb_commons::NotLeaderError; use kalamdb_commons::{StorageKey, TableId}; use kalamdb_filestore::registry::ListResult; use kalamdb_system::ClusterCoordinator as ClusterCoordinatorTrait; -use kalamdb_transactions::{extract_transaction_query_context, TransactionAccessError}; use kalamdb_system::Manifest; use kalamdb_system::SchemaRegistry as SchemaRegistryTrait; +use kalamdb_transactions::{extract_transaction_query_context, TransactionAccessError}; use std::collections::{HashMap, HashSet}; use std::sync::Arc; @@ -409,9 +409,7 @@ pub trait BaseTableProvider: Send + Sync + TableProvider { limit: Option<usize>, ) -> DataFusionResult<Arc<dyn ExecutionPlan>> { self.validate_transaction_table_access(state)?; - self.ensure_leader_read(state) - .await - .map_err(kalam_error_to_datafusion)?; + self.ensure_leader_read(state).await.map_err(kalam_error_to_datafusion)?; let _ = limit; @@ -1642,10 +1640,7 @@ pub fn extract_embedding_vector(value: &ScalarValue, expected_dimensions: u32) - pub fn build_notification_row(fields: &Row, seq: SeqId, commit_seq: u64, deleted: bool) -> Row { let mut values = fields.values.clone(); values.insert(SystemColumnNames::SEQ.to_string(), ScalarValue::Int64(Some(seq.as_i64()))); - values.insert( - SystemColumnNames::COMMIT_SEQ.to_string(), - ScalarValue::UInt64(Some(commit_seq)), - ); + values.insert(SystemColumnNames::COMMIT_SEQ.to_string(), ScalarValue::UInt64(Some(commit_seq))); values.insert(SystemColumnNames::DELETED.to_string(), ScalarValue::Boolean(Some(deleted))); Row::new(values) } diff --git a/backend/crates/kalamdb-tables/src/utils/datafusion_dml.rs b/backend/crates/kalamdb-tables/src/utils/datafusion_dml.rs index fd4f93f10..31637f2e3 100644 --- a/backend/crates/kalamdb-tables/src/utils/datafusion_dml.rs +++ b/backend/crates/kalamdb-tables/src/utils/datafusion_dml.rs @@ -22,8 +22,7 @@ use kalamdb_commons::models::UserId; use kalamdb_commons::NotLeaderError; use kalamdb_commons::{TableId, TableType}; use kalamdb_transactions::{ - build_insert_staged_mutations, StagedMutation, TransactionAccessError, - TransactionQueryContext, + build_insert_staged_mutations, StagedMutation, TransactionAccessError, TransactionQueryContext, }; use std::collections::{BTreeMap, HashSet}; use std::sync::Arc; @@ -151,15 +150,14 @@ } } -fn try_collect_memory_input_rows( - input: &dyn ExecutionPlan, -) -> DataFusionResult<Option<Vec<Row>>> { +fn try_collect_memory_input_rows(input: &dyn ExecutionPlan) -> DataFusionResult<Option<Vec<Row>>> { if let Some(data_source_exec) = input.as_any().downcast_ref::<DataSourceExec>() { return try_collect_rows_from_data_source_exec(data_source_exec); } if let Some(projection_exec) = input.as_any().downcast_ref::<ProjectionExec>() { - let Some(source_batches) = try_read_memory_source_batches(projection_exec.input().as_ref())? + let Some(source_batches) = + try_read_memory_source_batches(projection_exec.input().as_ref())?
else { return Ok(None); }; @@ -171,7 +169,9 @@ let arrays = projection_exec .expr() .iter() - .map(|projection| projection.expr.evaluate(&batch)?.into_array(batch.num_rows())) + .map(|projection| { + projection.expr.evaluate(&batch)?.into_array(batch.num_rows()) + }) .collect::<DataFusionResult<Vec<_>>>()?; RecordBatch::try_new(Arc::clone(&projection_schema), arrays) .map_err(|error| DataFusionError::ArrowError(Box::new(error), None)) @@ -201,10 +201,8 @@ return Ok(None); }; - let Some(memory_source) = data_source_exec - .data_source() - .as_any() - .downcast_ref::<MemorySourceConfig>() + let Some(memory_source) = + data_source_exec.data_source().as_any().downcast_ref::<MemorySourceConfig>() else { return Ok(None); }; @@ -417,12 +415,10 @@ fn record_batches_to_rows(batches: &[RecordBatch]) -> DataFusionResult<Vec<Row>> { for batch in batches { let schema = batch.schema(); - let field_names: Vec<String> = schema.fields().iter().map(|field| field.name().to_string()).collect(); - let columns: Vec<&dyn arrow::array::Array> = batch - .columns() - .iter() - .map(|column| column.as_ref()) - .collect(); + let field_names: Vec<String> = + schema.fields().iter().map(|field| field.name().to_string()).collect(); + let columns: Vec<&dyn arrow::array::Array> = + batch.columns().iter().map(|column| column.as_ref()).collect(); for row_idx in 0..batch.num_rows() { let mut values = BTreeMap::new(); for (col_idx, field_name) in field_names.iter().enumerate() {
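
record_batches_to_rows above walks each batch cell-by-cell into BTreeMap-backed rows. A standalone sketch of the same pattern built on ScalarValue::try_from_array; the row shape here is a bare BTreeMap standing in for the crate's Row type:

use datafusion::arrow::array::{ArrayRef, Int64Array, StringArray};
use datafusion::arrow::datatypes::{DataType, Field, Schema};
use datafusion::arrow::record_batch::RecordBatch;
use datafusion::common::{Result, ScalarValue};
use std::collections::BTreeMap;
use std::sync::Arc;

fn batch_to_rows(batch: &RecordBatch) -> Result<Vec<BTreeMap<String, ScalarValue>>> {
    let mut rows = Vec::with_capacity(batch.num_rows());
    for row_idx in 0..batch.num_rows() {
        let mut values = BTreeMap::new();
        for (col_idx, field) in batch.schema().fields().iter().enumerate() {
            // try_from_array extracts one cell as a dynamically typed scalar.
            let value = ScalarValue::try_from_array(batch.column(col_idx), row_idx)?;
            values.insert(field.name().clone(), value);
        }
        rows.push(values);
    }
    Ok(rows)
}

fn main() -> Result<()> {
    let schema = Arc::new(Schema::new(vec![
        Field::new("id", DataType::Int64, false),
        Field::new("name", DataType::Utf8, false),
    ]));
    let batch = RecordBatch::try_new(
        schema,
        vec![
            Arc::new(Int64Array::from(vec![1, 2])) as ArrayRef,
            Arc::new(StringArray::from(vec!["a", "b"])) as ArrayRef,
        ],
    )?;
    let rows = batch_to_rows(&batch)?;
    assert_eq!(rows.len(), 2);
    assert_eq!(rows[0]["id"], ScalarValue::Int64(Some(1)));
    Ok(())
}

diff --git a/backend/crates/kalamdb-tables/src/utils/row_utils.rs b/backend/crates/kalamdb-tables/src/utils/row_utils.rs index f41dab30e..ce9ad93b8 100644 --- a/backend/crates/kalamdb-tables/src/utils/row_utils.rs +++ b/backend/crates/kalamdb-tables/src/utils/row_utils.rs @@ -9,13 +9,13 @@ use kalamdb_commons::constants::SystemColumnNames; use kalamdb_commons::conversions::arrow_json_conversion::json_rows_to_arrow_batch; use kalamdb_commons::ids::SeqId; use kalamdb_commons::models::rows::Row; -use std::collections::BTreeMap; use kalamdb_commons::models::{ReadContext, Role, UserId}; use kalamdb_session_datafusion::{ extract_full_user_context as extract_full_user_context_session, extract_user_context as extract_user_context_session, }; use once_cell::sync::Lazy; +use std::collections::BTreeMap; use std::sync::Arc; static SYSTEM_USER_ID: Lazy<UserId> = Lazy::new(|| UserId::from("_system")); @@ -300,13 +300,7 @@ where materialized.values.extend(extra.values); } - inject_system_columns( - schema, - &mut materialized, - seq, - commit_seq, - deleted, - ); + inject_system_columns(schema, &mut materialized, seq, commit_seq, deleted); rows.push(materialized); } diff --git a/backend/crates/kalamdb-tables/src/utils/vector_staging.rs b/backend/crates/kalamdb-tables/src/utils/vector_staging.rs index 1f7cee925..713e4dc22 100644 --- a/backend/crates/kalamdb-tables/src/utils/vector_staging.rs +++ b/backend/crates/kalamdb-tables/src/utils/vector_staging.rs @@ -132,20 +132,14 @@ mod tests { SeqId::from_i64(1), Row::from_vec(vec![ ("id".to_string(), ScalarValue::Utf8(Some("row-1".to_string()))), - ( - "embedding".to_string(), - ScalarValue::Utf8(Some("[1.0,2.0]".to_string())), - ), + ("embedding".to_string(), ScalarValue::Utf8(Some("[1.0,2.0]".to_string()))), ]), ), ( SeqId::from_i64(2), Row::from_vec(vec![ ("id".to_string(), ScalarValue::Utf8(Some("row-2".to_string()))), - ( - "embedding".to_string(), - ScalarValue::Utf8(Some("[3.0,4.0]".to_string())), - ), + ("embedding".to_string(), ScalarValue::Utf8(Some("[3.0,4.0]".to_string()))), ]), ), ]; @@ -170,16 +164,16 @@ fn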
build_vector_delete_ops_builds_one_delete_per_column() { let ops = build_vector_delete_ops( &table_id(), - &[("embedding".to_string(), 2), ("alt_embedding".to_string(), 3)], + &[ + ("embedding".to_string(), 2), + ("alt_embedding".to_string(), 3), + ], "row-1", |pk| pk.to_string(), ); assert_eq!(ops.len(), 2); assert_eq!(ops.get("embedding").expect("embedding")[0].1.pk, "row-1"); - assert_eq!( - ops.get("alt_embedding").expect("alt embedding")[0].1.dimensions, - 3 - ); + assert_eq!(ops.get("alt_embedding").expect("alt embedding")[0].1.dimensions, 3); } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-tables/src/utils/version_resolution.rs b/backend/crates/kalamdb-tables/src/utils/version_resolution.rs index 9385abb0c..f0bc9c03f 100644 --- a/backend/crates/kalamdb-tables/src/utils/version_resolution.rs +++ b/backend/crates/kalamdb-tables/src/utils/version_resolution.rs @@ -720,11 +720,11 @@ impl<'a> ParquetBatchDecoder<'a> { .ok_or_else(|| KalamDbError::Other("_seq column is not Int64Array".to_string()))?; let deleted_array = deleted_idx.and_then(|idx| batch.column(idx).as_any().downcast_ref::<BooleanArray>()); - let commit_seq_array = commit_seq_idx - .and_then(|idx| batch.column(idx).as_any().downcast_ref::<UInt64Array>()); + let commit_seq_array = + commit_seq_idx.and_then(|idx| batch.column(idx).as_any().downcast_ref::<UInt64Array>()); // Try to cache the PK column as StringArray for fast extraction. - let pk_string_array = pk_idx - .and_then(|idx| batch.column(idx).as_any().downcast_ref::<StringArray>()); + let pk_string_array = + pk_idx.and_then(|idx| batch.column(idx).as_any().downcast_ref::<StringArray>()); let value_columns = schema .fields() .iter() @@ -764,16 +764,18 @@ None } else { let v = str_arr.value(row_idx); - if v.is_empty() { None } else { Some(v.to_owned()) } + if v.is_empty() { + None + } else { + Some(v.to_owned()) + } } } else { // Fallback for non-Utf8 PK types (Int64, etc.) self.pk_idx.and_then(|idx| { let array = self.batch.column(idx); arrow_value_to_scalar(array.as_ref(), row_idx).ok().and_then(|sv| match &sv { - ScalarValue::Utf8(Some(s)) | ScalarValue::LargeUtf8(Some(s)) => { - Some(s.clone()) - }, + ScalarValue::Utf8(Some(s)) | ScalarValue::LargeUtf8(Some(s)) => Some(s.clone()), other if other.is_null() => None, other => Some(other.to_string()), }) diff --git a/backend/crates/kalamdb-transactions/src/access.rs b/backend/crates/kalamdb-transactions/src/access.rs index 47de86b4c..02f9ea69c 100644 --- a/backend/crates/kalamdb-transactions/src/access.rs +++ b/backend/crates/kalamdb-transactions/src/access.rs @@ -38,4 +38,4 @@ pub trait TransactionAccessValidator: std::fmt::Debug + Send + Sync { table_type: TableType, user_id: Option<&UserId>, ) -> Result<(), TransactionAccessError>; -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-transactions/src/commit_sequence.rs b/backend/crates/kalamdb-transactions/src/commit_sequence.rs index 50ee70215..5f306b291 100644 --- a/backend/crates/kalamdb-transactions/src/commit_sequence.rs +++ b/backend/crates/kalamdb-transactions/src/commit_sequence.rs @@ -3,4 +3,4 @@ pub trait CommitSequenceSource: std::fmt::Debug + Send + Sync { fn current_committed(&self) -> u64; fn allocate_next(&self) -> u64; -} \ No newline at end of file +}
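
CommitSequenceSource is small enough that a monotonic in-process source fits in a few lines. An illustrative sketch (not the crate's implementation) built on an AtomicU64, with inherent methods mirroring the trait's two operations:

use std::sync::atomic::{AtomicU64, Ordering};

#[derive(Debug, Default)]
struct AtomicCommitSequence {
    next: AtomicU64,
}

impl AtomicCommitSequence {
    /// Highest sequence number handed out so far (0 if none).
    fn current_committed(&self) -> u64 {
        self.next.load(Ordering::Acquire)
    }

    /// Allocates the next sequence number; fetch_add makes this safe to
    /// call from many threads without any extra coordination.
    fn allocate_next(&self) -> u64 {
        self.next.fetch_add(1, Ordering::AcqRel) + 1
    }
}

fn main() {
    let seq = AtomicCommitSequence::default();
    assert_eq!(seq.allocate_next(), 1);
    assert_eq!(seq.allocate_next(), 2);
    assert_eq!(seq.current_committed(), 2);
}

diff --git a/backend/crates/kalamdb-transactions/src/lib.rs b/backend/crates/kalamdb-transactions/src/lib.rs index ff952bb74..375e4f6ec 100644 --- a/backend/crates/kalamdb-transactions/src/lib.rs +++ b/backend/crates/kalamdb-transactions/src/lib.rs @@ -5,9 +5,9 @@ //!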
`kalamdb-tables -> kalamdb-core` dependency. pub mod access; +pub mod commit_sequence; pub mod overlay; pub mod overlay_exec; -pub mod commit_sequence; pub mod query_context; pub mod query_extension; pub mod staged_mutation; @@ -16,8 +16,6 @@ pub use access::{TransactionAccessError, TransactionAccessValidator}; pub use commit_sequence::CommitSequenceSource; pub use overlay::{TransactionOverlay, TransactionOverlayEntry}; pub use overlay_exec::TransactionOverlayExec; -pub use query_context::{ - TransactionMutationSink, TransactionOverlayView, TransactionQueryContext, -}; +pub use query_context::{TransactionMutationSink, TransactionOverlayView, TransactionQueryContext}; pub use query_extension::{extract_transaction_query_context, TransactionQueryExtension}; -pub use staged_mutation::{build_insert_staged_mutations, StagedInsertBuildError, StagedMutation}; \ No newline at end of file +pub use staged_mutation::{build_insert_staged_mutations, StagedInsertBuildError, StagedMutation}; diff --git a/backend/crates/kalamdb-transactions/src/overlay.rs b/backend/crates/kalamdb-transactions/src/overlay.rs index fa586e619..512b5981d 100644 --- a/backend/crates/kalamdb-transactions/src/overlay.rs +++ b/backend/crates/kalamdb-transactions/src/overlay.rs @@ -7,312 +7,318 @@ use kalamdb_commons::TableType; use crate::query_context::TransactionOverlayView; fn scoped_entry_key(user_id: Option<&UserId>, primary_key: &str) -> String { - match user_id { - Some(user_id) => format!("u{}:{}:{}", user_id.as_str().len(), user_id.as_str(), primary_key), - None => format!("s:{}", primary_key), - } + match user_id { + Some(user_id) => { + format!("u{}:{}:{}", user_id.as_str().len(), user_id.as_str(), primary_key) + }, + None => format!("s:{}", primary_key), + } } /// Shared overlay entry exposed across crate boundaries for transaction-local reads. #[derive(Debug, Clone, PartialEq, Eq)] pub struct TransactionOverlayEntry { - pub transaction_id: TransactionId, - pub mutation_order: u64, - pub table_id: TableId, - pub table_type: TableType, - pub user_id: Option, - pub operation_kind: OperationKind, - pub primary_key: String, - pub payload: Row, - pub tombstone: bool, + pub transaction_id: TransactionId, + pub mutation_order: u64, + pub table_id: TableId, + pub table_type: TableType, + pub user_id: Option, + pub operation_kind: OperationKind, + pub primary_key: String, + pub payload: Row, + pub tombstone: bool, } impl TransactionOverlayEntry { - #[inline] - pub fn is_deleted(&self) -> bool { - self.tombstone || matches!(self.operation_kind, OperationKind::Delete) - } + #[inline] + pub fn is_deleted(&self) -> bool { + self.tombstone || matches!(self.operation_kind, OperationKind::Delete) + } } /// Query-time overlay for transaction-local read visibility. 
#[derive(Debug, Clone)] pub struct TransactionOverlay { - pub transaction_id: TransactionId, - pub entries_by_table: HashMap>, - pub inserted_keys: HashMap>, - pub deleted_keys: HashMap>, - pub updated_keys: HashMap>, + pub transaction_id: TransactionId, + pub entries_by_table: HashMap>, + pub inserted_keys: HashMap>, + pub deleted_keys: HashMap>, + pub updated_keys: HashMap>, } impl TransactionOverlay { - #[inline] - pub fn new(transaction_id: TransactionId) -> Self { - Self { - transaction_id, - entries_by_table: HashMap::new(), - inserted_keys: HashMap::new(), - deleted_keys: HashMap::new(), - updated_keys: HashMap::new(), - } - } - - pub fn apply_entry(&mut self, entry: TransactionOverlayEntry) { - let table_id = entry.table_id.clone(); - let primary_key = entry.primary_key.clone(); - let user_id = entry.user_id.clone(); - let entry_key = scoped_entry_key(user_id.as_ref(), primary_key.as_str()); - let effective_entry = - self.merge_visible_entry(&table_id, user_id.as_ref(), primary_key.as_str(), entry); - - self.entries_by_table - .entry(table_id.clone()) - .or_default() - .insert(entry_key.clone(), effective_entry.clone()); - - self.clear_key_membership(&table_id, entry_key.as_str()); - - let target_map = if effective_entry.is_deleted() { - &mut self.deleted_keys - } else { - match effective_entry.operation_kind { - OperationKind::Insert => &mut self.inserted_keys, - OperationKind::Update => &mut self.updated_keys, - OperationKind::Delete => &mut self.deleted_keys, - } - }; - - target_map.entry(table_id).or_default().insert(entry_key); - } - - pub fn merge_from(&mut self, other: &TransactionOverlay) { - for table_entries in other.entries_by_table.values() { - for entry in table_entries.values() { - self.apply_entry(entry.clone()); - } - } - } - - #[inline] - pub fn latest_visible_entry( - &self, - table_id: &TableId, - primary_key: &str, - ) -> Option<&TransactionOverlayEntry> { - self.latest_visible_entry_for_scope(table_id, None, primary_key) - } - - #[inline] - pub fn latest_visible_entry_for_scope( - &self, - table_id: &TableId, - user_id: Option<&UserId>, - primary_key: &str, - ) -> Option<&TransactionOverlayEntry> { - let entry_key = scoped_entry_key(user_id, primary_key); - self.entries_by_table.get(table_id)?.get(entry_key.as_str()) - } - - #[inline] - pub fn table_entries( - &self, - table_id: &TableId, - ) -> Option<&BTreeMap> { - self.entries_by_table.get(table_id) - } - - pub fn table_overlay(&self, table_id: &TableId) -> Option { - let entries = self.entries_by_table.get(table_id)?.clone(); - - let mut overlay = TransactionOverlay::new(self.transaction_id.clone()); - overlay.entries_by_table.insert(table_id.clone(), entries); - - if let Some(keys) = self.inserted_keys.get(table_id) { - overlay.inserted_keys.insert(table_id.clone(), keys.clone()); - } - if let Some(keys) = self.deleted_keys.get(table_id) { - overlay.deleted_keys.insert(table_id.clone(), keys.clone()); - } - if let Some(keys) = self.updated_keys.get(table_id) { - overlay.updated_keys.insert(table_id.clone(), keys.clone()); - } - - Some(overlay) - } - - fn clear_key_membership(&mut self, table_id: &TableId, entry_key: &str) { - for key_set in [&mut self.inserted_keys, &mut self.deleted_keys, &mut self.updated_keys] { - if let Some(keys) = key_set.get_mut(table_id) { - keys.remove(entry_key); - if keys.is_empty() { - key_set.remove(table_id); - } - } - } - } - - fn merge_visible_entry( - &self, - table_id: &TableId, - user_id: Option<&UserId>, - primary_key: &str, - mut next: TransactionOverlayEntry, - ) -> 
TransactionOverlayEntry { - if next.is_deleted() { - return next; - } - - let entry_key = scoped_entry_key(user_id, primary_key); - - let Some(current) = self - .entries_by_table - .get(table_id) - .and_then(|entries| entries.get(entry_key.as_str())) - else { - return next; - }; - - if current.is_deleted() { - return next; - } - - if matches!(next.operation_kind, OperationKind::Update) { - let mut merged_values = current.payload.values.clone(); - for (column_name, value) in &next.payload.values { - merged_values.insert(column_name.clone(), value.clone()); - } - next.payload = Row::new(merged_values); - if matches!(current.operation_kind, OperationKind::Insert) { - next.operation_kind = OperationKind::Insert; - } - } - - next - } + #[inline] + pub fn new(transaction_id: TransactionId) -> Self { + Self { + transaction_id, + entries_by_table: HashMap::new(), + inserted_keys: HashMap::new(), + deleted_keys: HashMap::new(), + updated_keys: HashMap::new(), + } + } + + pub fn apply_entry(&mut self, entry: TransactionOverlayEntry) { + let table_id = entry.table_id.clone(); + let primary_key = entry.primary_key.clone(); + let user_id = entry.user_id.clone(); + let entry_key = scoped_entry_key(user_id.as_ref(), primary_key.as_str()); + let effective_entry = + self.merge_visible_entry(&table_id, user_id.as_ref(), primary_key.as_str(), entry); + + self.entries_by_table + .entry(table_id.clone()) + .or_default() + .insert(entry_key.clone(), effective_entry.clone()); + + self.clear_key_membership(&table_id, entry_key.as_str()); + + let target_map = if effective_entry.is_deleted() { + &mut self.deleted_keys + } else { + match effective_entry.operation_kind { + OperationKind::Insert => &mut self.inserted_keys, + OperationKind::Update => &mut self.updated_keys, + OperationKind::Delete => &mut self.deleted_keys, + } + }; + + target_map.entry(table_id).or_default().insert(entry_key); + } + + pub fn merge_from(&mut self, other: &TransactionOverlay) { + for table_entries in other.entries_by_table.values() { + for entry in table_entries.values() { + self.apply_entry(entry.clone()); + } + } + } + + #[inline] + pub fn latest_visible_entry( + &self, + table_id: &TableId, + primary_key: &str, + ) -> Option<&TransactionOverlayEntry> { + self.latest_visible_entry_for_scope(table_id, None, primary_key) + } + + #[inline] + pub fn latest_visible_entry_for_scope( + &self, + table_id: &TableId, + user_id: Option<&UserId>, + primary_key: &str, + ) -> Option<&TransactionOverlayEntry> { + let entry_key = scoped_entry_key(user_id, primary_key); + self.entries_by_table.get(table_id)?.get(entry_key.as_str()) + } + + #[inline] + pub fn table_entries( + &self, + table_id: &TableId, + ) -> Option<&BTreeMap> { + self.entries_by_table.get(table_id) + } + + pub fn table_overlay(&self, table_id: &TableId) -> Option { + let entries = self.entries_by_table.get(table_id)?.clone(); + + let mut overlay = TransactionOverlay::new(self.transaction_id.clone()); + overlay.entries_by_table.insert(table_id.clone(), entries); + + if let Some(keys) = self.inserted_keys.get(table_id) { + overlay.inserted_keys.insert(table_id.clone(), keys.clone()); + } + if let Some(keys) = self.deleted_keys.get(table_id) { + overlay.deleted_keys.insert(table_id.clone(), keys.clone()); + } + if let Some(keys) = self.updated_keys.get(table_id) { + overlay.updated_keys.insert(table_id.clone(), keys.clone()); + } + + Some(overlay) + } + + fn clear_key_membership(&mut self, table_id: &TableId, entry_key: &str) { + for key_set in [ + &mut self.inserted_keys, + &mut 
self.deleted_keys, + &mut self.updated_keys, + ] { + if let Some(keys) = key_set.get_mut(table_id) { + keys.remove(entry_key); + if keys.is_empty() { + key_set.remove(table_id); + } + } + } + } + + fn merge_visible_entry( + &self, + table_id: &TableId, + user_id: Option<&UserId>, + primary_key: &str, + mut next: TransactionOverlayEntry, + ) -> TransactionOverlayEntry { + if next.is_deleted() { + return next; + } + + let entry_key = scoped_entry_key(user_id, primary_key); + + let Some(current) = self + .entries_by_table + .get(table_id) + .and_then(|entries| entries.get(entry_key.as_str())) + else { + return next; + }; + + if current.is_deleted() { + return next; + } + + if matches!(next.operation_kind, OperationKind::Update) { + let mut merged_values = current.payload.values.clone(); + for (column_name, value) in &next.payload.values { + merged_values.insert(column_name.clone(), value.clone()); + } + next.payload = Row::new(merged_values); + if matches!(current.operation_kind, OperationKind::Insert) { + next.operation_kind = OperationKind::Insert; + } + } + + next + } } impl TransactionOverlayView for TransactionOverlay { - fn overlay(&self) -> TransactionOverlay { - self.clone() - } + fn overlay(&self) -> TransactionOverlay { + self.clone() + } - fn overlay_for_table(&self, table_id: &TableId) -> Option { - self.table_overlay(table_id) - } + fn overlay_for_table(&self, table_id: &TableId) -> Option { + self.table_overlay(table_id) + } } #[cfg(test)] mod tests { - use datafusion::scalar::ScalarValue; - use std::collections::BTreeMap; - - use super::*; - use kalamdb_commons::models::{NamespaceId, TableName}; - - fn row(values: &[(&str, ScalarValue)]) -> Row { - let mut fields = BTreeMap::new(); - for (name, value) in values { - fields.insert((*name).to_string(), value.clone()); - } - Row::new(fields) - } - - #[test] - fn update_after_insert_preserves_inserted_columns() { - let transaction_id = TransactionId::new("01960f7b-3d15-7d6d-b26c-7e4db6f25f8d"); - let table_id = TableId::new(NamespaceId::new("app"), TableName::new("items")); - let mut overlay = TransactionOverlay::new(transaction_id.clone()); - - overlay.apply_entry(TransactionOverlayEntry { - transaction_id: transaction_id.clone(), - mutation_order: 0, - table_id: table_id.clone(), - table_type: TableType::Shared, - user_id: None, - operation_kind: OperationKind::Insert, - primary_key: "1".to_string(), - payload: row(&[ - ("id", ScalarValue::Int64(Some(1))), - ("name", ScalarValue::Utf8(Some("before".to_string()))), - ("color", ScalarValue::Utf8(Some("red".to_string()))), - ]), - tombstone: false, - }); - - overlay.apply_entry(TransactionOverlayEntry { - transaction_id, - mutation_order: 1, - table_id: table_id.clone(), - table_type: TableType::Shared, - user_id: None, - operation_kind: OperationKind::Update, - primary_key: "1".to_string(), - payload: row(&[("name", ScalarValue::Utf8(Some("after".to_string())))]), - tombstone: false, - }); - - let entry = overlay.latest_visible_entry(&table_id, "1").expect("overlay entry"); - assert_eq!(entry.operation_kind, OperationKind::Insert); - assert_eq!(entry.payload.values.get("id"), Some(&ScalarValue::Int64(Some(1)))); - assert_eq!( - entry.payload.values.get("name"), - Some(&ScalarValue::Utf8(Some("after".to_string()))) - ); - assert_eq!( - entry.payload.values.get("color"), - Some(&ScalarValue::Utf8(Some("red".to_string()))) - ); - } - - #[test] - fn preserves_distinct_user_scopes_for_same_primary_key() { - let transaction_id = TransactionId::new("01960f7b-3d15-7d6d-b26c-7e4db6f25f8d"); 
- let table_id = TableId::new(NamespaceId::new("app"), TableName::new("items")); - let first_user = UserId::new("user-a"); - let second_user = UserId::new("user-b"); - let mut overlay = TransactionOverlay::new(transaction_id.clone()); - - overlay.apply_entry(TransactionOverlayEntry { - transaction_id: transaction_id.clone(), - mutation_order: 0, - table_id: table_id.clone(), - table_type: TableType::User, - user_id: Some(first_user.clone()), - operation_kind: OperationKind::Insert, - primary_key: "1".to_string(), - payload: row(&[("name", ScalarValue::Utf8(Some("alice".to_string())))]), - tombstone: false, - }); - - overlay.apply_entry(TransactionOverlayEntry { - transaction_id, - mutation_order: 1, - table_id: table_id.clone(), - table_type: TableType::User, - user_id: Some(second_user.clone()), - operation_kind: OperationKind::Insert, - primary_key: "1".to_string(), - payload: row(&[("name", ScalarValue::Utf8(Some("bob".to_string())))]), - tombstone: false, - }); - - assert_eq!(overlay.table_entries(&table_id).expect("table entries").len(), 2); - assert_eq!( - overlay - .latest_visible_entry_for_scope(&table_id, Some(&first_user), "1") - .expect("first user entry") - .payload - .values - .get("name"), - Some(&ScalarValue::Utf8(Some("alice".to_string()))) - ); - assert_eq!( - overlay - .latest_visible_entry_for_scope(&table_id, Some(&second_user), "1") - .expect("second user entry") - .payload - .values - .get("name"), - Some(&ScalarValue::Utf8(Some("bob".to_string()))) - ); - } -} \ No newline at end of file + use datafusion::scalar::ScalarValue; + use std::collections::BTreeMap; + + use super::*; + use kalamdb_commons::models::{NamespaceId, TableName}; + + fn row(values: &[(&str, ScalarValue)]) -> Row { + let mut fields = BTreeMap::new(); + for (name, value) in values { + fields.insert((*name).to_string(), value.clone()); + } + Row::new(fields) + } + + #[test] + fn update_after_insert_preserves_inserted_columns() { + let transaction_id = TransactionId::new("01960f7b-3d15-7d6d-b26c-7e4db6f25f8d"); + let table_id = TableId::new(NamespaceId::new("app"), TableName::new("items")); + let mut overlay = TransactionOverlay::new(transaction_id.clone()); + + overlay.apply_entry(TransactionOverlayEntry { + transaction_id: transaction_id.clone(), + mutation_order: 0, + table_id: table_id.clone(), + table_type: TableType::Shared, + user_id: None, + operation_kind: OperationKind::Insert, + primary_key: "1".to_string(), + payload: row(&[ + ("id", ScalarValue::Int64(Some(1))), + ("name", ScalarValue::Utf8(Some("before".to_string()))), + ("color", ScalarValue::Utf8(Some("red".to_string()))), + ]), + tombstone: false, + }); + + overlay.apply_entry(TransactionOverlayEntry { + transaction_id, + mutation_order: 1, + table_id: table_id.clone(), + table_type: TableType::Shared, + user_id: None, + operation_kind: OperationKind::Update, + primary_key: "1".to_string(), + payload: row(&[("name", ScalarValue::Utf8(Some("after".to_string())))]), + tombstone: false, + }); + + let entry = overlay.latest_visible_entry(&table_id, "1").expect("overlay entry"); + assert_eq!(entry.operation_kind, OperationKind::Insert); + assert_eq!(entry.payload.values.get("id"), Some(&ScalarValue::Int64(Some(1)))); + assert_eq!( + entry.payload.values.get("name"), + Some(&ScalarValue::Utf8(Some("after".to_string()))) + ); + assert_eq!( + entry.payload.values.get("color"), + Some(&ScalarValue::Utf8(Some("red".to_string()))) + ); + } + + #[test] + fn preserves_distinct_user_scopes_for_same_primary_key() { + let transaction_id = 
TransactionId::new("01960f7b-3d15-7d6d-b26c-7e4db6f25f8d"); + let table_id = TableId::new(NamespaceId::new("app"), TableName::new("items")); + let first_user = UserId::new("user-a"); + let second_user = UserId::new("user-b"); + let mut overlay = TransactionOverlay::new(transaction_id.clone()); + + overlay.apply_entry(TransactionOverlayEntry { + transaction_id: transaction_id.clone(), + mutation_order: 0, + table_id: table_id.clone(), + table_type: TableType::User, + user_id: Some(first_user.clone()), + operation_kind: OperationKind::Insert, + primary_key: "1".to_string(), + payload: row(&[("name", ScalarValue::Utf8(Some("alice".to_string())))]), + tombstone: false, + }); + + overlay.apply_entry(TransactionOverlayEntry { + transaction_id, + mutation_order: 1, + table_id: table_id.clone(), + table_type: TableType::User, + user_id: Some(second_user.clone()), + operation_kind: OperationKind::Insert, + primary_key: "1".to_string(), + payload: row(&[("name", ScalarValue::Utf8(Some("bob".to_string())))]), + tombstone: false, + }); + + assert_eq!(overlay.table_entries(&table_id).expect("table entries").len(), 2); + assert_eq!( + overlay + .latest_visible_entry_for_scope(&table_id, Some(&first_user), "1") + .expect("first user entry") + .payload + .values + .get("name"), + Some(&ScalarValue::Utf8(Some("alice".to_string()))) + ); + assert_eq!( + overlay + .latest_visible_entry_for_scope(&table_id, Some(&second_user), "1") + .expect("second user entry") + .payload + .values + .get("name"), + Some(&ScalarValue::Utf8(Some("bob".to_string()))) + ); + } +} diff --git a/backend/crates/kalamdb-transactions/src/overlay_exec.rs b/backend/crates/kalamdb-transactions/src/overlay_exec.rs index 9c3259afb..c489f1522 100644 --- a/backend/crates/kalamdb-transactions/src/overlay_exec.rs +++ b/backend/crates/kalamdb-transactions/src/overlay_exec.rs @@ -9,7 +9,7 @@ use datafusion::execution::{SendableRecordBatchStream, TaskContext}; use datafusion::physical_expr::EquivalenceProperties; use datafusion::physical_plan::stream::RecordBatchStreamAdapter; use datafusion::physical_plan::{ - DisplayAs, DisplayFormatType, ExecutionPlan, ExecutionPlanProperties, PlanProperties, + DisplayAs, DisplayFormatType, ExecutionPlan, ExecutionPlanProperties, PlanProperties, }; use datafusion::scalar::ScalarValue; use datafusion::{common::Result as DataFusionResult, error::DataFusionError}; @@ -25,446 +25,438 @@ use crate::overlay::TransactionOverlay; /// Physical execution node that merges transaction-local overlay rows with committed scan output. 
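// Aside: a minimal sketch of the merge strategy implemented by the exec node
// defined below -- committed rows are deduplicated into "slots" keyed by
// primary key, then overlay entries (already ordered by mutation order)
// overwrite, blank out, or append slots; LIMIT is applied only after the
// merge. Overlay, merge, and the String payloads here are simplified
// stand-ins for the real Arrow/Row machinery.
use std::collections::HashMap;

enum Overlay {
    Upsert { pk: String, row: String },
    Delete { pk: String },
}

fn merge(committed: Vec<(String, String)>, overlay: Vec<Overlay>, fetch: Option<usize>) -> Vec<String> {
    let mut slots: Vec<Option<String>> = Vec::new();
    let mut index: HashMap<String, usize> = HashMap::new();
    for (pk, row) in committed {
        match index.get(&pk).copied() {
            // A later committed duplicate overwrites the earlier slot in place.
            Some(i) => slots[i] = Some(row),
            None => {
                index.insert(pk, slots.len());
                slots.push(Some(row));
            },
        }
    }
    for entry in overlay {
        match entry {
            // A tombstone blanks the slot without disturbing the order of the
            // surviving rows.
            Overlay::Delete { pk } => {
                if let Some(i) = index.remove(&pk) {
                    slots[i] = None;
                }
            },
            Overlay::Upsert { pk, row } => match index.get(&pk).copied() {
                Some(i) => slots[i] = Some(row), // staged write wins over committed
                None => {
                    index.insert(pk, slots.len());
                    slots.push(Some(row)); // brand-new row appends at the end
                },
            },
        }
    }
    let mut rows: Vec<String> = slots.into_iter().flatten().collect();
    if let Some(limit) = fetch {
        rows.truncate(limit);
    }
    rows
}

fn main() {
    let committed = vec![("1".into(), "before".into()), ("2".into(), "remove".into())];
    let overlay = vec![
        Overlay::Upsert { pk: "1".into(), row: "after".into() },
        Overlay::Delete { pk: "2".into() },
        Overlay::Upsert { pk: "3".into(), row: "inserted".into() },
    ];
    assert_eq!(merge(committed, overlay, None), vec!["after", "inserted"]);
}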
#[derive(Debug, Clone)] pub struct TransactionOverlayExec { - input: Arc, - table_id: TableId, - primary_key_column: Arc, - overlay: TransactionOverlay, - user_scope: Option, - final_projection: Option>, - fetch: Option, - cache: Arc, + input: Arc, + table_id: TableId, + primary_key_column: Arc, + overlay: TransactionOverlay, + user_scope: Option, + final_projection: Option>, + fetch: Option, + cache: Arc, } impl TransactionOverlayExec { - pub fn try_new( - input: Arc, - table_id: TableId, - primary_key_column: impl Into>, - overlay: TransactionOverlay, - user_scope: Option, - final_projection: Option>, - fetch: Option, - ) -> DataFusionResult { - let output_schema = projected_schema(&input.schema(), final_projection.as_ref())?; - let cache = Arc::new(PlanProperties::new( - EquivalenceProperties::new(Arc::clone(&output_schema)), - input.output_partitioning().clone(), - input.pipeline_behavior(), - input.boundedness(), - )); - - Ok(Self { - input, - table_id, - primary_key_column: primary_key_column.into(), - overlay, - user_scope, - final_projection, - fetch, - cache, - }) - } + pub fn try_new( + input: Arc, + table_id: TableId, + primary_key_column: impl Into>, + overlay: TransactionOverlay, + user_scope: Option, + final_projection: Option>, + fetch: Option, + ) -> DataFusionResult { + let output_schema = projected_schema(&input.schema(), final_projection.as_ref())?; + let cache = Arc::new(PlanProperties::new( + EquivalenceProperties::new(Arc::clone(&output_schema)), + input.output_partitioning().clone(), + input.pipeline_behavior(), + input.boundedness(), + )); + + Ok(Self { + input, + table_id, + primary_key_column: primary_key_column.into(), + overlay, + user_scope, + final_projection, + fetch, + cache, + }) + } } impl DisplayAs for TransactionOverlayExec { - fn fmt_as( - &self, - t: DisplayFormatType, - f: &mut std::fmt::Formatter, - ) -> std::fmt::Result { - match t { - DisplayFormatType::Default | DisplayFormatType::Verbose => { - write!( - f, - "TransactionOverlayExec: table={}, pk={}, fetch={:?}", - self.table_id, - self.primary_key_column, - self.fetch - ) - }, - DisplayFormatType::TreeRender => { - write!(f, "table={}, pk={}", self.table_id, self.primary_key_column) - }, - } - } + fn fmt_as(&self, t: DisplayFormatType, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match t { + DisplayFormatType::Default | DisplayFormatType::Verbose => { + write!( + f, + "TransactionOverlayExec: table={}, pk={}, fetch={:?}", + self.table_id, self.primary_key_column, self.fetch + ) + }, + DisplayFormatType::TreeRender => { + write!(f, "table={}, pk={}", self.table_id, self.primary_key_column) + }, + } + } } impl ExecutionPlan for TransactionOverlayExec { - fn name(&self) -> &str { - Self::static_name() - } - - fn as_any(&self) -> &dyn Any { - self - } - - fn properties(&self) -> &Arc { - &self.cache - } - - fn children(&self) -> Vec<&Arc> { - vec![&self.input] - } - - fn maintains_input_order(&self) -> Vec { - vec![true] - } - - fn with_new_children( - self: Arc, - mut children: Vec>, - ) -> DataFusionResult> { - let input = children.swap_remove(0); - Ok(Arc::new(Self::try_new( - input, - self.table_id.clone(), - Arc::clone(&self.primary_key_column), - self.overlay.clone(), - self.user_scope.clone(), - self.final_projection.clone(), - self.fetch, - )?)) - } - - fn execute( - &self, - partition: usize, - context: Arc, - ) -> DataFusionResult { - let input = Arc::clone(&self.input); - let overlay = self.overlay.clone(); - let table_id = self.table_id.clone(); - let primary_key_column = 
Arc::clone(&self.primary_key_column); - let final_projection = self.final_projection.clone(); - let input_schema = input.schema(); - let output_schema = self.schema(); - let user_scope = self.user_scope.clone(); - let fetch = self.fetch; - - let stream = stream::once(async move { - let input_stream = input.execute(partition, context)?; - let batches = input_stream.try_collect::>().await?; - merge_batches_with_overlay( - &input_schema, - &table_id, - primary_key_column.as_ref(), - &overlay, - user_scope.as_ref(), - &batches, - final_projection.as_ref(), - fetch, - ) - }); - - Ok(Box::pin(RecordBatchStreamAdapter::new(output_schema, stream))) - } + fn name(&self) -> &str { + Self::static_name() + } + + fn as_any(&self) -> &dyn Any { + self + } + + fn properties(&self) -> &Arc { + &self.cache + } + + fn children(&self) -> Vec<&Arc> { + vec![&self.input] + } + + fn maintains_input_order(&self) -> Vec { + vec![true] + } + + fn with_new_children( + self: Arc, + mut children: Vec>, + ) -> DataFusionResult> { + let input = children.swap_remove(0); + Ok(Arc::new(Self::try_new( + input, + self.table_id.clone(), + Arc::clone(&self.primary_key_column), + self.overlay.clone(), + self.user_scope.clone(), + self.final_projection.clone(), + self.fetch, + )?)) + } + + fn execute( + &self, + partition: usize, + context: Arc, + ) -> DataFusionResult { + let input = Arc::clone(&self.input); + let overlay = self.overlay.clone(); + let table_id = self.table_id.clone(); + let primary_key_column = Arc::clone(&self.primary_key_column); + let final_projection = self.final_projection.clone(); + let input_schema = input.schema(); + let output_schema = self.schema(); + let user_scope = self.user_scope.clone(); + let fetch = self.fetch; + + let stream = stream::once(async move { + let input_stream = input.execute(partition, context)?; + let batches = input_stream.try_collect::>().await?; + merge_batches_with_overlay( + &input_schema, + &table_id, + primary_key_column.as_ref(), + &overlay, + user_scope.as_ref(), + &batches, + final_projection.as_ref(), + fetch, + ) + }); + + Ok(Box::pin(RecordBatchStreamAdapter::new(output_schema, stream))) + } } fn projected_schema( - input_schema: &SchemaRef, - projection: Option<&Vec>, + input_schema: &SchemaRef, + projection: Option<&Vec>, ) -> DataFusionResult { - match projection { - Some(indices) => input_schema - .project(indices) - .map(Arc::new) - .map_err(|error| DataFusionError::ArrowError(Box::new(error), None)), - None => Ok(Arc::clone(input_schema)), - } + match projection { + Some(indices) => input_schema + .project(indices) + .map(Arc::new) + .map_err(|error| DataFusionError::ArrowError(Box::new(error), None)), + None => Ok(Arc::clone(input_schema)), + } } fn merge_batches_with_overlay( - input_schema: &SchemaRef, - table_id: &TableId, - primary_key_column: &str, - overlay: &TransactionOverlay, - overlay_user_scope: Option<&UserId>, - batches: &[RecordBatch], - final_projection: Option<&Vec>, - fetch: Option, + input_schema: &SchemaRef, + table_id: &TableId, + primary_key_column: &str, + overlay: &TransactionOverlay, + overlay_user_scope: Option<&UserId>, + batches: &[RecordBatch], + final_projection: Option<&Vec>, + fetch: Option, ) -> DataFusionResult { - let mut rows: Vec> = Vec::new(); - let mut row_index_by_pk: HashMap = HashMap::new(); - - for batch in batches { - for row in record_batch_to_rows(batch)? 
{ - let primary_key = extract_primary_key(&row, primary_key_column)?; - if let Some(existing_index) = row_index_by_pk.get(&primary_key).copied() { - rows[existing_index] = Some(row); - } else { - row_index_by_pk.insert(primary_key, rows.len()); - rows.push(Some(row)); - } - } - } - - let mut overlay_entries = overlay - .table_entries(table_id) - .map(|entries| { - entries - .values() - .filter(|entry| { - overlay_user_scope - .map(|user_id| entry.user_id.as_ref() == Some(user_id)) - .unwrap_or(true) - }) - .cloned() - .collect::>() - }) - .unwrap_or_default(); - overlay_entries.sort_by_key(|entry| entry.mutation_order); - - for entry in overlay_entries { - if entry.is_deleted() { - if let Some(existing_index) = row_index_by_pk.remove(entry.primary_key.as_str()) { - rows[existing_index] = None; - } - continue; - } - - if let Some(existing_index) = row_index_by_pk.get(entry.primary_key.as_str()).copied() { - if let Some(existing_row) = rows[existing_index].as_mut() { - merge_row(existing_row, &entry.payload); - } - } else { - row_index_by_pk.insert(entry.primary_key.clone(), rows.len()); - rows.push(Some(entry.payload.clone())); - } - } - - let mut merged_rows: Vec = rows.into_iter().flatten().collect(); - if let Some(fetch) = fetch { - merged_rows.truncate(fetch); - } - - let full_batch = json_rows_to_arrow_batch(input_schema, merged_rows) - .map_err(|error| DataFusionError::Execution(error.to_string()))?; - - match final_projection { - Some(indices) => full_batch - .project(indices) - .map_err(|error| DataFusionError::ArrowError(Box::new(error), None)), - None => Ok(full_batch), - } + let mut rows: Vec> = Vec::new(); + let mut row_index_by_pk: HashMap = HashMap::new(); + + for batch in batches { + for row in record_batch_to_rows(batch)? { + let primary_key = extract_primary_key(&row, primary_key_column)?; + if let Some(existing_index) = row_index_by_pk.get(&primary_key).copied() { + rows[existing_index] = Some(row); + } else { + row_index_by_pk.insert(primary_key, rows.len()); + rows.push(Some(row)); + } + } + } + + let mut overlay_entries = overlay + .table_entries(table_id) + .map(|entries| { + entries + .values() + .filter(|entry| { + overlay_user_scope + .map(|user_id| entry.user_id.as_ref() == Some(user_id)) + .unwrap_or(true) + }) + .cloned() + .collect::>() + }) + .unwrap_or_default(); + overlay_entries.sort_by_key(|entry| entry.mutation_order); + + for entry in overlay_entries { + if entry.is_deleted() { + if let Some(existing_index) = row_index_by_pk.remove(entry.primary_key.as_str()) { + rows[existing_index] = None; + } + continue; + } + + if let Some(existing_index) = row_index_by_pk.get(entry.primary_key.as_str()).copied() { + if let Some(existing_row) = rows[existing_index].as_mut() { + merge_row(existing_row, &entry.payload); + } + } else { + row_index_by_pk.insert(entry.primary_key.clone(), rows.len()); + rows.push(Some(entry.payload.clone())); + } + } + + let mut merged_rows: Vec = rows.into_iter().flatten().collect(); + if let Some(fetch) = fetch { + merged_rows.truncate(fetch); + } + + let full_batch = json_rows_to_arrow_batch(input_schema, merged_rows) + .map_err(|error| DataFusionError::Execution(error.to_string()))?; + + match final_projection { + Some(indices) => full_batch + .project(indices) + .map_err(|error| DataFusionError::ArrowError(Box::new(error), None)), + None => Ok(full_batch), + } } fn record_batch_to_rows(batch: &RecordBatch) -> DataFusionResult> { - let schema = batch.schema(); - let mut rows = Vec::with_capacity(batch.num_rows()); - - for row_index in 
0..batch.num_rows() { - let mut values = BTreeMap::new(); - for (column_index, field) in schema.fields().iter().enumerate() { - let value = ScalarValue::try_from_array(batch.column(column_index).as_ref(), row_index)?; - values.insert(field.name().to_string(), value); - } - rows.push(Row::new(values)); - } - - Ok(rows) + let schema = batch.schema(); + let mut rows = Vec::with_capacity(batch.num_rows()); + + for row_index in 0..batch.num_rows() { + let mut values = BTreeMap::new(); + for (column_index, field) in schema.fields().iter().enumerate() { + let value = + ScalarValue::try_from_array(batch.column(column_index).as_ref(), row_index)?; + values.insert(field.name().to_string(), value); + } + rows.push(Row::new(values)); + } + + Ok(rows) } fn extract_primary_key(row: &Row, primary_key_column: &str) -> DataFusionResult { - row.values - .get(primary_key_column) - .map(|value| value.to_string()) - .ok_or_else(|| { - DataFusionError::Execution(format!( - "transaction overlay row is missing primary key column '{}'", - primary_key_column - )) - }) + row.values + .get(primary_key_column) + .map(|value| value.to_string()) + .ok_or_else(|| { + DataFusionError::Execution(format!( + "transaction overlay row is missing primary key column '{}'", + primary_key_column + )) + }) } fn merge_row(base: &mut Row, overlay: &Row) { - for (column_name, value) in &overlay.values { - base.values.insert(column_name.clone(), value.clone()); - } + for (column_name, value) in &overlay.values { + base.values.insert(column_name.clone(), value.clone()); + } } #[cfg(test)] mod tests { - use datafusion::arrow::array::{Int64Array, StringArray}; - use datafusion::arrow::datatypes::{DataType, Field, Schema}; - - use super::*; - use crate::overlay::TransactionOverlayEntry; - use kalamdb_commons::models::{NamespaceId, OperationKind, TableName, TransactionId}; - use kalamdb_commons::TableType; - - fn row(values: &[(&str, ScalarValue)]) -> Row { - let mut fields = BTreeMap::new(); - for (name, value) in values { - fields.insert((*name).to_string(), value.clone()); - } - Row::new(fields) - } - - #[test] - fn overlay_merge_replaces_updates_filters_deletes_and_appends_inserts() { - let schema = Arc::new(Schema::new(vec![ - Field::new("id", DataType::Int64, false), - Field::new("name", DataType::Utf8, true), - ])); - let base_batch = RecordBatch::try_new( - Arc::clone(&schema), - vec![ - Arc::new(Int64Array::from(vec![1, 2])), - Arc::new(StringArray::from(vec![Some("before"), Some("remove")])), - ], - ) - .expect("base batch"); - - let transaction_id = TransactionId::new("01960f7b-3d15-7d6d-b26c-7e4db6f25f8d"); - let table_id = TableId::new(NamespaceId::new("app"), TableName::new("items")); - let mut overlay = TransactionOverlay::new(transaction_id.clone()); - overlay.apply_entry(TransactionOverlayEntry { - transaction_id: transaction_id.clone(), - mutation_order: 0, - table_id: table_id.clone(), - table_type: TableType::Shared, - user_id: None, - operation_kind: OperationKind::Update, - primary_key: "1".to_string(), - payload: row(&[("name", ScalarValue::Utf8(Some("after".to_string())))]), - tombstone: false, - }); - overlay.apply_entry(TransactionOverlayEntry { - transaction_id: transaction_id.clone(), - mutation_order: 1, - table_id: table_id.clone(), - table_type: TableType::Shared, - user_id: None, - operation_kind: OperationKind::Delete, - primary_key: "2".to_string(), - payload: Row::new(BTreeMap::new()), - tombstone: true, - }); - overlay.apply_entry(TransactionOverlayEntry { - transaction_id, - mutation_order: 2, - 
table_id: table_id.clone(), - table_type: TableType::Shared, - user_id: None, - operation_kind: OperationKind::Insert, - primary_key: "3".to_string(), - payload: row(&[ - ("id", ScalarValue::Int64(Some(3))), - ("name", ScalarValue::Utf8(Some("inserted".to_string()))), - ]), - tombstone: false, - }); - - let merged = merge_batches_with_overlay( - &schema, - &table_id, - "id", - &overlay, - None, - &[base_batch], - None, - None, - ) - .expect("merged batch"); - - assert_eq!(merged.num_rows(), 2); - let rows = record_batch_to_rows(&merged).expect("rows"); - assert_eq!(rows[0].values.get("id"), Some(&ScalarValue::Int64(Some(1)))); - assert_eq!( - rows[0].values.get("name"), - Some(&ScalarValue::Utf8(Some("after".to_string()))) - ); - assert_eq!(rows[1].values.get("id"), Some(&ScalarValue::Int64(Some(3)))); - assert_eq!( - rows[1].values.get("name"), - Some(&ScalarValue::Utf8(Some("inserted".to_string()))) - ); - } - - #[test] - fn overlay_merge_honors_user_scope_for_same_primary_key() { - let schema = Arc::new(Schema::new(vec![ - Field::new("id", DataType::Int64, false), - Field::new("name", DataType::Utf8, true), - ])); - let empty_batch = RecordBatch::try_new( - Arc::clone(&schema), - vec![ - Arc::new(Int64Array::from(Vec::::new())), - Arc::new(StringArray::from(Vec::>::new())), - ], - ) - .expect("empty batch"); - - let transaction_id = TransactionId::new("01960f7b-3d15-7d6d-b26c-7e4db6f25f8d"); - let table_id = TableId::new(NamespaceId::new("app"), TableName::new("items")); - let first_user = UserId::new("user-a"); - let second_user = UserId::new("user-b"); - let mut overlay = TransactionOverlay::new(transaction_id.clone()); - - overlay.apply_entry(TransactionOverlayEntry { - transaction_id: transaction_id.clone(), - mutation_order: 0, - table_id: table_id.clone(), - table_type: TableType::User, - user_id: Some(first_user.clone()), - operation_kind: OperationKind::Insert, - primary_key: "1".to_string(), - payload: row(&[ - ("id", ScalarValue::Int64(Some(1))), - ("name", ScalarValue::Utf8(Some("alice".to_string()))), - ]), - tombstone: false, - }); - overlay.apply_entry(TransactionOverlayEntry { - transaction_id, - mutation_order: 1, - table_id: table_id.clone(), - table_type: TableType::User, - user_id: Some(second_user.clone()), - operation_kind: OperationKind::Insert, - primary_key: "1".to_string(), - payload: row(&[ - ("id", ScalarValue::Int64(Some(1))), - ("name", ScalarValue::Utf8(Some("bob".to_string()))), - ]), - tombstone: false, - }); - - let first_merged = merge_batches_with_overlay( - &schema, - &table_id, - "id", - &overlay, - Some(&first_user), - &[empty_batch.clone()], - None, - None, - ) - .expect("first user merged batch"); - let second_merged = merge_batches_with_overlay( - &schema, - &table_id, - "id", - &overlay, - Some(&second_user), - &[empty_batch], - None, - None, - ) - .expect("second user merged batch"); - - let first_rows = record_batch_to_rows(&first_merged).expect("first rows"); - let second_rows = record_batch_to_rows(&second_merged).expect("second rows"); - assert_eq!(first_rows.len(), 1); - assert_eq!(second_rows.len(), 1); - assert_eq!( - first_rows[0].values.get("name"), - Some(&ScalarValue::Utf8(Some("alice".to_string()))) - ); - assert_eq!( - second_rows[0].values.get("name"), - Some(&ScalarValue::Utf8(Some("bob".to_string()))) - ); - } -} \ No newline at end of file + use datafusion::arrow::array::{Int64Array, StringArray}; + use datafusion::arrow::datatypes::{DataType, Field, Schema}; + + use super::*; + use crate::overlay::TransactionOverlayEntry; + 
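// Aside: the user-scope filter in merge_batches_with_overlay above boils down
// to "no scope admits every entry; a concrete scope admits only entries staged
// for exactly that user", which is what the scoped tests below exercise. A
// tiny stand-alone version, with UserId simplified to &str:
fn scope_admits(entry_user: Option<&str>, scope: Option<&str>) -> bool {
    scope.map(|user| entry_user == Some(user)).unwrap_or(true)
}

fn main() {
    assert!(scope_admits(Some("user-a"), None)); // unscoped merge sees everything
    assert!(scope_admits(Some("user-a"), Some("user-a")));
    assert!(!scope_admits(Some("user-b"), Some("user-a"))); // other users filtered out
    assert!(!scope_admits(None, Some("user-a"))); // unscoped entries hidden under a user scope
}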
use kalamdb_commons::models::{NamespaceId, OperationKind, TableName, TransactionId}; + use kalamdb_commons::TableType; + + fn row(values: &[(&str, ScalarValue)]) -> Row { + let mut fields = BTreeMap::new(); + for (name, value) in values { + fields.insert((*name).to_string(), value.clone()); + } + Row::new(fields) + } + + #[test] + fn overlay_merge_replaces_updates_filters_deletes_and_appends_inserts() { + let schema = Arc::new(Schema::new(vec![ + Field::new("id", DataType::Int64, false), + Field::new("name", DataType::Utf8, true), + ])); + let base_batch = RecordBatch::try_new( + Arc::clone(&schema), + vec![ + Arc::new(Int64Array::from(vec![1, 2])), + Arc::new(StringArray::from(vec![Some("before"), Some("remove")])), + ], + ) + .expect("base batch"); + + let transaction_id = TransactionId::new("01960f7b-3d15-7d6d-b26c-7e4db6f25f8d"); + let table_id = TableId::new(NamespaceId::new("app"), TableName::new("items")); + let mut overlay = TransactionOverlay::new(transaction_id.clone()); + overlay.apply_entry(TransactionOverlayEntry { + transaction_id: transaction_id.clone(), + mutation_order: 0, + table_id: table_id.clone(), + table_type: TableType::Shared, + user_id: None, + operation_kind: OperationKind::Update, + primary_key: "1".to_string(), + payload: row(&[("name", ScalarValue::Utf8(Some("after".to_string())))]), + tombstone: false, + }); + overlay.apply_entry(TransactionOverlayEntry { + transaction_id: transaction_id.clone(), + mutation_order: 1, + table_id: table_id.clone(), + table_type: TableType::Shared, + user_id: None, + operation_kind: OperationKind::Delete, + primary_key: "2".to_string(), + payload: Row::new(BTreeMap::new()), + tombstone: true, + }); + overlay.apply_entry(TransactionOverlayEntry { + transaction_id, + mutation_order: 2, + table_id: table_id.clone(), + table_type: TableType::Shared, + user_id: None, + operation_kind: OperationKind::Insert, + primary_key: "3".to_string(), + payload: row(&[ + ("id", ScalarValue::Int64(Some(3))), + ("name", ScalarValue::Utf8(Some("inserted".to_string()))), + ]), + tombstone: false, + }); + + let merged = merge_batches_with_overlay( + &schema, + &table_id, + "id", + &overlay, + None, + &[base_batch], + None, + None, + ) + .expect("merged batch"); + + assert_eq!(merged.num_rows(), 2); + let rows = record_batch_to_rows(&merged).expect("rows"); + assert_eq!(rows[0].values.get("id"), Some(&ScalarValue::Int64(Some(1)))); + assert_eq!(rows[0].values.get("name"), Some(&ScalarValue::Utf8(Some("after".to_string())))); + assert_eq!(rows[1].values.get("id"), Some(&ScalarValue::Int64(Some(3)))); + assert_eq!( + rows[1].values.get("name"), + Some(&ScalarValue::Utf8(Some("inserted".to_string()))) + ); + } + + #[test] + fn overlay_merge_honors_user_scope_for_same_primary_key() { + let schema = Arc::new(Schema::new(vec![ + Field::new("id", DataType::Int64, false), + Field::new("name", DataType::Utf8, true), + ])); + let empty_batch = RecordBatch::try_new( + Arc::clone(&schema), + vec![ + Arc::new(Int64Array::from(Vec::::new())), + Arc::new(StringArray::from(Vec::>::new())), + ], + ) + .expect("empty batch"); + + let transaction_id = TransactionId::new("01960f7b-3d15-7d6d-b26c-7e4db6f25f8d"); + let table_id = TableId::new(NamespaceId::new("app"), TableName::new("items")); + let first_user = UserId::new("user-a"); + let second_user = UserId::new("user-b"); + let mut overlay = TransactionOverlay::new(transaction_id.clone()); + + overlay.apply_entry(TransactionOverlayEntry { + transaction_id: transaction_id.clone(), + mutation_order: 0, + table_id: 
table_id.clone(), + table_type: TableType::User, + user_id: Some(first_user.clone()), + operation_kind: OperationKind::Insert, + primary_key: "1".to_string(), + payload: row(&[ + ("id", ScalarValue::Int64(Some(1))), + ("name", ScalarValue::Utf8(Some("alice".to_string()))), + ]), + tombstone: false, + }); + overlay.apply_entry(TransactionOverlayEntry { + transaction_id, + mutation_order: 1, + table_id: table_id.clone(), + table_type: TableType::User, + user_id: Some(second_user.clone()), + operation_kind: OperationKind::Insert, + primary_key: "1".to_string(), + payload: row(&[ + ("id", ScalarValue::Int64(Some(1))), + ("name", ScalarValue::Utf8(Some("bob".to_string()))), + ]), + tombstone: false, + }); + + let first_merged = merge_batches_with_overlay( + &schema, + &table_id, + "id", + &overlay, + Some(&first_user), + &[empty_batch.clone()], + None, + None, + ) + .expect("first user merged batch"); + let second_merged = merge_batches_with_overlay( + &schema, + &table_id, + "id", + &overlay, + Some(&second_user), + &[empty_batch], + None, + None, + ) + .expect("second user merged batch"); + + let first_rows = record_batch_to_rows(&first_merged).expect("first rows"); + let second_rows = record_batch_to_rows(&second_merged).expect("second rows"); + assert_eq!(first_rows.len(), 1); + assert_eq!(second_rows.len(), 1); + assert_eq!( + first_rows[0].values.get("name"), + Some(&ScalarValue::Utf8(Some("alice".to_string()))) + ); + assert_eq!( + second_rows[0].values.get("name"), + Some(&ScalarValue::Utf8(Some("bob".to_string()))) + ); + } +} diff --git a/backend/crates/kalamdb-transactions/src/query_context.rs b/backend/crates/kalamdb-transactions/src/query_context.rs index ffa58eea1..6b18aae45 100644 --- a/backend/crates/kalamdb-transactions/src/query_context.rs +++ b/backend/crates/kalamdb-transactions/src/query_context.rs @@ -96,4 +96,4 @@ impl TransactionQueryContext { access_validator, } } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-transactions/src/query_extension.rs b/backend/crates/kalamdb-transactions/src/query_extension.rs index 1f50a963c..a1ce9f02b 100644 --- a/backend/crates/kalamdb-transactions/src/query_extension.rs +++ b/backend/crates/kalamdb-transactions/src/query_extension.rs @@ -57,4 +57,4 @@ pub fn extract_transaction_query_context( .extensions .get::() .map(|extension| &extension.context) -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-transactions/src/staged_mutation.rs b/backend/crates/kalamdb-transactions/src/staged_mutation.rs index 78b082cd3..81d2341a8 100644 --- a/backend/crates/kalamdb-transactions/src/staged_mutation.rs +++ b/backend/crates/kalamdb-transactions/src/staged_mutation.rs @@ -15,11 +15,9 @@ pub enum StagedInsertBuildError { impl fmt::Display for StagedInsertBuildError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - Self::MissingPrimaryKey { column_name } => write!( - f, - "transactional INSERT requires primary key column '{}'", - column_name - ), + Self::MissingPrimaryKey { column_name } => { + write!(f, "transactional INSERT requires primary key column '{}'", column_name) + }, } } } @@ -184,4 +182,4 @@ mod tests { } ); } -} \ No newline at end of file +} diff --git a/backend/crates/kalamdb-views/src/lib.rs b/backend/crates/kalamdb-views/src/lib.rs index fb02204ae..92f40efe0 100644 --- a/backend/crates/kalamdb-views/src/lib.rs +++ b/backend/crates/kalamdb-views/src/lib.rs @@ -14,8 +14,8 @@ pub mod datatypes; pub mod describe; pub mod error; pub mod live; -pub mod sessions; pub mod 
server_logs; +pub mod sessions; pub mod settings; pub mod stats; pub mod tables_view; @@ -29,8 +29,8 @@ pub use datatypes::*; pub use describe::*; pub use error::*; pub use live::*; -pub use sessions::*; pub use server_logs::*; +pub use sessions::*; pub use settings::*; pub use stats::*; pub use tables_view::*; diff --git a/backend/crates/kalamdb-views/src/sessions.rs b/backend/crates/kalamdb-views/src/sessions.rs index b26c8be10..75fa1c285 100644 --- a/backend/crates/kalamdb-views/src/sessions.rs +++ b/backend/crates/kalamdb-views/src/sessions.rs @@ -50,8 +50,8 @@ fn sessions_schema() -> SchemaRef { fn parse_backend_pid(session_id: &str) -> Option { session_id .strip_prefix("pg-") - .and_then(|value| value.split('-').next()) - .and_then(|value| value.parse::().ok()) + .and_then(|value| value.split('-').next()) + .and_then(|value| value.parse::().ok()) } fn derive_state(snapshot: &PgSessionSnapshot) -> &'static str { @@ -230,9 +230,7 @@ impl SessionsView { TableType::System, columns, TableOptions::system(), - Some( - "Active PostgreSQL gRPC sessions tracked by the pg extension bridge".to_string(), - ), + Some("Active PostgreSQL gRPC sessions tracked by the pg extension bridge".to_string()), ) .expect("Failed to create system.sessions view definition") } @@ -390,4 +388,4 @@ mod tests { assert_eq!(parse_backend_pid("pg-321-deadbeef"), Some(321)); assert_eq!(parse_backend_pid("pg-321"), Some(321)); } -} \ No newline at end of file +} diff --git a/backend/src/lifecycle.rs b/backend/src/lifecycle.rs index 7338235bf..38d44763a 100644 --- a/backend/src/lifecycle.rs +++ b/backend/src/lifecycle.rs @@ -11,12 +11,12 @@ use kalamdb_api::limiter::RateLimiter; use kalamdb_auth::CachedUsersRepo; use kalamdb_commons::{AuthType, Role, StorageId, UserId}; use kalamdb_configs::ServerConfig; -use kalamdb_live::{ConnectionsManager, LiveQueryManager}; use kalamdb_core::sql::datafusion_session::DataFusionSessionFactory; use kalamdb_core::sql::executor::handler_registry::HandlerRegistry; use kalamdb_core::sql::executor::SqlExecutor; use kalamdb_dba::{initialize_dba_namespace, start_stats_recorder}; use kalamdb_jobs::AppContextJobsExt; +use kalamdb_live::{ConnectionsManager, LiveQueryManager}; use kalamdb_store::open_storage_backend; use kalamdb_system::providers::storages::models::StorageMode; use log::debug; @@ -524,9 +524,8 @@ pub async fn run( // Share auth settings with HTTP handlers let auth_settings = config.auth.clone(); let ui_path = config.server.ui_path.clone(); - let ui_runtime_config = kalamdb_api::ui::UiRuntimeConfig::new( - config.server.effective_public_origin(), - ); + let ui_runtime_config = + kalamdb_api::ui::UiRuntimeConfig::new(config.server.configured_public_origin()); // Log UI serving status let ui_status = if kalamdb_api::routes::is_embedded_ui_available() { @@ -784,9 +783,8 @@ pub async fn run_for_tests( kalamdb_auth::init_trusted_proxy_ranges(&config.security.trusted_proxy_ranges)?; let auth_settings = config.auth.clone(); let ui_path = config.server.ui_path.clone(); - let ui_runtime_config = kalamdb_api::ui::UiRuntimeConfig::new( - config.server.effective_public_origin(), - ); + let ui_runtime_config = + kalamdb_api::ui::UiRuntimeConfig::new(config.server.configured_public_origin()); let server = HttpServer::new(move || { let mut app = App::new() @@ -813,11 +811,7 @@ pub async fn run_for_tests( let path: String = path.clone(); let runtime_config = ui_runtime_config.clone(); app = app.configure(move |cfg| { - kalamdb_api::routes::configure_ui_routes( - cfg, - &path, - 
runtime_config.clone(), - ); + kalamdb_api::routes::configure_ui_routes(cfg, &path, runtime_config.clone()); }); } @@ -826,11 +820,7 @@ pub async fn run_for_tests( let path: String = path.clone(); let runtime_config = ui_runtime_config.clone(); app = app.configure(move |cfg| { - kalamdb_api::routes::configure_ui_routes( - cfg, - &path, - runtime_config.clone(), - ); + kalamdb_api::routes::configure_ui_routes(cfg, &path, runtime_config.clone()); }); } @@ -890,9 +880,8 @@ pub async fn run_detached( kalamdb_auth::init_trusted_proxy_ranges(&config.security.trusted_proxy_ranges)?; let auth_settings = config.auth.clone(); let ui_path = config.server.ui_path.clone(); - let ui_runtime_config = kalamdb_api::ui::UiRuntimeConfig::new( - config.server.effective_public_origin(), - ); + let ui_runtime_config = + kalamdb_api::ui::UiRuntimeConfig::new(config.server.configured_public_origin()); let server = HttpServer::new(move || { let mut app = App::new() @@ -919,11 +908,7 @@ pub async fn run_detached( let path: String = path.clone(); let runtime_config = ui_runtime_config.clone(); app = app.configure(move |cfg| { - kalamdb_api::routes::configure_ui_routes( - cfg, - &path, - runtime_config.clone(), - ); + kalamdb_api::routes::configure_ui_routes(cfg, &path, runtime_config.clone()); }); } @@ -932,11 +917,7 @@ pub async fn run_detached( let path: String = path.clone(); let runtime_config = ui_runtime_config.clone(); app = app.configure(move |cfg| { - kalamdb_api::routes::configure_ui_routes( - cfg, - &path, - runtime_config.clone(), - ); + kalamdb_api::routes::configure_ui_routes(cfg, &path, runtime_config.clone()); }); } diff --git a/backend/tests/common/testserver/http_server.rs b/backend/tests/common/testserver/http_server.rs index ebd6e9d0a..4aa6a0d9b 100644 --- a/backend/tests/common/testserver/http_server.rs +++ b/backend/tests/common/testserver/http_server.rs @@ -876,9 +876,8 @@ async fn wait_for_cluster_ready(nodes: &[HttpTestServer]) -> Result<()> { for node in nodes { let executor = node.app_context().executor(); let meta_leader = executor.get_leader(kalamdb_raft::GroupId::Meta).await; - let shared_leader = executor - .get_leader(kalamdb_raft::GroupId::DataSharedShard(0)) - .await; + let shared_leader = + executor.get_leader(kalamdb_raft::GroupId::DataSharedShard(0)).await; if meta_leader.is_none() || shared_leader.is_none() { ready = false; @@ -889,10 +888,7 @@ async fn wait_for_cluster_ready(nodes: &[HttpTestServer]) -> Result<()> { meta_leader_count += 1; } - if executor - .is_leader(kalamdb_raft::GroupId::DataSharedShard(0)) - .await - { + if executor.is_leader(kalamdb_raft::GroupId::DataSharedShard(0)).await { shared_leader_count += 1; } @@ -1010,13 +1006,7 @@ async fn start_cluster_server() -> Result { let cluster_id = format!("http-test-cluster-{}", std::process::id()); let node_specs = (1u64..=3) - .map(|node_id| { - Ok(( - node_id, - reserve_local_port()?, - reserve_local_port()?, - )) - }) + .map(|node_id| Ok((node_id, reserve_local_port()?, reserve_local_port()?))) .collect::>>()?; let mut nodes = Vec::new(); @@ -1062,8 +1052,8 @@ async fn start_cluster_server() -> Result { initialize_cluster, !initialize_cluster, ) - .await - .map_err(|e| anyhow::anyhow!("Failed to start cluster node {}: {}", index, e))?; + .await + .map_err(|e| anyhow::anyhow!("Failed to start cluster node {}: {}", index, e))?; nodes.push(server); } diff --git a/backend/tests/endurance_test.rs b/backend/tests/endurance_test.rs index 2e40e1def..b60f47920 100644 --- a/backend/tests/endurance_test.rs +++ 
b/backend/tests/endurance_test.rs @@ -232,20 +232,20 @@ async fn test_chat_app_endurance_with_100_parallel_users() -> Result<()> { let server = get_global_server().await; let namespace = unique_namespace("endurance_chat"); - let duration = Duration::from_secs(env_u64( - "KALAMDB_ENDURANCE_DURATION_SECS", - DEFAULT_DURATION_SECS, - )); + let duration = + Duration::from_secs(env_u64("KALAMDB_ENDURANCE_DURATION_SECS", DEFAULT_DURATION_SECS)); let user_count = env_usize("KALAMDB_ENDURANCE_USER_COUNT", DEFAULT_USER_COUNT); - let subscriber_count = env_usize( - "KALAMDB_ENDURANCE_SUBSCRIBER_COUNT", - DEFAULT_SUBSCRIBER_COUNT.min(user_count), - ) - .min(user_count); + let subscriber_count = + env_usize("KALAMDB_ENDURANCE_SUBSCRIBER_COUNT", DEFAULT_SUBSCRIBER_COUNT.min(user_count)) + .min(user_count); let conversation_count = env_usize("KALAMDB_ENDURANCE_ROOM_COUNT", DEFAULT_ROOM_COUNT); let create_namespace = server.execute_sql(&format!("CREATE NAMESPACE {}", namespace)).await?; - assert!(create_namespace.success(), "CREATE NAMESPACE failed: {:?}", create_namespace.error); + assert!( + create_namespace.success(), + "CREATE NAMESPACE failed: {:?}", + create_namespace.error + ); let create_conversations = server .execute_sql(&format!( @@ -280,11 +280,7 @@ async fn test_chat_app_endurance_with_100_parallel_users() -> Result<()> { namespace )) .await?; - assert!( - create_messages.success(), - "CREATE messages failed: {:?}", - create_messages.error - ); + assert!(create_messages.success(), "CREATE messages failed: {:?}", create_messages.error); let create_typing_events = server .execute_sql(&format!( @@ -350,10 +346,8 @@ async fn test_chat_app_endurance_with_100_parallel_users() -> Result<()> { .context("expected at least one endurance user")?; let assistant_namespace = namespace.clone(); - let assistant_users = user_clients - .iter() - .map(|(username, _)| username.clone()) - .collect::>(); + let assistant_users = + user_clients.iter().map(|(username, _)| username.clone()).collect::>(); let assistant_errors = Arc::clone(&errors); let assistant_message_ids = Arc::clone(&next_message_id); let assistant_task = tokio::spawn(async move { @@ -501,11 +495,7 @@ async fn test_chat_app_endurance_with_100_parallel_users() -> Result<()> { namespace )) .await?; - assert!( - duplicate_check.success(), - "duplicate check failed: {:?}", - duplicate_check.error - ); + assert!(duplicate_check.success(), "duplicate check failed: {:?}", duplicate_check.error); let totals = duplicate_check.rows_as_maps(); let total_rows = totals @@ -570,11 +560,7 @@ async fn test_chat_app_endurance_with_100_parallel_users() -> Result<()> { update_count, delete_count ); - assert_eq!( - error_count, - 0, - "endurance workload recorded backend errors" - ); + assert_eq!(error_count, 0, "endurance workload recorded backend errors"); let _ = server.execute_sql(&format!("DROP NAMESPACE {} CASCADE", namespace)).await; Ok(()) diff --git a/backend/tests/misc/sql/test_pk_index_efficiency.rs b/backend/tests/misc/sql/test_pk_index_efficiency.rs index 62fafca43..0fe48b48b 100644 --- a/backend/tests/misc/sql/test_pk_index_efficiency.rs +++ b/backend/tests/misc/sql/test_pk_index_efficiency.rs @@ -380,11 +380,7 @@ async fn test_user_table_pk_index_select() { .collect(); server .execute_sql_as_user( - &format!( - "INSERT INTO {}.records (id, data) VALUES {}", - ns, - values.join(", ") - ), + &format!("INSERT INTO {}.records (id, data) VALUES {}", ns, values.join(", ")), "select_user", ) .await; @@ -419,11 +415,7 @@ async fn 
test_user_table_pk_index_select() { .collect(); server .execute_sql_as_user( - &format!( - "INSERT INTO {}.records (id, data) VALUES {}", - ns, - values.join(", ") - ), + &format!("INSERT INTO {}.records (id, data) VALUES {}", ns, values.join(", ")), "select_user", ) .await; diff --git a/backend/tests/misc/system/test_runtime_metrics.rs b/backend/tests/misc/system/test_runtime_metrics.rs index e256a0e69..e4c2e7e84 100644 --- a/backend/tests/misc/system/test_runtime_metrics.rs +++ b/backend/tests/misc/system/test_runtime_metrics.rs @@ -63,4 +63,4 @@ async fn test_system_stats_expose_memory_breakdown_and_allocator_metrics() { .expect("memory_usage_source row"); assert_eq!(source, "physical_footprint"); } -} \ No newline at end of file +} diff --git a/backend/tests/testserver/cluster/test_cluster_health_http.rs b/backend/tests/testserver/cluster/test_cluster_health_http.rs index 24bad98b2..0d4e280f1 100644 --- a/backend/tests/testserver/cluster/test_cluster_health_http.rs +++ b/backend/tests/testserver/cluster/test_cluster_health_http.rs @@ -42,4 +42,4 @@ async fn test_cluster_health_exposes_runtime_metrics() -> Result<()> { server.shutdown().await; result -} \ No newline at end of file +} diff --git a/backend/tests/testserver/cluster/test_cluster_transactions_http.rs b/backend/tests/testserver/cluster/test_cluster_transactions_http.rs index 3284565d3..7b97ca597 100644 --- a/backend/tests/testserver/cluster/test_cluster_transactions_http.rs +++ b/backend/tests/testserver/cluster/test_cluster_transactions_http.rs @@ -12,7 +12,9 @@ fn result_i64(result: &QueryResult, column: &str) -> Result { .ok_or_else(|| anyhow::anyhow!("missing first row for column {}", column))?; row.get(column) - .and_then(|value| value.as_i64().or_else(|| value.as_str().and_then(|raw| raw.parse().ok()))) + .and_then(|value| { + value.as_i64().or_else(|| value.as_str().and_then(|raw| raw.parse().ok())) + }) .ok_or_else(|| anyhow::anyhow!("missing numeric column {} in row {:?}", column, row)) } @@ -73,7 +75,11 @@ async fn wait_for_table_visible( sleep(Duration::from_millis(50)).await; }, Ok(response) => { - anyhow::bail!("table {}.items did not become visible: {:?}", namespace, response.error) + anyhow::bail!( + "table {}.items did not become visible: {:?}", + namespace, + response.error + ) }, Err(error) => { anyhow::bail!("table {}.items did not become visible: {}", namespace, error) @@ -147,14 +153,22 @@ async fn test_sql_transaction_forwarded_from_follower_preserves_atomic_staging() .find(|result| result.row_as_map(0).is_some_and(|row| row.contains_key("visible_rows"))) .map(|result| result_i64(result, "visible_rows")) .transpose()? - .ok_or_else(|| anyhow::anyhow!("missing visible_rows result set: {:?}", response.results))?; - anyhow::ensure!(visible_rows == 2, "expected in-transaction visibility of 2 rows, got {}", visible_rows); + .ok_or_else(|| { + anyhow::anyhow!("missing visible_rows result set: {:?}", response.results) + })?; + anyhow::ensure!( + visible_rows == 2, + "expected in-transaction visibility of 2 rows, got {}", + visible_rows + ); let staged_result = response .results .iter() .find(|result| result.row_as_map(0).is_some_and(|row| row.contains_key("staged_writes"))) - .ok_or_else(|| anyhow::anyhow!("missing staged_writes result set: {:?}", response.results))?; + .ok_or_else(|| { + anyhow::anyhow!("missing staged_writes result set: {:?}", response.results) + })?; anyhow::ensure!(result_i64(staged_result, "staged_writes")? == 2); anyhow::ensure!(result_i64(staged_result, "staged_tables")? 
== 1); @@ -172,8 +186,15 @@ async fn test_sql_transaction_forwarded_from_follower_preserves_atomic_staging() let committed = shared_leader .execute_sql(&format!("SELECT COUNT(*) AS committed_rows FROM {}.items", namespace)) .await?; - anyhow::ensure!(committed.status == ResponseStatus::Success, "post-commit count failed: {:?}", committed.error); - anyhow::ensure!(get_count_value(&committed, 0) == 2, "expected 2 committed rows after atomic commit"); + anyhow::ensure!( + committed.status == ResponseStatus::Success, + "post-commit count failed: {:?}", + committed.error + ); + anyhow::ensure!( + get_count_value(&committed, 0) == 2, + "expected 2 committed rows after atomic commit" + ); let active_transactions = shared_leader .execute_sql("SELECT COUNT(*) AS cnt FROM system.transactions WHERE origin = 'SqlBatch'") @@ -183,7 +204,10 @@ async fn test_sql_transaction_forwarded_from_follower_preserves_atomic_staging() "system.transactions verification failed: {:?}", active_transactions.error ); - anyhow::ensure!(get_count_value(&active_transactions, -1) == 0, "expected no lingering SqlBatch transactions"); + anyhow::ensure!( + get_count_value(&active_transactions, -1) == 0, + "expected no lingering SqlBatch transactions" + ); Ok(()) -} \ No newline at end of file +} diff --git a/backend/tests/testserver/cluster/test_cluster_views_http.rs b/backend/tests/testserver/cluster/test_cluster_views_http.rs index e0a959a7e..46e741f5a 100644 --- a/backend/tests/testserver/cluster/test_cluster_views_http.rs +++ b/backend/tests/testserver/cluster/test_cluster_views_http.rs @@ -34,10 +34,7 @@ async fn test_system_cluster_views_over_http() -> Result<()> { first.contains_key("uptime_seconds"), "system.cluster missing uptime_seconds" ); - anyhow::ensure!( - first.contains_key("uptime_human"), - "system.cluster missing uptime_human" - ); + anyhow::ensure!(first.contains_key("uptime_human"), "system.cluster missing uptime_human"); let resp = server.execute_sql("SELECT * FROM system.cluster_groups").await?; anyhow::ensure!( diff --git a/cargo-license-check.toml b/cargo-license-check.toml index 277817a27..2412a4cfb 100644 --- a/cargo-license-check.toml +++ b/cargo-license-check.toml @@ -8,6 +8,7 @@ allow = [ "Apache-2.0", "Apache-2.0 WITH LLVM-exception", "MIT", + "MIT-0", "BSD-2-Clause", "BSD-3-Clause", "ISC", diff --git a/cli/src/session.rs b/cli/src/session.rs index 5c02d3f7a..b510ea8d4 100644 --- a/cli/src/session.rs +++ b/cli/src/session.rs @@ -14,10 +14,9 @@ use colored::*; use indicatif::{ProgressBar, ProgressStyle}; use kalam_client::{ credentials::{CredentialStore, Credentials}, - AuthProvider, AuthRefreshCallback, ClusterHealthResponse, ClusterNodeHealth, - ConnectionOptions, KalamLinkClient, KalamLinkError, KalamLinkTimeouts, - SubscriptionConfig, SubscriptionOptions, TimestampFormatter, UploadProgress, - UploadProgressCallback, + AuthProvider, AuthRefreshCallback, ClusterHealthResponse, ClusterNodeHealth, ConnectionOptions, + KalamLinkClient, KalamLinkError, KalamLinkTimeouts, SubscriptionConfig, SubscriptionOptions, + TimestampFormatter, UploadProgress, UploadProgressCallback, }; use rustyline::completion::Completer; use rustyline::error::ReadlineError; @@ -2500,12 +2499,12 @@ impl CLISession { self.connected = true; self.server_version = Self::normalize_server_field(health.version.clone()); self.server_api_version = Self::normalize_server_field(health.api_version.clone()); - self.server_build_date = health - .build_date - .clone() - .and_then(Self::normalize_server_field); + self.server_build_date = + 
health.build_date.clone().and_then(Self::normalize_server_field); }, - Err(KalamLinkError::ServerError { status_code: 403, .. }) => { + Err(KalamLinkError::ServerError { + status_code: 403, .. + }) => { // Localhost-only endpoint; fall back to authenticated SQL-based cluster info. }, Err(_) => { @@ -2524,12 +2523,16 @@ impl CLISession { if info.is_cluster_mode { match self.client.cluster_health_check().await { Ok(cluster_health) => { - self.server_version = Self::normalize_server_field(cluster_health.version.clone()); - self.server_build_date = Self::normalize_server_field(cluster_health.build_date.clone()); + self.server_version = + Self::normalize_server_field(cluster_health.version.clone()); + self.server_build_date = + Self::normalize_server_field(cluster_health.build_date.clone()); self.render_cluster_health_response(&cluster_health); return Ok(()); }, - Err(KalamLinkError::ServerError { status_code: 403, .. }) => { + Err(KalamLinkError::ServerError { + status_code: 403, .. + }) => { self.render_cluster_health_fallback( info, Some("Cluster health endpoint is localhost-only; using system.cluster"), @@ -2551,7 +2554,9 @@ impl CLISession { println!("✓ Server is healthy"); return Ok(()); }, - Err(KalamLinkError::ServerError { status_code: 403, .. }) => { + Err(KalamLinkError::ServerError { + status_code: 403, .. + }) => { println!("{} Server is reachable", "✓".green()); println!(" {}", "Health endpoint is restricted to localhost".yellow()); return Ok(()); diff --git a/cli/src/session/commands.rs b/cli/src/session/commands.rs index f524061df..5abe43339 100644 --- a/cli/src/session/commands.rs +++ b/cli/src/session/commands.rs @@ -178,8 +178,10 @@ impl CLISession { self.show_session_info().await; }, Command::Sessions => { - self.execute("SELECT * FROM system.sessions ORDER BY last_seen_at DESC, session_id") - .await?; + self.execute( + "SELECT * FROM system.sessions ORDER BY last_seen_at DESC, session_id", + ) + .await?; }, Command::Stats => { self.execute( diff --git a/cli/tests/cluster/cluster_test_table_identity.rs b/cli/tests/cluster/cluster_test_table_identity.rs index 3c1898209..24a349570 100644 --- a/cli/tests/cluster/cluster_test_table_identity.rs +++ b/cli/tests/cluster/cluster_test_table_identity.rs @@ -14,7 +14,8 @@ use std::time::Duration; fn query_with_verification_limit(sql: &str, expected_rows: usize) -> String { let trimmed = sql.trim().trim_end_matches(';'); - if expected_rows == 0 || trimmed.split_whitespace().any(|part| part.eq_ignore_ascii_case("LIMIT")) + if expected_rows == 0 + || trimmed.split_whitespace().any(|part| part.eq_ignore_ascii_case("LIMIT")) { trimmed.to_string() } else { diff --git a/cli/tests/common/mod.rs b/cli/tests/common/mod.rs index 14333b926..3269e0241 100644 --- a/cli/tests/common/mod.rs +++ b/cli/tests/common/mod.rs @@ -4500,9 +4500,8 @@ pub fn server_target_is_local() -> bool { fn read_local_process_rss_mb(pid: u32) -> Option { static PROCESS_SYSTEM: OnceLock> = OnceLock::new(); - let system = PROCESS_SYSTEM.get_or_init(|| { - Mutex::new(System::new_with_specifics(RefreshKind::nothing())) - }); + let system = PROCESS_SYSTEM + .get_or_init(|| Mutex::new(System::new_with_specifics(RefreshKind::nothing()))); let mut guard = system.lock().ok()?; let pid = Pid::from_u32(pid); diff --git a/cli/tests/performance.rs b/cli/tests/performance.rs index 515c9a107..748097a8f 100644 --- a/cli/tests/performance.rs +++ b/cli/tests/performance.rs @@ -6,4 +6,4 @@ mod common; #[path = "performance/test_server_memory_regression.rs"] -mod 
test_server_memory_regression; \ No newline at end of file +mod test_server_memory_regression; diff --git a/cli/tests/performance/test_server_memory_regression.rs b/cli/tests/performance/test_server_memory_regression.rs index 3d2465839..78d43cdf6 100644 --- a/cli/tests/performance/test_server_memory_regression.rs +++ b/cli/tests/performance/test_server_memory_regression.rs @@ -49,10 +49,7 @@ fn log_sample(label: &str, sample: &ServerMemorySample) { .rss_mb .map(|rss| format!("{}MB", rss)) .unwrap_or_else(|| "n/a".to_string()), - sample - .pid - .map(|pid| pid.to_string()) - .unwrap_or_else(|| "n/a".to_string()) + sample.pid.map(|pid| pid.to_string()).unwrap_or_else(|| "n/a".to_string()) ); } @@ -113,8 +110,9 @@ fn create_memory_test_table(namespace: &str, table: &str) -> String { } fn flush_and_drop_namespace(namespace: &str, full_table: &str) { - let flush_json = execute_sql_as_root_via_client_json(&format!("STORAGE FLUSH TABLE {}", full_table)) - .expect("flush table should succeed"); + let flush_json = + execute_sql_as_root_via_client_json(&format!("STORAGE FLUSH TABLE {}", full_table)) + .expect("flush table should succeed"); let job_id = parse_job_id_from_json_message(&flush_json) .or_else(|_| parse_job_id_from_flush_output(&flush_json)) .expect("flush response should contain a job id"); @@ -145,13 +143,7 @@ fn run_memory_workload( if !values.is_empty() { values.push_str(", "); } - values.push_str(&format!( - "({}, '{}', '{}', {})", - row_id, - tenant, - payload, - row_id - )); + values.push_str(&format!("({}, '{}', '{}', {})", row_id, tenant, payload, row_id)); } execute_sql_as_root_via_client(&format!( @@ -214,7 +206,10 @@ fn run_memory_workload( #[test] fn smoke_test_server_memory_regression() { if !is_server_running() { - eprintln!("Skipping smoke_test_server_memory_regression: server not running at {}", server_url()); + eprintln!( + "Skipping smoke_test_server_memory_regression: server not running at {}", + server_url() + ); return; } @@ -226,7 +221,8 @@ fn smoke_test_server_memory_regression() { let recovery_budget_mb = env_u64("KALAMDB_MEM_TEST_RECOVERY_DELTA_MB", DEFAULT_RECOVERY_DELTA_MB); let settle_seconds = env_u64("KALAMDB_MEM_TEST_SETTLE_SECS", DEFAULT_SETTLE_SECONDS).max(1); - let baseline_samples = env_usize("KALAMDB_MEM_TEST_BASELINE_SAMPLES", DEFAULT_BASELINE_SAMPLES).max(1); + let baseline_samples = + env_usize("KALAMDB_MEM_TEST_BASELINE_SAMPLES", DEFAULT_BASELINE_SAMPLES).max(1); let enable_warmup = env_bool("KALAMDB_MEM_TEST_WARMUP", true); let warmup_rows = env_usize("KALAMDB_MEM_TEST_WARMUP_ROWS", total_rows); let warmup_query_loops = env_usize("KALAMDB_MEM_TEST_WARMUP_QUERY_LOOPS", query_loops).max(1); @@ -251,11 +247,14 @@ fn smoke_test_server_memory_regression() { ); if enable_warmup && warmup_rows > 0 { - println!("[warmup] priming server memory and query paths before measuring steady-state recovery"); + println!( + "[warmup] priming server memory and query paths before measuring steady-state recovery" + ); let warmup_namespace = generate_unique_namespace("perf_mem_warm_ns"); let warmup_table = generate_unique_table("perf_mem_warm_table"); let warmup_full_table = create_memory_test_table(&warmup_namespace, &warmup_table); - let mut warmup_peak = capture_server_memory_sample().expect("capture warmup baseline sample"); + let mut warmup_peak = + capture_server_memory_sample().expect("capture warmup baseline sample"); let _ = run_memory_workload( &warmup_full_table, @@ -335,4 +334,4 @@ fn smoke_test_server_memory_regression() { recovery_delta_mb, 
recovery_budget_mb ); -} \ No newline at end of file +} diff --git a/cli/tests/smoke/subscription/smoke_test_subscription_close.rs b/cli/tests/smoke/subscription/smoke_test_subscription_close.rs index 05ee088a8..e2a178052 100644 --- a/cli/tests/smoke/subscription/smoke_test_subscription_close.rs +++ b/cli/tests/smoke/subscription/smoke_test_subscription_close.rs @@ -35,8 +35,8 @@ fn fast_link_client() -> Result bool { let start = std::time::Instant::now(); loop { - let output = execute_sql_as_root_via_client("SELECT query FROM system.live") - .unwrap_or_default(); + let output = + execute_sql_as_root_via_client("SELECT query FROM system.live").unwrap_or_default(); let found = output.contains(marker); if found == expect_present { return true; diff --git a/cli/tests/smoke/system/smoke_test_system_tables_extended.rs b/cli/tests/smoke/system/smoke_test_system_tables_extended.rs index 507efdc2a..9fb571e03 100644 --- a/cli/tests/smoke/system/smoke_test_system_tables_extended.rs +++ b/cli/tests/smoke/system/smoke_test_system_tables_extended.rs @@ -201,8 +201,8 @@ fn smoke_test_system_live_queries() { // Query system.live let query_sql = "SELECT live_id, query, user_id FROM system.live"; - let output = execute_sql_as_root_via_client_json(query_sql) - .expect("Failed to query system.live"); + let output = + execute_sql_as_root_via_client_json(query_sql).expect("Failed to query system.live"); println!("system.live output:\n{}", output); diff --git a/cli/tests/smoke/usecases/chat_ai_example_smoke.rs b/cli/tests/smoke/usecases/chat_ai_example_smoke.rs index 6e25ad53c..0651d5383 100644 --- a/cli/tests/smoke/usecases/chat_ai_example_smoke.rs +++ b/cli/tests/smoke/usecases/chat_ai_example_smoke.rs @@ -117,37 +117,18 @@ fn smoke_chat_ai_example_from_readme() { assert!(history.contains(sender_username), "expected sender username in history"); // 4. Subscribe to the STREAM table used for live agent draft events. - let agent_events_query = format!( - "SELECT * FROM {} WHERE room = {}", - agent_events_table, - sql_literal(room) - ); + let agent_events_query = + format!("SELECT * FROM {} WHERE room = {}", agent_events_table, sql_literal(room)); let mut listener = SubscriptionListener::start(&agent_events_query) .expect("failed to start subscription for agent events"); std::thread::sleep(Duration::from_secs(2)); // 5. Insert streamed agent lifecycle events and the final assistant reply. 
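// Aside: the perf assertions above read all of their budgets through small
// env helpers (env_u64 / env_usize / env_bool) so CI can tune limits without
// code changes. A minimal sketch of that pattern -- the helper body and the
// EXAMPLE_* variable name are assumptions for illustration, not the repo's
// exact implementation:
use std::env;

fn env_u64(name: &str, default: u64) -> u64 {
    env::var(name).ok().and_then(|raw| raw.parse().ok()).unwrap_or(default)
}

fn main() {
    // Unset or unparsable values fall back to the compiled-in default.
    let budget_mb = env_u64("EXAMPLE_MEM_BUDGET_MB", 256);
    println!("memory budget: {}MB", budget_mb);
}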
let events = vec![ - ( - "thinking", - "", - "Planning assistant reply", - ), - ( - "typing", - typing_preview.as_str(), - "Streaming the first characters of the reply", - ), - ( - "message_saved", - assistant_reply.as_str(), - "Assistant reply committed", - ), - ( - "complete", - assistant_reply.as_str(), - "Live stream finished", - ), + ("thinking", "", "Planning assistant reply"), + ("typing", typing_preview.as_str(), "Streaming the first characters of the reply"), + ("message_saved", assistant_reply.as_str(), "Assistant reply committed"), + ("complete", assistant_reply.as_str(), "Live stream finished"), ]; for (stage, preview, message) in &events { @@ -258,12 +239,10 @@ fn smoke_chat_ai_example_from_readme() { .expect("failed to observe final assistant reply in message history"); let escaped_assistant_reply = assistant_reply.replace('"', "\\\""); + assert!(final_history.contains(user_message), "expected user message in final history"); assert!( - final_history.contains(user_message), - "expected user message in final history" - ); - assert!( - final_history.contains(&assistant_reply) || final_history.contains(&escaped_assistant_reply), + final_history.contains(&assistant_reply) + || final_history.contains(&escaped_assistant_reply), "expected assistant reply in final history" ); diff --git a/cli/tests/smoke/usecases/smoke_test_all_datatypes.rs b/cli/tests/smoke/usecases/smoke_test_all_datatypes.rs index 69ba2222c..dd46ae118 100644 --- a/cli/tests/smoke/usecases/smoke_test_all_datatypes.rs +++ b/cli/tests/smoke/usecases/smoke_test_all_datatypes.rs @@ -248,8 +248,8 @@ fn smoke_all_datatypes_user_shared_stream() { // 8) Verify STREAM table row count / contents (no UPDATE/DELETE for stream tables) if !is_cluster_mode() { let stream_sel = format!("SELECT * FROM {}", stream_full); - let stream_out = execute_sql_as_root_via_http(&stream_sel) - .expect("select stream should succeed"); + let stream_out = + execute_sql_as_root_via_http(&stream_sel).expect("select stream should succeed"); assert!( stream_out.contains("stream_one") || stream_out.contains("stream_two"), "Expected stream row content: {}", diff --git a/link/kalam-client/src/lib.rs b/link/kalam-client/src/lib.rs index f7b219892..2bf2aceff 100644 --- a/link/kalam-client/src/lib.rs +++ b/link/kalam-client/src/lib.rs @@ -4,4 +4,4 @@ //! shared `link-common` implementation with lighter default features than the //! full compatibility surface that previously lived at the root of `link/`. 
-pub use link_common::*; \ No newline at end of file +pub use link_common::*; diff --git a/link/kalam-client/tests/common/mod.rs b/link/kalam-client/tests/common/mod.rs index ce504d309..fd05009c9 100644 --- a/link/kalam-client/tests/common/mod.rs +++ b/link/kalam-client/tests/common/mod.rs @@ -138,14 +138,15 @@ async fn ensure_server_setup( let mut last_err = String::new(); while Instant::now() < deadline { - let status_response = match client.get(format!("{}/v1/api/auth/status", base_url)).send().await { - Ok(response) => response, - Err(err) => { - last_err = format!("auth status request failed: {}", err); - sleep(AUTO_SERVER_RETRY_INTERVAL).await; - continue; - }, - }; + let status_response = + match client.get(format!("{}/v1/api/auth/status", base_url)).send().await { + Ok(response) => response, + Err(err) => { + last_err = format!("auth status request failed: {}", err); + sleep(AUTO_SERVER_RETRY_INTERVAL).await; + continue; + }, + }; if !status_response.status().is_success() { last_err = format!("auth status returned {}", status_response.status()); diff --git a/link/kalam-client/tests/common/tcp_proxy.rs b/link/kalam-client/tests/common/tcp_proxy.rs index 0f79cd362..0c4341f59 100644 --- a/link/kalam-client/tests/common/tcp_proxy.rs +++ b/link/kalam-client/tests/common/tcp_proxy.rs @@ -74,9 +74,8 @@ impl TcpDisconnectProxy { /// Binds to an ephemeral port on 127.0.0.1. pub async fn start(target_base_url: &str) -> Self { let target_addr = extract_host_port(target_base_url); - let listener = bind_loopback_listener() - .await - .expect("proxy should bind to an ephemeral port"); + let listener = + bind_loopback_listener().await.expect("proxy should bind to an ephemeral port"); let bind_addr = listener.local_addr().expect("proxy should have a local addr"); let paused = Arc::new(AtomicBool::new(false)); let impairments = Arc::new(ProxyImpairments::new()); diff --git a/link/kalam-client/tests/integration_tests.rs b/link/kalam-client/tests/integration_tests.rs index e99ed272f..f827a1157 100644 --- a/link/kalam-client/tests/integration_tests.rs +++ b/link/kalam-client/tests/integration_tests.rs @@ -15,7 +15,9 @@ //! 
``` use kalam_client::models::{BatchControl, BatchStatus, KalamDataType, ResponseStatus, SchemaField}; -use kalam_client::{AuthProvider, ChangeEvent, KalamLinkClient, KalamLinkError, SubscriptionConfig}; +use kalam_client::{ + AuthProvider, ChangeEvent, KalamLinkClient, KalamLinkError, SubscriptionConfig, +}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, OnceLock}; use std::time::Duration; diff --git a/link/kalam-client/tests/proxied/blackhole_during_subscribe.rs b/link/kalam-client/tests/proxied/blackhole_during_subscribe.rs index 41f25f10b..2cc8d3140 100644 --- a/link/kalam-client/tests/proxied/blackhole_during_subscribe.rs +++ b/link/kalam-client/tests/proxied/blackhole_during_subscribe.rs @@ -54,9 +54,7 @@ async fn test_blackhole_during_subscribe_handshake_recovers() { client.connect().await.expect("connect through proxy"); wait_for_reconnect(&client, &connect_count, 1, "blackhole initial connect").await; assert!( - proxy - .wait_for_active_connections(1, RECONNECT_WAIT_TIMEOUT) - .await, + proxy.wait_for_active_connections(1, RECONNECT_WAIT_TIMEOUT).await, "proxy should see at least one active connection" ); diff --git a/link/kalam-client/tests/proxied/double_outage.rs b/link/kalam-client/tests/proxied/double_outage.rs index bda6c78d5..9317b49c5 100644 --- a/link/kalam-client/tests/proxied/double_outage.rs +++ b/link/kalam-client/tests/proxied/double_outage.rs @@ -92,9 +92,7 @@ async fn test_proxy_server_down_while_reconnecting() { // then kill it again immediately. proxy.simulate_server_up(); assert!( - proxy - .wait_for_active_connections(1, Duration::from_secs(10)) - .await, + proxy.wait_for_active_connections(1, Duration::from_secs(10)).await, "client should begin reconnecting before the second outage" ); @@ -126,13 +124,8 @@ async fn test_proxy_server_down_while_reconnecting() { let expected_connects = connect_count.load(Ordering::SeqCst) + 1; proxy.simulate_server_up(); - wait_for_reconnect( - &client, - &connect_count, - expected_connects, - "double outage final recovery", - ) - .await; + wait_for_reconnect(&client, &connect_count, expected_connects, "double outage final recovery") + .await; assert!(client.is_connected().await, "client should recover after double outage"); let mut resumed_ids = Vec::::new(); diff --git a/link/kalam-client/tests/proxied/gradual_degradation.rs b/link/kalam-client/tests/proxied/gradual_degradation.rs index e31f7e915..64b3eb5a7 100644 --- a/link/kalam-client/tests/proxied/gradual_degradation.rs +++ b/link/kalam-client/tests/proxied/gradual_degradation.rs @@ -119,13 +119,8 @@ async fn test_gradual_latency_ramp_forces_reconnect_then_recovers() { // ── Clear latency and allow recovery ──────────────────────────── proxy.clear_latency(); let expected_connects = connect_count.load(Ordering::SeqCst) + 1; - wait_for_reconnect( - &client, - &connect_count, - expected_connects, - "gradual latency recovery", - ) - .await; + wait_for_reconnect(&client, &connect_count, expected_connects, "gradual latency recovery") + .await; // Insert a post-recovery marker. writer diff --git a/link/kalam-client/tests/proxied/helpers.rs b/link/kalam-client/tests/proxied/helpers.rs index 50eb66091..e2f494789 100644 --- a/link/kalam-client/tests/proxied/helpers.rs +++ b/link/kalam-client/tests/proxied/helpers.rs @@ -103,7 +103,8 @@ pub fn create_test_client_with_events_for_base_url( /// Ensure a test table exists with a simple schema. 
pub async fn ensure_table(client: &KalamLinkClient, table: &str) { - let create_sql = format!("CREATE TABLE IF NOT EXISTS {} (id TEXT PRIMARY KEY, value TEXT)", table); + let create_sql = + format!("CREATE TABLE IF NOT EXISTS {} (id TEXT PRIMARY KEY, value TEXT)", table); let verify_sql = format!("SELECT COUNT(*) AS row_count FROM {}", table); let deadline = Instant::now() + Duration::from_secs(10); let mut last_err = String::new(); @@ -127,10 +128,7 @@ pub async fn ensure_table(client: &KalamLinkClient, table: &str) { } } - panic!( - "timed out waiting for table {} to become queryable: {}", - table, last_err - ); + panic!("timed out waiting for table {} to become queryable: {}", table, last_err); } pub async fn query_max_seq(client: &KalamLinkClient, table: &str) -> SeqId { @@ -271,10 +269,7 @@ pub async fn wait_for_disconnect_count( if Instant::now() >= deadline { panic!( "{}: disconnect count did not reach {} within {:?} (disconnect_count={})", - context, - expected_disconnects, - RECONNECT_WAIT_TIMEOUT, - current_disconnects + context, expected_disconnects, RECONNECT_WAIT_TIMEOUT, current_disconnects ); } @@ -293,8 +288,7 @@ pub async fn wait_until_connected(client: &KalamLinkClient, context: &str) { if Instant::now() >= deadline { panic!( "{}: client did not reach a connected state within {:?}", - context, - RECONNECT_WAIT_TIMEOUT + context, RECONNECT_WAIT_TIMEOUT ); } diff --git a/link/kalam-client/tests/proxied/mixed_stage_recovery.rs b/link/kalam-client/tests/proxied/mixed_stage_recovery.rs index 36a2e99e7..904cac826 100644 --- a/link/kalam-client/tests/proxied/mixed_stage_recovery.rs +++ b/link/kalam-client/tests/proxied/mixed_stage_recovery.rs @@ -177,13 +177,7 @@ async fn test_shared_connection_recovers_subscriptions_in_different_stages() { let expected_connects = connect_count.load(Ordering::SeqCst) + 1; proxy.simulate_server_up(); - wait_for_reconnect( - &client, - &connect_count, - expected_connects, - "mixed-stage outage", - ) - .await; + wait_for_reconnect(&client, &connect_count, expected_connects, "mixed-stage outage").await; writer .execute_query( diff --git a/link/kalam-client/tests/test_shared_connection.rs b/link/kalam-client/tests/test_shared_connection.rs index ec2a05f87..68ea426c3 100644 --- a/link/kalam-client/tests/test_shared_connection.rs +++ b/link/kalam-client/tests/test_shared_connection.rs @@ -81,7 +81,8 @@ fn create_test_client_with_events_for_base_url( /// Ensure a test table exists with a simple schema. 
async fn ensure_table(client: &KalamLinkClient, table: &str) { - let create_sql = format!("CREATE TABLE IF NOT EXISTS {} (id TEXT PRIMARY KEY, value TEXT)", table); + let create_sql = + format!("CREATE TABLE IF NOT EXISTS {} (id TEXT PRIMARY KEY, value TEXT)", table); let verify_sql = format!("SELECT COUNT(*) AS row_count FROM {}", table); let deadline = Instant::now() + Duration::from_secs(10); let mut last_err = String::new(); @@ -105,10 +106,7 @@ async fn ensure_table(client: &KalamLinkClient, table: &str) { } } - panic!( - "timed out waiting for table {} to become queryable: {}", - table, last_err - ); + panic!("timed out waiting for table {} to become queryable: {}", table, last_err); } async fn query_max_seq(client: &KalamLinkClient, table: &str) -> SeqId { diff --git a/link/kalam-consumer-wasm/src/lib.rs b/link/kalam-consumer-wasm/src/lib.rs index 77e089a47..3fc83e8b7 100644 --- a/link/kalam-consumer-wasm/src/lib.rs +++ b/link/kalam-consumer-wasm/src/lib.rs @@ -1,4 +1,4 @@ mod client; mod helpers; -pub use client::KalamConsumerClient; \ No newline at end of file +pub use client::KalamConsumerClient; diff --git a/link/kalam-consumer/src/lib.rs b/link/kalam-consumer/src/lib.rs index 7c9a4e292..6c2d1cc78 100644 --- a/link/kalam-consumer/src/lib.rs +++ b/link/kalam-consumer/src/lib.rs @@ -6,13 +6,13 @@ pub mod models { }; } +#[cfg(feature = "native-sdk")] +pub use kalam_client::consumer::ConsumerBuilder; +pub use kalam_client::models::Username; +#[cfg(feature = "native-sdk")] +pub use kalam_client::TopicConsumer; pub use kalam_client::{ AckResponse, AutoOffsetReset, CommitMode, CommitResult, ConsumeMessage, ConsumeRequest, ConsumeResponse, ConsumerConfig, ConsumerOffsets, ConsumerRecord, PayloadMode, RowData, TopicOp, }; -pub use kalam_client::models::Username; -#[cfg(feature = "native-sdk")] -pub use kalam_client::consumer::ConsumerBuilder; -#[cfg(feature = "native-sdk")] -pub use kalam_client::TopicConsumer; \ No newline at end of file diff --git a/link/link-common/Cargo.toml b/link/link-common/Cargo.toml index bff8ffef9..3f23d99a0 100644 --- a/link/link-common/Cargo.toml +++ b/link/link-common/Cargo.toml @@ -10,7 +10,7 @@ description = "Shared Rust implementation for KalamDB link crates" [dependencies] tokio = { workspace = true, optional = true } -reqwest = { version = "0.13.2", default-features = false, features = ["json", "rustls"], optional = true } +reqwest = { workspace = true, default-features = false, features = ["json", "rustls"], optional = true } tokio-tungstenite = { workspace = true, optional = true } serde = { workspace = true } serde_json = { workspace = true } @@ -46,9 +46,14 @@ web-sys = { workspace = true, features = [ "BlobPropertyBag", ], optional = true } +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +aws-lc-rs = { workspace = true, optional = true } +quinn-proto = { workspace = true, optional = true } +rustls-webpki = { workspace = true, optional = true } + [features] default = [] -tokio-runtime = ["tokio", "reqwest", "tokio-tungstenite", "bytes"] +tokio-runtime = ["tokio", "reqwest", "tokio-tungstenite", "bytes", "dep:aws-lc-rs", "dep:quinn-proto", "dep:rustls-webpki"] auth-flows = [] setup = [] healthcheck = [] diff --git a/link/link-common/src/lib.rs b/link/link-common/src/lib.rs index 83797e6e6..5ac5ffa95 100644 --- a/link/link-common/src/lib.rs +++ b/link/link-common/src/lib.rs @@ -1,24 +1,24 @@ //! Shared Rust implementation used by `kalam-client`, `kalam-consumer`, and //! `kalam-link-dart`. 
+pub mod auth; +#[cfg(feature = "tokio-runtime")] +pub mod client; pub mod compression; +pub(crate) mod connection; +#[cfg(feature = "consumer")] +pub mod consumer; +pub mod credentials; pub mod error; pub mod event_handlers; #[path = "models/mod.rs"] pub mod models; +pub mod query; pub mod seq_id; pub mod seq_tracking; +pub mod subscription; pub mod timeouts; pub mod timestamp; -pub mod credentials; -pub mod auth; -pub(crate) mod connection; -#[cfg(feature = "consumer")] -pub mod consumer; -#[cfg(feature = "tokio-runtime")] -pub mod client; -pub mod query; -pub mod subscription; #[cfg(feature = "wasm")] #[path = "wasm/mod.rs"] pub mod wasm; @@ -29,8 +29,8 @@ pub use auth::{ArcDynAuthProvider, AuthProvider, DynamicAuthProvider, ResolvedAu pub use client::KalamLinkClient; #[cfg(feature = "consumer")] pub use consumer::{ - AutoOffsetReset, CommitMode, CommitResult, ConsumerConfig, ConsumerOffsets, ConsumerRecord, - PayloadMode, TopicOp, + AutoOffsetReset, CommitMode, CommitResult, ConsumerConfig, ConsumerOffsets, ConsumerRecord, + PayloadMode, TopicOp, }; #[cfg(all(feature = "tokio-runtime", feature = "consumer"))] pub use consumer::{ConsumerBuilder, TopicConsumer}; @@ -39,12 +39,12 @@ pub use credentials::{CredentialStore, Credentials, MemoryCredentialStore}; pub use error::{KalamLinkError, Result}; pub use event_handlers::{ConnectionError, DisconnectReason, EventHandlers, MessageDirection}; pub use models::{ - parse_i64, ChangeEvent, ClusterHealthResponse, ClusterNodeHealth, ConnectionOptions, - ErrorDetail, FieldFlag, FieldFlags, FileRef, HealthCheckResponse, HttpVersion, - KalamCellValue, KalamDataType, LoginRequest, LoginResponse, LoginUserInfo, QueryRequest, - QueryResponse, QueryResult, RowData, SchemaField, ServerSetupRequest, ServerSetupResponse, - SetupStatusResponse, SetupUserInfo, SubscriptionConfig, SubscriptionInfo, - SubscriptionOptions, UploadProgress, + parse_i64, ChangeEvent, ClusterHealthResponse, ClusterNodeHealth, ConnectionOptions, + ErrorDetail, FieldFlag, FieldFlags, FileRef, HealthCheckResponse, HttpVersion, KalamCellValue, + KalamDataType, LoginRequest, LoginResponse, LoginUserInfo, QueryRequest, QueryResponse, + QueryResult, RowData, SchemaField, ServerSetupRequest, ServerSetupResponse, + SetupStatusResponse, SetupUserInfo, SubscriptionConfig, SubscriptionInfo, SubscriptionOptions, + UploadProgress, }; #[cfg(feature = "consumer")] pub use models::{AckResponse, ConsumeMessage, ConsumeRequest, ConsumeResponse}; @@ -64,4 +64,4 @@ pub use subscription::LiveRowsSubscription; pub use subscription::SubscriptionManager; pub use subscription::{LiveRowsConfig, LiveRowsEvent, LiveRowsMaterializer}; -pub const VERSION: &str = env!("CARGO_PKG_VERSION"); \ No newline at end of file +pub const VERSION: &str = env!("CARGO_PKG_VERSION"); diff --git a/link/link-common/src/models/mod.rs b/link/link-common/src/models/mod.rs index 36504a54f..3aa2197ce 100644 --- a/link/link-common/src/models/mod.rs +++ b/link/link-common/src/models/mod.rs @@ -32,9 +32,8 @@ pub use crate::auth::models::{ // ── Connection models ──────────────────────────────────────────────────────── pub use crate::connection::models::{ - ClientMessage, ClusterHealthResponse, ClusterNodeHealth, CompressionType, - ConnectionOptions, HealthCheckResponse, HttpVersion, ProtocolOptions, SerializationType, - ServerMessage, + ClientMessage, ClusterHealthResponse, ClusterNodeHealth, CompressionType, ConnectionOptions, + HealthCheckResponse, HttpVersion, ProtocolOptions, SerializationType, ServerMessage, }; // ── Consumer models 
────────────────────────────────────────────────────────── diff --git a/link/link-common/src/wasm/reconnect.rs b/link/link-common/src/wasm/reconnect.rs index 6faaa9cfe..aeddc71c0 100644 --- a/link/link-common/src/wasm/reconnect.rs +++ b/link/link-common/src/wasm/reconnect.rs @@ -13,9 +13,9 @@ use crate::models::{ }; use super::auth::WasmAuthProvider; -use super::wasm_debug_log; use super::helpers::{create_promise, send_ws_message, ws_url_from_http_opts}; use super::state::SubscriptionState; +use super::wasm_debug_log; /// Resolve a `WasmAuthProvider` from an optional JS async callback. /// diff --git a/link/sdks/dart/android/src/main/jniLibs/arm64-v8a/libkalam_link_dart.so b/link/sdks/dart/android/src/main/jniLibs/arm64-v8a/libkalam_link_dart.so index 61796a48e..e25addffb 100755 Binary files a/link/sdks/dart/android/src/main/jniLibs/arm64-v8a/libkalam_link_dart.so and b/link/sdks/dart/android/src/main/jniLibs/arm64-v8a/libkalam_link_dart.so differ diff --git a/link/sdks/dart/android/src/main/jniLibs/x86_64/libkalam_link_dart.so b/link/sdks/dart/android/src/main/jniLibs/x86_64/libkalam_link_dart.so index 5dd6aea5b..287bae0ba 100755 Binary files a/link/sdks/dart/android/src/main/jniLibs/x86_64/libkalam_link_dart.so and b/link/sdks/dart/android/src/main/jniLibs/x86_64/libkalam_link_dart.so differ diff --git a/link/sdks/dart/ios/Frameworks/libkalam_link_dart.a b/link/sdks/dart/ios/Frameworks/libkalam_link_dart.a index facbd67b9..3d2ebd245 100644 Binary files a/link/sdks/dart/ios/Frameworks/libkalam_link_dart.a and b/link/sdks/dart/ios/Frameworks/libkalam_link_dart.a differ diff --git a/link/sdks/dart/web/pkg/package.json b/link/sdks/dart/web/pkg/package.json index e09cf0a0f..20e1393c5 100644 --- a/link/sdks/dart/web/pkg/package.json +++ b/link/sdks/dart/web/pkg/package.json @@ -9,7 +9,7 @@ "license": "Apache-2.0", "repository": { "type": "git", - "url": "https://github.com/jamals86/KalamDB" + "url": "https://github.com/kalamstack/KalamDB" }, "files": [ "kalam_link_dart_bg.wasm", diff --git a/pg/Cargo.toml b/pg/Cargo.toml index 718760d83..752277edc 100644 --- a/pg/Cargo.toml +++ b/pg/Cargo.toml @@ -49,6 +49,7 @@ bytes = { workspace = true } http-body-util = { workspace = true } hyper = { workspace = true } hyper-util = { workspace = true } +kalam-client = { path = "../link/kalam-client", default-features = false, features = ["tokio-runtime", "file-uploads"] } kalamdb-commons = { path = "../backend/crates/kalamdb-commons", default-features = false, features = ["rows"] } kalamdb-pg = { path = "../backend/crates/kalamdb-pg", features = ["server"] } ntest = { workspace = true } diff --git a/pg/README.md b/pg/README.md index a0d526bac..71849152d 100644 --- a/pg/README.md +++ b/pg/README.md @@ -8,7 +8,8 @@ The extension is built with `pgrx` and supports PostgreSQL `pg13` through `pg18` - `CREATE EXTENSION pg_kalam;` - A `pg_kalam` foreign data wrapper registered by the extension install SQL -- A PostgreSQL-side bridge to a running KalamDB server over gRPC- `pgvector` preinstalled in all Docker images (`pg-kalam:latest`, `Dockerfile`, `Dockerfile.runtime`) +- A PostgreSQL-side bridge to a running KalamDB server over gRPC +- `pgvector` preinstalled in Docker images built from `Dockerfile`, `Dockerfile.runtime`, and `Dockerfile.release-pg`; enable it per database with `CREATE EXTENSION vector;` ## Choose the right workflow @@ -132,6 +133,11 @@ Verify the extension loaded correctly: SELECT kalam_version(), kalam_compiled_mode(); ``` +Type note for KalamDB-specific columns: + +- `CREATE TABLE ... 
USING kalamdb (... attachment FILE ...)` keeps `FILE` as the remote KalamDB type, but the mirrored PostgreSQL foreign table column is created as `JSONB` +- Read and write the column from PostgreSQL as a JSON `FileRef` payload + If you need to reinstall during development: ```sql diff --git a/pg/crates/kalam-pg-client/src/lib.rs b/pg/crates/kalam-pg-client/src/lib.rs index ed6e6aeeb..a66d2659e 100644 --- a/pg/crates/kalam-pg-client/src/lib.rs +++ b/pg/crates/kalam-pg-client/src/lib.rs @@ -8,7 +8,8 @@ use kalam_pg_common::{KalamPgError, RemoteServerConfig}; use kalamdb_pg::{ BeginTransactionRequest, CloseSessionRequest, CommitTransactionRequest, DeleteRpcRequest, ExecuteQueryRpcRequest, ExecuteSqlRpcRequest, InsertRpcRequest, OpenSessionRequest, - PgServiceClient, PingRequest, RollbackTransactionRequest, ScanRpcRequest, UpdateRpcRequest, + PgServiceClient, PingRequest, RollbackTransactionRequest, ScanFilterExpression, + ScanRpcRequest, UpdateRpcRequest, }; #[cfg(feature = "tls")] use tonic::transport::{Certificate, ClientTlsConfig, Identity}; @@ -76,14 +77,14 @@ impl RemoteKalamClient { .map_err(|error| Self::connect_err(&error, &server_addr))?; let auth_header = match config.auth_header.as_deref().filter(|v| !v.is_empty()) { - Some(value) => Some(value.parse::<MetadataValue<Ascii>>().map_err( - |error| { + Some(value) => { + Some(value.parse::<MetadataValue<Ascii>>().map_err(|error| { KalamPgError::Validation(format!( "invalid auth_header metadata value: {}", error )) - }, - )?), + })?) + }, None => None, }; @@ -278,8 +279,17 @@ user_id: Option<&str>, columns: Vec<String>, limit: Option<u64>, + filters: Vec<(String, String)>, ) -> Result { let mut client = PgServiceClient::new(self.channel.clone()); + let grpc_filters = filters + .into_iter() + .map(|(column, value)| ScanFilterExpression { + column, + op: "eq".to_string(), + value, + }) + .collect(); let request = self.authorized_request(ScanRpcRequest { namespace: namespace.to_string(), table_name: table_name.to_string(), @@ -288,6 +298,7 @@ user_id: user_id.map(str::to_string), columns, limit, + filters: grpc_filters, }); let response = client .scan(request) .await @@ -450,10 +461,7 @@ session_id: session_id.to_string(), transaction_id: transaction_id.to_string(), }); - let response = client - .rollback_transaction(request) - .await?
- .into_inner(); + let response = client.rollback_transaction(request).await?.into_inner(); Ok(response.transaction_id) } diff --git a/pg/crates/kalam-pg-client/tests/statement_round_trip.rs b/pg/crates/kalam-pg-client/tests/statement_round_trip.rs index 0b85b2906..c63b23332 100644 --- a/pg/crates/kalam-pg-client/tests/statement_round_trip.rs +++ b/pg/crates/kalam-pg-client/tests/statement_round_trip.rs @@ -113,7 +113,7 @@ async fn scan_returns_arrow_batches() { client.open_session("sess-scan", Some("app")).await.expect("open session"); let response = client - .scan("app", "messages", "shared", "sess-scan", None, vec![], None) + .scan("app", "messages", "shared", "sess-scan", None, vec![], None, vec![]) .await .expect("scan"); @@ -142,7 +142,7 @@ async fn scan_with_projection_and_limit() { client.open_session("sess-proj", Some("app")).await.expect("open session"); let response = client - .scan("app", "messages", "shared", "sess-proj", None, vec!["id".to_string()], Some(1)) + .scan("app", "messages", "shared", "sess-proj", None, vec!["id".to_string()], Some(1), vec![]) .await .expect("scan with projection"); diff --git a/pg/docker/Dockerfile b/pg/docker/Dockerfile index 0cd227274..5a6f92458 100644 --- a/pg/docker/Dockerfile +++ b/pg/docker/Dockerfile @@ -74,12 +74,13 @@ COPY backend/crates/kalamdb-store/Cargo.toml backend/crates/kalamdb-stor COPY backend/crates/kalamdb-system/Cargo.toml backend/crates/kalamdb-system/Cargo.toml COPY backend/crates/kalamdb-filestore/Cargo.toml backend/crates/kalamdb-filestore/Cargo.toml COPY backend/crates/kalamdb-core/Cargo.toml backend/crates/kalamdb-core/Cargo.toml -COPY backend/crates/kalamdb-sql/Cargo.toml backend/crates/kalamdb-sql/Cargo.toml COPY backend/crates/kalamdb-tables/Cargo.toml backend/crates/kalamdb-tables/Cargo.toml COPY backend/crates/kalamdb-streams/Cargo.toml backend/crates/kalamdb-streams/Cargo.toml COPY backend/crates/kalamdb-auth/Cargo.toml backend/crates/kalamdb-auth/Cargo.toml COPY backend/crates/kalamdb-api/Cargo.toml backend/crates/kalamdb-api/Cargo.toml COPY backend/crates/kalamdb-session/Cargo.toml backend/crates/kalamdb-session/Cargo.toml +COPY backend/crates/kalamdb-session-datafusion/Cargo.toml backend/crates/kalamdb-session-datafusion/Cargo.toml +COPY backend/crates/kalamdb-transactions/Cargo.toml backend/crates/kalamdb-transactions/Cargo.toml COPY backend/crates/kalamdb-observability/Cargo.toml backend/crates/kalamdb-observability/Cargo.toml COPY backend/crates/kalamdb-publisher/Cargo.toml backend/crates/kalamdb-publisher/Cargo.toml COPY backend/crates/kalamdb-raft/Cargo.toml backend/crates/kalamdb-raft/Cargo.toml @@ -87,7 +88,16 @@ COPY backend/crates/kalamdb-sharding/Cargo.toml backend/crates/kalamdb-shar COPY backend/crates/kalamdb-views/Cargo.toml backend/crates/kalamdb-views/Cargo.toml COPY backend/crates/kalamdb-vector/Cargo.toml backend/crates/kalamdb-vector/Cargo.toml COPY backend/crates/kalamdb-dba/Cargo.toml backend/crates/kalamdb-dba/Cargo.toml -COPY backend/crates/kalamdb-oidc/Cargo.toml backend/crates/kalamdb-oidc/Cargo.toml +COPY backend/crates/kalamdb-live/Cargo.toml backend/crates/kalamdb-live/Cargo.toml +COPY backend/crates/kalamdb-handlers/Cargo.toml backend/crates/kalamdb-handlers/Cargo.toml +COPY backend/crates/kalamdb-handlers/crates/support/Cargo.toml backend/crates/kalamdb-handlers/crates/support/Cargo.toml +COPY backend/crates/kalamdb-handlers/crates/admin/Cargo.toml backend/crates/kalamdb-handlers/crates/admin/Cargo.toml +COPY backend/crates/kalamdb-handlers/crates/ddl/Cargo.toml 
backend/crates/kalamdb-handlers/crates/ddl/Cargo.toml +COPY backend/crates/kalamdb-handlers/crates/stream/Cargo.toml backend/crates/kalamdb-handlers/crates/stream/Cargo.toml +COPY backend/crates/kalamdb-handlers/crates/user/Cargo.toml backend/crates/kalamdb-handlers/crates/user/Cargo.toml +COPY backend/crates/kalamdb-jobs/Cargo.toml backend/crates/kalamdb-jobs/Cargo.toml +COPY backend/crates/kalamdb-plan-cache/Cargo.toml backend/crates/kalamdb-plan-cache/Cargo.toml +COPY backend/crates/kalamdb-dialect/Cargo.toml backend/crates/kalamdb-dialect/Cargo.toml COPY link/link-common/Cargo.toml link/link-common/Cargo.toml COPY link/kalam-client/Cargo.toml link/kalam-client/Cargo.toml COPY link/kalam-consumer/Cargo.toml link/kalam-consumer/Cargo.toml @@ -107,11 +117,16 @@ RUN set -e \ && mkdir -p link/kalam-consumer-wasm/src && touch link/kalam-consumer-wasm/src/lib.rs \ && mkdir -p link/kalam-link-dart/src && touch link/kalam-link-dart/src/lib.rs \ && for c in kalamdb-configs kalamdb-store kalamdb-system kalamdb-filestore \ - kalamdb-core kalamdb-sql kalamdb-tables kalamdb-streams kalamdb-auth \ - kalamdb-api kalamdb-session kalamdb-observability kalamdb-publisher \ + kalamdb-core kalamdb-tables kalamdb-streams kalamdb-auth \ + kalamdb-api kalamdb-session kalamdb-session-datafusion kalamdb-transactions \ + kalamdb-observability kalamdb-publisher \ kalamdb-raft kalamdb-sharding kalamdb-views kalamdb-vector kalamdb-dba \ - kalamdb-oidc; do \ + kalamdb-live kalamdb-jobs kalamdb-plan-cache kalamdb-dialect; do \ mkdir -p "backend/crates/$c/src" && touch "backend/crates/$c/src/lib.rs"; \ + done \ + && mkdir -p backend/crates/kalamdb-handlers/src && touch backend/crates/kalamdb-handlers/src/lib.rs \ + && for sc in support admin ddl stream user; do \ + mkdir -p "backend/crates/kalamdb-handlers/crates/$sc/src" && touch "backend/crates/kalamdb-handlers/crates/$sc/src/lib.rs"; \ done # --- Real source for the 9 crates that actually get compiled --------------- diff --git a/pg/docker/Dockerfile.release-pg b/pg/docker/Dockerfile.release-pg index 0532cd9a1..f802a90c7 100644 --- a/pg/docker/Dockerfile.release-pg +++ b/pg/docker/Dockerfile.release-pg @@ -19,6 +19,10 @@ COPY pg_kalam.so /pg-artifacts/pg_kalam.so COPY pg_kalam.control /pg-artifacts/pg_kalam.control COPY pg_kalam--*.sql /pg-artifacts/ +RUN apt-get update \ + && apt-get install -y --no-install-recommends postgresql-${PG_MAJOR}-pgvector \ + && rm -rf /var/lib/apt/lists/* + RUN install -m 755 /pg-artifacts/pg_kalam.so "/usr/lib/postgresql/${PG_MAJOR}/lib/pg_kalam.so" \ && install -m 644 /pg-artifacts/pg_kalam.control "/usr/share/postgresql/${PG_MAJOR}/extension/pg_kalam.control" \ && install -m 644 /pg-artifacts/pg_kalam--*.sql "/usr/share/postgresql/${PG_MAJOR}/extension/" \ diff --git a/pg/docker/README.md b/pg/docker/README.md index 042429e60..42264abe7 100644 --- a/pg/docker/README.md +++ b/pg/docker/README.md @@ -1,12 +1,13 @@ # pg-kalam on Docker Hub -PostgreSQL with the pg_kalam extension preinstalled so you can connect PostgreSQL Kalam-backed tables to a remote KalamDB server. +PostgreSQL with the pg_kalam extension and pgvector preinstalled so you can connect PostgreSQL Kalam-backed tables to a remote KalamDB server. -This image packages stock PostgreSQL with the `pg_kalam` foreign data wrapper extension already installed. +This image packages stock PostgreSQL with the `pg_kalam` foreign data wrapper extension and the `pgvector` package already installed. 
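A minimal sanity check for the combined image (a sketch; the database name is an assumption, though any database in the container works) is to create both extensions and read them back from the catalog, mirroring the `pg_extension` query that `pg/docker/test.sh` uses for pgvector:

```sql
-- Idempotent: safe to rerun on an already-initialized database.
CREATE EXTENSION IF NOT EXISTS pg_kalam;
CREATE EXTENSION IF NOT EXISTS vector;

-- Both rows should come back with a version; a missing row means the
-- corresponding extension is not installed in this image.
SELECT extname, extversion
FROM pg_extension
WHERE extname IN ('pg_kalam', 'vector');
```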
## What you get - A PostgreSQL runtime image with `pg_kalam.so`, `pg_kalam.control`, and the extension SQL files already installed +- `pgvector` preinstalled so `CREATE EXTENSION vector;` works without extra package installation - `CREATE EXTENSION pg_kalam;` support without a separate extension build step in the container - A ready-to-run base for local testing, CI, and production images that need PostgreSQL to talk to KalamDB @@ -33,6 +34,7 @@ Then enable the extension: ```sql CREATE EXTENSION IF NOT EXISTS pg_kalam; +CREATE EXTENSION IF NOT EXISTS vector; ``` ## Connect to KalamDB diff --git a/pg/docker/image-description.txt b/pg/docker/image-description.txt index 433c8ef0f..535f052e0 100644 --- a/pg/docker/image-description.txt +++ b/pg/docker/image-description.txt @@ -1 +1 @@ -PostgreSQL with the pg_kalam extension preinstalled so you can connect PostgreSQL Kalam-backed tables to a remote KalamDB server. \ No newline at end of file +PostgreSQL with the pg_kalam extension and pgvector preinstalled so you can connect PostgreSQL Kalam-backed tables to a remote KalamDB server. \ No newline at end of file diff --git a/pg/docker/test.sh b/pg/docker/test.sh index 8da225c2e..bf539257d 100755 --- a/pg/docker/test.sh +++ b/pg/docker/test.sh @@ -130,6 +130,22 @@ for i in $(seq 1 30); do sleep 1 done +echo "" +echo "Verifying pgvector extension availability ..." + +PGVECTOR_VERSION=$(PAGER=cat "$PSQL_BIN" -h "$PGHOST" -p "$PGPORT" -U "$PGUSER" -d "$PGDATABASE" \ + -v ON_ERROR_STOP=1 \ + -P pager=off \ + -t -A \ + -c "CREATE EXTENSION IF NOT EXISTS vector; SELECT extversion FROM pg_extension WHERE extname = 'vector';" | tail -n 1 | tr -d '[:space:]') + +if [ -z "$PGVECTOR_VERSION" ]; then + echo "ERROR: pgvector is not available in this PostgreSQL image." + exit 1 +fi + +echo "pgvector is available (version $PGVECTOR_VERSION)." + echo "" echo "Running test.sql ..." echo "" diff --git a/pg/src/arrow_to_pg.rs b/pg/src/arrow_to_pg.rs index a713e7b4f..fadf5a7a6 100644 --- a/pg/src/arrow_to_pg.rs +++ b/pg/src/arrow_to_pg.rs @@ -22,7 +22,11 @@ const UNIX_TO_PG_EPOCH_MICROSECONDS: i64 = 946_684_800_000_000; /// /// # Safety /// Must be called within a valid PostgreSQL memory context (e.g., per-tuple context). 
-pub unsafe fn arrow_value_to_datum(array: &dyn Array, row: usize) -> (pg_sys::Datum, bool) { +pub unsafe fn arrow_value_to_datum( + array: &dyn Array, + row: usize, + target_type_oid: pg_sys::Oid, +) -> (pg_sys::Datum, bool) { if array.is_null(row) { return (pg_sys::Datum::from(0usize), true); } @@ -55,23 +59,17 @@ pub unsafe fn arrow_value_to_datum(array: &dyn Array, row: usize) -> (pg_sys::Da DataType::Utf8 => { let arr = array.as_any().downcast_ref::<StringArray>().unwrap(); let val = arr.value(row); - match CString::new(val) { - Ok(cstr) => { - let pg_text = pg_sys::cstring_to_text(cstr.as_ptr()); - (pg_sys::Datum::from(pg_text as usize), false) - }, - Err(_) => (pg_sys::Datum::from(0usize), true), + match datum_from_str_for_target_type(val, target_type_oid) { + Some(datum) => (datum, false), + None => (pg_sys::Datum::from(0usize), true), } }, DataType::LargeUtf8 => { let arr = array.as_any().downcast_ref::<LargeStringArray>().unwrap(); let val = arr.value(row); - match CString::new(val) { - Ok(cstr) => { - let pg_text = pg_sys::cstring_to_text(cstr.as_ptr()); - (pg_sys::Datum::from(pg_text as usize), false) - }, - Err(_) => (pg_sys::Datum::from(0usize), true), + match datum_from_str_for_target_type(val, target_type_oid) { + Some(datum) => (datum, false), + None => (pg_sys::Datum::from(0usize), true), } }, DataType::Binary => { @@ -105,12 +103,9 @@ pub unsafe fn arrow_value_to_datum(array: &dyn Array, row: usize) -> (pg_sys::Da match scalar { Ok(s) => { let text = s.to_string(); - match CString::new(text) { - Ok(cstr) => { - let pg_text = pg_sys::cstring_to_text(cstr.as_ptr()); - (pg_sys::Datum::from(pg_text as usize), false) - }, - Err(_) => (pg_sys::Datum::from(0usize), true), + match datum_from_str_for_target_type(&text, target_type_oid) { + Some(datum) => (datum, false), + None => (pg_sys::Datum::from(0usize), true), } }, Err(_) => (pg_sys::Datum::from(0usize), true), @@ -119,6 +114,39 @@ } } +unsafe fn datum_from_str_for_target_type( + value: &str, + target_type_oid: pg_sys::Oid, +) -> Option<pg_sys::Datum> { + match target_type_oid { + pg_sys::JSONOID | pg_sys::JSONBOID => parse_text_via_type_input(value, target_type_oid), + _ => text_datum_from_str(value), + } +} + +unsafe fn text_datum_from_str(value: &str) -> Option<pg_sys::Datum> { + let cstr = CString::new(value).ok()?; + let pg_text = pg_sys::cstring_to_text(cstr.as_ptr()); + Some(pg_sys::Datum::from(pg_text as usize)) +} + +unsafe fn parse_text_via_type_input( + value: &str, + target_type_oid: pg_sys::Oid, +) -> Option<pg_sys::Datum> { + let cstr = CString::new(value).ok()?; + let mut typinput = pg_sys::Oid::INVALID; + let mut typioparam = pg_sys::Oid::INVALID; + pg_sys::getTypeInputInfo(target_type_oid, &mut typinput, &mut typioparam); + + Some(pg_sys::OidInputFunctionCall( + typinput, + cstr.as_ptr() as *mut std::ffi::c_char, + typioparam, + -1, + )) +} + /// Allocate a PostgreSQL `bytea` from a byte slice.
unsafe fn pg_bytea_from_slice(data: &[u8]) -> *mut pg_sys::varlena { let total_size = data.len() + pg_sys::VARHDRSZ; diff --git a/pg/src/fdw_ddl.rs b/pg/src/fdw_ddl.rs index 4365fb011..217d5b07b 100644 --- a/pg/src/fdw_ddl.rs +++ b/pg/src/fdw_ddl.rs @@ -194,16 +194,7 @@ unsafe extern "C-unwind" fn kalam_process_utility( _ => {}, } - call_prev( - pstmt, - query_string, - read_only_tree, - context, - params, - query_env, - dest, - qc, - ); + call_prev(pstmt, query_string, read_only_tree, context, params, query_env, dest, qc); match tx_kind { pg_sys::TransactionStmtKind::TRANS_STMT_BEGIN @@ -265,10 +256,7 @@ unsafe fn report_sql_error(message: &str) -> ! { unsafe extern "C-unwind" { fn errcode(sqlerrcode: std::os::raw::c_int) -> std::os::raw::c_int; fn errmsg(fmt: *const std::os::raw::c_char, ...) -> std::os::raw::c_int; - fn errstart( - elevel: std::os::raw::c_int, - domain: *const std::os::raw::c_char, - ) -> bool; + fn errstart(elevel: std::os::raw::c_int, domain: *const std::os::raw::c_char) -> bool; fn errfinish( filename: *const std::os::raw::c_char, lineno: std::os::raw::c_int, @@ -292,10 +280,7 @@ unsafe fn report_sql_error(message: &str) -> ! { // CREATE TABLE ... USING kalamdb → CREATE FOREIGN TABLE + propagate // --------------------------------------------------------------------------- -unsafe fn handle_create_table_using_kalamdb( - stmt: *mut pg_sys::CreateStmt, - statement_sql: &str, -) { +unsafe fn handle_create_table_using_kalamdb(stmt: *mut pg_sys::CreateStmt, statement_sql: &str) { let rv = (*stmt).relation; if rv.is_null() { pgrx::error!("pg_kalam DDL: CREATE TABLE USING kalamdb requires a table name"); @@ -307,7 +292,9 @@ unsafe fn handle_create_table_using_kalamdb( } let namespace = resolve_create_namespace(rv).unwrap_or_else(|| { - pgrx::error!("pg_kalam DDL: could not determine target schema for CREATE TABLE USING kalamdb") + pgrx::error!( + "pg_kalam DDL: could not determine target schema for CREATE TABLE USING kalamdb" + ) }); let if_not_exists = create_statement_has_if_not_exists(statement_sql).unwrap_or(false); @@ -335,10 +322,7 @@ unsafe fn handle_create_table_using_kalamdb( pgrx::error!("pg_kalam DDL: no user columns found in CREATE TABLE USING kalamdb"); }, Err(error) => { - pgrx::error!( - "pg_kalam DDL: failed to parse CREATE TABLE USING kalamdb: {}", - error - ); + pgrx::error!("pg_kalam DDL: failed to parse CREATE TABLE USING kalamdb: {}", error); }, }; @@ -385,11 +369,7 @@ unsafe fn handle_create_table_using_kalamdb( ); if let Err(error) = execute_remote_sql(&create_ns_sql, DEFAULT_KALAM_SERVER) { - pgrx::warning!( - "pg_kalam DDL: failed to create namespace '{}': {}", - namespace, - error - ); + pgrx::warning!("pg_kalam DDL: failed to create namespace '{}': {}", namespace, error); } if let Err(error) = execute_remote_sql(&create_table_sql, DEFAULT_KALAM_SERVER) { pgrx::error!( @@ -401,13 +381,10 @@ unsafe fn handle_create_table_using_kalamdb( } if !namespace_exists(&namespace) { - let create_schema_sql = format!("CREATE SCHEMA IF NOT EXISTS {}", quote_ident_pg(&namespace)); + let create_schema_sql = + format!("CREATE SCHEMA IF NOT EXISTS {}", quote_ident_pg(&namespace)); if let Err(error) = pgrx::Spi::run(&create_schema_sql) { - pgrx::error!( - "pg_kalam DDL: failed to ensure local schema '{}': {}", - namespace, - error - ); + pgrx::error!("pg_kalam DDL: failed to ensure local schema '{}': {}", namespace, error); } } @@ -419,9 +396,10 @@ unsafe fn handle_create_table_using_kalamdb( if key == "type" { continue; } - let assignment = 
format_foreign_table_option_assignment(key, value).unwrap_or_else(|error| { - pgrx::error!("pg_kalam DDL: invalid local foreign table option: {}", error) - }); + let assignment = + format_foreign_table_option_assignment(key, value).unwrap_or_else(|error| { + pgrx::error!("pg_kalam DDL: invalid local foreign table option: {}", error) + }); ft_options.push(assignment); } @@ -1046,26 +1024,16 @@ fn format_kalam_option_assignment(key: &str, value: &str) -> Result Result { @@ -1087,11 +1055,7 @@ fn format_foreign_table_option_assignment(key: &str, value: &str) -> Result bool { @@ -1481,6 +1445,7 @@ fn strip_for_foreign_table(column_defs: &[String]) -> Vec { .filter(|def| !table_pk_re.is_match(def)) .map(|def| { let result = pk_re.replace_all(def, "").into_owned(); + let result = rewrite_local_foreign_table_column_type(&result); result .split_whitespace() .collect::>() @@ -1492,6 +1457,18 @@ fn strip_for_foreign_table(column_defs: &[String]) -> Vec { .collect() } +fn rewrite_local_foreign_table_column_type(definition: &str) -> String { + if first_sql_identifier(definition).is_none() { + return definition.to_string(); + } + + let file_type_re = + regex::Regex::new(r#"(?i)^(\s*(?:"[^"]+"|[A-Za-z_][A-Za-z0-9_]*)\s+)FILE\b"#) + .unwrap(); + + file_type_re.replace(definition, "${1}JSONB").into_owned() +} + #[cfg(test)] mod tests { use super::*; @@ -1642,6 +1619,22 @@ mod tests { assert_eq!(stripped[2], "created_at TIMESTAMP DEFAULT NOW()"); } + #[test] + fn strip_for_foreign_table_rewrites_file_type_to_jsonb() { + let defs = vec![ + "\"attachment\" FILE DEFAULT '{}'::jsonb NOT NULL".to_string(), + "metadata JSONB".to_string(), + ]; + let stripped = super::strip_for_foreign_table(&defs); + assert_eq!( + stripped, + vec![ + "\"attachment\" JSONB DEFAULT '{}'::jsonb NOT NULL".to_string(), + "metadata JSONB".to_string(), + ] + ); + } + #[test] fn create_statement_has_if_not_exists_detects_clause() { let sql = "CREATE TABLE IF NOT EXISTS app.items (id BIGINT) USING kalamdb WITH (type = 'shared');"; @@ -1653,21 +1646,24 @@ mod tests { #[test] fn format_kalam_option_assignment_rejects_numeric_prefix() { - let error = format_kalam_option_assignment("9evil", "value").expect_err("numeric prefix should be rejected"); + let error = format_kalam_option_assignment("9evil", "value") + .expect_err("numeric prefix should be rejected"); assert!(matches!(error, KalamPgError::Validation(_))); assert!(error.to_string().contains("unsupported KalamDB option name '9evil'")); } #[test] fn validate_no_system_columns_rejects_userid() { - let sql = "CREATE FOREIGN TABLE t (id TEXT, _userid TEXT) SERVER s OPTIONS (table_type 'user');"; + let sql = + "CREATE FOREIGN TABLE t (id TEXT, _userid TEXT) SERVER s OPTIONS (table_type 'user');"; let err = validate_no_system_columns(sql).expect_err("should reject _userid"); assert!(err.to_string().contains("_userid")); } #[test] fn validate_no_system_columns_rejects_seq() { - let sql = "CREATE FOREIGN TABLE t (id TEXT, _seq BIGINT) SERVER s OPTIONS (table_type 'shared');"; + let sql = + "CREATE FOREIGN TABLE t (id TEXT, _seq BIGINT) SERVER s OPTIONS (table_type 'shared');"; let err = validate_no_system_columns(sql).expect_err("should reject _seq"); assert!(err.to_string().contains("_seq")); } @@ -1681,7 +1677,8 @@ mod tests { #[test] fn validate_no_system_columns_allows_clean_sql() { - let sql = "CREATE FOREIGN TABLE t (id TEXT, name TEXT) SERVER s OPTIONS (table_type 'shared');"; + let sql = + "CREATE FOREIGN TABLE t (id TEXT, name TEXT) SERVER s OPTIONS (table_type 'shared');"; 
validate_no_system_columns(sql).expect("clean SQL should pass"); } } @@ -1693,10 +1690,7 @@ fn execute_remote_sql(sql: &str, server_name: &str) -> Result Result runtime, column_names, pk_column, + flushed_for_modify: false, }); (*rinfo).ri_FdwState = Box::into_raw(modify_state) as *mut std::ffi::c_void; @@ -372,13 +373,16 @@ unsafe fn exec_foreign_update_impl( slot: *mut pg_sys::TupleTableSlot, plan_slot: *mut pg_sys::TupleTableSlot, ) -> Result<(), KalamPgError> { - let state = &*((*rinfo).ri_FdwState as *mut KalamModifyState); - // Flush pending inserts so the update sees all rows - crate::write_buffer::flush_table( - &state.session_id, - &state.table_options.table_id, - state.table_options.table_type, - )?; + let state = &mut *((*rinfo).ri_FdwState as *mut KalamModifyState); + // Flush pending inserts once per modify lifecycle so updates see all rows + if !state.flushed_for_modify { + crate::write_buffer::flush_table( + &state.session_id, + &state.table_options.table_id, + state.table_options.table_type, + )?; + state.flushed_for_modify = true; + } let pk_value = extract_pk_value(plan_slot, &state.pk_column, &state.column_names)?; let (updates, _explicit_userid) = slot_to_row(slot, &state.column_names)?; @@ -401,13 +405,16 @@ unsafe fn exec_foreign_delete_impl( rinfo: *mut pg_sys::ResultRelInfo, plan_slot: *mut pg_sys::TupleTableSlot, ) -> Result<(), KalamPgError> { - let state = &*((*rinfo).ri_FdwState as *mut KalamModifyState); - // Flush pending inserts so the delete sees all rows - crate::write_buffer::flush_table( - &state.session_id, - &state.table_options.table_id, - state.table_options.table_type, - )?; + let state = &mut *((*rinfo).ri_FdwState as *mut KalamModifyState); + // Flush pending inserts once per modify lifecycle so deletes see all rows + if !state.flushed_for_modify { + crate::write_buffer::flush_table( + &state.session_id, + &state.table_options.table_id, + state.table_options.table_type, + )?; + state.flushed_for_modify = true; + } let pk_value = extract_pk_value(plan_slot, &state.pk_column, &state.column_names)?; let user_id_str = crate::current_kalam_user_id(); diff --git a/pg/src/fdw_scan.rs b/pg/src/fdw_scan.rs index 2cb3308c7..b487e44b8 100644 --- a/pg/src/fdw_scan.rs +++ b/pg/src/fdw_scan.rs @@ -5,6 +5,8 @@ use crate::arrow_to_pg::arrow_value_to_datum; use crate::fdw_options::parse_options; use crate::fdw_state::KalamScanState; use crate::relation_table_options::resolve_table_options_for_relation; +use datafusion_common::ScalarValue; +use kalam_pg_api::ScanFilter; use kalam_pg_common::{KalamPgError, DELETED_COLUMN, SEQ_COLUMN, USER_ID_COLUMN}; use pgrx::pg_guard; use pgrx::pg_sys; @@ -157,7 +159,7 @@ pub unsafe extern "C-unwind" fn iterate_foreign_scan( // Map to Arrow column if let Some(Some(arrow_idx)) = state.column_mapping.get(att_idx) { let array = batch.column(*arrow_idx); - let (datum, is_null) = arrow_value_to_datum(array.as_ref(), row); + let (datum, is_null) = arrow_value_to_datum(array.as_ref(), row, (*att).atttypid); *(*slot).tts_values.add(att_idx) = datum; *(*slot).tts_isnull.add(att_idx) = is_null; } else { @@ -263,13 +265,17 @@ unsafe fn begin_foreign_scan_impl(node: *mut pg_sys::ForeignScanState) -> Result let tenant_context = kalam_pg_api::TenantContext::new(None, user_id.clone()); + // Extract pushdown-safe equality filters from plan quals. + // PG still applies all quals locally, so this is a pure optimization. 
+ let pushdown_filters = extract_pushdown_filters(node); + let request = kalam_pg_api::ScanRequest { table_id: table_options.table_id.clone(), table_type: table_options.table_type, tenant_context, remote_session: None, projection, - filters: Vec::new(), + filters: pushdown_filters, limit: None, }; request.validate()?; @@ -311,3 +317,167 @@ unsafe fn begin_foreign_scan_impl(node: *mut pg_sys::ForeignScanState) -> Result (*node).fdw_state = Box::into_raw(scan_state) as *mut std::ffi::c_void; Ok(()) } + +// --------------------------------------------------------------------------- +// WHERE clause pushdown: extract simple `column = value` predicates +// --------------------------------------------------------------------------- + +/// Extract simple `column = constant` filters from plan quals for remote pushdown. +/// +/// Only extracts filters that are safe to push to the remote server: +/// - Equality operator (`=`) +/// - One side is a column reference (Var) +/// - Other side is a Const or resolved external Param +/// +/// PG still applies all quals locally, so incomplete extraction is safe — +/// it only reduces the data transferred from the remote server. +unsafe fn extract_pushdown_filters(node: *mut pg_sys::ForeignScanState) -> Vec<ScanFilter> { + let mut filters = Vec::new(); + + let qual_list = (*(*node).ss.ps.plan).qual; + if qual_list.is_null() { + return filters; + } + + let tupdesc = (*(*node).ss.ss_ScanTupleSlot).tts_tupleDescriptor; + + // Parameter list for resolving $1, $2, ... in prepared statements + let param_list = if !(*node).ss.ps.state.is_null() { + (*(*node).ss.ps.state).es_param_list_info + } else { + std::ptr::null_mut() + }; + + let length = (*qual_list).length as usize; + for i in 0..length { + let element = (*qual_list).elements.add(i); + let expr_node = (*element).ptr_value as *mut pg_sys::Node; + + if let Some(filter) = try_extract_eq_filter(expr_node, tupdesc, param_list) { + filters.push(filter); + } + } + + filters +} + +/// Try to extract an equality filter from a single qual expression. +/// +/// Returns `Some(ScanFilter::Eq { ... })` for `column = value` patterns, +/// `None` for anything else (complex expressions, non-equality ops, etc.). +unsafe fn try_extract_eq_filter( + node: *mut pg_sys::Node, + tupdesc: pg_sys::TupleDesc, + params: pg_sys::ParamListInfo, +) -> Option<ScanFilter> { + if (*node).type_ != pg_sys::NodeTag::T_OpExpr { + return None; + } + + let opexpr = node as *mut pg_sys::OpExpr; + + // Verify this is an equality operator by checking the operator name. + let opname_ptr = pg_sys::get_opname((*opexpr).opno); + if opname_ptr.is_null() { + return None; + } + let opname = CStr::from_ptr(opname_ptr).to_str().ok()?; + if opname != "=" { + return None; + } + + // Must have exactly 2 arguments (binary operator) + let args = (*opexpr).args; + if args.is_null() || (*args).length != 2 { + return None; + } + + let first = (*(*args).elements.add(0)).ptr_value as *mut pg_sys::Node; + let second = (*(*args).elements.add(1)).ptr_value as *mut pg_sys::Node; + + // Try both orderings: Var = Value and Value = Var + try_var_value_pair(first, second, tupdesc, params) + .or_else(|| try_var_value_pair(second, first, tupdesc, params)) +} + +/// Try to match a (Var, Const|Param) pair and extract column name + value.
+unsafe fn try_var_value_pair( + maybe_var: *mut pg_sys::Node, + maybe_value: *mut pg_sys::Node, + tupdesc: pg_sys::TupleDesc, + params: pg_sys::ParamListInfo, +) -> Option<ScanFilter> { + if (*maybe_var).type_ != pg_sys::NodeTag::T_Var { + return None; + } + + let var = maybe_var as *mut pg_sys::Var; + + // Get column name from attribute number + let attnum = (*var).varattno; + if attnum <= 0 || attnum as i32 > (*tupdesc).natts { + return None; + } + let att = (*tupdesc).attrs.as_ptr().add((attnum - 1) as usize); + if (*att).attisdropped { + return None; + } + let col_name = CStr::from_ptr((*att).attname.data.as_ptr()) + .to_string_lossy() + .into_owned(); + + // Skip virtual columns — they are handled separately by the FDW + match col_name.as_str() { + USER_ID_COLUMN | SEQ_COLUMN | DELETED_COLUMN => return None, + _ => {}, + } + + // Extract scalar value from Const or external Param node + let scalar = extract_node_value(maybe_value, params)?; + + Some(ScanFilter::eq(col_name, scalar)) +} + +/// Convert a Const or external Param node to a DataFusion ScalarValue. +unsafe fn extract_node_value( + node: *mut pg_sys::Node, + params: pg_sys::ParamListInfo, +) -> Option<ScalarValue> { + match (*node).type_ { + pg_sys::NodeTag::T_Const => { + let konst = node as *mut pg_sys::Const; + if (*konst).constisnull { + return None; + } + Some(crate::pg_to_kalam::datum_to_scalar( + (*konst).constvalue, + (*konst).consttype, + false, + )) + }, + pg_sys::NodeTag::T_Param => { + let param = node as *mut pg_sys::Param; + // Only external params (from prepared statements: $1, $2, ...) + if (*param).paramkind != pg_sys::ParamKind::PARAM_EXTERN { + return None; + } + if params.is_null() { + return None; + } + let param_id = (*param).paramid; + if param_id < 1 || param_id > (*params).numParams { + return None; + } + let prm = &*(*params).params.as_ptr().add((param_id - 1) as usize); + if prm.isnull { + return None; + } + Some(crate::pg_to_kalam::datum_to_scalar( + prm.value, + (*param).paramtype, + false, + )) + }, + _ => None, + } +} diff --git a/pg/src/fdw_state.rs b/pg/src/fdw_state.rs index 6fe357f44..805f054be 100644 --- a/pg/src/fdw_state.rs +++ b/pg/src/fdw_state.rs @@ -33,4 +33,7 @@ pub struct KalamModifyState { pub column_names: Vec<String>, /// Name of the primary key column (for UPDATE/DELETE row identification). pub pk_column: String, + /// Whether pending writes for this table have already been flushed in this + /// modify lifecycle. Avoids redundant per-row flush_table calls in UPDATE/DELETE.
+ pub flushed_for_modify: bool, } diff --git a/pg/src/fdw_xact.rs b/pg/src/fdw_xact.rs index 4c97f33f1..290cfe50d 100644 --- a/pg/src/fdw_xact.rs +++ b/pg/src/fdw_xact.rs @@ -59,11 +59,12 @@ pub fn ensure_transaction(session_id: &str) -> Result Vec { } fn active_transactions_snapshot() -> Vec { - CURRENT_TX - .lock() - .unwrap_or_else(|e| e.into_inner()) - .values() - .cloned() - .collect() + CURRENT_TX.lock().unwrap_or_else(|e| e.into_inner()).values().cloned().collect() } fn clear_active_transactions() { @@ -131,18 +127,12 @@ fn commit_transactions(transactions: &[ActiveTransaction]) -> Result<(), String> for tx in transactions { let state = crate::remote_state::get_remote_extension_state_for_session(&tx.session_id) .ok_or_else(|| { - format!( - "remote extension state not initialized for session '{}'", - tx.session_id - ) + format!("remote extension state not initialized for session '{}'", tx.session_id) })?; let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { state.runtime().block_on(async { - state - .client() - .commit_transaction(&tx.session_id, &tx.transaction_id) - .await + state.client().commit_transaction(&tx.session_id, &tx.transaction_id).await }) })); @@ -155,10 +145,7 @@ fn commit_transactions(transactions: &[ActiveTransaction]) -> Result<(), String> )); }, Err(_panic) => { - return Err(format!( - "panic committing KalamDB transaction {}", - tx.transaction_id - )); + return Err(format!("panic committing KalamDB transaction {}", tx.transaction_id)); }, } } @@ -168,17 +155,15 @@ fn commit_transactions(transactions: &[ActiveTransaction]) -> Result<(), String> fn rollback_transactions(transactions: &[ActiveTransaction]) { for tx in transactions { - let Some(state) = crate::remote_state::get_remote_extension_state_for_session(&tx.session_id) + let Some(state) = + crate::remote_state::get_remote_extension_state_for_session(&tx.session_id) else { continue; }; let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { state.runtime().block_on(async { - state - .client() - .rollback_transaction(&tx.session_id, &tx.transaction_id) - .await + state.client().rollback_transaction(&tx.session_id, &tx.transaction_id).await }) })); @@ -191,10 +176,7 @@ fn rollback_transactions(transactions: &[ActiveTransaction]) { ); }, Err(_panic) => { - eprintln!( - "pg_kalam: panic rolling back KalamDB transaction {}", - tx.transaction_id, - ); + eprintln!("pg_kalam: panic rolling back KalamDB transaction {}", tx.transaction_id,); }, } } @@ -204,18 +186,12 @@ fn try_rollback_transactions(transactions: &[ActiveTransaction]) -> Result<(), S for tx in transactions { let state = crate::remote_state::get_remote_extension_state_for_session(&tx.session_id) .ok_or_else(|| { - format!( - "remote extension state not initialized for session '{}'", - tx.session_id - ) + format!("remote extension state not initialized for session '{}'", tx.session_id) })?; let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { state.runtime().block_on(async { - state - .client() - .rollback_transaction(&tx.session_id, &tx.transaction_id) - .await + state.client().rollback_transaction(&tx.session_id, &tx.transaction_id).await }) })); @@ -246,9 +222,8 @@ pub fn commit_explicit_transaction_block() -> Result<(), String> { return Ok(()); } - crate::write_buffer::flush_all().map_err(|error| { - format!("failed to flush writes before explicit COMMIT: {}", error) - })?; + crate::write_buffer::flush_all() + .map_err(|error| format!("failed to flush writes before explicit COMMIT: {}", error))?; 
commit_transactions(&transactions)?; clear_active_transactions(); @@ -315,10 +290,7 @@ unsafe extern "C-unwind" fn xact_callback( if let Err(error) = commit_transactions(&transactions) { rollback_transactions(&transactions); - pgrx::error!( - "pg_kalam: {}, aborting transaction before PostgreSQL commit", - error - ); + pgrx::error!("pg_kalam: {}, aborting transaction before PostgreSQL commit", error); } return; diff --git a/pg/src/remote_executor.rs b/pg/src/remote_executor.rs index 45a227593..90c512cdc 100644 --- a/pg/src/remote_executor.rs +++ b/pg/src/remote_executor.rs @@ -1,4 +1,5 @@ use async_trait::async_trait; +use datafusion_common::ScalarValue; use kalam_pg_api::request::{DeleteRequest, InsertRequest, ScanRequest, UpdateRequest}; use kalam_pg_api::response::{MutationResponse, ScanResponse}; use kalam_pg_api::KalamBackendExecutor; @@ -26,6 +27,17 @@ impl KalamBackendExecutor for RemoteBackendExecutor { let columns = request.projection.unwrap_or_default(); let limit = request.limit.map(|l| l as u64); + // Convert ScanFilter::Eq to (column, value) pairs for gRPC pushdown + let filters: Vec<(String, String)> = request + .filters + .iter() + .filter_map(|f| match f { + kalam_pg_api::ScanFilter::Eq { column, value } => { + scalar_to_string(value).map(|v| (column.clone(), v)) + }, + }) + .collect(); + self.client .scan( request.table_id.namespace_id().as_str(), @@ -35,6 +47,7 @@ user_id.as_deref(), columns, limit, + filters, ) .await } @@ -111,3 +124,22 @@ .await } } + +/// Convert a DataFusion ScalarValue to a plain string for gRPC filter transport. +fn scalar_to_string(value: &ScalarValue) -> Option<String> { + match value { + ScalarValue::Utf8(Some(s)) | ScalarValue::LargeUtf8(Some(s)) => Some(s.clone()), + ScalarValue::Boolean(Some(b)) => Some(b.to_string()), + ScalarValue::Int8(Some(v)) => Some(v.to_string()), + ScalarValue::Int16(Some(v)) => Some(v.to_string()), + ScalarValue::Int32(Some(v)) => Some(v.to_string()), + ScalarValue::Int64(Some(v)) => Some(v.to_string()), + ScalarValue::UInt8(Some(v)) => Some(v.to_string()), + ScalarValue::UInt16(Some(v)) => Some(v.to_string()), + ScalarValue::UInt32(Some(v)) => Some(v.to_string()), + ScalarValue::UInt64(Some(v)) => Some(v.to_string()), + ScalarValue::Float32(Some(v)) => Some(v.to_string()), + ScalarValue::Float64(Some(v)) => Some(v.to_string()), + _ => None, + } +} diff --git a/pg/src/remote_state.rs b/pg/src/remote_state.rs index d4da683e0..735155fc6 100644 --- a/pg/src/remote_state.rs +++ b/pg/src/remote_state.rs @@ -1,5 +1,5 @@ -use std::collections::HashMap; use std::collections::hash_map::DefaultHasher; +use std::collections::HashMap; use std::hash::{Hash, Hasher}; use std::sync::{Arc, Mutex, OnceLock}; @@ -112,12 +112,10 @@ fn session_id_for_config(config: &RemoteServerConfig) -> String { fn build_remote_extension_state( config: &RemoteServerConfig, ) -> Result<RemoteExtensionState, KalamPgError> { - let runtime = Arc::new( - tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .map_err(|e| KalamPgError::Execution(format!("failed to build tokio runtime: {}", e)))?, - ); + let runtime = + Arc::new(tokio::runtime::Builder::new_current_thread().enable_all().build().map_err( + |e| KalamPgError::Execution(format!("failed to build tokio runtime: {}", e)), + )?); let client = runtime.block_on(async { RemoteKalamClient::connect(config.clone()).await })?; let session_id = session_id_for_config(config); @@ -130,9 +128,7 @@ fn
build_remote_extension_state( }) } -fn register_exit_handler_once( - registry: &mut RemoteStateRegistry, -) { +fn register_exit_handler_once(registry: &mut RemoteStateRegistry) { if registry.exit_handler_registered { return; } diff --git a/pg/src/write_buffer.rs b/pg/src/write_buffer.rs index bab31f461..e6959c311 100644 --- a/pg/src/write_buffer.rs +++ b/pg/src/write_buffer.rs @@ -28,6 +28,16 @@ use kalamdb_commons::{TableId, TableType}; /// Maximum rows to buffer before auto-flushing. const FLUSH_THRESHOLD: usize = 256; +/// Fast check: returns true if the global write buffer has any entries at all. +/// Uses a try_lock to avoid blocking — if the lock is contended, conservatively +/// returns true so the caller proceeds with the full flush path. +pub fn has_any_pending_writes() -> bool { + match WRITE_BUFFER.try_lock() { + Ok(guard) => guard.as_ref().is_some_and(|map| !map.is_empty()), + Err(_) => true, // conservatively assume writes pending + } +} + /// A pending batch of rows for a specific table + user context. struct PendingBatch { session_id: String, @@ -68,13 +78,7 @@ fn pending_batch_key( user_id: Option<&UserId>, ) -> String { let user_scope = user_id.map(UserId::as_str).unwrap_or("_"); - format!( - "{}|{}|{}|{}", - session_id, - table_type.as_str(), - table_id.full_name(), - user_scope - ) + format!("{}|{}|{}|{}", session_id, table_type.as_str(), table_id.full_name(), user_scope) } /// Global write buffer keyed by table full name. @@ -135,6 +139,11 @@ pub fn flush_table( table_id: &TableId, table_type: TableType, ) -> Result<(), KalamPgError> { + // Fast path: skip mutex + iteration when no writes are buffered at all + if !has_any_pending_writes() { + return Ok(()); + } + let mut guard = WRITE_BUFFER.lock().unwrap_or_else(|e| e.into_inner()); let Some(map) = guard.as_mut() else { return Ok(()); @@ -221,17 +230,11 @@ mod tests { }) } - async fn update( - &self, - _request: UpdateRequest, - ) -> Result { + async fn update(&self, _request: UpdateRequest) -> Result { Ok(MutationResponse { affected_rows: 0 }) } - async fn delete( - &self, - _request: DeleteRequest, - ) -> Result { + async fn delete(&self, _request: DeleteRequest) -> Result { Ok(MutationResponse { affected_rows: 0 }) } } @@ -297,8 +300,14 @@ mod tests { }); assert_eq!(inserts.len(), 2); - assert_eq!(inserts[0].tenant_context.effective_user_id().map(UserId::as_str), Some("user-a")); - assert_eq!(inserts[1].tenant_context.effective_user_id().map(UserId::as_str), Some("user-b")); + assert_eq!( + inserts[0].tenant_context.effective_user_id().map(UserId::as_str), + Some("user-a") + ); + assert_eq!( + inserts[1].tenant_context.effective_user_id().map(UserId::as_str), + Some("user-b") + ); assert_eq!(inserts[0].rows.len(), 1); assert_eq!(inserts[1].rows.len(), 1); diff --git a/pg/tests/e2e_common/mod.rs b/pg/tests/e2e_common/mod.rs index e8eece9cb..c9cee6812 100644 --- a/pg/tests/e2e_common/mod.rs +++ b/pg/tests/e2e_common/mod.rs @@ -9,9 +9,9 @@ pub mod tcp_proxy; #[path = "../support/http_client.rs"] mod http_client; -use std::process::Command; use std::hash::{Hash, Hasher}; use std::ops::{Deref, DerefMut}; +use std::process::Command; use std::sync::OnceLock; use std::time::Duration; use std::{env, fmt, future::Future}; @@ -28,7 +28,7 @@ use tokio_postgres::{Config, NoTls}; // Default: pgrx local postgres. Override with KALAMDB_PG_HOST / KALAMDB_PG_PORT. 
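// The write_buffer.rs hunk above adds a non-blocking emptiness probe before
// the flush path. A standalone sketch of that pattern, with `BUFFER` as an
// illustrative stand-in for the crate's WRITE_BUFFER global:
use std::collections::HashMap;
use std::sync::Mutex;

static BUFFER: Mutex<Option<HashMap<String, Vec<String>>>> = Mutex::new(None);

// try_lock never blocks: an uncontended empty buffer answers false, while a
// contended lock conservatively reports "maybe pending" so the caller falls
// through to the full flush path instead of skipping work.
fn has_any_pending_writes() -> bool {
    match BUFFER.try_lock() {
        Ok(guard) => guard.as_ref().is_some_and(|map| !map.is_empty()),
        Err(_) => true,
    }
}

fn flush_if_needed() {
    // Fast path mirrors flush_table above: bail out before taking the mutex
    // for real when nothing is buffered at all.
    if !has_any_pending_writes() {
        return;
    }
    let mut guard = BUFFER.lock().unwrap_or_else(|e| e.into_inner());
    if let Some(map) = guard.as_mut() {
        map.clear(); // stand-in for draining pending batches to the server
    }
}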
const DEFAULT_PG_HOST: &str = "127.0.0.1"; const DEFAULT_PG_PORT: u16 = 28816; -const TEST_DB: &str = "kalamdb_test"; +const DEFAULT_TEST_DB: &str = "kalamdb_test"; fn pg_connection_config() -> (String, u16) { let host = env::var("KALAMDB_PG_HOST") @@ -142,6 +142,10 @@ fn pg_user_from_env() -> String { .unwrap_or_else(|| env::var("USER").unwrap_or_else(|_| "postgres".to_string())) } +fn pg_password_from_env() -> Option<String> { + env::var("KALAMDB_PG_PASSWORD").ok().filter(|value| !value.is_empty()) +} + // --------------------------------------------------------------------------- // TestEnv — shared, singleton local test environment // --------------------------------------------------------------------------- @@ -222,14 +226,27 @@ impl TestEnv { ENV.get_or_init(|| env) } + fn pg_database_from_env() -> String { + env::var("KALAMDB_PG_DATABASE") + .ok() + .filter(|value| !value.is_empty()) + .unwrap_or_else(|| DEFAULT_TEST_DB.to_string()) + } + pub async fn pg_connect(&self) -> OwnedPgClient { + let test_db = Self::pg_database_from_env(); let (pg_host, pg_port) = pg_connection_config(); - self.pg_connect_to(TEST_DB) + self.pg_connect_to(&test_db) .await .unwrap_or_else(|e| panic!("connect to PostgreSQL at {pg_host}:{pg_port}: {e}")) } pub async fn kalamdb_sql(&self, sql: &str) -> Value { + let text = self.kalamdb_sql_text(sql).await; + serde_json::from_str(&text).unwrap_or(Value::Null) + } + + pub async fn kalamdb_sql_text(&self, sql: &str) -> String { let base_url = kalamdb_auth_config().base_url; let url = format!("{base_url}/v1/api/sql"); let body = serde_json::json!({ "sql": sql }); @@ -241,7 +258,7 @@ impl TestEnv { let status = resp.status; let text = resp.body; assert!(status.is_success(), "KalamDB SQL failed ({status}): {text}\n SQL: {sql}"); - serde_json::from_str(&text).unwrap_or(Value::Null) + text } pub async fn kalamdb_table_exists(&self, namespace: &str, table: &str) -> bool { @@ -321,7 +338,9 @@ impl TestEnv { if client .get(&url) .await - .map(|response| response.status.is_success()) + .map(|response| { + response.status.is_success() || matches!(response.status.as_u16(), 401 | 403) + }) .unwrap_or(false) { return; @@ -349,11 +368,15 @@ impl TestEnv { } let _ = client - .post_json(&format!("{}/v1/api/auth/setup", config.base_url), &serde_json::json!({ - "username": config.setup_username, - "password": config.setup_password, - "root_password": config.root_password, - }), None) + .post_json( + &format!("{}/v1/api/auth/setup", config.base_url), + &serde_json::json!({ + "username": config.setup_username, + "password": config.setup_password, + "root_password": config.root_password, + }), + None, + ) .await; if let Some(token) = @@ -378,16 +401,17 @@ impl TestEnv { } async fn ensure_test_db(&self) { + let test_db = Self::pg_database_from_env(); let postgres = self.pg_connect_to("postgres").await.expect("connect to postgres database"); let exists = postgres - .query_opt("SELECT 1 FROM pg_database WHERE datname = $1", &[&TEST_DB]) + .query_opt("SELECT 1 FROM pg_database WHERE datname = $1", &[&test_db]) .await .expect("query test database") .is_some(); if !exists { postgres - .batch_execute(&format!("CREATE DATABASE {TEST_DB};")) + .batch_execute(&format!("CREATE DATABASE {test_db};")) .await .expect("create test database"); } @@ -442,7 +466,7 @@ impl TestEnv { Ok(client) => { client.disconnect().await; return; - } + }, Err(_) => { if i == 0 { eprintln!(" waiting for PostgreSQL on {pg_host}:{pg_port}..."); @@ -457,23 +481,22 @@ impl TestEnv { ); } - async fn pg_connect_to( - &self, - 
dbname: &str, - ) -> Result<OwnedPgClient, tokio_postgres::Error> { + async fn pg_connect_to(&self, dbname: &str) -> Result<OwnedPgClient, tokio_postgres::Error> { let (pg_host, pg_port) = pg_connection_config(); - let (client, conn) = Config::new() - .host(&pg_host) - .port(pg_port) - .user(&self.pg_user) - .dbname(dbname) - .connect(NoTls) - .await?; + let mut config = Config::new(); + config.host(&pg_host).port(pg_port).user(&self.pg_user).dbname(dbname); + if let Some(password) = pg_password_from_env() { + config.password(password); + } + let (client, conn) = config.connect(NoTls).await?; let connection_task = tokio::spawn(async move { if let Err(e) = conn.await { eprintln!("pg connection error: {e}"); } }); + + client.batch_execute("LOAD 'pg_kalam';").await?; + Ok(OwnedPgClient::new(client, connection_task)) } } @@ -485,10 +508,14 @@ async fn try_login( password: &str, ) -> Option<String> { let resp = client - .post_json(&format!("{base_url}/v1/api/auth/login"), &serde_json::json!({ - "username": username, - "password": password, - }), None) + .post_json( + &format!("{base_url}/v1/api/auth/login"), + &serde_json::json!({ + "username": username, + "password": password, + }), + None, + ) .await .ok()?; if !resp.status.is_success() { @@ -510,25 +537,17 @@ pub async fn create_shared_kalam_table( ensure_schema_exists(client, "e2e").await; let drop = format!("DROP FOREIGN TABLE IF EXISTS e2e.{table};"); client.batch_execute(&drop).await.expect("drop old table"); - let sql = format!( - "CREATE TABLE e2e.{table} ({columns}) USING kalamdb WITH (type = 'shared');" - ); + let sql = format!("CREATE TABLE e2e.{table} ({columns}) USING kalamdb WITH (type = 'shared');"); client.batch_execute(&sql).await.expect("create Kalam table"); TestEnv::global().await.wait_for_kalamdb_table_exists("e2e", table).await; wait_for_table_queryable(client, &format!("e2e.{table}")).await; } -pub async fn create_user_kalam_table( - client: &tokio_postgres::Client, - table: &str, - columns: &str, -) { +pub async fn create_user_kalam_table(client: &tokio_postgres::Client, table: &str, columns: &str) { ensure_schema_exists(client, "e2e").await; let drop = format!("DROP FOREIGN TABLE IF EXISTS e2e.{table};"); client.batch_execute(&drop).await.expect("drop old table"); - let sql = format!( - "CREATE TABLE e2e.{table} ({columns}) USING kalamdb WITH (type = 'user');" - ); + let sql = format!("CREATE TABLE e2e.{table} ({columns}) USING kalamdb WITH (type = 'user');"); client.batch_execute(&sql).await.expect("create Kalam table"); TestEnv::global().await.wait_for_kalamdb_table_exists("e2e", table).await; } @@ -643,7 +662,9 @@ pub async fn same_user_shard_pair(first_user_id: &str, second_prefix: &str) -> ( for index in 0..1024 { let candidate = format!("{second_prefix}-{index}"); - if candidate != first_user_id && user_shard_group_id(&candidate, num_user_shards) == target_group { + if candidate != first_user_id + && user_shard_group_id(&candidate, num_user_shards) == target_group + { return (first_user_id.to_string(), candidate); } } @@ -693,7 +714,7 @@ where ); } tokio::time::sleep(std::time::Duration::from_millis(50)).await; - } + }, Err(error) => panic!("{description} failed: {error}"), } } @@ -830,10 +851,7 @@ pub fn process_group_rss_kb(pids: &[u32]) -> u64 { ); let stdout = String::from_utf8(output.stdout).expect("parse process group rss output"); - stdout - .lines() - .filter_map(|line| line.trim().parse::<u64>().ok()) - .sum() + stdout.lines().filter_map(|line| line.trim().parse::<u64>().ok()).sum() } pub async fn sample_process_peak_rss_kb( diff --git a/pg/tests/e2e_common/tcp_proxy.rs 
b/pg/tests/e2e_common/tcp_proxy.rs index a02e87dfd..0af0ace5a 100644 --- a/pg/tests/e2e_common/tcp_proxy.rs +++ b/pg/tests/e2e_common/tcp_proxy.rs @@ -26,9 +26,8 @@ struct ProxyImpairments { impl TcpDisconnectProxy { pub async fn start(target_base_url: &str) -> Self { let target_addr = extract_host_port(target_base_url); - let listener = bind_loopback_listener() - .await - .expect("proxy should bind to an ephemeral port"); + let listener = + bind_loopback_listener().await.expect("proxy should bind to an ephemeral port"); let bind_addr = listener.local_addr().expect("proxy should have a local addr"); let paused = Arc::new(AtomicBool::new(false)); let impairments = Arc::new(ProxyImpairments::default()); @@ -92,22 +91,16 @@ impl TcpDisconnectProxy { } pub fn blackhole(&self) { - self.impairments - .blackhole_traffic - .store(true, Ordering::SeqCst); + self.impairments.blackhole_traffic.store(true, Ordering::SeqCst); } pub fn restore_traffic(&self) { - self.impairments - .blackhole_traffic - .store(false, Ordering::SeqCst); + self.impairments.blackhole_traffic.store(false, Ordering::SeqCst); } pub fn set_chunk_delay(&self, delay: Duration) { let delay_ms = delay.as_millis().min(u64::MAX as u128) as u64; - self.impairments - .chunk_delay_ms - .store(delay_ms, Ordering::SeqCst); + self.impairments.chunk_delay_ms.store(delay_ms, Ordering::SeqCst); } pub fn clear_chunk_delay(&self) { @@ -218,4 +211,4 @@ fn extract_host_port(base_url: &str) -> String { .next() .unwrap_or("127.0.0.1:9188") .to_string() -} \ No newline at end of file +} diff --git a/pg/tests/e2e_ddl/lifecycle.rs b/pg/tests/e2e_ddl/lifecycle.rs index 9608e6ebc..40111a26f 100644 --- a/pg/tests/e2e_ddl/lifecycle.rs +++ b/pg/tests/e2e_ddl/lifecycle.rs @@ -1,5 +1,64 @@ +use kalam_client::{AuthProvider, KalamLinkClient}; +use std::time::{Duration, Instant}; + use super::common::{ensure_schema_exists, require_ddl_env, unique_name}; +fn kalamdb_server_url() -> String { + std::env::var("KALAMDB_SERVER_URL") + .unwrap_or_else(|_| "http://127.0.0.1:8080".to_string()) + .trim_end_matches('/') + .to_string() +} + +fn kalamlink_client(bearer_token: &str) -> KalamLinkClient { + KalamLinkClient::builder() + .base_url(kalamdb_server_url()) + .auth(AuthProvider::jwt_token(bearer_token.to_string())) + .build() + .expect("build KalamLink client") +} + +async fn wait_for_postgres_row( + pg: &tokio_postgres::Client, + select_sql: &str, + row_id: &str, +) -> tokio_postgres::Row { + let deadline = Instant::now() + Duration::from_secs(5); + + loop { + if let Some(row) = pg + .query_opt(select_sql, &[&row_id]) + .await + .expect("query Postgres row") + { + return row; + } + + if Instant::now() >= deadline { + panic!("Postgres did not observe row '{row_id}' within timeout"); + } + + tokio::time::sleep(Duration::from_millis(100)).await; + } +} + +fn assert_file_json_text( + attachment_text: &str, + expected_name: &str, + expected_mime: &str, + expected_size: usize, +) -> serde_json::Value { + let attachment_json: serde_json::Value = + serde_json::from_str(attachment_text).expect("parse Postgres jsonb text"); + assert_eq!(attachment_json["name"].as_str(), Some(expected_name)); + assert_eq!(attachment_json["mime"].as_str(), Some(expected_mime)); + assert_eq!(attachment_json["size"].as_u64(), Some(expected_size as u64)); + assert!(!attachment_json["id"].as_str().unwrap_or_default().is_empty()); + assert!(!attachment_json["sub"].as_str().unwrap_or_default().is_empty()); + assert!(!attachment_json["sha256"].as_str().unwrap_or_default().is_empty()); + attachment_json 
+} + #[tokio::test] async fn e2e_ddl_create_shared_table() { let env = require_ddl_env!(); @@ -19,7 +78,10 @@ async fn e2e_ddl_create_shared_table() { pg.batch_execute(&sql).await.expect("CREATE TABLE USING kalamdb"); env.wait_for_kalamdb_table_exists(ns, &table).await; - assert!(env.kalamdb_table_exists(ns, &table).await, "KalamDB table {ns}.{table} should exist after CREATE TABLE USING kalamdb"); + assert!( + env.kalamdb_table_exists(ns, &table).await, + "KalamDB table {ns}.{table} should exist after CREATE TABLE USING kalamdb" + ); let cols = env.kalamdb_columns(ns, &table).await; eprintln!("[DDL] Created {ns}.{table}, columns: {cols:?}"); @@ -51,7 +113,10 @@ async fn e2e_ddl_create_user_table() { pg.batch_execute(&sql).await.expect("CREATE TABLE USING kalamdb (user)"); env.wait_for_kalamdb_table_exists(ns, &table).await; - assert!(env.kalamdb_table_exists(ns, &table).await, "KalamDB user table {ns}.{table} should exist"); + assert!( + env.kalamdb_table_exists(ns, &table).await, + "KalamDB user table {ns}.{table} should exist" + ); let cols = env.kalamdb_columns(ns, &table).await; eprintln!("[DDL] Created user table {ns}.{table}, columns: {cols:?}"); @@ -64,6 +129,354 @@ async fn e2e_ddl_create_user_table() { .ok(); } +#[tokio::test] +async fn e2e_ddl_create_file_column_mirrors_as_jsonb() { + let env = require_ddl_env!(); + let pg = env.pg_connect().await; + + let ns = "ddl_test"; + let table = unique_name("file_tbl"); + ensure_schema_exists(&pg, ns).await; + + let sql = format!( + "CREATE TABLE {ns}.{table} ( + id TEXT, + attachment FILE + ) USING kalamdb WITH (type = 'shared');" + ); + pg.batch_execute(&sql) + .await + .expect("CREATE TABLE USING kalamdb with FILE column"); + env.wait_for_kalamdb_table_exists(ns, &table).await; + + let local_type: String = pg + .query_one( + "SELECT format_type(a.atttypid, a.atttypmod) + FROM pg_attribute a + JOIN pg_class c ON a.attrelid = c.oid + JOIN pg_namespace n ON c.relnamespace = n.oid + WHERE n.nspname = $1 + AND c.relname = $2 + AND a.attname = 'attachment' + AND a.attnum > 0 + AND NOT a.attisdropped", + &[&ns, &table], + ) + .await + .expect("resolve mirrored attachment column type") + .get(0); + + assert_eq!(local_type, "jsonb"); + + pg.batch_execute(&format!("DROP FOREIGN TABLE IF EXISTS {ns}.{table};")) + .await + .ok(); +} + +#[tokio::test] +#[ntest::timeout(1200)] +async fn e2e_ddl_file_column_roundtrip_via_kalamlink() { + let env = require_ddl_env!(); + let pg = env.pg_connect().await; + + let namespace = "ddl_test"; + let table = unique_name("file_roundtrip"); + let row_id = unique_name("file_row"); + let file_name = "hello.txt"; + let file_mime = "text/plain"; + let file_bytes = b"hello from kalamlink".to_vec(); + + ensure_schema_exists(&pg, namespace).await; + + let create_sql = format!( + "CREATE TABLE {namespace}.{table} ( + id TEXT, + attachment FILE + ) USING kalamdb WITH (type = 'shared');" + ); + pg.batch_execute(&create_sql) + .await + .expect("CREATE TABLE USING kalamdb with FILE column for KalamLink roundtrip"); + env.wait_for_kalamdb_table_exists(namespace, &table).await; + + let client = KalamLinkClient::builder() + .base_url(kalamdb_server_url()) + .auth(AuthProvider::jwt_token(env.bearer_token.clone())) + .build() + .expect("build KalamLink client"); + + let insert_sql = format!( + "INSERT INTO {namespace}.{table} (id, attachment) VALUES ('{row_id}', FILE(\"attachment\"))" + ); + let insert_result = client + .execute_with_files( + &insert_sql, + vec![( + "attachment", + file_name, + file_bytes.clone(), + 
Some(file_mime), + )], + None, + None, + ) + .await + .expect("insert FILE row via KalamLink"); + assert!(insert_result.success(), "KalamLink insert should succeed"); + + let select_sql = format!( + "SELECT attachment::text, + jsonb_typeof(attachment), + attachment->>'name', + attachment->>'mime', + (attachment->>'size')::bigint, + attachment->>'sha256' + FROM {namespace}.{table} + WHERE id = $1" + ); + + let deadline = Instant::now() + Duration::from_secs(5); + let row = loop { + if let Some(row) = pg + .query_opt(&select_sql, &[&row_id]) + .await + .expect("query Postgres FILE row") + { + break row; + } + + if Instant::now() >= deadline { + panic!( + "Postgres did not observe KalamLink-inserted FILE row for {}.{} within timeout", + namespace, table + ); + } + + tokio::time::sleep(Duration::from_millis(100)).await; + }; + + let attachment_text: String = row.get(0); + let attachment_kind: String = row.get(1); + let attachment_name: String = row.get(2); + let attachment_mime: String = row.get(3); + let attachment_size: i64 = row.get(4); + let attachment_sha256: String = row.get(5); + + assert_eq!(attachment_kind, "object"); + assert_eq!(attachment_name, file_name); + assert_eq!(attachment_mime, file_mime); + assert_eq!(attachment_size, file_bytes.len() as i64); + assert!(!attachment_sha256.is_empty(), "sha256 should be populated"); + + let attachment_json: serde_json::Value = + serde_json::from_str(&attachment_text).expect("parse Postgres jsonb text"); + assert_eq!(attachment_json["name"].as_str(), Some(file_name)); + assert_eq!(attachment_json["mime"].as_str(), Some(file_mime)); + assert_eq!(attachment_json["size"].as_u64(), Some(file_bytes.len() as u64)); + assert!(!attachment_json["id"].as_str().unwrap_or_default().is_empty()); + assert!(!attachment_json["sub"].as_str().unwrap_or_default().is_empty()); + assert_eq!(attachment_json["sha256"].as_str(), Some(attachment_sha256.as_str())); + + pg.batch_execute(&format!("DROP FOREIGN TABLE IF EXISTS {namespace}.{table};")) + .await + .ok(); +} + +#[tokio::test] +#[ntest::timeout(1200)] +async fn e2e_ddl_multiple_file_columns_roundtrip_via_kalamlink() { + let env = require_ddl_env!(); + let pg = env.pg_connect().await; + + let namespace = "ddl_test"; + let table = unique_name("file_multi"); + let row_id = unique_name("file_multi_row"); + let avatar_name = "avatar.png"; + let avatar_mime = "image/png"; + let avatar_bytes = b"png-avatar-bytes".to_vec(); + let contract_name = "contract.pdf"; + let contract_mime = "application/pdf"; + let contract_bytes = b"pdf-contract-bytes".to_vec(); + + ensure_schema_exists(&pg, namespace).await; + + let create_sql = format!( + "CREATE TABLE {namespace}.{table} ( + id TEXT, + avatar FILE, + contract FILE + ) USING kalamdb WITH (type = 'shared');" + ); + pg.batch_execute(&create_sql) + .await + .expect("CREATE TABLE USING kalamdb with multiple FILE columns"); + env.wait_for_kalamdb_table_exists(namespace, &table).await; + + let client = kalamlink_client(&env.bearer_token); + let insert_sql = format!( + "INSERT INTO {namespace}.{table} (id, avatar, contract) VALUES ('{row_id}', FILE(\"avatar\"), FILE(\"contract\"))" + ); + let insert_result = client + .execute_with_files( + &insert_sql, + vec![ + ("avatar", avatar_name, avatar_bytes.clone(), Some(avatar_mime)), + ( + "contract", + contract_name, + contract_bytes.clone(), + Some(contract_mime), + ), + ], + None, + None, + ) + .await + .expect("insert multi-FILE row via KalamLink"); + assert!(insert_result.success(), "multi-FILE KalamLink insert should succeed"); + 
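// These FILE-column tests repeatedly poll Postgres until the KalamLink write
// becomes visible. A generic sketch of that deadline-poll loop (tokio assumed;
// the 100 ms cadence and the panic-on-timeout behavior mirror the tests, while
// `poll_until` itself is an illustrative helper name, not one the suite defines):
use std::time::{Duration, Instant};

async fn poll_until<T, F, Fut>(mut probe: F, timeout: Duration, what: &str) -> T
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Option<T>>,
{
    let deadline = Instant::now() + timeout;
    loop {
        // Each probe is one full round trip; None means "not visible yet".
        if let Some(value) = probe().await {
            return value;
        }
        if Instant::now() >= deadline {
            panic!("{what} not observed within timeout");
        }
        tokio::time::sleep(Duration::from_millis(100)).await;
    }
}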
+ let select_sql = format!( + "SELECT avatar::text, contract::text + FROM {namespace}.{table} + WHERE id = $1" + ); + let row = wait_for_postgres_row(&pg, &select_sql, &row_id).await; + let avatar_text: String = row.get(0); + let contract_text: String = row.get(1); + + assert_file_json_text(&avatar_text, avatar_name, avatar_mime, avatar_bytes.len()); + assert_file_json_text( + &contract_text, + contract_name, + contract_mime, + contract_bytes.len(), + ); + + pg.batch_execute(&format!("DROP FOREIGN TABLE IF EXISTS {namespace}.{table};")) + .await + .ok(); +} + +#[tokio::test] +#[ntest::timeout(1200)] +async fn e2e_ddl_file_update_via_kalamlink_is_visible_in_postgres() { + let env = require_ddl_env!(); + let pg = env.pg_connect().await; + + let namespace = "ddl_test"; + let table = unique_name("file_update"); + let row_id = unique_name("file_update_row"); + let initial_file_name = "draft.txt"; + let initial_file_mime = "text/plain"; + let initial_file_bytes = b"draft file contents".to_vec(); + let updated_file_name = "final.txt"; + let updated_file_mime = "text/plain"; + let updated_file_bytes = b"final file contents with more bytes".to_vec(); + + ensure_schema_exists(&pg, namespace).await; + + let create_sql = format!( + "CREATE TABLE {namespace}.{table} ( + id TEXT, + attachment FILE + ) USING kalamdb WITH (type = 'shared');" + ); + pg.batch_execute(&create_sql) + .await + .expect("CREATE TABLE USING kalamdb with FILE column for update test"); + env.wait_for_kalamdb_table_exists(namespace, &table).await; + + let client = kalamlink_client(&env.bearer_token); + let insert_sql = format!( + "INSERT INTO {namespace}.{table} (id, attachment) VALUES ('{row_id}', FILE(\"attachment\"))" + ); + let insert_result = client + .execute_with_files( + &insert_sql, + vec![( + "attachment", + initial_file_name, + initial_file_bytes.clone(), + Some(initial_file_mime), + )], + None, + None, + ) + .await + .expect("insert initial FILE row via KalamLink"); + assert!(insert_result.success(), "initial FILE insert should succeed"); + + let select_sql = format!( + "SELECT attachment::text + FROM {namespace}.{table} + WHERE id = $1" + ); + let initial_row = wait_for_postgres_row(&pg, &select_sql, &row_id).await; + let initial_text: String = initial_row.get(0); + let initial_json = assert_file_json_text( + &initial_text, + initial_file_name, + initial_file_mime, + initial_file_bytes.len(), + ); + let initial_sha256 = initial_json["sha256"] + .as_str() + .expect("initial sha256 should be present") + .to_string(); + + let update_sql = format!( + "UPDATE {namespace}.{table} SET attachment = FILE(\"attachment\") WHERE id = '{row_id}'" + ); + let update_result = client + .execute_with_files( + &update_sql, + vec![( + "attachment", + updated_file_name, + updated_file_bytes.clone(), + Some(updated_file_mime), + )], + None, + None, + ) + .await + .expect("update FILE row via KalamLink"); + assert!(update_result.success(), "FILE update should succeed"); + + let deadline = Instant::now() + Duration::from_secs(5); + let updated_json = loop { + let row = pg + .query_one(&select_sql, &[&row_id]) + .await + .expect("query updated Postgres FILE row"); + let updated_text: String = row.get(0); + let updated_json: serde_json::Value = + serde_json::from_str(&updated_text).expect("parse updated Postgres jsonb text"); + + if updated_json["name"].as_str() == Some(updated_file_name) { + break updated_json; + } + + if Instant::now() >= deadline { + panic!( + "Postgres did not observe updated FILE metadata for {}.{} within timeout", + 
namespace, table + ); + } + + tokio::time::sleep(Duration::from_millis(100)).await; + }; + + assert_eq!(updated_json["name"].as_str(), Some(updated_file_name)); + assert_eq!(updated_json["mime"].as_str(), Some(updated_file_mime)); + assert_eq!(updated_json["size"].as_u64(), Some(updated_file_bytes.len() as u64)); + assert_ne!(updated_json["sha256"].as_str(), Some(initial_sha256.as_str())); + + pg.batch_execute(&format!("DROP FOREIGN TABLE IF EXISTS {namespace}.{table};")) + .await + .ok(); +} + #[tokio::test] async fn e2e_ddl_alter_add_column() { let env = require_ddl_env!(); @@ -134,12 +547,9 @@ async fn e2e_ddl_alter_drop_column() { let alter_sql = format!("ALTER FOREIGN TABLE {ns}.{table} DROP COLUMN description;"); pg.batch_execute(&alter_sql).await.expect("ALTER DROP COLUMN"); let cols_after = env - .wait_for_kalamdb_columns( - ns, - &table, - "dropped columns to exclude description", - |columns| !columns.iter().any(|column| column == "description"), - ) + .wait_for_kalamdb_columns(ns, &table, "dropped columns to exclude description", |columns| { + !columns.iter().any(|column| column == "description") + }) .await; eprintln!("[DDL] After DROP COLUMN: columns = {cols_after:?}"); assert!( @@ -262,10 +672,15 @@ async fn e2e_ddl_schema_qualified_create() { age INTEGER ) USING kalamdb WITH (type = 'shared');" ); - pg.batch_execute(&sql).await.expect("CREATE TABLE USING kalamdb (schema-qualified)"); + pg.batch_execute(&sql) + .await + .expect("CREATE TABLE USING kalamdb (schema-qualified)"); env.wait_for_kalamdb_table_exists(&ns, &table).await; - assert!(env.kalamdb_table_exists(&ns, &table).await, "KalamDB table {ns}.{table} should exist after schema-qualified CREATE"); + assert!( + env.kalamdb_table_exists(&ns, &table).await, + "KalamDB table {ns}.{table} should exist after schema-qualified CREATE" + ); let cols = env .wait_for_kalamdb_columns(&ns, &table, "schema-qualified columns to exist", |columns| { @@ -283,9 +698,12 @@ async fn e2e_ddl_schema_qualified_create() { .await .expect("ALTER ADD COLUMN (schema-qualified)"); let cols_after = env - .wait_for_kalamdb_columns(&ns, &table, "schema-qualified alter to include email", |columns| { - columns.iter().any(|column| column == "email") - }) + .wait_for_kalamdb_columns( + &ns, + &table, + "schema-qualified alter to include email", + |columns| columns.iter().any(|column| column == "email"), + ) .await; assert!( cols_after.contains(&"email".to_string()), diff --git a/pg/tests/e2e_ddl/options.rs b/pg/tests/e2e_ddl/options.rs index 404580778..e5034c83f 100644 --- a/pg/tests/e2e_ddl/options.rs +++ b/pg/tests/e2e_ddl/options.rs @@ -1,4 +1,6 @@ -use super::common::{ensure_schema_exists, pg_kalam_exec, require_ddl_env, unique_name, DdlTestEnv}; +use super::common::{ + ensure_schema_exists, pg_kalam_exec, require_ddl_env, unique_name, DdlTestEnv, +}; use std::env; use tokio_postgres::{Config, NoTls}; @@ -33,9 +35,7 @@ async fn wait_for_backend_session_cleanup(env: &DdlTestEnv, backend_pid: i32, co } if std::time::Instant::now() >= deadline { - panic!( - "backend pid {backend_pid} remained in system.sessions after {context}" - ); + panic!("backend pid {backend_pid} remained in system.sessions after {context}"); } tokio::time::sleep(std::time::Duration::from_millis(100)).await; @@ -90,7 +90,7 @@ async fn e2e_ddl_create_table_using_kalamdb_forwards_shared_options() { ensure_schema_exists(&pg, &ns).await; pg.batch_execute(&format!( - "CREATE TABLE {ns}.{table} ( + "CREATE TABLE {ns}.{table} ( id BIGINT, title TEXT ) USING kalamdb WITH ( @@ -100,7 +100,7 
@@ async fn e2e_ddl_create_table_using_kalamdb_forwards_shared_options() { );" )) .await - .expect("create shared Kalam table with forwarded options"); + .expect("create shared Kalam table with forwarded options"); env.wait_for_kalamdb_table_exists(&ns, &table).await; let metadata = env @@ -151,7 +151,7 @@ async fn e2e_ddl_create_table_using_kalamdb_forwards_stream_ttl() { ensure_schema_exists(&pg, &ns).await; pg.batch_execute(&format!( - "CREATE TABLE {ns}.{table} ( + "CREATE TABLE {ns}.{table} ( event_type TEXT, payload TEXT ) USING kalamdb WITH ( @@ -160,7 +160,7 @@ async fn e2e_ddl_create_table_using_kalamdb_forwards_stream_ttl() { );" )) .await - .expect("create stream Kalam table with ttl"); + .expect("create stream Kalam table with ttl"); env.wait_for_kalamdb_table_exists(&ns, &table).await; let metadata = env @@ -326,8 +326,12 @@ async fn e2e_ddl_create_table_using_kalamdb_disconnect_cleans_session_row() { .expect("create shared Kalam table"); pg.disconnect().await; - wait_for_backend_session_cleanup(env, backend_pid, "disconnect after CREATE TABLE USING kalamdb") - .await; + wait_for_backend_session_cleanup( + env, + backend_pid, + "disconnect after CREATE TABLE USING kalamdb", + ) + .await; let cleanup = env.pg_connect().await; cleanup @@ -352,7 +356,7 @@ async fn e2e_ddl_rejects_unsafe_option_keys() { let error = pg .batch_execute(&format!( - "CREATE TABLE {ns}.{table} ( + "CREATE TABLE {ns}.{table} ( id BIGINT, title TEXT ) USING kalamdb WITH ( @@ -377,9 +381,7 @@ async fn e2e_ddl_rejects_unsafe_option_keys() { pg.batch_execute(&format!("DROP FOREIGN TABLE IF EXISTS {ns}.{table};")) .await .ok(); - pg.batch_execute(&format!("DROP SCHEMA IF EXISTS {ns} CASCADE;")) - .await - .ok(); + pg.batch_execute(&format!("DROP SCHEMA IF EXISTS {ns} CASCADE;")).await.ok(); } #[tokio::test] @@ -398,6 +400,5 @@ async fn e2e_ddl_kalam_exec_disconnect_cleans_session_row() { assert!(result.contains("1"), "unexpected kalam_exec SELECT response: {result}"); pg.disconnect().await; - wait_for_backend_session_cleanup(env, backend_pid, "disconnect after kalam_exec") - .await; + wait_for_backend_session_cleanup(env, backend_pid, "disconnect after kalam_exec").await; } diff --git a/pg/tests/e2e_ddl_common/mod.rs b/pg/tests/e2e_ddl_common/mod.rs index ce287f784..b9c602b70 100644 --- a/pg/tests/e2e_ddl_common/mod.rs +++ b/pg/tests/e2e_ddl_common/mod.rs @@ -184,13 +184,13 @@ impl DdlTestEnv { Ok(env) => { SKIP_REASON.get_or_init(|| None); Some(ENV.get_or_init(|| env)) - } + }, Err(reason) => { eprintln!(" [SKIP] DDL tests skipped: {reason}"); SKIP_REASON.get_or_init(|| Some(reason)); // Store a dummy env so subsequent calls don't re-init. 
None - } + }, } } @@ -310,9 +307,7 @@ impl DdlTestEnv { if std::time::Instant::now() >= deadline { let expectation = if should_exist { "exist" } else { "be removed" }; - panic!( - "KalamDB table {namespace}.{table} did not {expectation} within timeout" - ); + panic!("KalamDB table {namespace}.{table} did not {expectation} within timeout"); } tokio::time::sleep(Duration::from_millis(100)).await; @@ -371,7 +369,10 @@ impl DdlTestEnv { } async fn ensure_test_db(&self) -> Result<(), String> { - let postgres = self.pg_connect_to("postgres").await.map_err(|e| format!("connect to postgres database: {e}"))?; + let postgres = self + .pg_connect_to("postgres") + .await + .map_err(|e| format!("connect to postgres database: {e}"))?; let exists = postgres .query_opt("SELECT 1 FROM pg_database WHERE datname = $1", &[&TEST_DB]) @@ -434,8 +435,7 @@ impl DdlTestEnv { } .await; - let _ = pg.execute("SELECT pg_advisory_unlock($1)", &[&BOOTSTRAP_LOCK_ID]) - .await; + let _ = pg.execute("SELECT pg_advisory_unlock($1)", &[&BOOTSTRAP_LOCK_ID]).await; pg.disconnect().await; @@ -448,7 +448,7 @@ impl DdlTestEnv { Ok(client) => { client.disconnect().await; return Ok(()); - } + }, Err(_) => { if i == 0 { eprintln!(" waiting for PostgreSQL on port {PG_PORT}..."); @@ -463,10 +463,7 @@ impl DdlTestEnv { )) } - async fn pg_connect_to( - &self, - dbname: &str, - ) -> Result<OwnedPgClient, tokio_postgres::Error> { + async fn pg_connect_to(&self, dbname: &str) -> Result<OwnedPgClient, tokio_postgres::Error> { let (client, conn) = Config::new() .host(PG_HOST) .port(PG_PORT) @@ -493,11 +490,15 @@ impl DdlTestEnv { } let _ = client - .post_json(&format!("{}/v1/api/auth/setup", config.base_url), &serde_json::json!({ - "username": config.setup_username, - "password": config.setup_password, - "root_password": config.root_password, - }), None) + .post_json( + &format!("{}/v1/api/auth/setup", config.base_url), + &serde_json::json!({ + "username": config.setup_username, + "password": config.setup_password, + "root_password": config.root_password, + }), + None, + ) .await; if let Some(token) = @@ -529,10 +530,14 @@ async fn try_login( password: &str, ) -> Option<String> { let resp = client - .post_json(&format!("{base_url}/v1/api/auth/login"), &serde_json::json!({ - "username": username, - "password": password, - }), None) + .post_json( + &format!("{base_url}/v1/api/auth/login"), + &serde_json::json!({ + "username": username, + "password": password, + }), + None, + ) .await .ok()?; if !resp.status.is_success() { diff --git a/pg/tests/e2e_dml/basic.rs index 87459c7e0..e69613770 100644 --- a/pg/tests/e2e_dml/basic.rs +++ b/pg/tests/e2e_dml/basic.rs @@ -108,12 +108,7 @@ async fn e2e_user_table_isolation() { let qualified_table = format!("e2e.{table}"); let pg_a = env.pg_connect().await; - create_user_kalam_table( - &pg_a, - &table, - "id TEXT, name TEXT, age INTEGER", - ) - .await; + create_user_kalam_table(&pg_a, &table, "id TEXT, name TEXT, age INTEGER").await; set_user_id(&pg_a, "user-a").await; await_user_shard_leader("user-a").await; @@ -121,17 +116,17 @@ async fn e2e_user_table_isolation() { set_user_id(&pg_b, "user-b").await; await_user_shard_leader("user-b").await; - let user_a_insert = format!( + let user_a_insert = format!( "INSERT INTO {qualified_table} (id, name, age) VALUES \ ('a1', 'Alice', 30), ('a2', 'Ada', 25);" - ); - retry_transient_user_leader_error("user-a insert", || pg_a.batch_execute(&user_a_insert)).await; + ); + retry_transient_user_leader_error("user-a insert", || pg_a.batch_execute(&user_a_insert)).await; - let user_b_insert = 
format!( "INSERT INTO {qualified_table} (id, name, age) VALUES \ ('b1', 'Bob', 40), ('b2', 'Blake', 35), ('b3', 'Bea', 28);" - ); - retry_transient_user_leader_error("user-b insert", || pg_b.batch_execute(&user_b_insert)).await; + ); + retry_transient_user_leader_error("user-b insert", || pg_b.batch_execute(&user_b_insert)).await; let count_a = count_rows(&pg_a, &qualified_table, None).await; assert_eq!(count_a, 2, "user-a should see 2 rows"); diff --git a/pg/tests/e2e_dml/failure_modes.rs index 1da3ec214..c0de8746b 100644 --- a/pg/tests/e2e_dml/failure_modes.rs +++ b/pg/tests/e2e_dml/failure_modes.rs @@ -11,12 +11,7 @@ async fn e2e_duplicate_primary_key_insert_fails() { let table = unique_name("profiles"); let qualified_table = format!("e2e.{table}"); - create_user_kalam_table( - &pg, - &table, - "id TEXT, name TEXT, age INTEGER", - ) - .await; + create_user_kalam_table(&pg, &table, "id TEXT, name TEXT, age INTEGER").await; set_user_id(&pg, "dup-user").await; await_user_shard_leader("dup-user").await; @@ -65,9 +60,7 @@ async fn e2e_insert_without_backing_kalamdb_table_fails() { ) USING kalamdb WITH (type = 'user');" ); - pg.batch_execute(&create_sql) - .await - .expect("create local-only Kalam table"); + pg.batch_execute(&create_sql).await.expect("create local-only Kalam table"); env.kalamdb_sql(&format!("DROP USER TABLE IF EXISTS app.{table}")).await; set_user_id(&pg, "user-1").await; @@ -97,12 +90,7 @@ async fn e2e_user_table_scan_without_user_id_fails_clearly() { let table = unique_name("profiles_missing_uid"); let writer = env.pg_connect().await; - create_user_kalam_table( - &writer, - &table, - "id TEXT, name TEXT, age INTEGER", - ) - .await; + create_user_kalam_table(&writer, &table, "id TEXT, name TEXT, age INTEGER").await; set_user_id(&writer, "scan-user").await; await_user_shard_leader("scan-user").await; let seed_insert_sql = diff --git a/pg/tests/e2e_dml/interop.rs index 5ec5d9804..e2b2f6f8c 100644 --- a/pg/tests/e2e_dml/interop.rs +++ b/pg/tests/e2e_dml/interop.rs @@ -1,4 +1,85 @@ -use super::common::{create_shared_kalam_table, delete_all, unique_name, TestEnv}; +use super::common::{ + await_user_shard_leader, count_rows, create_shared_kalam_table, create_user_kalam_table, + delete_all, retry_transient_user_leader_error, set_user_id, unique_name, TestEnv, +}; + +struct KalamTestUser { + username: String, + user_id: String, +} + +fn sql_first_cell_string(result: &serde_json::Value) -> Option<String> { + result["results"] + .as_array() + .and_then(|results| results.first()) + .and_then(|entry| entry["rows"].as_array()) + .and_then(|rows| rows.first()) + .and_then(|row| row.as_array()) + .and_then(|columns| columns.first()) + .and_then(|value| value.as_str().map(ToString::to_string)) +} + +fn sql_first_cell_i64(result: &serde_json::Value) -> Option<i64> { + result["results"] + .as_array() + .and_then(|results| results.first()) + .and_then(|entry| entry["rows"].as_array()) + .and_then(|rows| rows.first()) + .and_then(|row| row.as_array()) + .and_then(|columns| columns.first()) + .and_then(|value| { + value + .as_i64() + .or_else(|| value.as_u64().and_then(|raw| i64::try_from(raw).ok())) + .or_else(|| value.as_str().and_then(|raw| raw.parse::<i64>().ok())) + }) +} + +async fn create_kalam_test_user(env: &TestEnv, prefix: &str) -> KalamTestUser { + let username = unique_name(prefix); + let password = format!("pw_{username}"); + + env.kalamdb_sql(&format!("CREATE USER '{username}' WITH PASSWORD '{password}' ROLE user")) + .await; + 
let lookup = env + .kalamdb_sql(&format!("SELECT user_id FROM system.users WHERE username = '{username}'")) + .await; + let user_id = sql_first_cell_string(&lookup) + .unwrap_or_else(|| panic!("expected user_id for Kalam test user {username}")); + + KalamTestUser { username, user_id } +} + +async fn wait_for_execute_as_user_count( + env: &TestEnv, + user_id: &str, + qualified_table: &str, + row_id: &str, + expected: i64, +) { + let deadline = std::time::Instant::now() + std::time::Duration::from_secs(5); + + loop { + let result = env + .kalamdb_sql(&format!( + "EXECUTE AS USER '{user_id}' (SELECT COUNT(*) FROM {qualified_table} WHERE id = '{row_id}')" + )) + .await; + let count = sql_first_cell_i64(&result).unwrap_or_default(); + if count == expected { + return; + } + + if std::time::Instant::now() >= deadline { + panic!( + "EXECUTE AS USER '{user_id}' expected count {expected} for {qualified_table}.{row_id}, got {count}" + ); + } + + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } +} #[tokio::test] async fn e2e_cross_verify_fdw_to_rest() { @@ -32,7 +113,7 @@ async fn e2e_cross_verify_fdw_to_rest() { } #[tokio::test] -#[ntest::timeout(7000)] +#[ntest::timeout(1200)] async fn e2e_dml_changes_are_visible_in_kalamdb() { let env = TestEnv::global().await; let pg = env.pg_connect().await; @@ -77,13 +158,10 @@ async fn e2e_dml_changes_are_visible_in_kalamdb() { .expect("delete should succeed"); let deleted = env - .kalamdb_sql(&format!("SELECT id FROM e2e.{table} WHERE id = 'sync-1'")) + .kalamdb_sql(&format!("SELECT COUNT(*) FROM e2e.{table} WHERE id = 'sync-1'")) .await; - let deleted_text = serde_json::to_string(&deleted).unwrap_or_default(); - assert!( - !deleted_text.contains("sync-1"), - "deleted row should no longer be visible in KalamDB: {deleted_text}" - ); + let deleted_count = sql_first_cell_i64(&deleted).unwrap_or_default(); + assert_eq!(deleted_count, 0, "deleted row should no longer be visible in KalamDB"); } #[tokio::test] @@ -118,7 +196,7 @@ async fn e2e_select_filters_and_postgres_join_work() { "INSERT INTO local_meta (id, segment) VALUES ('j1', 'bronze'), ('j2', 'silver'), - ('j3', 'gold');" + ('j3', 'gold');", ) .await .expect("insert local metadata"); @@ -144,6 +222,56 @@ async fn e2e_select_filters_and_postgres_join_work() { assert_eq!(second, ("j3", "Gamma", "gold")); } +#[tokio::test] +#[ntest::timeout(1200)] +async fn e2e_shared_tables_can_join_each_other_in_postgres() { + let env = TestEnv::global().await; + let pg = env.pg_connect().await; + let customers_table = unique_name("join_shared_customers"); + let orders_table = unique_name("join_shared_orders"); + + create_shared_kalam_table(&pg, &customers_table, "id TEXT, name TEXT").await; + create_shared_kalam_table(&pg, &orders_table, "id TEXT, customer_id TEXT, total INTEGER").await; + + pg.batch_execute(&format!( + "INSERT INTO e2e.{customers_table} (id, name) VALUES \ + ('c1', 'Alice'), \ + ('c2', 'Bob');" + )) + .await + .expect("insert shared customers"); + + pg.batch_execute(&format!( + "INSERT INTO e2e.{orders_table} (id, customer_id, total) VALUES \ + ('o1', 'c1', 15), \ + ('o2', 'c1', 20), \ + ('o3', 'c2', 30);" + )) + .await + .expect("insert shared orders"); + + let rows = pg + .query( + &format!( + "SELECT c.id, c.name, o.id, o.total + FROM e2e.{customers_table} AS c + JOIN e2e.{orders_table} AS o ON o.customer_id = c.id + ORDER BY c.id, o.id" + ), + &[], + ) + .await + .expect("join shared tables query"); + + assert_eq!(rows.len(), 3, "expected three joined rows across shared tables"); + 
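// The sql_first_cell_* helpers above all walk the same KalamDB SQL response
// envelope: results[0].rows[0][0]. serde_json's JSON Pointer syntax expresses
// that traversal in a single call; `first_cell` is an illustrative equivalent,
// not a helper the suite defines:
fn first_cell(result: &serde_json::Value) -> Option<&serde_json::Value> {
    result.pointer("/results/0/rows/0/0")
}

// e.g. first_cell(&serde_json::json!({"results": [{"rows": [[42]]}]}))
//      == Some(&serde_json::json!(42))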
let first: (&str, &str, &str, i32) = (rows[0].get(0), rows[0].get(1), rows[0].get(2), rows[0].get(3)); + let second: (&str, &str, &str, i32) = (rows[1].get(0), rows[1].get(1), rows[1].get(2), rows[1].get(3)); + let third: (&str, &str, &str, i32) = (rows[2].get(0), rows[2].get(1), rows[2].get(2), rows[2].get(3)); + assert_eq!(first, ("c1", "Alice", "o1", 15)); + assert_eq!(second, ("c1", "Alice", "o2", 20)); + assert_eq!(third, ("c2", "Bob", "o3", 30)); +} + #[tokio::test] #[ntest::timeout(7000)] async fn e2e_search_path_schema_mirror_works_without_namespace_option() { @@ -197,3 +325,101 @@ async fn e2e_search_path_schema_mirror_works_without_namespace_option() { .await .ok(); } + +#[tokio::test] +#[ntest::timeout(1200)] +async fn e2e_user_tables_can_join_each_other_in_postgres() { + let env = TestEnv::global().await; + let pg = env.pg_connect().await; + let user_id = unique_name("join_user"); + let profiles_table = unique_name("join_user_profiles"); + let memberships_table = unique_name("join_user_memberships"); + + create_user_kalam_table(&pg, &profiles_table, "id TEXT, display_name TEXT").await; + create_user_kalam_table(&pg, &memberships_table, "id TEXT, profile_id TEXT, plan TEXT").await; + + await_user_shard_leader(&user_id).await; + set_user_id(&pg, &user_id).await; + + let insert_profiles_sql = format!( + "INSERT INTO e2e.{profiles_table} (id, display_name) VALUES \ + ('u1', 'Alice'), \ + ('u2', 'Bob');" + ); + retry_transient_user_leader_error("insert user profiles", || { + pg.batch_execute(&insert_profiles_sql) + }) + .await; + + let insert_memberships_sql = format!( + "INSERT INTO e2e.{memberships_table} (id, profile_id, plan) VALUES \ + ('m1', 'u1', 'pro'), \ + ('m2', 'u2', 'team');" + ); + retry_transient_user_leader_error("insert user memberships", || { + pg.batch_execute(&insert_memberships_sql) + }) + .await; + + let join_sql = format!( + "SELECT p.id, p.display_name, m.plan + FROM e2e.{profiles_table} AS p + JOIN e2e.{memberships_table} AS m ON m.profile_id = p.id + ORDER BY p.id" + ); + let rows = retry_transient_user_leader_error("join user tables query", || { + pg.query(&join_sql, &[]) + }) + .await; + + assert_eq!(rows.len(), 2, "expected two joined rows across user tables"); + let first: (&str, &str, &str) = (rows[0].get(0), rows[0].get(1), rows[0].get(2)); + let second: (&str, &str, &str) = (rows[1].get(0), rows[1].get(1), rows[1].get(2)); + assert_eq!(first, ("u1", "Alice", "pro")); + assert_eq!(second, ("u2", "Bob", "team")); +} + +#[tokio::test] +#[ntest::timeout(2500)] +async fn e2e_user_table_explicit_userid_routes_to_target_user() { + let env = TestEnv::global().await; + let pg = env.pg_connect().await; + let table = unique_name("userid_route"); + let qualified_table = format!("e2e.{table}"); + let row_id = unique_name("uid_row"); + let writer = create_kalam_test_user(env, "pg_writer_user").await; + let target = create_kalam_test_user(env, "pg_target_user").await; + + create_user_kalam_table(&pg, &table, "id TEXT, body TEXT").await; + + await_user_shard_leader(&writer.user_id).await; + await_user_shard_leader(&target.user_id).await; + + set_user_id(&pg, &writer.user_id).await; + let insert_sql = format!( + "INSERT INTO {qualified_table} (id, body, _userid) VALUES ('{row_id}', 'routed-via-explicit-userid', '{}')", + target.user_id + ); + + retry_transient_user_leader_error("explicit _userid insert", || pg.batch_execute(&insert_sql)) + .await; + + let writer_count = count_rows(&pg, &qualified_table, Some(&format!("id = '{row_id}'"))).await; + 
assert_eq!(writer_count, 0, "writer session should not see row routed to explicit _userid"); + set_user_id(&pg, &target.user_id).await; + let target_count = count_rows(&pg, &qualified_table, Some(&format!("id = '{row_id}'"))).await; + assert_eq!(target_count, 1, "target session should see explicitly routed row"); + + wait_for_execute_as_user_count(env, &target.user_id, &qualified_table, &row_id, 1).await; + wait_for_execute_as_user_count(env, &writer.user_id, &qualified_table, &row_id, 0).await; + + let root_result = env + .kalamdb_sql(&format!("SELECT COUNT(*) FROM {qualified_table} WHERE id = '{row_id}'")) + .await; + let root_count = sql_first_cell_i64(&root_result).unwrap_or_default(); + assert_eq!(root_count, 1, "root query should confirm the routed row exists"); + + env.kalamdb_sql(&format!("DROP USER IF EXISTS '{}'", writer.username)).await; + env.kalamdb_sql(&format!("DROP USER IF EXISTS '{}'", target.username)).await; +} diff --git a/pg/tests/e2e_dml/proxy_failures.rs index fab4cf216..9eb24ec5b 100644 --- a/pg/tests/e2e_dml/proxy_failures.rs +++ b/pg/tests/e2e_dml/proxy_failures.rs @@ -1,5 +1,8 @@ use std::time::{Duration, Instant}; -use std::{env, ops::{Deref, DerefMut}}; +use std::{ + env, + ops::{Deref, DerefMut}, +}; use serde_json::Value; use tokio_postgres::{Config, NoTls}; @@ -115,7 +118,9 @@ async fn wait_for_pg_backend_exit(backend_pid: u32, timeout: Duration) { } if Instant::now() >= deadline { - panic!("backend pid {backend_pid} remained in pg_stat_activity past disconnect timeout"); + panic!( + "backend pid {backend_pid} remained in pg_stat_activity past disconnect timeout" + ); } tokio::time::sleep(Duration::from_millis(100)).await; @@ -157,10 +162,7 @@ async fn create_proxy_shared_foreign_table( .await .expect("create proxy foreign table"); - TestEnv::global() - .await - .wait_for_kalamdb_table_exists("e2e", table) - .await; + TestEnv::global().await.wait_for_kalamdb_table_exists("e2e", table).await; } async fn cleanup_proxy_table(env: &TestEnv, table: &str, server_name: &str) { @@ -202,17 +204,14 @@ async fn wait_for_row_count( let deadline = Instant::now() + timeout; loop { - match client - .query_one(&format!("SELECT COUNT(*) FROM {qualified_table}"), &[]) - .await - { + match client.query_one(&format!("SELECT COUNT(*) FROM {qualified_table}"), &[]).await { Ok(row) => { let count: i64 = row.get(0); if count == expected_count { return; } - } - Err(_) => {} + }, + Err(_) => {}, } if Instant::now() >= deadline { @@ -267,7 +266,11 @@ async fn wait_for_transaction_row( } } -async fn wait_for_session_rows(env: &TestEnv, backend_pid: u32, timeout: Duration) -> Vec<Vec<Value>> { +async fn wait_for_session_rows( + env: &TestEnv, + backend_pid: u32, + timeout: Duration, +) -> Vec<Vec<Value>> { let deadline = Instant::now() + timeout; loop { @@ -316,10 +319,7 @@ fn proxy_host_port(base_url: &str) -> (String, u16) { let address = base_url.trim_start_matches("http://").trim_start_matches("https://"); let mut parts = address.split(':'); let host = parts.next().unwrap_or("127.0.0.1").to_string(); - let port = parts - .next() - .and_then(|value| value.parse::<u16>().ok()) - .unwrap_or(9188); + let port = parts.next().and_then(|value| value.parse::<u16>().ok()).unwrap_or(9188); (host, port) } @@ -339,18 +339,18 @@ async fn run_terminal_proxy_cleanup_scenario(action: TerminalAction) { let backend_pid = pg_backend_pid(&pg).await; let tx = pg.transaction().await.expect("begin transaction through proxy"); tx.execute( - &format!( - "INSERT INTO {qualified_table} (id, title, 
value) VALUES ($1, $2, $3)" - ), - &[&format!("{}-1", action.label()), &format!("{} row", action.label()), &7_i32], + &format!("INSERT INTO {qualified_table} (id, title, value) VALUES ($1, $2, $3)"), + &[ + &format!("{}-1", action.label()), + &format!("{} row", action.label()), + &7_i32, + ], ) .await .expect("stage row through proxy-backed foreign table"); assert!( - proxy - .wait_for_active_connections(1, Duration::from_secs(3)) - .await, + proxy.wait_for_active_connections(1, Duration::from_secs(3)).await, "proxy should observe the gRPC connection before transport failure" ); @@ -358,8 +358,10 @@ async fn run_terminal_proxy_cleanup_scenario(action: TerminalAction) { assert_eq!(session_rows.len(), 1); assert_eq!(string_cell(&session_rows[0], 1).as_deref(), Some("idle in transaction")); assert_eq!(string_cell(&session_rows[0], 3).as_deref(), Some("active")); - let transaction_id = string_cell(&session_rows[0], 2).expect("transaction id in system.sessions"); - let transaction_row = wait_for_transaction_row(env, &transaction_id, Duration::from_secs(3)).await; + let transaction_id = + string_cell(&session_rows[0], 2).expect("transaction id in system.sessions"); + let transaction_row = + wait_for_transaction_row(env, &transaction_id, Duration::from_secs(3)).await; assert_eq!(string_cell(&transaction_row, 2).as_deref(), Some("PgRpc")); assert!(matches!( string_cell(&transaction_row, 3).as_deref(), @@ -370,10 +372,8 @@ async fn run_terminal_proxy_cleanup_scenario(action: TerminalAction) { match action { TerminalAction::Commit => { - let terminal_error = tx - .commit() - .await - .expect_err("commit should fail while proxy is down"); + let terminal_error = + tx.commit().await.expect_err("commit should fail while proxy is down"); let message = postgres_error_text(&terminal_error); assert_transport_or_timeout_error(&message, action.label()); @@ -399,10 +399,7 @@ async fn run_terminal_proxy_cleanup_scenario(action: TerminalAction) { wait_for_transaction_cleanup(env, &transaction_id, Duration::from_secs(5)).await; let final_rows = env - .kalamdb_sql(&format!( - "SELECT id FROM {qualified_table} WHERE id = '{}-1'", - action.label() - )) + .kalamdb_sql(&format!("SELECT id FROM {qualified_table} WHERE id = '{}-1'", action.label())) .await; let final_text = serde_json::to_string(&final_rows).unwrap_or_default(); assert!( @@ -589,4 +586,4 @@ async fn e2e_proxy_blackhole_timeout_recovers_after_traffic_is_restored() { cleanup_proxy_table(env, &table, &server_name).await; proxy.shutdown().await; -} \ No newline at end of file +} diff --git a/pg/tests/e2e_dml/sync_stress.rs index dbdb09fcf..1cf109af2 100644 --- a/pg/tests/e2e_dml/sync_stress.rs +++ b/pg/tests/e2e_dml/sync_stress.rs @@ -9,7 +9,8 @@ use tokio::sync::Barrier; type SqlRow = BTreeMap<String, Value>; fn sql_result_rows(result: &Value) -> Vec<SqlRow> { - let Some(result_entry) = result["results"].as_array().and_then(|results| results.first()) else { + let Some(result_entry) = result["results"].as_array().and_then(|results| results.first()) + else { return Vec::new(); }; @@ -29,11 +30,7 @@ fn sql_result_rows(result: &Value) -> Vec<SqlRow> { rows.iter() .filter_map(|row| row.as_array()) .map(|row| { - columns - .iter() - .cloned() - .zip(row.iter().cloned()) - .collect::<SqlRow>() + columns.iter().cloned().zip(row.iter().cloned()).collect::<SqlRow>() }) .collect::<Vec<_>>() }) @@ -79,21 +76,24 @@ fn row_i64(row: &SqlRow, column: &str) -> i64 { let value = row .get(column) .unwrap_or_else(|| panic!("missing column {column} in row {row:?}")); - 
value_as_i64(value).unwrap_or_else(|| panic!("column {column} is not an i64-compatible value: {value:?}")) + value_as_i64(value) + .unwrap_or_else(|| panic!("column {column} is not an i64-compatible value: {value:?}")) } fn row_f64(row: &SqlRow, column: &str) -> f64 { let value = row .get(column) .unwrap_or_else(|| panic!("missing column {column} in row {row:?}")); - value_as_f64(value).unwrap_or_else(|| panic!("column {column} is not an f64-compatible value: {value:?}")) + value_as_f64(value) + .unwrap_or_else(|| panic!("column {column} is not an f64-compatible value: {value:?}")) } fn row_bool(row: &SqlRow, column: &str) -> bool { let value = row .get(column) .unwrap_or_else(|| panic!("missing column {column} in row {row:?}")); - value_as_bool(value).unwrap_or_else(|| panic!("column {column} is not a bool-compatible value: {value:?}")) + value_as_bool(value) + .unwrap_or_else(|| panic!("column {column} is not a bool-compatible value: {value:?}")) } async fn wait_for_api_sql_rows(sql: &str, expected_count: usize, timeout: Duration) -> Vec<SqlRow> { @@ -220,9 +220,7 @@ async fn e2e_bidirectional_typed_roundtrip_between_pg_and_api() { let api_updated_in_pg = pg .query_one( - &format!( - "SELECT label, qty, active, notes FROM {qualified_table} WHERE id = $1" - ), + &format!("SELECT label, qty, active, notes FROM {qualified_table} WHERE id = $1"), &[&"pg-typed-1"], ) .await @@ -257,16 +255,12 @@ async fn e2e_bidirectional_typed_roundtrip_between_pg_and_api() { env.kalamdb_sql(&format!("DELETE FROM {qualified_table} WHERE id = 'pg-typed-2'")) .await; - let deleted_in_api = count_rows(&pg, &qualified_table, Some("id = 'pg-typed-2'")) - .await; + let deleted_in_api = count_rows(&pg, &qualified_table, Some("id = 'pg-typed-2'")).await; assert_eq!(deleted_in_api, 0, "API delete should be visible in PostgreSQL"); - pg.execute( - &format!("DELETE FROM {qualified_table} WHERE id = $1"), - &[&"api-typed-1"], - ) - .await - .expect("delete API-created row through PostgreSQL"); + pg.execute(&format!("DELETE FROM {qualified_table} WHERE id = $1"), &[&"api-typed-1"]) + .await + .expect("delete API-created row through PostgreSQL"); let remaining_rows = wait_for_api_sql_rows( &format!("SELECT id FROM {qualified_table} ORDER BY id"), @@ -387,7 +381,9 @@ async fn e2e_parallel_transactional_inserts_and_updates_stay_consistent() { assert_eq!(row_i64(&api_total[0], "total_rows"), expected_total); let api_inserted = wait_for_api_sql_rows( - &format!("SELECT COUNT(*) AS inserted_rows FROM {qualified_table} WHERE id LIKE 'wrk-%' LIMIT 1"), + &format!( + "SELECT COUNT(*) AS inserted_rows FROM {qualified_table} WHERE id LIKE 'wrk-%' LIMIT 1" + ), 1, Duration::from_secs(5), ) @@ -471,7 +467,10 @@ async fn e2e_transaction_rollback_discards_insert_update_delete_in_pg_and_api() .await .expect("count rows inside rollback transaction") .get(0); - assert_eq!(visible_inside_tx, 2, "transactional view should include insert and delete effects"); + assert_eq!( + visible_inside_tx, 2, + "transactional view should include insert and delete effects" + ); tx.rollback().await.expect("rollback full transactional mutation set"); @@ -518,8 +517,7 @@ async fn e2e_disconnect_abort_discards_uncommitted_changes_in_pg_and_api() { let table = unique_name("disconnect_abort"); let qualified_table = format!("e2e.{table}"); - create_shared_kalam_table(&coordinator, &table, "id TEXT, title TEXT, value INTEGER") - .await; + create_shared_kalam_table(&coordinator, &table, "id TEXT, title TEXT, value INTEGER").await; coordinator .batch_execute(&format!( "@@ 
-547,16 +545,16 @@ async fn e2e_disconnect_abort_discards_uncommitted_changes_in_pg_and_api() { .await .expect("count rows inside SQL transaction") .get(0); - assert_eq!(visible_inside_tx, 2, "transactional session should observe its uncommitted state"); + assert_eq!( + visible_inside_tx, 2, + "transactional session should observe its uncommitted state" + ); pg.disconnect_and_wait_for_session_cleanup().await; wait_for_pg_count(&coordinator, &qualified_table, 2, Duration::from_secs(5)).await; let pg_rows = coordinator - .query( - &format!("SELECT id, title, value FROM {qualified_table} ORDER BY id"), - &[], - ) + .query(&format!("SELECT id, title, value FROM {qualified_table} ORDER BY id"), &[]) .await .expect("query rows after disconnect abort"); assert_eq!(pg_rows.len(), 2); @@ -581,4 +579,4 @@ async fn e2e_disconnect_abort_discards_uncommitted_changes_in_pg_and_api() { assert_eq!(row_i64(base_two, "value"), 20); coordinator.disconnect_and_wait_for_session_cleanup().await; -} \ No newline at end of file +} diff --git a/pg/tests/e2e_dml/transactional.rs b/pg/tests/e2e_dml/transactional.rs index 215c6858e..9f69c8850 100644 --- a/pg/tests/e2e_dml/transactional.rs +++ b/pg/tests/e2e_dml/transactional.rs @@ -11,12 +11,7 @@ async fn e2e_transaction_begin_commit_persists_rows() { let table = unique_name("profiles_tx_commit"); let qualified_table = format!("e2e.{table}"); - create_user_kalam_table( - &pg, - &table, - "id TEXT, name TEXT, age INTEGER", - ) - .await; + create_user_kalam_table(&pg, &table, "id TEXT, name TEXT, age INTEGER").await; set_user_id(&pg, "txn-commit-user").await; await_user_shard_leader("txn-commit-user").await; @@ -47,12 +42,7 @@ async fn e2e_transaction_begin_rollback_discards_rows() { let table = unique_name("profiles_tx_rollback"); let qualified_table = format!("e2e.{table}"); - create_user_kalam_table( - &pg, - &table, - "id TEXT, name TEXT, age INTEGER", - ) - .await; + create_user_kalam_table(&pg, &table, "id TEXT, name TEXT, age INTEGER").await; set_user_id(&pg, "txn-rollback-user").await; await_user_shard_leader("txn-rollback-user").await; @@ -77,12 +67,7 @@ async fn e2e_transaction_duplicate_primary_key_commit_fails() { let table = unique_name("profiles_tx_duplicate"); let qualified_table = format!("e2e.{table}"); - create_user_kalam_table( - &pg, - &table, - "id TEXT, name TEXT, age INTEGER", - ) - .await; + create_user_kalam_table(&pg, &table, "id TEXT, name TEXT, age INTEGER").await; set_user_id(&pg, "txn-duplicate-user").await; await_user_shard_leader("txn-duplicate-user").await; @@ -117,7 +102,7 @@ async fn e2e_transaction_duplicate_primary_key_commit_fails() { let reader = env.pg_connect().await; set_user_id(&reader, "txn-duplicate-user").await; await_user_shard_leader("txn-duplicate-user").await; - let count = count_rows(&reader, &qualified_table, Some("id = 'dup-1'")) .await; + let count = count_rows(&reader, &qualified_table, Some("id = 'dup-1'")).await; assert_eq!(count, 0, "failed commit should leave no committed rows"); } @@ -131,19 +116,13 @@ async fn e2e_transaction_switching_user_id_keeps_rows_in_separate_user_scopes() same_user_shard_pair("txn-scope-user-a", "txn-scope-user-b").await; let pg = env.pg_connect().await; - create_user_kalam_table( - &pg, - &table, - "id TEXT, name TEXT, age INTEGER", - ) - .await; + create_user_kalam_table(&pg, &table, "id TEXT, name TEXT, age INTEGER").await; await_user_shard_leader(&first_user_id).await; await_user_shard_leader(&second_user_id).await; - let (visible_a_in_tx, visible_b_in_tx) = 
retry_transient_user_leader_error( - "multi-user transaction inflight visibility", - || { + let (visible_a_in_tx, visible_b_in_tx) = + retry_transient_user_leader_error("multi-user transaction inflight visibility", || { let env = &env; let qualified_table = qualified_table.clone(); let first_user_id = first_user_id.clone(); @@ -180,10 +159,7 @@ async fn e2e_transaction_switching_user_id_keeps_rows_in_separate_user_scopes() .await?; let visible_b = tx - .query( - &format!("SELECT id, name FROM {qualified_table} ORDER BY id"), - &[], - ) + .query(&format!("SELECT id, name FROM {qualified_table} ORDER BY id"), &[]) .await? .iter() .map(|row| (row.get::<_, String>(0), row.get::<_, String>(1))) .collect::<Vec<_>>(); @@ -192,10 +168,7 @@ async fn e2e_transaction_switching_user_id_keeps_rows_in_separate_user_scopes() tx.batch_execute(&format!("SET LOCAL kalam.user_id = '{first_user_id}'")) .await?; let visible_a = tx - .query( - &format!("SELECT id, name FROM {qualified_table} ORDER BY id"), - &[], - ) + .query(&format!("SELECT id, name FROM {qualified_table} ORDER BY id"), &[]) .await? .iter() .map(|row| (row.get::<_, String>(0), row.get::<_, String>(1))) .collect::<Vec<_>>(); @@ -204,54 +177,59 @@ async fn e2e_transaction_switching_user_id_keeps_rows_in_separate_user_scopes() tx.commit().await?; Ok((visible_a, visible_b)) } - }, - ) - .await; - - assert_eq!(visible_a_in_tx, vec![ - ("profile-1".to_string(), "Alice".to_string()), - ("profile-2".to_string(), "Ava".to_string()), - ]); - assert_eq!(visible_b_in_tx, vec![ - ("profile-1".to_string(), "Bob".to_string()), - ("profile-2".to_string(), "Bea".to_string()), - ]); + }) + .await; + + assert_eq!( + visible_a_in_tx, + vec![ + ("profile-1".to_string(), "Alice".to_string()), + ("profile-2".to_string(), "Ava".to_string()), + ] + ); + assert_eq!( + visible_b_in_tx, + vec![ + ("profile-1".to_string(), "Bob".to_string()), + ("profile-2".to_string(), "Bea".to_string()), + ] + ); let reader_a = env.pg_connect().await; set_user_id(&reader_a, &first_user_id).await; await_user_shard_leader(&first_user_id).await; let rows_a = reader_a - .query( - &format!("SELECT id, name FROM {qualified_table} ORDER BY id"), - &[], - ) + .query(&format!("SELECT id, name FROM {qualified_table} ORDER BY id"), &[]) .await .expect("query user-a rows"); let visible_a = rows_a .iter() .map(|row| (row.get::<_, String>(0), row.get::<_, String>(1))) .collect::<Vec<_>>(); - assert_eq!(visible_a, vec![ - ("profile-1".to_string(), "Alice".to_string()), - ("profile-2".to_string(), "Ava".to_string()), - ]); + assert_eq!( + visible_a, + vec![ + ("profile-1".to_string(), "Alice".to_string()), + ("profile-2".to_string(), "Ava".to_string()), + ] + ); let reader_b = env.pg_connect().await; set_user_id(&reader_b, &second_user_id).await; await_user_shard_leader(&second_user_id).await; let rows_b = reader_b - .query( - &format!("SELECT id, name FROM {qualified_table} ORDER BY id"), - &[], - ) + .query(&format!("SELECT id, name FROM {qualified_table} ORDER BY id"), &[]) .await .expect("query user-b rows"); let visible_b = rows_b .iter() .map(|row| (row.get::<_, String>(0), row.get::<_, String>(1))) .collect::<Vec<_>>(); - assert_eq!(visible_b, vec![ - ("profile-1".to_string(), "Bob".to_string()), - ("profile-2".to_string(), "Bea".to_string()), - ]); -} \ No newline at end of file + assert_eq!( + visible_b, + vec![ + ("profile-1".to_string(), "Bob".to_string()), + ("profile-2".to_string(), "Bea".to_string()), + ] + ); +} diff --git a/pg/tests/e2e_perf/mod.rs index 5ea8ff162..9d8c26cdd 100644 --- a/pg/tests/e2e_perf/mod.rs +++ 
b/pg/tests/e2e_perf/mod.rs @@ -1,3 +1,4 @@ mod common; +mod payload_sizes; mod stability; mod throughput; diff --git a/pg/tests/e2e_perf/payload_sizes.rs b/pg/tests/e2e_perf/payload_sizes.rs new file mode 100644 index 000000000..ada356aa7 --- /dev/null +++ b/pg/tests/e2e_perf/payload_sizes.rs @@ -0,0 +1,585 @@ +use super::common::{count_rows, create_shared_kalam_table, unique_name, TestEnv}; +use serde_json::Value; +use std::future::Future; +use std::time::Instant; + +const BENCH_ITERATIONS_PER_RUN: usize = 16; +const BENCH_WARMUP_RUNS: usize = 1; +const BENCH_MEASURED_RUNS: usize = 3; +const SELECT_BREAKDOWN_SAMPLES: usize = 8; +const SMALL_ROW_BYTES: usize = 64; +const ONE_KB_ROW_BYTES: usize = 1024; +const TEN_KB_ROW_BYTES: usize = 10 * 1024; +const BENCH_COLUMNS: &str = "id TEXT, payload TEXT, status TEXT, version INTEGER"; + +struct BenchStats { + run_totals_ms: Vec<f64>, + median_total_ms: f64, + median_avg_ms: f64, + min_total_ms: f64, + max_total_ms: f64, +} + +struct SelectBreakdownStats { + pg_id_only_query_median_ms: f64, + pg_full_row_query_median_ms: f64, + pg_decode_median_ms: f64, + api_id_only_http_median_ms: f64, + api_id_only_parse_median_ms: f64, + api_full_row_http_median_ms: f64, + api_full_row_parse_median_ms: f64, +} + +impl BenchStats { + fn from_run_totals(run_totals_ms: Vec<f64>) -> Self { + assert!( + !run_totals_ms.is_empty(), + "bench stats require at least one measured run" + ); + + let median_total_ms = median_ms(&run_totals_ms); + let median_avg_ms = median_total_ms / BENCH_ITERATIONS_PER_RUN as f64; + let min_total_ms = run_totals_ms + .iter() + .copied() + .min_by(f64::total_cmp) + .expect("bench run totals should not be empty"); + let max_total_ms = run_totals_ms + .iter() + .copied() + .max_by(f64::total_cmp) + .expect("bench run totals should not be empty"); + + Self { + run_totals_ms, + median_total_ms, + median_avg_ms, + min_total_ms, + max_total_ms, + } + } +} + +fn total_bench_runs() -> usize { + BENCH_WARMUP_RUNS + BENCH_MEASURED_RUNS +} + +fn bench_seed(run_index: usize, iteration: usize) -> usize { + run_index * BENCH_ITERATIONS_PER_RUN + iteration +} + +fn bench_row_id(prefix: &str, run_index: usize, iteration: usize) -> String { + format!("{prefix}-r{run_index}-i{iteration}") +} + +fn elapsed_ms(started_at: Instant) -> f64 { + started_at.elapsed().as_secs_f64() * 1000.0 +} + +fn median_ms(samples: &[f64]) -> f64 { + assert!(!samples.is_empty(), "median requires at least one sample"); + + let mut sorted = samples.to_vec(); + sorted.sort_by(f64::total_cmp); + let middle = sorted.len() / 2; + + if sorted.len() % 2 == 0 { + (sorted[middle - 1] + sorted[middle]) / 2.0 + } else { + sorted[middle] + } +} + +fn format_run_totals(run_totals_ms: &[f64]) -> String { + run_totals_ms + .iter() + .map(|value| format!("{value:.1}")) + .collect::<Vec<_>>() + .join(", ") +} + +fn sql_row_column_string(result: &Value, column_index: usize) -> Option<String> { + result["results"] + .as_array() + .and_then(|results| results.first()) + .and_then(|entry| entry["rows"].as_array()) + .and_then(|rows| rows.first()) + .and_then(|row| row.as_array()) + .and_then(|columns| columns.get(column_index)) + .and_then(|value| value.as_str().map(ToString::to_string)) +} + +fn benchmark_payload(bytes: usize, seed: usize) -> String { + let base = format!("payload-{seed:04}-"); + let repeated = base.repeat(bytes.div_ceil(base.len())); + repeated[..bytes].to_string() +} + +fn log_benchmark(label: &str, payload_bytes: usize, stats: &BenchStats) { + eprintln!( + "[PERF] {label}: payload={}B warmup_runs={} measured_runs={} iterations/run={} run_totals_ms=[{}] median_total={:.1}ms median_avg={:.2}ms/op range={:.1}..{:.1}ms", + payload_bytes, + BENCH_WARMUP_RUNS, + BENCH_MEASURED_RUNS, + BENCH_ITERATIONS_PER_RUN, + format_run_totals(&stats.run_totals_ms), + stats.median_total_ms, + stats.median_avg_ms, + stats.min_total_ms, + stats.max_total_ms, + ); +} + 
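+// The inferred_* figures below are derived rather than sampled directly: each +// one subtracts one median from another (id-only vs. full-row, PG wire vs. +// HTTP), so they approximate marginal costs instead of reporting measurements.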
+fn log_select_breakdown(label: &str, breakdown: &SelectBreakdownStats) { + let inferred_pg_payload_fetch_ms = + breakdown.pg_full_row_query_median_ms - breakdown.pg_id_only_query_median_ms; + let inferred_api_payload_http_ms = + breakdown.api_full_row_http_median_ms - breakdown.api_id_only_http_median_ms; + let inferred_pg_over_http_ms = + breakdown.pg_full_row_query_median_ms - breakdown.api_full_row_http_median_ms; + + eprintln!( + "[PERF] {label} breakdown: samples={} pg_id_only_query_median={:.2}ms pg_full_row_query_median={:.2}ms pg_decode_median={:.2}ms api_id_only_http_median={:.2}ms api_id_only_parse_median={:.2}ms api_full_row_http_median={:.2}ms api_full_row_parse_median={:.2}ms inferred_pg_payload_fetch={:.2}ms inferred_api_payload_http={:.2}ms inferred_pg_over_http={:.2}ms", + SELECT_BREAKDOWN_SAMPLES, + breakdown.pg_id_only_query_median_ms, + breakdown.pg_full_row_query_median_ms, + breakdown.pg_decode_median_ms, + breakdown.api_id_only_http_median_ms, + breakdown.api_id_only_parse_median_ms, + breakdown.api_full_row_http_median_ms, + breakdown.api_full_row_parse_median_ms, + inferred_pg_payload_fetch_ms, + inferred_api_payload_http_ms, + inferred_pg_over_http_ms, + ); +} + +fn assert_benchmark(label: &str, stats: &BenchStats, max_total_ms: f64, max_avg_ms: f64) { + assert!( + stats.median_total_ms < max_total_ms, + "{label} total {:.1}ms exceeded {:.1}ms", + stats.median_total_ms, + max_total_ms + ); + assert!( + stats.median_avg_ms < max_avg_ms, + "{label} avg {:.2}ms/op exceeded {:.2}ms/op", + stats.median_avg_ms, + max_avg_ms + ); +} + +async fn run_benchmark_rounds<F, Fut>(mut operation: F) -> BenchStats +where + F: FnMut(usize, usize) -> Fut, + Fut: Future<Output = ()>, +{ + for run_index in 0..BENCH_WARMUP_RUNS { + for iteration in 0..BENCH_ITERATIONS_PER_RUN { + operation(run_index, iteration).await; + } + } + + let mut run_totals_ms = Vec::with_capacity(BENCH_MEASURED_RUNS); + for run_index in BENCH_WARMUP_RUNS..total_bench_runs() { + let started_at = Instant::now(); + for iteration in 0..BENCH_ITERATIONS_PER_RUN { + operation(run_index, iteration).await; + } + run_totals_ms.push(elapsed_ms(started_at)); + } + + BenchStats::from_run_totals(run_totals_ms) +} + +async fn seed_rows( + client: &tokio_postgres::Client, + qualified_table: &str, + row_prefix: &str, + payload_bytes: usize, +) { + let insert_stmt = client + .prepare(&format!( + "INSERT INTO {qualified_table} (id, payload, status, version) VALUES ($1, $2, $3, $4)" + )) + .await + .expect("prepare seed insert statement"); + + for run_index in 0..total_bench_runs() { + for iteration in 0..BENCH_ITERATIONS_PER_RUN { + let id = bench_row_id(row_prefix, run_index, iteration); + let payload = benchmark_payload(payload_bytes, bench_seed(run_index, iteration)); + client + .execute(&insert_stmt, &[&id, &payload, &"seeded", &(iteration as i32)]) + .await + .expect("seed benchmark row"); + } + } +} + +async fn measure_select_breakdown( + env: &TestEnv, + pg: &tokio_postgres::Client, + qualified_table: &str, + row_id: &str, + payload_bytes: usize, +) -> SelectBreakdownStats { 
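+ // Each sample interleaves the PG wire path and the raw HTTP SQL path against + // the same seeded row, so the per-path medians are gathered under comparable + // connection and cache conditions.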
.expect("prepare id-only select statement"); + let full_row_stmt = pg + .prepare(&format!( + "SELECT id, payload, status, version FROM {qualified_table} WHERE id = $1" + )) + .await + .expect("prepare full-row select statement"); + let id_only_sql = format!("SELECT id FROM {qualified_table} WHERE id = '{row_id}'"); + let full_row_sql = format!( + "SELECT id, payload, status, version FROM {qualified_table} WHERE id = '{row_id}'" + ); + + let mut pg_id_only_query_ms = Vec::with_capacity(SELECT_BREAKDOWN_SAMPLES); + let mut pg_full_row_query_ms = Vec::with_capacity(SELECT_BREAKDOWN_SAMPLES); + let mut pg_decode_ms = Vec::with_capacity(SELECT_BREAKDOWN_SAMPLES); + let mut api_id_only_http_ms = Vec::with_capacity(SELECT_BREAKDOWN_SAMPLES); + let mut api_id_only_parse_ms = Vec::with_capacity(SELECT_BREAKDOWN_SAMPLES); + let mut api_full_row_http_ms = Vec::with_capacity(SELECT_BREAKDOWN_SAMPLES); + let mut api_full_row_parse_ms = Vec::with_capacity(SELECT_BREAKDOWN_SAMPLES); + + for _ in 0..SELECT_BREAKDOWN_SAMPLES { + let started_at = Instant::now(); + let row = pg + .query_one(&id_only_stmt, &[&row_id]) + .await + .expect("select id-only benchmark row"); + pg_id_only_query_ms.push(elapsed_ms(started_at)); + let selected_id: String = row.get(0); + assert_eq!(selected_id, row_id, "pg id-only row id mismatch"); + + let started_at = Instant::now(); + let row = pg + .query_one(&full_row_stmt, &[&row_id]) + .await + .expect("select full benchmark row"); + pg_full_row_query_ms.push(elapsed_ms(started_at)); + + let started_at = Instant::now(); + let selected_id: String = row.get(0); + let payload: String = row.get(1); + let status: String = row.get(2); + let version: i32 = row.get(3); + pg_decode_ms.push(elapsed_ms(started_at)); + assert_eq!(selected_id, row_id, "pg full-row id mismatch"); + assert_eq!(payload.len(), payload_bytes, "pg full-row payload size mismatch"); + assert_eq!(status, "seeded", "pg full-row status mismatch"); + assert_eq!(version, 0, "pg full-row version mismatch"); + + let started_at = Instant::now(); + let response_text = env.kalamdb_sql_text(&id_only_sql).await; + api_id_only_http_ms.push(elapsed_ms(started_at)); + let started_at = Instant::now(); + let response_value: Value = + serde_json::from_str(&response_text).expect("parse KalamDB id-only SQL response"); + api_id_only_parse_ms.push(elapsed_ms(started_at)); + let selected_id = sql_row_column_string(&response_value, 0) + .expect("KalamDB id-only response should include row id"); + assert_eq!(selected_id, row_id, "KalamDB id-only row id mismatch"); + + let started_at = Instant::now(); + let response_text = env.kalamdb_sql_text(&full_row_sql).await; + api_full_row_http_ms.push(elapsed_ms(started_at)); + let started_at = Instant::now(); + let response_value: Value = + serde_json::from_str(&response_text).expect("parse KalamDB full-row SQL response"); + api_full_row_parse_ms.push(elapsed_ms(started_at)); + let selected_id = sql_row_column_string(&response_value, 0) + .expect("KalamDB full-row response should include row id"); + let payload = sql_row_column_string(&response_value, 1) + .expect("KalamDB full-row response should include payload"); + let status = sql_row_column_string(&response_value, 2) + .expect("KalamDB full-row response should include status"); + assert_eq!(selected_id, row_id, "KalamDB full-row id mismatch"); + assert_eq!(payload.len(), payload_bytes, "KalamDB full-row payload size mismatch"); + assert_eq!(status, "seeded", "KalamDB full-row status mismatch"); + } + + SelectBreakdownStats { + 
pg_id_only_query_median_ms: median_ms(&pg_id_only_query_ms), + pg_full_row_query_median_ms: median_ms(&pg_full_row_query_ms), + pg_decode_median_ms: median_ms(&pg_decode_ms), + api_id_only_http_median_ms: median_ms(&api_id_only_http_ms), + api_id_only_parse_median_ms: median_ms(&api_id_only_parse_ms), + api_full_row_http_median_ms: median_ms(&api_full_row_http_ms), + api_full_row_parse_median_ms: median_ms(&api_full_row_parse_ms), + } +} + +async fn benchmark_insert(label: &str, payload_bytes: usize) -> BenchStats { + let env = TestEnv::global().await; + let pg = env.pg_connect().await; + let client: &tokio_postgres::Client = &pg; + let table = unique_name("perf_pglite_insert"); + let qualified_table = format!("e2e.{table}"); + + create_shared_kalam_table(&pg, &table, BENCH_COLUMNS).await; + + let insert_stmt = pg + .prepare(&format!( + "INSERT INTO {qualified_table} (id, payload, status, version) VALUES ($1, $2, $3, $4)" + )) + .await + .expect("prepare insert statement"); + + let stats = run_benchmark_rounds(|run_index, iteration| { + let id = bench_row_id("insert", run_index, iteration); + let payload = benchmark_payload(payload_bytes, bench_seed(run_index, iteration)); + let insert_stmt = insert_stmt.clone(); + + async move { + client + .execute(&insert_stmt, &[&id, &payload, &"inserted", &(iteration as i32)]) + .await + .expect("insert benchmark row"); + } + }) + .await; + + let count = count_rows(&pg, &qualified_table, None).await; + assert_eq!( + count, + (BENCH_ITERATIONS_PER_RUN * total_bench_runs()) as i64, + "insert benchmark row count mismatch" + ); + + log_benchmark(label, payload_bytes, &stats); + pg.disconnect().await; + stats +} + +async fn benchmark_select(label: &str, payload_bytes: usize) -> BenchStats { + let env = TestEnv::global().await; + let pg = env.pg_connect().await; + let client: &tokio_postgres::Client = &pg; + let table = unique_name("perf_pglite_select"); + let qualified_table = format!("e2e.{table}"); + + create_shared_kalam_table(&pg, &table, BENCH_COLUMNS).await; + seed_rows(&pg, &qualified_table, "select", payload_bytes).await; + + let select_stmt = pg + .prepare(&format!( + "SELECT id, payload, status, version FROM {qualified_table} WHERE id = $1" + )) + .await + .expect("prepare select statement"); + + let stats = run_benchmark_rounds(|run_index, iteration| { + let id = bench_row_id("select", run_index, iteration); + let select_stmt = select_stmt.clone(); + + async move { + let row = client + .query_one(&select_stmt, &[&id]) + .await + .expect("select benchmark row"); + let selected_id: String = row.get(0); + let payload: String = row.get(1); + assert_eq!(selected_id, id, "selected row id mismatch"); + assert_eq!(payload.len(), payload_bytes, "selected payload size mismatch"); + } + }) + .await; + + log_benchmark(label, payload_bytes, &stats); + + if payload_bytes == TEN_KB_ROW_BYTES { + let breakdown = measure_select_breakdown( + env, + &pg, + &qualified_table, + &bench_row_id("select", BENCH_WARMUP_RUNS, 0), + payload_bytes, + ) + .await; + log_select_breakdown(label, &breakdown); + } + + pg.disconnect().await; + stats +} + +async fn benchmark_update(label: &str, payload_bytes: usize) -> BenchStats { + let env = TestEnv::global().await; + let pg = env.pg_connect().await; + let client: &tokio_postgres::Client = &pg; + let table = unique_name("perf_pglite_update"); + let qualified_table = format!("e2e.{table}"); + + create_shared_kalam_table(&pg, &table, BENCH_COLUMNS).await; + seed_rows(&pg, &qualified_table, "update", payload_bytes).await; + + let 
update_stmt = pg + .prepare(&format!( + "UPDATE {qualified_table} SET payload = $1, status = $2, version = $3 WHERE id = $4" + )) + .await + .expect("prepare update statement"); + + let stats = run_benchmark_rounds(|run_index, iteration| { + let id = bench_row_id("update", run_index, iteration); + let next_payload = benchmark_payload(payload_bytes, bench_seed(run_index, iteration) + 10_000); + let update_stmt = update_stmt.clone(); + + async move { + let rows = client + .execute(&update_stmt, &[&next_payload, &"updated", &((iteration + 1) as i32), &id]) + .await + .expect("update benchmark row"); + assert_eq!(rows, 1, "update should affect exactly one row"); + } + }) + .await; + + let row = pg + .query_one( + &format!( + "SELECT payload, status, version FROM {qualified_table} WHERE id = '{}'", + bench_row_id("update", BENCH_WARMUP_RUNS, 0) + ), + &[], + ) + .await + .expect("select updated benchmark row"); + let payload: String = row.get(0); + let status: String = row.get(1); + let version: i32 = row.get(2); + assert_eq!(payload.len(), payload_bytes, "updated payload size mismatch"); + assert_eq!(status, "updated", "updated status mismatch"); + assert_eq!(version, 1, "updated version mismatch"); + + log_benchmark(label, payload_bytes, &stats); + pg.disconnect().await; + stats +} + +async fn benchmark_delete(label: &str, payload_bytes: usize) -> BenchStats { + let env = TestEnv::global().await; + let pg = env.pg_connect().await; + let client: &tokio_postgres::Client = &pg; + let table = unique_name("perf_pglite_delete"); + let qualified_table = format!("e2e.{table}"); + + create_shared_kalam_table(&pg, &table, BENCH_COLUMNS).await; + seed_rows(&pg, &qualified_table, "delete", payload_bytes).await; + + let delete_stmt = pg + .prepare(&format!("DELETE FROM {qualified_table} WHERE id = $1")) + .await + .expect("prepare delete statement"); + + let stats = run_benchmark_rounds(|run_index, iteration| { + let id = bench_row_id("delete", run_index, iteration); + let delete_stmt = delete_stmt.clone(); + + async move { + let rows = client + .execute(&delete_stmt, &[&id]) + .await + .expect("delete benchmark row"); + assert_eq!(rows, 1, "delete should affect exactly one row"); + } + }) + .await; + + let count = count_rows(&pg, &qualified_table, None).await; + assert_eq!(count, 0, "delete benchmark table should be empty"); + + log_benchmark(label, payload_bytes, &stats); + pg.disconnect().await; + stats +} + +#[tokio::test] +#[ntest::timeout(3100)] +async fn e2e_perf_pglite_insert_small_row() { + let stats = benchmark_insert("pglite-style insert small row", SMALL_ROW_BYTES).await; + assert_benchmark("insert small row", &stats, 42.0, 2.60); +} + +#[tokio::test] +#[ntest::timeout(3400)] +async fn e2e_perf_pglite_select_small_row() { + let stats = benchmark_select("pglite-style select small row", SMALL_ROW_BYTES).await; + assert_benchmark("select small row", &stats, 70.0, 4.40); +} + +#[tokio::test] +#[ntest::timeout(3800)] +async fn e2e_perf_pglite_update_small_row() { + let stats = benchmark_update("pglite-style update small row", SMALL_ROW_BYTES).await; + assert_benchmark("update small row", &stats, 110.0, 6.90); +} + +#[tokio::test] +#[ntest::timeout(3900)] +async fn e2e_perf_pglite_delete_small_row() { + let stats = benchmark_delete("pglite-style delete small row", SMALL_ROW_BYTES).await; + assert_benchmark("delete small row", &stats, 105.0, 6.60); +} + +#[tokio::test] +#[ntest::timeout(3100)] +async fn e2e_perf_pglite_insert_1kb_row() { + let stats = benchmark_insert("pglite-style insert 1kb row", 
ONE_KB_ROW_BYTES).await; + assert_benchmark("insert 1kb row", &stats, 48.0, 3.00); +} + +#[tokio::test] +#[ntest::timeout(3900)] +async fn e2e_perf_pglite_select_1kb_row() { + let stats = benchmark_select("pglite-style select 1kb row", ONE_KB_ROW_BYTES).await; + assert_benchmark("select 1kb row", &stats, 72.0, 4.50); +} + +#[tokio::test] +#[ntest::timeout(4000)] +async fn e2e_perf_pglite_update_1kb_row() { + let stats = benchmark_update("pglite-style update 1kb row", ONE_KB_ROW_BYTES).await; + assert_benchmark("update 1kb row", &stats, 125.0, 7.85); +} + +#[tokio::test] +#[ntest::timeout(4100)] +async fn e2e_perf_pglite_delete_1kb_row() { + let stats = benchmark_delete("pglite-style delete 1kb row", ONE_KB_ROW_BYTES).await; + assert_benchmark("delete 1kb row", &stats, 108.0, 6.75); +} + +#[tokio::test] +#[ntest::timeout(3500)] +async fn e2e_perf_pglite_insert_10kb_row() { + let stats = benchmark_insert("pglite-style insert 10kb row", TEN_KB_ROW_BYTES).await; + assert_benchmark("insert 10kb row", &stats, 112.0, 7.00); +} + +#[tokio::test] +#[ntest::timeout(4300)] +async fn e2e_perf_pglite_select_10kb_row() { + let stats = benchmark_select("pglite-style select 10kb row", TEN_KB_ROW_BYTES).await; + assert_benchmark("select 10kb row", &stats, 75.0, 4.70); +} + +#[tokio::test] +#[ntest::timeout(4600)] +async fn e2e_perf_pglite_update_10kb_row() { + let stats = benchmark_update("pglite-style update 10kb row", TEN_KB_ROW_BYTES).await; + assert_benchmark("update 10kb row", &stats, 190.0, 11.90); +} + +#[tokio::test] +#[ntest::timeout(4300)] +async fn e2e_perf_pglite_delete_10kb_row() { + let stats = benchmark_delete("pglite-style delete 10kb row", TEN_KB_ROW_BYTES).await; + assert_benchmark("delete 10kb row", &stats, 128.0, 8.00); +} diff --git a/pg/tests/e2e_perf/stability.rs b/pg/tests/e2e_perf/stability.rs index b1a2f49d0..86ce23bd4 100644 --- a/pg/tests/e2e_perf/stability.rs +++ b/pg/tests/e2e_perf/stability.rs @@ -230,9 +230,7 @@ async fn e2e_perf_multi_session_pg_extension_memory_stays_bounded() { ); for client in clients { - let client = Arc::try_unwrap(client) - .ok() - .expect("multi-session client still shared"); + let client = Arc::try_unwrap(client).ok().expect("multi-session client still shared"); client.disconnect().await; } coordinator.disconnect().await; diff --git a/pg/tests/e2e_perf/throughput.rs b/pg/tests/e2e_perf/throughput.rs index 4668f2332..b75390b0a 100644 --- a/pg/tests/e2e_perf/throughput.rs +++ b/pg/tests/e2e_perf/throughput.rs @@ -139,8 +139,11 @@ async fn e2e_perf_sequential_insert_1k() { sql.push_str("BEGIN;"); for index in 0..TOTAL { use std::fmt::Write; - write!(sql, "INSERT INTO {qualified_table} (id, value) VALUES ('pipe-{index}', {index});") - .unwrap(); + write!( + sql, + "INSERT INTO {qualified_table} (id, value) VALUES ('pipe-{index}', {index});" + ) + .unwrap(); } sql.push_str("COMMIT;"); let start = std::time::Instant::now(); @@ -179,12 +182,15 @@ async fn e2e_perf_scan_5k() { let value_index = batch * BATCH + index; values.push(format!("('scan-{value_index}', 'Title {value_index}', {value_index})")); } - let sql = - format!("INSERT INTO {qualified_table} (id, title, value) VALUES {}", values.join(", ")); + let sql = format!( + "INSERT INTO {qualified_table} (id, title, value) VALUES {}", + values.join(", ") + ); pg.batch_execute(&sql).await.expect("seed insert"); } - let (rows, scan_ms) = timed_query(&pg, &format!("SELECT id, title, value FROM {qualified_table}")).await; + let (rows, scan_ms) = + timed_query(&pg, &format!("SELECT id, title, value FROM 
{qualified_table}")).await; let rows_per_sec = rows.len() as f64 / (scan_ms / 1000.0); eprintln!( @@ -219,8 +225,10 @@ async fn e2e_perf_point_select() { for index in 0..TOTAL { values.push(format!("('pt-{index}', 'data-{index}', {index})")); } - let sql = - format!("INSERT INTO {qualified_table} (id, payload, value) VALUES {}", values.join(", ")); + let sql = format!( + "INSERT INTO {qualified_table} (id, payload, value) VALUES {}", + values.join(", ") + ); pg.batch_execute(&sql).await.expect("seed point table"); let _ = pg @@ -313,9 +321,7 @@ async fn e2e_perf_delete_500() { let start = std::time::Instant::now(); for index in 0..TOTAL { let id = format!("del-{index}"); - pg.execute(&delete_stmt, &[&id]) - .await - .expect("delete row"); + pg.execute(&delete_stmt, &[&id]).await.expect("delete row"); } let delete_ms = start.elapsed().as_secs_f64() * 1000.0; let rows_per_sec = TOTAL as f64 / (delete_ms / 1000.0); @@ -339,12 +345,7 @@ async fn e2e_perf_user_table_insert_scan() { let table = unique_name("perf_user"); let qualified_table = format!("e2e.{table}"); - create_user_kalam_table( - &pg, - &table, - "id TEXT, data TEXT", - ) - .await; + create_user_kalam_table(&pg, &table, "id TEXT, data TEXT").await; set_user_id(&pg, "perf-user-1").await; await_user_shard_leader("perf-user-1").await; @@ -363,7 +364,8 @@ async fn e2e_perf_user_table_insert_scan() { } let insert_ms = start.elapsed().as_secs_f64() * 1000.0; - let (rows, scan_ms) = timed_query(&pg, &format!("SELECT id, data FROM {qualified_table}")).await; + let (rows, scan_ms) = + timed_query(&pg, &format!("SELECT id, data FROM {qualified_table}")).await; eprintln!( "[PERF] User table: INSERT {TOTAL} rows in {insert_ms:.0}ms, SCAN returned {} rows in {scan_ms:.1}ms", @@ -403,8 +405,8 @@ async fn e2e_perf_cross_verify_latency() { &format!("INSERT INTO {qualified_table} (id, value) VALUES ($1, $2)"), &[&id, &(index as i32)], ) - .await - .expect("xv insert"); + .await + .expect("xv insert"); let result = env .kalamdb_sql(&format!("SELECT id, value FROM e2e.{table} WHERE id = '{id}'")) diff --git a/pg/tests/e2e_scenarios/ai_app.rs b/pg/tests/e2e_scenarios/ai_app.rs index 7567f703e..732738bd5 100644 --- a/pg/tests/e2e_scenarios/ai_app.rs +++ b/pg/tests/e2e_scenarios/ai_app.rs @@ -1,6 +1,4 @@ -use super::common::{ - create_shared_kalam_table_in_schema, drop_kalam_tables, unique_name, TestEnv, -}; +use super::common::{create_shared_kalam_table_in_schema, drop_kalam_tables, unique_name, TestEnv}; #[tokio::test] #[ntest::timeout(45000)] diff --git a/pg/tests/e2e_scenarios/iot_agents.rs b/pg/tests/e2e_scenarios/iot_agents.rs index 917075759..afc9819a8 100644 --- a/pg/tests/e2e_scenarios/iot_agents.rs +++ b/pg/tests/e2e_scenarios/iot_agents.rs @@ -1,6 +1,4 @@ -use super::common::{ - create_shared_kalam_table_in_schema, drop_kalam_tables, unique_name, TestEnv, -}; +use super::common::{create_shared_kalam_table_in_schema, drop_kalam_tables, unique_name, TestEnv}; #[tokio::test] #[ntest::timeout(45000)] diff --git a/pg/tests/support/http_client.rs b/pg/tests/support/http_client.rs index 34ac1f53d..bc2fb065d 100644 --- a/pg/tests/support/http_client.rs +++ b/pg/tests/support/http_client.rs @@ -77,12 +77,7 @@ impl TestHttpClient { let status = response.status(); let body = tokio::time::timeout(self.timeout, response.into_body().collect()) .await - .map_err(|_| { - format!( - "reading response from {url} timed out after {:?}", - self.timeout - ) - })? + .map_err(|_| format!("reading response from {url} timed out after {:?}", self.timeout))? 
.map_err(|error| format!("reading response from {url} failed: {error}"))? .to_bytes(); @@ -91,4 +86,4 @@ impl TestHttpClient { body: String::from_utf8_lossy(&body).into_owned(), }) } -} \ No newline at end of file +}