From 073f4f3ac3c520552cb7f4e49e21e7b0d6ec2e17 Mon Sep 17 00:00:00 2001
From: Marco Bambini
Date: Tue, 10 Feb 2026 10:12:04 +0100
Subject: [PATCH 1/2] Added the ability to perform a sync only if a column
 expression is satisfied

---
 .gitignore                            |   3 +-
 src/cloudsync.c                       | 114 +++++++++++++++++++--
 src/cloudsync.h                       |   5 +-
 src/database.h                        |   4 +-
 src/dbutils.c                         |   5 +-
 src/postgresql/cloudsync--1.0.sql     |  12 +++
 src/postgresql/cloudsync_postgresql.c | 119 ++++++++++++++++++++++
 src/postgresql/database_postgresql.c  |  98 ++++++++++++++++--
 src/postgresql/sql_postgresql.c       |   8 ++
 src/sql.h                             |   1 +
 src/sqlite/cloudsync_sqlite.c         |  71 ++++++++++++-
 src/sqlite/database_sqlite.c          | 103 +++++++++++++++----
 src/sqlite/sql_sqlite.c               |   8 ++
 test/postgresql/20_row_filter.sql     | 105 ++++++++++++++++++++
 test/postgresql/full_test.sql         |   1 +
 test/unit.c                           | 137 +++++++++++++++++++++++++-
 16 files changed, 754 insertions(+), 40 deletions(-)
 create mode 100644 test/postgresql/20_row_filter.sql

diff --git a/.gitignore b/.gitignore
index 9d353ea..646d00e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -48,4 +48,5 @@ jniLibs/
 
 # System
 .DS_Store
-Thumbs.db
\ No newline at end of file
+Thumbs.db
+CLAUDE.md

diff --git a/src/cloudsync.c b/src/cloudsync.c
index 92f63ac..d237c6a 100644
--- a/src/cloudsync.c
+++ b/src/cloudsync.c
@@ -1794,13 +1794,104 @@ int cloudsync_commit_alter (cloudsync_context *data, const char *table_name) {
     return rc;
 }
 
+// MARK: - Filter Rewrite -
+
+// Helper: check if an identifier token matches a column name.
+static bool filter_is_column (const char *token, size_t token_len, char **columns, int ncols) {
+    for (int i = 0; i < ncols; ++i) {
+        if (strlen(columns[i]) == token_len && strncmp(token, columns[i], token_len) == 0)
+            return true;
+    }
+    return false;
+}
+
+// Helper: check if character is part of a SQL identifier.
+static bool filter_is_ident_char (char c) {
+    return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
+           (c >= '0' && c <= '9') || c == '_';
+}
+
+// Replace bare column names in a filter expression with prefix-qualified names.
+// E.g., filter="user_id = 42", prefix="NEW", columns=["user_id","id"] → "NEW.\"user_id\" = 42"
+// Matching is done on complete identifier tokens, so a column name never matches part of a longer identifier.
+// Skips content inside single-quoted string literals.
+// Returns a newly allocated string (caller must free with cloudsync_memory_free), or NULL on error.
+char *cloudsync_filter_add_row_prefix (const char *filter, const char *prefix, char **columns, int ncols) {
+    if (!filter || !prefix || !columns || ncols <= 0) return NULL;
+
+    size_t filter_len = strlen(filter);
+    size_t prefix_len = strlen(prefix);
+
+    // Each identifier match grows by at most (prefix_len + 3) bytes.
+    // Worst case: the entire filter is one repeated column reference separated by
+    // single characters, so up to (filter_len / 2) matches. Use a safe upper bound.
+    size_t max_growth = (filter_len / 2 + 1) * (prefix_len + 3);
+    size_t cap = filter_len + max_growth + 64;
+    char *result = (char *)cloudsync_memory_alloc(cap);
+    if (!result) return NULL;
+    size_t out = 0;
+
+    // Single pass: tokenize into identifiers, quoted strings, and everything else.
+ size_t i = 0; + while (i < filter_len) { + // Skip single-quoted string literals verbatim (handle '' escape) + if (filter[i] == '\'') { + result[out++] = filter[i++]; + while (i < filter_len) { + if (filter[i] == '\'') { + result[out++] = filter[i++]; + // '' is an escaped quote — keep going + if (i < filter_len && filter[i] == '\'') { + result[out++] = filter[i++]; + continue; + } + break; // single ' ends the literal + } + result[out++] = filter[i++]; + } + continue; + } + + // Extract identifier token + if (filter_is_ident_char(filter[i])) { + size_t start = i; + while (i < filter_len && filter_is_ident_char(filter[i])) ++i; + size_t token_len = i - start; + + if (filter_is_column(&filter[start], token_len, columns, ncols)) { + // Emit PREFIX."column_name" + memcpy(&result[out], prefix, prefix_len); out += prefix_len; + result[out++] = '.'; + result[out++] = '"'; + memcpy(&result[out], &filter[start], token_len); out += token_len; + result[out++] = '"'; + } else { + // Not a column — copy as-is + memcpy(&result[out], &filter[start], token_len); out += token_len; + } + continue; + } + + // Any other character — copy as-is + result[out++] = filter[i++]; + } + + result[out] = '\0'; + return result; +} + int cloudsync_refill_metatable (cloudsync_context *data, const char *table_name) { cloudsync_table_context *table = table_lookup(data, table_name); if (!table) return DBRES_ERROR; - + dbvm_t *vm = NULL; int64_t db_version = cloudsync_dbversion_next(data, CLOUDSYNC_VALUE_NOTSET); + // Read row-level filter from settings (if any) + char filter_buf[2048]; + int frc = dbutils_table_settings_get_value(data, table_name, "*", "filter", filter_buf, sizeof(filter_buf)); + const char *filter = (frc == DBRES_OK && filter_buf[0]) ? filter_buf : NULL; + const char *schema = table->schema ? table->schema : ""; char *sql = sql_build_pk_collist_query(schema, table_name); char *pkclause_identifiers = NULL; @@ -1810,18 +1901,22 @@ int cloudsync_refill_metatable (cloudsync_context *data, const char *table_name) char *pkvalues_identifiers = (pkclause_identifiers) ? pkclause_identifiers : "rowid"; // Use database-specific query builder to handle type differences in composite PKs - sql = sql_build_insert_missing_pks_query(schema, table_name, pkvalues_identifiers, table->base_ref, table->meta_ref); + sql = sql_build_insert_missing_pks_query(schema, table_name, pkvalues_identifiers, table->base_ref, table->meta_ref, filter); if (!sql) {rc = DBRES_NOMEM; goto finalize;} rc = database_exec(data, sql); cloudsync_memory_free(sql); if (rc != DBRES_OK) goto finalize; - + // fill missing colums // for each non-pk column: // The new query does 1 encode per source row and one indexed NOT-EXISTS probe. - // The old plan does many decodes per candidate and can’t use an index to rule out matches quickly—so it burns CPU and I/O. - - sql = cloudsync_memory_mprintf(SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL, pkvalues_identifiers, table->base_ref, table->meta_ref); + // The old plan does many decodes per candidate and can't use an index to rule out matches quickly—so it burns CPU and I/O. 
+ + if (filter) { + sql = cloudsync_memory_mprintf(SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL_FILTERED, pkvalues_identifiers, table->base_ref, filter, table->meta_ref); + } else { + sql = cloudsync_memory_mprintf(SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL, pkvalues_identifiers, table->base_ref, table->meta_ref); + } rc = databasevm_prepare(data, sql, (void **)&vm, DBFLAG_PERSISTENT); cloudsync_memory_free(sql); if (rc != DBRES_OK) goto finalize; @@ -2723,8 +2818,13 @@ int cloudsync_init_table (cloudsync_context *data, const char *table_name, const // sync algo with table (unused in this version) // cloudsync_sync_table_key(data, table_name, "*", CLOUDSYNC_KEY_ALGO, crdt_algo_name(algo_new)); + // read row-level filter from settings (if any) + char init_filter_buf[2048]; + int init_frc = dbutils_table_settings_get_value(data, table_name, "*", "filter", init_filter_buf, sizeof(init_filter_buf)); + const char *init_filter = (init_frc == DBRES_OK && init_filter_buf[0]) ? init_filter_buf : NULL; + // check triggers - rc = database_create_triggers(data, table_name, algo_new); + rc = database_create_triggers(data, table_name, algo_new, init_filter); if (rc != DBRES_OK) return cloudsync_set_error(data, "An error occurred while creating triggers", DBRES_MISUSE); // check meta-table diff --git a/src/cloudsync.h b/src/cloudsync.h index 1c68f1f..c882057 100644 --- a/src/cloudsync.h +++ b/src/cloudsync.h @@ -17,7 +17,7 @@ extern "C" { #endif -#define CLOUDSYNC_VERSION "0.9.101" +#define CLOUDSYNC_VERSION "0.9.110" #define CLOUDSYNC_MAX_TABLENAME_LEN 512 #define CLOUDSYNC_VALUE_NOTSET -1 @@ -121,6 +121,9 @@ int local_update_move_meta (cloudsync_table_context *table, const char *pk, size int merge_insert_col (cloudsync_context *data, cloudsync_table_context *table, const char *pk, int pklen, const char *col_name, dbvalue_t *col_value, int64_t col_version, int64_t db_version, const char *site_id, int site_len, int64_t seq, int64_t *rowid); int merge_insert (cloudsync_context *data, cloudsync_table_context *table, const char *insert_pk, int insert_pk_len, int64_t insert_cl, const char *insert_name, dbvalue_t *insert_value, int64_t insert_col_version, int64_t insert_db_version, const char *insert_site_id, int insert_site_id_len, int64_t insert_seq, int64_t *rowid); +// filter rewrite +char *cloudsync_filter_add_row_prefix(const char *filter, const char *prefix, char **columns, int ncols); + // decode bind context char *cloudsync_pk_context_tbl (cloudsync_pk_decode_bind_context *ctx, int64_t *tbl_len); void *cloudsync_pk_context_pk (cloudsync_pk_decode_bind_context *ctx, int64_t *pk_len); diff --git a/src/database.h b/src/database.h index 09531b9..f5324a3 100644 --- a/src/database.h +++ b/src/database.h @@ -70,7 +70,7 @@ bool database_table_exists (cloudsync_context *data, const char *table_name, con bool database_internal_table_exists (cloudsync_context *data, const char *name); bool database_trigger_exists (cloudsync_context *data, const char *table_name); int database_create_metatable (cloudsync_context *data, const char *table_name); -int database_create_triggers (cloudsync_context *data, const char *table_name, table_algo algo); +int database_create_triggers (cloudsync_context *data, const char *table_name, table_algo algo, const char *filter); int database_delete_triggers (cloudsync_context *data, const char *table_name); int database_pk_names (cloudsync_context *data, const char *table_name, char ***names, int *count); int database_cleanup (cloudsync_context *data); @@ -148,7 +148,7 @@ char 
*sql_build_delete_cols_not_in_schema_query(const char *schema, const char * char *sql_build_pk_collist_query(const char *schema, const char *table_name); char *sql_build_pk_decode_selectlist_query(const char *schema, const char *table_name); char *sql_build_pk_qualified_collist_query(const char *schema, const char *table_name); -char *sql_build_insert_missing_pks_query(const char *schema, const char *table_name, const char *pkvalues_identifiers, const char *base_ref, const char *meta_ref); +char *sql_build_insert_missing_pks_query(const char *schema, const char *table_name, const char *pkvalues_identifiers, const char *base_ref, const char *meta_ref, const char *filter); char *database_table_schema(const char *table_name); char *database_build_meta_ref(const char *schema, const char *table_name); diff --git a/src/dbutils.c b/src/dbutils.c index 15f76ba..5188e69 100644 --- a/src/dbutils.c +++ b/src/dbutils.c @@ -363,7 +363,10 @@ int dbutils_settings_table_load_callback (void *xdata, int ncols, char **values, if (strcmp(key, "algo")!=0) continue; table_algo algo = cloudsync_algo_from_name(value); - if (database_create_triggers(data, table_name, algo) != DBRES_OK) return DBRES_MISUSE; + char fbuf[2048]; + int frc = dbutils_table_settings_get_value(data, table_name, "*", "filter", fbuf, sizeof(fbuf)); + const char *filt = (frc == DBRES_OK && fbuf[0]) ? fbuf : NULL; + if (database_create_triggers(data, table_name, algo, filt) != DBRES_OK) return DBRES_MISUSE; if (table_add_to_context(data, algo, table_name) == false) return DBRES_MISUSE; DEBUG_SETTINGS("load tbl_name: %s value: %s", key, value); diff --git a/src/postgresql/cloudsync--1.0.sql b/src/postgresql/cloudsync--1.0.sql index 7d4517c..0874667 100644 --- a/src/postgresql/cloudsync--1.0.sql +++ b/src/postgresql/cloudsync--1.0.sql @@ -102,6 +102,18 @@ RETURNS boolean AS 'MODULE_PATHNAME', 'cloudsync_set_table' LANGUAGE C VOLATILE; +-- Set row-level filter for conditional sync +CREATE OR REPLACE FUNCTION cloudsync_set_filter(table_name text, filter_expr text) +RETURNS boolean +AS 'MODULE_PATHNAME', 'cloudsync_set_filter' +LANGUAGE C VOLATILE; + +-- Clear row-level filter +CREATE OR REPLACE FUNCTION cloudsync_clear_filter(table_name text) +RETURNS boolean +AS 'MODULE_PATHNAME', 'cloudsync_clear_filter' +LANGUAGE C VOLATILE; + -- Set column-level configuration CREATE OR REPLACE FUNCTION cloudsync_set_column(table_name text, column_name text, key text, value text) RETURNS boolean diff --git a/src/postgresql/cloudsync_postgresql.c b/src/postgresql/cloudsync_postgresql.c index f2200dd..f6458c5 100644 --- a/src/postgresql/cloudsync_postgresql.c +++ b/src/postgresql/cloudsync_postgresql.c @@ -610,6 +610,125 @@ Datum cloudsync_set_column (PG_FUNCTION_ARGS) { PG_RETURN_BOOL(true); } +// MARK: - Row Filter - + +// cloudsync_set_filter - Set a row-level filter for conditional sync +PG_FUNCTION_INFO_V1(cloudsync_set_filter); +Datum cloudsync_set_filter (PG_FUNCTION_ARGS) { + if (PG_ARGISNULL(0) || PG_ARGISNULL(1)) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cloudsync_set_filter: table and filter expression required"))); + } + + const char *tbl = text_to_cstring(PG_GETARG_TEXT_PP(0)); + const char *filter_expr = text_to_cstring(PG_GETARG_TEXT_PP(1)); + + cloudsync_context *data = get_cloudsync_context(); + bool spi_connected = false; + + int spi_rc = SPI_connect(); + if (spi_rc != SPI_OK_CONNECT) { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("SPI_connect failed: %d", spi_rc))); + } + spi_connected = true; + + 
PG_TRY(); + { + // Store filter in table settings + dbutils_table_settings_set_key_value(data, tbl, "*", "filter", filter_expr); + + // Read current algo + table_algo algo = dbutils_table_settings_get_algo(data, tbl); + if (algo == table_algo_none) algo = table_algo_crdt_cls; + + // Drop triggers + database_delete_triggers(data, tbl); + + // Reconnect SPI so that the catalog changes from DROP are visible + SPI_finish(); + spi_connected = false; + spi_rc = SPI_connect(); + if (spi_rc != SPI_OK_CONNECT) { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("SPI_connect failed: %d", spi_rc))); + } + spi_connected = true; + + // Recreate triggers with filter + int rc = database_create_triggers(data, tbl, algo, filter_expr); + if (rc != DBRES_OK) { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("cloudsync_set_filter: error recreating triggers"))); + } + } + PG_CATCH(); + { + if (spi_connected) SPI_finish(); + PG_RE_THROW(); + } + PG_END_TRY(); + + if (spi_connected) SPI_finish(); + PG_RETURN_BOOL(true); +} + +// cloudsync_clear_filter - Remove the row-level filter for a table +PG_FUNCTION_INFO_V1(cloudsync_clear_filter); +Datum cloudsync_clear_filter (PG_FUNCTION_ARGS) { + if (PG_ARGISNULL(0)) { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cloudsync_clear_filter: table name required"))); + } + + const char *tbl = text_to_cstring(PG_GETARG_TEXT_PP(0)); + + cloudsync_context *data = get_cloudsync_context(); + bool spi_connected = false; + + int spi_rc = SPI_connect(); + if (spi_rc != SPI_OK_CONNECT) { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("SPI_connect failed: %d", spi_rc))); + } + spi_connected = true; + + PG_TRY(); + { + // Remove filter from settings + dbutils_table_settings_set_key_value(data, tbl, "*", "filter", NULL); + + // Read current algo + table_algo algo = dbutils_table_settings_get_algo(data, tbl); + if (algo == table_algo_none) algo = table_algo_crdt_cls; + + // Drop triggers + database_delete_triggers(data, tbl); + + // Reconnect SPI so that the catalog changes from DROP are visible + SPI_finish(); + spi_connected = false; + spi_rc = SPI_connect(); + if (spi_rc != SPI_OK_CONNECT) { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("SPI_connect failed: %d", spi_rc))); + } + spi_connected = true; + + // Recreate triggers without filter + int rc = database_create_triggers(data, tbl, algo, NULL); + if (rc != DBRES_OK) { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("cloudsync_clear_filter: error recreating triggers"))); + } + } + PG_CATCH(); + { + if (spi_connected) SPI_finish(); + PG_RE_THROW(); + } + PG_END_TRY(); + + if (spi_connected) SPI_finish(); + PG_RETURN_BOOL(true); +} + // MARK: - Schema Alteration - // cloudsync_begin_alter - Begin schema alteration diff --git a/src/postgresql/database_postgresql.c b/src/postgresql/database_postgresql.c index b984deb..10ba5b5 100644 --- a/src/postgresql/database_postgresql.c +++ b/src/postgresql/database_postgresql.c @@ -383,7 +383,8 @@ char *sql_build_pk_qualified_collist_query (const char *schema, const char *tabl char *sql_build_insert_missing_pks_query(const char *schema, const char *table_name, const char *pkvalues_identifiers, - const char *base_ref, const char *meta_ref) { + const char *base_ref, const char *meta_ref, + const char *filter) { UNUSED_PARAMETER(schema); char esc_table[1024]; @@ -398,6 +399,16 @@ char *sql_build_insert_missing_pks_query(const char *schema, const char *table_n // // Example: cloudsync_insert('table', col1, col2) where 
col1=TEXT, col2=INTEGER // PostgreSQL's VARIADIC handling preserves each type and matches SQLite's encoding. + if (filter) { + return cloudsync_memory_mprintf( + "SELECT cloudsync_insert('%s', %s) " + "FROM %s b " + "WHERE (%s) AND NOT EXISTS (" + " SELECT 1 FROM %s m WHERE m.pk = cloudsync_pk_encode(%s)" + ");", + esc_table, pkvalues_identifiers, base_ref, filter, meta_ref, pkvalues_identifiers + ); + } return cloudsync_memory_mprintf( "SELECT cloudsync_insert('%s', %s) " "FROM %s b " @@ -1503,7 +1514,75 @@ static int database_create_delete_trigger_internal (cloudsync_context *data, con return rc; } -int database_create_triggers (cloudsync_context *data, const char *table_name, table_algo algo) { +// Build trigger WHEN clauses, optionally incorporating a row-level filter. +// INSERT/UPDATE use NEW-prefixed filter, DELETE uses OLD-prefixed filter. +static void database_build_trigger_when( + cloudsync_context *data, const char *table_name, const char *filter, + const char *schema, + char *when_new, size_t when_new_size, + char *when_old, size_t when_old_size) +{ + char *new_filter_str = NULL; + char *old_filter_str = NULL; + + if (filter) { + const char *schema_param = (schema && schema[0]) ? schema : ""; + char esc_tbl[1024], esc_schema[1024]; + sql_escape_literal(table_name, esc_tbl, sizeof(esc_tbl)); + sql_escape_literal(schema_param, esc_schema, sizeof(esc_schema)); + + char col_sql[2048]; + snprintf(col_sql, sizeof(col_sql), + "SELECT column_name::text FROM information_schema.columns " + "WHERE table_name = '%s' AND table_schema = COALESCE(NULLIF('%s', ''), current_schema()) " + "ORDER BY ordinal_position;", + esc_tbl, esc_schema); + + char *col_names[256]; + int ncols = 0; + + dbvm_t *col_vm = NULL; + int crc = databasevm_prepare(data, col_sql, &col_vm, 0); + if (crc == DBRES_OK) { + while (databasevm_step(col_vm) == DBRES_ROW && ncols < 256) { + const char *name = database_column_text(col_vm, 0); + if (name) col_names[ncols++] = cloudsync_memory_mprintf("%s", name); + } + databasevm_finalize(col_vm); + } + + if (ncols > 0) { + new_filter_str = cloudsync_filter_add_row_prefix(filter, "NEW", col_names, ncols); + old_filter_str = cloudsync_filter_add_row_prefix(filter, "OLD", col_names, ncols); + for (int i = 0; i < ncols; ++i) cloudsync_memory_free(col_names[i]); + } + } + + if (new_filter_str) { + snprintf(when_new, when_new_size, + "FOR EACH ROW WHEN (cloudsync_is_sync('%s') = false AND (%s))", + table_name, new_filter_str); + } else { + snprintf(when_new, when_new_size, + "FOR EACH ROW WHEN (cloudsync_is_sync('%s') = false)", + table_name); + } + + if (old_filter_str) { + snprintf(when_old, when_old_size, + "FOR EACH ROW WHEN (cloudsync_is_sync('%s') = false AND (%s))", + table_name, old_filter_str); + } else { + snprintf(when_old, when_old_size, + "FOR EACH ROW WHEN (cloudsync_is_sync('%s') = false)", + table_name); + } + + if (new_filter_str) cloudsync_memory_free(new_filter_str); + if (old_filter_str) cloudsync_memory_free(old_filter_str); +} + +int database_create_triggers (cloudsync_context *data, const char *table_name, table_algo algo, const char *filter) { if (!table_name) return DBRES_MISUSE; // Detect schema from metadata table if it exists, otherwise use cloudsync_schema() @@ -1511,12 +1590,13 @@ int database_create_triggers (cloudsync_context *data, const char *table_name, t char *detected_schema = database_table_schema(table_name); const char *schema = detected_schema ? 
detected_schema : cloudsync_schema(data); - char trigger_when[1024]; - snprintf(trigger_when, sizeof(trigger_when), - "FOR EACH ROW WHEN (cloudsync_is_sync('%s') = false)", - table_name); + char trigger_when_new[4096]; + char trigger_when_old[4096]; + database_build_trigger_when(data, table_name, filter, schema, + trigger_when_new, sizeof(trigger_when_new), + trigger_when_old, sizeof(trigger_when_old)); - int rc = database_create_insert_trigger_internal(data, table_name, trigger_when, schema); + int rc = database_create_insert_trigger_internal(data, table_name, trigger_when_new, schema); if (rc != DBRES_OK) { if (detected_schema) cloudsync_memory_free(detected_schema); return rc; @@ -1525,7 +1605,7 @@ int database_create_triggers (cloudsync_context *data, const char *table_name, t if (algo == table_algo_crdt_gos) { rc = database_create_update_trigger_gos_internal(data, table_name, schema); } else { - rc = database_create_update_trigger_internal(data, table_name, trigger_when, schema); + rc = database_create_update_trigger_internal(data, table_name, trigger_when_new, schema); } if (rc != DBRES_OK) { if (detected_schema) cloudsync_memory_free(detected_schema); @@ -1535,7 +1615,7 @@ int database_create_triggers (cloudsync_context *data, const char *table_name, t if (algo == table_algo_crdt_gos) { rc = database_create_delete_trigger_gos_internal(data, table_name, schema); } else { - rc = database_create_delete_trigger_internal(data, table_name, trigger_when, schema); + rc = database_create_delete_trigger_internal(data, table_name, trigger_when_old, schema); } if (detected_schema) cloudsync_memory_free(detected_schema); diff --git a/src/postgresql/sql_postgresql.c b/src/postgresql/sql_postgresql.c index 9171c7b..fb9ff8c 100644 --- a/src/postgresql/sql_postgresql.c +++ b/src/postgresql/sql_postgresql.c @@ -400,3 +400,11 @@ const char * const SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL = "SELECT 1 FROM %s _cstemp2 " "WHERE _cstemp2.pk = _cstemp1.pk AND _cstemp2.col_name = $1" ");"; + +const char * const SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL_FILTERED = + "WITH _cstemp1 AS (SELECT cloudsync_pk_encode(%s) AS pk FROM %s WHERE (%s)) " + "SELECT _cstemp1.pk FROM _cstemp1 " + "WHERE NOT EXISTS (" + "SELECT 1 FROM %s _cstemp2 " + "WHERE _cstemp2.pk = _cstemp1.pk AND _cstemp2.col_name = $1" + ");"; diff --git a/src/sql.h b/src/sql.h index 2536978..7c14988 100644 --- a/src/sql.h +++ b/src/sql.h @@ -64,6 +64,7 @@ extern const char * const SQL_PRAGMA_TABLEINFO_PK_COLLIST; extern const char * const SQL_PRAGMA_TABLEINFO_PK_DECODE_SELECTLIST; extern const char * const SQL_CLOUDSYNC_INSERT_MISSING_PKS_FROM_BASE_EXCEPT_SYNC; extern const char * const SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL; +extern const char * const SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL_FILTERED; extern const char * const SQL_CHANGES_INSERT_ROW; #endif diff --git a/src/sqlite/cloudsync_sqlite.c b/src/sqlite/cloudsync_sqlite.c index 0f34daa..556ce08 100644 --- a/src/sqlite/cloudsync_sqlite.c +++ b/src/sqlite/cloudsync_sqlite.c @@ -915,6 +915,69 @@ int dbsync_register_aggregate (sqlite3 *db, const char *name, void (*xstep)(sqli return dbsync_register(db, name, NULL, xstep, xfinal, nargs, pzErrMsg, ctx, ctx_free); } +// MARK: - Row Filter - + +void dbsync_set_filter (sqlite3_context *context, int argc, sqlite3_value **argv) { + DEBUG_FUNCTION("cloudsync_set_filter"); + + const char *tbl = (const char *)database_value_text(argv[0]); + const char *filter_expr = (const char *)database_value_text(argv[1]); + if (!tbl || 
!filter_expr) { + dbsync_set_error(context, "cloudsync_set_filter: table and filter expression required"); + return; + } + + cloudsync_context *data = (cloudsync_context *)sqlite3_user_data(context); + + // Store filter in table settings + dbutils_table_settings_set_key_value(data, tbl, "*", "filter", filter_expr); + + // Read current algo + table_algo algo = dbutils_table_settings_get_algo(data, tbl); + if (algo == table_algo_none) algo = table_algo_crdt_cls; + + // Drop and recreate triggers with the filter + database_delete_triggers(data, tbl); + int rc = database_create_triggers(data, tbl, algo, filter_expr); + if (rc != DBRES_OK) { + dbsync_set_error(context, "cloudsync_set_filter: error recreating triggers"); + sqlite3_result_error_code(context, rc); + return; + } + + sqlite3_result_int(context, 1); +} + +void dbsync_clear_filter (sqlite3_context *context, int argc, sqlite3_value **argv) { + DEBUG_FUNCTION("cloudsync_clear_filter"); + + const char *tbl = (const char *)database_value_text(argv[0]); + if (!tbl) { + dbsync_set_error(context, "cloudsync_clear_filter: table name required"); + return; + } + + cloudsync_context *data = (cloudsync_context *)sqlite3_user_data(context); + + // Remove filter from table settings (set to NULL/empty) + dbutils_table_settings_set_key_value(data, tbl, "*", "filter", NULL); + + // Read current algo + table_algo algo = dbutils_table_settings_get_algo(data, tbl); + if (algo == table_algo_none) algo = table_algo_crdt_cls; + + // Drop and recreate triggers without filter + database_delete_triggers(data, tbl); + int rc = database_create_triggers(data, tbl, algo, NULL); + if (rc != DBRES_OK) { + dbsync_set_error(context, "cloudsync_clear_filter: error recreating triggers"); + sqlite3_result_error_code(context, rc); + return; + } + + sqlite3_result_int(context, 1); +} + int dbsync_register_functions (sqlite3 *db, char **pzErrMsg) { int rc = SQLITE_OK; @@ -968,7 +1031,13 @@ int dbsync_register_functions (sqlite3 *db, char **pzErrMsg) { rc = dbsync_register_function(db, "cloudsync_set_table", dbsync_set_table, 3, pzErrMsg, ctx, NULL); if (rc != SQLITE_OK) return rc; - + + rc = dbsync_register_function(db, "cloudsync_set_filter", dbsync_set_filter, 2, pzErrMsg, ctx, NULL); + if (rc != SQLITE_OK) return rc; + + rc = dbsync_register_function(db, "cloudsync_clear_filter", dbsync_clear_filter, 1, pzErrMsg, ctx, NULL); + if (rc != SQLITE_OK) return rc; + rc = dbsync_register_function(db, "cloudsync_set_schema", dbsync_set_schema, 1, pzErrMsg, ctx, NULL); if (rc != SQLITE_OK) return rc; diff --git a/src/sqlite/database_sqlite.c b/src/sqlite/database_sqlite.c index 0e9c827..bff4962 100644 --- a/src/sqlite/database_sqlite.c +++ b/src/sqlite/database_sqlite.c @@ -247,11 +247,22 @@ char *sql_build_pk_qualified_collist_query (const char *schema, const char *tabl char *sql_build_insert_missing_pks_query(const char *schema, const char *table_name, const char *pkvalues_identifiers, - const char *base_ref, const char *meta_ref) { + const char *base_ref, const char *meta_ref, + const char *filter) { UNUSED_PARAMETER(schema); // SQLite: Use NOT EXISTS with cloudsync_pk_encode (same approach as PostgreSQL). // This avoids needing pk_decode select list which requires executing a query. 
+ if (filter) { + return cloudsync_memory_mprintf( + "SELECT cloudsync_insert('%q', %s) " + "FROM \"%w\" " + "WHERE (%s) AND NOT EXISTS (" + " SELECT 1 FROM \"%w\" WHERE pk = cloudsync_pk_encode(%s)" + ");", + table_name, pkvalues_identifiers, base_ref, filter, meta_ref, pkvalues_identifiers + ); + } return cloudsync_memory_mprintf( "SELECT cloudsync_insert('%q', %s) " "FROM \"%w\" " @@ -712,31 +723,89 @@ int database_create_delete_trigger (cloudsync_context *data, const char *table_n return rc; } -int database_create_triggers (cloudsync_context *data, const char *table_name, table_algo algo) { +// Build trigger WHEN clauses, optionally incorporating a row-level filter. +// INSERT/UPDATE use NEW-prefixed filter, DELETE uses OLD-prefixed filter. +static void database_build_trigger_when( + cloudsync_context *data, const char *table_name, const char *filter, + char *when_new, size_t when_new_size, + char *when_old, size_t when_old_size) +{ + char *new_filter_str = NULL; + char *old_filter_str = NULL; + + if (filter) { + char sql_cols[1024]; + sqlite3_snprintf(sizeof(sql_cols), sql_cols, + "SELECT name FROM pragma_table_info('%q') ORDER BY cid;", table_name); + + char *col_names[256]; + int ncols = 0; + + sqlite3_stmt *col_vm = NULL; + int col_rc = sqlite3_prepare_v2((sqlite3 *)cloudsync_db(data), sql_cols, -1, &col_vm, NULL); + if (col_rc == SQLITE_OK) { + while (sqlite3_step(col_vm) == SQLITE_ROW && ncols < 256) { + const char *name = (const char *)sqlite3_column_text(col_vm, 0); + if (name) col_names[ncols++] = cloudsync_memory_mprintf("%s", name); + } + sqlite3_finalize(col_vm); + } + + if (ncols > 0) { + new_filter_str = cloudsync_filter_add_row_prefix(filter, "NEW", col_names, ncols); + old_filter_str = cloudsync_filter_add_row_prefix(filter, "OLD", col_names, ncols); + for (int i = 0; i < ncols; ++i) cloudsync_memory_free(col_names[i]); + } + } + + if (new_filter_str) { + sqlite3_snprintf((int)when_new_size, when_new, + "FOR EACH ROW WHEN cloudsync_is_sync('%q') = 0 AND (%s)", table_name, new_filter_str); + } else { + sqlite3_snprintf((int)when_new_size, when_new, + "FOR EACH ROW WHEN cloudsync_is_sync('%q') = 0", table_name); + } + + if (old_filter_str) { + sqlite3_snprintf((int)when_old_size, when_old, + "FOR EACH ROW WHEN cloudsync_is_sync('%q') = 0 AND (%s)", table_name, old_filter_str); + } else { + sqlite3_snprintf((int)when_old_size, when_old, + "FOR EACH ROW WHEN cloudsync_is_sync('%q') = 0", table_name); + } + + if (new_filter_str) cloudsync_memory_free(new_filter_str); + if (old_filter_str) cloudsync_memory_free(old_filter_str); +} + +int database_create_triggers (cloudsync_context *data, const char *table_name, table_algo algo, const char *filter) { DEBUG_DBFUNCTION("dbutils_check_triggers %s", table); - + if (dbutils_settings_check_version(data, "0.8.25") <= 0) { database_delete_triggers(data, table_name); } - - // common part - char buffer1[1024]; - char *trigger_when = sqlite3_snprintf(sizeof(buffer1), buffer1, "FOR EACH ROW WHEN cloudsync_is_sync('%q') = 0", table_name); - - // INSERT TRIGGER - int rc = database_create_insert_trigger(data, table_name, trigger_when); + + char trigger_when_new[4096]; + char trigger_when_old[4096]; + database_build_trigger_when(data, table_name, filter, + trigger_when_new, sizeof(trigger_when_new), + trigger_when_old, sizeof(trigger_when_old)); + + // INSERT TRIGGER (uses NEW prefix) + int rc = database_create_insert_trigger(data, table_name, trigger_when_new); if (rc != SQLITE_OK) return rc; - - // UPDATE TRIGGER + + // UPDATE TRIGGER (uses 
NEW prefix) if (algo == table_algo_crdt_gos) rc = database_create_update_trigger_gos(data, table_name); - else rc = database_create_update_trigger(data, table_name, trigger_when); + else rc = database_create_update_trigger(data, table_name, trigger_when_new); if (rc != SQLITE_OK) return rc; - - // DELETE TRIGGER + + // DELETE TRIGGER (uses OLD prefix) if (algo == table_algo_crdt_gos) rc = database_create_delete_trigger_gos(data, table_name); - else rc = database_create_delete_trigger(data, table_name, trigger_when); - + else rc = database_create_delete_trigger(data, table_name, trigger_when_old); + if (rc != SQLITE_OK) DEBUG_ALWAYS("database_create_triggers error %s (%d)", sqlite3_errmsg(cloudsync_db(data)), rc); + return rc; } diff --git a/src/sqlite/sql_sqlite.c b/src/sqlite/sql_sqlite.c index 9688245..09f96fe 100644 --- a/src/sqlite/sql_sqlite.c +++ b/src/sqlite/sql_sqlite.c @@ -265,6 +265,14 @@ const char * const SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL = "WHERE _cstemp2.pk = _cstemp1.pk AND _cstemp2.col_name = ?" ");"; +const char * const SQL_CLOUDSYNC_SELECT_PKS_NOT_IN_SYNC_FOR_COL_FILTERED = + "WITH _cstemp1 AS (SELECT cloudsync_pk_encode(%s) AS pk FROM \"%w\" WHERE (%s)) " + "SELECT _cstemp1.pk FROM _cstemp1 " + "WHERE NOT EXISTS (" + "SELECT 1 FROM \"%w\" _cstemp2 " + "WHERE _cstemp2.pk = _cstemp1.pk AND _cstemp2.col_name = ?" + ");"; + const char * const SQL_CHANGES_INSERT_ROW = "INSERT INTO cloudsync_changes(tbl, pk, col_name, col_value, col_version, db_version, site_id, cl, seq) " "VALUES (?,?,?,?,?,?,?,?,?);"; diff --git a/test/postgresql/20_row_filter.sql b/test/postgresql/20_row_filter.sql new file mode 100644 index 0000000..22b4acf --- /dev/null +++ b/test/postgresql/20_row_filter.sql @@ -0,0 +1,105 @@ +-- 'Row-level filter (conditional sync) test' + +\set testid '20' +\ir helper_test_init.sql + +-- Create first database +\connect postgres +\ir helper_psql_conn_setup.sql +DROP DATABASE IF EXISTS cloudsync_test_1; +CREATE DATABASE cloudsync_test_1; + +\connect cloudsync_test_1 +\ir helper_psql_conn_setup.sql +CREATE EXTENSION IF NOT EXISTS cloudsync; + +-- Create table, init, set filter +CREATE TABLE tasks (id TEXT PRIMARY KEY NOT NULL, title TEXT, user_id INTEGER); +SELECT cloudsync_init('tasks') AS _init_site_id_a \gset +SELECT cloudsync_set_filter('tasks', 'user_id = 1') AS _set_filter_ok \gset + +-- Insert matching rows (user_id = 1) and non-matching rows (user_id = 2) +INSERT INTO tasks VALUES ('a', 'Task A', 1); +INSERT INTO tasks VALUES ('b', 'Task B', 2); +INSERT INTO tasks VALUES ('c', 'Task C', 1); + +-- Test 1: Verify only matching rows are tracked in _cloudsync metadata +SELECT COUNT(DISTINCT pk) AS meta_pk_count FROM tasks_cloudsync \gset +SELECT (:meta_pk_count = 2) AS filter_insert_ok \gset +\if :filter_insert_ok +\echo [PASS] (:testid) Only matching rows tracked after INSERT (2 of 3) +\else +\echo [FAIL] (:testid) Expected 2 tracked PKs after INSERT, got :meta_pk_count +SELECT (:fail::int + 1) AS fail \gset +\endif + +-- Test 2: Update non-matching row → no metadata change +SELECT COUNT(*) AS meta_before FROM tasks_cloudsync \gset +UPDATE tasks SET title = 'Task B Updated' WHERE id = 'b'; +SELECT COUNT(*) AS meta_after FROM tasks_cloudsync \gset +SELECT (:meta_before = :meta_after) AS filter_update_nonmatch_ok \gset +\if :filter_update_nonmatch_ok +\echo [PASS] (:testid) Non-matching UPDATE did not change metadata +\else +\echo [FAIL] (:testid) Non-matching UPDATE changed metadata (:meta_before -> :meta_after) +SELECT (:fail::int + 1) AS fail \gset 
+\endif + +-- Test 3: Delete non-matching row → no metadata change +SELECT COUNT(*) AS meta_before FROM tasks_cloudsync \gset +DELETE FROM tasks WHERE id = 'b'; +SELECT COUNT(*) AS meta_after FROM tasks_cloudsync \gset +SELECT (:meta_before = :meta_after) AS filter_delete_nonmatch_ok \gset +\if :filter_delete_nonmatch_ok +\echo [PASS] (:testid) Non-matching DELETE did not change metadata +\else +\echo [FAIL] (:testid) Non-matching DELETE changed metadata (:meta_before -> :meta_after) +SELECT (:fail::int + 1) AS fail \gset +\endif + +-- Test 4: Roundtrip - sync to second database, verify only filtered rows transfer +SELECT encode(cloudsync_payload_encode(tbl, pk, col_name, col_value, col_version, db_version, site_id, cl, seq), 'hex') AS payload_hex +FROM cloudsync_changes +WHERE site_id = cloudsync_siteid() \gset + +\connect postgres +\ir helper_psql_conn_setup.sql +DROP DATABASE IF EXISTS cloudsync_test_2; +CREATE DATABASE cloudsync_test_2; + +\connect cloudsync_test_2 +\ir helper_psql_conn_setup.sql +CREATE EXTENSION IF NOT EXISTS cloudsync; +CREATE TABLE tasks (id TEXT PRIMARY KEY NOT NULL, title TEXT, user_id INTEGER); +SELECT cloudsync_init('tasks') AS _init_site_id_b \gset +SELECT cloudsync_set_filter('tasks', 'user_id = 1') AS _set_filter_b_ok \gset +SELECT cloudsync_payload_apply(decode(:'payload_hex', 'hex')) AS _apply_ok \gset + +-- Verify: db2 should have only the matching rows +SELECT COUNT(*) AS task_count FROM tasks \gset +SELECT (:task_count = 2) AS roundtrip_count_ok \gset +\if :roundtrip_count_ok +\echo [PASS] (:testid) Roundtrip: correct number of rows synced (2) +\else +\echo [FAIL] (:testid) Roundtrip: expected 2 rows, got :task_count +SELECT (:fail::int + 1) AS fail \gset +\endif + +-- Verify row 'c' exists with user_id = 1 +SELECT COUNT(*) AS c_exists FROM tasks WHERE id = 'c' AND user_id = 1 \gset +SELECT (:c_exists = 1) AS roundtrip_row_ok \gset +\if :roundtrip_row_ok +\echo [PASS] (:testid) Roundtrip: task 'c' with user_id=1 present +\else +\echo [FAIL] (:testid) Roundtrip: task 'c' with user_id=1 not found +SELECT (:fail::int + 1) AS fail \gset +\endif + +-- Cleanup +\ir helper_test_cleanup.sql +\if :should_cleanup +DROP DATABASE IF EXISTS cloudsync_test_1; +DROP DATABASE IF EXISTS cloudsync_test_2; +\else +\echo [INFO] !!!!! 
+\endif diff --git a/test/postgresql/full_test.sql b/test/postgresql/full_test.sql index 664eaa0..ea57941 100644 --- a/test/postgresql/full_test.sql +++ b/test/postgresql/full_test.sql @@ -27,6 +27,7 @@ \ir 17_uuid_pk_roundtrip.sql \ir 18_bulk_insert_performance.sql \ir 19_uuid_pk_with_unmapped_cols.sql +\ir 20_row_filter.sql -- 'Test summary' \echo '\nTest summary:' diff --git a/test/unit.c b/test/unit.c index e0892ec..506e606 100644 --- a/test/unit.c +++ b/test/unit.c @@ -7521,6 +7521,138 @@ bool do_test_payload_buffer (size_t blob_size) { return success; } +// MARK: - Row Filter Test - + +static int64_t test_query_int(sqlite3 *db, const char *sql) { + sqlite3_stmt *stmt = NULL; + int64_t value = -1; + if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) return -1; + if (sqlite3_step(stmt) == SQLITE_ROW) value = sqlite3_column_int64(stmt, 0); + sqlite3_finalize(stmt); + return value; +} + +bool do_test_row_filter(int nclients, bool print_result, bool cleanup_databases) { + sqlite3 *db[MAX_SIMULATED_CLIENTS] = {NULL}; + bool result = false; + int rc = SQLITE_OK; + + memset(db, 0, sizeof(sqlite3 *) * MAX_SIMULATED_CLIENTS); + if (nclients >= MAX_SIMULATED_CLIENTS) nclients = MAX_SIMULATED_CLIENTS; + if (nclients < 2) nclients = 2; + + time_t timestamp = time(NULL); + int saved_counter = test_counter; + for (int i = 0; i < nclients; ++i) { + db[i] = do_create_database_file(i, timestamp, test_counter++); + if (!db[i]) return false; + + // Create table + rc = sqlite3_exec(db[i], "CREATE TABLE tasks(id TEXT PRIMARY KEY NOT NULL, title TEXT, user_id INTEGER);", NULL, NULL, NULL); + if (rc != SQLITE_OK) goto finalize; + + // Init cloudsync + rc = sqlite3_exec(db[i], "SELECT cloudsync_init('tasks');", NULL, NULL, NULL); + if (rc != SQLITE_OK) goto finalize; + + // Set filter: only sync rows where user_id = 1 + rc = sqlite3_exec(db[i], "SELECT cloudsync_set_filter('tasks', 'user_id = 1');", NULL, NULL, NULL); + if (rc != SQLITE_OK) goto finalize; + } + + // --- Test 1: Insert matching and non-matching rows on db[0] --- + rc = sqlite3_exec(db[0], "INSERT INTO tasks VALUES('a', 'Task A', 1);", NULL, NULL, NULL); + if (rc != SQLITE_OK) goto finalize; + rc = sqlite3_exec(db[0], "INSERT INTO tasks VALUES('b', 'Task B', 2);", NULL, NULL, NULL); + if (rc != SQLITE_OK) goto finalize; + rc = sqlite3_exec(db[0], "INSERT INTO tasks VALUES('c', 'Task C', 1);", NULL, NULL, NULL); + if (rc != SQLITE_OK) goto finalize; + + // Verify: tasks_cloudsync should only have metadata for user_id=1 rows ('a' and 'c') + { + // Count distinct PKs in the meta table + int64_t meta_count = test_query_int(db[0], "SELECT COUNT(DISTINCT pk) FROM tasks_cloudsync;"); + if (meta_count != 2) { + printf("do_test_row_filter: expected 2 tracked PKs after insert, got %" PRId64 "\n", meta_count); + goto finalize; + } + } + + // --- Test 2: Update matching row → metadata should update --- + rc = sqlite3_exec(db[0], "UPDATE tasks SET title='Task A Updated' WHERE id='a';", NULL, NULL, NULL); + if (rc != SQLITE_OK) goto finalize; + + // --- Test 3: Update non-matching row → NO metadata change --- + { + int64_t before = test_query_int(db[0], "SELECT COUNT(*) FROM tasks_cloudsync;"); + rc = sqlite3_exec(db[0], "UPDATE tasks SET title='Task B Updated' WHERE id='b';", NULL, NULL, NULL); + if (rc != SQLITE_OK) goto finalize; + int64_t after = test_query_int(db[0], "SELECT COUNT(*) FROM tasks_cloudsync;"); + if (after != before) { + printf("do_test_row_filter: non-matching UPDATE changed meta count (%" PRId64 " -> %" PRId64 ")\n", before, 
after); + goto finalize; + } + } + + // --- Test 4: Delete non-matching row → NO metadata change --- + { + int64_t before = test_query_int(db[0], "SELECT COUNT(*) FROM tasks_cloudsync;"); + rc = sqlite3_exec(db[0], "DELETE FROM tasks WHERE id='b';", NULL, NULL, NULL); + if (rc != SQLITE_OK) goto finalize; + int64_t after = test_query_int(db[0], "SELECT COUNT(*) FROM tasks_cloudsync;"); + if (after != before) { + printf("do_test_row_filter: non-matching DELETE changed meta count (%" PRId64 " -> %" PRId64 ")\n", before, after); + goto finalize; + } + } + + // --- Test 5: Delete matching row → metadata should update (tombstone) --- + rc = sqlite3_exec(db[0], "DELETE FROM tasks WHERE id='a';", NULL, NULL, NULL); + if (rc != SQLITE_OK) goto finalize; + + // --- Test 6: Merge from db[0] to db[1] and verify only filtered rows transfer --- + if (do_merge_using_payload(db[0], db[1], true, true) == false) goto finalize; + + { + // db[1] should have 'c' (user_id=1) and the tombstone for 'a', but NOT 'b' + int64_t task_count = test_query_int(db[1], "SELECT COUNT(*) FROM tasks;"); + if (task_count != 1) { + printf("do_test_row_filter: expected 1 row in db[1] tasks after merge, got %" PRId64 "\n", task_count); + goto finalize; + } + // Verify it's 'c' + int64_t c_exists = test_query_int(db[1], "SELECT COUNT(*) FROM tasks WHERE id='c';"); + if (c_exists != 1) { + printf("do_test_row_filter: expected task 'c' in db[1], not found\n"); + goto finalize; + } + } + + if (print_result) { + printf("\n-> tasks (db[0])\n"); + do_query(db[0], "SELECT * FROM tasks ORDER BY id;", NULL); + printf("\n-> tasks_cloudsync (db[0])\n"); + do_query(db[0], "SELECT hex(pk), col_name, col_version, db_version FROM tasks_cloudsync ORDER BY pk, col_name;", NULL); + printf("\n-> tasks (db[1])\n"); + do_query(db[1], "SELECT * FROM tasks ORDER BY id;", NULL); + } + + result = true; + +finalize: + for (int i = 0; i < nclients; ++i) { + if (rc != SQLITE_OK && db[i] && (sqlite3_errcode(db[i]) != SQLITE_OK)) + printf("do_test_row_filter error: %s\n", sqlite3_errmsg(db[i])); + if (db[i]) close_db(db[i]); + if (cleanup_databases) { + char buf[256]; + do_build_database_path(buf, i, timestamp, saved_counter++); + file_delete_internal(buf); + } + } + return result; +} + int test_report(const char *description, bool result){ printf("%-30s %s\n", description, (result) ? "OK" : "FAILED"); return result ? 0 : 1; @@ -7641,7 +7773,10 @@ int main (int argc, const char * argv[]) { result += test_report("Test Alter Table 1:", do_test_alter(3, 1, print_result, cleanup_databases)); result += test_report("Test Alter Table 2:", do_test_alter(3, 2, print_result, cleanup_databases)); result += test_report("Test Alter Table 3:", do_test_alter(3, 3, print_result, cleanup_databases)); - + + // test row-level filter + result += test_report("Test Row Filter:", do_test_row_filter(2, print_result, cleanup_databases)); + finalize: if (rc != SQLITE_OK) printf("%s (%d)\n", (db) ? 
sqlite3_errmsg(db) : "N/A", rc); close_db(db); From 15933cc7982b468fd72f4cb58e599540ae56f6d2 Mon Sep 17 00:00:00 2001 From: Andrea Donetti Date: Tue, 10 Feb 2026 10:41:29 -0600 Subject: [PATCH 2/2] chore: prepare for merge to dev --- .../{20_row_filter.sql => 26_row_filter.sql} | 18 +++++++++--------- test/postgresql/full_test.sql | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) rename test/postgresql/{20_row_filter.sql => 26_row_filter.sql} (91%) diff --git a/test/postgresql/20_row_filter.sql b/test/postgresql/26_row_filter.sql similarity index 91% rename from test/postgresql/20_row_filter.sql rename to test/postgresql/26_row_filter.sql index 22b4acf..01c9dde 100644 --- a/test/postgresql/20_row_filter.sql +++ b/test/postgresql/26_row_filter.sql @@ -1,15 +1,15 @@ -- 'Row-level filter (conditional sync) test' -\set testid '20' +\set testid '26' \ir helper_test_init.sql -- Create first database \connect postgres \ir helper_psql_conn_setup.sql -DROP DATABASE IF EXISTS cloudsync_test_1; -CREATE DATABASE cloudsync_test_1; +DROP DATABASE IF EXISTS cloudsync_test_26_a; +CREATE DATABASE cloudsync_test_26_a; -\connect cloudsync_test_1 +\connect cloudsync_test_26_a \ir helper_psql_conn_setup.sql CREATE EXTENSION IF NOT EXISTS cloudsync; @@ -64,10 +64,10 @@ WHERE site_id = cloudsync_siteid() \gset \connect postgres \ir helper_psql_conn_setup.sql -DROP DATABASE IF EXISTS cloudsync_test_2; -CREATE DATABASE cloudsync_test_2; +DROP DATABASE IF EXISTS cloudsync_test_26_b; +CREATE DATABASE cloudsync_test_26_b; -\connect cloudsync_test_2 +\connect cloudsync_test_26_b \ir helper_psql_conn_setup.sql CREATE EXTENSION IF NOT EXISTS cloudsync; CREATE TABLE tasks (id TEXT PRIMARY KEY NOT NULL, title TEXT, user_id INTEGER); @@ -98,8 +98,8 @@ SELECT (:fail::int + 1) AS fail \gset -- Cleanup \ir helper_test_cleanup.sql \if :should_cleanup -DROP DATABASE IF EXISTS cloudsync_test_1; -DROP DATABASE IF EXISTS cloudsync_test_2; +DROP DATABASE IF EXISTS cloudsync_test_26_a; +DROP DATABASE IF EXISTS cloudsync_test_26_b; \else \echo [INFO] !!!!! \endif diff --git a/test/postgresql/full_test.sql b/test/postgresql/full_test.sql index ea57941..34b45db 100644 --- a/test/postgresql/full_test.sql +++ b/test/postgresql/full_test.sql @@ -27,7 +27,7 @@ \ir 17_uuid_pk_roundtrip.sql \ir 18_bulk_insert_performance.sql \ir 19_uuid_pk_with_unmapped_cols.sql -\ir 20_row_filter.sql +\ir 26_row_filter.sql -- 'Test summary' \echo '\nTest summary:'
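
--
Usage sketch (not part of the patch; the table, columns, and data below are hypothetical, mirroring the do_test_row_filter unit test and test/postgresql/26_row_filter.sql above): once this series is applied, a row-level filter is configured and removed through the two new SQL-visible functions.

    CREATE TABLE tasks (id TEXT PRIMARY KEY NOT NULL, title TEXT, user_id INTEGER);
    SELECT cloudsync_init('tasks');
    SELECT cloudsync_set_filter('tasks', 'user_id = 1');  -- only rows with user_id = 1 are tracked
    INSERT INTO tasks VALUES ('a', 'Task A', 1);          -- tracked and synced
    INSERT INTO tasks VALUES ('b', 'Task B', 2);          -- skipped by the sync triggers
    SELECT cloudsync_clear_filter('tasks');               -- triggers recreated without the filter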