Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
d8bac9f
Update data_substrate
githubzilla Dec 5, 2025
363d781
Merge remote-tracking branch 'upstream/eloq-10.6.10' into eloq-10.6.10
githubzilla Dec 9, 2025
9e5d1e8
Merge remote-tracking branch 'upstream/eloq-10.6.10' into eloq-10.6.10
githubzilla Dec 11, 2025
c4245ee
Merge remote-tracking branch 'upstream/eloq-10.6.10' into eloq-10.6.10
githubzilla Dec 12, 2025
74850e0
Merge remote-tracking branch 'upstream/eloq-10.6.10' into eloq-10.6.10
githubzilla Dec 24, 2025
04fd052
Merge remote-tracking branch 'upstream/eloq-10.6.10' into eloq-10.6.10
githubzilla Jan 12, 2026
e685b42
Merge remote-tracking branch 'upstream/eloq-10.6.10' into eloq-10.6.10
githubzilla Jan 14, 2026
266e81a
Merge remote-tracking branch 'upstream/eloq-10.6.10' into eloq-10.6.10
githubzilla Jan 16, 2026
5c9ddd5
Merge remote-tracking branch 'upstream/eloq-10.6.10' into eloq-10.6.10
githubzilla Jan 17, 2026
58ccd01
Merge remote-tracking branch 'upstream/eloq-10.6.10' into eloq-10.6.10
githubzilla Feb 8, 2026
7c0f1b0
Merge remote-tracking branch 'upstream/eloq-10.6.10' into eloq-10.6.10
githubzilla Feb 9, 2026
4274ba8
chore: update data_substrate: extend log retention
githubzilla Feb 9, 2026
f8c540b
Merge remote-tracking branch 'upstream/eloq-10.6.10' into eloq-10.6.10
githubzilla Feb 11, 2026
175b5ef
Add range_read_block_on_write_lock test
githubzilla Dec 10, 2025
7868873
Disable auto ckpt when testing
githubzilla Dec 10, 2025
58cfe41
Add range_split_deadlock_abort.test
githubzilla Dec 10, 2025
cdacfdb
Update range_read_block_on_write_lock
githubzilla Dec 11, 2025
d5d5fb8
Update data_substrate
githubzilla Feb 8, 2026
481ab8a
Update test case reflecting comment
githubzilla Feb 9, 2026
0e1a41c
Update data_substrate
githubzilla Feb 11, 2026
3c313aa
Update data_substrate
githubzilla Feb 11, 2026
d9d39ab
Update data_substrate
githubzilla Feb 11, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
set @@default_storage_engine= eloq;
DROP TABLE IF EXISTS t1;
# Test: Range reads SHOULD block when range split holds write lock
# When range split holds write lock (Phase 2-3), reads should block
# because write locks are exclusive and incompatible with read locks.
create table t1 (c0 int auto_increment, c1 char(250), c2 char(250), c3 char(250), c4 int, primary key(c0));
insert into t1 values (0,'initial', 'data', 'row', 1);
SET SESSION debug_dbug="+d,eloq;term_SplitFlushOp_CommitAcquireAllWriteOp_Continue;node_id=-1";
SET SESSION debug_dbug="+d,eloq;at_once;node_id=-1;action=NOTIFY_CHECKPOINTER";
connect conn_read1,localhost,root,,test,$MONO1_PORT;
connect conn_read2,localhost,root,,test,$MONO1_PORT;
connect conn_read3,localhost,root,,test,$MONO1_PORT;
# Test Case 1: Single read should BLOCK (not complete immediately)
connection conn_read1;
begin;
select count(*) from t1;;
connection default;
# Test Case 2: Multiple concurrent reads should all BLOCK
connection conn_read2;
begin;
select count(*) from t1 where c4 = 1;;
connection conn_read3;
begin;
select c0, c1, c2 from t1 order by c0;;
connection default;
# Printing processlist before verification:
# Test Case 3: Complete the range split (releases write lock)
SET SESSION debug_dbug="-d,eloq;term_SplitFlushOp_CommitAcquireAllWriteOp_Continue;node_id=-1";
# Test Case 4: Verify blocked reads complete after write lock is released
connection conn_read1;
count(*)
2048
select count(*) from t1;
count(*)
2048
commit;
connection conn_read2;
count(*)
2048
select count(*) from t1 where c4 = 1;
count(*)
2048
commit;
connection conn_read3;
select c0, c1, c2 from t1;
commit;
# Test Case 5: Verify no queries are still blocked
connection default;
# Printing processlist before verification:
# Test Case 6: Verify reads completed successfully and range split completed
connection conn_read1;
select count(*) from t1;
count(*)
2048
disconnect conn_read1;
disconnect conn_read2;
disconnect conn_read3;
connection default;
drop table t1;
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
set @@default_storage_engine= eloq;
DROP TABLE IF EXISTS t1;
# Test: Range split SHOULD downgrade lock and retry at phase 8 when deadlock detected
# When range split is at phase 8 and deadlock is detected, it should
# downgrade write intent lock and retry phase 8 instead of fully aborting.
#
# This test uses the split_flush_commit_acquire_all_deadlock fault injector
# to simulate a deadlock at phase 8, which triggers the downgrade and retry logic.
create table t1 (c0 int auto_increment, c1 char(250), c2 char(250), c3 char(250), c4 int, primary key(c0));
insert into t1 values (0,'initial', 'data', 'row', 1);
SET SESSION debug_dbug="+d,eloq;split_flush_commit_acquire_all_deadlock;node_id=-1";
# Step 2: Trigger range split with checkpoint
# Range split will progress through phases 1-7, and at phase 8:
# - The fault injector will inject DEAD_LOCK_ABORT error
# - IsDeadlock() will return true
# - Range split will downgrade write intent lock via PostWriteType::DowngradeLock
# - Range split will retry phase 8 after downgrade
SET SESSION debug_dbug="+d,eloq;at_once;node_id=-1;action=NOTIFY_CHECKPOINTER";
# Step 3: Verify range split completed successfully
connection default;
select count(*) from t1;
count(*)
2048
# Step 4: Verify no queries are still blocked
connection default;
drop table t1;
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
--checkpointer_interval=86400
Original file line number Diff line number Diff line change
@@ -0,0 +1,149 @@
# Preamble for range_read_block_on_write_lock.test: require the eloq engine,
# make it the default, and start from a clean slate.
# NOTE(review): this test drives debug_dbug sync points further down, which
# only exist in debug builds -- consider also sourcing include/have_debug.inc
# so the test skips gracefully on non-debug builds (TODO confirm suite policy).
--source include/have_eloq.inc
set @@default_storage_engine= eloq;

# Suppress the "unknown table" warning when t1 does not exist yet.
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
Comment on lines +1 to +6
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Find tests in the eloq directory that use debug_dbug
echo "=== Tests using debug_dbug ==="
rg -l 'debug_dbug' storage/eloq/mysql-test/mono_basic/t/ 2>/dev/null | sort

echo ""
echo "=== Tests sourcing have_debug.inc ==="
rg -l 'have_debug' storage/eloq/mysql-test/mono_basic/t/ 2>/dev/null | sort

echo ""
echo "=== Tests using both debug_dbug AND have_debug ==="
rg -l 'debug_dbug' storage/eloq/mysql-test/mono_basic/t/ 2>/dev/null | while read f; do
  if rg -q 'have_debug' "$f" 2>/dev/null; then
    echo "$f"
  fi
done

echo ""
echo "=== Tests using debug_dbug but NOT have_debug ==="
rg -l 'debug_dbug' storage/eloq/mysql-test/mono_basic/t/ 2>/dev/null | while read f; do
  if ! rg -q 'have_debug' "$f" 2>/dev/null; then
    echo "$f"
  fi
done

Repository: eloqdata/eloqsql

Length of output: 2890


🏁 Script executed:

#!/bin/bash
# Check if have_debug.inc exists anywhere in the repo
echo "=== Searching for have_debug.inc ==="
find . -name "have_debug.inc" 2>/dev/null

echo ""
echo "=== Checking eloq includes directory ==="
ls -la storage/eloq/mysql-test/mono_basic/include/ 2>/dev/null || echo "Include dir not found"

echo ""
echo "=== All .inc files in eloq includes ==="
ls storage/eloq/mysql-test/mono_basic/include/*.inc 2>/dev/null | head -20

echo ""
echo "=== Checking if have_debug.inc exists in main mysql test suite ==="
find . -path "*/mysql-test/include/have_debug.inc" 2>/dev/null

Repository: eloqdata/eloqsql

Length of output: 837


🏁 Script executed:

#!/bin/bash
echo "=== Content of have_debug.inc ==="
cat mysql-test/include/have_debug.inc

echo ""
echo "=== Content of have_eloq.inc ==="
cat storage/eloq/mysql-test/mono_basic/include/have_eloq.inc

echo ""
echo "=== First few lines of an eloq test file for comparison ==="
head -10 storage/eloq/mysql-test/mono_basic/t/alter_table_recovery.test

Repository: eloqdata/eloqsql

Length of output: 657


🏁 Script executed:

#!/bin/bash
echo "=== Check if eloq tests can source ../../../../../mysql-test/include/have_debug.inc ==="
head -5 storage/eloq/mysql-test/mono_basic/t/alter_table_recovery.test | grep -E 'source|include'

echo ""
echo "=== Check how many eloq tests source long_test.inc ==="
rg -l 'long_test.inc' storage/eloq/mysql-test/mono_basic/t/ | wc -l

echo ""
echo "=== All eloq tests and their includes ==="
for f in storage/eloq/mysql-test/mono_basic/t/*.test; do
  echo "=== $(basename $f) ==="
  head -10 "$f" | grep -E '^\-\-source'
done | head -50

Repository: eloqdata/eloqsql

Length of output: 1918


Source include/have_debug.inc to skip on non-debug builds.

This test uses debug_dbug (lines 20, 40, 104) which requires a debug build. Standard MySQL tests that rely on debug sync points source have_debug.inc to skip gracefully on non-debug builds. Without this, the test may fail unexpectedly if the debug sync points don't fire.

 --source include/have_eloq.inc
+--source include/have_debug.inc
 set @@default_storage_engine= eloq;

Note: This pattern applies to all 20 debug_dbug-using tests in the eloq suite, not just this one. Consider adding have_debug.inc to the shared eloq include files for consistency.

🤖 Prompt for AI Agents
In `@storage/eloq/mysql-test/mono_basic/t/range_read_block_on_write_lock.test`
around lines 1 - 6, This test uses debug sync points via debug_dbug, so add
sourcing of have_debug.inc at the top of the test to skip on non-debug builds:
include/have_debug.inc should be sourced before any debug_dbug usage (i.e.,
before the first debug_dbug call in this file), and consider adding the same
include to the shared eloq include files so all tests that call debug_dbug will
skip when not built with debug support.


--echo # Test: Range reads SHOULD block when range split holds write lock
--echo # When range split holds write lock (Phase 2-3), reads should block
--echo # because write locks are exclusive and incompatible with read locks.

# NOTE(review): synchronization in this test relies on fixed --sleep delays
# plus wait_condition.inc polling; on a heavily loaded machine the split may
# not yet have paused when a sleep expires -- TODO consider replacing the
# bare sleeps with explicit wait conditions where an observable state exists.
create table t1 (c0 int auto_increment, c1 char(250), c2 char(250), c3 char(250), c4 int, primary key(c0));

# Insert initial data
insert into t1 values (0,'initial', 'data', 'row', 1);

# Set debug point to pause range split at prepare phase (Phase 2-3)
# This must be set BEFORE building up data, so when the split is triggered,
# it will pause at the prepare phase where write lock is held
SET SESSION debug_dbug="+d,eloq;term_SplitFlushOp_CommitAcquireAllWriteOp_Continue;node_id=-1";

# Build up enough data to trigger range split
# We need to exceed range_max_size (1MB for testing builds, 256MB otherwise)
# Using the same pattern as range_split_write_forward.test: double the data each time
# 11 iterations: 1 -> 2 -> 4 -> 8 -> 16 -> 32 -> 64 -> 128 -> 256 -> 512 -> 1024 -> 2048 rows
# With char(250) columns, this should exceed the threshold
# The explicit column list lets auto_increment assign fresh primary keys on
# every doubling pass.
let $counter= 11;
--disable_result_log
--disable_query_log
while($counter)
{
insert into t1 (c1, c2, c3, c4) select c1, c2, c3, c4 from t1;
dec $counter;
}
--enable_query_log
--enable_result_log

# Trigger checkpoint to calculate range size and trigger range split if threshold exceeded
# The range split will pause at prepare phase (write lock held) due to the debug point set above
SET SESSION debug_dbug="+d,eloq;at_once;node_id=-1;action=NOTIFY_CHECKPOINTER";
# Wait longer for checkpoint to process and range split to start and pause
--sleep 5

# Create connections: multiple for concurrent reads
--connect (conn_read1,localhost,root,,test,$MONO1_PORT)
--connect (conn_read2,localhost,root,,test,$MONO1_PORT)
--connect (conn_read3,localhost,root,,test,$MONO1_PORT)

# Wait a bit more to ensure range split has started and paused at prepare phase
--sleep 2

--echo # Test Case 1: Single read should BLOCK (not complete immediately)
--connection conn_read1
begin;
# --send issues the query asynchronously; it is reaped in Test Case 4.
--send select count(*) from t1;

# Wait a bit to ensure the read is blocked
--sleep 2

# Verify the read is blocked (check processlist)
# Note: Blocked queries are identified by state and info columns
# We check for connections that have:
# 1. Lock-related state (state like "%lock%")
# 2. Matching query info (info like "select count(*) from t1%")
# 3. NULL info with time >= 1 (waiting/blocked)
--connection default
let $wait_condition=
select count(*) >= 1 from information_schema.processlist
where (state like "%lock%" or info like "select count(*) from t1%")
and id != CONNECTION_ID();
--source include/wait_condition.inc

--echo # Test Case 2: Multiple concurrent reads should all BLOCK
--connection conn_read2
begin;
--send select count(*) from t1 where c4 = 1;

--connection conn_read3
begin;
--send select c0, c1, c2 from t1 order by c0;

# Wait a bit to ensure reads are blocked
--sleep 10

# Verify all reads are blocked
# Note: Blocked queries are identified by state and info columns
# We check for connections that have:
# 1. Lock-related state (state like "%lock%")
# 2. Matching query info (info like "select count(*) from t1%" or "select c0, c1, c2 from t1%")
# 3. NULL info with time >= 1 (waiting/blocked)
--connection default
--echo # Printing processlist before verification:
let $wait_condition=
select count(*) >= 3 from information_schema.processlist
where (state like "%lock%" or
info like "select count(*) from t1%" or
info like "select c0, c1, c2 from t1%")
and id != CONNECTION_ID();
--source include/wait_condition.inc

--echo # Test Case 3: Complete the range split (releases write lock)
# Complete the range split by removing the debug point and triggering checkpoint
# This will allow the split to proceed through all phases and release the write lock
SET SESSION debug_dbug="-d,eloq;term_SplitFlushOp_CommitAcquireAllWriteOp_Continue;node_id=-1";
# NOTE(review): the line below is leftover disabled code -- presumably the
# split resumes without re-notifying the checkpointer; confirm and delete it.
# SET SESSION debug_dbug="+d,eloq;at_once;node_id=-1;action=NOTIFY_CHECKPOINTER";
--sleep 5

--echo # Test Case 4: Verify blocked reads complete after write lock is released
# All reads should now complete successfully after the range split releases the write lock
--connection conn_read1
--reap
select count(*) from t1;
commit;

--connection conn_read2
--reap
select count(*) from t1 where c4 = 1;
commit;

--connection conn_read3
# Result log disabled: the 2048-row result set is too large to record.
--disable_result_log
--reap
select c0, c1, c2 from t1;
--enable_result_log
commit;

--echo # Test Case 5: Verify no queries are still blocked
--connection default
# After reaping, queries should be completed
# We verify by checking that there are no blocked queries (identified by state and info columns)
# The predicate matches only t1-related queries and excludes this connection,
# so unrelated idle/daemon threads cannot stall the wait.
--echo # Printing processlist before verification:
let $wait_condition=
select count(*) = 0 from information_schema.processlist
where info like "select%from t1%"
and id != CONNECTION_ID();
--source include/wait_condition.inc
Comment on lines +132 to +136
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Wait condition in Test Case 5 may match unrelated idle connections.

The predicate (info IS NULL and time >= 1) combined with (info like "select%from t1%" or info IS NULL) will match any idle connection with NULL info and time >= 1, including daemon threads or the default connection itself (excluded only by the lock-state branch, not the NULL-info branch). This can cause the wait_condition to never reach count(*) = 0, leading to a timeout and false test failure.

Proposed fix: tighten the condition to only match t1-related queries
 let $wait_condition=
     select count(*) = 0 from information_schema.processlist
-    where ((state like "%lock%") or
-           (info IS NULL and time >= 1))
-    and (info like "select%from t1%" or info IS NULL);
+    where info like "select%from t1%"
+    and id != CONNECTION_ID();
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
let $wait_condition=
select count(*) = 0 from information_schema.processlist
where ((state like "%lock%") or
(info IS NULL and time >= 1))
and (info like "select%from t1%" or info IS NULL);
--source include/wait_condition.inc
let $wait_condition=
select count(*) = 0 from information_schema.processlist
where info like "select%from t1%"
and id != CONNECTION_ID();
--source include/wait_condition.inc
🤖 Prompt for AI Agents
In `@storage/eloq/mysql-test/mono_basic/t/range_read_block_on_write_lock.test`
around lines 130 - 135, The wait condition stored in $wait_condition is too
permissive because the "(info IS NULL and time >= 1)" branch can match unrelated
idle/daemon connections; tighten it so we only consider connections that
actually reference t1 or are in a lock state. Edit the SQL in $wait_condition to
remove the generic "info IS NULL and time >= 1" clause and instead use just
"(state like "%lock%") OR (info like "select%from t1%")" so only lock-holding or
t1-related queries are considered by the wait condition.


--echo # Test Case 6: Verify reads completed successfully and range split completed
# Final sanity read on a connection that was previously blocked: the count
# confirms the data survived the split and the connection is usable again.
--connection conn_read1
select count(*) from t1;

# Cleanup
--disconnect conn_read1
--disconnect conn_read2
--disconnect conn_read3

--connection default
drop table t1;

Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
--checkpointer_interval=86400
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
# Preamble: require the eloq engine, make it the default, and start clean.
# NOTE(review): this test uses debug_dbug fault injection, which only exists
# in debug builds -- consider also sourcing include/have_debug.inc.
--source include/have_eloq.inc
set @@default_storage_engine= eloq;

# Suppress the "unknown table" warning when t1 does not exist yet.
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings

--echo # Test: Range split SHOULD downgrade lock and retry at phase 8 when deadlock detected
--echo # When range split is at phase 8 and deadlock is detected, it should
--echo # downgrade write intent lock and retry phase 8 instead of fully aborting.
--echo #
--echo # This test uses the split_flush_commit_acquire_all_deadlock fault injector
--echo # to simulate a deadlock at phase 8, which triggers the downgrade and retry logic.

create table t1 (c0 int auto_increment, c1 char(250), c2 char(250), c3 char(250), c4 int, primary key(c0));

# Insert initial data
insert into t1 values (0,'initial', 'data', 'row', 1);

# Step 1: Enable fault injector to simulate deadlock at phase 8
# This fault injector will inject a DEAD_LOCK_ABORT error when range split
# reaches phase 8 (Commit Acquire All Write Lock), which will trigger the
# downgrade and retry logic
SET SESSION debug_dbug="+d,eloq;split_flush_commit_acquire_all_deadlock;node_id=-1";

# Build up enough data in t1 to trigger range split
# We need to exceed range_max_size (1MB for testing builds, 256MB otherwise)
# Using the same pattern as range_read_block_on_write_lock.test: double the data each time
# 11 iterations: 1 -> 2 -> 4 -> 8 -> 16 -> 32 -> 64 -> 128 -> 256 -> 512 -> 1024 -> 2048 rows
# With char(250) columns, this should exceed the threshold
# FIX: use an explicit column list so auto_increment assigns fresh primary
# keys. The previous "insert into t1 select * from t1" re-inserted existing
# c0 primary-key values, which raises a duplicate-key error on engines that
# enforce the PK and aborts the data build-up; the column-list form is also
# what range_read_block_on_write_lock.test actually uses, making the
# "same pattern" claim above true.
let $counter= 11;
--disable_result_log
--disable_query_log
while($counter)
{
insert into t1 (c1, c2, c3, c4) select c1, c2, c3, c4 from t1;
dec $counter;
}
--enable_query_log
--enable_result_log

--echo # Step 2: Trigger range split with checkpoint
--echo # Range split will progress through phases 1-7, and at phase 8:
--echo # - The fault injector will inject DEAD_LOCK_ABORT error
--echo # - IsDeadlock() will return true
--echo # - Range split will downgrade write intent lock via PostWriteType::DowngradeLock
--echo # - Range split will retry phase 8 after downgrade
SET SESSION debug_dbug="+d,eloq;at_once;node_id=-1;action=NOTIFY_CHECKPOINTER";

# Wait for checkpoint to process and range split to start
# NOTE(review): fixed sleeps are timing-based synchronization; on a slow
# machine the split may need longer -- TODO consider an observable wait
# condition if the engine exposes split progress.
--sleep 5

# Wait for range split to reach phase 8, trigger fault injector, downgrade, and retry
--sleep 10

--echo # Step 3: Verify range split completed successfully
--connection default
# Verify range split completed by checking that data is accessible
# If range split failed, we would see errors or the split wouldn't complete
select count(*) from t1;

--echo # Step 4: Verify no queries are still blocked
--connection default
# After range split completes, there should be no blocked queries.
# The predicate matches only t1-related queries and excludes this connection,
# so unrelated idle/daemon threads cannot stall the wait.
let $wait_condition=
select count(*) = 0 from information_schema.processlist
where info like "select%from t1%"
and id != CONNECTION_ID();
--source include/wait_condition.inc

# Cleanup
drop table t1;