Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
143 changes: 143 additions & 0 deletions tests/operator_tests.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1252,4 +1252,147 @@ TEST_F(OperatorTests, HashJoinNullKeys) {
join->close();
}

TEST_F(OperatorTests, ProjectNextViewNonSimpleProjection) {
    // Verify that ProjectOperator::next_view() bails out (returns false) when the
    // projection is not a simple column pass-through (is_simple_projection_ == false).
    // BufferScanOperator never implements next_view(), so only the projection
    // operator's early-return path is exercised here.
    Schema schema = make_schema(
        {{"id", common::ValueType::TYPE_INT64}, {"name", common::ValueType::TYPE_TEXT}});
    std::vector<Tuple> rows;
    rows.push_back(make_tuple({common::Value::make_int64(1), common::Value::make_text("alice")}));

    auto source = make_buffer_scan("test_table", rows, schema);

    // A constant projection expression (rather than a column reference) forces
    // is_simple_projection_ to be false.
    std::vector<std::unique_ptr<Expression>> exprs;
    exprs.push_back(const_expr(common::Value::make_int64(42)));
    auto project = make_project(std::move(source), std::move(exprs));

    ASSERT_TRUE(project->init());
    ASSERT_TRUE(project->open());

    // The materializing next() path still works and yields the single constant column.
    Tuple materialized;
    EXPECT_TRUE(project->next(materialized));
    EXPECT_EQ(materialized.size(), 1U);

    // The zero-copy path must refuse non-simple projections.
    storage::HeapTable::TupleView view;
    EXPECT_FALSE(project->next_view(view));

    project->close();
}

TEST_F(OperatorTests, LimitNextView) {
    // LimitOperator::next_view() delegates to its child; BufferScanOperator relies
    // on the base-class stub that always reports false, so this covers the limit
    // operator's child-returns-false early-return path.
    Schema schema = make_schema({{"id", common::ValueType::TYPE_INT64}});
    std::vector<Tuple> rows;
    for (int value = 0; value < 5; value++) {
        rows.push_back(make_tuple({common::Value::make_int64(value)}));
    }

    auto source = make_buffer_scan("test_table", rows, schema);
    auto limit = make_limit(std::move(source), 2);  // keep at most two rows

    ASSERT_TRUE(limit->init());
    ASSERT_TRUE(limit->open());

    // child_->next_view() fails immediately, so the limit operator must report
    // false as well instead of producing a view.
    storage::HeapTable::TupleView view;
    EXPECT_FALSE(limit->next_view(view));
    limit->close();
}

TEST_F(OperatorTests, LimitNextViewZeroLimit) {
    // With LIMIT 0, LimitOperator::next_view() must report exhaustion up front;
    // this exercises the limit-count check at the top of next_view().
    Schema schema = make_schema({{"id", common::ValueType::TYPE_INT64}});
    std::vector<Tuple> rows;
    for (int value = 0; value < 5; value++) {
        rows.push_back(make_tuple({common::Value::make_int64(value)}));
    }

    auto source = make_buffer_scan("test_table", rows, schema);
    auto limit = make_limit(std::move(source), 0);  // LIMIT 0: nothing may be emitted

    ASSERT_TRUE(limit->init());
    ASSERT_TRUE(limit->open());

    // A zero limit means next_view() returns false before touching the child.
    storage::HeapTable::TupleView view;
    EXPECT_FALSE(limit->next_view(view));
    limit->close();
}

TEST_F(OperatorTests, LimitNextViewOffsetExceedsTotal) {
    // When OFFSET exceeds the number of rows available, the offset-skipping loop
    // inside LimitOperator::next_view() drains the child (whose next_view() stub
    // reports false) and must itself return false.
    Schema schema = make_schema({{"id", common::ValueType::TYPE_INT64}});
    std::vector<Tuple> rows;
    for (int value = 0; value < 3; value++) {
        rows.push_back(make_tuple({common::Value::make_int64(value)}));
    }

    auto source = make_buffer_scan("test_table", rows, schema);
    auto limit = make_limit(std::move(source), 10, 100);  // LIMIT 10 OFFSET 100

    ASSERT_TRUE(limit->init());
    ASSERT_TRUE(limit->open());

    // Offset 100 far exceeds the three stored rows, so no view can be produced.
    storage::HeapTable::TupleView view;
    EXPECT_FALSE(limit->next_view(view));
    limit->close();
}

TEST_F(OperatorTests, FilterNextViewChildReturnsFalse) {
    // FilterOperator::next_view() must propagate failure from its child.
    // BufferScanOperator does not implement next_view(), so child_->next_view()
    // reports false immediately and the filter's early-return path is covered.
    Schema schema = make_schema({{"id", common::ValueType::TYPE_INT64}});
    std::vector<Tuple> rows;
    for (int value = 0; value < 5; value++) {
        rows.push_back(make_tuple({common::Value::make_int64(value)}));
    }

    auto source = make_buffer_scan("test_table", rows, schema);
    auto filter = make_filter(
        std::move(source),
        binary_expr(col_expr("id"), TokenType::Ge, const_expr(common::Value::make_int64(2))));

    ASSERT_TRUE(filter->init());
    ASSERT_TRUE(filter->open());

    // No view can be produced because the scan's next_view() stub fails.
    storage::HeapTable::TupleView view;
    EXPECT_FALSE(filter->next_view(view));
    filter->close();
}

TEST_F(OperatorTests, FilterNextViewAllFiltered) {
    // Test FilterOperator::next_view() with a predicate that rejects every row.
    // NOTE(review): because BufferScanOperator does not implement next_view(),
    // the child reports false immediately and the predicate is never actually
    // evaluated — in practice this covers the same child-returns-false path as
    // FilterNextViewChildReturnsFalse, not the condition-evaluation loop.
    Schema schema = make_schema({{"id", common::ValueType::TYPE_INT64}});
    std::vector<Tuple> data;
    for (int i = 0; i < 3; i++) {
        data.push_back(make_tuple({common::Value::make_int64(i)}));
    }

    auto scan = make_buffer_scan("test_table", data, schema);
    // Predicate: id > 100 — no stored row satisfies it.
    auto filter = make_filter(
        std::move(scan),
        binary_expr(col_expr("id"), TokenType::Gt, const_expr(common::Value::make_int64(100))));

    ASSERT_TRUE(filter->init());
    ASSERT_TRUE(filter->open());

    storage::HeapTable::TupleView view;
    // BufferScan's next_view() stub returns false immediately, so the filter
    // returns false without ever evaluating the condition.
    EXPECT_FALSE(filter->next_view(view));
    filter->close();
}

} // namespace
28 changes: 28 additions & 0 deletions tests/query_executor_tests.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -185,6 +185,34 @@ TEST_F(QueryExecutorTests, InsertIntoNonExistentTable) {
EXPECT_FALSE(res.success());
}

TEST_F(QueryExecutorTests, InsertBatchModeSkipsLockAcquisition) {
    // With batch-insert mode enabled the executor skips per-insert lock
    // acquisition; a multi-row INSERT inside an explicit transaction must still
    // succeed and its rows must be visible after COMMIT.
    TestEnvironment env;
    execute_sql(env.executor, "CREATE TABLE test_table (id INT, val INT)");

    // Turn on the fast path that bypasses lock acquisition for inserts.
    env.executor.set_batch_insert_mode(true);

    execute_sql(env.executor, "BEGIN");

    // Three rows in a single statement should all be inserted.
    const auto insert_res =
        execute_sql(env.executor, "INSERT INTO test_table VALUES (1, 10), (2, 20), (3, 30)");
    EXPECT_TRUE(insert_res.success());
    EXPECT_EQ(insert_res.rows_affected(), 3U);

    execute_sql(env.executor, "COMMIT");

    // All three rows must be visible once committed.
    const auto select_res = execute_sql(env.executor, "SELECT * FROM test_table");
    EXPECT_EQ(select_res.row_count(), 3U);

    // Restore the default so later tests are unaffected.
    env.executor.set_batch_insert_mode(false);
}

// ============= SELECT Tests =============

TEST_F(QueryExecutorTests, SelectStarFromEmptyTable) {
Expand Down
106 changes: 106 additions & 0 deletions tests/transaction_manager_tests.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1508,4 +1508,110 @@ TEST(TransactionManagerTests, UndoLogUnknownType) {
static_cast<void>(std::remove("./test_data/unknown_test.heap"));
}

TEST(TransactionManagerTests, UndoDeleteTableNotFound) {
    // Drive the DELETE-undo path through its "table metadata not found" branch by
    // planting an undo record that names a table the catalog has never seen, then
    // rolling the transaction back.
    auto catalog = Catalog::create();
    storage::StorageManager disk_manager("./test_data");
    storage::BufferPoolManager bpm(cloudsql::config::Config::DEFAULT_BUFFER_POOL_SIZE,
                                   disk_manager);
    LockManager lock_mgr;
    TransactionManager txn_mgr(lock_mgr, *catalog, bpm, bpm.get_log_manager());

    Transaction* txn = txn_mgr.begin();
    ASSERT_NE(txn, nullptr);

    // The undo record references a table that does not exist in the catalog.
    txn->add_undo_log(UndoLog::Type::DELETE, "nonexistent_delete_table",
                      HeapTable::TupleId(99, 99));

    // abort() must tolerate the failed metadata lookup and still finish rollback.
    txn_mgr.abort(txn);
    EXPECT_EQ(txn->get_state(), TransactionState::ABORTED);
}

TEST(TransactionManagerTests, UndoUpdatePhysicalRemoveFailure) {
    // Exercise the FAULT_PHYSICAL_REMOVE failure branch of the UPDATE undo path:
    // create a table with a secondary index, commit one row, update it, then
    // abort while the fault is armed so the physical remove fails mid-undo.
    // The transaction must still end up ABORTED despite the injected failure.
    storage::StorageManager disk_manager("./test_data");
    disk_manager.create_dir_if_not_exists();
    recovery::LogManager log_mgr("./test_data/upd_phys_rm_fault.dat");
    storage::BufferPoolManager bpm(cloudsql::config::Config::DEFAULT_BUFFER_POOL_SIZE, disk_manager,
                                   &log_mgr);
    auto catalog = Catalog::create();
    LockManager lm;
    TransactionManager tm(lm, *catalog, bpm, &log_mgr);
    executor::QueryExecutor exec(*catalog, bpm, lm, tm);

    // Start from a clean slate in case a previous run left data files behind.
    static_cast<void>(std::remove("./test_data/upd_phys_rm.heap"));
    static_cast<void>(std::remove("./test_data/upd_phys_rm.idx"));

    // Set up: table + index + one committed row (the index makes the undo touch
    // index maintenance as well as the heap).
    static_cast<void>(
        exec.execute(*Parser(std::make_unique<Lexer>("CREATE TABLE upd_phys_rm (id INT, val INT)"))
                         .parse_statement()));
    static_cast<void>(
        exec.execute(*Parser(std::make_unique<Lexer>("CREATE INDEX idx_upr ON upd_phys_rm (val)"))
                         .parse_statement()));
    static_cast<void>(
        exec.execute(*Parser(std::make_unique<Lexer>("INSERT INTO upd_phys_rm VALUES (1, 100)"))
                         .parse_statement()));
    static_cast<void>(exec.execute(*Parser(std::make_unique<Lexer>("COMMIT")).parse_statement()));

    // UPDATE inside a fresh transaction, then roll it back with the fault armed.
    Transaction* txn = tm.begin();
    static_cast<void>(exec.execute(
        *Parser(std::make_unique<Lexer>("UPDATE upd_phys_rm SET val = 999 WHERE id = 1"))
            .parse_statement()));

    // Arm the fault so physical_remove() fails during the UPDATE undo; abort()
    // must swallow the failure and still transition the transaction to ABORTED.
    cloudsql::common::FaultInjection::instance().set_fault(cloudsql::common::FAULT_PHYSICAL_REMOVE);
    tm.abort(txn);
    EXPECT_EQ(txn->get_state(), TransactionState::ABORTED);
    cloudsql::common::FaultInjection::instance().clear();

    // Clean up on-disk artifacts so other tests see a fresh directory.
    static_cast<void>(std::remove("./test_data/upd_phys_rm.heap"));
    static_cast<void>(std::remove("./test_data/upd_phys_rm.idx"));
}

TEST(TransactionManagerTests, UndoUpdateUndoRemoveFailure) {
    // Exercise the FAULT_UNDO_REMOVE failure branch of the UPDATE undo path
    // (the old_rid restore step): same shape as the physical-remove test, but
    // the injected fault fires while restoring the pre-update tuple location.
    // The transaction must still end up ABORTED despite the injected failure.
    storage::StorageManager disk_manager("./test_data");
    disk_manager.create_dir_if_not_exists();
    recovery::LogManager log_mgr("./test_data/upd_undo_rm_fault.dat");
    storage::BufferPoolManager bpm(cloudsql::config::Config::DEFAULT_BUFFER_POOL_SIZE, disk_manager,
                                   &log_mgr);
    auto catalog = Catalog::create();
    LockManager lm;
    TransactionManager tm(lm, *catalog, bpm, &log_mgr);
    executor::QueryExecutor exec(*catalog, bpm, lm, tm);

    // Start from a clean slate in case a previous run left data files behind.
    static_cast<void>(std::remove("./test_data/upd_undo_rm.heap"));
    static_cast<void>(std::remove("./test_data/upd_undo_rm.idx"));

    // Set up: table + index + one committed row.
    static_cast<void>(
        exec.execute(*Parser(std::make_unique<Lexer>("CREATE TABLE upd_undo_rm (id INT, val INT)"))
                         .parse_statement()));
    static_cast<void>(
        exec.execute(*Parser(std::make_unique<Lexer>("CREATE INDEX idx_uur ON upd_undo_rm (val)"))
                         .parse_statement()));
    static_cast<void>(
        exec.execute(*Parser(std::make_unique<Lexer>("INSERT INTO upd_undo_rm VALUES (1, 100)"))
                         .parse_statement()));
    static_cast<void>(exec.execute(*Parser(std::make_unique<Lexer>("COMMIT")).parse_statement()));

    // UPDATE inside a fresh transaction, then roll it back with the fault armed.
    Transaction* txn = tm.begin();
    static_cast<void>(exec.execute(
        *Parser(std::make_unique<Lexer>("UPDATE upd_undo_rm SET val = 999 WHERE id = 1"))
            .parse_statement()));

    // Arm the fault so undo_remove() fails during the old_rid restore; abort()
    // must swallow the failure and still transition the transaction to ABORTED.
    cloudsql::common::FaultInjection::instance().set_fault(cloudsql::common::FAULT_UNDO_REMOVE);
    tm.abort(txn);
    EXPECT_EQ(txn->get_state(), TransactionState::ABORTED);
    cloudsql::common::FaultInjection::instance().clear();

    // Clean up on-disk artifacts so other tests see a fresh directory.
    static_cast<void>(std::remove("./test_data/upd_undo_rm.heap"));
    static_cast<void>(std::remove("./test_data/upd_undo_rm.idx"));
}

} // namespace
Loading