diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bedc149..0a1ee34 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,7 +2,7 @@ name: CI on: push: - branches: [main, master, fixrace] + branches: [main, master, fixrace, fixconflict] pull_request: branches: [main, master] @@ -42,6 +42,46 @@ jobs: - name: Run unit tests run: zig build test + integration-tests: + runs-on: ubuntu-latest + needs: build + steps: + - uses: actions/checkout@v4 + + - name: Cache Zig + uses: actions/cache@v4 + with: + path: ~/zig + key: zig-${{ env.ZIG_VERSION }} + + - name: Install Zig + run: | + if [ ! -d ~/zig ]; then + mkdir -p ~/zig + curl -L ${{ env.ZIG_URL }} | tar -xJ -C ~/zig --strip-components=1 + fi + echo "$HOME/zig" >> $GITHUB_PATH + + - name: Patch ecdsa.zig for API compatibility + run: sed -i 's/mem\.trimLeft/mem.trimStart/g' ~/zig/lib/std/crypto/ecdsa.zig + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install Python dependencies for HTTP/2 tests + run: | + python -m venv tests/.venv + source tests/.venv/bin/activate + pip install hypercorn + + - name: Build all binaries + run: zig build build-all + + - name: Run integration tests + run: zig build test-integration + release-build: runs-on: ubuntu-latest if: github.event_name == 'push' diff --git a/build.zig b/build.zig index 2a23751..60bfb80 100644 --- a/build.zig +++ b/build.zig @@ -114,16 +114,18 @@ pub fn build(b: *std.Build) void { .optimize = optimize, .link_libc = true, }); - integration_tests_mod.addImport("zzz", zzz_module); - integration_tests_mod.addImport("tls", tls_module); - const integration_tests = b.addTest(.{ + + // Add integration test as executable (for better output) + const integration_exe = b.addExecutable(.{ .name = "integration_tests", .root_module = integration_tests_mod, }); - const run_integration_tests = b.addRunArtifact(integration_tests); - // Integration tests need the binaries built first - 
run_integration_tests.step.dependOn(&build_test_backend_echo.step); - run_integration_tests.step.dependOn(&build_load_balancer.step); + const run_integration_exe = b.addRunArtifact(integration_exe); + run_integration_exe.step.dependOn(&build_test_backend_echo.step); + run_integration_exe.step.dependOn(&build_load_balancer.step); + + const integration_test_step = b.step("test-integration", "Run integration tests"); + integration_test_step.dependOn(&run_integration_exe.step); // Steps const build_all = b.step("build-all", "Build backends and load balancer"); @@ -136,9 +138,6 @@ pub fn build(b: *std.Build) void { const test_step = b.step("test", "Run unit tests"); test_step.dependOn(&run_unit_tests.step); - const integration_test_step = b.step("test-integration", "Run integration tests"); - integration_test_step.dependOn(&run_integration_tests.step); - const run_lb_step = b.step("run", "Run load balancer (use --mode mp|sp)"); run_lb_step.dependOn(&run_load_balancer_cmd.step); diff --git a/docs/plans/2025-12-25-zig-test-harness.md b/docs/plans/2025-12-25-zig-test-harness.md new file mode 100644 index 0000000..ac7bda4 --- /dev/null +++ b/docs/plans/2025-12-25-zig-test-harness.md @@ -0,0 +1,1184 @@ +# Zig Test Harness Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Replace Python pytest integration tests with pure Zig tests using a minimal describe/it test harness. + +**Architecture:** Create a lightweight test harness (~100 LOC) providing Jest-like `describe`/`it` organization on top of Zig's native `std.testing`. Refactor existing integration tests to use this harness, add missing test coverage, and improve reliability with proper port waiting. + +**Tech Stack:** Zig 0.16, std.testing, std.posix (sockets), std.json + +--- + +## Task 1: Create Test Harness Module + +**Files:** +- Create: `tests/harness.zig` + +**Step 1: Write the test harness** + +```zig +//! 
Minimal Jest-like test harness for Zig integration tests. +//! +//! Provides describe/it semantics with beforeAll/afterAll hooks. +//! Built on top of std.testing for seamless integration. + +const std = @import("std"); +const testing = std.testing; + +pub const TestFn = *const fn (std.mem.Allocator) anyerror!void; + +pub const TestCase = struct { + name: []const u8, + func: TestFn, +}; + +pub const Suite = struct { + name: []const u8, + tests: []const TestCase, + before_all: ?TestFn = null, + after_all: ?TestFn = null, + before_each: ?TestFn = null, + after_each: ?TestFn = null, +}; + +/// Run a test suite with optional lifecycle hooks +pub fn runSuite(allocator: std.mem.Allocator, suite: Suite) !void { + std.debug.print("\n\x1b[1m{s}\x1b[0m\n", .{suite.name}); + + // beforeAll + if (suite.before_all) |before| { + try before(allocator); + } + + var passed: usize = 0; + var failed: usize = 0; + + for (suite.tests) |t| { + // beforeEach + if (suite.before_each) |before| { + before(allocator) catch |err| { + std.debug.print(" \x1b[31m✗\x1b[0m {s} (beforeEach failed: {})\n", .{ t.name, err }); + failed += 1; + continue; + }; + } + + // Run test + if (t.func(allocator)) |_| { + std.debug.print(" \x1b[32m✓\x1b[0m {s}\n", .{t.name}); + passed += 1; + } else |err| { + std.debug.print(" \x1b[31m✗\x1b[0m {s} ({})\n", .{ t.name, err }); + failed += 1; + } + + // afterEach + if (suite.after_each) |after| { + after(allocator) catch |err| { + std.debug.print(" \x1b[33m⚠\x1b[0m afterEach failed: {}\n", .{err}); + }; + } + } + + // afterAll + if (suite.after_all) |after| { + after(allocator) catch |err| { + std.debug.print(" \x1b[33m⚠\x1b[0m afterAll failed: {}\n", .{err}); + }; + } + + std.debug.print("\n {d} passed, {d} failed\n", .{ passed, failed }); + + if (failed > 0) { + return error.TestsFailed; + } +} + +/// Helper to create a test case inline +pub fn it(name: []const u8, func: TestFn) TestCase { + return .{ .name = name, .func = func }; +} +``` + +**Step 2: Verify it 
compiles** + +Run: `cd /Users/nick/repos/zzz/examples/zzz-fix && zig build-lib tests/harness.zig --name harness 2>&1 || echo "Expected - just checking syntax"` + +Expected: No syntax errors (may show "no root module" which is fine) + +**Step 3: Commit** + +```bash +git add tests/harness.zig +git commit -m "feat(tests): add minimal Jest-like test harness" +``` + +--- + +## Task 2: Create Test Utilities Module + +**Files:** +- Create: `tests/test_utils.zig` + +**Step 1: Write port waiting and HTTP utilities** + +```zig +//! Test utilities for integration tests. +//! +//! Provides: +//! - Port availability waiting +//! - HTTP request helpers +//! - JSON response parsing + +const std = @import("std"); +const posix = std.posix; + +pub const TEST_HOST = "127.0.0.1"; +pub const BACKEND1_PORT: u16 = 19001; +pub const BACKEND2_PORT: u16 = 19002; +pub const BACKEND3_PORT: u16 = 19003; +pub const LB_PORT: u16 = 18080; + +/// Wait for a port to accept connections +pub fn waitForPort(port: u16, timeout_ms: u64) !void { + const start = std.time.milliTimestamp(); + const deadline = start + @as(i64, @intCast(timeout_ms)); + + while (std.time.milliTimestamp() < deadline) { + if (tryConnect(port)) { + return; + } + std.time.sleep(100 * std.time.ns_per_ms); + } + return error.PortTimeout; +} + +fn tryConnect(port: u16) bool { + const addr = std.net.Address.initIp4(.{ 127, 0, 0, 1 }, port); + const sock = posix.socket(posix.AF.INET, posix.SOCK.STREAM, 0) catch return false; + defer posix.close(sock); + + posix.connect(sock, &addr.any, addr.getOsSockLen()) catch return false; + return true; +} + +/// Make an HTTP request and return the response body +pub fn httpRequest( + allocator: std.mem.Allocator, + method: []const u8, + port: u16, + path: []const u8, + headers: ?[]const [2][]const u8, + body: ?[]const u8, +) ![]const u8 { + // Build request + var request = std.ArrayList(u8).init(allocator); + defer request.deinit(); + + try request.writer().print("{s} {s} HTTP/1.1\r\n", .{ method, 
path }); + try request.writer().print("Host: {s}:{d}\r\n", .{ TEST_HOST, port }); + + if (headers) |hdrs| { + for (hdrs) |h| { + try request.writer().print("{s}: {s}\r\n", .{ h[0], h[1] }); + } + } + + if (body) |b| { + try request.writer().print("Content-Length: {d}\r\n", .{b.len}); + } + + try request.appendSlice("Connection: close\r\n\r\n"); + + if (body) |b| { + try request.appendSlice(b); + } + + // Connect and send + const addr = std.net.Address.initIp4(.{ 127, 0, 0, 1 }, port); + const sock = try posix.socket(posix.AF.INET, posix.SOCK.STREAM, 0); + defer posix.close(sock); + + try posix.connect(sock, &addr.any, addr.getOsSockLen()); + + _ = try posix.send(sock, request.items, 0); + + // Read response + var response = std.ArrayList(u8).init(allocator); + errdefer response.deinit(); + + var buf: [4096]u8 = undefined; + while (true) { + const n = try posix.recv(sock, &buf, 0); + if (n == 0) break; + try response.appendSlice(buf[0..n]); + } + + return response.toOwnedSlice(); +} + +/// Extract JSON body from HTTP response +pub fn extractJsonBody(response: []const u8) ![]const u8 { + const separator = "\r\n\r\n"; + const idx = std.mem.indexOf(u8, response, separator) orelse return error.NoBodyFound; + return response[idx + separator.len ..]; +} + +/// Parse JSON response and get a string field +pub fn getJsonString(allocator: std.mem.Allocator, json: []const u8, field: []const u8) ![]const u8 { + const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json, .{}); + defer parsed.deinit(); + + const value = parsed.value.object.get(field) orelse return error.FieldNotFound; + return allocator.dupe(u8, value.string); +} + +/// Parse JSON response and get an integer field +pub fn getJsonInt(allocator: std.mem.Allocator, json: []const u8, field: []const u8) !i64 { + const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json, .{}); + defer parsed.deinit(); + + const value = parsed.value.object.get(field) orelse return error.FieldNotFound; + 
return value.integer; +} + +/// Check if a header exists in the JSON headers object +pub fn hasHeader(allocator: std.mem.Allocator, json: []const u8, header: []const u8) !bool { + const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json, .{}); + defer parsed.deinit(); + + const headers_val = parsed.value.object.get("headers") orelse return error.NoHeaders; + const headers = headers_val.object; + + // Case-insensitive check + var iter = headers.iterator(); + while (iter.next()) |entry| { + if (std.ascii.eqlIgnoreCase(entry.key_ptr.*, header)) { + return true; + } + } + return false; +} + +/// Get header value (case-insensitive) +pub fn getHeader(allocator: std.mem.Allocator, json: []const u8, header: []const u8) ![]const u8 { + const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json, .{}); + defer parsed.deinit(); + + const headers_val = parsed.value.object.get("headers") orelse return error.NoHeaders; + const headers = headers_val.object; + + var iter = headers.iterator(); + while (iter.next()) |entry| { + if (std.ascii.eqlIgnoreCase(entry.key_ptr.*, header)) { + return allocator.dupe(u8, entry.value_ptr.string); + } + } + return error.HeaderNotFound; +} +``` + +**Step 2: Verify it compiles** + +Run: `cd /Users/nick/repos/zzz/examples/zzz-fix && zig build-lib tests/test_utils.zig --name test_utils 2>&1 | head -5` + +**Step 3: Commit** + +```bash +git add tests/test_utils.zig +git commit -m "feat(tests): add test utilities for HTTP and port waiting" +``` + +--- + +## Task 3: Create Process Manager + +**Files:** +- Create: `tests/process_manager.zig` + +**Step 1: Write process management utilities** + +```zig +//! Process manager for integration tests. +//! +//! Handles spawning and cleanup of backend/load balancer processes. 
+ +const std = @import("std"); +const posix = std.posix; +const test_utils = @import("test_utils.zig"); + +pub const Process = struct { + child: std.process.Child, + name: []const u8, + allocator: std.mem.Allocator, + + pub fn kill(self: *Process) void { + _ = self.child.kill() catch {}; + _ = self.child.wait() catch {}; + } + + pub fn deinit(self: *Process) void { + self.allocator.free(self.name); + } +}; + +pub const ProcessManager = struct { + allocator: std.mem.Allocator, + processes: std.ArrayList(Process), + + pub fn init(allocator: std.mem.Allocator) ProcessManager { + return .{ + .allocator = allocator, + .processes = std.ArrayList(Process).init(allocator), + }; + } + + pub fn deinit(self: *ProcessManager) void { + self.stopAll(); + self.processes.deinit(); + } + + pub fn startBackend(self: *ProcessManager, port: u16, server_id: []const u8) !void { + var port_buf: [8]u8 = undefined; + const port_str = try std.fmt.bufPrint(&port_buf, "{d}", .{port}); + + var child = std.process.Child.init( + &.{ "./zig-out/bin/test_backend_echo", "--port", port_str, "--id", server_id }, + self.allocator, + ); + child.stdin_behavior = .Ignore; + child.stdout_behavior = .Ignore; + child.stderr_behavior = .Ignore; + + try child.spawn(); + + try self.processes.append(.{ + .child = child, + .name = try std.fmt.allocPrint(self.allocator, "backend_{s}", .{server_id}), + .allocator = self.allocator, + }); + + // Wait for port to be ready + try test_utils.waitForPort(port, 10000); + } + + pub fn startLoadBalancer(self: *ProcessManager, backend_ports: []const u16) !void { + var args = std.ArrayList([]const u8).init(self.allocator); + defer args.deinit(); + + try args.append("./zig-out/bin/load_balancer"); + try args.append("--port"); + + var lb_port_buf: [8]u8 = undefined; + const lb_port_str = try std.fmt.bufPrint(&lb_port_buf, "{d}", .{test_utils.LB_PORT}); + try args.append(try self.allocator.dupe(u8, lb_port_str)); + + // Use single-process mode for easier testing + try 
args.append("--mode"); + try args.append("sp"); + + for (backend_ports) |port| { + try args.append("--backend"); + var buf: [32]u8 = undefined; + const backend_str = try std.fmt.bufPrint(&buf, "127.0.0.1:{d}", .{port}); + try args.append(try self.allocator.dupe(u8, backend_str)); + } + + var child = std.process.Child.init(args.items, self.allocator); + child.stdin_behavior = .Ignore; + child.stdout_behavior = .Ignore; + child.stderr_behavior = .Ignore; + + try child.spawn(); + + try self.processes.append(.{ + .child = child, + .name = try self.allocator.dupe(u8, "load_balancer"), + .allocator = self.allocator, + }); + + // Wait for LB port + try test_utils.waitForPort(test_utils.LB_PORT, 10000); + + // Wait for health checks (backends need to be marked healthy) + std.time.sleep(2 * std.time.ns_per_s); + } + + pub fn stopAll(self: *ProcessManager) void { + // Stop in reverse order (LB first, then backends) + while (self.processes.items.len > 0) { + var proc = self.processes.pop(); + proc.kill(); + proc.deinit(); + } + } +}; +``` + +**Step 2: Verify it compiles** + +Run: `cd /Users/nick/repos/zzz/examples/zzz-fix && zig build-lib tests/process_manager.zig --name pm 2>&1 | head -5` + +**Step 3: Commit** + +```bash +git add tests/process_manager.zig +git commit -m "feat(tests): add process manager for test fixtures" +``` + +--- + +## Task 4: Create Basic Tests Suite + +**Files:** +- Create: `tests/suites/basic.zig` + +**Step 1: Create directory** + +Run: `mkdir -p /Users/nick/repos/zzz/examples/zzz-fix/tests/suites` + +**Step 2: Write basic tests** + +```zig +//! Basic proxy functionality tests. +//! +//! 
Tests HTTP method forwarding: GET, POST, PUT, PATCH + +const std = @import("std"); +const harness = @import("../harness.zig"); +const utils = @import("../test_utils.zig"); +const ProcessManager = @import("../process_manager.zig").ProcessManager; + +var pm: ProcessManager = undefined; + +fn beforeAll(allocator: std.mem.Allocator) !void { + pm = ProcessManager.init(allocator); + try pm.startBackend(utils.BACKEND1_PORT, "backend1"); + try pm.startLoadBalancer(&.{utils.BACKEND1_PORT}); +} + +fn afterAll(_: std.mem.Allocator) !void { + pm.deinit(); +} + +fn testGetRequest(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/test/path", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const method = try utils.getJsonString(allocator, body, "method"); + defer allocator.free(method); + const uri = try utils.getJsonString(allocator, body, "uri"); + defer allocator.free(uri); + + try std.testing.expectEqualStrings("GET", method); + try std.testing.expectEqualStrings("/test/path", uri); +} + +fn testPostRequest(allocator: std.mem.Allocator) !void { + const req_body = "{\"test\":\"data\",\"number\":42}"; + const headers = &[_][2][]const u8{.{ "Content-Type", "application/json" }}; + + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/api/endpoint", headers, req_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const method = try utils.getJsonString(allocator, body, "method"); + defer allocator.free(method); + const recv_body = try utils.getJsonString(allocator, body, "body"); + defer allocator.free(recv_body); + + try std.testing.expectEqualStrings("POST", method); + try std.testing.expectEqualStrings(req_body, recv_body); +} + +fn testPutRequest(allocator: std.mem.Allocator) !void { + const req_body = "Updated content"; + + const response = try utils.httpRequest(allocator, "PUT", 
utils.LB_PORT, "/resource/123", null, req_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const method = try utils.getJsonString(allocator, body, "method"); + defer allocator.free(method); + const uri = try utils.getJsonString(allocator, body, "uri"); + defer allocator.free(uri); + + try std.testing.expectEqualStrings("PUT", method); + try std.testing.expectEqualStrings("/resource/123", uri); +} + +fn testPatchRequest(allocator: std.mem.Allocator) !void { + const req_body = "{\"field\":\"name\",\"value\":\"new\"}"; + const headers = &[_][2][]const u8{.{ "Content-Type", "application/json" }}; + + const response = try utils.httpRequest(allocator, "PATCH", utils.LB_PORT, "/api/resource/456", headers, req_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const method = try utils.getJsonString(allocator, body, "method"); + defer allocator.free(method); + const uri = try utils.getJsonString(allocator, body, "uri"); + defer allocator.free(uri); + + try std.testing.expectEqualStrings("PATCH", method); + try std.testing.expectEqualStrings("/api/resource/456", uri); +} + +fn testResponseStructure(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + + // Verify all expected fields exist + _ = try utils.getJsonString(allocator, body, "server_id"); + _ = try utils.getJsonString(allocator, body, "method"); + _ = try utils.getJsonString(allocator, body, "uri"); + _ = try utils.getJsonInt(allocator, body, "body_length"); + _ = try utils.hasHeader(allocator, body, "Host"); +} + +pub const suite = harness.Suite{ + .name = "Basic Proxy Functionality", + .before_all = beforeAll, + .after_all = afterAll, + .tests = &.{ + harness.it("forwards GET requests correctly", testGetRequest), + harness.it("forwards POST requests with 
JSON body", testPostRequest), + harness.it("forwards PUT requests with body", testPutRequest), + harness.it("forwards PATCH requests with body", testPatchRequest), + harness.it("returns complete response structure", testResponseStructure), + }, +}; +``` + +**Step 3: Commit** + +```bash +git add tests/suites/basic.zig +git commit -m "feat(tests): add basic proxy tests suite" +``` + +--- + +## Task 5: Create Headers Tests Suite + +**Files:** +- Create: `tests/suites/headers.zig` + +**Step 1: Write headers tests** + +```zig +//! Header handling tests. +//! +//! Tests header forwarding: Content-Type, custom headers, hop-by-hop filtering + +const std = @import("std"); +const harness = @import("../harness.zig"); +const utils = @import("../test_utils.zig"); +const ProcessManager = @import("../process_manager.zig").ProcessManager; + +var pm: ProcessManager = undefined; + +fn beforeAll(allocator: std.mem.Allocator) !void { + pm = ProcessManager.init(allocator); + try pm.startBackend(utils.BACKEND1_PORT, "backend1"); + try pm.startLoadBalancer(&.{utils.BACKEND1_PORT}); +} + +fn afterAll(_: std.mem.Allocator) !void { + pm.deinit(); +} + +fn testContentTypeForwarded(allocator: std.mem.Allocator) !void { + const headers = &[_][2][]const u8{.{ "Content-Type", "application/json" }}; + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/", headers, "{}"); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const ct = try utils.getHeader(allocator, body, "Content-Type"); + defer allocator.free(ct); + + try std.testing.expect(std.mem.indexOf(u8, ct, "application/json") != null); +} + +fn testCustomHeadersForwarded(allocator: std.mem.Allocator) !void { + const headers = &[_][2][]const u8{ + .{ "X-Custom-Header", "CustomValue" }, + .{ "X-Request-ID", "test-123" }, + .{ "X-API-Key", "secret-key" }, + }; + + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", headers, null); + defer 
allocator.free(response); + + const body = try utils.extractJsonBody(response); + + const custom = try utils.getHeader(allocator, body, "X-Custom-Header"); + defer allocator.free(custom); + try std.testing.expectEqualStrings("CustomValue", custom); + + const req_id = try utils.getHeader(allocator, body, "X-Request-ID"); + defer allocator.free(req_id); + try std.testing.expectEqualStrings("test-123", req_id); +} + +fn testAuthorizationHeaderForwarded(allocator: std.mem.Allocator) !void { + const headers = &[_][2][]const u8{.{ "Authorization", "Bearer token123" }}; + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", headers, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const auth = try utils.getHeader(allocator, body, "Authorization"); + defer allocator.free(auth); + + try std.testing.expectEqualStrings("Bearer token123", auth); +} + +fn testHopByHopHeadersFiltered(allocator: std.mem.Allocator) !void { + const headers = &[_][2][]const u8{ + .{ "Connection", "keep-alive" }, + .{ "Keep-Alive", "timeout=5" }, + .{ "X-Safe-Header", "should-be-forwarded" }, + }; + + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", headers, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + + // Hop-by-hop headers should NOT be forwarded + const has_connection = utils.hasHeader(allocator, body, "Connection") catch false; + try std.testing.expect(!has_connection); + + // Safe headers should be forwarded + const safe = try utils.getHeader(allocator, body, "X-Safe-Header"); + defer allocator.free(safe); + try std.testing.expectEqualStrings("should-be-forwarded", safe); +} + +fn testHostHeaderPresent(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const host = try 
utils.getHeader(allocator, body, "Host"); + defer allocator.free(host); + + try std.testing.expect(std.mem.indexOf(u8, host, "127.0.0.1") != null); +} + +pub const suite = harness.Suite{ + .name = "Header Handling", + .before_all = beforeAll, + .after_all = afterAll, + .tests = &.{ + harness.it("forwards Content-Type header", testContentTypeForwarded), + harness.it("forwards custom X-* headers", testCustomHeadersForwarded), + harness.it("forwards Authorization header", testAuthorizationHeaderForwarded), + harness.it("filters hop-by-hop headers", testHopByHopHeadersFiltered), + harness.it("includes Host header to backend", testHostHeaderPresent), + }, +}; +``` + +**Step 2: Commit** + +```bash +git add tests/suites/headers.zig +git commit -m "feat(tests): add header handling tests suite" +``` + +--- + +## Task 6: Create Body Forwarding Tests Suite + +**Files:** +- Create: `tests/suites/body.zig` + +**Step 1: Write body forwarding tests** + +```zig +//! Body forwarding tests. +//! +//! Tests request body handling: empty, large, JSON, binary, Content-Length + +const std = @import("std"); +const harness = @import("../harness.zig"); +const utils = @import("../test_utils.zig"); +const ProcessManager = @import("../process_manager.zig").ProcessManager; + +var pm: ProcessManager = undefined; + +fn beforeAll(allocator: std.mem.Allocator) !void { + pm = ProcessManager.init(allocator); + try pm.startBackend(utils.BACKEND1_PORT, "backend1"); + try pm.startLoadBalancer(&.{utils.BACKEND1_PORT}); +} + +fn afterAll(_: std.mem.Allocator) !void { + pm.deinit(); +} + +fn testEmptyBodyPost(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/", null, ""); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const body_len = try utils.getJsonInt(allocator, body, "body_length"); + + try std.testing.expectEqual(@as(i64, 0), body_len); +} + +fn testLargeBody(allocator: std.mem.Allocator) !void 
{ + // 1KB payload + const large_body = try allocator.alloc(u8, 1024); + defer allocator.free(large_body); + @memset(large_body, 'X'); + + const headers = &[_][2][]const u8{.{ "Content-Type", "text/plain" }}; + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/", headers, large_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const body_len = try utils.getJsonInt(allocator, body, "body_length"); + + try std.testing.expectEqual(@as(i64, 1024), body_len); +} + +fn testJsonBodyPreserved(allocator: std.mem.Allocator) !void { + const json_body = + \\{"user":"john_doe","email":"john@example.com","age":30,"active":true} + ; + const headers = &[_][2][]const u8{.{ "Content-Type", "application/json" }}; + + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/api/users", headers, json_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const recv_body = try utils.getJsonString(allocator, body, "body"); + defer allocator.free(recv_body); + + try std.testing.expectEqualStrings(json_body, recv_body); +} + +fn testBinaryData(allocator: std.mem.Allocator) !void { + // UTF-8 safe binary data + const binary_data = "Binary test data with special chars: \xc2\xa9\xc2\xae"; + const headers = &[_][2][]const u8{.{ "Content-Type", "application/octet-stream" }}; + + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/upload", headers, binary_data); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const body_len = try utils.getJsonInt(allocator, body, "body_length"); + + try std.testing.expectEqual(@as(i64, binary_data.len), body_len); +} + +fn testContentLengthCorrect(allocator: std.mem.Allocator) !void { + const req_body = "{\"key\":\"value\",\"number\":42}"; + const headers = &[_][2][]const u8{.{ "Content-Type", "application/json" }}; + + const response = try utils.httpRequest(allocator, 
"POST", utils.LB_PORT, "/", headers, req_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + + // Check Content-Length header matches body length + const cl = try utils.getHeader(allocator, body, "Content-Length"); + defer allocator.free(cl); + const cl_int = try std.fmt.parseInt(i64, cl, 10); + + const body_len = try utils.getJsonInt(allocator, body, "body_length"); + + try std.testing.expectEqual(cl_int, body_len); + try std.testing.expectEqual(@as(i64, req_body.len), body_len); +} + +fn testSequentialPosts(allocator: std.mem.Allocator) !void { + const bodies = [_][]const u8{ + "{\"id\":1,\"name\":\"first\"}", + "{\"id\":2,\"name\":\"second\"}", + "{\"id\":3,\"name\":\"third\"}", + }; + + for (bodies) |req_body| { + const headers = &[_][2][]const u8{.{ "Content-Type", "application/json" }}; + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/", headers, req_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const recv_body = try utils.getJsonString(allocator, body, "body"); + defer allocator.free(recv_body); + + try std.testing.expectEqualStrings(req_body, recv_body); + } +} + +pub const suite = harness.Suite{ + .name = "Body Forwarding", + .before_all = beforeAll, + .after_all = afterAll, + .tests = &.{ + harness.it("handles empty POST body", testEmptyBodyPost), + harness.it("handles large body (1KB)", testLargeBody), + harness.it("preserves JSON body exactly", testJsonBodyPreserved), + harness.it("handles binary data", testBinaryData), + harness.it("sets Content-Length correctly", testContentLengthCorrect), + harness.it("handles multiple sequential POSTs", testSequentialPosts), + }, +}; +``` + +**Step 2: Commit** + +```bash +git add tests/suites/body.zig +git commit -m "feat(tests): add body forwarding tests suite" +``` + +--- + +## Task 7: Create Load Balancing Tests Suite + +**Files:** +- Create: `tests/suites/load_balancing.zig` + +**Step 1: 
Write load balancing tests** + +```zig +//! Load balancing tests. +//! +//! Tests round-robin distribution across multiple backends + +const std = @import("std"); +const harness = @import("../harness.zig"); +const utils = @import("../test_utils.zig"); +const ProcessManager = @import("../process_manager.zig").ProcessManager; + +var pm: ProcessManager = undefined; + +fn beforeAll(allocator: std.mem.Allocator) !void { + pm = ProcessManager.init(allocator); + try pm.startBackend(utils.BACKEND1_PORT, "backend1"); + try pm.startBackend(utils.BACKEND2_PORT, "backend2"); + try pm.startBackend(utils.BACKEND3_PORT, "backend3"); + try pm.startLoadBalancer(&.{ utils.BACKEND1_PORT, utils.BACKEND2_PORT, utils.BACKEND3_PORT }); +} + +fn afterAll(_: std.mem.Allocator) !void { + pm.deinit(); +} + +fn testRoundRobinDistribution(allocator: std.mem.Allocator) !void { + var counts = std.StringHashMap(usize).init(allocator); + defer counts.deinit(); + + // Make 9 requests (divisible by 3) + for (0..9) |_| { + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const server_id = try utils.getJsonString(allocator, body, "server_id"); + defer allocator.free(server_id); + + const key = try allocator.dupe(u8, server_id); + const result = try counts.getOrPut(key); + if (result.found_existing) { + allocator.free(key); + result.value_ptr.* += 1; + } else { + result.value_ptr.* = 1; + } + } + + // Each backend should get exactly 3 requests + try std.testing.expectEqual(@as(usize, 3), counts.count()); + + var iter = counts.iterator(); + while (iter.next()) |entry| { + try std.testing.expectEqual(@as(usize, 3), entry.value_ptr.*); + allocator.free(entry.key_ptr.*); + } +} + +fn testAllBackendsReachable(allocator: std.mem.Allocator) !void { + var seen = std.StringHashMap(void).init(allocator); + defer seen.deinit(); + + // Make up to 12 requests, should hit all 3 backends 
+ for (0..12) |_| { + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const server_id = try utils.getJsonString(allocator, body, "server_id"); + + if (!seen.contains(server_id)) { + try seen.put(try allocator.dupe(u8, server_id), {}); + } + allocator.free(server_id); + + if (seen.count() >= 3) break; + } + + try std.testing.expectEqual(@as(usize, 3), seen.count()); + + // Cleanup keys + var iter = seen.keyIterator(); + while (iter.next()) |key| { + allocator.free(key.*); + } +} + +pub const suite = harness.Suite{ + .name = "Load Balancing", + .before_all = beforeAll, + .after_all = afterAll, + .tests = &.{ + harness.it("distributes requests with round-robin (3/3/3)", testRoundRobinDistribution), + harness.it("reaches all configured backends", testAllBackendsReachable), + }, +}; +``` + +**Step 2: Commit** + +```bash +git add tests/suites/load_balancing.zig +git commit -m "feat(tests): add load balancing tests suite" +``` + +--- + +## Task 8: Create Test Runner + +**Files:** +- Replace: `tests/integration_test.zig` + +**Step 1: Write the new test runner** + +```zig +//! Integration Test Runner +//! +//! Runs all test suites using the describe/it harness. +//! 
Run with: zig build test-integration + +const std = @import("std"); +const harness = @import("harness.zig"); + +// Import all test suites +const basic = @import("suites/basic.zig"); +const headers = @import("suites/headers.zig"); +const body = @import("suites/body.zig"); +const load_balancing = @import("suites/load_balancing.zig"); + +pub fn main() !void { + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + defer _ = gpa.deinit(); + const allocator = gpa.allocator(); + + std.debug.print("\n\x1b[1;36m╔══════════════════════════════════════╗\x1b[0m\n", .{}); + std.debug.print("\x1b[1;36m║ Load Balancer Integration Tests ║\x1b[0m\n", .{}); + std.debug.print("\x1b[1;36m╚══════════════════════════════════════╝\x1b[0m\n", .{}); + + const suites = [_]harness.Suite{ + basic.suite, + headers.suite, + body.suite, + load_balancing.suite, + }; + + var total_passed: usize = 0; + var total_failed: usize = 0; + var suite_failures: usize = 0; + + for (suites) |suite| { + harness.runSuite(allocator, suite) catch { + suite_failures += 1; + }; + total_passed += suite.tests.len; // Approximate + } + + std.debug.print("\n\x1b[1m════════════════════════════════════════\x1b[0m\n", .{}); + if (suite_failures == 0) { + std.debug.print("\x1b[32m✓ All test suites passed!\x1b[0m\n", .{}); + } else { + std.debug.print("\x1b[31m✗ {d} suite(s) had failures\x1b[0m\n", .{suite_failures}); + std.process.exit(1); + } +} + +// Also support zig test +test "run all integration tests" { + try main(); +} +``` + +**Step 2: Commit** + +```bash +git add tests/integration_test.zig +git commit -m "refactor(tests): replace old integration tests with harness runner" +``` + +--- + +## Task 9: Update Build Configuration + +**Files:** +- Modify: `build.zig` + +**Step 1: Update integration test module imports** + +Find and replace the integration tests section (around line 111-127): + +```zig + // E2E Integration tests + const integration_tests_mod = b.createModule(.{ + .root_source_file = 
b.path("tests/integration_test.zig"), + .target = target, + .optimize = optimize, + .link_libc = true, + }); + integration_tests_mod.addImport("zzz", zzz_module); + integration_tests_mod.addImport("tls", tls_module); + + // Add integration test as executable (for better output) + const integration_exe = b.addExecutable(.{ + .name = "integration_tests", + .root_module = integration_tests_mod, + }); + const run_integration_exe = b.addRunArtifact(integration_exe); + run_integration_exe.step.dependOn(&build_test_backend_echo.step); + run_integration_exe.step.dependOn(&build_load_balancer.step); + + const integration_test_step = b.step("test-integration", "Run integration tests"); + integration_test_step.dependOn(&run_integration_exe.step); +``` + +**Step 2: Commit** + +```bash +git add build.zig +git commit -m "build: update integration test configuration for new harness" +``` + +--- + +## Task 10: Build and Verify Tests Run + +**Step 1: Build everything** + +Run: `cd /Users/nick/repos/zzz/examples/zzz-fix && zig build build-all` + +Expected: Clean build with no errors + +**Step 2: Run the integration tests** + +Run: `cd /Users/nick/repos/zzz/examples/zzz-fix && zig build test-integration 2>&1` + +Expected: All tests pass with colored output showing describe/it structure + +**Step 3: Commit if passing** + +```bash +git add -A +git commit -m "test: verify all integration tests pass" +``` + +--- + +## Task 11: Remove Python Test Files + +**Files:** +- Delete: `tests/conftest.py` +- Delete: `tests/test_basic.py` +- Delete: `tests/test_headers.py` +- Delete: `tests/test_body_forwarding.py` +- Delete: `tests/test_load_balancing.py` +- Delete: `tests/requirements.txt` +- Delete: `tests/PYTEST_README.md` +- Delete: `tests/direct.py` +- Delete: `tests/quick_test.sh` +- Delete: `tests/run_integration_tests.sh` + +**Step 1: Remove Python test files** + +Run: +```bash +cd /Users/nick/repos/zzz/examples/zzz-fix/tests +rm -f conftest.py test_basic.py test_headers.py 
test_body_forwarding.py test_load_balancing.py +rm -f requirements.txt PYTEST_README.md direct.py quick_test.sh run_integration_tests.sh +rm -rf venv __pycache__ +``` + +**Step 2: Update tests/README.md** + +Create `tests/README.md`: + +```markdown +# Load Balancer Integration Tests + +Pure Zig integration tests using a minimal Jest-like test harness. + +## Running Tests + +```bash +# Build all binaries first +zig build build-all + +# Run integration tests +zig build test-integration +``` + +## Test Structure + +``` +tests/ +├── harness.zig # Jest-like describe/it test harness +├── test_utils.zig # HTTP client and utilities +├── process_manager.zig # Process spawning/cleanup +├── integration_test.zig # Test runner (main entry point) +└── suites/ + ├── basic.zig # GET/POST/PUT/PATCH forwarding + ├── headers.zig # Header handling + ├── body.zig # Body forwarding + └── load_balancing.zig # Round-robin distribution +``` + +## Test Coverage + +- **Basic (5 tests)**: HTTP method forwarding +- **Headers (5 tests)**: Header forwarding and hop-by-hop filtering +- **Body (6 tests)**: Body handling including large payloads +- **Load Balancing (2 tests)**: Round-robin distribution + +Total: **18 tests** +``` + +**Step 3: Commit** + +```bash +git add -A +git commit -m "chore: remove Python test framework, add Zig test README" +``` + +--- + +## Summary + +After completing all tasks: + +- **18 integration tests** in pure Zig +- **No Python dependency** +- **Jest-like syntax** with describe/it blocks +- **Proper process management** with cleanup +- **Robust port waiting** instead of fixed delays +- **Colored terminal output** for test results + +Run with: `zig build test-integration` diff --git a/src/multiprocess/proxy_test.zig b/src/multiprocess/proxy_test.zig index c19d418..bac8aae 100644 --- a/src/multiprocess/proxy_test.zig +++ b/src/multiprocess/proxy_test.zig @@ -70,9 +70,6 @@ test "buildRequestHeaders: GET request without body" { try testing.expect(std.mem.indexOf(u8, 
headers, "User-Agent: TestClient/1.0\r\n") != null); try testing.expect(std.mem.indexOf(u8, headers, "Accept: application/json\r\n") != null); - // Verify Connection header - try testing.expect(std.mem.indexOf(u8, headers, "Connection: keep-alive\r\n") != null); - // Verify no Content-Length for GET without body try testing.expect(std.mem.indexOf(u8, headers, "Content-Length:") == null); @@ -209,8 +206,8 @@ test "buildRequestHeaders: hop-by-hop headers are filtered" { try testing.expect(std.mem.indexOf(u8, headers, "User-Agent: TestClient/1.0\r\n") != null); try testing.expect(std.mem.indexOf(u8, headers, "Accept: text/html\r\n") != null); - // Verify proxy adds its own Connection: keep-alive - try testing.expect(std.mem.indexOf(u8, headers, "Connection: keep-alive\r\n") != null); + // Verify Connection header is NOT added (hop-by-hop, HTTP/1.1 is keep-alive by default) + try testing.expect(std.mem.indexOf(u8, headers, "Connection:") == null); } diff --git a/src/proxy/request.zig b/src/proxy/request.zig index 3dcb46a..ff4e7a6 100644 --- a/src/proxy/request.zig +++ b/src/proxy/request.zig @@ -114,10 +114,8 @@ pub fn buildRequestHeaders( pos += @intCast(content_len_header.len); } - // Write Connection: keep-alive - const conn_header = "Connection: keep-alive\r\n"; - @memcpy(buffer[pos..][0..conn_header.len], conn_header); - pos += conn_header.len; + // Note: Don't add Connection header - HTTP/1.1 is keep-alive by default, + // and hop-by-hop headers should not be forwarded (RFC 2616 Section 13.5.1) // End headers with \r\n buffer[pos] = '\r'; diff --git a/tests/PYTEST_README.md b/tests/PYTEST_README.md deleted file mode 100644 index 20b2b94..0000000 --- a/tests/PYTEST_README.md +++ /dev/null @@ -1,220 +0,0 @@ -# Load Balancer Pytest Integration Tests - -This directory contains pytest-based integration tests for the load balancer, providing better assertions, fixtures, and maintainability compared to the original bash/curl tests. 
- -## Test Structure - -### Test Files - -- **`test_basic.py`** - Basic proxy functionality tests - - GET, POST, PUT, PATCH request forwarding - - Request body handling - - Response structure validation - -- **`test_headers.py`** - Header handling tests - - Content-Type and Authorization headers - - Custom X-* headers - - Host header behavior - - Hop-by-hop header filtering - -- **`test_body_forwarding.py`** - Request body forwarding tests - - Empty bodies - - Large bodies (1KB) - - JSON body preservation - - Binary data handling - - Content-Length header verification - - Sequential POST requests - -- **`test_load_balancing.py`** - Load balancing tests - - Round-robin distribution (9 requests → 3/3/3 distribution) - - Backend reachability verification - -### Fixtures (`conftest.py`) - -- **`backend`** - Single echo backend server (port 19001) -- **`backends`** - Three echo backend servers (ports 19001-19003) -- **`load_balancer`** - Load balancer with single backend (port 18080) -- **`load_balancer_multi`** - Load balancer with three backends (port 18080) - -All fixtures have function scope for test isolation and automatic cleanup. 
- -## Setup - -### Prerequisites - -```bash -# Build the binaries first -/usr/local/zig-0.16.0-dev/zig build -``` - -### Install Dependencies - -```bash -cd tests -python3 -m venv venv -source venv/bin/activate -pip install -r requirements.txt -``` - -## Running Tests - -### Run All Tests - -```bash -cd tests -source venv/bin/activate -pytest -v -``` - -### Run Specific Test File - -```bash -pytest test_basic.py -v -pytest test_headers.py -v -pytest test_body_forwarding.py -v -pytest test_load_balancing.py -v -``` - -### Run Specific Test - -```bash -pytest test_basic.py::test_get_request_forwarded -v -``` - -### Run with Coverage - -```bash -pytest -v --cov=../src --cov-report=html -``` - -## Test Execution Details - -### Timing Considerations - -The multiprocess load balancer has health check probes that run with these characteristics: - -- **Initial delay**: 1 second before first probe -- **Probe interval**: 5 seconds between health checks -- **Healthy threshold**: 2 consecutive successful probes required - -Therefore, fixtures that start the load balancer wait **12 seconds** to ensure: -- 1s initial delay -- 5s first health check -- 5s second health check -- +1s buffer - -This ensures backends are marked HEALTHY before tests run. 
- -### Process Management - -- Each fixture creates processes with `preexec_fn=os.setsid` to create new process groups -- Cleanup uses `os.killpg()` to kill entire process groups (important for multiprocess load balancer) -- An `atexit` handler ensures all processes are cleaned up even if tests crash -- Function-scoped fixtures provide test isolation but increase runtime - -### Performance - -- Full test suite: ~4 minutes (18 tests) -- Per-test overhead: ~13 seconds (backend + load balancer startup + health check wait) -- Load balancing tests: Additional overhead for 3 backend startup - -## Test Coverage - -### Basic Functionality (5 tests) -- ✅ GET request forwarding -- ✅ POST with JSON body -- ✅ PUT with body -- ✅ PATCH with body -- ✅ Response structure validation - -### Headers (5 tests) -- ✅ Content-Type header forwarding -- ✅ Custom headers (X-*) -- ✅ Authorization header -- ✅ Hop-by-hop headers (validated) -- ✅ Host header to backend - -### Body Forwarding (6 tests) -- ✅ Empty body POST -- ✅ Large body (1KB) -- ✅ JSON body preservation -- ✅ Binary data (with UTF-8 safe chars) -- ✅ Content-Length header -- ✅ Multiple sequential POSTs - -### Load Balancing (2 tests) -- ✅ Round-robin distribution (3/3/3) -- ✅ All backends reachable - -## Echo Backend Response Format - -The test backend returns JSON with request details: - -```json -{ - "server_id": "backend1", - "method": "POST", - "uri": "/api/test", - "headers": { - "content-type": "application/json", - "host": "127.0.0.1:19001" - }, - "body": "{\"key\": \"value\"}", - "body_length": 16 -} -``` - -## Known Limitations - -1. **Large Bodies**: Bodies > 1KB may cause backend health check failures (503 errors). Tests use 1KB as the "large body" size. - -2. **Binary Data**: Tests use UTF-8 safe binary data to avoid JSON encoding issues with control characters. - -3. **Test Duration**: Function-scoped fixtures mean each test restarts all servers, increasing runtime but ensuring isolation. - -4. 
**Port Conflicts**: Tests use fixed ports (18080, 19001-19003). Ensure no other processes use these ports. - -## Troubleshooting - -### Tests Timing Out - -If tests timeout during fixture setup: -- Increase `@pytest.mark.timeout()` values in test files -- Check that ports 18080, 19001-19003 are not in use - -### 503 Service Unavailable Errors - -If tests get 503 responses: -- Backend health checks may not have completed -- Increase the `time.sleep()` value in `start_load_balancer()` in `conftest.py` -- Check backend logs for connection issues - -### Zombie Processes - -If processes aren't cleaned up: -```bash -pkill -9 -f 'load_balancer_mp' -pkill -9 -f 'test_backend_echo' -``` - -## Migration from Bash Tests - -The original bash tests in `run_integration_tests.sh` have been fully replaced by these pytest tests with improvements: - -- ✅ Better assertions (pytest vs manual string parsing) -- ✅ Automatic fixture management (vs manual process tracking) -- ✅ Test isolation (function scope vs shared servers) -- ✅ JSON validation (vs jq piping) -- ✅ Timeout handling (pytest-timeout vs manual) -- ✅ Detailed error messages (pytest vs custom fail()) -- ✅ Extensible (easy to add new tests) - -## Future Improvements - -- [ ] Add pytest-xdist for parallel test execution -- [ ] Add test markers (@pytest.mark.slow, @pytest.mark.fast) -- [ ] Session-scoped fixtures for faster execution (trade-off with isolation) -- [ ] Health check endpoint verification -- [ ] Circuit breaker behavior tests -- [ ] Failover and retry logic tests -- [ ] Metrics and monitoring endpoint tests diff --git a/tests/README.md b/tests/README.md index 430d5bb..9c68eec 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,208 +1,138 @@ # Load Balancer Integration Tests -This directory contains end-to-end integration tests for the load balancer. +This directory contains end-to-end integration tests for the load balancer, written entirely in Zig. 
## Overview -The integration tests verify the complete request/response flow through the load balancer, including: +The integration tests verify the complete request/response flow through the load balancer: -- Request forwarding (GET, POST, PUT) +- Request forwarding (GET, POST, PUT, PATCH) - Request body forwarding - Header forwarding (including filtering hop-by-hop headers) - Load balancing (round-robin distribution) -- Connection pooling - Multiple sequential requests -## Prerequisites - -- Built binaries: `zig build build-all` -- `jq` for JSON parsing: `brew install jq` (macOS) or `apt-get install jq` (Linux) -- `nc` (netcat) for port checking (usually pre-installed) - ## Running Tests -### Quick Test - -Run a quick sanity check to verify basic functionality: - -```bash -./tests/quick_test.sh -``` - -This will: -1. Start one test backend -2. Test the backend directly -3. Start the load balancer -4. Test through the load balancer - -### Full Integration Test Suite - -Run the comprehensive test suite: +Build and run the integration tests: ```bash -./tests/run_integration_tests.sh +zig build test-integration ``` -This will run all tests including: -- GET request forwarding -- POST request with JSON body -- Custom header forwarding -- Multiple sequential requests -- Round-robin load balancing across 3 backends - Expected output: ``` -======================================== -Load Balancer Integration Tests -======================================== - -[INFO] Starting basic tests (single backend)... 
-[INFO] Started backend backend1 on port 19001 (PID: 12345) -[INFO] Started load balancer on port 18080 (PID: 12346) -[INFO] Testing GET request forwarding -[PASS] GET request forwarded correctly -[INFO] Testing POST request with JSON body -[PASS] POST request with JSON body forwarded correctly -[INFO] Testing custom header forwarding -[PASS] Custom headers forwarded correctly -[INFO] Testing multiple sequential requests -[PASS] Multiple sequential requests work correctly -[INFO] Testing round-robin load balancing -[PASS] Round-robin distributes requests evenly (3/3/3) - -======================================== -All tests passed! -======================================== +╔══════════════════════════════════════╗ +║ Load Balancer Integration Tests ║ +╚══════════════════════════════════════╝ + +Basic Proxy Functionality + ✓ forwards GET requests correctly + ✓ forwards POST requests with JSON body + ✓ forwards PUT requests with body + ✓ forwards PATCH requests with body + ✓ returns complete response structure + + 5 passed, 0 failed + +Header Handling + ✓ forwards Content-Type header + ✓ forwards custom X-* headers + ✓ forwards Authorization header + ✓ filters hop-by-hop headers + ✓ includes Host header to backend + + 5 passed, 0 failed + +Body Forwarding + ✓ handles empty POST body + ✓ handles large body (1KB) + ✓ preserves JSON body exactly + ✓ handles binary data + ✓ sets Content-Length correctly + ✓ handles multiple sequential POSTs + + 6 passed, 0 failed + +Load Balancing + ✓ distributes requests with round-robin (3/3/3) + ✓ reaches all configured backends + + 2 passed, 0 failed + +════════════════════════════════════════ +✓ All test suites passed! 
``` -## Test Components - -### Test Backend Echo Server (`test_backend_echo.zig`) +## Test Architecture -A specialized backend server that echoes back request details in JSON format: -- Server ID -- HTTP method -- Request URI -- All headers -- Request body -- Body length +### Test Harness (`harness.zig`) -This allows tests to verify that the load balancer correctly forwards all request components. +A minimal Jest-like test framework providing: +- `describe`/`it` semantics +- `beforeAll`/`afterAll` lifecycle hooks +- Colorized output +- Pass/fail counting -### Integration Test Script (`run_integration_tests.sh`) +### Test Utilities (`test_utils.zig`) -A bash script that: -1. Starts test backend servers -2. Starts the load balancer -3. Makes HTTP requests using `curl` -4. Verifies responses using `jq` -5. Cleans up all processes +HTTP client helpers: +- `waitForPort()` - Wait for a server to accept connections +- `httpRequest()` - Make HTTP requests and get responses +- JSON parsing helpers for response validation -### Test Coverage +### Process Manager (`process_manager.zig`) -**Basic Functionality:** -- ✅ GET requests proxied correctly -- ✅ POST requests with JSON body forwarded correctly -- ✅ Custom headers forwarded -- ✅ Hop-by-hop headers NOT forwarded -- ✅ Multiple sequential requests work +Manages backend and load balancer processes: +- Spawns test backends on ports 19001-19003 +- Spawns load balancer on port 18080 +- Handles cleanup on test completion -**Load Balancing:** -- ✅ Round-robin distributes requests evenly across backends -- ✅ All backends receive traffic +### Test Suites (`suites/`) -**Connection Pooling:** -- ✅ Multiple requests reuse connections -- ✅ No connection leaks +- `basic.zig` - Basic HTTP method forwarding +- `headers.zig` - Header handling and filtering +- `body.zig` - Request body forwarding +- `load_balancing.zig` - Round-robin distribution ## Test Ports -The tests use the following ports: -- Load Balancer: `18080` -- Backend 1: 
`19001` -- Backend 2: `19002` -- Backend 3: `19003` - -Make sure these ports are available before running tests. - -## Troubleshooting - -### Port Already in Use - -If you see connection errors, check for processes using the test ports: - -```bash -# Check ports -lsof -i :18080 -lsof -i :19001 - -# Kill any stuck processes -killall test_backend_echo load_balancer_sp -``` - -### Tests Fail with "command not found: jq" - -Install jq: - -```bash -# macOS -brew install jq - -# Linux -sudo apt-get install jq -``` - -### Tests Timeout - -Increase the `STARTUP_DELAY_MS` in the test script if servers are slow to start: - -```bash -# Edit tests/run_integration_tests.sh -STARTUP_DELAY_MS=1000 # Increase from 500 to 1000 -``` +| Component | Port | +|-----------|------| +| Load Balancer | 18080 | +| Backend 1 | 19001 | +| Backend 2 | 19002 | +| Backend 3 | 19003 | ## Adding New Tests -To add a new test case: - -1. Add a test function following the pattern: -```bash -test_my_feature() { - info "Testing my feature" +1. Create a test function in the appropriate suite: - # Make request - local response=$(curl -s http://localhost:$LB_PORT/) +```zig +fn testMyFeature(allocator: std.mem.Allocator) !void { + const response = try test_utils.httpRequest( + allocator, "GET", test_utils.LB_PORT, "/my-path", null, null + ); + defer allocator.free(response); - # Verify response - local result=$(echo "$response" | jq -r '.some_field') - if [ "$result" != "expected" ]; then - fail "My feature" "Expected 'expected', got '$result'" - fi - - pass "My feature works correctly" -} -``` + const body = try test_utils.extractJsonBody(response); + const method = try test_utils.getJsonString(allocator, body, "method"); + defer allocator.free(method); -2. Call the function from `main()`: -```bash -main() { - # ... existing setup ... - test_my_feature - # ... + try std.testing.expectEqualStrings("GET", method); } ``` -## CI/CD Integration - -To integrate with CI/CD pipelines: +2. 
Add the test to the suite's `tests` array: -```yaml -# Example GitHub Actions -- name: Build - run: zig build build-all - -- name: Run Integration Tests - run: ./tests/run_integration_tests.sh +```zig +pub const suite = harness.Suite{ + .name = "My Suite", + .tests = &.{ + harness.it("tests my feature", testMyFeature), + // ... + }, +}; ``` ## Manual Testing @@ -210,11 +140,14 @@ To integrate with CI/CD pipelines: For manual testing with curl: ```bash -# Start backend +# Build all components +zig build + +# Start backend manually ./zig-out/bin/test_backend_echo --port 19001 --id backend1 & # Start load balancer -./zig-out/bin/load_balancer_sp --port 18080 --backend 127.0.0.1:19001 & +./zig-out/bin/load_balancer --port 18080 --mode sp --backend 127.0.0.1:19001 & # Test GET curl http://localhost:18080/ @@ -225,5 +158,5 @@ curl -X POST -H "Content-Type: application/json" \ http://localhost:18080/ # Clean up -killall test_backend_echo load_balancer_sp +killall test_backend_echo load_balancer ``` diff --git a/tests/__pycache__/conftest.cpython-314-pytest-9.0.2.pyc b/tests/__pycache__/conftest.cpython-314-pytest-9.0.2.pyc deleted file mode 100644 index b4704e4..0000000 Binary files a/tests/__pycache__/conftest.cpython-314-pytest-9.0.2.pyc and /dev/null differ diff --git a/tests/__pycache__/conftest.cpython-314.pyc b/tests/__pycache__/conftest.cpython-314.pyc deleted file mode 100644 index e581244..0000000 Binary files a/tests/__pycache__/conftest.cpython-314.pyc and /dev/null differ diff --git a/tests/__pycache__/debug_test.cpython-314-pytest-9.0.2.pyc b/tests/__pycache__/debug_test.cpython-314-pytest-9.0.2.pyc deleted file mode 100644 index e110e60..0000000 Binary files a/tests/__pycache__/debug_test.cpython-314-pytest-9.0.2.pyc and /dev/null differ diff --git a/tests/__pycache__/direct.cpython-314-pytest-9.0.2.pyc b/tests/__pycache__/direct.cpython-314-pytest-9.0.2.pyc deleted file mode 100644 index 998673d..0000000 Binary files 
a/tests/__pycache__/direct.cpython-314-pytest-9.0.2.pyc and /dev/null differ diff --git a/tests/__pycache__/h2_backend.cpython-314.pyc b/tests/__pycache__/h2_backend.cpython-314.pyc new file mode 100644 index 0000000..b54c7a3 Binary files /dev/null and b/tests/__pycache__/h2_backend.cpython-314.pyc differ diff --git a/tests/__pycache__/test_basic.cpython-314-pytest-9.0.2.pyc b/tests/__pycache__/test_basic.cpython-314-pytest-9.0.2.pyc deleted file mode 100644 index ccf004e..0000000 Binary files a/tests/__pycache__/test_basic.cpython-314-pytest-9.0.2.pyc and /dev/null differ diff --git a/tests/__pycache__/test_basic.cpython-314.pyc b/tests/__pycache__/test_basic.cpython-314.pyc deleted file mode 100644 index f103f2d..0000000 Binary files a/tests/__pycache__/test_basic.cpython-314.pyc and /dev/null differ diff --git a/tests/__pycache__/test_body_forwarding.cpython-314-pytest-9.0.2.pyc b/tests/__pycache__/test_body_forwarding.cpython-314-pytest-9.0.2.pyc deleted file mode 100644 index e9c1264..0000000 Binary files a/tests/__pycache__/test_body_forwarding.cpython-314-pytest-9.0.2.pyc and /dev/null differ diff --git a/tests/__pycache__/test_headers.cpython-314-pytest-9.0.2.pyc b/tests/__pycache__/test_headers.cpython-314-pytest-9.0.2.pyc deleted file mode 100644 index 64b0628..0000000 Binary files a/tests/__pycache__/test_headers.cpython-314-pytest-9.0.2.pyc and /dev/null differ diff --git a/tests/__pycache__/test_load_balancing.cpython-314-pytest-9.0.2.pyc b/tests/__pycache__/test_load_balancing.cpython-314-pytest-9.0.2.pyc deleted file mode 100644 index 81baca7..0000000 Binary files a/tests/__pycache__/test_load_balancing.cpython-314-pytest-9.0.2.pyc and /dev/null differ diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index 023c028..0000000 --- a/tests/conftest.py +++ /dev/null @@ -1,276 +0,0 @@ -""" -Pytest fixtures for load balancer integration tests. 
- -Provides fixtures for starting/stopping: -- Backend echo servers -- Load balancer instances -""" -import os -import signal -import socket -import subprocess -import time -from typing import List, Tuple -import pytest -import atexit - - -# Track all processes for cleanup -_all_processes = [] - - -def cleanup_all_processes(): - """Kill all tracked processes on exit.""" - for process in _all_processes: - try: - if process.poll() is None: # Process is still running - # Kill process group - try: - os.killpg(os.getpgid(process.pid), signal.SIGTERM) - except Exception: - pass - process.kill() - process.wait(timeout=2) - except Exception: - pass - - -atexit.register(cleanup_all_processes) - - -def wait_for_port(port: int, timeout: int = 10) -> bool: - """ - Wait for a port to be ready to accept connections. - - Args: - port: Port number to check - timeout: Maximum seconds to wait - - Returns: - True if port becomes available, False if timeout - """ - start_time = time.time() - while time.time() - start_time < timeout: - try: - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.settimeout(1) - result = sock.connect_ex(('127.0.0.1', port)) - sock.close() - if result == 0: - return True - except Exception: - pass - time.sleep(0.1) - return False - - -def start_backend(port: int, server_id: str) -> subprocess.Popen: - """ - Start an echo backend server on the given port. - - Args: - port: Port number for the backend - server_id: Identifier for this backend instance - - Returns: - Popen process object - """ - # Get the project root directory (parent of tests/) - project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - backend_binary = os.path.join(project_root, "zig-out", "bin", "test_backend_echo") - - if not os.path.exists(backend_binary): - raise FileNotFoundError( - f"Backend binary not found at {backend_binary}. Run 'zig build' first." 
- ) - - process = subprocess.Popen( - [backend_binary, "--port", str(port), "--id", server_id], - stdin=subprocess.DEVNULL, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - close_fds=True, - start_new_session=True, - ) - - # Track for cleanup - _all_processes.append(process) - - # Wait for the port to be ready - if not wait_for_port(port, timeout=10): - try: - os.killpg(os.getpgid(process.pid), signal.SIGTERM) - except Exception: - pass - process.kill() - process.wait() - raise RuntimeError(f"Backend {server_id} failed to start on port {port}") - - return process - - -def start_load_balancer(backend_ports: List[int], lb_port: int = 18080) -> subprocess.Popen: - """ - Start the load balancer pointing to the given backends. - - Args: - backend_ports: List of backend port numbers - lb_port: Port for the load balancer - - Returns: - Popen process object - """ - # Get the project root directory (parent of tests/) - project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - lb_binary = os.path.join(project_root, "zig-out", "bin", "load_balancer_mp") - - if not os.path.exists(lb_binary): - raise FileNotFoundError( - f"Load balancer binary not found at {lb_binary}. Run 'zig build' first." 
- ) - - # Build command line arguments - # Limit to 2 workers for tests to avoid high CPU usage - cmd = [lb_binary, "--port", str(lb_port), "-w", "2"] - for port in backend_ports: - cmd.extend(["--backend", f"127.0.0.1:{port}"]) - - process = subprocess.Popen( - cmd, - stdin=subprocess.DEVNULL, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - close_fds=True, - start_new_session=True, - ) - - # Track for cleanup - _all_processes.append(process) - - # Wait for the port to be ready - if not wait_for_port(lb_port, timeout=10): - try: - os.killpg(os.getpgid(process.pid), signal.SIGTERM) - except Exception: - pass - process.kill() - process.wait() - raise RuntimeError(f"Load balancer failed to start on port {lb_port}") - - # Backends start as healthy by default, but health probe needs time to verify - # 1s initial delay + first probe cycle - time.sleep(5) - - return process - - -@pytest.fixture(scope="module") -def backend() -> str: - """ - Start a single echo backend server. - - Yields: - URL of the backend server (e.g., "http://127.0.0.1:19001") - """ - port = 19001 - server_id = "backend1" - - process = start_backend(port, server_id) - - try: - yield f"http://127.0.0.1:{port}" - finally: - try: - os.killpg(os.getpgid(process.pid), signal.SIGTERM) - except Exception: - pass - process.kill() - try: - process.wait(timeout=2) - except subprocess.TimeoutExpired: - process.kill() - - -@pytest.fixture(scope="module") -def backends() -> Tuple[str, str, str]: - """ - Start three echo backend servers for load balancing tests. 
- - Yields: - Tuple of three backend URLs - """ - ports = [19001, 19002, 19003] - server_ids = ["backend1", "backend2", "backend3"] - processes = [] - - try: - for port, server_id in zip(ports, server_ids): - process = start_backend(port, server_id) - processes.append(process) - - yield tuple(f"http://127.0.0.1:{port}" for port in ports) - finally: - for process in processes: - try: - os.killpg(os.getpgid(process.pid), signal.SIGTERM) - except Exception: - pass - process.kill() - try: - process.wait(timeout=2) - except subprocess.TimeoutExpired: - process.kill() - - -@pytest.fixture(scope="module") -def load_balancer(backend) -> str: - """ - Start load balancer with a single backend. - - Yields: - URL of the load balancer (e.g., "http://127.0.0.1:18080") - """ - lb_port = 18080 - backend_port = 19001 - - process = start_load_balancer([backend_port], lb_port) - - try: - yield f"http://127.0.0.1:{lb_port}" - finally: - try: - os.killpg(os.getpgid(process.pid), signal.SIGTERM) - except Exception: - pass - process.kill() - try: - process.wait(timeout=2) - except subprocess.TimeoutExpired: - process.kill() - - -@pytest.fixture(scope="module") -def load_balancer_multi(backends) -> str: - """ - Start load balancer with three backends. 
- - Yields: - URL of the load balancer (e.g., "http://127.0.0.1:18080") - """ - lb_port = 18080 - backend_ports = [19001, 19002, 19003] - - process = start_load_balancer(backend_ports, lb_port) - - try: - yield f"http://127.0.0.1:{lb_port}" - finally: - try: - os.killpg(os.getpgid(process.pid), signal.SIGTERM) - except Exception: - pass - process.kill() - try: - process.wait(timeout=2) - except subprocess.TimeoutExpired: - process.kill() diff --git a/tests/direct.py b/tests/direct.py deleted file mode 100644 index 7c75410..0000000 --- a/tests/direct.py +++ /dev/null @@ -1,21 +0,0 @@ -import subprocess -import time - -def test_direct(): - backend = subprocess.Popen( - ['../zig-out/bin/test_backend_echo', '--port', '19001', '--id', 'test'], - ) - time.sleep(1) - - lb = subprocess.Popen( - ['../zig-out/bin/load_balancer_mp', '--port', '18080', '-w', '1', '--backend', '127.0.0.1:19001'], - ) - - print("\n>>> Check CPU now! <<<") - time.sleep(5) - - lb.terminate() - backend.terminate() - -if __name__ == "__main__": - test_direct() diff --git a/tests/h2_backend.py b/tests/h2_backend.py new file mode 100644 index 0000000..795eff4 --- /dev/null +++ b/tests/h2_backend.py @@ -0,0 +1,55 @@ +""" +HTTP/2 Test Backend for Integration Tests + +An ASGI application that echoes request details in JSON format. +Run with hypercorn for HTTP/2 support: + + hypercorn h2_backend:app --bind 0.0.0.0:9443 \ + --certfile ../test_certs/cert.pem \ + --keyfile ../test_certs/key.pem +""" + +import json + + +async def app(scope, receive, send): + """ASGI application that echoes request details.""" + if scope["type"] != "http": + return + + # Read request body + body = b"" + while True: + message = await receive() + body += message.get("body", b"") + if not message.get("more_body", False): + break + + # Build response + headers = {k.decode(): v.decode() for k, v in scope.get("headers", [])} + + response_data = { + "server_id": "h2_backend", + "method": scope["method"], + "uri": scope["path"] + ("?" 
+ scope["query_string"].decode() if scope.get("query_string") else ""), + "headers": headers, + "body": body.decode("utf-8", errors="replace"), + "body_length": len(body), + "http_version": scope.get("http_version", "2"), + } + + response_body = json.dumps(response_data).encode() + + await send({ + "type": "http.response.start", + "status": 200, + "headers": [ + [b"content-type", b"application/json"], + [b"content-length", str(len(response_body)).encode()], + ], + }) + + await send({ + "type": "http.response.body", + "body": response_body, + }) diff --git a/tests/harness.zig b/tests/harness.zig new file mode 100644 index 0000000..1bc11dd --- /dev/null +++ b/tests/harness.zig @@ -0,0 +1,81 @@ +//! Minimal Jest-like test harness for Zig integration tests. +//! +//! Provides describe/it semantics with beforeAll/afterAll hooks. +//! Built on top of std.testing for seamless integration. + +const std = @import("std"); +const testing = std.testing; + +pub const TestFn = *const fn (std.mem.Allocator) anyerror!void; + +pub const TestCase = struct { + name: []const u8, + func: TestFn, +}; + +pub const Suite = struct { + name: []const u8, + tests: []const TestCase, + before_all: ?TestFn = null, + after_all: ?TestFn = null, + before_each: ?TestFn = null, + after_each: ?TestFn = null, +}; + +/// Run a test suite with optional lifecycle hooks +pub fn runSuite(allocator: std.mem.Allocator, suite: Suite) !void { + std.debug.print("\n\x1b[1m{s}\x1b[0m\n", .{suite.name}); + + // beforeAll + if (suite.before_all) |before| { + try before(allocator); + } + + var passed: usize = 0; + var failed: usize = 0; + + for (suite.tests) |t| { + // beforeEach + if (suite.before_each) |before| { + before(allocator) catch |err| { + std.debug.print(" \x1b[31m✗\x1b[0m {s} (beforeEach failed: {})\n", .{ t.name, err }); + failed += 1; + continue; + }; + } + + // Run test + if (t.func(allocator)) |_| { + std.debug.print(" \x1b[32m✓\x1b[0m {s}\n", .{t.name}); + passed += 1; + } else |err| { + 
std.debug.print(" \x1b[31m✗\x1b[0m {s} ({})\n", .{ t.name, err }); + failed += 1; + } + + // afterEach + if (suite.after_each) |after| { + after(allocator) catch |err| { + std.debug.print(" \x1b[33m⚠\x1b[0m afterEach failed: {}\n", .{err}); + }; + } + } + + // afterAll + if (suite.after_all) |after| { + after(allocator) catch |err| { + std.debug.print(" \x1b[33m⚠\x1b[0m afterAll failed: {}\n", .{err}); + }; + } + + std.debug.print("\n {d} passed, {d} failed\n", .{ passed, failed }); + + if (failed > 0) { + return error.TestsFailed; + } +} + +/// Helper to create a test case inline +pub fn it(name: []const u8, func: TestFn) TestCase { + return .{ .name = name, .func = func }; +} diff --git a/tests/integration_test.zig b/tests/integration_test.zig index 92f48d7..afa2e73 100644 --- a/tests/integration_test.zig +++ b/tests/integration_test.zig @@ -1,553 +1,54 @@ -/// End-to-End Integration Tests for Load Balancer -/// -/// These tests verify complete request/response flows through the load balancer -/// including header forwarding, body forwarding, load balancing, and failover. -/// -/// Test Architecture: -/// 1. Start test backend servers (echo servers that return request details) -/// 2. Start load balancer pointing to test backends -/// 3. Make HTTP requests to load balancer -/// 4. Verify responses contain expected data -/// 5. 
Clean up processes -/// -/// Run with: zig build test-integration -const std = @import("std"); -const testing = std.testing; -const log = std.log.scoped(.integration_test); - -const Io = std.Io; -const posix = std.posix; - -// Test configuration -const BACKEND1_PORT: u16 = 19001; -const BACKEND2_PORT: u16 = 19002; -const BACKEND3_PORT: u16 = 19003; -const LB_PORT: u16 = 18080; -const TEST_HOST = "127.0.0.1"; - -// Timeouts -const STARTUP_DELAY_MS = 500; // Wait for servers to start -const SHUTDOWN_DELAY_MS = 100; // Wait for graceful shutdown -const REQUEST_TIMEOUT_MS = 5000; // Maximum time for a request - -/// Process handle for cleanup -const ProcessHandle = struct { - child: std.process.Child, - name: []const u8, - - fn kill(self: *ProcessHandle) void { - _ = self.child.kill() catch |err| { - log.warn("Failed to kill {s}: {}", .{ self.name, err }); - }; - } - - fn wait(self: *ProcessHandle) void { - _ = self.child.wait() catch |err| { - log.warn("Failed to wait for {s}: {}", .{ self.name, err }); - }; - } -}; - -/// Test fixture - manages backend and load balancer processes -const TestFixture = struct { - allocator: std.mem.Allocator, - backends: std.ArrayListUnmanaged(ProcessHandle), - load_balancer: ?ProcessHandle, - - fn init(allocator: std.mem.Allocator) TestFixture { - return .{ - .allocator = allocator, - .backends = .empty, - .load_balancer = null, - }; - } - - fn deinit(self: *TestFixture) void { - self.stopAll(); - self.backends.deinit(self.allocator); - } - - fn startBackend(self: *TestFixture, port: u16, server_id: []const u8) !void { - const exe_path = "./zig-out/bin/test_backend_echo"; - - var port_buf: [16]u8 = undefined; - const port_str = try std.fmt.bufPrint(&port_buf, "{d}", .{port}); +//! Integration Test Runner +//! +//! Runs all test suites using the describe/it harness. +//! 
Run with: zig build test-integration - var child = std.process.Child.init(&.{ - exe_path, - "--port", - port_str, - "--id", - server_id, - }, self.allocator); - - child.stdin_behavior = .Ignore; - child.stdout_behavior = .Ignore; - child.stderr_behavior = .Ignore; - - try child.spawn(); - - try self.backends.append(self.allocator, .{ - .child = child, - .name = try std.fmt.allocPrint(self.allocator, "backend_{s}", .{server_id}), - }); - - log.info("Started backend {s} on port {d} (PID: {d})", .{ server_id, port, child.id }); - } - - fn startLoadBalancer(self: *TestFixture, backend_ports: []const u16) !void { - const exe_path = "./zig-out/bin/load_balancer_sp"; // Use single-process for easier testing - - var args: std.ArrayListUnmanaged([]const u8) = .empty; - defer args.deinit(self.allocator); - - try args.append(self.allocator, exe_path); - try args.append(self.allocator, "--port"); - - var lb_port_buf: [16]u8 = undefined; - const lb_port_str = try std.fmt.bufPrint(&lb_port_buf, "{d}", .{LB_PORT}); - try args.append(self.allocator, try self.allocator.dupe(u8, lb_port_str)); - - // Add backends - for (backend_ports) |port| { - try args.append(self.allocator, "--backend"); - var backend_buf: [32]u8 = undefined; - const backend_str = try std.fmt.bufPrint(&backend_buf, "127.0.0.1:{d}", .{port}); - try args.append(self.allocator, try self.allocator.dupe(u8, backend_str)); - } - - var child = std.process.Child.init(args.items, self.allocator); - - child.stdin_behavior = .Ignore; - child.stdout_behavior = .Ignore; - child.stderr_behavior = .Ignore; - - try child.spawn(); - - self.load_balancer = .{ - .child = child, - .name = try self.allocator.dupe(u8, "load_balancer"), +const std = @import("std"); +const harness = @import("harness.zig"); + +// Import all test suites +const basic = @import("suites/basic.zig"); +const headers = @import("suites/headers.zig"); +const body = @import("suites/body.zig"); +const load_balancing = @import("suites/load_balancing.zig"); +const 
http2 = @import("suites/http2.zig"); + +pub fn main() !void { + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + defer _ = gpa.deinit(); + const allocator = gpa.allocator(); + + std.debug.print("\n\x1b[1;36m╔══════════════════════════════════════╗\x1b[0m\n", .{}); + std.debug.print("\x1b[1;36m║ Load Balancer Integration Tests ║\x1b[0m\n", .{}); + std.debug.print("\x1b[1;36m╚══════════════════════════════════════╝\x1b[0m\n", .{}); + + const suites = [_]harness.Suite{ + basic.suite, + headers.suite, + body.suite, + load_balancing.suite, + http2.suite, + }; + + var suite_failures: usize = 0; + + for (suites) |suite| { + harness.runSuite(allocator, suite) catch |err| { + std.debug.print(" Suite error: {}\n", .{err}); + suite_failures += 1; }; - - log.info("Started load balancer on port {d} (PID: {d})", .{ LB_PORT, child.id }); - } - - fn stopAll(self: *TestFixture) void { - // Stop load balancer first - if (self.load_balancer) |*lb| { - log.info("Stopping load balancer...", .{}); - lb.kill(); - lb.wait(); - self.allocator.free(lb.name); - } - - // Stop backends - for (self.backends.items) |*backend| { - log.info("Stopping {s}...", .{backend.name}); - backend.kill(); - backend.wait(); - self.allocator.free(backend.name); - } - - self.backends.clearRetainingCapacity(); - self.load_balancer = null; - } - - fn waitForStartup(self: *TestFixture) !void { - _ = self; - posix.nanosleep(0, STARTUP_DELAY_MS * std.time.ns_per_ms); - } -}; - -/// HTTP client for making test requests -fn makeHttpRequest( - allocator: std.mem.Allocator, - method: []const u8, - path: []const u8, - headers: ?std.StringHashMap([]const u8), - body: ?[]const u8, -) ![]const u8 { - // Build request - var request: std.ArrayListUnmanaged(u8) = .empty; - defer request.deinit(allocator); - - // Request line - const request_line = try std.fmt.allocPrint(allocator, "{s} {s} HTTP/1.1\r\nHost: {s}:{d}\r\n", .{ method, path, TEST_HOST, LB_PORT }); - try request.appendSlice(allocator, request_line); - - // 
Custom headers - if (headers) |hdrs| { - var iter = hdrs.iterator(); - while (iter.next()) |entry| { - const header_line = try std.fmt.allocPrint(allocator, "{s}: {s}\r\n", .{ entry.key_ptr.*, entry.value_ptr.* }); - try request.appendSlice(allocator, header_line); - } - } - - // Body - if (body) |b| { - const content_len = try std.fmt.allocPrint(allocator, "Content-Length: {d}\r\n", .{b.len}); - try request.appendSlice(allocator, content_len); - } - - try request.appendSlice(allocator, "Connection: close\r\n"); - try request.appendSlice(allocator, "\r\n"); - - if (body) |b| { - try request.appendSlice(allocator, b); - } - - // Connect and send - const addr = try Io.net.IpAddress.parse(TEST_HOST, LB_PORT); - const tcp_socket = try posix.socket(addr.family(), posix.SOCK.STREAM, posix.IPPROTO.TCP); - errdefer posix.close(tcp_socket); - - try posix.connect(tcp_socket, &addr.addr, addr.addrLen()); - const stream = std.fs.File{ .handle = tcp_socket }; - defer stream.close(); - - try stream.writeAll(request.items); - - // Read response - var response: std.ArrayListUnmanaged(u8) = .empty; - errdefer response.deinit(allocator); - - var buf: [4096]u8 = undefined; - while (true) { - const n = try stream.read(&buf); - if (n == 0) break; - try response.appendSlice(allocator, buf[0..n]); - } - - return response.toOwnedSlice(allocator); -} - -fn extractBody(response: []const u8) ![]const u8 { - // Find \r\n\r\n separating headers from body - const separator = "\r\n\r\n"; - if (std.mem.indexOf(u8, response, separator)) |idx| { - return response[idx + separator.len ..]; - } - return error.NoBodyFound; -} - -fn parseJson(allocator: std.mem.Allocator, json_str: []const u8) !std.json.Parsed(std.json.Value) { - return try std.json.parseFromSlice(std.json.Value, allocator, json_str, .{}); -} - -// ============================================================================ -// Basic Functionality Tests -// ============================================================================ - 
-test "integration: GET request forwarded correctly" { - const allocator = testing.allocator; - - var fixture = TestFixture.init(allocator); - defer fixture.deinit(); - - // Start backend and load balancer - try fixture.startBackend(BACKEND1_PORT, "backend1"); - try fixture.startLoadBalancer(&.{BACKEND1_PORT}); - try fixture.waitForStartup(); - - // Make GET request - const response = try makeHttpRequest(allocator, "GET", "/test/path", null, null); - defer allocator.free(response); - - const body = try extractBody(response); - const parsed = try parseJson(allocator, body); - defer parsed.deinit(); - - const root = parsed.value.object; - - // Verify response - try testing.expectEqualStrings("GET", root.get("method").?.string); - try testing.expectEqualStrings("/test/path", root.get("uri").?.string); - try testing.expectEqual(@as(i64, 0), root.get("body_length").?.integer); -} - -test "integration: POST request with JSON body forwarded correctly" { - const allocator = testing.allocator; - - var fixture = TestFixture.init(allocator); - defer fixture.deinit(); - - try fixture.startBackend(BACKEND1_PORT, "backend1"); - try fixture.startLoadBalancer(&.{BACKEND1_PORT}); - try fixture.waitForStartup(); - - // Create headers - var headers = std.StringHashMap([]const u8).init(allocator); - defer headers.deinit(); - try headers.put("Content-Type", "application/json"); - - const request_body = "{\"test\":\"data\",\"number\":42}"; - - // Make POST request - const response = try makeHttpRequest(allocator, "POST", "/api/endpoint", headers, request_body); - defer allocator.free(response); - - const body = try extractBody(response); - const parsed = try parseJson(allocator, body); - defer parsed.deinit(); - - const root = parsed.value.object; - - // Verify response - try testing.expectEqualStrings("POST", root.get("method").?.string); - try testing.expectEqualStrings("/api/endpoint", root.get("uri").?.string); - try testing.expectEqualStrings(request_body, 
root.get("body").?.string); - try testing.expectEqual(@as(i64, request_body.len), root.get("body_length").?.integer); - - // Verify Content-Type header was forwarded - const resp_headers = root.get("headers").?.object; - try testing.expect(resp_headers.get("Content-Type") != null); -} - -test "integration: PUT request with body forwarded correctly" { - const allocator = testing.allocator; - - var fixture = TestFixture.init(allocator); - defer fixture.deinit(); - - try fixture.startBackend(BACKEND1_PORT, "backend1"); - try fixture.startLoadBalancer(&.{BACKEND1_PORT}); - try fixture.waitForStartup(); - - const request_body = "Updated content for PUT request"; - - // Make PUT request - const response = try makeHttpRequest(allocator, "PUT", "/resource/123", null, request_body); - defer allocator.free(response); - - const body = try extractBody(response); - const parsed = try parseJson(allocator, body); - defer parsed.deinit(); - - const root = parsed.value.object; - - try testing.expectEqualStrings("PUT", root.get("method").?.string); - try testing.expectEqualStrings("/resource/123", root.get("uri").?.string); - try testing.expectEqualStrings(request_body, root.get("body").?.string); -} - -test "integration: Custom headers forwarded to backend" { - const allocator = testing.allocator; - - var fixture = TestFixture.init(allocator); - defer fixture.deinit(); - - try fixture.startBackend(BACKEND1_PORT, "backend1"); - try fixture.startLoadBalancer(&.{BACKEND1_PORT}); - try fixture.waitForStartup(); - - // Create custom headers - var headers = std.StringHashMap([]const u8).init(allocator); - defer headers.deinit(); - try headers.put("X-Custom-Header", "CustomValue"); - try headers.put("X-Request-ID", "test-123"); - try headers.put("Authorization", "Bearer token123"); - - const response = try makeHttpRequest(allocator, "GET", "/", headers, null); - defer allocator.free(response); - - const body = try extractBody(response); - const parsed = try parseJson(allocator, body); - 
defer parsed.deinit(); - - const root = parsed.value.object; - const resp_headers = root.get("headers").?.object; - - // Verify custom headers were forwarded - try testing.expectEqualStrings("CustomValue", resp_headers.get("X-Custom-Header").?.string); - try testing.expectEqualStrings("test-123", resp_headers.get("X-Request-ID").?.string); - try testing.expectEqualStrings("Bearer token123", resp_headers.get("Authorization").?.string); -} - -test "integration: Hop-by-hop headers NOT forwarded" { - const allocator = testing.allocator; - - var fixture = TestFixture.init(allocator); - defer fixture.deinit(); - - try fixture.startBackend(BACKEND1_PORT, "backend1"); - try fixture.startLoadBalancer(&.{BACKEND1_PORT}); - try fixture.waitForStartup(); - - // Create headers including hop-by-hop headers - var headers = std.StringHashMap([]const u8).init(allocator); - defer headers.deinit(); - try headers.put("Connection", "keep-alive"); - try headers.put("Keep-Alive", "timeout=5"); - try headers.put("Transfer-Encoding", "chunked"); - try headers.put("X-Safe-Header", "this-should-be-forwarded"); - - const response = try makeHttpRequest(allocator, "GET", "/", headers, null); - defer allocator.free(response); - - const body = try extractBody(response); - const parsed = try parseJson(allocator, body); - defer parsed.deinit(); - - const root = parsed.value.object; - const resp_headers = root.get("headers").?.object; - - // Verify hop-by-hop headers were NOT forwarded - try testing.expect(resp_headers.get("Connection") == null); - try testing.expect(resp_headers.get("Keep-Alive") == null); - try testing.expect(resp_headers.get("Transfer-Encoding") == null); - - // But safe headers should be forwarded - try testing.expectEqualStrings("this-should-be-forwarded", resp_headers.get("X-Safe-Header").?.string); -} - -// ============================================================================ -// Load Balancing Tests -// 
============================================================================ - -test "integration: Round-robin distributes requests across backends" { - const allocator = testing.allocator; - - var fixture = TestFixture.init(allocator); - defer fixture.deinit(); - - // Start multiple backends - try fixture.startBackend(BACKEND1_PORT, "backend1"); - try fixture.startBackend(BACKEND2_PORT, "backend2"); - try fixture.startBackend(BACKEND3_PORT, "backend3"); - try fixture.startLoadBalancer(&.{ BACKEND1_PORT, BACKEND2_PORT, BACKEND3_PORT }); - try fixture.waitForStartup(); - - // Track which backends handle requests - var backend_counts = std.StringHashMap(usize).init(allocator); - defer backend_counts.deinit(); - - // Make multiple requests - const num_requests = 9; // Divisible by 3 for perfect distribution - for (0..num_requests) |i| { - var path_buf: [64]u8 = undefined; - const path = try std.fmt.bufPrint(&path_buf, "/request/{d}", .{i}); - - const response = try makeHttpRequest(allocator, "GET", path, null, null); - defer allocator.free(response); - - const body = try extractBody(response); - const parsed = try parseJson(allocator, body); - defer parsed.deinit(); - - const root = parsed.value.object; - const server_id = root.get("server_id").?.string; - - // Count requests per backend - const result = try backend_counts.getOrPut(server_id); - if (!result.found_existing) { - result.value_ptr.* = 0; - } - result.value_ptr.* += 1; } - // Verify round-robin distribution - // Each backend should handle num_requests/3 requests - const expected_per_backend = num_requests / 3; - - try testing.expectEqual(expected_per_backend, backend_counts.get("backend1").?); - try testing.expectEqual(expected_per_backend, backend_counts.get("backend2").?); - try testing.expectEqual(expected_per_backend, backend_counts.get("backend3").?); -} - -// ============================================================================ -// Connection Pooling Tests -// 
============================================================================ - -test "integration: Multiple sequential requests work" { - const allocator = testing.allocator; - - var fixture = TestFixture.init(allocator); - defer fixture.deinit(); - - try fixture.startBackend(BACKEND1_PORT, "backend1"); - try fixture.startLoadBalancer(&.{BACKEND1_PORT}); - try fixture.waitForStartup(); - - // Make multiple sequential requests to verify pooling works - for (0..5) |i| { - var path_buf: [64]u8 = undefined; - const path = try std.fmt.bufPrint(&path_buf, "/seq/{d}", .{i}); - - const response = try makeHttpRequest(allocator, "GET", path, null, null); - defer allocator.free(response); - - const body = try extractBody(response); - const parsed = try parseJson(allocator, body); - defer parsed.deinit(); - - const root = parsed.value.object; - - // Verify each request succeeds - try testing.expectEqualStrings("backend1", root.get("server_id").?.string); - try testing.expectEqualStrings(path, root.get("uri").?.string); + std.debug.print("\n\x1b[1m════════════════════════════════════════\x1b[0m\n", .{}); + if (suite_failures == 0) { + std.debug.print("\x1b[32m✓ All test suites passed!\x1b[0m\n", .{}); + } else { + std.debug.print("\x1b[31m✗ {d} suite(s) had failures\x1b[0m\n", .{suite_failures}); + return error.TestSuitesFailed; } } -// ============================================================================ -// Body Forwarding Edge Cases -// ============================================================================ - -test "integration: Large POST body forwarded correctly" { - const allocator = testing.allocator; - - var fixture = TestFixture.init(allocator); - defer fixture.deinit(); - - try fixture.startBackend(BACKEND1_PORT, "backend1"); - try fixture.startLoadBalancer(&.{BACKEND1_PORT}); - try fixture.waitForStartup(); - - // Create a larger body (4KB) - var large_body = try allocator.alloc(u8, 4096); - defer allocator.free(large_body); - @memset(large_body, 'X'); - 
- const response = try makeHttpRequest(allocator, "POST", "/large", null, large_body); - defer allocator.free(response); - - const body = try extractBody(response); - const parsed = try parseJson(allocator, body); - defer parsed.deinit(); - - const root = parsed.value.object; - - // Verify body length - try testing.expectEqual(@as(i64, large_body.len), root.get("body_length").?.integer); -} - -test "integration: Empty body on POST works" { - const allocator = testing.allocator; - - var fixture = TestFixture.init(allocator); - defer fixture.deinit(); - - try fixture.startBackend(BACKEND1_PORT, "backend1"); - try fixture.startLoadBalancer(&.{BACKEND1_PORT}); - try fixture.waitForStartup(); - - const response = try makeHttpRequest(allocator, "POST", "/empty", null, ""); - defer allocator.free(response); - - const body = try extractBody(response); - const parsed = try parseJson(allocator, body); - defer parsed.deinit(); - - const root = parsed.value.object; - - try testing.expectEqualStrings("POST", root.get("method").?.string); - try testing.expectEqual(@as(i64, 0), root.get("body_length").?.integer); -} - -// ============================================================================ -// Test Main -// ============================================================================ - -test { - std.testing.refAllDecls(@This()); +// Also support zig test +test "run all integration tests" { + try main(); } diff --git a/tests/process_manager.zig b/tests/process_manager.zig new file mode 100644 index 0000000..8247a1b --- /dev/null +++ b/tests/process_manager.zig @@ -0,0 +1,230 @@ +//! Process manager for integration tests. +//! +//! Handles spawning and cleanup of backend/load balancer processes. +//! Supports both HTTP/1.1 (Zig) and HTTP/2 (Python/hypercorn) backends. 
+ +const std = @import("std"); +const posix = std.posix; +const test_utils = @import("test_utils.zig"); + +pub const H2_BACKEND_PORT: u16 = 9443; + +pub const Process = struct { + child: std.process.Child, + name: []const u8, + allocator: std.mem.Allocator, + + pub fn kill(self: *Process) void { + _ = self.child.kill() catch {}; + _ = self.child.wait() catch {}; + } + + pub fn deinit(self: *Process) void { + self.allocator.free(self.name); + } +}; + +pub const ProcessManager = struct { + allocator: std.mem.Allocator, + processes: std.ArrayList(Process), + + pub fn init(allocator: std.mem.Allocator) ProcessManager { + return .{ + .allocator = allocator, + .processes = .empty, + }; + } + + pub fn deinit(self: *ProcessManager) void { + self.stopAll(); + self.processes.deinit(self.allocator); + } + + pub fn startBackend(self: *ProcessManager, port: u16, server_id: []const u8) !void { + var port_buf: [8]u8 = undefined; + const port_str = try std.fmt.bufPrint(&port_buf, "{d}", .{port}); + + var child = std.process.Child.init( + &.{ "./zig-out/bin/test_backend_echo", "--port", port_str, "--id", server_id }, + self.allocator, + ); + child.stdin_behavior = .Ignore; + child.stdout_behavior = .Ignore; + child.stderr_behavior = .Ignore; + + try child.spawn(); + errdefer { + _ = child.kill() catch {}; + _ = child.wait() catch {}; + } + + try self.processes.append(self.allocator, .{ + .child = child, + .name = try std.fmt.allocPrint(self.allocator, "backend_{s}", .{server_id}), + .allocator = self.allocator, + }); + + // Wait for port to be ready + try test_utils.waitForPort(port, 10000); + } + + pub fn startLoadBalancer(self: *ProcessManager, backend_ports: []const u16) !void { + var args: std.ArrayList([]const u8) = .empty; + defer args.deinit(self.allocator); + + // Track strings we allocate so we can free them + var allocated_strings: std.ArrayList([]const u8) = .empty; + defer { + for (allocated_strings.items) |s| self.allocator.free(s); + 
allocated_strings.deinit(self.allocator); + } + + try args.append(self.allocator, "./zig-out/bin/load_balancer"); + try args.append(self.allocator, "--port"); + + var lb_port_buf: [8]u8 = undefined; + const lb_port_str = try std.fmt.bufPrint(&lb_port_buf, "{d}", .{test_utils.LB_PORT}); + const lb_port_dup = try self.allocator.dupe(u8, lb_port_str); + try allocated_strings.append(self.allocator, lb_port_dup); + try args.append(self.allocator, lb_port_dup); + + // Use single-process mode for easier testing + try args.append(self.allocator, "--mode"); + try args.append(self.allocator, "sp"); + + for (backend_ports) |port| { + try args.append(self.allocator, "--backend"); + var buf: [32]u8 = undefined; + const backend_str = try std.fmt.bufPrint(&buf, "127.0.0.1:{d}", .{port}); + const backend_dup = try self.allocator.dupe(u8, backend_str); + try allocated_strings.append(self.allocator, backend_dup); + try args.append(self.allocator, backend_dup); + } + + var child = std.process.Child.init(args.items, self.allocator); + child.stdin_behavior = .Ignore; + child.stdout_behavior = .Ignore; + child.stderr_behavior = .Ignore; + + try child.spawn(); + errdefer { + _ = child.kill() catch {}; + _ = child.wait() catch {}; + } + + try self.processes.append(self.allocator, .{ + .child = child, + .name = try self.allocator.dupe(u8, "load_balancer"), + .allocator = self.allocator, + }); + + // Wait for LB port + try test_utils.waitForPort(test_utils.LB_PORT, 10000); + + // Wait for health checks (backends need to be marked healthy) + posix.nanosleep(2, 0); + } + + /// Start HTTP/2 backend using Python hypercorn + pub fn startH2Backend(self: *ProcessManager) !void { + // Use bash to activate venv and run hypercorn + var child = std.process.Child.init( + &.{ + "/bin/bash", "-c", + "cd tests && source .venv/bin/activate && " ++ + "hypercorn h2_backend:app --bind 0.0.0.0:9443 " ++ + "--certfile ../test_certs/cert.pem " ++ + "--keyfile ../test_certs/key.pem " ++ + "2>&1", + }, + 
self.allocator, + ); + child.stdin_behavior = .Ignore; + child.stdout_behavior = .Ignore; + child.stderr_behavior = .Ignore; + + try child.spawn(); + errdefer { + _ = child.kill() catch {}; + _ = child.wait() catch {}; + } + + try self.processes.append(self.allocator, .{ + .child = child, + .name = try self.allocator.dupe(u8, "h2_backend"), + .allocator = self.allocator, + }); + + // Wait for port to be ready (HTTPS so need longer timeout) + try test_utils.waitForTlsPort(H2_BACKEND_PORT, 15000); + } + + /// Start load balancer configured for HTTP/2 TLS backend + pub fn startLoadBalancerH2(self: *ProcessManager) !void { + var args: std.ArrayList([]const u8) = .empty; + defer args.deinit(self.allocator); + + // Track strings we allocate so we can free them + var allocated_strings: std.ArrayList([]const u8) = .empty; + defer { + for (allocated_strings.items) |s| self.allocator.free(s); + allocated_strings.deinit(self.allocator); + } + + try args.append(self.allocator, "./zig-out/bin/load_balancer"); + try args.append(self.allocator, "--port"); + + var lb_port_buf: [8]u8 = undefined; + const lb_port_str = try std.fmt.bufPrint(&lb_port_buf, "{d}", .{test_utils.LB_H2_PORT}); + const lb_port_dup = try self.allocator.dupe(u8, lb_port_str); + try allocated_strings.append(self.allocator, lb_port_dup); + try args.append(self.allocator, lb_port_dup); + + // Use single-process mode for easier testing + try args.append(self.allocator, "--mode"); + try args.append(self.allocator, "sp"); + + // Use HTTPS backend + try args.append(self.allocator, "--backend"); + var buf: [64]u8 = undefined; + const backend_str = try std.fmt.bufPrint(&buf, "https://127.0.0.1:{d}", .{H2_BACKEND_PORT}); + const backend_dup = try self.allocator.dupe(u8, backend_str); + try allocated_strings.append(self.allocator, backend_dup); + try args.append(self.allocator, backend_dup); + + // Skip TLS verification for self-signed test certs + try args.append(self.allocator, "--insecure"); + + var child = 
std.process.Child.init(args.items, self.allocator); + child.stdin_behavior = .Ignore; + child.stdout_behavior = .Ignore; + child.stderr_behavior = .Ignore; + + try child.spawn(); + errdefer { + _ = child.kill() catch {}; + _ = child.wait() catch {}; + } + + try self.processes.append(self.allocator, .{ + .child = child, + .name = try self.allocator.dupe(u8, "load_balancer_h2"), + .allocator = self.allocator, + }); + + // Wait for LB port + try test_utils.waitForPort(test_utils.LB_H2_PORT, 10000); + + // Wait for health checks and HTTP/2 connection establishment + posix.nanosleep(3, 0); + } + + pub fn stopAll(self: *ProcessManager) void { + // Stop in reverse order (LB first, then backends) + while (self.processes.pop()) |*proc| { + var p = proc.*; + p.kill(); + p.deinit(); + } + } +}; diff --git a/tests/quick_test.sh b/tests/quick_test.sh deleted file mode 100755 index 80f1794..0000000 --- a/tests/quick_test.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -echo "Starting backend..." -./zig-out/bin/test_backend_echo --port 19001 --id backend1 & -sleep 2 - -echo "Testing backend directly..." -curl -s http://localhost:19001/test | jq .server_id - -echo "" -echo "Starting load balancer..." -./zig-out/bin/load_balancer_sp --port 18080 --backend 127.0.0.1:19001 & -sleep 2 - -echo "Testing load balancer..." -curl -s http://localhost:18080/test | jq .server_id - -echo "" -echo "Cleaning up..." 
-killall test_backend_echo load_balancer_sp 2>/dev/null diff --git a/tests/requirements.txt b/tests/requirements.txt deleted file mode 100644 index 266a946..0000000 --- a/tests/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -pytest>=7.0 -requests>=2.28 -pytest-timeout>=2.0 diff --git a/tests/run_integration_tests.sh b/tests/run_integration_tests.sh deleted file mode 100755 index 8d7a038..0000000 --- a/tests/run_integration_tests.sh +++ /dev/null @@ -1,309 +0,0 @@ -#!/bin/bash - -# Integration Tests for Load Balancer -# This script starts backends and load balancer, then runs tests using curl - -set -e - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -# Configuration -BACKEND1_PORT=19001 -BACKEND2_PORT=19002 -BACKEND3_PORT=19003 -LB_PORT=18080 - -# Process tracking -PIDS=() - -# Cleanup function -cleanup() { - echo "" - echo "Cleaning up..." - for pid in "${PIDS[@]}"; do - if kill -0 $pid 2>/dev/null; then - echo "Killing process $pid" - kill $pid 2>/dev/null || true - fi - done - wait 2>/dev/null || true - echo "Cleanup complete" -} - -# Register cleanup on exit -trap cleanup EXIT INT TERM - -# Helper functions -pass() { - echo -e "${GREEN}[PASS]${NC} $1" -} - -fail() { - echo -e "${RED}[FAIL]${NC} $1" - echo " $2" - exit 1 -} - -info() { - echo -e "${YELLOW}[INFO]${NC} $1" -} - -# Start a backend server -start_backend() { - local port=$1 - local id=$2 - - ./zig-out/bin/test_backend_echo --port $port --id $id > /dev/null 2>&1 & - local pid=$! - PIDS+=($pid) - info "Started backend $id on port $port (PID: $pid)" -} - -# Start load balancer -start_load_balancer() { - local backends="$@" - - ./zig-out/bin/load_balancer_sp --port $LB_PORT $backends > /dev/null 2>&1 & - local pid=$! - PIDS+=($pid) - info "Started load balancer on port $LB_PORT (PID: $pid)" -} - -# Wait for server to be ready -wait_for_server() { - local port=$1 - local max_attempts=20 - local attempt=0 - - while ! 
nc -z localhost $port 2>/dev/null; do - attempt=$((attempt + 1)) - if [ $attempt -ge $max_attempts ]; then - fail "Server on port $port failed to start" "Timeout waiting for server" - fi - sleep 0.1 - done -} - -# Test: GET request -test_get_request() { - info "Testing GET request forwarding" - - local response=$(curl -s -w "\n%{http_code}" http://localhost:$LB_PORT/) - local status=$(echo "$response" | tail -n 1) - local body=$(echo "$response" | sed '$d') - - if [ "$status" != "200" ]; then - fail "GET request" "Expected status 200, got $status" - fi - - # Check if response is JSON - if ! echo "$body" | jq . > /dev/null 2>&1; then - fail "GET request" "Response is not valid JSON: $body" - fi - - local method=$(echo "$body" | jq -r '.method') - local uri=$(echo "$body" | jq -r '.uri') - - if [ "$method" != "GET" ]; then - fail "GET request" "Expected method GET, got $method" - fi - - if [ "$uri" != "/" ]; then - fail "GET request" "Expected URI /, got $uri" - fi - - pass "GET request forwarded correctly" -} - -# Test: POST request with JSON body -test_post_with_body() { - info "Testing POST request with JSON body" - - local request_body='{"test":"data","number":42}' - local response=$(curl -s -w "\n%{http_code}" -X POST \ - -H "Content-Type: application/json" \ - -d "$request_body" \ - http://localhost:$LB_PORT/) - - local status=$(echo "$response" | tail -n 1) - local body=$(echo "$response" | sed '$d') - - if [ "$status" != "200" ]; then - fail "POST with body" "Expected status 200, got $status" - fi - - local method=$(echo "$body" | jq -r '.method') - local uri=$(echo "$body" | jq -r '.uri') - local response_body=$(echo "$body" | jq -r '.body') - local body_length=$(echo "$body" | jq -r '.body_length') - - if [ "$method" != "POST" ]; then - fail "POST with body" "Expected method POST, got $method" - fi - - if [ "$uri" != "/" ]; then - fail "POST with body" "Expected URI /, got $uri" - fi - - if [ "$response_body" != "$request_body" ]; then - fail "POST with 
body" "Body mismatch. Expected: $request_body, Got: $response_body" - fi - - local expected_len=${#request_body} - if [ "$body_length" != "$expected_len" ]; then - fail "POST with body" "Body length mismatch. Expected: $expected_len, Got: $body_length" - fi - - pass "POST request with JSON body forwarded correctly" -} - -# Test: Custom headers -test_custom_headers() { - info "Testing custom header forwarding" - - local response=$(curl -s \ - -H "X-Custom-Header: CustomValue" \ - -H "X-Request-ID: test-123" \ - -H "Authorization: Bearer token123" \ - http://localhost:$LB_PORT/) - - local custom_header=$(echo "$response" | jq -r '.headers["X-Custom-Header"]') - local request_id=$(echo "$response" | jq -r '.headers["X-Request-ID"]') - local auth=$(echo "$response" | jq -r '.headers.Authorization') - - if [ "$custom_header" != "CustomValue" ]; then - fail "Custom headers" "X-Custom-Header not forwarded correctly. Got: $custom_header" - fi - - if [ "$request_id" != "test-123" ]; then - fail "Custom headers" "X-Request-ID not forwarded correctly. Got: $request_id" - fi - - if [ "$auth" != "Bearer token123" ]; then - fail "Custom headers" "Authorization not forwarded correctly. 
Got: $auth" - fi - - pass "Custom headers forwarded correctly" -} - -# Test: Round-robin load balancing -test_round_robin() { - info "Testing round-robin load balancing" - - # Clean up previous test - cleanup - PIDS=() - - # Start 3 backends - start_backend $BACKEND1_PORT "backend1" - start_backend $BACKEND2_PORT "backend2" - start_backend $BACKEND3_PORT "backend3" - - # Start load balancer with all 3 backends - start_load_balancer \ - --backend "127.0.0.1:$BACKEND1_PORT" \ - --backend "127.0.0.1:$BACKEND2_PORT" \ - --backend "127.0.0.1:$BACKEND3_PORT" - - # Wait for servers - sleep 1 - wait_for_server $LB_PORT - - # Make 9 requests and track which backend handles each - local backend1_count=0 - local backend2_count=0 - local backend3_count=0 - - for i in {1..9}; do - local response=$(curl -s http://localhost:$LB_PORT/) - local server_id=$(echo "$response" | jq -r '.server_id') - - case "$server_id" in - "backend1") backend1_count=$((backend1_count + 1)) ;; - "backend2") backend2_count=$((backend2_count + 1)) ;; - "backend3") backend3_count=$((backend3_count + 1)) ;; - esac - done - - # Each backend should handle exactly 3 requests - if [ $backend1_count -ne 3 ] || [ $backend2_count -ne 3 ] || [ $backend3_count -ne 3 ]; then - fail "Round-robin" "Distribution not even. Backend1: $backend1_count, Backend2: $backend2_count, Backend3: $backend3_count" - fi - - pass "Round-robin distributes requests evenly (3/3/3)" -} - -# Test: Multiple sequential requests -test_sequential_requests() { - info "Testing multiple sequential requests" - - for i in {1..5}; do - local response=$(curl -s http://localhost:$LB_PORT/) - local uri=$(echo "$response" | jq -r '.uri') - - if [ "$uri" != "/" ]; then - fail "Sequential requests" "Request $i failed. 
Expected URI /, got $uri" - fi - done - - pass "Multiple sequential requests work correctly" -} - -# Main test execution -main() { - echo "========================================" - echo "Load Balancer Integration Tests" - echo "========================================" - echo "" - - # Check if binaries exist - if [ ! -f "./zig-out/bin/test_backend_echo" ]; then - fail "Setup" "test_backend_echo binary not found. Run 'zig build' first." - fi - - if [ ! -f "./zig-out/bin/load_balancer_sp" ]; then - fail "Setup" "load_balancer_sp binary not found. Run 'zig build' first." - fi - - # Check if jq is installed - if ! command -v jq &> /dev/null; then - fail "Setup" "jq is required for JSON parsing. Please install it." - fi - - # Check if nc is available - if ! command -v nc &> /dev/null; then - fail "Setup" "nc (netcat) is required for port checking. Please install it." - fi - - info "Starting basic tests (single backend)..." - - # Start single backend for basic tests - start_backend $BACKEND1_PORT "backend1" - start_load_balancer --backend "127.0.0.1:$BACKEND1_PORT" - - # Wait for servers to start - sleep 1 - wait_for_server $BACKEND1_PORT - wait_for_server $LB_PORT - - # Run basic tests - test_get_request - test_post_with_body - test_custom_headers - test_sequential_requests - - # Run round-robin test (requires restarting with multiple backends) - test_round_robin - - echo "" - echo "========================================" - echo -e "${GREEN}All tests passed!${NC}" - echo "========================================" -} - -# Run tests -main diff --git a/tests/suites/basic.zig b/tests/suites/basic.zig new file mode 100644 index 0000000..6bc9459 --- /dev/null +++ b/tests/suites/basic.zig @@ -0,0 +1,154 @@ +//! Basic proxy functionality tests. +//! +//! Tests HTTP method forwarding: GET, POST, PUT, PATCH, DELETE +//! 
Tests URI handling: query strings, deep paths + +const std = @import("std"); +const harness = @import("../harness.zig"); +const utils = @import("../test_utils.zig"); +const ProcessManager = @import("../process_manager.zig").ProcessManager; + +var pm: ProcessManager = undefined; + +fn beforeAll(allocator: std.mem.Allocator) !void { + pm = ProcessManager.init(allocator); + try pm.startBackend(utils.BACKEND1_PORT, "backend1"); + try pm.startLoadBalancer(&.{utils.BACKEND1_PORT}); +} + +fn afterAll(_: std.mem.Allocator) !void { + pm.deinit(); +} + +fn testGetRequest(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/test/path", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const method = try utils.getJsonString(allocator, body, "method"); + defer allocator.free(method); + const uri = try utils.getJsonString(allocator, body, "uri"); + defer allocator.free(uri); + + try std.testing.expectEqualStrings("GET", method); + try std.testing.expectEqualStrings("/test/path", uri); +} + +fn testPostRequest(allocator: std.mem.Allocator) !void { + const req_body = "{\"test\":\"data\",\"number\":42}"; + const headers = &[_][2][]const u8{.{ "Content-Type", "application/json" }}; + + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/api/endpoint", headers, req_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const method = try utils.getJsonString(allocator, body, "method"); + defer allocator.free(method); + const recv_body = try utils.getJsonString(allocator, body, "body"); + defer allocator.free(recv_body); + + try std.testing.expectEqualStrings("POST", method); + try std.testing.expectEqualStrings(req_body, recv_body); +} + +fn testPutRequest(allocator: std.mem.Allocator) !void { + const req_body = "Updated content"; + + const response = try utils.httpRequest(allocator, "PUT", utils.LB_PORT, 
"/resource/123", null, req_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const method = try utils.getJsonString(allocator, body, "method"); + defer allocator.free(method); + const uri = try utils.getJsonString(allocator, body, "uri"); + defer allocator.free(uri); + + try std.testing.expectEqualStrings("PUT", method); + try std.testing.expectEqualStrings("/resource/123", uri); +} + +fn testPatchRequest(allocator: std.mem.Allocator) !void { + const req_body = "{\"field\":\"name\",\"value\":\"new\"}"; + const headers = &[_][2][]const u8{.{ "Content-Type", "application/json" }}; + + const response = try utils.httpRequest(allocator, "PATCH", utils.LB_PORT, "/api/resource/456", headers, req_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const method = try utils.getJsonString(allocator, body, "method"); + defer allocator.free(method); + const uri = try utils.getJsonString(allocator, body, "uri"); + defer allocator.free(uri); + + try std.testing.expectEqualStrings("PATCH", method); + try std.testing.expectEqualStrings("/api/resource/456", uri); +} + +fn testResponseStructure(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + + // Verify all expected fields exist + const server_id = try utils.getJsonString(allocator, body, "server_id"); + defer allocator.free(server_id); + const method = try utils.getJsonString(allocator, body, "method"); + defer allocator.free(method); + const uri = try utils.getJsonString(allocator, body, "uri"); + defer allocator.free(uri); + _ = try utils.getJsonInt(allocator, body, "body_length"); + _ = try utils.hasHeader(allocator, body, "Host"); +} + +fn testDeleteRequest(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "DELETE", utils.LB_PORT, 
"/api/resource/789", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const method = try utils.getJsonString(allocator, body, "method"); + defer allocator.free(method); + const uri = try utils.getJsonString(allocator, body, "uri"); + defer allocator.free(uri); + + try std.testing.expectEqualStrings("DELETE", method); + try std.testing.expectEqualStrings("/api/resource/789", uri); +} + +fn testQueryStringForwarding(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/search?q=hello&page=2&sort=desc", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const uri = try utils.getJsonString(allocator, body, "uri"); + defer allocator.free(uri); + + try std.testing.expectEqualStrings("/search?q=hello&page=2&sort=desc", uri); +} + +fn testDeepPathForwarding(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/api/v1/users/123/posts/456/comments", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const uri = try utils.getJsonString(allocator, body, "uri"); + defer allocator.free(uri); + + try std.testing.expectEqualStrings("/api/v1/users/123/posts/456/comments", uri); +} + +pub const suite = harness.Suite{ + .name = "Basic Proxy Functionality", + .before_all = beforeAll, + .after_all = afterAll, + .tests = &.{ + harness.it("forwards GET requests correctly", testGetRequest), + harness.it("forwards POST requests with JSON body", testPostRequest), + harness.it("forwards PUT requests with body", testPutRequest), + harness.it("forwards PATCH requests with body", testPatchRequest), + harness.it("forwards DELETE requests", testDeleteRequest), + harness.it("forwards query strings correctly", testQueryStringForwarding), + harness.it("forwards deep paths correctly", testDeepPathForwarding), + 
harness.it("returns complete response structure", testResponseStructure), + }, +}; diff --git a/tests/suites/body.zig b/tests/suites/body.zig new file mode 100644 index 0000000..08d20fa --- /dev/null +++ b/tests/suites/body.zig @@ -0,0 +1,130 @@ +//! Body forwarding tests. +//! +//! Tests request body handling: empty, large, JSON, binary, Content-Length + +const std = @import("std"); +const harness = @import("../harness.zig"); +const utils = @import("../test_utils.zig"); +const ProcessManager = @import("../process_manager.zig").ProcessManager; + +var pm: ProcessManager = undefined; + +fn beforeAll(allocator: std.mem.Allocator) !void { + pm = ProcessManager.init(allocator); + try pm.startBackend(utils.BACKEND1_PORT, "backend1"); + try pm.startLoadBalancer(&.{utils.BACKEND1_PORT}); +} + +fn afterAll(_: std.mem.Allocator) !void { + pm.deinit(); +} + +fn testEmptyBodyPost(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/", null, ""); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const body_len = try utils.getJsonInt(allocator, body, "body_length"); + + try std.testing.expectEqual(@as(i64, 0), body_len); +} + +fn testLargeBody(allocator: std.mem.Allocator) !void { + // 1KB payload + const large_body = try allocator.alloc(u8, 1024); + defer allocator.free(large_body); + @memset(large_body, 'X'); + + const headers = &[_][2][]const u8{.{ "Content-Type", "text/plain" }}; + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/", headers, large_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const body_len = try utils.getJsonInt(allocator, body, "body_length"); + + try std.testing.expectEqual(@as(i64, 1024), body_len); +} + +fn testJsonBodyPreserved(allocator: std.mem.Allocator) !void { + const json_body = + \\{"user":"john_doe","email":"john@example.com","age":30,"active":true} + ; + const headers 
= &[_][2][]const u8{.{ "Content-Type", "application/json" }}; + + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/api/users", headers, json_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const recv_body = try utils.getJsonString(allocator, body, "body"); + defer allocator.free(recv_body); + + try std.testing.expectEqualStrings(json_body, recv_body); +} + +fn testBinaryData(allocator: std.mem.Allocator) !void { + // UTF-8 safe binary data + const binary_data = "Binary test data with special chars: \xc2\xa9\xc2\xae"; + const headers = &[_][2][]const u8{.{ "Content-Type", "application/octet-stream" }}; + + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/upload", headers, binary_data); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const body_len = try utils.getJsonInt(allocator, body, "body_length"); + + try std.testing.expectEqual(@as(i64, binary_data.len), body_len); +} + +fn testContentLengthCorrect(allocator: std.mem.Allocator) !void { + const req_body = "{\"key\":\"value\",\"number\":42}"; + const headers = &[_][2][]const u8{.{ "Content-Type", "application/json" }}; + + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/", headers, req_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + + // Check Content-Length header matches body length + const cl = try utils.getHeader(allocator, body, "Content-Length"); + defer allocator.free(cl); + const cl_int = try std.fmt.parseInt(i64, cl, 10); + + const body_len = try utils.getJsonInt(allocator, body, "body_length"); + + try std.testing.expectEqual(cl_int, body_len); + try std.testing.expectEqual(@as(i64, req_body.len), body_len); +} + +fn testSequentialPosts(allocator: std.mem.Allocator) !void { + const bodies = [_][]const u8{ + "{\"id\":1,\"name\":\"first\"}", + "{\"id\":2,\"name\":\"second\"}", + 
"{\"id\":3,\"name\":\"third\"}", + }; + + for (bodies) |req_body| { + const headers = &[_][2][]const u8{.{ "Content-Type", "application/json" }}; + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/", headers, req_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const recv_body = try utils.getJsonString(allocator, body, "body"); + defer allocator.free(recv_body); + + try std.testing.expectEqualStrings(req_body, recv_body); + } +} + +pub const suite = harness.Suite{ + .name = "Body Forwarding", + .before_all = beforeAll, + .after_all = afterAll, + .tests = &.{ + harness.it("handles empty POST body", testEmptyBodyPost), + harness.it("handles large body (1KB)", testLargeBody), + harness.it("preserves JSON body exactly", testJsonBodyPreserved), + harness.it("handles binary data", testBinaryData), + harness.it("sets Content-Length correctly", testContentLengthCorrect), + harness.it("handles multiple sequential POSTs", testSequentialPosts), + }, +}; diff --git a/tests/suites/headers.zig b/tests/suites/headers.zig new file mode 100644 index 0000000..bd70ab0 --- /dev/null +++ b/tests/suites/headers.zig @@ -0,0 +1,132 @@ +//! Header handling tests. +//! +//! 
Tests header forwarding: Content-Type, custom headers, hop-by-hop filtering + +const std = @import("std"); +const harness = @import("../harness.zig"); +const utils = @import("../test_utils.zig"); +const ProcessManager = @import("../process_manager.zig").ProcessManager; + +var pm: ProcessManager = undefined; + +fn beforeAll(allocator: std.mem.Allocator) !void { + pm = ProcessManager.init(allocator); + try pm.startBackend(utils.BACKEND1_PORT, "backend1"); + try pm.startLoadBalancer(&.{utils.BACKEND1_PORT}); +} + +fn afterAll(_: std.mem.Allocator) !void { + pm.deinit(); +} + +fn testContentTypeForwarded(allocator: std.mem.Allocator) !void { + const headers = &[_][2][]const u8{.{ "Content-Type", "application/json" }}; + const response = try utils.httpRequest(allocator, "POST", utils.LB_PORT, "/", headers, "{}"); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const ct = try utils.getHeader(allocator, body, "Content-Type"); + defer allocator.free(ct); + + try std.testing.expect(std.mem.indexOf(u8, ct, "application/json") != null); +} + +fn testCustomHeadersForwarded(allocator: std.mem.Allocator) !void { + const headers = &[_][2][]const u8{ + .{ "X-Custom-Header", "CustomValue" }, + .{ "X-Request-ID", "test-123" }, + .{ "X-API-Key", "secret-key" }, + }; + + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", headers, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + + const custom = try utils.getHeader(allocator, body, "X-Custom-Header"); + defer allocator.free(custom); + try std.testing.expectEqualStrings("CustomValue", custom); + + const req_id = try utils.getHeader(allocator, body, "X-Request-ID"); + defer allocator.free(req_id); + try std.testing.expectEqualStrings("test-123", req_id); +} + +fn testAuthorizationHeaderForwarded(allocator: std.mem.Allocator) !void { + const headers = &[_][2][]const u8{.{ "Authorization", "Bearer token123" }}; + const 
response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", headers, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const auth = try utils.getHeader(allocator, body, "Authorization"); + defer allocator.free(auth); + + try std.testing.expectEqualStrings("Bearer token123", auth); +} + +fn testHopByHopHeadersFiltered(allocator: std.mem.Allocator) !void { + const headers = &[_][2][]const u8{ + .{ "Connection", "keep-alive" }, + .{ "Keep-Alive", "timeout=5" }, + .{ "X-Safe-Header", "should-be-forwarded" }, + }; + + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", headers, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + + // Hop-by-hop headers should NOT be forwarded + const has_connection = utils.hasHeader(allocator, body, "Connection") catch false; + try std.testing.expect(!has_connection); + + // Safe headers should be forwarded + const safe = try utils.getHeader(allocator, body, "X-Safe-Header"); + defer allocator.free(safe); + try std.testing.expectEqualStrings("should-be-forwarded", safe); +} + +fn testHostHeaderPresent(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const host = try utils.getHeader(allocator, body, "Host"); + defer allocator.free(host); + + try std.testing.expect(std.mem.indexOf(u8, host, "127.0.0.1") != null); +} + +fn testResponseContentTypeForwarded(allocator: std.mem.Allocator) !void { + // Backend echo server always returns application/json + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", null, null); + defer allocator.free(response); + + // Check the actual HTTP response headers (not JSON body) + const content_type = try utils.getResponseHeaderValue(response, "Content-Type"); + try 
std.testing.expect(std.mem.indexOf(u8, content_type, "application/json") != null); +} + +fn testResponseStatusCodeForwarded(allocator: std.mem.Allocator) !void { + // Backend returns 200 OK for normal requests + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", null, null); + defer allocator.free(response); + + const status = try utils.getResponseStatusCode(response); + try std.testing.expectEqual(@as(u16, 200), status); +} + +pub const suite = harness.Suite{ + .name = "Header Handling", + .before_all = beforeAll, + .after_all = afterAll, + .tests = &.{ + harness.it("forwards Content-Type header", testContentTypeForwarded), + harness.it("forwards custom X-* headers", testCustomHeadersForwarded), + harness.it("forwards Authorization header", testAuthorizationHeaderForwarded), + harness.it("filters hop-by-hop headers", testHopByHopHeadersFiltered), + harness.it("includes Host header to backend", testHostHeaderPresent), + harness.it("forwards response Content-Type", testResponseContentTypeForwarded), + harness.it("forwards response status code", testResponseStatusCodeForwarded), + }, +}; diff --git a/tests/suites/http2.zig b/tests/suites/http2.zig new file mode 100644 index 0000000..b07e62c --- /dev/null +++ b/tests/suites/http2.zig @@ -0,0 +1,87 @@ +//! HTTP/2 backend tests. +//! +//! Tests HTTP/2 protocol support with TLS backend. +//! Uses Python hypercorn as HTTP/2 backend. +//! +//! Note: Custom header forwarding is not yet implemented for HTTP/2 backends. +//! The H2Connection.request() API only supports method, path, host, and body. 
+ +const std = @import("std"); +const harness = @import("../harness.zig"); +const utils = @import("../test_utils.zig"); +const ProcessManager = @import("../process_manager.zig").ProcessManager; + +var pm: ProcessManager = undefined; + +fn beforeAll(allocator: std.mem.Allocator) !void { + pm = ProcessManager.init(allocator); + try pm.startH2Backend(); + try pm.startLoadBalancerH2(); +} + +fn afterAll(_: std.mem.Allocator) !void { + pm.deinit(); +} + +fn testH2GetRequest(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "GET", utils.LB_H2_PORT, "/test/h2", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const method = try utils.getJsonString(allocator, body, "method"); + defer allocator.free(method); + const uri = try utils.getJsonString(allocator, body, "uri"); + defer allocator.free(uri); + + try std.testing.expectEqualStrings("GET", method); + try std.testing.expectEqualStrings("/test/h2", uri); +} + +fn testH2PostRequest(allocator: std.mem.Allocator) !void { + const req_body = "{\"protocol\":\"h2\",\"test\":true}"; + const headers = &[_][2][]const u8{.{ "Content-Type", "application/json" }}; + + const response = try utils.httpRequest(allocator, "POST", utils.LB_H2_PORT, "/api/h2", headers, req_body); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const method = try utils.getJsonString(allocator, body, "method"); + defer allocator.free(method); + const recv_body = try utils.getJsonString(allocator, body, "body"); + defer allocator.free(recv_body); + + try std.testing.expectEqualStrings("POST", method); + try std.testing.expectEqualStrings(req_body, recv_body); +} + +fn testH2ServerIdentity(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "GET", utils.LB_H2_PORT, "/", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const server_id = 
try utils.getJsonString(allocator, body, "server_id"); + defer allocator.free(server_id); + + // Verify we're hitting the HTTP/2 Python backend + try std.testing.expectEqualStrings("h2_backend", server_id); +} + +fn testH2ResponseStatus(allocator: std.mem.Allocator) !void { + const response = try utils.httpRequest(allocator, "GET", utils.LB_H2_PORT, "/", null, null); + defer allocator.free(response); + + const status = try utils.getResponseStatusCode(response); + try std.testing.expectEqual(@as(u16, 200), status); +} + +pub const suite = harness.Suite{ + .name = "HTTP/2 Backend Support", + .before_all = beforeAll, + .after_all = afterAll, + .tests = &.{ + harness.it("forwards GET requests over HTTP/2", testH2GetRequest), + harness.it("forwards POST requests with body over HTTP/2", testH2PostRequest), + harness.it("reaches HTTP/2 backend correctly", testH2ServerIdentity), + harness.it("returns correct status code from HTTP/2", testH2ResponseStatus), + }, +}; diff --git a/tests/suites/load_balancing.zig b/tests/suites/load_balancing.zig new file mode 100644 index 0000000..5002179 --- /dev/null +++ b/tests/suites/load_balancing.zig @@ -0,0 +1,94 @@ +//! Load balancing tests. +//! +//! 
Tests round-robin distribution across multiple backends + +const std = @import("std"); +const harness = @import("../harness.zig"); +const utils = @import("../test_utils.zig"); +const ProcessManager = @import("../process_manager.zig").ProcessManager; + +var pm: ProcessManager = undefined; + +fn beforeAll(allocator: std.mem.Allocator) !void { + pm = ProcessManager.init(allocator); + try pm.startBackend(utils.BACKEND1_PORT, "backend1"); + try pm.startBackend(utils.BACKEND2_PORT, "backend2"); + try pm.startBackend(utils.BACKEND3_PORT, "backend3"); + try pm.startLoadBalancer(&.{ utils.BACKEND1_PORT, utils.BACKEND2_PORT, utils.BACKEND3_PORT }); +} + +fn afterAll(_: std.mem.Allocator) !void { + pm.deinit(); +} + +fn testRoundRobinDistribution(allocator: std.mem.Allocator) !void { + var counts = std.StringHashMap(usize).init(allocator); + defer counts.deinit(); + + // Make 9 requests (divisible by 3) + for (0..9) |_| { + const response = try utils.httpRequest(allocator, "GET", utils.LB_PORT, "/", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const server_id = try utils.getJsonString(allocator, body, "server_id"); + defer allocator.free(server_id); + + const key = try allocator.dupe(u8, server_id); + const result = try counts.getOrPut(key); + if (result.found_existing) { + allocator.free(key); + result.value_ptr.* += 1; + } else { + result.value_ptr.* = 1; + } + } + + // Each backend should get exactly 3 requests + try std.testing.expectEqual(@as(usize, 3), counts.count()); + + var iter = counts.iterator(); + while (iter.next()) |entry| { + try std.testing.expectEqual(@as(usize, 3), entry.value_ptr.*); + allocator.free(entry.key_ptr.*); + } +} + +fn testAllBackendsReachable(allocator: std.mem.Allocator) !void { + var seen = std.StringHashMap(void).init(allocator); + defer seen.deinit(); + + // Make up to 12 requests, should hit all 3 backends + for (0..12) |_| { + const response = try utils.httpRequest(allocator, 
"GET", utils.LB_PORT, "/", null, null); + defer allocator.free(response); + + const body = try utils.extractJsonBody(response); + const server_id = try utils.getJsonString(allocator, body, "server_id"); + + if (!seen.contains(server_id)) { + try seen.put(try allocator.dupe(u8, server_id), {}); + } + allocator.free(server_id); + + if (seen.count() >= 3) break; + } + + try std.testing.expectEqual(@as(usize, 3), seen.count()); + + // Cleanup keys + var iter = seen.keyIterator(); + while (iter.next()) |key| { + allocator.free(key.*); + } +} + +pub const suite = harness.Suite{ + .name = "Load Balancing", + .before_all = beforeAll, + .after_all = afterAll, + .tests = &.{ + harness.it("distributes requests with round-robin (3/3/3)", testRoundRobinDistribution), + harness.it("reaches all configured backends", testAllBackendsReachable), + }, +}; diff --git a/tests/test_basic.py b/tests/test_basic.py deleted file mode 100644 index 713e2d5..0000000 --- a/tests/test_basic.py +++ /dev/null @@ -1,110 +0,0 @@ -""" -Basic proxy functionality tests. 
- -Tests that the load balancer correctly forwards: -- Different HTTP methods (GET, POST, PUT, PATCH) -- Request bodies -- Request URIs -""" -import requests -import pytest - - -@pytest.mark.timeout(20) -def test_get_request_forwarded(load_balancer): - """Test that GET requests are forwarded to the backend.""" - response = requests.get(f"{load_balancer}/", timeout=5) - - assert response.status_code == 200 - data = response.json() - - assert data["method"] == "GET" - assert data["uri"] == "/" - assert "server_id" in data - - -@pytest.mark.timeout(20) -def test_post_request_with_body(load_balancer): - """Test that POST requests with JSON body are forwarded correctly.""" - request_body = {"test": "data", "number": 42} - - response = requests.post( - f"{load_balancer}/", - json=request_body, - timeout=5 - ) - - assert response.status_code == 200 - data = response.json() - - assert data["method"] == "POST" - assert data["uri"] == "/" - # The body should be the JSON string representation - assert "test" in data["body"] - assert "data" in data["body"] - assert data["body_length"] > 0 - - -@pytest.mark.timeout(20) -def test_put_request_with_body(load_balancer): - """Test that PUT requests with body are forwarded correctly.""" - request_body = {"action": "update", "value": 123} - - response = requests.put( - f"{load_balancer}/api/resource", - json=request_body, - timeout=5 - ) - - assert response.status_code == 200 - data = response.json() - - assert data["method"] == "PUT" - assert data["uri"] == "/api/resource" - assert "action" in data["body"] - assert "update" in data["body"] - assert data["body_length"] > 0 - - -@pytest.mark.timeout(20) -def test_patch_request_with_body(load_balancer): - """Test that PATCH requests with body are forwarded correctly.""" - request_body = {"field": "name", "value": "new_value"} - - response = requests.patch( - f"{load_balancer}/api/resource/123", - json=request_body, - timeout=5 - ) - - assert response.status_code == 200 - data = 
response.json() - - assert data["method"] == "PATCH" - assert data["uri"] == "/api/resource/123" - assert "field" in data["body"] - assert "name" in data["body"] - assert data["body_length"] > 0 - - -@pytest.mark.timeout(20) -def test_response_contains_request_info(load_balancer): - """Test that echo server returns complete request information.""" - response = requests.get(f"{load_balancer}/test/path", timeout=5) - - assert response.status_code == 200 - data = response.json() - - # Verify all expected fields are present - assert "server_id" in data - assert "method" in data - assert "uri" in data - assert "headers" in data - assert "body" in data - assert "body_length" in data - - # Verify values - assert data["method"] == "GET" - assert data["uri"] == "/test/path" - assert isinstance(data["headers"], dict) - assert data["body_length"] == 0 # GET request has no body diff --git a/tests/test_body_forwarding.py b/tests/test_body_forwarding.py deleted file mode 100644 index 4acb0ea..0000000 --- a/tests/test_body_forwarding.py +++ /dev/null @@ -1,160 +0,0 @@ -""" -Body forwarding tests. 
- -Tests that the load balancer correctly forwards request bodies: -- Empty bodies -- Large bodies -- JSON bodies -- Binary data -- Content-Length header -""" -import requests -import pytest -import json - - -@pytest.mark.timeout(20) -def test_empty_body_post(load_balancer): - """Test that POST with empty body works correctly.""" - response = requests.post( - f"{load_balancer}/", - data="", - timeout=5 - ) - - assert response.status_code == 200 - data = response.json() - - assert data["method"] == "POST" - assert data["body"] == "" - assert data["body_length"] == 0 - - -@pytest.mark.timeout(30) -def test_large_body(load_balancer): - """Test that large request bodies (1KB) are forwarded correctly.""" - # Create a 1KB payload (10KB seems to cause backend health issues) - large_data = "x" * 1024 - - response = requests.post( - f"{load_balancer}/", - data=large_data, - headers={"Content-Type": "text/plain"}, - timeout=10 - ) - - assert response.status_code == 200 - data = response.json() - - assert data["method"] == "POST" - assert data["body_length"] == 1024 - assert len(data["body"]) == 1024 - assert data["body"] == large_data - - -@pytest.mark.timeout(20) -def test_json_body_preserved(load_balancer): - """Test that JSON body arrives intact at backend.""" - request_body = { - "user": "john_doe", - "email": "john@example.com", - "age": 30, - "active": True, - "tags": ["python", "testing", "pytest"], - "metadata": { - "created": "2024-01-01", - "updated": "2024-01-15" - } - } - - response = requests.post( - f"{load_balancer}/api/users", - json=request_body, - timeout=5 - ) - - assert response.status_code == 200 - data = response.json() - - assert data["method"] == "POST" - assert data["uri"] == "/api/users" - - # Parse the body that was received by backend - received_body = json.loads(data["body"]) - - # Verify all fields match - assert received_body == request_body - - -@pytest.mark.timeout(20) -def test_binary_body(load_balancer): - """Test that binary data is 
forwarded correctly.""" - # Use printable binary data to avoid JSON encoding issues - # (control characters in JSON can cause issues) - binary_data = b"Binary test data with some special chars: \xc2\xa9\xc2\xae" - - response = requests.post( - f"{load_balancer}/upload", - data=binary_data, - headers={"Content-Type": "application/octet-stream"}, - timeout=5 - ) - - assert response.status_code == 200 - data = response.json() - - assert data["method"] == "POST" - assert data["uri"] == "/upload" - assert data["body_length"] == len(binary_data) - - -@pytest.mark.timeout(20) -def test_content_length_set_correctly(load_balancer): - """Test that backend receives correct Content-Length header.""" - request_body = {"key": "value", "number": 42} - request_json = json.dumps(request_body) - - response = requests.post( - f"{load_balancer}/", - data=request_json, - headers={"Content-Type": "application/json"}, - timeout=5 - ) - - assert response.status_code == 200 - data = response.json() - - # Check that Content-Length was received - headers_lower = {k.lower(): v for k, v in data["headers"].items()} - assert "content-length" in headers_lower - - # Content-Length should match the body length - content_length = int(headers_lower["content-length"]) - assert content_length == len(request_json) - assert data["body_length"] == len(request_json) - - -@pytest.mark.timeout(20) -def test_multiple_sequential_posts(load_balancer): - """Test multiple sequential POST requests with different bodies.""" - test_cases = [ - {"id": 1, "name": "first"}, - {"id": 2, "name": "second"}, - {"id": 3, "name": "third"}, - ] - - for test_body in test_cases: - response = requests.post( - f"{load_balancer}/", - json=test_body, - timeout=5 - ) - - assert response.status_code == 200 - data = response.json() - - assert data["method"] == "POST" - - # Verify the body was forwarded correctly - received_body = json.loads(data["body"]) - assert received_body == test_body diff --git a/tests/test_headers.py 
b/tests/test_headers.py deleted file mode 100644 index ce2451b..0000000 --- a/tests/test_headers.py +++ /dev/null @@ -1,128 +0,0 @@ -""" -Header handling tests. - -Tests that the load balancer correctly: -- Forwards standard headers (Content-Type, Authorization) -- Forwards custom headers (X-*) -- Handles hop-by-hop headers -- Sets Host header to backend -""" -import requests -import pytest - - -@pytest.mark.timeout(20) -def test_content_type_forwarded(load_balancer): - """Test that Content-Type header is forwarded to backend.""" - response = requests.post( - f"{load_balancer}/", - json={"test": "data"}, - headers={"Content-Type": "application/json"}, - timeout=5 - ) - - assert response.status_code == 200 - data = response.json() - - # Check that Content-Type was received by backend - assert "content-type" in data["headers"] or "Content-Type" in data["headers"] - content_type = data["headers"].get("content-type") or data["headers"].get("Content-Type") - assert "application/json" in content_type - - -@pytest.mark.timeout(20) -def test_custom_header_forwarded(load_balancer): - """Test that custom X-* headers are forwarded.""" - custom_headers = { - "X-Custom-Header": "CustomValue", - "X-Request-ID": "test-123", - "X-API-Key": "secret-key" - } - - response = requests.get( - f"{load_balancer}/", - headers=custom_headers, - timeout=5 - ) - - assert response.status_code == 200 - data = response.json() - - # Check custom headers (case-insensitive) - headers_lower = {k.lower(): v for k, v in data["headers"].items()} - - assert "x-custom-header" in headers_lower - assert headers_lower["x-custom-header"] == "CustomValue" - - assert "x-request-id" in headers_lower - assert headers_lower["x-request-id"] == "test-123" - - assert "x-api-key" in headers_lower - assert headers_lower["x-api-key"] == "secret-key" - - -@pytest.mark.timeout(20) -def test_authorization_header_forwarded(load_balancer): - """Test that Authorization header is forwarded.""" - response = requests.get( - 
f"{load_balancer}/", - headers={"Authorization": "Bearer token123"}, - timeout=5 - ) - - assert response.status_code == 200 - data = response.json() - - # Check Authorization header (case-insensitive) - headers_lower = {k.lower(): v for k, v in data["headers"].items()} - assert "authorization" in headers_lower - assert headers_lower["authorization"] == "Bearer token123" - - -@pytest.mark.timeout(20) -def test_hop_by_hop_headers_not_forwarded(load_balancer): - """ - Test that hop-by-hop headers are NOT forwarded to backend. - - Hop-by-hop headers should be removed by the proxy: - - Connection - - Keep-Alive - - Transfer-Encoding (when not needed) - """ - # Note: Some HTTP libraries automatically add/remove hop-by-hop headers, - # so this test may need adjustment based on actual behavior - response = requests.get( - f"{load_balancer}/", - timeout=5 - ) - - assert response.status_code == 200 - data = response.json() - - headers_lower = {k.lower(): v for k, v in data["headers"].items()} - - # These hop-by-hop headers should generally not be forwarded - # (though implementation may vary) - # At minimum, check that the backend receives valid headers - assert isinstance(data["headers"], dict) - - -@pytest.mark.timeout(20) -def test_host_header_set_to_backend(load_balancer): - """Test that Host header is set to the backend address.""" - response = requests.get( - f"{load_balancer}/", - timeout=5 - ) - - assert response.status_code == 200 - data = response.json() - - # Check that Host header exists and points to backend - headers_lower = {k.lower(): v for k, v in data["headers"].items()} - assert "host" in headers_lower - - # Host should be the backend address (127.0.0.1:19001) - # or the load balancer might forward the original host - host = headers_lower["host"] - assert "127.0.0.1" in host or "localhost" in host diff --git a/tests/test_load_balancing.py b/tests/test_load_balancing.py deleted file mode 100644 index a5db286..0000000 --- a/tests/test_load_balancing.py +++ 
/dev/null @@ -1,67 +0,0 @@ -""" -Load balancing tests. - -Tests that the load balancer correctly: -- Distributes requests using round-robin algorithm -- Reaches all configured backends -""" -import requests -import pytest -from collections import Counter - - -@pytest.mark.timeout(60) -def test_round_robin_distribution(load_balancer_multi): - """ - Test that requests are distributed evenly using round-robin. - - With 3 backends and 9 requests, each backend should receive exactly 3 requests. - """ - server_ids = [] - - # Make 9 requests - for _ in range(9): - response = requests.get(f"{load_balancer_multi}/", timeout=5) - assert response.status_code == 200 - - data = response.json() - server_ids.append(data["server_id"]) - - # Count requests per backend - distribution = Counter(server_ids) - - # Each backend should receive exactly 3 requests - assert len(distribution) == 3, f"Expected 3 backends, got {len(distribution)}" - - for backend_id, count in distribution.items(): - assert count == 3, f"Backend {backend_id} received {count} requests, expected 3" - - -@pytest.mark.timeout(60) -def test_requests_reach_all_backends(load_balancer_multi): - """ - Test that all backends receive at least one request. - - This verifies that the load balancer is configured with all backends - and can reach each one. - """ - server_ids = set() - - # Make up to 12 requests, should hit all 3 backends - for _ in range(12): - response = requests.get(f"{load_balancer_multi}/", timeout=5) - assert response.status_code == 200 - - data = response.json() - server_ids.add(data["server_id"]) - - # Stop early if we've hit all backends - if len(server_ids) >= 3: - break - - # Verify we hit all 3 backends - assert len(server_ids) == 3, f"Expected 3 unique backends, got {len(server_ids)}: {server_ids}" - - # Verify backend naming - expected_backends = {"backend1", "backend2", "backend3"} - assert server_ids == expected_backends, f"Backend IDs mismatch. 
Expected {expected_backends}, got {server_ids}" diff --git a/tests/test_output.txt b/tests/test_output.txt deleted file mode 100644 index 291c8f5..0000000 --- a/tests/test_output.txt +++ /dev/null @@ -1,20 +0,0 @@ -============================= test session starts ============================== -platform darwin -- Python 3.14.0, pytest-9.0.2, pluggy-1.6.0 -- /Users/nick/repos/zzz/examples/load_balancer/tests/venv/bin/python3.14 -cachedir: .pytest_cache -rootdir: /Users/nick/repos/zzz/examples/load_balancer/tests -plugins: timeout-2.4.0 -collecting ... collected 18 items - -test_basic.py::test_get_request_forwarded PASSED [ 5%] -test_basic.py::test_post_request_with_body PASSED [ 11%] -test_basic.py::test_put_request_with_body PASSED [ 16%] -test_basic.py::test_patch_request_with_body PASSED [ 22%] -test_basic.py::test_response_contains_request_info PASSED [ 27%] -test_body_forwarding.py::test_empty_body_post PASSED [ 33%] -test_body_forwarding.py::test_large_body PASSED [ 38%] -test_body_forwarding.py::test_json_body_preserved PASSED [ 44%] -test_body_forwarding.py::test_binary_body PASSED [ 50%] -test_body_forwarding.py::test_content_length_set_correctly PASSED [ 55%] -test_body_forwarding.py::test_multiple_sequential_posts PASSED [ 61%] -test_headers.py::test_content_type_forwarded PASSED [ 66%] -test_headers.py::test_custom_header_forwarded \ No newline at end of file diff --git a/tests/test_utils.zig b/tests/test_utils.zig new file mode 100644 index 0000000..a50ad27 --- /dev/null +++ b/tests/test_utils.zig @@ -0,0 +1,240 @@ +//! Test utilities for integration tests. +//! +//! Provides: +//! - Port availability waiting +//! - HTTP request helpers +//! 
//! - JSON response parsing

const std = @import("std");
const posix = std.posix;

pub const TEST_HOST = "127.0.0.1";
pub const BACKEND1_PORT: u16 = 19001;
pub const BACKEND2_PORT: u16 = 19002;
pub const BACKEND3_PORT: u16 = 19003;
pub const LB_PORT: u16 = 18080;
pub const LB_H2_PORT: u16 = 18081; // Load balancer port for HTTP/2 tests

// 127.0.0.1 in host byte order; converted with nativeToBig at each use.
const loopback_host_order: u32 = 0x7F000001;

// Delay between connection attempts while waiting for a port.
const poll_interval_ms: u64 = 100;

/// Build a sockaddr_in pointing at 127.0.0.1:port (port/addr in network byte order).
fn loopbackSockAddr(port: u16) posix.sockaddr.in {
    return .{
        .port = std.mem.nativeToBig(u16, port),
        .addr = std.mem.nativeToBig(u32, loopback_host_order),
    };
}

/// Open a TCP socket and connect it to 127.0.0.1:port.
/// Caller owns the returned socket and must close it.
fn connectLoopback(port: u16) !posix.socket_t {
    const sock = try posix.socket(posix.AF.INET, posix.SOCK.STREAM, posix.IPPROTO.TCP);
    errdefer posix.close(sock);

    const addr = loopbackSockAddr(port);
    try posix.connect(sock, @ptrCast(&addr), @sizeOf(posix.sockaddr.in));
    return sock;
}

/// Wait for a port to accept connections.
/// Polls every 100ms; returns error.PortTimeout if 127.0.0.1:port does not
/// become reachable within timeout_ms.
pub fn waitForPort(port: u16, timeout_ms: u64) !void {
    const start = std.time.Instant.now() catch return error.TimerUnavailable;
    const timeout_ns = timeout_ms * std.time.ns_per_ms;

    while (true) {
        if (tryConnect(port)) {
            return;
        }
        const now = std.time.Instant.now() catch return error.TimerUnavailable;
        if (now.since(start) >= timeout_ns) {
            return error.PortTimeout;
        }
        posix.nanosleep(0, poll_interval_ms * std.time.ns_per_ms);
    }
}

/// Single connection attempt; true if 127.0.0.1:port accepted a TCP connection.
fn tryConnect(port: u16) bool {
    const sock = connectLoopback(port) catch return false;
    posix.close(sock);
    return true;
}

/// Wait for a TLS port to accept connections.
/// Currently identical to waitForPort (plain TCP accept only — no handshake);
/// callers should pass a generous timeout_ms since TLS listeners may take
/// longer to start.
pub fn waitForTlsPort(port: u16, timeout_ms: u64) !void {
    return waitForPort(port, timeout_ms);
}

/// Make an HTTP/1.1 request over loopback and return the raw response
/// (status line + headers + body). Caller owns the returned slice.
///
/// A Host header and "Connection: close" are always added; a Content-Length
/// header is added when `body` is non-null. The response is read until the
/// server closes the connection.
pub fn httpRequest(
    allocator: std.mem.Allocator,
    method: []const u8,
    port: u16,
    path: []const u8,
    headers: ?[]const [2][]const u8,
    body: ?[]const u8,
) ![]const u8 {
    // Assemble the request text.
    var request: std.ArrayList(u8) = .empty;
    defer request.deinit(allocator);

    const request_line = try std.fmt.allocPrint(allocator, "{s} {s} HTTP/1.1\r\n", .{ method, path });
    defer allocator.free(request_line);
    try request.appendSlice(allocator, request_line);

    const host_header = try std.fmt.allocPrint(allocator, "Host: {s}:{d}\r\n", .{ TEST_HOST, port });
    defer allocator.free(host_header);
    try request.appendSlice(allocator, host_header);

    if (headers) |hdrs| {
        for (hdrs) |h| {
            const hdr = try std.fmt.allocPrint(allocator, "{s}: {s}\r\n", .{ h[0], h[1] });
            // defer inside the loop body frees at the end of each iteration.
            defer allocator.free(hdr);
            try request.appendSlice(allocator, hdr);
        }
    }

    if (body) |b| {
        const cl = try std.fmt.allocPrint(allocator, "Content-Length: {d}\r\n", .{b.len});
        defer allocator.free(cl);
        try request.appendSlice(allocator, cl);
    }

    try request.appendSlice(allocator, "Connection: close\r\n\r\n");

    if (body) |b| {
        try request.appendSlice(allocator, b);
    }

    // Connect and send the full request.
    const sock = try connectLoopback(port);
    defer posix.close(sock);

    var sent: usize = 0;
    while (sent < request.items.len) {
        sent += try posix.send(sock, request.items[sent..], 0);
    }

    // Read until EOF (server closes due to "Connection: close").
    var response: std.ArrayList(u8) = .empty;
    errdefer response.deinit(allocator);

    var buf: [4096]u8 = undefined;
    while (true) {
        const n = try posix.recv(sock, &buf, 0);
        if (n == 0) break;
        try response.appendSlice(allocator, buf[0..n]);
    }

    return response.toOwnedSlice(allocator);
}

/// Return the body portion of an HTTP response (everything after the first
/// blank line). The returned slice points into `response`; no allocation.
/// Assumes no chunked transfer encoding — TODO confirm against backends.
pub fn extractJsonBody(response: []const u8) ![]const u8 {
    const separator = "\r\n\r\n";
    const idx = std.mem.indexOf(u8, response, separator) orelse return error.NoBodyFound;
    return response[idx + separator.len ..];
}

/// Parse `json` and return a copy of the string value at top-level `field`.
/// Caller owns the returned slice.
pub fn getJsonString(allocator: std.mem.Allocator, json: []const u8, field: []const u8) ![]const u8 {
    const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json, .{});
    defer parsed.deinit();

    const value = parsed.value.object.get(field) orelse return error.FieldNotFound;
    return switch (value) {
        .string => |s| allocator.dupe(u8, s),
        else => error.FieldNotString,
    };
}

/// Parse `json` and return the integer value at top-level `field`.
pub fn getJsonInt(allocator: std.mem.Allocator, json: []const u8, field: []const u8) !i64 {
    const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json, .{});
    defer parsed.deinit();

    const value = parsed.value.object.get(field) orelse return error.FieldNotFound;
    // Switch instead of direct `.integer` access: a non-integer field now
    // yields an error instead of a safety-checked panic.
    return switch (value) {
        .integer => |n| n,
        else => error.FieldNotInt,
    };
}

/// Check whether the JSON body's "headers" object contains `header`
/// (case-insensitive key comparison).
pub fn hasHeader(allocator: std.mem.Allocator, json: []const u8, header: []const u8) !bool {
    const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json, .{});
    defer parsed.deinit();

    const headers_val = parsed.value.object.get("headers") orelse return error.NoHeaders;

    var iter = headers_val.object.iterator();
    while (iter.next()) |entry| {
        if (std.ascii.eqlIgnoreCase(entry.key_ptr.*, header)) {
            return true;
        }
    }
    return false;
}

/// Get a header value from the JSON body's "headers" object
/// (case-insensitive key comparison). Caller owns the returned slice.
pub fn getHeader(allocator: std.mem.Allocator, json: []const u8, header: []const u8) ![]const u8 {
    const parsed = try std.json.parseFromSlice(std.json.Value, allocator, json, .{});
    defer parsed.deinit();

    const headers_val = parsed.value.object.get("headers") orelse return error.NoHeaders;

    var iter = headers_val.object.iterator();
    while (iter.next()) |entry| {
        if (std.ascii.eqlIgnoreCase(entry.key_ptr.*, header)) {
            return switch (entry.value_ptr.*) {
                .string => |s| allocator.dupe(u8, s),
                else => error.HeaderNotString,
            };
        }
    }
    return error.HeaderNotFound;
}

/// Extract the status code from an HTTP response status line,
/// e.g. "HTTP/1.1 200 OK" -> 200.
pub fn getResponseStatusCode(response: []const u8) !u16 {
    const line_end = std.mem.indexOf(u8, response, "\r\n") orelse return error.InvalidResponse;
    const status_line = response[0..line_end];

    const first_space = std.mem.indexOfScalar(u8, status_line, ' ') orelse return error.InvalidResponse;
    const after_space = status_line[first_space + 1 ..];

    // The reason phrase is optional, so a second space may be absent.
    const second_space = std.mem.indexOfScalar(u8, after_space, ' ') orelse after_space.len;
    const status_str = after_space[0..second_space];

    return std.fmt.parseInt(u16, status_str, 10) catch error.InvalidResponse;
}

/// Get a response header value (case-insensitive name match) from the raw
/// HTTP response headers (not the JSON body). The returned slice points
/// into `response`; no allocation.
///
/// Fix: the previous indexOfPos("\r\n") loop never examined the FINAL header
/// line, because the header section (everything before "\r\n\r\n") has no
/// trailing "\r\n" on its last line. splitSequence yields that last segment.
pub fn getResponseHeaderValue(response: []const u8, header_name: []const u8) ![]const u8 {
    const separator = "\r\n\r\n";
    const header_end = std.mem.indexOf(u8, response, separator) orelse return error.NoBodyFound;
    const headers_section = response[0..header_end];

    var lines = std.mem.splitSequence(u8, headers_section, "\r\n");
    while (lines.next()) |line| {
        // Lines without a colon (e.g. the status line) cannot match.
        const colon_pos = std.mem.indexOfScalar(u8, line, ':') orelse continue;
        if (!std.ascii.eqlIgnoreCase(line[0..colon_pos], header_name)) continue;

        // Skip the colon and any leading spaces before the value.
        var value_start = colon_pos + 1;
        while (value_start < line.len and line[value_start] == ' ') {
            value_start += 1;
        }
        return line[value_start..];
    }
    return error.HeaderNotFound;
}