diff --git a/.gitignore b/.gitignore index 55fc5d8..bfc612e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,3 @@ -lib/ zig-out/ .zig-cache/ -book_titles.txt -tests/db* +zig-pkg diff --git a/Makefile b/Makefile index a496227..f1f9a8a 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ zig ?= zig .PHONY: t t: TEST_FILTER='${F}' '${zig}' build test -Dtsan=true -Dforce_blocking=false -freference-trace --summary all - TEST_FILTER='${F}' '${zig}' build test -Dtsan=true -Dforce_blocking=true -freference-trace --summary all + TEST_FILTER='${F}' '${zig}' build test -Dforce_blocking=true -freference-trace --summary all .PHONY: tn tn: diff --git a/build.zig b/build.zig index b9842ae..79bc1f1 100644 --- a/build.zig +++ b/build.zig @@ -6,18 +6,19 @@ pub fn build(b: *std.Build) !void { const dep_opts = .{ .target = target, .optimize = optimize }; const metrics_module = b.dependency("metrics", dep_opts).module("metrics"); - const websocket_module = b.dependency("websocket", dep_opts).module("websocket"); + // const websocket_module = b.dependency("websocket", dep_opts).module("websocket"); const enable_tsan = b.option(bool, "tsan", "Enable ThreadSanitizer"); const httpz_module = b.addModule("httpz", .{ + .link_libc = true, .root_source_file = b.path("src/httpz.zig"), .target = target, .optimize = optimize, .sanitize_thread = enable_tsan, .imports = &.{ .{ .name = "metrics", .module = metrics_module }, - .{ .name = "websocket", .module = websocket_module }, + // .{ .name = "websocket", .module = websocket_module }, }, }); { @@ -33,7 +34,6 @@ pub fn build(b: *std.Build) !void { .filters = test_filter orelse &.{}, .test_runner = .{ .path = b.path("test_runner.zig"), .mode = .simple }, }); - tests.linkLibC(); const force_blocking = b.option(bool, "force_blocking", "Force blocking mode") orelse false; { const options = b.addOptions(); @@ -41,13 +41,13 @@ pub fn build(b: *std.Build) !void { tests.root_module.addOptions("build", options); } { - const options = b.addOptions(); - 
options.addOption(bool, "websocket_blocking", force_blocking); - websocket_module.addOptions("build", options); + // const options = b.addOptions(); + // options.addOption(bool, "websocket_blocking", force_blocking); + // websocket_module.addOptions("build", options); } tests.root_module.addImport("metrics", metrics_module); - tests.root_module.addImport("websocket", websocket_module); + // tests.root_module.addImport("websocket", websocket_module); const run_test = b.addRunArtifact(tests); run_test.has_side_effects = true; @@ -67,8 +67,9 @@ pub fn build(b: *std.Build) !void { .{ .file = "examples/05_request_takeover.zig", .name = "example_5" }, .{ .file = "examples/06_middleware.zig", .name = "example_6" }, .{ .file = "examples/07_advanced_routing.zig", .name = "example_7" }, - .{ .file = "examples/08_websocket.zig", .name = "example_8" }, - .{ .file = "examples/09_shutdown.zig", .name = "example_9", .libc = true }, + // @ZIG016 + // .{ .file = "examples/08_websocket.zig", .name = "example_8" }, + // .{ .file = "examples/09_shutdown.zig", .name = "example_9", .libc = true }, .{ .file = "examples/10_file_upload.zig", .name = "example_10" }, .{ .file = "examples/11_html_streaming.zig", .name = "example_11" }, }; @@ -86,9 +87,6 @@ pub fn build(b: *std.Build) !void { }, }), }); - if (ex.libc) { - exe.linkLibC(); - } b.installArtifact(exe); const run_cmd = b.addRunArtifact(exe); diff --git a/build.zig.zon b/build.zig.zon index 80a05a8..4213bca 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -5,13 +5,13 @@ .fingerprint = 0x472add02ac73d53c, .dependencies = .{ .metrics = .{ - .url = "https://github.com/karlseguin/metrics.zig/archive/603954879849c331a26529b88254770089acac8b.tar.gz", - .hash = "metrics-0.0.0-W7G4eP2_AQAdJGKMonHeZFaY4oU4ZXPFFTqFCFXItX3O", - }, - .websocket = .{ - .url = "https://github.com/karlseguin/websocket.zig/archive/4deaaef2b4475a63f19c5e2f43e38fd55464b118.tar.gz", - .hash = "websocket-0.1.0-ZPISdZJxAwAt6Ys_JpoHQQV3NpWCof_N9Jg-Ul2g7OoV", + .url = 
"https://github.com/karlseguin/metrics.zig/archive/6de29b83a750a06c438d268543e0e3c3c1b309da.tar.gz", + .hash = "metrics-0.0.0-W7G4eIegAQD4XxA9Co7Atbw59u_2zvxYf406AZuoAHPM", }, +// .websocket = .{ +// .url = "https://github.com/karlseguin/websocket.zig/archive/4deaaef2b4475a63f19c5e2f43e38fd55464b118.tar.gz", +// .hash = "websocket-0.1.0-ZPISdZJxAwAt6Ys_JpoHQQV3NpWCof_N9Jg-Ul2g7OoV", +// }, // .websocket = .{ .path = "../websocket.zig" }, }, } diff --git a/examples/01_basic.zig b/examples/01_basic.zig index 47de0b8..342689b 100644 --- a/examples/01_basic.zig +++ b/examples/01_basic.zig @@ -7,14 +7,13 @@ const PORT = 8801; // This example demonstrates basic httpz usage, with focus on using the // httpz.Request and httpz.Response objects. -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + const allocator = init.gpa; // We pass a "void" handler. This is the simplest, but limits what we can do // The last parameter is an instance of our handler. Since we have // a void handler, we pass a void value: i.e. {}. - var server = try httpz.Server(void).init(allocator, .{ + var server = try httpz.Server(void).init(init.io, allocator, .{ .address = .localhost(PORT), .request = .{ // httpz has a number of tweakable configuration settings (see readme) diff --git a/examples/02_handler.zig b/examples/02_handler.zig index db3bf7e..05a00c3 100644 --- a/examples/02_handler.zig +++ b/examples/02_handler.zig @@ -9,14 +9,13 @@ const PORT = 8802; // including things such as a DB pool) and how to define not found and error // handlers. -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + const allocator = init.gpa; // We specify our "Handler" and, as the last parameter to init, pass an // instance of it. 
var handler = Handler{}; - var server = try httpz.Server(*Handler).init(allocator, .{ .address = .localhost(PORT) }, &handler); + var server = try httpz.Server(*Handler).init(init.io, allocator, .{ .address = .localhost(PORT) }, &handler); defer server.deinit(); diff --git a/examples/03_dispatch.zig b/examples/03_dispatch.zig index 402446f..168d534 100644 --- a/examples/03_dispatch.zig +++ b/examples/03_dispatch.zig @@ -1,5 +1,7 @@ const std = @import("std"); const httpz = @import("httpz"); + +const Io = std.Io; const Allocator = std.mem.Allocator; const PORT = 8803; @@ -7,12 +9,13 @@ const PORT = 8803; // This example uses a custom dispatch method on our handler for greater control // in how actions are executed. -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + const allocator = init.gpa; - var handler = Handler{}; - var server = try httpz.Server(*Handler).init(allocator, .{ .address = .localhost(PORT) }, &handler); + var handler = Handler{ + .io = init.io, + }; + var server = try httpz.Server(*Handler).init(init.io, allocator, .{ .address = .localhost(PORT) }, &handler); defer server.deinit(); @@ -30,6 +33,8 @@ pub fn main() !void { } const Handler = struct { + io: Io, + // In addition to the special "notFound" and "uncaughtError" shown in example 2 // the special "dispatch" method can be used to gain more control over request handling. pub fn dispatch(self: *Handler, action: httpz.Action(*Handler), req: *httpz.Request, res: *httpz.Response) !void { @@ -37,12 +42,13 @@ const Handler = struct { // httpz supports middlewares, but in many cases, having a dispatch is good // enough and is much more straightforward. - var start = try std.time.Timer.start(); + var start = Io.Timestamp.now(self.io, .awake); // We don't _have_ to call the action if we don't want to. For example // we could do authentication and set the response directly on error. 
try action(self, req, res); - std.debug.print("ts={d} us={d} path={s}\n", .{ std.time.timestamp(), start.lap() / 1000, req.url.path }); + const elapsed = start.untilNow(self.io, .awake); + std.debug.print("ts={d} us={d} path={s}\n", .{ start.toSeconds(), elapsed.toMicroseconds(), req.url.path }); } }; diff --git a/examples/04_action_context.zig b/examples/04_action_context.zig index a2d3a92..7f2099c 100644 --- a/examples/04_action_context.zig +++ b/examples/04_action_context.zig @@ -7,12 +7,11 @@ const PORT = 8804; // This example is very similar to 03_dispatch.zig, but shows how the action // state can be a different type than the handler. -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + const allocator = init.gpa; var handler = Handler{}; - var server = try httpz.Server(*Handler).init(allocator, .{ .address = .localhost(PORT) }, &handler); + var server = try httpz.Server(*Handler).init(init.io, allocator, .{ .address = .localhost(PORT) }, &handler); defer server.deinit(); diff --git a/examples/05_request_takeover.zig b/examples/05_request_takeover.zig index 508730b..642d243 100644 --- a/examples/05_request_takeover.zig +++ b/examples/05_request_takeover.zig @@ -7,12 +7,11 @@ const PORT = 8805; // This example uses the Handler's "handle" function to completely takeover // request processing from httpz. 
-pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + const allocator = init.gpa; var handler = Handler{}; - var server = try httpz.Server(*Handler).init(allocator, .{ .address = .localhost(PORT) }, &handler); + var server = try httpz.Server(*Handler).init(init.io, allocator, .{ .address = .localhost(PORT) }, &handler); defer server.deinit(); diff --git a/examples/06_middleware.zig b/examples/06_middleware.zig index 2a60846..e2358b3 100644 --- a/examples/06_middleware.zig +++ b/examples/06_middleware.zig @@ -13,11 +13,10 @@ const PORT = 8806; // See middleware/Logger.zig for an example of how to write a middleware -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + const allocator = init.gpa; - var server = try httpz.Server(void).init(allocator, .{ .address = .localhost(PORT) }, {}); + var server = try httpz.Server(void).init(init.io, allocator, .{ .address = .localhost(PORT) }, {}); defer server.deinit(); @@ -27,7 +26,7 @@ pub fn main() !void { // creates an instance of the middleware with the given configuration // see example/middleware/Logger.zig - const logger = try server.middleware(Logger, .{ .query = true }); + const logger = try server.middleware(Logger, .{ .io = init.io, .query = true }); var router = try server.router(.{}); diff --git a/examples/07_advanced_routing.zig b/examples/07_advanced_routing.zig index b2cf5c5..c424a8d 100644 --- a/examples/07_advanced_routing.zig +++ b/examples/07_advanced_routing.zig @@ -1,6 +1,7 @@ const std = @import("std"); const httpz = @import("httpz"); +const Io = std.Io; const Allocator = std.mem.Allocator; const PORT = 8807; @@ -9,19 +10,20 @@ const PORT = 8807; // and route configuration. (The previous example, with middleware, also showed // per-route configuration for middleware specifically). 
-pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + const allocator = init.gpa; var default_handler = Handler{ + .io = init.io, .log = true, }; var nolog_handler = Handler{ + .io = init.io, .log = false, }; - var server = try httpz.Server(*Handler).init(allocator, .{ .address = .localhost(PORT) }, &default_handler); + var server = try httpz.Server(*Handler).init(init.io, allocator, .{ .address = .localhost(PORT) }, &default_handler); defer server.deinit(); @@ -48,6 +50,7 @@ pub fn main() !void { } const Handler = struct { + io: Io, log: bool, // special dispatch set in the info route @@ -58,7 +61,7 @@ const Handler = struct { pub fn dispatch(h: *Handler, action: httpz.Action(*Handler), req: *httpz.Request, res: *httpz.Response) !void { try action(h, req, res); if (h.log) { - std.debug.print("ts={d} path={s} status={d}\n", .{ std.time.timestamp(), req.url.path, res.status }); + std.debug.print("ts={d} path={s} status={d}\n", .{ Io.Timestamp.now(h.io, .real), req.url.path, res.status }); } } }; diff --git a/examples/08_websocket.zig b/examples/08_websocket.zig index 7de6fac..3d4485f 100644 --- a/examples/08_websocket.zig +++ b/examples/08_websocket.zig @@ -13,13 +13,12 @@ pub const std_options = std.Options{ .log_scope_levels = &[_]std.log.ScopeLevel{ } }; // This example show how to upgrade a request to websocket. 
-pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + const allocator = init.gpa; // For websocket support, you _must_ define a Handler, and your Handler _must_ // have a WebsocketHandler declaration - var server = try httpz.Server(Handler).init(allocator, .{ .address = .localhost(PORT) }, Handler{}); + var server = try httpz.Server(Handler).init(init.io, allocator, .{ .address = .localhost(PORT) }, Handler{}); defer server.deinit(); diff --git a/examples/09_shutdown.zig b/examples/09_shutdown.zig index 2adbc7e..2988dbf 100644 --- a/examples/09_shutdown.zig +++ b/examples/09_shutdown.zig @@ -9,14 +9,12 @@ const PORT = 8809; var server_instance: ?*httpz.Server(void) = null; -pub fn main() !void { +pub fn main(init: std.process.Init) !void { if (comptime @import("builtin").os.tag == .windows) { std.debug.print("This example does not run on Windows. Sorry\n", .{}); return error.PlatformNotSupported; } - - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); + const allocator = init.gpa; // call our shutdown function (below) when // SIGINT or SIGTERM are received @@ -31,7 +29,7 @@ pub fn main() !void { .flags = 0, }, null); - var server = try httpz.Server(void).init(allocator, .{ .address = .localhost(PORT) }, {}); + var server = try httpz.Server(void).init(init.io, allocator, .{ .address = .localhost(PORT) }, {}); defer server.deinit(); var router = try server.router(.{}); diff --git a/examples/10_file_upload.zig b/examples/10_file_upload.zig index 4da333f..70b0907 100644 --- a/examples/10_file_upload.zig +++ b/examples/10_file_upload.zig @@ -11,11 +11,10 @@ const PORT = 8810; // 3. Save uploaded files to disk // 4. 
Handle both file and regular form fields -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + const allocator = init.gpa; - var server = try httpz.Server(void).init(allocator, .{ + var server = try httpz.Server(void).init(init.io, allocator, .{ .address = .localhost(PORT), .request = .{ // Configure the maximum number of multipart form fields diff --git a/examples/11_html_streaming.zig b/examples/11_html_streaming.zig index acf6fe0..dcdac49 100644 --- a/examples/11_html_streaming.zig +++ b/examples/11_html_streaming.zig @@ -5,11 +5,10 @@ const Allocator = std.mem.Allocator; const PORT = 8801; /// This example demonstrates HTML streaming. -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + const allocator = init.gpa; - var server = try httpz.Server(void).init(allocator, .{ + var server = try httpz.Server(void).init(init.io, allocator, .{ .address = .localhost(PORT), }, {}); defer server.deinit(); @@ -29,7 +28,6 @@ pub fn main() !void { } fn index(_: *httpz.Request, res: *httpz.Response) !void { - const wait_time = 1_000_000_000; // 1 second try res.chunk( \\ @@ -45,10 +43,12 @@ fn index(_: *httpz.Request, res: *httpz.Response) !void { \\ \\ ); - std.Thread.sleep(wait_time); + + const io = res.conn.io; + try io.sleep(.fromSeconds(1), .awake); try res.chunk("\nItem 2"); - std.Thread.sleep(wait_time); + try io.sleep(.fromSeconds(1), .awake); try res.chunk("\nItem 0"); - std.Thread.sleep(wait_time); + try io.sleep(.fromSeconds(1), .awake); try res.chunk("\nItem 1"); } diff --git a/examples/middleware/Logger.zig b/examples/middleware/Logger.zig index 41f12c2..84d58fc 100644 --- a/examples/middleware/Logger.zig +++ b/examples/middleware/Logger.zig @@ -10,8 +10,11 @@ const std = @import("std"); const httpz = @import("httpz"); +const Io = std.Io; + const Logger = 
@This(); +io: Io, query: bool, // Must define an `init` method, which will accept your Config @@ -19,6 +22,7 @@ query: bool, // here mc will give you access to the server's allocator and arena pub fn init(config: Config) !Logger { return .{ + .io = config.io, .query = config.query, }; } @@ -34,11 +38,11 @@ pub fn execute(self: *const Logger, req: *httpz.Request, res: *httpz.Response, e // Better to use an std.time.Timer to measure elapsed time // but we need the "start" time for our log anyways, so while this might occasionally // report wrong/strange "elapsed" time, it's simpler to do. - const start = std.time.microTimestamp(); + const start = Io.Timestamp.now(self.io, .awake); defer { - const elapsed = std.time.microTimestamp() - start; - std.log.info("{d}\t{s}?{s}\t{d}\t{d}us", .{start, req.url.path, if (self.query) req.url.query else "", res.status, elapsed}); + const elapsed = start.untilNow(self.io, .awake); + std.log.info("{d}\t{s}?{s}\t{d}\t{d}us", .{start, req.url.path, if (self.query) req.url.query else "", res.status, elapsed.toMicroseconds()}); } // If you don't call executor.next(), there will be no further processing of @@ -48,5 +52,6 @@ pub fn execute(self: *const Logger, req: *httpz.Request, res: *httpz.Response, e // Must defined a pub config structure, even if it's empty pub const Config = struct { - query: bool, + io: Io, + query: bool, }; diff --git a/readme.md b/readme.md index 5c4d232..80fdfc5 100644 --- a/readme.md +++ b/readme.md @@ -1,17 +1,19 @@ # An HTTP/1.1 server for Zig. +## Zig Version +This is for Zig 0.16.0. Use the [zig-0.15.2](https://github.com/karlseguin/http.zig/tree/zig-0.15) branch for Zig 0.15 or the [dev](https://github.com/karlseguin/http.zig/tree/dev) which may or may not be up to date with zig dev. 
+ ```zig const std = @import("std"); const httpz = @import("httpz"); -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + const allocator = init.gpa; // More advance cases will use a custom "Handler" instead of "void". // The last parameter is our handler instance, since we have a "void" // handler, we passed a void ({}) value. - var server = try httpz.Server(void).init(allocator, .{ + var server = try httpz.Server(void).init(init.io, allocator, .{ // use .all(5882) to bind to all interfaces, i.e. 0.0.0.0 .address = .localhost(5882), }, {}); @@ -115,11 +117,10 @@ const pg = @import("pg"); const std = @import("std"); const httpz = @import("httpz"); -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + const allocator = init.gpa; - var db = try pg.Pool.init(allocator, .{ + var db = try pg.Pool.init(init.io, allocator, .{ .connect = .{ .port = 5432, .host = "localhost"}, .auth = .{.username = "user", .database = "db", .password = "pass"} }); @@ -129,7 +130,7 @@ pub fn main() !void { .db = db, }; - var server = try httpz.Server(*App).init(allocator, .{.address = .localhost(5882)}, &app); + var server = try httpz.Server(*App).init(init.io, allocator, .{.address = .localhost(5882)}, &app); var router = try server.router(.{}); router.get("/api/user/:id", getUser, .{}); try server.listen(); @@ -265,11 +266,10 @@ The library supports both simple and complex use cases. 
A simple use case is sho const std = @import("std"); const httpz = @import("httpz"); -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + const allocator = init.gpa; - var server = try httpz.Server(void).init(allocator, .{.address = .localhost(5882)}, {}); + var server = try httpz.Server(void).init(init.io, allocator, .{.address = .localhost(5882)}, {}); // overwrite the default notFound handler server.notFound(notFound); @@ -700,7 +700,7 @@ The last parameter to the various `router` methods is a route configuration. In You can specify a separate configuration for each route. To change the configuration for a group of routes, you have two options. The first, is to directly change the router's `handler`, `dispatcher` and `middlewares` field. Any subsequent routes will use these values: ```zig -var server = try httpz.Server(Handler).init(allocator, .{.address = .localhost(5882)}, &handler); +var server = try httpz.Server(Handler).init(io, allocator, .{.address = .localhost(5882)}, &handler); var router = try server.router(.{}); @@ -803,7 +803,7 @@ A middleware is a struct which exposes a nested `Config` type, a public `init` f A middleware instance is created using `server.middleware()` and can then be used with the router: ```zig -var server = try httpz.Server(void).init(allocator, .{.address = .localhost(5882)}, {}); +var server = try httpz.Server(void).init(io, allocator, .{.address = .localhost(5882)}, {}); // the middleware method takes the struct name and its configuration const cors = try server.middleware(httpz.middleware.Cors, .{ diff --git a/src/buffer.zig b/src/buffer.zig index a9aaee5..f5e3fe2 100644 --- a/src/buffer.zig +++ b/src/buffer.zig @@ -1,9 +1,9 @@ const std = @import("std"); const metrics = @import("metrics.zig"); - const blockingMode = @import("httpz.zig").blockingMode; -const Mutex = std.Thread.Mutex; +const Io = std.Io; +const Mutex = 
Io.Mutex; const Allocator = std.mem.Allocator; pub const Buffer = struct { @@ -25,13 +25,14 @@ pub const Buffer = struct { pub const Pool = struct { const M = if (blockingMode()) Mutex else void; + io: Io, + mutex: M, available: usize, buffers: []Buffer, allocator: Allocator, buffer_size: usize, - mutex: M, - pub fn init(allocator: Allocator, count: usize, buffer_size: usize) !Pool { + pub fn init(io: Io, allocator: Allocator, count: usize, buffer_size: usize) !Pool { const buffers = try allocator.alloc(Buffer, count); errdefer allocator.free(buffers); @@ -51,7 +52,8 @@ pub const Pool = struct { } return .{ - .mutex = if (comptime blockingMode()) .{} else {}, + .io = io, + .mutex = if (comptime blockingMode()) .init else {}, .buffers = buffers, .available = count, .allocator = allocator, @@ -67,17 +69,6 @@ pub const Pool = struct { allocator.free(self.buffers); } - pub fn grow(self: *Pool, arena: Allocator, buffer: *Buffer, current_size: usize, new_size: usize) !Buffer { - if (buffer.type == .dynamic and arena.resize(buffer.data, new_size)) { - buffer.data = buffer.data.ptr[0..new_size]; - return buffer.*; - } - const new_buffer = try self.arenaAlloc(arena, new_size); - @memcpy(new_buffer.data[0..current_size], buffer.data[0..current_size]); - self.release(buffer.*); - return new_buffer; - } - pub fn static(self: Pool, size: usize) !Buffer { return .{ .type = .static, @@ -145,19 +136,19 @@ pub const Pool = struct { inline fn lock(self: *Pool) void { if (comptime blockingMode()) { - self.mutex.lock(); + self.mutex.lockUncancelable(self.io); } } inline fn unlock(self: *Pool) void { if (comptime blockingMode()) { - self.mutex.unlock(); + self.mutex.unlock(self.io); } } }; const t = @import("t.zig"); test "BufferPool" { - var pool = try Pool.init(t.allocator, 2, 10); + var pool = try Pool.init(t.io, t.allocator, 2, 10); defer pool.deinit(); { @@ -197,42 +188,3 @@ test "BufferPool" { pool.release(buf4); } } - -test "BufferPool: grow" { - defer t.reset(); - - var pool = 
try Pool.init(t.allocator, 1, 10); - defer pool.deinit(); - - { - // grow a dynamic buffer - var buf1 = try pool.alloc(15); - @memcpy(buf1.data[0..5], "hello"); - const buf2 = try pool.grow(t.arena.allocator(), &buf1, 5, 20); - defer pool.free(buf2); - try t.expectEqual(20, buf2.data.len); - try t.expectString("hello", buf2.data[0..5]); - } - - { - // grow a static buffer - var buf1 = try pool.static(15); - defer pool.free(buf1); - @memcpy(buf1.data[0..6], "hello2"); - const buf2 = try pool.grow(t.arena.allocator(), &buf1, 6, 21); - defer pool.free(buf2); - try t.expectEqual(21, buf2.data.len); - try t.expectString("hello2", buf2.data[0..6]); - } - - { - // grow a pooled buffer - var buf1 = try pool.alloc(8); - @memcpy(buf1.data[0..7], "hello2a"); - const buf2 = try pool.grow(t.arena.allocator(), &buf1, 7, 14); - defer pool.free(buf2); - try t.expectEqual(14, buf2.data.len); - try t.expectString("hello2a", buf2.data[0..7]); - try t.expectEqual(1, pool.available); - } -} diff --git a/src/config.zig b/src/config.zig index 406e8ad..3b25a94 100644 --- a/src/config.zig +++ b/src/config.zig @@ -1,12 +1,14 @@ const std = @import("std"); + +const posix = @import("posix.zig"); const httpz = @import("httpz.zig"); const request = @import("request.zig"); const response = @import("response.zig"); -const Address = std.net.Address; +const Io = std.Io; pub const Config = struct { - address: AddressConfig = .localhost(5882), + address: Address = .localhost(5882), workers: Worker = .{}, request: Request = .{}, response: Response = .{}, @@ -14,23 +16,35 @@ pub const Config = struct { thread_pool: ThreadPool = .{}, websocket: Websocket = .{}, - pub const AddressConfig = union(enum) { - ip: IpAddress, + pub const Address = union(enum) { + ip: Io.net.IpAddress, unix: []const u8, - addr: Address, - pub fn localhost(port: u16) AddressConfig { - return .{ .addr = .initIp4(.{ 127, 0, 0, 1 }, port) }; + pub fn localhost(port: u16) Address { + return .{ .ip = .{ .ip4 = .{ .bytes = .{ 127, 0, 
0, 1 }, .port = port } } }; } - pub fn all(port: u16) AddressConfig { - return .{ .addr = .initIp4(.{ 0, 0, 0, 0 }, port) }; + pub fn all(port: u16) Address { + return .{ .ip = .{ .ip4 = .{ .bytes = .{ 0, 0, 0, 0 }, .port = port } } }; } - }; - pub const IpAddress = struct { - host: []const u8, - port: u16, + pub fn toPosix(address: Address, io: Io) !posix.Address { + switch (address) { + .unix => |path| { + if (comptime Io.net.has_unix_sockets == false) { + return error.UnixPathNotSupported; + } + // Best-effort cleanup of a stale socket file; ignore errors + // (file may not exist yet). + Io.Dir.deleteFileAbsolute(io, path) catch {}; + return posix.Address.initUnix(path); + }, + .ip => |ip| switch (ip) { + .ip4 => |ip4| return posix.Address.initIp4(ip4.bytes, ip4.port), + .ip6 => |ip6| return posix.Address.initIp6(ip6.bytes, ip6.port, ip6.flow, ip6.interface.index), + }, + } + } }; pub const ThreadPool = struct { @@ -80,28 +94,6 @@ pub const Config = struct { compression_write_treshold: ?usize = null, }; - pub fn parseAddress(self: *const Config) !Address { - return switch (self.address) { - .ip => |i| try .parseIp(i.host, i.port), - .unix => |unix_path| b: { - if (comptime std.net.has_unix_sockets == false) { - break :b error.UnixPathNotSupported; - } - std.fs.deleteFileAbsolute(unix_path) catch {}; - break :b try .initUnix(unix_path); - }, - .addr => |a| a, - }; - } - - pub fn isUnixAddress(config: *const Config) bool { - return switch (config.address) { - .unix => true, - .ip => false, - .addr => |a| a.any.family == std.posix.AF.UNIX, - }; - } - pub fn threadPoolCount(self: *const Config) u32 { return self.thread_pool.count orelse 32; } diff --git a/src/httpz.zig b/src/httpz.zig index b087599..156d752 100644 --- a/src/httpz.zig +++ b/src/httpz.zig @@ -2,8 +2,10 @@ const std = @import("std"); const builtin = @import("builtin"); pub const testing = @import("testing.zig"); -pub const websocket = @import("websocket"); +// @ZIG016 +// pub const websocket = 
@import("websocket"); +const posix = @import("posix.zig"); pub const routing = @import("router.zig"); pub const request = @import("request.zig"); pub const response = @import("response.zig"); @@ -17,8 +19,8 @@ pub const Url = @import("url.zig").Url; pub const Config = @import("config.zig").Config; const Thread = std.Thread; +const Io = std.Io; const net = std.net; -const posix = std.posix; const Allocator = std.mem.Allocator; const FixedBufferAllocator = std.heap.FixedBufferAllocator; @@ -252,24 +254,26 @@ pub fn Server(comptime H: type) type { }; return struct { + io: Io, handler: H, config: Config, arena: Allocator, allocator: Allocator, _router: Router(H, ActionArg), - _mut: Thread.Mutex, + _mut: Io.Mutex, _workers: []Worker, - _cond: Thread.Condition, - _listener: ?posix.socket_t, + _cond: Io.Condition, + _listener: ?posix.fd_t, _max_request_per_connection: usize, _middlewares: []const Middleware(H), - _websocket_state: websocket.server.WorkerState, + // ZIG016 + // _websocket_state: websocket.server.WorkerState, _middleware_registry: std.SinglyLinkedList, const Self = @This(); const Worker = if (blockingMode()) worker.Blocking(*Self, WebsocketHandler) else worker.NonBlocking(*Self, WebsocketHandler); - pub fn init(allocator: Allocator, config: Config, handler: H) !Self { + pub fn init(io: Io, allocator: Allocator, config: Config, handler: H) !Self { // Be mindful about where we pass this arena. Most things are able to // do dynamic allocation, and need to be able to free when they're // done with their memory. Only use this for stuff that's created on @@ -283,50 +287,54 @@ pub fn Server(comptime H: type) type { // do not pass arena.allocator to WorkerState, it needs to be able to // allocate and free at will. 
- const ws_config = config.websocket; - var websocket_state = try websocket.server.WorkerState.init(allocator, .{ - .max_message_size = ws_config.max_message_size, - .buffers = .{ - .small_size = if (has_websocket) ws_config.small_buffer_size else 0, - .small_pool = if (has_websocket) ws_config.small_buffer_pool else 0, - .large_size = if (has_websocket) ws_config.large_buffer_size else 0, - .large_pool = if (has_websocket) ws_config.large_buffer_pool else 0, - }, - // disable handshake memory allocation since httpz is handling - // the handshake request directly - .handshake = .{ - .count = 0, - .max_size = 0, - .max_headers = 0, - }, - .compression = if (ws_config.compression) .{ - .write_threshold = ws_config.compression_write_treshold, - .retain_write_buffer = ws_config.compression_retain_writer, - } else null, - }); - errdefer websocket_state.deinit(); + + // @ZIG016 + // const ws_config = config.websocket; + // var websocket_state = try websocket.server.WorkerState.init(allocator, .{ + // .max_message_size = ws_config.max_message_size, + // .buffers = .{ + // .small_size = if (has_websocket) ws_config.small_buffer_size else 0, + // .small_pool = if (has_websocket) ws_config.small_buffer_pool else 0, + // .large_size = if (has_websocket) ws_config.large_buffer_size else 0, + // .large_pool = if (has_websocket) ws_config.large_buffer_pool else 0, + // }, + // // disable handshake memory allocation since httpz is handling + // // the handshake request directly + // .handshake = .{ + // .count = 0, + // .max_size = 0, + // .max_headers = 0, + // }, + // .compression = if (ws_config.compression) .{ + // .write_threshold = ws_config.compression_write_treshold, + // .retain_write_buffer = ws_config.compression_retain_writer, + // } else null, + // }); + // errdefer websocket_state.deinit(); const workers = try arena.allocator().alloc(Worker, config.workerCount()); return .{ + .io = io, .config = config, .handler = handler, .allocator = allocator, .arena = 
arena.allocator(), - ._mut = .{}, - ._cond = .{}, + ._mut = .init, + ._cond = .init, ._workers = workers, ._listener = null, ._middlewares = &.{}, ._middleware_registry = .{}, - ._websocket_state = websocket_state, + // ._websocket_state = websocket_state, ._router = try Router(H, ActionArg).init(arena.allocator(), default_dispatcher, handler), ._max_request_per_connection = config.timeout.request_count orelse MAX_REQUEST_COUNT, }; } pub fn deinit(self: *Self) void { - self._websocket_state.deinit(); + // @ZIG016 + // self._websocket_state.deinit(); var node = self._middleware_registry.first; while (node) |n| { @@ -342,24 +350,25 @@ pub fn Server(comptime H: type) type { pub fn listen(self: *Self) !void { // incase "stop" is waiting - defer self._cond.signal(); - self._mut.lock(); - errdefer self._mut.unlock(); + const io = self.io; - const config = self.config; + defer self._cond.signal(io); + self._mut.lockUncancelable(io); + errdefer self._mut.unlock(io); - const no_delay = config.isUnixAddress(); - const address = try config.parseAddress(); + const config = self.config; + const address = try config.address.toPosix(io); + const is_unix_socket = address.any.family == posix.AF.UNIX; const listener = blk: { var sock_flags: u32 = posix.SOCK.STREAM | posix.SOCK.CLOEXEC; if (blockingMode() == false) sock_flags |= posix.SOCK.NONBLOCK; - const proto = if (address.any.family == posix.AF.UNIX) @as(u32, 0) else posix.IPPROTO.TCP; + const proto = if (is_unix_socket) @as(u32, 0) else posix.IPPROTO.TCP; break :blk try posix.socket(address.any.family, sock_flags, proto); }; - if (no_delay) { + if (is_unix_socket) { // TODO: Broken on darwin: // https://github.com/ziglang/zig/issues/17260 // if (@hasDecl(os.TCP, "NODELAY")) { @@ -370,7 +379,7 @@ pub fn Server(comptime H: type) type { try posix.setsockopt(listener, posix.SOL.SOCKET, posix.SO.REUSEADDR, &std.mem.toBytes(@as(c_int, 1))); - if (!config.isUnixAddress() and self._workers.len > 1) { + if (is_unix_socket == false and 
self._workers.len > 1) { if (@hasDecl(posix.SO, "REUSEPORT_LB")) { try posix.setsockopt(listener, posix.SOL.SOCKET, posix.SO.REUSEPORT_LB, &std.mem.toBytes(@as(c_int, 1))); } else if (@hasDecl(posix.SO, "REUSEPORT")) { @@ -390,14 +399,14 @@ pub fn Server(comptime H: type) type { const allocator = self.allocator; if (comptime blockingMode()) { - workers[0] = try worker.Blocking(*Self, WebsocketHandler).init(allocator, self, &config); + workers[0] = try worker.Blocking(*Self, WebsocketHandler).init(io, allocator, self, &config); defer workers[0].deinit(); const thrd = try Thread.spawn(.{}, worker.Blocking(*Self, WebsocketHandler).listen, .{ &workers[0], listener }); // incase listenInNewThread was used and is waiting for us to start - self._cond.signal(); - self._mut.unlock(); + self._cond.signal(io); + self._mut.unlock(io); // This will unblock when server.stop() is called and the listening // socket is closed. @@ -412,10 +421,10 @@ pub fn Server(comptime H: type) type { workers[i].stop(); }; - var ready_sem = std.Thread.Semaphore{}; + var ready_sem = Io.Semaphore{}; const threads = try self.arena.alloc(Thread, workers.len); for (0..workers.len) |i| { - workers[i] = try Worker.init(allocator, self, &config); + workers[i] = try Worker.init(io, allocator, self, &config); errdefer { workers[i].stop(); workers[i].deinit(); @@ -425,12 +434,12 @@ pub fn Server(comptime H: type) type { } for (0..workers.len) |_| { - ready_sem.wait(); + ready_sem.waitUncancelable(io); } // incase listenInNewThread was used and is waiting for us to start - self._cond.signal(); - self._mut.unlock(); + self._cond.signal(io); + self._mut.unlock(io); for (threads) |thrd| { thrd.join(); @@ -439,19 +448,21 @@ pub fn Server(comptime H: type) type { } pub fn listenInNewThread(self: *Self) !std.Thread { - self._mut.lock(); - defer self._mut.unlock(); + const io = self.io; + self._mut.lockUncancelable(io); + defer self._mut.unlock(io); const thrd = try std.Thread.spawn(.{}, listen, .{self}); // we 
don't return until listen() signals us that the server is up - self._cond.wait(&self._mut); + self._cond.waitUncancelable(io, &self._mut); return thrd; } pub fn stop(self: *Self) void { - self._mut.lock(); - defer self._mut.unlock(); + const io = self.io; + self._mut.lockUncancelable(io); + defer self._mut.unlock(io); for (self._workers) |*w| { if (self._listener == null) { @@ -651,56 +662,57 @@ pub fn blockingMode() bool { }; } -pub fn upgradeWebsocket(comptime H: type, req: *Request, res: *Response, ctx: anytype) !bool { - const upgrade = req.header("upgrade") orelse return false; - if (std.ascii.eqlIgnoreCase(upgrade, "websocket") == false) { - return false; - } - - const version = req.header("sec-websocket-version") orelse return false; - if (std.ascii.eqlIgnoreCase(version, "13") == false) { - return false; - } - - // firefox will send multiple values for this header - const connection = req.header("connection") orelse return false; - if (std.ascii.indexOfIgnoreCase(connection, "upgrade") == null) { - return false; - } - - const key = req.header("sec-websocket-key") orelse return false; - - const http_conn = res.conn; - const ws_worker: *websocket.server.Worker(H) = @ptrCast(@alignCast(http_conn.ws_worker)); - - var hc = try ws_worker.createConn(http_conn.stream.handle, http_conn.address, worker.timestamp(0)); - errdefer ws_worker.cleanupConn(hc); - - hc.handler = try H.init(&hc.conn, ctx); - - var compression = false; - if (ws_worker.canCompress()) { - if (req.header("sec-websocket-extensions")) |ext| { - compression = try websocket.Handshake.parseExtension(ext) != null; - } - } - - var reply_buf: [512]u8 = undefined; - const reply = try websocket.Handshake.createReply(key, null, compression, &reply_buf); - var writer = http_conn.stream.writer(&.{}); - const w = &writer.interface; - try w.writeAll(reply); - try w.flush(); - - if (comptime std.meta.hasFn(H, "afterInit")) { - const params = @typeInfo(@TypeOf(H.afterInit)).@"fn".params; - try if (comptime 
params.len == 1) hc.handler.?.afterInit() else hc.handler.?.afterInit(ctx); - } - try ws_worker.setupConnection(hc); - res.written = true; - http_conn.handover = .{ .websocket = hc }; - return true; -} +// @ZIG016 +// pub fn upgradeWebsocket(comptime H: type, req: *Request, res: *Response, ctx: anytype) !bool { +// const upgrade = req.header("upgrade") orelse return false; +// if (std.ascii.eqlIgnoreCase(upgrade, "websocket") == false) { +// return false; +// } + +// const version = req.header("sec-websocket-version") orelse return false; +// if (std.ascii.eqlIgnoreCase(version, "13") == false) { +// return false; +// } + +// // firefox will send multiple values for this header +// const connection = req.header("connection") orelse return false; +// if (std.ascii.indexOfIgnoreCase(connection, "upgrade") == null) { +// return false; +// } + +// const key = req.header("sec-websocket-key") orelse return false; + +// const http_conn = res.conn; +// const ws_worker: *websocket.server.Worker(H) = @ptrCast(@alignCast(http_conn.ws_worker)); + +// var hc = try ws_worker.createConn(http_conn.stream.handle, http_conn.address, worker.timestamp(0)); +// errdefer ws_worker.cleanupConn(hc); + +// hc.handler = try H.init(&hc.conn, ctx); + +// var compression = false; +// if (ws_worker.canCompress()) { +// if (req.header("sec-websocket-extensions")) |ext| { +// compression = try websocket.Handshake.parseExtension(ext) != null; +// } +// } + +// var reply_buf: [512]u8 = undefined; +// const reply = try websocket.Handshake.createReply(key, null, compression, &reply_buf); +// var writer = http_conn.stream.writer(&.{}); +// const w = &writer.interface; +// try w.writeAll(reply); +// try w.flush(); + +// if (comptime std.meta.hasFn(H, "afterInit")) { +// const params = @typeInfo(@TypeOf(H.afterInit)).@"fn".params; +// try if (comptime params.len == 1) hc.handler.?.afterInit() else hc.handler.?.afterInit(ctx); +// } +// try ws_worker.setupConnection(hc); +// res.written = true; +// 
http_conn.handover = .{ .websocket = hc }; +// return true; +// } // std.heap.StackFallbackAllocator is very specific. It's really _stack_ as it // requires a comptime size. Also, it uses non-public calls from the FixedBufferAllocator. @@ -771,7 +783,7 @@ fn drain(req: *Request) !void { } const t = @import("t.zig"); -var global_test_allocator = std.heap.GeneralPurposeAllocator(.{}){}; +var global_test_allocator = std.heap.DebugAllocator(.{}){}; var test_handler_dispatch = TestHandlerDispatch{ .state = 10 }; var test_handler_disaptch_context = TestHandlerDispatchContext{ .state = 20 }; @@ -785,7 +797,8 @@ var dispatch_server: Server(*TestHandlerDispatch) = undefined; var dispatch_action_context_server: Server(*TestHandlerDispatchContext) = undefined; var reuse_server: Server(void) = undefined; var handle_server: Server(TestHandlerHandle) = undefined; -var websocket_server: Server(TestWebsocketHandler) = undefined; +// @ZIG016 +// var websocket_server: Server(TestWebsocketHandler) = undefined; var cors_wildcard_server: Server(void) = undefined; var cors_single_server: Server(void) = undefined; var cors_multiple_server: Server(void) = undefined; @@ -798,7 +811,7 @@ test "tests:beforeAll" { const ga = global_test_allocator.allocator(); { - default_server = try Server(void).init(ga, .{ + default_server = try Server(void).init(t.io, ga, .{ .address = .localhost(5992), .request = .{ .lazy_read_size = 4_096, @@ -842,7 +855,7 @@ test "tests:beforeAll" { } { - dispatch_default_server = try Server(*TestHandlerDefaultDispatch).init(ga, .{ .address = .localhost(5993) }, &test_handler_default_dispatch1); + dispatch_default_server = try Server(*TestHandlerDefaultDispatch).init(t.io, ga, .{ .address = .localhost(5993) }, &test_handler_default_dispatch1); var router = try dispatch_default_server.router(.{}); router.get("/", TestHandlerDefaultDispatch.echo, .{}); router.get("/write/*", TestHandlerDefaultDispatch.echoWrite, .{}); @@ -864,14 +877,14 @@ test "tests:beforeAll" { } { - 
dispatch_server = try Server(*TestHandlerDispatch).init(ga, .{ .address = .localhost(5994) }, &test_handler_dispatch); + dispatch_server = try Server(*TestHandlerDispatch).init(t.io, ga, .{ .address = .localhost(5994) }, &test_handler_dispatch); var router = try dispatch_server.router(.{}); router.get("/", TestHandlerDispatch.root, .{}); test_server_threads[2] = try dispatch_server.listenInNewThread(); } { - dispatch_action_context_server = try Server(*TestHandlerDispatchContext).init(ga, .{ .address = .localhost(5995) }, &test_handler_disaptch_context); + dispatch_action_context_server = try Server(*TestHandlerDispatchContext).init(t.io, ga, .{ .address = .localhost(5995) }, &test_handler_disaptch_context); var router = try dispatch_action_context_server.router(.{}); router.get("/", TestHandlerDispatchContext.root, .{}); test_server_threads[3] = try dispatch_action_context_server.listenInNewThread(); @@ -880,26 +893,30 @@ test "tests:beforeAll" { { // with only 1 worker, and a min/max conn of 1, each request should // hit our reset path. 
- reuse_server = try Server(void).init(ga, .{ .address = .localhost(5996), .workers = .{ .count = 1, .min_conn = 1, .max_conn = 1 } }, {}); + reuse_server = try Server(void).init(t.io, ga, .{ .address = .localhost(5996), .workers = .{ .count = 1, .min_conn = 1, .max_conn = 1 } }, {}); var router = try reuse_server.router(.{}); router.get("/test/writer", TestDummyHandler.reuseWriter, .{}); test_server_threads[4] = try reuse_server.listenInNewThread(); } { - handle_server = try Server(TestHandlerHandle).init(ga, .{ .address = .localhost(5997) }, TestHandlerHandle{}); + handle_server = try Server(TestHandlerHandle).init(t.io, ga, .{ .address = .localhost(5997) }, TestHandlerHandle{}); test_server_threads[5] = try handle_server.listenInNewThread(); } - { - websocket_server = try Server(TestWebsocketHandler).init(ga, .{ .address = .localhost(5998) }, TestWebsocketHandler{}); - var router = try websocket_server.router(.{}); - router.get("/ws", TestWebsocketHandler.upgrade, .{}); - test_server_threads[6] = try websocket_server.listenInNewThread(); - } + // @ZIG016 + // { + // websocket_server = try Server(TestWebsocketHandler).init(ga, .{ .address = .localhost(5998) }, TestWebsocketHandler{}); + // var router = try websocket_server.router(.{}); + // router.get("/ws", TestWebsocketHandler.upgrade, .{}); + // test_server_threads[6] = try websocket_server.listenInNewThread(); + // } + test_server_threads[6] = try Thread.spawn(.{}, struct { + fn dummy() void {} + }.dummy, .{}); { - cors_wildcard_server = try Server(void).init(ga, .{ .address = .localhost(5999) }, {}); + cors_wildcard_server = try Server(void).init(t.io, ga, .{ .address = .localhost(5999) }, {}); var cors_wildcard = try cors_wildcard_server.arena.alloc(Middleware(void), 1); cors_wildcard[0] = try cors_wildcard_server.middleware(middleware.Cors, .{ .origin = "*", @@ -913,7 +930,7 @@ test "tests:beforeAll" { } { - cors_single_server = try Server(void).init(ga, .{ .address = .localhost(6000) }, {}); + 
cors_single_server = try Server(void).init(t.io, ga, .{ .address = .localhost(6000) }, {}); var cors_single = try cors_single_server.arena.alloc(Middleware(void), 1); cors_single[0] = try cors_single_server.middleware(middleware.Cors, .{ .origin = "https://example.com", @@ -926,7 +943,7 @@ test "tests:beforeAll" { } { - cors_multiple_server = try Server(void).init(ga, .{ .address = .localhost(6001) }, {}); + cors_multiple_server = try Server(void).init(t.io, ga, .{ .address = .localhost(6001) }, {}); var cors_multiple = try cors_multiple_server.arena.alloc(Middleware(void), 1); cors_multiple[0] = try cors_multiple_server.middleware(middleware.Cors, .{ .origin = "https://example.com, https://api.example.com, https://test.local", @@ -949,7 +966,8 @@ test "tests:afterAll" { dispatch_action_context_server.stop(); reuse_server.stop(); handle_server.stop(); - websocket_server.stop(); + // @ZIG016 + // websocket_server.stop(); cors_wildcard_server.stop(); cors_single_server.stop(); cors_multiple_server.stop(); @@ -964,16 +982,17 @@ test "tests:afterAll" { dispatch_action_context_server.deinit(); reuse_server.deinit(); handle_server.deinit(); - websocket_server.deinit(); + // @ZIG016 + // websocket_server.deinit(); cors_wildcard_server.deinit(); cors_single_server.deinit(); cors_multiple_server.deinit(); - try t.expectEqual(false, global_test_allocator.detectLeaks()); + try t.expectEqual(0, global_test_allocator.detectLeaks()); } test "httpz: quick shutdown" { - var server = try Server(void).init(t.allocator, .{ .address = .localhost(6992) }, {}); + var server = try Server(void).init(t.io, t.allocator, .{ .address = .localhost(6992) }, {}); const thrd = try server.listenInNewThread(); server.stop(); thrd.join(); @@ -982,7 +1001,7 @@ test "httpz: quick shutdown" { test "httpz: bind failure releases mutex" { // Start a server to occupy the port - var server1 = try Server(void).init(t.allocator, .{ .address = .localhost(6993) }, {}); + var server1 = try 
Server(void).init(t.io, t.allocator, .{ .address = .localhost(6993) }, {}); const thrd1 = try server1.listenInNewThread(); defer { server1.stop(); @@ -991,7 +1010,7 @@ test "httpz: bind failure releases mutex" { } // Try to start another server on the same port - will fail to bind - var server2 = try Server(void).init(t.allocator, .{ .address = .localhost(6993) }, {}); + var server2 = try Server(void).init(t.io, t.allocator, .{ .address = .localhost(6993) }, {}); defer server2.deinit(); // First call fails with AddressInUse @@ -1004,15 +1023,15 @@ test "httpz: bind failure releases mutex" { test "httpz: shutdown without listen" { // Should not throw a .BADF (unreachable) error - var server = try Server(void).init(t.allocator, .{ .address = .localhost(6992) }, {}); + var server = try Server(void).init(t.io, t.allocator, .{ .address = .localhost(6992) }, {}); server.stop(); server.deinit(); } test "httpz: invalid request" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("TEA HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1023,8 +1042,8 @@ test "httpz: invalid request" { test "httpz: invalid request path" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("TEA /hello\rn\nWorld:test HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1035,8 +1054,8 @@ test "httpz: invalid request path" { test "httpz: invalid header name" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET / HTTP/1.1\r\nOver: 9000\r\nHel\tlo:World\r\n\r\n"); try w.flush(); @@ -1047,8 +1066,8 @@ test "httpz: invalid header name" { test "httpz: invalid 
content length value (1)" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET / HTTP/1.1\r\nContent-Length: HaHA\r\n\r\n"); try w.flush(); @@ -1059,8 +1078,8 @@ test "httpz: invalid content length value (1)" { test "httpz: invalid content length value (2)" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET / HTTP/1.1\r\nContent-Length: 1.0\r\n\r\n"); try w.flush(); @@ -1071,8 +1090,8 @@ test "httpz: invalid content length value (2)" { test "httpz: body too big" { const stream = testStream(5993); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("POST / HTTP/1.1\r\nContent-Length: 999999999999999999\r\n\r\n"); try w.flush(); @@ -1083,8 +1102,8 @@ test "httpz: body too big" { test "httpz: overflow content length" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET / HTTP/1.1\r\nContent-Length: 999999999999999999999999999\r\n\r\n"); try w.flush(); @@ -1095,8 +1114,9 @@ test "httpz: overflow content length" { test "httpz: no route" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET / HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1107,8 +1127,8 @@ test "httpz: no route" { test "httpz: no route with custom notFound handler" { const stream = testStream(5993); - defer stream.close(); - var writer = stream.writer(&.{}); 
+ defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /not_found HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1122,8 +1142,8 @@ test "httpz: unhandled exception" { defer std.testing.log_level = .warn; const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /fail HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1137,8 +1157,8 @@ test "httpz: unhandled exception with custom error handler" { defer std.testing.log_level = .warn; const stream = testStream(5993); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /fail HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1149,10 +1169,10 @@ test "httpz: unhandled exception with custom error handler" { test "httpz: custom methods" { const stream = testStream(5992); - defer stream.close(); + defer stream.close(t.io); { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/method HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1162,7 +1182,7 @@ test "httpz: custom methods" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("PUT /test/method HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1172,7 +1192,7 @@ test "httpz: custom methods" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("TEA /test/method HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1182,7 +1202,7 @@ test "httpz: custom methods" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("PING /test/method HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1192,7 +1212,7 @@ test "httpz: 
custom methods" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("TEA /test/other HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1203,8 +1223,8 @@ test "httpz: custom methods" { test "httpz: route params" { const stream = testStream(5993); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /api/v2/users/9001 HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1215,8 +1235,8 @@ test "httpz: route params" { test "httpz: request and response headers" { const stream = testStream(5993); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/headers HTTP/1.1\r\nHeader-Name: Header-Value\r\n\r\n"); try w.flush(); @@ -1227,8 +1247,8 @@ test "httpz: request and response headers" { test "httpz: content-length body" { const stream = testStream(5993); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/body/cl HTTP/1.1\r\nHeader-Name: Header-Value\r\nContent-Length: 4\r\n\r\nabcz"); try w.flush(); @@ -1239,8 +1259,8 @@ test "httpz: content-length body" { test "httpz: json response" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/json HTTP/1.1\r\nContent-Length: 0\r\n\r\n"); try w.flush(); @@ -1251,8 +1271,8 @@ test "httpz: json response" { test "httpz: query" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try 
w.writeAll("GET /test/query?fav=keemun%20te%61%21 HTTP/1.1\r\nContent-Length: 0\r\n\r\n"); try w.flush(); @@ -1263,8 +1283,8 @@ test "httpz: query" { test "httpz: chunked" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/chunked HTTP/1.1\r\nContent-Length: 0\r\n\r\n"); try w.flush(); @@ -1275,8 +1295,8 @@ test "httpz: chunked" { test "httpz: route-specific dispatcher" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("HEAD /test/dispatcher HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1287,8 +1307,8 @@ test "httpz: route-specific dispatcher" { test "httpz: middlewares" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; { @@ -1304,10 +1324,10 @@ test "httpz: middlewares" { test "httpz: CORS" { const stream = testStream(5992); - defer stream.close(); + defer stream.close(t.io); { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /echo HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1321,7 +1341,7 @@ test "httpz: CORS" { { // cors endpoint but not cors options - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("OPTIONS /test/cors HTTP/1.1\r\nOrigin: httpz.local\r\nSec-Fetch-Mode: navigate\r\n\r\n"); try w.flush(); @@ -1336,7 +1356,7 @@ test "httpz: CORS" { { // cors request - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("OPTIONS /test/cors HTTP/1.1\r\nOrigin: httpz.local\r\nSec-Fetch-Mode: 
cors\r\n\r\n"); try w.flush(); @@ -1351,7 +1371,7 @@ test "httpz: CORS" { { // cors request, non-options - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/cors HTTP/1.1\r\nOrigin: httpz.local\r\nSec-Fetch-Mode: cors\r\n\r\n"); try w.flush(); @@ -1367,10 +1387,10 @@ test "httpz: CORS" { test "httpz: CORS wildcard origin" { const stream = testStream(5999); - defer stream.close(); + defer stream.close(t.io); { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/cors HTTP/1.1\r\nOrigin: https://example.com\r\n\r\n"); try w.flush(); @@ -1381,7 +1401,7 @@ test "httpz: CORS wildcard origin" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/cors HTTP/1.1\r\nOrigin: https://any-domain.com\r\n\r\n"); try w.flush(); @@ -1392,7 +1412,7 @@ test "httpz: CORS wildcard origin" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("OPTIONS /test/cors HTTP/1.1\r\nOrigin: https://test.com\r\nSec-Fetch-Mode: cors\r\n\r\n"); try w.flush(); @@ -1407,7 +1427,7 @@ test "httpz: CORS wildcard origin" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/cors HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1420,10 +1440,10 @@ test "httpz: CORS wildcard origin" { test "httpz: CORS single origin" { const stream = testStream(6000); - defer stream.close(); + defer stream.close(t.io); { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/cors HTTP/1.1\r\nOrigin: https://example.com\r\n\r\n"); try w.flush(); @@ -1435,7 +1455,7 @@ test "httpz: CORS single origin" { } { - var writer = stream.writer(&.{}); + var writer = 
stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/cors HTTP/1.1\r\nOrigin: https://attacker.com\r\n\r\n"); try w.flush(); @@ -1447,7 +1467,7 @@ test "httpz: CORS single origin" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("OPTIONS /test/cors HTTP/1.1\r\nOrigin: https://example.com\r\nSec-Fetch-Mode: cors\r\n\r\n"); try w.flush(); @@ -1461,7 +1481,7 @@ test "httpz: CORS single origin" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("OPTIONS /test/cors HTTP/1.1\r\nOrigin: https://wrong.com\r\nSec-Fetch-Mode: cors\r\n\r\n"); try w.flush(); @@ -1475,10 +1495,10 @@ test "httpz: CORS single origin" { test "httpz: CORS multiple origins" { const stream = testStream(6001); - defer stream.close(); + defer stream.close(t.io); { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/cors HTTP/1.1\r\nOrigin: https://example.com\r\n\r\n"); try w.flush(); @@ -1490,7 +1510,7 @@ test "httpz: CORS multiple origins" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/cors HTTP/1.1\r\nOrigin: https://api.example.com\r\n\r\n"); try w.flush(); @@ -1502,7 +1522,7 @@ test "httpz: CORS multiple origins" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/cors HTTP/1.1\r\nOrigin: https://test.local\r\n\r\n"); try w.flush(); @@ -1514,7 +1534,7 @@ test "httpz: CORS multiple origins" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/cors HTTP/1.1\r\nOrigin: https://attacker.com\r\n\r\n"); try w.flush(); @@ -1526,7 +1546,7 @@ test "httpz: CORS multiple origins" { } { - 
var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("OPTIONS /test/cors HTTP/1.1\r\nOrigin: https://api.example.com\r\nSec-Fetch-Mode: cors\r\n\r\n"); try w.flush(); @@ -1541,7 +1561,7 @@ test "httpz: CORS multiple origins" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("OPTIONS /test/cors HTTP/1.1\r\nOrigin: https://not-in-list.com\r\nSec-Fetch-Mode: cors\r\n\r\n"); try w.flush(); @@ -1554,10 +1574,10 @@ test "httpz: CORS multiple origins" { test "httpz: router groups" { const stream = testStream(5993); - defer stream.close(); + defer stream.close(t.io); { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET / HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1569,7 +1589,7 @@ test "httpz: router groups" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /admin/users HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1581,7 +1601,7 @@ test "httpz: router groups" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("PUT /admin/users/:id HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1593,7 +1613,7 @@ test "httpz: router groups" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("HEAD /debug/ping HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1605,7 +1625,7 @@ test "httpz: router groups" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("OPTIONS /debug/stats HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1617,7 +1637,7 @@ test "httpz: router groups" { } { - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("POST /login 
HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1631,8 +1651,8 @@ test "httpz: router groups" { test "httpz: event stream" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/stream HTTP/1.1\r\nContent-Length: 0\r\n\r\n"); try w.flush(); @@ -1650,8 +1670,8 @@ test "httpz: event stream" { test "httpz: event stream sync" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/streamsync HTTP/1.1\r\nContent-Length: 0\r\n\r\n"); try w.flush(); @@ -1669,8 +1689,8 @@ test "httpz: event stream sync" { test "httpz: keepalive" { const stream = testStream(5993); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /api/v2/users/9001 HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1685,8 +1705,8 @@ test "httpz: keepalive" { test "httpz: route data" { const stream = testStream(5992); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/route_data HTTP/1.1\r\nContent-Length: 0\r\n\r\n"); try w.flush(); @@ -1698,8 +1718,8 @@ test "httpz: route data" { test "httpz: keepalive with explicit write" { const stream = testStream(5993); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /write/9001 HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1714,12 +1734,12 @@ test "httpz: keepalive with explicit write" { test "httpz: request in chunks" { const stream = testStream(5993); - defer stream.close(); - var 
writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /api/v2/use"); try w.flush(); - std.Thread.sleep(std.time.ns_per_ms * 10); + try t.io.sleep(.fromMilliseconds(10), .awake); try w.writeAll("rs/11 HTTP/1.1\r\n\r\n"); try w.flush(); @@ -1731,8 +1751,8 @@ test "httpz: writer re-use" { defer t.reset(); const stream = testStream(5996); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; var expected: [10]TestUser = undefined; @@ -1755,8 +1775,8 @@ test "httpz: writer re-use" { test "httpz: custom dispatch without action context" { const stream = testStream(5994); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET / HTTP/1.1\r\nContent-Length: 0\r\n\r\n"); try w.flush(); @@ -1767,8 +1787,8 @@ test "httpz: custom dispatch without action context" { test "httpz: custom dispatch with action context" { const stream = testStream(5995); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /?name=teg HTTP/1.1\r\nContent-Length: 0\r\n\r\n"); try w.flush(); @@ -1779,8 +1799,8 @@ test "httpz: custom dispatch with action context" { test "httpz: custom handle" { const stream = testStream(5997); - defer stream.close(); - var writer = stream.writer(&.{}); + defer stream.close(t.io); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /whatever?name=teg HTTP/1.1\r\nContent-Length: 0\r\n\r\n"); try w.flush(); @@ -1793,9 +1813,9 @@ test "httpz: request body reader" { { // no body const stream = testStream(5992); - defer stream.close(); + defer stream.close(t.io); - var writer = stream.writer(&.{}); 
+ var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/req_reader HTTP/1.1\r\nContent-Length: 0\r\n\r\n"); @@ -1809,9 +1829,9 @@ test "httpz: request body reader" { { // small body const stream = testStream(5992); - defer stream.close(); + defer stream.close(t.io); - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll("GET /test/req_reader HTTP/1.1\r\nContent-Length: 4\r\n\r\n123z"); @@ -1826,9 +1846,9 @@ test "httpz: request body reader" { { // medium body const stream = testStream(5992); - defer stream.close(); + defer stream.close(t.io); - var writer = stream.writer(&.{}); + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; try w.writeAll(std.fmt.comptimePrint("GET /test/req_reader HTTP/1.1\r\nContent-Length: {d}\r\n\r\n" ++ ("a" ** length), .{length})); @@ -1846,17 +1866,17 @@ test "httpz: request body reader" { // a bit of fuzzing for (0..10) |_| { const stream = testStream(5992); - defer stream.close(); + defer stream.close(t.io); var buf: [1024]u8 = undefined; - var writer = stream.writer(&buf); + var writer = stream.writer(t.io, &buf); const w = &writer.interface; var req: []const u8 = std.fmt.comptimePrint("GET /test/req_reader HTTP/1.1\r\nContent-Length: {d}\r\n\r\n" ++ ("a" ** length), .{length}); while (req.len > 0) { const len = random.uintAtMost(usize, req.len - 1) + 1; try w.writeAll(req[0..len]); - std.Thread.sleep(std.time.ns_per_ms * 2); + try t.io.sleep(.fromMilliseconds(2), .awake); req = req[len..]; } @@ -1868,150 +1888,151 @@ test "httpz: request body reader" { } } -test "websocket: invalid request" { - const stream = testStream(5998); - defer stream.close(); - var writer = stream.writer(&.{}); - const w = &writer.interface; - try w.writeAll("GET /ws HTTP/1.1\r\nContent-Length: 0\r\n\r\n"); - try w.flush(); - - var res = testReadParsed(stream); - defer res.deinit(); - try t.expectString("invalid websocket", 
res.body); -} - -test "websocket: upgrade" { - const stream = testStream(5998); - defer stream.close(); - var writer = stream.writer(&.{}); - const w = &writer.interface; - try w.writeAll("GET /ws HTTP/1.1\r\nContent-Length: 0\r\n"); - try w.writeAll("upgrade: WEBsocket\r\n"); - try w.writeAll("Sec-Websocket-verSIon: 13\r\n"); - try w.writeAll("ConnectioN: abc,upgrade,123\r\n"); - try w.writeAll("SEC-WEBSOCKET-KeY: a-secret-key\r\n\r\n"); - try w.flush(); - - var res = testReadHeader(stream); - defer res.deinit(); - try t.expectEqual(101, res.status); - try t.expectString("websocket", res.headers.get("Upgrade").?); - try t.expectString("upgrade", res.headers.get("Connection").?); - try t.expectString("55eM2SNGu+68v5XXrr982mhPFkU=", res.headers.get("Sec-Websocket-Accept").?); - - try w.writeAll(&websocket.frameText("over 9000!")); - - // https://github.com/karlseguin/http.zig/pull/188 - try w.flush(); - std.Thread.sleep(std.time.ns_per_ms * 5); - - try w.writeAll(&websocket.frameText("close")); - try w.flush(); - - var pos: usize = 0; - var buf: [100]u8 = undefined; - var wait_count: usize = 0; - var reader = stream.reader(&.{}); - const r = reader.interface(); - while (pos < 16) { - const n = r.readSliceShort(buf[pos..]) catch |err| - switch (err) { - error.ReadFailed => { - if (reader.getError()) |e| { - switch (e) { - error.WouldBlock => { - if (wait_count == 100) { - break; - } - wait_count += 1; - std.Thread.sleep(std.time.ns_per_ms); - continue; - }, - else => {}, - } - } - return err; - }, - }; - - if (n == 0) { - break; - } - pos += n; - } - try t.expectEqual(16, pos); - try t.expectEqual(129, buf[0]); - try t.expectEqual(10, buf[1]); - try t.expectString("over 9000!", buf[2..12]); - try t.expectString(&.{ 136, 2, 3, 232 }, buf[12..16]); -} - -// Stress test: multiple concurrent websocket clients sending many messages each. -// Run repeatedly (e.g. 
zig build test -Dtest-filter="websocket: stress" or run 50x) -// to verify no race in reader.done() / allocator free when using thread pool. -test "websocket: stress" { - if (force_blocking) return; // non-blocking mode only (thread pool) - const num_clients = 8; - const messages_per_client = 150; - - // When run with -Dtest-filter="websocket: stress", tests:beforeAll may not run, - // so nothing is listening on 5998. Wait for port and start our own server if needed. - var stress_server: ?Server(TestWebsocketHandler) = null; - var stress_listen_thread: ?Thread = null; - testing.waitForPort(5998) catch { - stress_server = try Server(TestWebsocketHandler).init(t.allocator, .{ .address = .localhost(5998) }, TestWebsocketHandler{}); - var router = try stress_server.?.router(.{}); - router.get("/ws", TestWebsocketHandler.upgrade, .{}); - stress_listen_thread = try stress_server.?.listenInNewThread(); - try testing.waitForPort(5998); - }; - defer if (stress_server) |*srv| { - srv.stop(); - if (stress_listen_thread) |thrd| thrd.join(); - srv.deinit(); - }; - - var threads: [num_clients]Thread = undefined; - for (0..num_clients) |i| { - threads[i] = Thread.spawn(.{}, struct { - fn run(_: usize) void { - const stream = testStream(5998); - defer stream.close(); - - var writer = stream.writer(&.{}); - const w = &writer.interface; - w.writeAll("GET /ws HTTP/1.1\r\nContent-Length: 0\r\n") catch return; - w.writeAll("upgrade: WEBsocket\r\n") catch return; - w.writeAll("Sec-Websocket-verSIon: 13\r\n") catch return; - w.writeAll("ConnectioN: upgrade\r\n") catch return; - w.writeAll("SEC-WEBSOCKET-KeY: a-secret-key\r\n\r\n") catch return; - w.flush() catch return; - - var buf: [1024]u8 = undefined; - var pos: usize = 0; - var reader = stream.reader(&.{}); - const r = reader.interface(); - while (!std.mem.endsWith(u8, buf[0..pos], "\r\n\r\n")) { - if (pos >= buf.len) return; - var vecs: [1][]u8 = .{buf[pos..]}; - const n = r.readVec(&vecs) catch return; - if (n == 0) return; - pos 
+= n; - } - if (pos < 12 or !std.mem.startsWith(u8, buf[0..12], "HTTP/1.1 101")) return; - - for (0..messages_per_client) |_| { - const frame = websocket.frameText("stress"); - w.writeAll(&frame) catch return; - } - w.writeAll(&websocket.frameText("close")) catch return; - w.flush() catch return; - } - }.run, .{i}) catch return; - } - for (&threads) |*th| th.join(); -} +// @ZIG016 +// test "websocket: invalid request" { +// const stream = testStream(5998); +// defer stream.close(t.io); +// var writer = stream.writer(t.io, &.{}); +// const w = &writer.interface; +// try w.writeAll("GET /ws HTTP/1.1\r\nContent-Length: 0\r\n\r\n"); +// try w.flush(); + +// var res = testReadParsed(stream); +// defer res.deinit(); +// try t.expectString("invalid websocket", res.body); +// } + +// test "websocket: upgrade" { +// const stream = testStream(5998); +// defer stream.close(t.io); +// var writer = stream.writer(t.io, &.{}); +// const w = &writer.interface; +// try w.writeAll("GET /ws HTTP/1.1\r\nContent-Length: 0\r\n"); +// try w.writeAll("upgrade: WEBsocket\r\n"); +// try w.writeAll("Sec-Websocket-verSIon: 13\r\n"); +// try w.writeAll("ConnectioN: abc,upgrade,123\r\n"); +// try w.writeAll("SEC-WEBSOCKET-KeY: a-secret-key\r\n\r\n"); +// try w.flush(); + +// var res = testReadHeader(stream); +// defer res.deinit(); +// try t.expectEqual(101, res.status); +// try t.expectString("websocket", res.headers.get("Upgrade").?); +// try t.expectString("upgrade", res.headers.get("Connection").?); +// try t.expectString("55eM2SNGu+68v5XXrr982mhPFkU=", res.headers.get("Sec-Websocket-Accept").?); + +// try w.writeAll(&websocket.frameText("over 9000!")); + +// // https://github.com/karlseguin/http.zig/pull/188 +// try w.flush(); +// std.Thread.sleep(std.time.ns_per_ms * 5); + +// try w.writeAll(&websocket.frameText("close")); +// try w.flush(); + +// var pos: usize = 0; +// var buf: [100]u8 = undefined; +// var wait_count: usize = 0; +// var reader = stream.reader(&.{}); +// const r = 
reader.interface(); +// while (pos < 16) { +// const n = r.readSliceShort(buf[pos..]) catch |err| +// switch (err) { +// error.ReadFailed => { +// if (reader.getError()) |e| { +// switch (e) { +// error.WouldBlock => { +// if (wait_count == 100) { +// break; +// } +// wait_count += 1; +// std.Thread.sleep(std.time.ns_per_ms); +// continue; +// }, +// else => {}, +// } +// } +// return err; +// }, +// }; + +// if (n == 0) { +// break; +// } +// pos += n; +// } +// try t.expectEqual(16, pos); +// try t.expectEqual(129, buf[0]); +// try t.expectEqual(10, buf[1]); +// try t.expectString("over 9000!", buf[2..12]); +// try t.expectString(&.{ 136, 2, 3, 232 }, buf[12..16]); +// } + +// // Stress test: multiple concurrent websocket clients sending many messages each. +// // Run repeatedly (e.g. zig build test -Dtest-filter="websocket: stress" or run 50x) +// // to verify no race in reader.done() / allocator free when using thread pool. +// test "websocket: stress" { +// if (force_blocking) return; // non-blocking mode only (thread pool) +// const num_clients = 8; +// const messages_per_client = 150; + +// // When run with -Dtest-filter="websocket: stress", tests:beforeAll may not run, +// // so nothing is listening on 5998. Wait for port and start our own server if needed. 
+// var stress_server: ?Server(TestWebsocketHandler) = null; +// var stress_listen_thread: ?Thread = null; +// testing.waitForPort(5998) catch { +// stress_server = try Server(TestWebsocketHandler).init(t.allocator, .{ .address = .localhost(5998) }, TestWebsocketHandler{}); +// var router = try stress_server.?.router(.{}); +// router.get("/ws", TestWebsocketHandler.upgrade, .{}); +// stress_listen_thread = try stress_server.?.listenInNewThread(); +// try testing.waitForPort(5998); +// }; +// defer if (stress_server) |*srv| { +// srv.stop(); +// if (stress_listen_thread) |thrd| thrd.join(); +// srv.deinit(); +// }; + +// var threads: [num_clients]Thread = undefined; +// for (0..num_clients) |i| { +// threads[i] = Thread.spawn(.{}, struct { +// fn run(_: usize) void { +// const stream = testStream(5998); +// defer stream.close(); + +// var writer = stream.writer(t.io, &.{}); +// const w = &writer.interface; +// w.writeAll("GET /ws HTTP/1.1\r\nContent-Length: 0\r\n") catch return; +// w.writeAll("upgrade: WEBsocket\r\n") catch return; +// w.writeAll("Sec-Websocket-verSIon: 13\r\n") catch return; +// w.writeAll("ConnectioN: upgrade\r\n") catch return; +// w.writeAll("SEC-WEBSOCKET-KeY: a-secret-key\r\n\r\n") catch return; +// w.flush() catch return; + +// var buf: [1024]u8 = undefined; +// var pos: usize = 0; +// var reader = stream.reader(&.{}); +// const r = reader.interface(); +// while (!std.mem.endsWith(u8, buf[0..pos], "\r\n\r\n")) { +// if (pos >= buf.len) return; +// var vecs: [1][]u8 = .{buf[pos..]}; +// const n = r.readVec(&vecs) catch return; +// if (n == 0) return; +// pos += n; +// } +// if (pos < 12 or !std.mem.startsWith(u8, buf[0..12], "HTTP/1.1 101")) return; + +// for (0..messages_per_client) |_| { +// const frame = websocket.frameText("stress"); +// w.writeAll(&frame) catch return; +// } +// w.writeAll(&websocket.frameText("close")) catch return; +// w.flush() catch return; +// } +// }.run, .{i}) catch return; +// } +// for (&threads) |*th| 
th.join(); +// } test "ContentType: forX" { inline for (@typeInfo(ContentType).@"enum".fields) |field| { @@ -2034,46 +2055,36 @@ test "ContentType: forX" { try t.expectEqual(ContentType.UNKNOWN, ContentType.forFile("must.spice")); } -fn testStream(port: u16) std.net.Stream { +fn testStream(port: u16) Io.net.Stream { const timeout = std.mem.toBytes(posix.timeval{ .sec = 0, .usec = 20_000, }); - const address = std.net.Address.parseIp("127.0.0.1", port) catch unreachable; - const stream = std.net.tcpConnectToAddress(address) catch unreachable; - posix.setsockopt(stream.handle, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &timeout) catch unreachable; - posix.setsockopt(stream.handle, posix.SOL.SOCKET, posix.SO.SNDTIMEO, &timeout) catch unreachable; + const address = Io.net.IpAddress.parse("127.0.0.1", port) catch unreachable; + const stream = address.connect(t.io, .{ .mode = .stream }) catch unreachable; + posix.setsockopt(stream.socket.handle, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &timeout) catch unreachable; + posix.setsockopt(stream.socket.handle, posix.SOL.SOCKET, posix.SO.SNDTIMEO, &timeout) catch unreachable; return stream; } -fn testReadAll(stream: std.net.Stream, buf: []u8) []u8 { +fn testReadAll(stream: Io.net.Stream, buf: []u8) []u8 { var pos: usize = 0; var blocked = false; - var reader = stream.reader(&.{}); - const r = reader.interface(); while (true) { std.debug.assert(pos < buf.len); - var vecs: [1][]u8 = .{buf[pos..]}; - const n = r.readVec(&vecs) catch |err| - switch (err) { - error.ReadFailed => { - if (reader.getError()) |e| { - switch (e) { - error.WouldBlock => { - if (blocked) return buf[0..pos]; - blocked = true; - std.Thread.sleep(std.time.ns_per_ms); - continue; - }, - error.ConnectionResetByPeer => return buf[0..pos], - else => @panic(@errorName(e)), - } - } - @panic(@errorName(err)); - }, - error.EndOfStream => 0, - }; + const n = posix.read(stream.socket.handle, buf[pos..]) catch |err| switch (err) { + error.WouldBlock => { + if (blocked) { + return 
buf[0..pos]; + } + blocked = true; + std.Io.sleep(t.io, .fromMilliseconds(1), .awake) catch unreachable; + continue; + }, + error.ConnectionResetByPeer => return buf[0..pos], + else => @panic(@errorName(err)), + }; if (n == 0) { return buf[0..pos]; @@ -2084,32 +2095,33 @@ fn testReadAll(stream: std.net.Stream, buf: []u8) []u8 { unreachable; } -fn testReadParsed(stream: std.net.Stream) testing.Testing.Response { +fn testReadParsed(stream: Io.net.Stream) testing.Testing.Response { var buf: [4096]u8 = undefined; const data = testReadAll(stream, &buf); return testing.parse(data) catch unreachable; } -fn testReadHeader(stream: std.net.Stream) testing.Testing.Response { +fn testReadHeader(stream: Io.net.Stream) testing.Testing.Response { var pos: usize = 0; var blocked = false; var buf: [1024]u8 = undefined; - var reader = stream.reader(&.{}); - const r = reader.interface(); + var reader = stream.reader(t.io, &.{}); + const r = &reader.interface; while (true) { std.debug.assert(pos < buf.len); var vecs: [1][]u8 = .{buf[pos..]}; const n = r.readVec(&vecs) catch |err| switch (err) { error.ReadFailed => { - if (reader.getError()) |e| { + if (reader.err) |e| { switch (e) { - error.WouldBlock => { - if (blocked) unreachable; - blocked = true; - std.Thread.sleep(std.time.ns_per_ms); - continue; - }, + // @ZIG016 + // error.WouldBlock => { + // if (blocked) unreachable; + // blocked = true; + // std.Thread.sleep(std.time.ns_per_ms); + // continue; + // }, else => @panic(@errorName(e)), } } @@ -2180,7 +2192,7 @@ const TestDummyHandler = struct { fn eventStreamSync(_: *Request, res: *Response) !void { res.status = 818; const stream = try res.startEventStreamSync(); - var w = stream.writer(&.{}); + var w = stream.writer(res.conn.io, &.{}); w.interface.writeAll("hello") catch unreachable; w.interface.writeAll("a sync message") catch unreachable; } @@ -2206,8 +2218,8 @@ const TestDummyHandler = struct { const StreamContext = struct { data: []const u8, - fn handle(self: 
StreamContext, stream: std.net.Stream) void { - var writer = stream.writer(&.{}); + fn handle(self: StreamContext, stream: Io.net.Stream) void { + var writer = stream.writer(t.io, &.{}); const w = &writer.interface; w.writeAll(self.data) catch unreachable; w.writeAll("a message") catch unreachable; @@ -2359,38 +2371,39 @@ const TestHandlerHandle = struct { } }; -const TestWebsocketHandler = struct { - pub const WebsocketHandler = struct { - ctx: u32, - conn: *websocket.Conn, - - pub fn init(conn: *websocket.Conn, ctx: u32) !WebsocketHandler { - return .{ - .ctx = ctx, - .conn = conn, - }; - } - - pub fn afterInit(self: *WebsocketHandler, ctx: u32) !void { - try t.expectEqual(self.ctx, ctx); - } - - pub fn clientMessage(self: *WebsocketHandler, data: []const u8) !void { - if (std.mem.eql(u8, data, "close")) { - self.conn.close(.{}) catch {}; - return; - } - try self.conn.write(data); - } - }; - - pub fn upgrade(_: TestWebsocketHandler, req: *Request, res: *Response) !void { - if (try upgradeWebsocket(WebsocketHandler, req, res, 9001) == false) { - res.status = 500; - res.body = "invalid websocket"; - } - } -}; +// @ZIG016 +// const TestWebsocketHandler = struct { +// pub const WebsocketHandler = struct { +// ctx: u32, +// conn: *websocket.Conn, + +// pub fn init(conn: *websocket.Conn, ctx: u32) !WebsocketHandler { +// return .{ +// .ctx = ctx, +// .conn = conn, +// }; +// } + +// pub fn afterInit(self: *WebsocketHandler, ctx: u32) !void { +// try t.expectEqual(self.ctx, ctx); +// } + +// pub fn clientMessage(self: *WebsocketHandler, data: []const u8) !void { +// if (std.mem.eql(u8, data, "close")) { +// self.conn.close(.{}) catch {}; +// return; +// } +// try self.conn.write(data); +// } +// }; + +// pub fn upgrade(_: TestWebsocketHandler, req: *Request, res: *Response) !void { +// if (try upgradeWebsocket(WebsocketHandler, req, res, 9001) == false) { +// res.status = 500; +// res.body = "invalid websocket"; +// } +// } +// }; const TestMiddleware = struct { const 
Config = struct { diff --git a/src/posix.zig b/src/posix.zig new file mode 100644 index 0000000..9140f46 --- /dev/null +++ b/src/posix.zig @@ -0,0 +1,650 @@ +const std = @import("std"); +const builtin = @import("builtin"); + +const posix = std.posix; +const windows = std.io.windows; +pub const system = posix.system; + +pub const O = system.O; +pub const F = system.F; +pub const AF = posix.AF; +pub const SO = posix.SO; +pub const SOL = posix.SOL; +pub const SOCK = posix.SOCK; +pub const fd_t = posix.fd_t; +pub const socket_t = posix.socket_t; +pub const timeval = posix.timeval; +pub const IPPROTO = posix.IPPROTO; +pub const sockaddr = posix.sockaddr; +pub const timespec = posix.timespec; +pub const socklen_t = posix.socklen_t; +pub const Kevent = system.Kevent; + +const native_os = builtin.os.tag; + +pub fn socket(domain: u32, socket_type: u32, protocol: u32) !socket_t { + if (native_os == .windows) { + // These flags are not actually part of the Windows API, instead they are converted here for compatibility + const filtered_sock_type = socket_type & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC); + var flags: u32 = windows.ws2_32.WSA_FLAG_OVERLAPPED; + if ((socket_type & SOCK.CLOEXEC) != 0) flags |= windows.ws2_32.WSA_FLAG_NO_HANDLE_INHERIT; + + const rc = try windows.WSASocketW( + @bitCast(domain), + @bitCast(filtered_sock_type), + @bitCast(protocol), + null, + 0, + flags, + ); + errdefer windows.closesocket(rc) catch unreachable; + if ((socket_type & SOCK.NONBLOCK) != 0) { + var mode: c_ulong = 1; // nonblocking + if (windows.ws2_32.SOCKET_ERROR == windows.ws2_32.ioctlsocket(rc, windows.ws2_32.FIONBIO, &mode)) { + switch (windows.ws2_32.WSAGetLastError()) { + // have not identified any error codes that should be handled yet + else => unreachable, + } + } + } + return rc; + } + + const have_sock_flags = !builtin.target.os.tag.isDarwin() and native_os != .haiku; + const filtered_sock_type = if (!have_sock_flags) + socket_type & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC) + 
else + socket_type; + const rc = posix.system.socket(domain, filtered_sock_type, protocol); + switch (posix.errno(rc)) { + .SUCCESS => { + const fd: fd_t = @intCast(rc); + errdefer close(fd); + if (!have_sock_flags) { + try setSockFlags(fd, socket_type); + } + return fd; + }, + .ACCES => return error.AccessDenied, + .AFNOSUPPORT => return error.AddressFamilyNotSupported, + .INVAL => return error.ProtocolFamilyNotAvailable, + .MFILE => return error.ProcessFdQuotaExceeded, + .NFILE => return error.SystemFdQuotaExceeded, + .NOBUFS => return error.SystemResources, + .NOMEM => return error.SystemResources, + .PROTONOSUPPORT => return error.ProtocolNotSupported, + .PROTOTYPE => return error.SocketTypeNotSupported, + else => return error.Unexpected, + } +} + +fn setSockFlags(sock: socket_t, flags: u32) !void { + if ((flags & SOCK.CLOEXEC) != 0) { + if (native_os == .windows) { + // TODO: Find out if this is supported for sockets + } else { + var fd_flags = fcntl(sock, F.GETFD, 0) catch |err| switch (err) { + error.FileBusy => unreachable, + error.Locked => unreachable, + error.PermissionDenied => unreachable, + error.DeadLock => unreachable, + error.LockedRegionLimitExceeded => unreachable, + else => |e| return e, + }; + fd_flags |= system.FD_CLOEXEC; + _ = fcntl(sock, F.SETFD, fd_flags) catch |err| switch (err) { + error.FileBusy => unreachable, + error.Locked => unreachable, + error.PermissionDenied => unreachable, + error.DeadLock => unreachable, + error.LockedRegionLimitExceeded => unreachable, + else => |e| return e, + }; + } + } + if ((flags & SOCK.NONBLOCK) != 0) { + if (native_os == .windows) { + var mode: c_ulong = 1; + if (windows.ws2_32.ioctlsocket(sock, windows.ws2_32.FIONBIO, &mode) == windows.ws2_32.SOCKET_ERROR) { + switch (windows.ws2_32.WSAGetLastError()) { + .WSANOTINITIALISED => unreachable, + .WSAENETDOWN => return error.NetworkSubsystemFailed, + .WSAENOTSOCK => return error.FileDescriptorNotASocket, + // TODO: handle more errors + else => |err| return 
windows.unexpectedWSAError(err), + } + } + } else { + var fl_flags = fcntl(sock, F.GETFL, 0) catch |err| switch (err) { + error.FileBusy => unreachable, + error.Locked => unreachable, + error.PermissionDenied => unreachable, + error.DeadLock => unreachable, + error.LockedRegionLimitExceeded => unreachable, + else => |e| return e, + }; + fl_flags |= 1 << @bitOffsetOf(O, "NONBLOCK"); + _ = fcntl(sock, F.SETFL, fl_flags) catch |err| switch (err) { + error.FileBusy => unreachable, + error.Locked => unreachable, + error.PermissionDenied => unreachable, + error.DeadLock => unreachable, + error.LockedRegionLimitExceeded => unreachable, + else => |e| return e, + }; + } + } +} + +pub fn fcntl(fd: fd_t, cmd: i32, arg: usize) !usize { + while (true) { + const rc = posix.system.fcntl(fd, cmd, arg); + switch (posix.errno(rc)) { + .SUCCESS => return @intCast(rc), + .INTR => continue, + .AGAIN, .ACCES => return error.Locked, + .BADF => unreachable, + .BUSY => return error.FileBusy, + .INVAL => unreachable, // invalid parameters + .PERM => return error.PermissionDenied, + .MFILE => return error.ProcessFdQuotaExceeded, + .NOTDIR => unreachable, // invalid parameter + .DEADLK => return error.DeadLock, + .NOLCK => return error.LockedRegionLimitExceeded, + else => return error.Unexpected, + } + } +} + +pub fn close(fd: fd_t) void { + if (native_os == .windows) { + return windows.CloseHandle(fd); + } + switch (posix.errno(system.close(fd))) { + .BADF => unreachable, // Always a race condition. + .INTR => return, // This is still a success. 
See https://github.com/ziglang/zig/issues/2425 + else => return, + } +} + +pub fn setsockopt(fd: socket_t, level: i32, optname: u32, opt: []const u8) !void { + if (native_os == .windows) { + const rc = windows.ws2_32.setsockopt(fd, level, @intCast(optname), opt.ptr, @intCast(opt.len)); + if (rc == windows.ws2_32.SOCKET_ERROR) { + switch (windows.ws2_32.WSAGetLastError()) { + .WSANOTINITIALISED => unreachable, + .WSAENETDOWN => return error.NetworkSubsystemFailed, + .WSAEFAULT => unreachable, + .WSAENOTSOCK => return error.FileDescriptorNotASocket, + .WSAEINVAL => return error.SocketNotBound, + else => |err| return windows.unexpectedWSAError(err), + } + } + return; + } else { + switch (posix.errno(system.setsockopt(fd, level, optname, opt.ptr, @intCast(opt.len)))) { + .SUCCESS => {}, + .BADF => unreachable, // always a race condition + .NOTSOCK => unreachable, // always a race condition + .INVAL => unreachable, + .FAULT => unreachable, + .DOM => return error.TimeoutTooBig, + .ISCONN => return error.AlreadyConnected, + .NOPROTOOPT => return error.InvalidProtocolOption, + .NOMEM => return error.SystemResources, + .NOBUFS => return error.SystemResources, + .PERM => return error.PermissionDenied, + .NODEV => return error.NoDevice, + .OPNOTSUPP => return error.OperationNotSupported, + else => return error.Unexpected, + } + } +} + +pub fn bind(sock: socket_t, addr: *const sockaddr, len: socklen_t) !void { + if (native_os == .windows) { + const rc = windows.bind(sock, addr, len); + if (rc == windows.ws2_32.SOCKET_ERROR) { + switch (windows.ws2_32.WSAGetLastError()) { + .WSANOTINITIALISED => unreachable, // not initialized WSA + .WSAEACCES => return error.AccessDenied, + .WSAEADDRINUSE => return error.AddressInUse, + .WSAEADDRNOTAVAIL => return error.AddressNotAvailable, + .WSAENOTSOCK => return error.FileDescriptorNotASocket, + .WSAEFAULT => unreachable, // invalid pointers + .WSAEINVAL => return error.AlreadyBound, + .WSAENOBUFS => return error.SystemResources, + 
.WSAENETDOWN => return error.NetworkSubsystemFailed, + else => |err| return windows.unexpectedWSAError(err), + } + unreachable; + } + return; + } else { + const rc = system.bind(sock, addr, len); + switch (posix.errno(rc)) { + .SUCCESS => return, + .ACCES, .PERM => return error.AccessDenied, + .ADDRINUSE => return error.AddressInUse, + .BADF => unreachable, // always a race condition if this error is returned + .INVAL => unreachable, // invalid parameters + .NOTSOCK => unreachable, // invalid `sockfd` + .AFNOSUPPORT => return error.AddressFamilyNotSupported, + .ADDRNOTAVAIL => return error.AddressNotAvailable, + .FAULT => unreachable, // invalid `addr` pointer + .LOOP => return error.SymLinkLoop, + .NAMETOOLONG => return error.NameTooLong, + .NOENT => return error.FileNotFound, + .NOMEM => return error.SystemResources, + .NOTDIR => return error.NotDir, + .ROFS => return error.ReadOnlyFileSystem, + else => return error.Unexpected, + } + } + unreachable; +} + +pub const Address = extern union { + any: posix.sockaddr, + in: posix.sockaddr.in, + in6: posix.sockaddr.in6, + un: if (@hasDecl(posix.sockaddr, "un")) posix.sockaddr.un else posix.sockaddr, + + pub fn initUnix(path: []const u8) !Address { + var sock_addr = posix.sockaddr.un{ + .family = AF.UNIX, + .path = undefined, + }; + + // Add 1 to ensure a terminating 0 is present in the path array for maximum portability. 
+ if (path.len + 1 > sock_addr.path.len) { + return error.NameTooLong; + } + + @memset(&sock_addr.path, 0); + @memcpy(sock_addr.path[0..path.len], path); + + return .{ .un = sock_addr }; + } + + pub fn initIp4(addr: [4]u8, port: u16) !Address { + return .{ .in = .{ + .port = std.mem.nativeToBig(u16, port), + .addr = @as(*align(1) const u32, @ptrCast(&addr)).*, + } }; + } + + pub fn initIp6(addr: [16]u8, port: u16, flowinfo: u32, scope_id: u32) !Address { + return .{ .in6 = .{ + .addr = addr, + .port = std.mem.nativeToBig(u16, port), + .flowinfo = flowinfo, + .scope_id = scope_id, + } }; + } + + pub fn getOsSockLen(self: Address) posix.socklen_t { + return switch (self.any.family) { + posix.AF.INET => @sizeOf(posix.sockaddr.in), + posix.AF.INET6 => @sizeOf(posix.sockaddr.in6), + posix.AF.UNIX => if (@hasDecl(posix.sockaddr, "un")) + @intCast(@offsetOf(posix.sockaddr.un, "path") + std.mem.indexOfScalar(u8, &self.un.path, 0).? + 1) + else + @sizeOf(posix.sockaddr), + else => @sizeOf(posix.sockaddr), + }; + } + + pub fn toIOAddress(self: Address) std.Io.net.IpAddress { + return switch (self.any.family) { + posix.AF.INET => { + const bytes: *const [4]u8 = @ptrCast(&self.in.addr); + return .{ .ip4 = .{ .bytes = bytes.*, .port = std.mem.bigToNative(u16, self.in.port) } }; + }, + posix.AF.INET6 => { + // @ZIG016 I don't think this is correct + const bytes: *const [16]u8 = @ptrCast(&self.in.addr); + return .{ .ip6 = .{ .bytes = bytes.*, .port = std.mem.bigToNative(u16, self.in.port) } }; + }, + else => .{ .ip4 = .unspecified(0) }, + }; + } +}; + +pub fn listen(sock: socket_t, backlog: u31) !void { + if (native_os == .windows) { + const rc = windows.listen(sock, backlog); + if (rc == windows.ws2_32.SOCKET_ERROR) { + switch (windows.ws2_32.WSAGetLastError()) { + .WSANOTINITIALISED => unreachable, // not initialized WSA + .WSAENETDOWN => return error.NetworkSubsystemFailed, + .WSAEADDRINUSE => return error.AddressInUse, + .WSAEISCONN => return error.AlreadyConnected, + 
.WSAEINVAL => return error.SocketNotBound, + .WSAEMFILE, .WSAENOBUFS => return error.SystemResources, + .WSAENOTSOCK => return error.FileDescriptorNotASocket, + .WSAEOPNOTSUPP => return error.OperationNotSupported, + .WSAEINPROGRESS => unreachable, + else => |err| return windows.unexpectedWSAError(err), + } + } + return; + } else { + const rc = system.listen(sock, backlog); + switch (posix.errno(rc)) { + .SUCCESS => return, + .ADDRINUSE => return error.AddressInUse, + .BADF => unreachable, + .NOTSOCK => return error.FileDescriptorNotASocket, + .OPNOTSUPP => return error.OperationNotSupported, + else => return error.Unexpected, + } + } +} + +pub fn accept( + /// This argument is a socket that has been created with `socket`, bound to a local address + /// with `bind`, and is listening for connections after a `listen`. + sock: socket_t, + /// This argument is a pointer to a sockaddr structure. This structure is filled in with the + /// address of the peer socket, as known to the communications layer. The exact format of the + /// address returned addr is determined by the socket's address family (see `socket` and the + /// respective protocol man pages). + addr: ?*sockaddr, + /// This argument is a value-result argument: the caller must initialize it to contain the + /// size (in bytes) of the structure pointed to by addr; on return it will contain the actual size + /// of the peer address. + /// + /// The returned address is truncated if the buffer provided is too small; in this case, `addr_size` + /// will return a value greater than was supplied to the call. + addr_size: ?*socklen_t, + /// The following values can be bitwise ORed in flags to obtain different behavior: + /// * `SOCK.NONBLOCK` - Set the `NONBLOCK` file status flag on the open file description (see `open`) + /// referred to by the new file descriptor. Using this flag saves extra calls to `fcntl` to achieve + /// the same result. 
+ /// * `SOCK.CLOEXEC` - Set the close-on-exec (`FD_CLOEXEC`) flag on the new file descriptor. See the
+ /// description of the `CLOEXEC` flag in `open` for reasons why this may be useful.
+ flags: u32,
+) !socket_t {
+ const have_accept4 = !(builtin.target.os.tag.isDarwin() or native_os == .windows or native_os == .haiku);
+ std.debug.assert(0 == (flags & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC))); // Unsupported flag(s)
+
+ const accepted_sock: socket_t = while (true) {
+ const rc = if (have_accept4)
+ system.accept4(sock, addr, addr_size, flags)
+ else if (native_os == .windows)
+ windows.accept(sock, addr, addr_size)
+ else
+ system.accept(sock, addr, addr_size);
+
+ if (native_os == .windows) {
+ if (rc == windows.ws2_32.INVALID_SOCKET) {
+ switch (windows.ws2_32.WSAGetLastError()) {
+ .WSANOTINITIALISED => unreachable, // not initialized WSA
+ .WSAECONNRESET => return error.ConnectionResetByPeer,
+ .WSAEFAULT => unreachable,
+ .WSAENOTSOCK => return error.FileDescriptorNotASocket,
+ .WSAEINVAL => return error.SocketNotListening,
+ .WSAEMFILE => return error.ProcessFdQuotaExceeded,
+ .WSAENETDOWN => return error.NetworkSubsystemFailed,
+ .WSAENOBUFS => return error.FileDescriptorNotASocket,
+ .WSAEOPNOTSUPP => return error.OperationNotSupported,
+ .WSAEWOULDBLOCK => return error.WouldBlock,
+ else => |err| return windows.unexpectedWSAError(err),
+ }
+ } else {
+ break rc;
+ }
+ } else {
+ switch (posix.errno(rc)) {
+ .SUCCESS => break @intCast(rc),
+ .INTR => continue,
+ .AGAIN => return error.WouldBlock,
+ .BADF => {
+ // ZIG016 This is not right. If we hit this, it's almost certainly
+ // an error - we're trying to read from a socket after it's been closed
+ // which is not safe. But, the code around this worked in 0.15 and I
+ // consider this entire 0.16 transition experimental. 
+ return error.SocketNotListening; + }, + .CONNABORTED => return error.ConnectionAborted, + .FAULT => unreachable, + .INVAL => return error.SocketNotListening, + .NOTSOCK => unreachable, + .MFILE => return error.ProcessFdQuotaExceeded, + .NFILE => return error.SystemFdQuotaExceeded, + .NOBUFS => return error.SystemResources, + .NOMEM => return error.SystemResources, + .OPNOTSUPP => unreachable, + .PROTO => return error.ProtocolFailure, + .PERM => return error.BlockedByFirewall, + else => return error.Unexpected, + } + } + }; + + errdefer switch (native_os) { + .windows => windows.closesocket(accepted_sock) catch unreachable, + else => close(accepted_sock), + }; + if (!have_accept4) { + try setSockFlags(accepted_sock, flags); + } + return accepted_sock; +} + +const iovec_const = extern struct { + base: [*]const u8, + len: usize, +}; + +pub fn write(fd: fd_t, bytes: []const u8) !usize { + if (bytes.len == 0) return 0; + if (native_os == .windows) { + return windows.WriteFile(fd, bytes, null); + } + + const max_count = switch (native_os) { + .linux => 0x7ffff000, + .macos, .ios, .watchos, .tvos, .visionos => std.math.maxInt(i32), + else => std.math.maxInt(isize), + }; + while (true) { + const rc = system.write(fd, bytes.ptr, @min(bytes.len, max_count)); + switch (posix.errno(rc)) { + .SUCCESS => return @intCast(rc), + .INTR => continue, + .INVAL => return error.InvalidArgument, + .FAULT => unreachable, + .SRCH => return error.ProcessNotFound, + .AGAIN => return error.WouldBlock, + .BADF => return error.NotOpenForWriting, // can be a race condition. + .DESTADDRREQ => unreachable, // `connect` was never called. 
+ .DQUOT => return error.DiskQuota, + .FBIG => return error.FileTooBig, + .IO => return error.InputOutput, + .NOSPC => return error.NoSpaceLeft, + .ACCES => return error.AccessDenied, + .PERM => return error.PermissionDenied, + .PIPE => return error.BrokenPipe, + .CONNRESET => return error.ConnectionResetByPeer, + .BUSY => return error.DeviceBusy, + .NXIO => return error.NoDevice, + .MSGSIZE => return error.MessageTooBig, + else => return error.Unexpected, + } + } +} + +pub fn read(fd: fd_t, buf: []u8) !usize { + if (buf.len == 0) return 0; + if (native_os == .windows) { + return windows.ReadFile(fd, buf, null); + } + + // Prevents EINVAL. + const max_count = switch (native_os) { + .linux => 0x7ffff000, + .macos, .ios, .watchos, .tvos, .visionos => std.math.maxInt(i32), + else => std.math.maxInt(isize), + }; + while (true) { + const rc = system.read(fd, buf.ptr, @min(buf.len, max_count)); + switch (posix.errno(rc)) { + .SUCCESS => return @intCast(rc), + .INTR => continue, + .INVAL => unreachable, + .FAULT => unreachable, + .SRCH => return error.ProcessNotFound, + .AGAIN => return error.WouldBlock, + .CANCELED => return error.Canceled, + .BADF => return error.NotOpenForReading, // Can be a race condition. 
+ .IO => return error.InputOutput, + .ISDIR => return error.IsDir, + .NOBUFS => return error.SystemResources, + .NOMEM => return error.SystemResources, + .NOTCONN => return error.SocketNotConnected, + .CONNRESET => return error.ConnectionResetByPeer, + .TIMEDOUT => return error.ConnectionTimedOut, + else => return error.Unexpected, + } + } +} + +pub const ShutdownHow = enum { recv, send, both }; + +/// Shutdown socket send/receive operations +pub fn shutdown(sock: socket_t, how: ShutdownHow) !void { + if (native_os == .windows) { + const result = windows.ws2_32.shutdown(sock, switch (how) { + .recv => windows.ws2_32.SD_RECEIVE, + .send => windows.ws2_32.SD_SEND, + .both => windows.ws2_32.SD_BOTH, + }); + if (0 != result) switch (windows.ws2_32.WSAGetLastError()) { + .WSAECONNABORTED => return error.ConnectionAborted, + .WSAECONNRESET => return error.ConnectionResetByPeer, + .WSAEINPROGRESS => return error.BlockingOperationInProgress, + .WSAEINVAL => unreachable, + .WSAENETDOWN => return error.NetworkSubsystemFailed, + .WSAENOTCONN => return error.SocketNotConnected, + .WSAENOTSOCK => unreachable, + .WSANOTINITIALISED => unreachable, + else => |err| return windows.unexpectedWSAError(err), + }; + } else { + const rc = system.shutdown(sock, switch (how) { + .recv => posix.SHUT.RD, + .send => posix.SHUT.WR, + .both => posix.SHUT.RDWR, + }); + switch (posix.errno(rc)) { + .SUCCESS => return, + .BADF => unreachable, + .INVAL => unreachable, + .NOTCONN => return error.SocketNotConnected, + .NOTSOCK => unreachable, + .NOBUFS => return error.SystemResources, + else => return error.Unexpected, + } + } +} + +pub fn kevent( + kq: i32, + changelist: []const Kevent, + eventlist: []Kevent, + timeout: ?*const timespec, +) !usize { + while (true) { + const rc = system.kevent( + kq, + changelist.ptr, + std.math.cast(c_int, changelist.len) orelse return error.Overflow, + eventlist.ptr, + std.math.cast(c_int, eventlist.len) orelse return error.Overflow, + timeout, + ); + switch 
(posix.errno(rc)) { + .SUCCESS => return @intCast(rc), + .ACCES => return error.AccessDenied, + .FAULT => unreachable, + .BADF => unreachable, // Always a race condition. + .INTR => continue, + .INVAL => unreachable, + .NOENT => return error.EventNotFound, + .NOMEM => return error.SystemResources, + .SRCH => return error.ProcessNotFound, + else => unreachable, + } + } +} + +pub fn kqueue() !i32 { + const rc = system.kqueue(); + switch (posix.errno(rc)) { + .SUCCESS => return @intCast(rc), + .MFILE => return error.ProcessFdQuotaExceeded, + .NFILE => return error.SystemFdQuotaExceeded, + else => return error.Unexpected, + } +} + +pub fn eventfd(initval: u32, flags: u32) !i32 { + const rc = system.eventfd(initval, flags); + switch (posix.errno(rc)) { + .SUCCESS => return @intCast(rc), + .INVAL => unreachable, // invalid parameters + .MFILE => return error.ProcessFdQuotaExceeded, + .NFILE => return error.SystemFdQuotaExceeded, + .NODEV => return error.SystemResources, + .NOMEM => return error.SystemResources, + else => return error.Unexpected, + } +} + +pub fn epoll_create1(flags: u32) !i32 { + const rc = system.epoll_create1(flags); + switch (posix.errno(rc)) { + .SUCCESS => return @intCast(rc), + .INVAL => unreachable, + .MFILE => return error.ProcessFdQuotaExceeded, + .NFILE => return error.SystemFdQuotaExceeded, + .NOMEM => return error.SystemResources, + else => return error.Unexpected + } +} + +pub fn epoll_ctl(epfd: i32, op: u32, fd: i32, event: ?*system.epoll_event) !void { + const rc = system.epoll_ctl(epfd, op, fd, event); + switch (posix.errno(rc)) { + .SUCCESS => return, + .BADF => unreachable, // always a race condition if this happens + .EXIST => return error.FileDescriptorAlreadyPresentInSet, + .INVAL => unreachable, + .LOOP => return error.OperationCausesCircularLoop, + .NOENT => return error.FileDescriptorNotRegistered, + .NOMEM => return error.SystemResources, + .NOSPC => return error.UserResourceLimitReached, + .PERM => return 
error.FileDescriptorIncompatibleWithEpoll, + else => return error.Unexpected, + } +} + +/// Waits for an I/O event on an epoll file descriptor. +/// Returns the number of file descriptors ready for the requested I/O, +/// or zero if no file descriptor became ready during the requested timeout milliseconds. +pub fn epoll_wait(epfd: i32, events: []system.epoll_event, timeout: i32) usize { + while (true) { + // TODO get rid of the @intCast + const rc = system.epoll_wait(epfd, events.ptr, @intCast(events.len), timeout); + switch (posix.errno(rc)) { + .SUCCESS => return @intCast(rc), + .INTR => continue, + .BADF => unreachable, + .FAULT => unreachable, + .INVAL => unreachable, + else => unreachable, + } + } +} diff --git a/src/request.zig b/src/request.zig index aba1466..6f7e3b7 100644 --- a/src/request.zig +++ b/src/request.zig @@ -7,14 +7,15 @@ const metrics = @import("metrics.zig"); const Self = @This(); +const posix = @import("posix.zig"); const Url = @import("url.zig").Url; -const HTTPConn = @import("worker.zig").HTTPConn; const Params = @import("params.zig").Params; +const HTTPConn = @import("worker.zig").HTTPConn; +const Config = @import("config.zig").Config.Request; const StringKeyValue = @import("key_value.zig").StringKeyValue; const MultiFormKeyValue = @import("key_value.zig").MultiFormKeyValue; -const Config = @import("config.zig").Config.Request; -const Address = std.net.Address; +const Address = std.Io.net.IpAddress; const Allocator = std.mem.Allocator; const ArenaAllocator = std.heap.ArenaAllocator; @@ -194,16 +195,16 @@ pub const Request = struct { const conn = self.conn; if (self.unread_body > 0) { try conn.blockingMode(); - const timeval = std.mem.toBytes(std.posix.timeval{ + const timeval = std.mem.toBytes(posix.timeval{ .sec = @intCast(@divTrunc(timeout_ms, 1000)), .usec = @intCast(@mod(timeout_ms, 1000) * 1000), }); - try std.posix.setsockopt(conn.stream.handle, std.posix.SOL.SOCKET, std.posix.SO.RCVTIMEO, &timeval); + try 
posix.setsockopt(conn.stream.socket.handle, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &timeval); } return .{ .buffer = buf, - .socket = conn.stream.handle, + .socket = conn.stream.socket.handle, .unread_body = &self.unread_body, .interface = .{ .end = 0, @@ -517,7 +518,7 @@ pub const Request = struct { pub const Reader = struct { buffer: []const u8, unread_body: *usize, - socket: std.posix.socket_t, + socket: posix.socket_t, interface: std.Io.Reader, pub fn stream(io_r: *std.Io.Reader, w: *std.Io.Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize { @@ -543,7 +544,7 @@ pub const Request = struct { } const buf = if (into.len > unread) into[0..unread] else into; - const n = try std.posix.read(self.socket, buf); + const n = try posix.read(self.socket, buf); self.unread_body.* = unread - n; return n; } @@ -555,7 +556,7 @@ pub const Request = struct { pub fn get(self: Cookie, name: []const u8) ?[]const u8 { var it = std.mem.splitScalar(u8, self.header, ';'); while (it.next()) |kv| { - const trimmed = std.mem.trimLeft(u8, kv, " "); + const trimmed = std.mem.trimStart(u8, kv, " "); if (name.len >= trimmed.len) { // need at least an '=' beyond the name continue; @@ -705,17 +706,16 @@ pub const State = struct { } // returns true if the header has been fully parsed - pub fn parse(self: *State, conn: *HTTPConn, stream: *std.Io.Reader) !bool { + pub fn parse(self: *State, conn: *HTTPConn, source: anytype) !bool { if (self.body != null) { // if we have a body, then we've read the header. We want to read into // self.body, not self.buf. 
- return self.readBody(stream);
+ return self.readBody(source);
+ }
 var len = self.len;
 const buf = self.buf;
- var vecs: [1][]u8 = .{buf[len..]};
- const n = try stream.readVec(&vecs);
+ const n = try zig016HackRead(source, buf[len..]);
 if (n == 0) {
 return false;
 }
@@ -1053,15 +1053,29 @@ pub const State = struct {
 return false;
 }
- fn readBody(self: *State, stream: *std.Io.Reader) !bool {
+ fn readBody(self: *State, source: anytype) !bool {
 const buf = self.body.?.data;
-
- var vecs: [1][]u8 = .{buf[self.body_pos..]};
- self.body_pos += try stream.readVec(&vecs);
+ self.body_pos += try zig016HackRead(source, buf[self.body_pos..]);
 return (self.body_pos == self.body_len);
 }
 };
+// Zig 0.16's Io.net.Stream doesn't expose WouldBlock. It just panics. I don't
+// understand why it's like that. But we're in a transition, and I just want to
+// make this work. So, in "real" code, `source` will be a socket_t. In tests,
+// `source` will be an Io.Reader.
+// In theory, I would wrap the `socket_t` in an `Io.Reader` that behaves like I
+// want it to, but this is _a lot_ easier, especially since all of this will
+// be re-worked when networking is fully working in Zig. 
+fn zig016HackRead(source: anytype, buf: []u8) !usize { + if (@TypeOf(source) == posix.socket_t) { + return posix.read(source, buf); + } + // source is a reader + var vecs: [1][]u8 = .{buf}; + return source.readVec(&vecs); +} + const allowedHeaderValueByte = blk: { var v = [_]bool{false} ** 256; for ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_ :;.,/\"'?!(){}[]@<>=-+*#$&`|~^%\t\\") |b| { @@ -1790,8 +1804,8 @@ test "request: cookie" { fn testParse(input: []const u8, config: Config) !Request { var ctx = t.Context.allocInit(t.arena.allocator(), .{ .request = config }); ctx.write(input); - var reader = ctx.stream.reader(&.{}); - const r = reader.interface(); + var reader = ctx.stream.reader(t.io, &.{}); + const r = &reader.interface; while (true) { const done = try ctx.conn.req_state.parse(ctx.conn, r); if (done) break; @@ -1803,8 +1817,8 @@ fn expectParseError(expected: anyerror, input: []const u8, config: Config) !void var ctx = t.Context.init(.{ .request = config }); defer ctx.deinit(); - var reader = ctx.stream.reader(&.{}); - const r = reader.interface(); + var reader = ctx.stream.reader(t.io, &.{}); + const r = &reader.interface; ctx.write(input); try t.expectError(expected, ctx.conn.req_state.parse(ctx.conn, r)); } diff --git a/src/response.zig b/src/response.zig index e0a8bc5..de378d2 100644 --- a/src/response.zig +++ b/src/response.zig @@ -8,11 +8,11 @@ const HTTPConn = @import("worker.zig").HTTPConn; const Config = @import("config.zig").Config.Response; const StringKeyValue = @import("key_value.zig").StringKeyValue; +const Io = std.Io; const mem = std.mem; -const Stream = std.net.Stream; +const Writer = Io.Writer; const Allocator = mem.Allocator; const ArenaAllocator = std.heap.ArenaAllocator; -const Writer = std.Io.Writer; const Self = @This(); @@ -112,7 +112,7 @@ pub const Response = struct { self.headers.add(n, v); } - pub fn startEventStream(self: *Response, ctx: anytype, comptime handler: fn (@TypeOf(ctx), std.net.Stream) void) !void 
{ + pub fn startEventStream(self: *Response, ctx: anytype, comptime handler: fn (@TypeOf(ctx), Io.net.Stream) void) !void { self.content_type = .EVENTS; self.headers.add("Cache-Control", "no-cache"); self.headers.add("Connection", "keep-alive"); @@ -129,7 +129,7 @@ pub const Response = struct { thread.detach(); } - pub fn startEventStreamSync(self: *Response) !std.net.Stream { + pub fn startEventStreamSync(self: *Response) !Io.net.Stream { self.content_type = .EVENTS; self.headers.add("Cache-Control", "no-cache"); self.headers.add("Connection", "keep-alive"); @@ -351,7 +351,7 @@ pub fn serializeCookie(arena: Allocator, name: []const u8, value: []const u8, co const domain = cookie.domain; const estimated_len = name.len + value.len + path.len + domain.len + 110; - var buf = std.ArrayListUnmanaged(u8){}; + var buf: std.ArrayList(u8) = .empty; try buf.ensureTotalCapacity(arena, estimated_len); buf.appendSliceAssumeCapacity(name); diff --git a/src/t.zig b/src/t.zig index 05a820f..1ed2b54 100644 --- a/src/t.zig +++ b/src/t.zig @@ -4,8 +4,10 @@ // which is exposed as httpz.testing. const std = @import("std"); const httpz = @import("httpz.zig"); +const posix = @import("posix.zig"); -const posix = std.posix; +const Io = std.Io; +pub const io = std.testing.io; const Allocator = std.mem.Allocator; const Conn = @import("worker.zig").HTTPConn; @@ -27,16 +29,16 @@ pub fn reset() void { pub fn getRandom() std.Random.DefaultPrng { var seed: u64 = undefined; - posix.getrandom(std.mem.asBytes(&seed)) catch unreachable; + io.random(std.mem.asBytes(&seed)); return std.Random.DefaultPrng.init(seed); } pub const Context = struct { // the stream that the server gets - stream: std.net.Stream, + stream: Io.net.Stream, // the client (e.g. 
browser stream) - client: std.net.Stream, + client: Io.net.Stream, closed: bool = false, @@ -80,8 +82,8 @@ pub const Context = struct { posix.setsockopt(pair[1], posix.SOL.SOCKET, posix.SO.SNDBUF, &std.mem.toBytes(@as(c_int, 20_000))) catch unreachable; } - const server = std.net.Stream{ .handle = pair[0] }; - const client = std.net.Stream{ .handle = pair[1] }; + const server = Io.net.Stream{ .socket = .{ .handle = pair[0], .address = .{ .ip4 = .{ .bytes = [4]u8{ 127, 0, 0, 1 }, .port = 0 } } } }; + const client = Io.net.Stream{ .socket = .{ .handle = pair[1], .address = .{ .ip4 = .{ .bytes = [4]u8{ 127, 0, 0, 1 }, .port = 0 } } } }; var ctx_arena = ctx_allocator.create(std.heap.ArenaAllocator) catch unreachable; ctx_arena.* = std.heap.ArenaAllocator.init(ctx_allocator); @@ -89,7 +91,7 @@ pub const Context = struct { const aa = ctx_arena.allocator(); const bp = aa.create(BufferPool) catch unreachable; - bp.* = BufferPool.init(aa, 2, 256) catch unreachable; + bp.* = BufferPool.init(io, aa, 2, 256) catch unreachable; var config = config_; { @@ -108,11 +110,12 @@ pub const Context = struct { const conn = aa.create(Conn) catch unreachable; conn.* = .{ - ._mut = .{}, + .io = io, + ._mut = .init, ._state = .request, .handover = .close, .stream = server, - .address = std.net.Address.initIp4([_]u8{ 127, 0, 0, 200 }, 0), + .address = .{ .ip4 = .{ .bytes = [_]u8{ 127, 0, 0, 200 }, .port = 0 } }, .req_state = req_state, .res_state = res_state, .timeout = 0, @@ -135,9 +138,9 @@ pub const Context = struct { pub fn deinit(self: *Context) void { if (self.closed == false) { self.closed = true; - self.stream.close(); + self.stream.close(io); } - self.client.close(); + self.client.close(io); const ctx_allocator = arena.child_allocator; self.arena.deinit(); @@ -172,7 +175,7 @@ pub const Context = struct { pub fn close(self: *Context) void { if (self.closed == false) { self.closed = true; - self.stream.close(); + self.stream.close(io); } } @@ -181,7 +184,7 @@ pub const Context = 
struct { self.to_read.appendSlice(self.arena.allocator(), data) catch unreachable; } else { var buf: [1024]u8 = undefined; - var writer = self.client.writer(&buf); + var writer = self.client.writer(io, &buf); const w = &writer.interface; w.writeAll(data) catch unreachable; w.flush() catch unreachable; @@ -192,18 +195,19 @@ pub const Context = struct { var buf: [1024]u8 = undefined; var arr: std.ArrayList(u8) = .empty; - var reader = self.client.reader(&.{}); - const r = reader.interface(); + var reader = self.client.reader(io, &.{}); + const r = &reader.interface; while (true) { const n = r.readSliceShort(&buf) catch |err| switch (err) { error.ReadFailed => { - if (reader.getError()) |e| { - switch (e) { - error.WouldBlock => return arr, - else => return e, - } - } + // @ZIG016 + // if (reader.err) |e| { + // switch (e) { + // error.WouldBlock => return arr, + // else => return e, + // } + // } return err; }, }; @@ -217,10 +221,11 @@ pub const Context = struct { var pos: usize = 0; var buf = try allocator.alloc(u8, expected.len); defer allocator.free(buf); - var reader = self.client.reader(&.{}); - const r = reader.interface(); + + const socket = self.client.socket.handle; + while (pos < buf.len) { - const n = try r.readSliceShort(buf[pos..]); + const n = try posix.read(socket, buf[pos..]); if (n == 0) break; pos += n; } @@ -229,25 +234,18 @@ pub const Context = struct { // should have no extra data // let's check, with a shor timeout, which could let things slip, but // else we slow down fuzz tests too much - posix.setsockopt(self.client.handle, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &std.mem.toBytes(posix.timeval{ + posix.setsockopt(socket, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &std.mem.toBytes(posix.timeval{ .sec = 0, .usec = 1_000, })) catch unreachable; - const n = r.readSliceShort(buf[0..]) catch |err| blk: switch (err) { - error.ReadFailed => { - if (reader.getError()) |e| { - switch (e) { - error.WouldBlock => break :blk 0, - else => @panic(@errorName(e)), - } 
- } - @panic(@errorName(err)); - }, + const n = posix.read(socket, buf[0..]) catch |err| blk: switch (err) { + error.WouldBlock => break :blk 0, + else => @panic(@errorName(err)), }; try expectEqual(0, n); - posix.setsockopt(self.client.handle, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &std.mem.toBytes(posix.timeval{ + posix.setsockopt(socket, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &std.mem.toBytes(posix.timeval{ .sec = 0, .usec = 20_000, })) catch unreachable; @@ -256,7 +254,7 @@ pub const Context = struct { fn random(self: *Context) std.Random { if (self._random == null) { var seed: u64 = undefined; - posix.getrandom(std.mem.asBytes(&seed)) catch unreachable; + io.random(std.mem.asBytes(&seed)); self._random = std.Random.DefaultPrng.init(seed); } return self._random.?.random(); @@ -267,7 +265,7 @@ pub const Context = struct { const fr = FakeReader{ .pos = self.to_read_pos, - .buf = self.to_read.items, + .buf = self.to_read.toOwnedSlice(self.arena.allocator()) catch unreachable, .random = self.random(), }; diff --git a/src/thread_pool.zig b/src/thread_pool.zig index ab8bfd5..ac008d0 100644 --- a/src/thread_pool.zig +++ b/src/thread_pool.zig @@ -1,5 +1,6 @@ const std = @import("std"); +const Io = std.Io; const Thread = std.Thread; const Allocator = std.mem.Allocator; @@ -33,7 +34,7 @@ pub fn ThreadPool(comptime F: anytype) type { const Self = @This(); // we expect allocator to be an Arena - pub fn init(allocator: Allocator, opts: Opts) !Self { + pub fn init(io: Io, allocator: Allocator, opts: Opts) !Self { var arena = std.heap.ArenaAllocator.init(allocator); errdefer arena.deinit(); @@ -49,7 +50,7 @@ pub fn ThreadPool(comptime F: anytype) type { }; for (0..workers.len) |i| { - workers[i] = try Worker(F).init(aa, &workers[@mod(i + i, workers.len)], opts); + workers[i] = try Worker(F).init(io, aa, &workers[@mod(i + i, workers.len)], opts); } for (0..workers.len) |i| { threads[i] = try Thread.spawn(.{}, Worker(F).run, .{&workers[i]}); @@ -129,6 +130,8 @@ fn Worker(comptime 
F: anytype) type { const Args = SpawnArgs(FullArgs); return struct { + io: Io, + // position in queue to read from tail: usize, @@ -141,52 +144,56 @@ fn Worker(comptime F: anytype) type { buffer: []u8, stopped: bool, - mutex: Thread.Mutex, - read_cond: Thread.Condition, - write_cond: Thread.Condition, + mutex: Io.Mutex, + read_cond: Io.Condition, + write_cond: Io.Condition, peer: *Worker(F), const Self = @This(); // we expect allocator to be an Arena - pub fn init(allocator: Allocator, peer: *Worker(F), opts: Opts) !Self { + pub fn init(io: Io, allocator: Allocator, peer: *Worker(F), opts: Opts) !Self { const queue = try allocator.alloc(Args, if (opts.backlog == 0 or opts.backlog == 1) 2 else opts.backlog); const buffer = try allocator.alloc(u8, opts.buffer_size); return .{ + .io = io, .tail = 0, .head = 0, .peer = peer, - .mutex = .{}, + .mutex = .init, .stopped = false, .queue = queue, - .read_cond = .{}, - .write_cond = .{}, + .read_cond = .init, + .write_cond = .init, .buffer = buffer, }; } pub fn stop(self: *Self) void { + const io = self.io; { // allow stop to be called as part of server.stop() // but also in server.deinit(), or in both. 
- self.mutex.lock(); - defer self.mutex.unlock(); + self.mutex.lockUncancelable(io); + defer self.mutex.unlock(io); if (self.stopped) { return; } self.stopped = true; } - self.read_cond.broadcast(); + self.read_cond.broadcast(io); } pub fn empty(self: *Self) bool { - self.mutex.lock(); - defer self.mutex.unlock(); + const io = self.io; + self.mutex.lockUncancelable(io); + defer self.mutex.unlock(io); return self.head == self.tail; } pub fn spawn(self: *Self, args: []const Args) void { + const io = self.io; var pending = args; var capacity: usize = 0; @@ -194,7 +201,7 @@ fn Worker(comptime F: anytype) type { const queue_end = queue.len - 1; while (true) { - self.mutex.lock(); + self.mutex.lockUncancelable(io); var head = self.head; var tail = self.tail; while (true) { @@ -202,7 +209,7 @@ fn Worker(comptime F: anytype) type { if (capacity > 0) { break; } - self.write_cond.wait(&self.mutex); + self.write_cond.waitUncancelable(io, &self.mutex); head = self.head; tail = self.tail; } @@ -213,8 +220,8 @@ fn Worker(comptime F: anytype) type { head = if (head == queue_end) 0 else head + 1; } self.head = head; - self.mutex.unlock(); - self.read_cond.signal(); + self.mutex.unlock(io); + self.read_cond.signal(io); if (ready.len == pending.len) { break; } @@ -246,23 +253,24 @@ fn Worker(comptime F: anytype) type { } fn getNext(self: *Self, block: bool) ?Args { + const io = self.io; const queue = self.queue; const queue_end = queue.len - 1; - self.mutex.lock(); + self.mutex.lockUncancelable(io); while (self.tail == self.head) { if (block == false or self.stopped) { - self.mutex.unlock(); + self.mutex.unlock(io); return null; } - self.mutex.unlock(); + self.mutex.unlock(io); if (self.peer.getNext(false)) |args| { return args; } - self.mutex.lock(); + self.mutex.lockUncancelable(io); if (self.tail == self.head) { - self.read_cond.wait(&self.mutex); + self.read_cond.waitUncancelable(io, &self.mutex); } else { break; } @@ -271,8 +279,8 @@ fn Worker(comptime F: anytype) type { const 
tail = self.tail; const args = queue[tail]; self.tail = if (tail == queue_end) 0 else tail + 1; - self.mutex.unlock(); - self.write_cond.signal(); + self.mutex.unlock(io); + self.write_cond.signal(io); return args; } }; @@ -292,17 +300,11 @@ fn SpawnArgs(FullArgs: anytype) type { // []u8. But this ThreadPool is private and being used for 2 specific cases // that we control. - var fields: [ARG_COUNT]std.builtin.Type.StructField = undefined; - inline for (full_fields[0..ARG_COUNT], 0..) |field, index| fields[index] = field; - - return @Type(.{ - .@"struct" = .{ - .layout = .auto, - .is_tuple = true, - .fields = &fields, - .decls = &.{}, - }, - }); + var field_types: [ARG_COUNT]type = undefined; + inline for (full_fields[0..ARG_COUNT], 0..) |field, i| { + field_types[i] = field.type; + } + return @Tuple(&field_types); } const t = @import("t.zig"); @@ -321,7 +323,7 @@ test "ThreadPool: batch add" { testC4 = 0; testC5 = 0; testC6 = 0; - var tp = try ThreadPool(testIncr).init(t.arena.allocator(), .{ .count = count, .backlog = backlog, .buffer_size = 512 }); + var tp = try ThreadPool(testIncr).init(t.io, t.arena.allocator(), .{ .count = count, .backlog = backlog, .buffer_size = 512 }); defer tp.deinit(); for (0..1_000) |_| { @@ -331,7 +333,7 @@ test "ThreadPool: batch add" { tp.spawn(.{4}); } while (tp.empty() == false) { - std.Thread.sleep(std.time.ns_per_ms); + try t.io.sleep(.fromMilliseconds(1), .awake); } tp.stop(); try t.expectEqual(10_000, testSum); @@ -358,7 +360,7 @@ test "ThreadPool: small fuzz" { testC4 = 0; testC5 = 0; testC6 = 0; - var tp = try ThreadPool(testIncr).init(t.arena.allocator(), .{ .count = 3, .backlog = 3, .buffer_size = 512 }); + var tp = try ThreadPool(testIncr).init(t.io, t.arena.allocator(), .{ .count = 3, .backlog = 3, .buffer_size = 512 }); defer tp.deinit(); for (0..10_000) |_| { @@ -367,7 +369,7 @@ test "ThreadPool: small fuzz" { tp.spawn(.{3}); } while (tp.empty() == false) { - std.Thread.sleep(std.time.ns_per_ms); + try 
t.io.sleep(.fromMilliseconds(1), .awake); } tp.stop(); try t.expectEqual(60_000, testSum); @@ -391,7 +393,7 @@ test "ThreadPool: large fuzz" { testC4 = 0; testC5 = 0; testC6 = 0; - var tp = try ThreadPool(testIncr).init(t.arena.allocator(), .{ .count = 50, .backlog = 1000, .buffer_size = 512 }); + var tp = try ThreadPool(testIncr).init(t.io, t.arena.allocator(), .{ .count = 50, .backlog = 1000, .buffer_size = 512 }); defer tp.deinit(); for (0..10_000) |_| { @@ -403,7 +405,7 @@ test "ThreadPool: large fuzz" { tp.spawn(.{6}); } while (tp.empty() == false) { - std.Thread.sleep(std.time.ns_per_ms); + try t.io.sleep(.fromMilliseconds(1), .awake); } tp.stop(); try t.expectEqual(210_000, testSum); @@ -438,5 +440,5 @@ fn testIncr(c: u64, buf: []u8) void { else => unreachable, } // let the threadpool queue get backed up - std.Thread.sleep(std.time.ns_per_us * 20); + t.io.sleep(.fromMicroseconds(20), .awake) catch unreachable; } diff --git a/src/worker.zig b/src/worker.zig index 2329cdf..ccb75bb 100644 --- a/src/worker.zig +++ b/src/worker.zig @@ -1,9 +1,11 @@ const std = @import("std"); const builtin = @import("builtin"); +const posix = @import("posix.zig"); const httpz = @import("httpz.zig"); const metrics = @import("metrics.zig"); -const ws = @import("websocket").server; +// @ZIG016 +// const ws = @import("websocket").server; const Config = httpz.Config; const Request = httpz.Request; @@ -12,15 +14,11 @@ const Response = httpz.Response; const BufferPool = @import("buffer.zig").Pool; const ThreadPool = @import("thread_pool.zig").ThreadPool; -const Thread = std.Thread; +const Io = std.Io; +const Stream = Io.Stream; const Allocator = std.mem.Allocator; const ArenaAllocator = std.heap.ArenaAllocator; -const net = std.net; -const Stream = net.Stream; -const NetConn = net.StreamServer.Connection; - -const posix = std.posix; const log = std.log.scoped(.httpz); const MAX_TIMEOUT = 2_147_483_647; @@ -28,14 +26,18 @@ const MAX_TIMEOUT = 2_147_483_647; // This is our Blocking 
worker. It's very different than NonBlocking and much // simpler. (WSH is our websocket handler, and can be void) pub fn Blocking(comptime S: type, comptime WSH: type) type { + // @ZIG016 + _ = WSH; return struct { + io: Io, server: S, - mut: Thread.Mutex, + mut: Io.Mutex, config: *const Config, allocator: Allocator, buffer_pool: *BufferPool, http_conn_pool: HTTPConnPool, - websocket: *ws.Worker(WSH), + // @ZIG016 + // websocket: *ws.Worker(WSH), timeout_request: ?Timeout, timeout_keepalive: ?Timeout, timeout_write_error: Timeout, @@ -52,21 +54,21 @@ pub fn Blocking(comptime S: type, comptime WSH: type) type { const Timeout = struct { sec: u32, - timeval: [@sizeOf(std.posix.timeval)]u8, + timeval: [@sizeOf(posix.timeval)]u8, // if sec is null, it means we want to cancel the timeout. fn init(sec: ?u32) Timeout { return .{ .sec = if (sec) |s| s else MAX_TIMEOUT, - .timeval = std.mem.toBytes(std.posix.timeval{ .sec = @intCast(sec orelse 0), .usec = 0 }), + .timeval = std.mem.toBytes(posix.timeval{ .sec = @intCast(sec orelse 0), .usec = 0 }), }; } }; const Self = @This(); - pub fn init(allocator: Allocator, server: S, config: *const Config) !Self { - const buffer_pool = try initializeBufferPool(allocator, config); + pub fn init(io: Io, allocator: Allocator, server: S, config: *const Config) !Self { + const buffer_pool = try initializeBufferPool(io, allocator, config); errdefer allocator.destroy(buffer_pool); errdefer buffer_pool.deinit(); @@ -89,17 +91,19 @@ pub fn Blocking(comptime S: type, comptime WSH: type) type { timeout_keepalive = Timeout.init(0); } - const websocket = try allocator.create(ws.Worker(WSH)); - errdefer allocator.destroy(websocket); - websocket.* = try ws.Worker(WSH).init(allocator, &server._websocket_state); - errdefer websocket.deinit(); + // @ZIG016 + // const websocket = try allocator.create(ws.Worker(WSH)); + // errdefer allocator.destroy(websocket); + // websocket.* = try ws.Worker(WSH).init(allocator, &server._websocket_state); + // errdefer 
websocket.deinit(); - var http_conn_pool = try HTTPConnPool.init(allocator, buffer_pool, websocket, 0, config); + // @ZIG016 undefined + var http_conn_pool = try HTTPConnPool.init(io, allocator, buffer_pool, undefined, 0, config); errdefer http_conn_pool.deinit(); const retain_allocated_bytes_keepalive = config.workers.retain_allocated_bytes orelse 8192; - var thread_pool = try ThreadPool(Self.handleConnection).init(allocator, .{ + var thread_pool = try ThreadPool(Self.handleConnection).init(io, allocator, .{ .count = config.threadPoolCount(), .backlog = config.thread_pool.backlog orelse 500, .buffer_size = config.thread_pool.buffer_size orelse 32_768, @@ -110,19 +114,21 @@ pub fn Blocking(comptime S: type, comptime WSH: type) type { } return .{ - .mut = .{}, + .io = io, + .mut = .init, .server = server, .config = config, .connections = .{}, .allocator = allocator, - .websocket = websocket, + // @ZIG016 + // .websocket = websocket, .buffer_pool = buffer_pool, .thread_pool = thread_pool, .http_conn_pool = http_conn_pool, .timeout_request = timeout_request, .timeout_keepalive = timeout_keepalive, .timeout_write_error = Timeout.init(5), - .conn_node_pool = std.heap.MemoryPool(ConnNode).init(allocator), + .conn_node_pool = .empty, .retain_allocated_bytes_keepalive = retain_allocated_bytes_keepalive, }; } @@ -130,25 +136,29 @@ pub fn Blocking(comptime S: type, comptime WSH: type) type { pub fn deinit(self: *Self) void { const allocator = self.allocator; - self.websocket.deinit(); + // @ZIG016 + // self.websocket.deinit(); self.thread_pool.deinit(); - allocator.destroy(self.websocket); + // @ZIG016 + // allocator.destroy(self.websocket); self.http_conn_pool.deinit(); - self.conn_node_pool.deinit(); + self.conn_node_pool.deinit(allocator); self.buffer_pool.deinit(); allocator.destroy(self.buffer_pool); } pub fn listen(self: *Self, listener: posix.socket_t) void { + const io = self.io; var thread_pool = &self.thread_pool; while (true) { - var address: net.Address = 
undefined; - var address_len: posix.socklen_t = @sizeOf(net.Address); + var address: posix.Address = undefined; + var address_len: posix.socklen_t = @sizeOf(posix.Address); const socket = posix.accept(listener, &address.any, &address_len, posix.SOCK.CLOEXEC) catch |err| { if (err == error.ConnectionAborted or err == error.SocketNotListening) { - self.websocket.shutdown(); + // @ZIG016 + // self.websocket.shutdown(); break; } log.err("Failed to accept socket: {}", .{err}); @@ -160,8 +170,8 @@ pub fn Blocking(comptime S: type, comptime WSH: type) type { } { - self.mut.lock(); - defer self.mut.unlock(); + self.mut.lockUncancelable(io); + defer self.mut.unlock(io); var node = self.connections.head; while (node) |n| { node = n.next; @@ -172,17 +182,20 @@ pub fn Blocking(comptime S: type, comptime WSH: type) type { } pub fn stop(self: *const Self) void { + _ = self; // The HTTP server will stop when the http.Server shutdown the listening socket. - self.websocket.shutdown(); + // @ZIG016 + // self.websocket.shutdown(); } // Called in a worker thread. `thread_buf` is a thread-specific buffer that // we are free to use as needed. 
- pub fn handleConnection(self: *Self, socket: posix.socket_t, address: net.Address, thread_buf: []u8) void { + pub fn handleConnection(self: *Self, socket: posix.socket_t, address: posix.Address, thread_buf: []u8) void { + const io = self.io; const connection_node = blk: { - self.mut.lock(); - defer self.mut.unlock(); - const node = self.conn_node_pool.create() catch |err| { + self.mut.lockUncancelable(io); + defer self.mut.unlock(io); + const node = self.conn_node_pool.create(self.allocator) catch |err| { log.err("Failed to initialize connection node: {}", .{err}); return; }; @@ -196,8 +209,8 @@ pub fn Blocking(comptime S: type, comptime WSH: type) type { }; defer { - self.mut.lock(); - defer self.mut.unlock(); + self.mut.lockUncancelable(io); + defer self.mut.unlock(io); self.connections.remove(connection_node); self.conn_node_pool.destroy(connection_node); } @@ -207,9 +220,10 @@ pub fn Blocking(comptime S: type, comptime WSH: type) type { return; }; - conn.address = address; + const ip_address = address.toIOAddress(); + conn.address = ip_address; conn.handover = .unknown; - conn.stream = .{ .handle = socket }; + conn.stream = .{ .socket = .{ .handle = socket, .address = ip_address } }; var is_keepalive = false; while (true) { @@ -225,18 +239,20 @@ pub fn Blocking(comptime S: type, comptime WSH: type) type { self.http_conn_pool.release(conn); return; }, - .websocket => |ptr| { - const hc: *ws.HandlerConn(WSH) = @ptrCast(@alignCast(ptr)); - // impossible for this to fail in blocking mode - conn.requestDone(self.retain_allocated_bytes_keepalive, false) catch unreachable; - self.http_conn_pool.release(conn); - // blocking read loop - // will close the connection - self.handleWebSocket(hc) catch |err| { - log.err("({f} websocket connection error: {}", .{ address, err }); - }; - return; - }, + .websocket => unreachable, + // @ZIG016 + // .websocket => |ptr| { + // const hc: *ws.HandlerConn(WSH) = @ptrCast(@alignCast(ptr)); + // // impossible for this to fail in 
blocking mode + // conn.requestDone(self.retain_allocated_bytes_keepalive, false) catch unreachable; + // self.http_conn_pool.release(conn); + // // blocking read loop + // // will close the connection + // self.handleWebSocket(hc) catch |err| { + // log.err("({f} websocket connection error: {}", .{ address, err }); + // }; + // return; + // }, .disown => { // impossible for this to fail in blocking mode conn.requestDone(self.retain_allocated_bytes_keepalive, false) catch unreachable; @@ -248,52 +264,45 @@ pub fn Blocking(comptime S: type, comptime WSH: type) type { } fn handleRequest(self: *const Self, conn: *HTTPConn, is_keepalive: bool, thread_buf: []u8) !HTTPConn.Handover { - const stream = conn.stream; + const io = self.io; + const socket = conn.stream.socket.handle; const timeout: ?Timeout = if (is_keepalive) self.timeout_keepalive else self.timeout_request; var deadline: ?i64 = null; if (timeout) |to| { if (is_keepalive == false) { - deadline = timestamp(0) + to.sec; + deadline = timestamp(io) + to.sec; } - try posix.setsockopt(stream.handle, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &to.timeval); + try posix.setsockopt(socket, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &to.timeval); } var is_first = true; - var reader = stream.reader(&.{}); // Request.State does its own buffering while (true) { - const done = conn.req_state.parse(conn, reader.interface()) catch |err| { + const done = conn.req_state.parse(conn, socket) catch |err| { switch (err) { - error.ReadFailed => { - if (reader.getError()) |e| { - switch (e) { - error.WouldBlock => { - if (is_keepalive and is_first) { - metrics.timeoutKeepalive(1); - } else { - metrics.timeoutRequest(1); - } - return .close; - }, - error.NotOpenForReading => { - // This can only happen when we're shutting down and our - // listener has called posix.close(socket) to unblock - // this thread. 
Using `.disown` is a bit of a hack, but - // disown is handled in handleConnection the way we want - // WE DO NOT WANT to return .close, else that would result - // in posix.close(socket) being called on an already-closed - // socket, which would panic. - return .disown; - }, - else => {}, - } + error.WouldBlock => { + if (is_keepalive and is_first) { + metrics.timeoutKeepalive(1); + } else { + metrics.timeoutRequest(1); } + return .close; + }, + error.NotOpenForReading => { + // This can only happen when we're shutting down and our + // listener has called posix.close(socket) to unblock + // this thread. Using `.disown` is a bit of a hack, but + // disown is handled in handleConnection the way we want + // WE DO NOT WANT to return .close, else that would result + // in posix.close(socket) being called on an already-closed + // socket, which would panic. + return .disown; }, else => {}, } requestError(conn, err) catch {}; - posix.close(stream.handle); + posix.close(socket); return .disown; }; @@ -311,13 +320,13 @@ pub fn Blocking(comptime S: type, comptime WSH: type) type { // an actual timeout, or it could just be removing the keepalive timeout // either way, it's the same code (timeval will just be set to 0 for // the second case) - deadline = timestamp(0) + to.sec; - try posix.setsockopt(stream.handle, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &to.timeval); + deadline = timestamp(io) + to.sec; + try posix.setsockopt(socket, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &to.timeval); } is_first = false; } } else if (deadline) |dl| { - if (timestamp(0) > dl) { + if (timestamp(io) > dl) { metrics.timeoutRequest(1); return .close; } @@ -329,14 +338,15 @@ pub fn Blocking(comptime S: type, comptime WSH: type) type { return conn.handover; } - fn handleWebSocket(self: *const Self, hc: *ws.HandlerConn(WSH)) !void { - posix.setsockopt(hc.socket, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &std.mem.toBytes(posix.timeval{ .sec = 0, .usec = 0 })) catch |err| { - self.websocket.cleanupConn(hc); - 
return err; - }; - // closes the connection before returning - return self.websocket.worker.readLoop(hc); - } + // @ZIG016 + // fn handleWebSocket(self: *const Self, hc: *ws.HandlerConn(WSH)) !void { + // posix.setsockopt(hc.socket, posix.SOL.SOCKET, posix.SO.RCVTIMEO, &std.mem.toBytes(posix.timeval{ .sec = 0, .usec = 0 })) catch |err| { + // self.websocket.cleanupConn(hc); + // return err; + // }; + // // closes the connection before returning + // return self.websocket.worker.readLoop(hc); + // } }; } @@ -354,6 +364,8 @@ pub fn Blocking(comptime S: type, comptime WSH: type) type { // and complexity. pub fn NonBlocking(comptime S: type, comptime WSH: type) type { return struct { + io: Io, + // Reference to the httpz.Server. After we've parsed the request we // call its handleRequest method. server: S, @@ -376,7 +388,8 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { config: *const Config, - websocket: *ws.Worker(WSH), + // @ZIG016 + // websocket: *ws.Worker(WSH), // how many bytes should we retain in a connection's arena allocator retain_allocated_bytes: usize, @@ -448,25 +461,27 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { .epoll => EPoll(WSH), }; - pub fn init(allocator: Allocator, server: S, config: *const Config) !Self { + pub fn init(io: Io, allocator: Allocator, server: S, config: *const Config) !Self { const loop = try Loop.init(); errdefer loop.deinit(); - const websocket = try allocator.create(ws.Worker(WSH)); - errdefer allocator.destroy(websocket); - websocket.* = try ws.Worker(WSH).init(allocator, &server._websocket_state); - errdefer websocket.deinit(); + // @ZIG016 + // const websocket = try allocator.create(ws.Worker(WSH)); + // errdefer allocator.destroy(websocket); + // websocket.* = try ws.Worker(WSH).init(allocator, &server._websocket_state); + // errdefer websocket.deinit(); - var buffer_pool = try initializeBufferPool(allocator, config); + var buffer_pool = try initializeBufferPool(io, allocator, 
config); errdefer buffer_pool.deinit(); - var conn_mem_pool = std.heap.MemoryPool(Conn(WSH)).init(allocator); - errdefer conn_mem_pool.deinit(); + var conn_mem_pool: std.heap.MemoryPool(Conn(WSH)) = .empty; + errdefer conn_mem_pool.deinit(allocator); - var http_conn_pool = try HTTPConnPool.init(allocator, buffer_pool, websocket, loop.fd, config); + // @ZIG016 undefined!! + var http_conn_pool = try HTTPConnPool.init(io, allocator, buffer_pool, undefined, loop.fd, config); errdefer http_conn_pool.deinit(); - const thread_pool = try ThreadPool(Self.processData).init(allocator, .{ + const thread_pool = try ThreadPool(Self.processData).init(io, allocator, .{ .count = config.threadPoolCount(), .backlog = config.thread_pool.backlog orelse 500, .buffer_size = config.thread_pool.buffer_size orelse 32_768, @@ -478,13 +493,15 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { } return .{ + .io = io, .len = 0, .full = false, .loop = loop, .config = config, .server = server, .allocator = allocator, - .websocket = websocket, + // @ZIG016 + // .websocket = websocket, .thread_pool = thread_pool, .active_list = .{}, .request_list = .{}, @@ -503,8 +520,9 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { pub fn deinit(self: *Self) void { const allocator = self.allocator; - self.websocket.deinit(); - allocator.destroy(self.websocket); + // @ZIG016 + // self.websocket.deinit(); + // allocator.destroy(self.websocket); self.thread_pool.deinit(); @@ -514,7 +532,7 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { self.shutdownConcurrentList(&self.keepalive_list); self.buffer_pool.deinit(); - self.conn_mem_pool.deinit(); + self.conn_mem_pool.deinit(allocator); self.http_conn_pool.deinit(); allocator.destroy(self.buffer_pool); @@ -526,7 +544,8 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { self.loop.stop(); } - pub fn run(self: *Self, listener: posix.fd_t, ready_sem: *std.Thread.Semaphore) void { + pub fn run(self: *Self, 
listener: posix.fd_t, ready_sem: *Io.Semaphore) void { + const io = self.io; var thread_pool = &self.thread_pool; self.loop.start() catch |err| { @@ -538,13 +557,14 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { // that we're ready enough to be stopped in necessary. self.loop.monitorAccept(listener) catch |err| { log.err("Failed to add monitor to listening socket: {}", .{err}); - ready_sem.post(); + ready_sem.post(io); return; }; - ready_sem.post(); - defer self.websocket.shutdown(); + ready_sem.post(io); + // @ZIG016 + // defer self.websocket.shutdown(); - var now = timestamp(0); + var now = timestamp(io); var last_timeout = now; while (true) { var timeout: ?i32 = 1; @@ -560,18 +580,22 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { var it = self.loop.wait(timeout) catch |err| { log.err("Failed to wait on events: {}", .{err}); - std.Thread.sleep(std.time.ns_per_s); + io.sleep(.fromMilliseconds(100), .awake) catch |err2| { + log.err("Failed to do a mini recovery sleep: {}", .{err2}); + }; continue; }; - now = timestamp(now); + now = timestamp(io); var closed_conn = false; while (it.next()) |event| { switch (event) { .accept => self.accept(listener, now) catch |err| { log.err("Failed to accept connection: {}", .{err}); - std.Thread.sleep(std.time.ns_per_ms * 5); + io.sleep(.fromMilliseconds(5), .awake) catch |err2| { + log.err("Failed to do a mini recovery sleep: {}", .{err2}); + }; }, .signal => self.processSignal(&closed_conn), .recv => |conn| switch (conn.protocol) { @@ -593,10 +617,10 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { // can access _state directly. 
const stream = http_conn.stream; - var reader = stream.reader(&.{}); // Request.State does its own buffering - const done = http_conn.req_state.parse(http_conn, reader.interface()) catch |err| { + var reader = stream.reader(io, &.{}); // Request.State does its own buffering + const done = http_conn.req_state.parse(http_conn, &reader.interface) catch |err| { // maybe a write fail or something, doesn't matter, we're closing the connection - requestError(http_conn, reader.getError() orelse err) catch {}; + requestError(http_conn, reader.err orelse err) catch {}; // impossible to fail when false is passed http_conn.requestDone(self.retain_allocated_bytes, false) catch unreachable; @@ -614,15 +638,16 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { self.swapList(conn, .active); thread_pool.spawn(.{ self, now, conn }); }, - .websocket => { - if (conn.acquireProcessing() == false) { - // Connection is already being processed. We need - // to wait for the current processing to complete. - // See the processing field in Conn - continue; - } - thread_pool.spawn(.{ self, now, conn }); - }, + // @ZIG016 + // .websocket => { + // if (conn.acquireProcessing() == false) { + // // Connection is already being processed. We need + // // to wait for the current processing to complete. 
+ // // See the processing field in Conn + // continue; + // } + // thread_pool.spawn(.{ self, now, conn }); + // }, }, .shutdown => return, } @@ -640,24 +665,25 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { } fn swapList(self: *Self, conn: *Conn(WSH), new_state: HTTPConn.State) void { + const io = self.io; const http_conn = conn.protocol.http; - http_conn._mut.lock(); - defer http_conn._mut.unlock(); + http_conn._mut.lockUncancelable(io); + defer http_conn._mut.unlock(io); switch (http_conn._state) { - .active => self.active_list.remove(conn), - .keepalive => self.keepalive_list.remove(conn), + .active => self.active_list.remove(io, conn), + .keepalive => self.keepalive_list.remove(io, conn), .request => self.request_list.remove(conn), - .handover => self.handover_list.remove(conn), + .handover => self.handover_list.remove(io, conn), } http_conn.setState(new_state); switch (new_state) { - .active => self.active_list.insert(conn), - .keepalive => self.keepalive_list.insert(conn), + .active => self.active_list.insert(io, conn), + .keepalive => self.keepalive_list.insert(io, conn), .request => self.request_list.insert(conn), - .handover => self.handover_list.insert(conn), + .handover => self.handover_list.insert(io, conn), } } @@ -678,8 +704,8 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { self.loop.pauseAccept(listener) catch {}; return; } - var address: net.Address = undefined; - var address_len: posix.socklen_t = @sizeOf(net.Address); + var address: posix.Address = undefined; + var address_len: posix.socklen_t = @sizeOf(posix.Address); const socket = posix.accept(listener, &address.any, &address_len, posix.SOCK.CLOEXEC | posix.SOCK.NONBLOCK) catch |err| { // On BSD, REUSEPORT_LB means that only 1 worker should get notified @@ -699,17 +725,19 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { const nonblocking = @as(u32, @bitCast(posix.O{ .NONBLOCK = true })); std.debug.assert(socket_flags & nonblocking == 
nonblocking); - const conn = try self.conn_mem_pool.create(); + const conn = try self.conn_mem_pool.create(self.allocator); errdefer self.conn_mem_pool.destroy(conn); + const ip_address = address.toIOAddress(); + const http_conn = try self.http_conn_pool.acquire(); http_conn.request_count = 1; http_conn._state = .request; http_conn.handover = .unknown; http_conn._io_mode = .nonblocking; - http_conn.address = address; + http_conn.address = ip_address; http_conn.socket_flags = socket_flags; - http_conn.stream = .{ .handle = socket }; + http_conn.stream = .{ .socket = .{ .handle = socket, .address = ip_address } }; http_conn.timeout = now + self.timeout_request; self.len += 1; @@ -730,7 +758,9 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { } fn processSignal(self: *Self, closed_bool: *bool) void { + const io = self.io; const loop = &self.loop; + _ = loop; var hl = &self.handover_list; // We take the handover list, and then re-initialize it. We do this @@ -743,8 +773,8 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { // going to end up in another list (or closed) by the time we're // done. var c = blk: { - hl.mut.lock(); - defer hl.mut.unlock(); + hl.mut.lockUncancelable(io); + defer hl.mut.unlock(io); const head = hl.inner.head; hl.inner = .{}; break :blk head; @@ -768,28 +798,30 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { closed_bool.* = true; self.disown(conn); }, - .websocket => |ptr| { - if (comptime WSH == httpz.DummyWebsocketHandler) { - std.debug.print("Your httpz handler must have a `WebsocketHandler` declaration. This must be the same type passed to `httpz.upgradeWebsocket`. 
Closing the connection.\n", .{}); - closed_bool.* = true; - conn.close(); - self.disown(conn); - continue; - } - - self.http_conn_pool.release(http_conn); - - const hc: *ws.HandlerConn(WSH) = @ptrCast(@alignCast(ptr)); - conn.protocol = .{ .websocket = hc }; - - loop.switchToOneShot(conn) catch { - metrics.internalError(); - closed_bool.* = true; - conn.close(); - self.disown(conn); - continue; - }; - }, + .websocket => unreachable, + // @ZIG016 + // .websocket => |ptr| { + // if (comptime WSH == httpz.DummyWebsocketHandler) { + // std.debug.print("Your httpz handler must have a `WebsocketHandler` declaration. This must be the same type passed to `httpz.upgradeWebsocket`. Closing the connection.\n", .{}); + // closed_bool.* = true; + // conn.close(); + // self.disown(conn); + // continue; + // } + + // self.http_conn_pool.release(http_conn); + + // const hc: *ws.HandlerConn(WSH) = @ptrCast(@alignCast(ptr)); + // conn.protocol = .{ .websocket = hc }; + + // loop.switchToOneShot(conn) catch { + // metrics.internalError(); + // closed_bool.* = true; + // conn.close(); + // self.disown(conn); + // continue; + // }; + // }, .keepalive => unreachable, } } @@ -803,7 +835,8 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { pub fn processData(self: *Self, now: u32, conn: *Conn(WSH), thread_buf: []u8) void { switch (conn.protocol) { .http => |http_conn| self.processHTTPData(now, conn, thread_buf, http_conn), - .websocket => |hc| self.processWebsocketData(conn, thread_buf, hc), + // @ZIG016 + // .websocket => |hc| self.processWebsocketData(conn, thread_buf, hc), } } @@ -835,7 +868,7 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { // If we don't do this here, then you'd get a segfault if // the signal cleared the connetion, and then in recv we'd // try to call conn.getState() after the signal. 
- posix.close(http_conn.stream.handle); + posix.close(http_conn.stream.socket.handle); }, .websocket, .disown => {}, } @@ -843,31 +876,33 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { self.loop.signal() catch |err| log.err("failed to signal worker: {}", .{err}); } - pub fn processWebsocketData(self: *Self, conn: *Conn(WSH), thread_buf: []u8, hc: *ws.HandlerConn(WSH)) void { - defer conn.releaseProcessing(); - - var ws_conn = &hc.conn; - const success = self.websocket.worker.dataAvailable(hc, thread_buf); - if (success == false) { - ws_conn.close(.{ .code = 4997, .reason = "wsz" }) catch {}; - self.websocket.cleanupConn(hc); - } else if (ws_conn.isClosed()) { - self.websocket.cleanupConn(hc); - } else { - self.loop.rearmRead(conn) catch |err| { - log.debug("({f}) failed to add read event monitor: {}", .{ ws_conn.address, err }); - ws_conn.close(.{ .code = 4998, .reason = "wsz" }) catch {}; - self.websocket.cleanupConn(hc); - }; - } - } + // @ZIG016 + // pub fn processWebsocketData(self: *Self, conn: *Conn(WSH), thread_buf: []u8, hc: *ws.HandlerConn(WSH)) void { + // defer conn.releaseProcessing(); + + // var ws_conn = &hc.conn; + // const success = self.websocket.worker.dataAvailable(hc, thread_buf); + // if (success == false) { + // ws_conn.close(.{ .code = 4997, .reason = "wsz" }) catch {}; + // self.websocket.cleanupConn(hc); + // } else if (ws_conn.isClosed()) { + // self.websocket.cleanupConn(hc); + // } else { + // self.loop.rearmRead(conn) catch |err| { + // log.debug("({f}) failed to add read event monitor: {}", .{ ws_conn.address, err }); + // ws_conn.close(.{ .code = 4998, .reason = "wsz" }) catch {}; + // self.websocket.cleanupConn(hc); + // }; + // } + // } fn disown(self: *Self, conn: *Conn(WSH)) void { + const io = self.io; const http_conn = conn.protocol.http; switch (http_conn._state) { .request => self.request_list.remove(conn), - .handover => self.handover_list.remove(conn), - .keepalive => self.keepalive_list.remove(conn), + 
.handover => self.handover_list.remove(io, conn), + .keepalive => self.keepalive_list.remove(io, conn), .active => unreachable, } self.len -= 1; @@ -879,10 +914,11 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { fn prepareToWait(self: *Self, now: u32) struct { bool, ?i32 } { const request_timed_out, const request_count, const request_timeout = collectTimedOut(&self.request_list, now); + const io = self.io; const keepalive_timed_out, const keepalive_count, const keepalive_timeout = blk: { const list = &self.keepalive_list; - list.mut.lock(); - defer list.mut.unlock(); + list.mut.lockUncancelable(io); + defer list.mut.unlock(io); break :blk collectTimedOut(&list.inner, now); }; @@ -950,14 +986,15 @@ pub fn NonBlocking(comptime S: type, comptime WSH: type) type { while (conn) |c| { conn = c.next; const http_conn = c.protocol.http; - posix.close(http_conn.stream.handle); + posix.close(http_conn.stream.socket.handle); http_conn.deinit(allocator); } } fn shutdownConcurrentList(self: *Self, list: *ConcurrentList(Conn(WSH))) void { - list.mut.lock(); - defer list.mut.unlock(); + const io = self.io; + list.mut.lockUncancelable(io); + defer list.mut.unlock(io); self.shutdownList(&list.inner); } @@ -1010,19 +1047,19 @@ pub fn List(comptime T: type) type { pub fn ConcurrentList(comptime T: type) type { return struct { inner: List(T) = .{}, - mut: Thread.Mutex = .{}, + mut: Io.Mutex = .init, const Self = @This(); - pub fn insert(self: *Self, node: *T) void { - self.mut.lock(); - defer self.mut.unlock(); + pub fn insert(self: *Self, io: Io, node: *T) void { + self.mut.lockUncancelable(io); + defer self.mut.unlock(io); self.inner.insert(node); } - pub fn remove(self: *Self, node: *T) void { - self.mut.lock(); - defer self.mut.unlock(); + pub fn remove(self: *Self, io: Io, node: *T) void { + self.mut.lockUncancelable(io); + defer self.mut.unlock(io); self.inner.remove(node); } }; @@ -1222,7 +1259,7 @@ fn EPoll(comptime WSH: type) type { .data = .{ .ptr = 2 }, 
.events = linux.EPOLL.IN, }; - try std.posix.epoll_ctl(self.fd, linux.EPOLL.CTL_ADD, self.close_fd, &event); + try posix.epoll_ctl(self.fd, linux.EPOLL.CTL_ADD, self.close_fd, &event); } { @@ -1230,7 +1267,7 @@ fn EPoll(comptime WSH: type) type { .data = .{ .ptr = 1 }, .events = linux.EPOLL.IN | linux.EPOLL.ET, }; - try std.posix.epoll_ctl(self.fd, linux.EPOLL.CTL_ADD, self.event_fd, &event); + try posix.epoll_ctl(self.fd, linux.EPOLL.CTL_ADD, self.event_fd, &event); } } @@ -1250,11 +1287,11 @@ fn EPoll(comptime WSH: type) type { fn monitorAccept(self: *Self, fd: posix.fd_t) !void { var event = linux.epoll_event{ .events = linux.EPOLL.IN | linux.EPOLL.EXCLUSIVE, .data = .{ .ptr = 0 } }; - return std.posix.epoll_ctl(self.fd, linux.EPOLL.CTL_ADD, fd, &event); + return posix.epoll_ctl(self.fd, linux.EPOLL.CTL_ADD, fd, &event); } fn pauseAccept(self: *Self, fd: posix.fd_t) !void { - return std.posix.epoll_ctl(self.fd, linux.EPOLL.CTL_DEL, fd, null); + return posix.epoll_ctl(self.fd, linux.EPOLL.CTL_DEL, fd, null); } fn monitorRead(self: *Self, conn: *Conn(WSH)) !void { @@ -1333,14 +1370,15 @@ fn Event(comptime WSH: type) type { // There's some shared logic between the NonBlocking and Blocking workers. // Whatever we can de-duplicate, goes here. const HTTPConnPool = struct { - mut: Thread.Mutex, + io: Io, + mut: Io.Mutex, conns: []*HTTPConn, available: usize, allocator: Allocator, config: *const Config, buffer_pool: *BufferPool, retain_allocated_bytes: usize, - http_mem_pool_mut: Thread.Mutex, + http_mem_pool_mut: Io.Mutex, http_mem_pool: std.heap.MemoryPool(HTTPConn), // we erase the type because we don't want Conn, and therefore Request and @@ -1354,14 +1392,14 @@ const HTTPConnPool = struct { // thread), we store the loop's FD which is more opaque. 
loop: i32, - fn init(allocator: Allocator, buffer_pool: *BufferPool, websocket: *anyopaque, loop: i32, config: *const Config) !HTTPConnPool { + fn init(io: Io, allocator: Allocator, buffer_pool: *BufferPool, websocket: *anyopaque, loop: i32, config: *const Config) !HTTPConnPool { const min = config.workers.min_conn orelse @min(config.workers.max_conn orelse 64, 64); var conns = try allocator.alloc(*HTTPConn, min); errdefer allocator.free(conns); - var http_mem_pool = std.heap.MemoryPool(HTTPConn).init(allocator); - errdefer http_mem_pool.deinit(); + var http_mem_pool: std.heap.MemoryPool(HTTPConn) = .empty; + errdefer http_mem_pool.deinit(allocator); var initialized: usize = 0; errdefer { @@ -1371,15 +1409,16 @@ const HTTPConnPool = struct { } for (0..min) |i| { - const conn = try http_mem_pool.create(); - conn.* = try HTTPConn.init(allocator, buffer_pool, websocket, loop, config); + const conn = try http_mem_pool.create(allocator); + conn.* = try HTTPConn.init(io, allocator, buffer_pool, websocket, loop, config); conns[i] = conn; initialized += 1; } return .{ - .mut = .{}, + .io = io, + .mut = .init, .loop = loop, .conns = conns, .config = config, @@ -1388,7 +1427,7 @@ const HTTPConnPool = struct { .allocator = allocator, .buffer_pool = buffer_pool, .http_mem_pool = http_mem_pool, - .http_mem_pool_mut = .{}, + .http_mem_pool_mut = .init, .retain_allocated_bytes = config.workers.retain_allocated_bytes orelse 4096, }; } @@ -1401,10 +1440,11 @@ const HTTPConnPool = struct { conn.deinit(allocator); } allocator.free(self.conns); - self.http_mem_pool.deinit(); + self.http_mem_pool.deinit(allocator); } fn acquire(self: *HTTPConnPool) !*HTTPConn { + const io = self.io; const conns = self.conns; self.lock(); @@ -1412,16 +1452,16 @@ const HTTPConnPool = struct { if (available == 0) { self.unlock(); - self.http_mem_pool_mut.lock(); - const conn = try self.http_mem_pool.create(); - self.http_mem_pool_mut.unlock(); + try self.http_mem_pool_mut.lock(io); + const conn = try 
self.http_mem_pool.create(self.allocator); + self.http_mem_pool_mut.unlock(io); errdefer { - self.http_mem_pool_mut.lock(); + self.http_mem_pool_mut.lockUncancelable(io); self.http_mem_pool.destroy(conn); - self.http_mem_pool_mut.unlock(); + self.http_mem_pool_mut.unlock(io); } - conn.* = try HTTPConn.init(self.allocator, self.buffer_pool, self.websocket, self.loop, self.config); + conn.* = try HTTPConn.init(io, self.allocator, self.buffer_pool, self.websocket, self.loop, self.config); return conn; } @@ -1434,16 +1474,16 @@ const HTTPConnPool = struct { fn release(self: *HTTPConnPool, conn: *HTTPConn) void { const conns = self.conns; - self.lock(); const available = self.available; if (available == conns.len) { self.unlock(); conn.deinit(self.allocator); - self.http_mem_pool_mut.lock(); + const io = self.io; + self.http_mem_pool_mut.lockUncancelable(io); self.http_mem_pool.destroy(conn); - self.http_mem_pool_mut.unlock(); + self.http_mem_pool_mut.unlock(io); return; } @@ -1455,14 +1495,14 @@ const HTTPConnPool = struct { // don't need thread safety in nonblocking fn lock(self: *HTTPConnPool) void { if (comptime httpz.blockingMode()) { - self.mut.lock(); + self.mut.lockUncancelable(self.io); } } // don't need thread safety in nonblocking fn unlock(self: *HTTPConnPool) void { if (comptime httpz.blockingMode()) { - self.mut.unlock(); + self.mut.unlock(self.io); } } }; @@ -1471,7 +1511,8 @@ pub fn Conn(comptime WSH: type) type { return struct { protocol: union(enum) { http: *HTTPConn, - websocket: *ws.HandlerConn(WSH), + // @ZIG016 + // websocket: *ws.HandlerConn(WSH), }, // Node in a List(WSH). List is [obviously] intrusive. 
@@ -1498,15 +1539,17 @@ pub fn Conn(comptime WSH: type) type { fn close(self: *Self) void { switch (self.protocol) { - .http => |http_conn| posix.close(http_conn.stream.handle), - .websocket => |hc| hc.conn.close(.{}) catch {}, + .http => |http_conn| posix.close(http_conn.stream.socket.handle), + // @ZIG016 + // .websocket => |hc| hc.conn.close(.{}) catch {}, } } pub fn getSocket(self: Self) posix.fd_t { return switch (self.protocol) { - .http => |hc| hc.stream.handle, - .websocket => |hc| hc.socket, + .http => |hc| hc.stream.socket.handle, + // @ZIG016 + // .websocket => |hc| hc.socket, }; } @@ -1566,10 +1609,12 @@ pub const HTTPConn = struct { nonblocking, }; + io: Io, + // can be concurrently accessed, use getState _state: State, - _mut: Thread.Mutex, + _mut: Io.Mutex, _io_mode: IOMode, @@ -1581,8 +1626,8 @@ pub const HTTPConn = struct { // number of requests made on this connection (within a keepalive session) request_count: u64, - stream: net.Stream, - address: net.Address, + stream: Io.net.Stream, + address: Io.net.IpAddress, socket_flags: usize, // Data needed to parse a request. This contains pre-allocated memory, e.g. @@ -1618,7 +1663,7 @@ pub const HTTPConn = struct { // thread), we store the loop's FD which is more opaque. 
loop: i32, - fn init(allocator: Allocator, buffer_pool: *BufferPool, ws_worker: *anyopaque, loop: i32, config: *const Config) !HTTPConn { + fn init(io: Io, allocator: Allocator, buffer_pool: *BufferPool, ws_worker: *anyopaque, loop: i32, config: *const Config) !HTTPConn { const conn_arena = try allocator.create(std.heap.ArenaAllocator); errdefer allocator.destroy(conn_arena); @@ -1632,8 +1677,9 @@ pub const HTTPConn = struct { errdefer req_arena.deinit(); return .{ + .io = io, .timeout = 0, - ._mut = .{}, + ._mut = .init, ._state = .request, .handover = .unknown, .stream = undefined, @@ -1682,7 +1728,7 @@ pub const HTTPConn = struct { } const loop = self.loop; - const socket = self.stream.handle; + const socket = self.stream.socket.handle; switch (comptime loopType()) { .kqueue => { _ = try posix.kevent(loop, &.{ @@ -1712,43 +1758,47 @@ pub const HTTPConn = struct { } pub fn writeAll(self: *HTTPConn, data: []const u8) !void { - const socket = self.stream.handle; - - var i: usize = 0; - var blocking = false; - - while (i < data.len) { - const n = posix.write(socket, data[i..]) catch |err| switch (err) { - error.WouldBlock => { - try self.blockingMode(); - blocking = true; - continue; - }, - else => return err, - }; - - // shouldn't be posssible on a correct posix implementation - // but let's assert to make sure - std.debug.assert(n != 0); - i += n; - } + var writer = self.stream.writer(self.io, &.{}); + try writer.interface.writeAll(data); + // ZIG016 would block + + // var i: usize = 0; + // var blocking = false; + + // while (i < data.len) { + // const remaining = + // const n = posix.system.write(socket, data[i..]) catch |err| switch (err) { + // error.WouldBlock => { + // try self.blockingMode(); + // blocking = true; + // continue; + // }, + // else => return err, + // }; + + // // shouldn't be posssible on a correct posix implementation + // // but let's assert to make sure + // std.debug.assert(n != 0); + // i += n; + // } } pub fn writeAllIOVec(self: 
*HTTPConn, vec: [][]const u8) !void { var buf: [4096]u8 = undefined; - var writer = self.stream.writer(&buf); + var writer = self.stream.writer(self.io, &buf); var i: usize = 0; while (true) { var n = writer.interface.writeVec(vec[i..]) catch |err| { - if (writer.err) |socket_err| { - switch (socket_err) { - error.WouldBlock => { - try self.blockingMode(); - continue; - }, - else => return err, - } - } + // ZIG016 + // if (writer.err) |socket_err| { + // switch (socket_err) { + // error.WouldBlock => { + // try self.blockingMode(); + // continue; + // }, + // else => return err, + // } + // } return err; }; while (n >= vec[i].len) { @@ -1771,7 +1821,7 @@ pub const HTTPConn = struct { if (self._io_mode == .blocking) { return; } - _ = try posix.fcntl(self.stream.handle, posix.F.SETFL, self.socket_flags & ~@as(u32, @bitCast(posix.O{ .NONBLOCK = true }))); + _ = try posix.fcntl(self.stream.socket.handle, posix.F.SETFL, self.socket_flags & ~@as(u32, @bitCast(posix.O{ .NONBLOCK = true }))); self._io_mode = .blocking; } @@ -1784,21 +1834,16 @@ pub const HTTPConn = struct { if (self._io_mode == .nonblocking) { return; } - _ = try posix.fcntl(self.stream.handle, posix.F.SETFL, self.socket_flags); + _ = try posix.fcntl(self.stream.socket.handle, posix.F.SETFL, self.socket_flags); self._io_mode = .nonblocking; } }; -pub fn timestamp(clamp: u32) u32 { - if (comptime @hasDecl(posix, "CLOCK") == false or posix.CLOCK == void) { - const value: u32 = @intCast(std.time.timestamp()); - return if (value <= clamp) return clamp + 1 else value; - } - const ts = posix.clock_gettime(posix.CLOCK.MONOTONIC) catch unreachable; - return @intCast(ts.sec); +pub fn timestamp(io: Io) u32 { + return @intCast(Io.Timestamp.now(io, .awake).toSeconds()); } -fn initializeBufferPool(allocator: Allocator, config: *const Config) !*BufferPool { +fn initializeBufferPool(io: Io, allocator: Allocator, config: *const Config) !*BufferPool { const large_buffer_count = config.workers.large_buffer_count orelse blk: 
{ if (comptime httpz.blockingMode()) { break :blk config.threadPoolCount(); @@ -1811,7 +1856,7 @@ fn initializeBufferPool(allocator: Allocator, config: *const Config) !*BufferPoo const buffer_pool = try allocator.create(BufferPool); errdefer allocator.destroy(buffer_pool); - buffer_pool.* = try BufferPool.init(allocator, large_buffer_count, large_buffer_size); + buffer_pool.* = try BufferPool.init(io, allocator, large_buffer_count, large_buffer_size); return buffer_pool; } @@ -1821,7 +1866,7 @@ fn initializeBufferPool(allocator: Allocator, config: *const Config) !*BufferPoo // This function ensures that both Blocking and NonBlocking workers handle these // errors with the same response fn requestError(conn: *HTTPConn, err: anyerror) !void { - const handle = conn.stream.handle; + const handle = conn.stream.socket.handle; switch (err) { error.HeaderTooBig => { metrics.invalidRequest(); @@ -1869,10 +1914,10 @@ fn loopType() LoopType { const t = @import("t.zig"); test "HTTPConnPool" { - var bp = try BufferPool.init(t.allocator, 2, 64); + var bp = try BufferPool.init(t.io, t.allocator, 2, 64); defer bp.deinit(); - var p = try HTTPConnPool.init(t.allocator, &bp, undefined, 0, &.{ + var p = try HTTPConnPool.init(t.io, t.allocator, &bp, undefined, 0, &.{ .workers = .{ .min_conn = 2 }, .request = .{ .buffer_size = 64 }, }); diff --git a/test_runner.zig b/test_runner.zig index d9c5e62..668f0eb 100644 --- a/test_runner.zig +++ b/test_runner.zig @@ -1,14 +1,5 @@ -// in your build.zig, you can specify a custom test runner: -// const tests = b.addTest(.{ -// .root_module = $MODULE_BEING_TESTED, -// .test_runner = .{ .path = b.path("test_runner.zig"), .mode = .simple }, -// }); - -pub const std_options = std.Options{ .log_scope_levels = &[_]std.log.ScopeLevel{ - .{ .scope = .websocket, .level = .warn }, -} }; - const std = @import("std"); +const Io = std.Io; const builtin = @import("builtin"); const Allocator = std.mem.Allocator; @@ -18,16 +9,23 @@ const BORDER = "=" ** 80; // 
use in custom panic handler var current_test: ?[]const u8 = null; -pub fn main() !void { +pub fn main(init: std.process.Init) !void { var mem: [8192]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&mem); const allocator = fba.allocator(); - const env = Env.init(allocator); - defer env.deinit(allocator); + const env = Env.init(init.environ_map); + + std.testing.io_instance = .init(init.gpa, .{ + .argv0 = .init(init.minimal.args), + .environ = init.minimal.environ, + }); + defer std.testing.io_instance.deinit(); - var slowest = SlowTracker.init(allocator, 5); + const io = std.testing.io; + + var slowest = SlowTracker.init(allocator, io, 5); defer slowest.deinit(); var pass: usize = 0; @@ -52,7 +50,7 @@ pub fn main() !void { } var status = Status.pass; - slowest.startTiming(); + slowest.startTiming(io); const is_unnamed_test = isUnnamed(t); if (env.filter) |f| { @@ -78,7 +76,7 @@ pub fn main() !void { const result = t.func(); current_test = null; - const ns_taken = slowest.endTiming(friendly_name); + const ns_taken = slowest.endTiming(io, friendly_name); if (std.testing.allocator_instance.deinit() == .leak) { leak += 1; @@ -97,7 +95,7 @@ pub fn main() !void { fail += 1; Printer.status(.fail, "\n{s}\n\"{s}\" - {s}\n{s}\n", .{ BORDER, friendly_name, @errorName(err), BORDER }); if (@errorReturnTrace()) |trace| { - std.debug.dumpStackTrace(trace.*); + std.debug.dumpErrorReturnTrace(trace); } if (env.fail_first) { break; @@ -134,7 +132,7 @@ pub fn main() !void { Printer.fmt("\n", .{}); try slowest.display(); Printer.fmt("\n", .{}); - std.posix.exit(if (fail == 0) 0 else 1); + std.process.exit(if (fail == 0) 0 else 1); } const Printer = struct { @@ -161,19 +159,22 @@ const Status = enum { }; const SlowTracker = struct { - const SlowestQueue = std.PriorityDequeue(TestInfo, void, compareTiming); max: usize, slowest: SlowestQueue, - timer: std.time.Timer, + start: Io.Timestamp, + allocator: Allocator, + + const SlowestQueue = std.PriorityDequeue(TestInfo, void, 
compareTiming); - fn init(allocator: Allocator, count: u32) SlowTracker { - const timer = std.time.Timer.start() catch @panic("failed to start timer"); - var slowest = SlowestQueue.init(allocator, {}); - slowest.ensureTotalCapacity(count) catch @panic("OOM"); + fn init(allocator: Allocator, io: Io, count: u32) SlowTracker { + const timestamp = Io.Clock.awake.now(io); + var slowest: SlowestQueue = .empty; + slowest.ensureTotalCapacity(allocator, count) catch @panic("OOM"); return .{ .max = count, - .timer = timer, + .start = timestamp, .slowest = slowest, + .allocator = allocator, }; } @@ -182,24 +183,26 @@ const SlowTracker = struct { name: []const u8, }; - fn deinit(self: SlowTracker) void { - self.slowest.deinit(); + fn deinit(self: *SlowTracker) void { + self.slowest.deinit(self.allocator); } - fn startTiming(self: *SlowTracker) void { - self.timer.reset(); + fn startTiming(self: *SlowTracker, io: Io) void { + self.start = Io.Clock.awake.now(io); } - fn endTiming(self: *SlowTracker, test_name: []const u8) u64 { - var timer = self.timer; - const ns = timer.lap(); + fn endTiming(self: *SlowTracker, io: Io, test_name: []const u8) u64 { + const timestamp = Io.Clock.awake.now(io); + const start = self.start; + self.start = timestamp; + const ns: u64 = @intCast(start.durationTo(timestamp).toNanoseconds()); var slowest = &self.slowest; if (slowest.count() < self.max) { // Capacity is fixed to the # of slow tests we want to track // If we've tracked fewer tests than this capacity, than always add - slowest.add(TestInfo{ .ns = ns, .name = test_name }) catch @panic("failed to track test timing"); + slowest.push(self.allocator, TestInfo{ .ns = ns, .name = test_name }) catch @panic("failed to track test timing"); return ns; } @@ -214,8 +217,8 @@ const SlowTracker = struct { } // the previous fastest of our slow tests, has been pushed off. 
- _ = slowest.removeMin(); - slowest.add(TestInfo{ .ns = ns, .name = test_name }) catch @panic("failed to track test timing"); + _ = slowest.popMin(); + slowest.push(self.allocator, TestInfo{ .ns = ns, .name = test_name }) catch @panic("failed to track test timing"); return ns; } @@ -223,7 +226,7 @@ const SlowTracker = struct { var slowest = self.slowest; const count = slowest.count(); Printer.fmt("Slowest {d} test{s}: \n", .{ count, if (count != 1) "s" else "" }); - while (slowest.removeMinOrNull()) |info| { + while (slowest.popMin()) |info| { const ms = @as(f64, @floatFromInt(info.ns)) / 1_000_000.0; Printer.fmt(" {d:.2}ms\t{s}\n", .{ ms, info.name }); } @@ -240,34 +243,20 @@ const Env = struct { fail_first: bool, filter: ?[]const u8, - fn init(allocator: Allocator) Env { + fn init(map: *const std.process.Environ.Map) Env { return .{ - .verbose = readEnvBool(allocator, "TEST_VERBOSE", true), - .fail_first = readEnvBool(allocator, "TEST_FAIL_FIRST", false), - .filter = readEnv(allocator, "TEST_FILTER"), + .verbose = readEnvBool(map, "TEST_VERBOSE", true), + .fail_first = readEnvBool(map, "TEST_FAIL_FIRST", false), + .filter = readEnv(map, "TEST_FILTER"), }; } - fn deinit(self: Env, allocator: Allocator) void { - if (self.filter) |f| { - allocator.free(f); - } - } - - fn readEnv(allocator: Allocator, key: []const u8) ?[]const u8 { - const v = std.process.getEnvVarOwned(allocator, key) catch |err| { - if (err == error.EnvironmentVariableNotFound) { - return null; - } - std.log.warn("failed to get env var {s} due to err {}", .{ key, err }); - return null; - }; - return v; + fn readEnv(map: *const std.process.Environ.Map, key: []const u8) ?[]const u8 { + return map.get(key); } - fn readEnvBool(allocator: Allocator, key: []const u8, deflt: bool) bool { - const value = readEnv(allocator, key) orelse return deflt; - defer allocator.free(value); + fn readEnvBool(map: *const std.process.Environ.Map, key: []const u8, deflt: bool) bool { + const value = readEnv(map, key) 
orelse return deflt; return std.ascii.eqlIgnoreCase(value, "true"); } };