text
stringlengths 32
314k
| url
stringlengths 93
243
|
---|---|
const std = @import("std");
const builtin = @import("builtin");
// Re-export the C dependencies (mpack, raylib, blip_buf) under this
// namespace. The emscripten HTML5 header is only pulled in for web builds.
pub usingnamespace @cImport({
    @cInclude("mpack.h");
    @cInclude("raylib.h");
    @cInclude("blip_buf.h");
    if (builtin.os.tag == .emscripten) {
        @cInclude("emscripten/html5.h");
    }
});
/// Notification codes forwarded to the frontend via `notifyEvent`.
/// The explicit integer values appear to form an external protocol
/// (they are passed raw through `notifyEventFn`), so they should not
/// be renumbered — TODO confirm against the frontend's handler.
pub const Event = enum(u32) {
    core_loaded = 1,
    game_loaded = 2,
    load_failed = 92,
    game_resumed = 3,
    game_paused = 4,
    game_reset = 5,
    region_changed = 8,
    core_unloaded = 9,
    slot_changed = 11,
    state_saved = 12,
    state_loaded = 13,
    battery_persisted = 14,
    ratio_changed = 22,
    zoom_changed = 23,
    has_battery = 73,
    region_support = 79,
    // Sent once per frame; excluded from logging in notifyEvent.
    end_frame = 99,
};
// Callback registered by the frontend; receives (event code, value).
var notifyEventFn: ?*const fn (u32, u32) void = null;

/// Forward an event to the registered frontend callback (if any),
/// logging every notification except the per-frame `end_frame` spam.
pub fn notifyEvent(event: Event, value: u32) void {
    if (event != .end_frame) {
        std.debug.print("Notification {s} with value {d} sent.\n", .{ @tagName(event), value });
    }
    if (notifyEventFn) |f| f(@intFromEnum(event), value);
}
/// Exported entry point for the host to register its event callback by id.
/// NOTE(review): converting a u32 id straight to a function pointer assumes
/// function "pointers" are 32-bit table indices (true on wasm32/emscripten,
/// which this file conditionally targets) — not portable to 64-bit native.
export fn setNotifyEventFn(fid: u32) void {
    notifyEventFn = @ptrFromInt(fid);
}
| https://raw.githubusercontent.com/iury/retrobob/396a46988d76e8c1e61ffc3f8d1972519d377b64/packages/engine/src/c.zig |
//! # Linux IO Interface
//!
//! The linux IO interface for kvcache uses IO Uring,
//! which is a relatively standardized method of
//! Async IO (designed to replace linux AIO).
//!
//! ## Features
//!
//! This implementation takes advantage of a few
//! extensions to IO Uring, which is why the minimum
//! kernel version supported is around ~5.19. However,
//! I recommend you use kernel 6.1 or higher for some
//! key improvements/features introduced to IO Uring.
//!
//! The required features are as follows:
//! - IORING_FEAT_CQE_SKIP: Don't generate CQEs for
//! certain SQE calls, saves space on the CQE.
//! (available since kernel 5.17)
//!
//! - IORING_FEAT_NODROP: The kernel will *almost* never
//! drop CQEs, instead queueing them internally and
//! returning -EBUSY when the internal buffer is full.
//! (available since kernel 5.19)
//!
//! - Ring Mapped Buffers: This is not a new feature
//! per se, but rather an improvement on an already
//! existing feature (registered buffers). It adds
//! a ring abstraction which makes buffer management
//! *very* seamless...
//! (available since kernel 5.19)
//!
//! ## Docs
//!
//! Below are some great resources on IO Uring, which I
//! recommend you take a look at before reading this code...
//!
//! (of course, the best resources are the manpages)
//!
//! - [Unixism IO Uring guide](https://unixism.net/loti/)
//! - [Awesome IO Uring](https://github.com/noteflakes/awesome-io_uring)
//!
const std = @import("std");
const builtin = @import("builtin");
// Resolve the application root module (provides the global `running` flag).
const root = switch (builtin.is_test) {
    false => @import("root"),
    //
    // In testing environments, "root" refers to the
    // zig test runner (in `lib/test_runner.zig`).
    //
    // Therefore, we must import `main.zig` manually...
    //
    true => @import("../main.zig"),
};
const posix = std.posix;
const linux = std.os.linux;

// Number of submission-queue entries requested from the kernel.
const MAX_ENTRIES = 64;
/// OS-level I/O handle (a file descriptor on linux).
pub const Handle = linux.fd_t;

/// Tag describing which operation a submitted SQE represents.
pub const IoType = enum(u64) {
    nop = 0,
    accept,
    read,
    write,
    close,
    recv,
};

/// Completion result handed to an operation's callback.
pub const Result = struct {
    // Raw CQE result code (negative values indicate errors; see the
    // assertions in testingHandler below).
    res: i32,
    flags: u32,
    // Only populated for .recv completions that used a provided buffer.
    buffer: ?[]u8,
};

/// Per-operation context. Its address is stored in the SQE's `user_data`
/// so the completion loop in `Engine.enter` can recover the callback.
pub const Context = struct {
    type: IoType,
    handler: *const fn (kind: IoType, ctx: ?*anyopaque, result: Result) anyerror!void,
    userptr: ?*anyopaque,
};
pub const Engine = struct {
    // The io_uring instance backing all I/O for this engine.
    ring: linux.IoUring,
    // Ring-mapped provided-buffer group used by recv operations.
    buffers: linux.IoUring.BufferGroup,
    allocator: std.mem.Allocator,
    // Backing memory for `buffers`, registered with the kernel.
    rawbufs: []u8,
    // Number of SQEs queued locally but not yet submitted.
    pending: usize,

    /// Setup an IO Engine, creating internal structures
    /// and registering them with the kernel.
    pub fn init(allocator: std.mem.Allocator) !Engine {
        ensureKernelVersion(.{ .major = 5, .minor = 19, .patch = 0 }) catch {
            std.debug.panic("kvcache: kernel is too old (min kernel supported is 5.19)\n", .{});
        };
        // NOTE(review): IORING_SETUP_DEFER_TASKRUN is documented as needing
        // a newer kernel (6.1+) than the 5.19 checked above — confirm the
        // real minimum against the kernels actually targeted.
        const flags: u32 = linux.IORING_SETUP_DEFER_TASKRUN | linux.IORING_SETUP_SINGLE_ISSUER;
        var engine = Engine{
            .ring = try linux.IoUring.init(MAX_ENTRIES, flags),
            // 512 KiB total, carved below into 1024 buffers of 512 bytes.
            .rawbufs = try allocator.alloc(u8, 512 * 1024),
            .buffers = undefined,
            .allocator = allocator,
            .pending = 0,
        };
        engine.buffers = try linux.IoUring.BufferGroup.init(
            &engine.ring,
            0, // buffer group id (referenced again in do_recv)
            engine.rawbufs,
            512, // bytes per buffer
            1024, // buffer count
        );
        return engine;
    }

    /// Shutdown and destroy the IO Engine, releasing the ring and the
    /// raw buffer storage. (Contexts are owned by callers, not freed here.)
    pub fn deinit(self: *Engine) void {
        self.ring.deinit();
        self.allocator.free(self.rawbufs);
    }

    /// Internal helper function for ensuring kernel is
    /// newer than the `min` version specified.
    fn ensureKernelVersion(min: std.SemanticVersion) !void {
        var uts: linux.utsname = undefined;
        const res = linux.uname(&uts);
        switch (linux.E.init(res)) {
            .SUCCESS => {},
            else => |errno| return posix.unexpectedErrno(errno),
        }
        // The release field is NUL-terminated; trim before parsing.
        const release = std.mem.sliceTo(&uts.release, 0);
        var current = try std.SemanticVersion.parse(release);
        current.pre = null; // don't check pre field
        if (min.order(current) == .gt) return error.SystemOutdated;
    }

    /// Internal helper for getting SQE entries, flushing the
    /// SQE queue until we are able to nab an entry.
    fn getEntry(self: *Engine) !*linux.io_uring_sqe {
        const entry = self.ring.get_sqe() catch |err| retry: {
            if (err != error.SubmissionQueueFull)
                return err;
            // Queue is full: push what we have to the kernel and keep
            // retrying until a submission slot frees up.
            _ = try self.ring.submit();
            self.pending = 0;
            var sqe = self.ring.get_sqe();
            while (sqe == error.SubmissionQueueFull) {
                _ = try self.ring.submit();
                self.pending = 0;
                sqe = self.ring.get_sqe();
            }
            break :retry sqe;
        };
        self.pending += 1;
        return entry;
    }

    /// Flushes the SQE queue by entering the kernel. When `wait` is set,
    /// also blocks until at least one completion is available.
    pub fn flush(self: *Engine, comptime wait: bool) !void {
        if (wait) {
            self.pending -= try self.ring.submit_and_wait(1);
        } else {
            self.pending -= try self.ring.submit();
        }
    }

    ///
    /// Enters the main runloop for the IO Uring, which looks
    /// like this:
    ///
    /// Enter kernel to flush -> loop over completions -> start over
    ///
    /// Exits on errors thrown (or signals received), otherwise waits
    /// in-kernel for ops to complete...
    ///
    pub fn enter(self: *Engine) !void {
        while (root.running) {
            try self.flush(true);
            while (self.ring.cq_ready() > 0 and root.running) {
                const cqe = try self.ring.copy_cqe();
                // user_data == 0 means no Context was attached
                // (fire-and-forget op), so there is nobody to notify.
                if (cqe.user_data > 0) {
                    var context: *Context = @ptrFromInt(cqe.user_data);
                    if (context.type == .recv) {
                        try context.handler(context.type, context.userptr, Result{
                            .res = cqe.res,
                            .flags = cqe.flags,
                            // A failed buffer lookup surfaces as null.
                            .buffer = self.buffers.get_cqe(cqe) catch null,
                        });
                        // Return the provided buffer to the kernel's ring.
                        self.buffers.put_cqe(cqe) catch {};
                    } else {
                        try context.handler(context.type, context.userptr, Result{
                            .res = cqe.res,
                            .flags = cqe.flags,
                            .buffer = null,
                        });
                    }
                }
            }
        }
    }

    // --------------------------------
    // Operators
    // --------------------------------

    /// Queue a no-op SQE. Successful completions generate no CQE
    /// (IOSQE_CQE_SKIP_SUCCESS), so `ctx` is optional.
    pub fn do_nop(self: *Engine, ctx: ?*Context) !void {
        var sqe = try self.getEntry();
        sqe.prep_nop();
        // don't generate CQEs on success
        sqe.flags |= linux.IOSQE_CQE_SKIP_SUCCESS;
        if (ctx) |c| {
            c.type = .nop;
            sqe.user_data = @intFromPtr(c);
        }
    }

    /// Queue a read from `handle` at `offset` into `buffer`;
    /// `ctx.handler` receives the completion.
    pub fn do_read(self: *Engine, handle: Handle, buffer: []u8, offset: u64, ctx: *Context) !void {
        var sqe = try self.getEntry();
        sqe.prep_read(handle, buffer, offset);
        ctx.type = .read;
        sqe.user_data = @intFromPtr(ctx);
    }

    /// Queue a recv on `handle` using a kernel-selected buffer from the
    /// registered ring-mapped group (delivered via `Result.buffer`).
    pub fn do_recv(self: *Engine, handle: Handle, flags: u32, ctx: *Context) !void {
        var sqe = try self.getEntry();
        sqe.prep_rw(.RECV, handle, 0, 0, 0);
        sqe.rw_flags = flags;
        // Let the kernel pick a buffer from our group instead of
        // supplying one up front.
        sqe.flags |= linux.IOSQE_BUFFER_SELECT;
        sqe.buf_index = self.buffers.group_id;
        ctx.type = .recv;
        sqe.user_data = @intFromPtr(ctx);
    }

    /// Queue a write of `buffer` to `handle` at `offset`.
    /// Successful completions generate no CQE, so `ctx` is optional.
    pub fn do_write(self: *Engine, handle: Handle, buffer: []const u8, offset: u64, ctx: ?*Context) !void {
        var sqe = try self.getEntry();
        sqe.prep_write(handle, buffer, offset);
        // don't generate CQEs on success
        sqe.flags |= linux.IOSQE_CQE_SKIP_SUCCESS;
        if (ctx) |c| {
            c.type = .write;
            sqe.user_data = @intFromPtr(c);
        }
    }

    /// Queue a close of `handle`.
    /// Successful completions generate no CQE, so `ctx` is optional.
    pub fn do_close(self: *Engine, handle: Handle, ctx: ?*Context) !void {
        var sqe = try self.getEntry();
        sqe.prep_close(@intCast(handle));
        // don't generate CQEs on success
        sqe.flags |= linux.IOSQE_CQE_SKIP_SUCCESS;
        if (ctx) |c| {
            c.type = .close;
            sqe.user_data = @intFromPtr(c);
        }
    }

    /// Queue an accept on the listening socket `handle`; the result
    /// (new connection fd, or a negative error) arrives in `Result.res`.
    pub fn do_accept(self: *Engine, handle: Handle, ctx: *Context) !void {
        var sqe = try self.getEntry();
        sqe.prep_accept(handle, null, null, 0);
        sqe.user_data = @intFromPtr(ctx);
        ctx.type = .accept;
    }
};
/// Registers the process's signal handlers with the kernel:
/// - SIGPIPE is ignored (so broken-pipe writes surface as errors instead
///   of killing the process).
/// - SIGTERM / SIGINT request a clean shutdown by clearing `root.running`.
pub fn attachSigListener() !void {
    const ignore_act = posix.Sigaction{
        .handler = .{ .handler = posix.SIG.IGN },
        .mask = posix.empty_sigset,
        .flags = 0,
    };
    try posix.sigaction(posix.SIG.PIPE, &ignore_act, null);

    const shutdown_act = posix.Sigaction{
        .handler = .{
            .handler = struct {
                fn wrapper(sig: c_int) callconv(.C) void {
                    std.debug.print("kvcache: caught signal {d}, shutting down...\n", .{sig});
                    root.running = false;
                }
            }.wrapper,
        },
        .mask = posix.empty_sigset,
        .flags = 0,
    };
    try posix.sigaction(posix.SIG.TERM, &shutdown_act, null);
    try posix.sigaction(posix.SIG.INT, &shutdown_act, null);
}
/// Creates a non-blocking IPv6 (dual-stack) server socket, binds it to
/// `port` on all interfaces, and starts listening on it.
pub fn createSocket(port: u16) !Handle {
    const fd = try posix.socket(posix.AF.INET6, posix.SOCK.STREAM | posix.SOCK.NONBLOCK, 0);
    errdefer posix.close(fd);

    const enabled = std.mem.toBytes(@as(c_int, 1));
    const disabled = std.mem.toBytes(@as(c_int, 0));

    // allow for multiple listeners on 1 thread
    try posix.setsockopt(fd, posix.SOL.SOCKET, posix.SO.REUSEPORT, &enabled);

    // enable ipv4 as well (turn V6ONLY off)
    try posix.setsockopt(fd, posix.IPPROTO.IPV6, linux.IPV6.V6ONLY, &disabled);

    const addr = try std.net.Address.parseIp6("::0", port);
    try posix.bind(fd, &addr.any, @sizeOf(posix.sockaddr.in6));
    try posix.listen(fd, std.math.maxInt(u31));
    return fd;
}
/// Shared completion callback for the test blocks below: validates the
/// CQE result for each op kind, then stops the engine's run loop.
fn testingHandler(kind: IoType, ctx: ?*anyopaque, result: Result) anyerror!void {
    _ = ctx;
    if (!builtin.is_test) {
        @compileError("attempt to call 'testingHandler()' outside of zig test!");
    }
    switch (kind) {
        .accept => {
            // res < 0 means error
            try std.testing.expect(result.res >= 0);
        },
        .read, .write => {
            // the tests below transfer exactly 512 bytes
            try std.testing.expect(result.res == 512);
        },
        // CQEs shouldn't be generated for other ops (and when they are,
        // its an error regardless)
        else => unreachable,
    }
    root.running = false;
}
// Smoke test: a nop op should queue and submit without errors. No CQE is
// expected (nop skips success completions), so we only flush.
test "uring nop" {
    var engine = try Engine.init(std.testing.allocator);
    defer engine.deinit();
    var context = Context{
        .type = undefined,
        .userptr = null,
        .handler = testingHandler,
    };
    try engine.do_nop(&context);
    // don't wait for a CQE since nop doesn't generate them...
    try engine.flush(false);
}
// Round-trip test: write 512 bytes to a scratch file, read them back, and
// close the handle — all through the uring. testingHandler verifies the
// read completion and flips root.running to exit enter().
test "uring read/write/close" {
    var engine = try Engine.init(std.testing.allocator);
    defer engine.deinit();
    const raw_handle = try std.fs.cwd().createFile("testing.txt", .{ .read = true });
    const handle = raw_handle.handle;
    const write_buffer = try std.testing.allocator.alloc(u8, 512);
    const read_buffer = try std.testing.allocator.alloc(u8, 512);
    defer std.testing.allocator.free(write_buffer);
    defer std.testing.allocator.free(read_buffer);
    @memset(write_buffer, 0xE9);
    var read_context = Context{
        .type = undefined,
        .userptr = null,
        .handler = testingHandler,
    };
    // write and close are fire-and-forget (no context, CQEs skipped);
    // only the read drives the completion loop.
    try engine.do_write(handle, write_buffer, 0, null);
    try engine.do_read(handle, read_buffer, 0, &read_context);
    try engine.do_close(handle, null);
    try engine.enter();
    // re-enable the engine for the next test...
    root.running = true;
    try std.testing.expectEqualStrings(write_buffer, read_buffer);
    try std.fs.cwd().deleteFile("testing.txt");
}
//
// Connect using socat like this:
//
// ```bash
// $ socat - TCP-CONNECT:localhost:8284
// ```
//
// NOTE(review): this test is interactive — it blocks in enter() until a
// client connects (see the socat instructions above), so it will hang in
// unattended CI runs. Consider gating it or connecting programmatically.
test "uring accept multishot" {
    var engine = try Engine.init(std.testing.allocator);
    defer engine.deinit();
    const socket = try createSocket(8284);
    var context = Context{
        .type = undefined,
        .userptr = null,
        .handler = testingHandler,
    };
    try engine.do_accept(socket, &context);
    try engine.do_close(socket, null);
    try engine.enter();
}
| https://raw.githubusercontent.com/cleanbaja/kvcache/4ee7d3e33f1367ad5a128b1a583a81f2095cf1e7/src/io/linux.zig |
const std = @import("std");
const math = std.math;
const print = std.debug.print;
const panic = std.debug.panic;
const rl = @import("raylib");
// Board dimensions (in cells) and pixel-layout constants.
const grid_width: i32 = 10;
const grid_height: i32 = 20;
const grid_cell_size: i32 = 32;
// Outer margin around the playfield, in pixels.
const margin: i32 = 20;
// The side panel is wide enough to preview any piece (5 cells).
const piece_preview_width = grid_cell_size * 5;
const screen_width: i32 = (grid_width * grid_cell_size) + (margin * 2) + piece_preview_width + margin;
const screen_height: i32 = (grid_height * grid_cell_size) + margin;
/// Build a fully opaque raylib color from 8-bit RGB components.
fn rgb(r: u8, g: u8, b: u8) rl.Color {
    return rl.Color{ .r = r, .g = g, .b = b, .a = 255 };
}
/// Build a raylib color from 8-bit RGB components plus an explicit alpha.
fn rgba(r: u8, g: u8, b: u8, a: u8) rl.Color {
    return rl.Color{ .r = r, .g = g, .b = b, .a = a };
}
// UI palette.
const BackgroundColor = rgb(29, 38, 57);
const BackgroundHiLightColor = rgb(39, 48, 67);
const BorderColor = rgb(3, 2, 1);

/// Top-level game state machine.
const State = enum {
    StartScreen,
    Play,
    Pause,
    GameOver,
};

/// A coordinate pair in grid cells (not pixels); also used for the
/// per-piece cell offsets in get_squares.
const Pos = struct {
    x: i32,
    y: i32,
};

// Shorthand constructor for Pos, used heavily by the piece tables.
fn p(x: i32, y: i32) Pos {
    return Pos{ .x = x, .y = y };
}
/// The seven tetromino shapes.
const Type = enum {
    Cube,
    Long,
    Z,
    S,
    T,
    L,
    J,
};
/// The fill color used when drawing each piece type.
fn piece_color(t: Type) rl.Color {
    return switch (t) {
        .Cube => rgb(241, 211, 90),
        .Long => rgb(83, 179, 219),
        .Z => rgb(233, 154, 56),
        .S => rgb(96, 150, 71),
        .T => rgb(195, 58, 47),
        .L => rgb(92, 205, 162),
        .J => rgb(231, 111, 124),
    };
}
// Pick a uniformly random piece type from the RNG.
fn random_type(rng: *std.rand.DefaultPrng) Type {
    return rng.random().enumValue(Type);
}

/// The four rotation states; rotate() advances A -> B -> C -> D -> A.
const Rotation = enum { A, B, C, D };

/// One grid cell: its display color and whether a locked piece occupies it.
const Square = struct {
    color: rl.Color,
    active: bool,
};
const Level = struct {
    // Frames between gravity steps; smaller means faster falling.
    tick_rate: i32,
    value: usize,

    /// Map the total number of pieces spawned so far to a difficulty
    /// level and its gravity tick rate.
    pub fn get_level(piece_count: usize) Level {
        // Threshold table, ascending: the first entry whose upper bound
        // covers piece_count decides the level.
        const thresholds = [_]struct { upto: usize, level: Level }{
            .{ .upto = 10, .level = .{ .value = 1, .tick_rate = 30 } },
            .{ .upto = 25, .level = .{ .value = 2, .tick_rate = 30 } },
            .{ .upto = 50, .level = .{ .value = 3, .tick_rate = 25 } },
            .{ .upto = 100, .level = .{ .value = 4, .tick_rate = 25 } },
            .{ .upto = 150, .level = .{ .value = 5, .tick_rate = 20 } },
            .{ .upto = 200, .level = .{ .value = 6, .tick_rate = 20 } },
            .{ .upto = 250, .level = .{ .value = 7, .tick_rate = 15 } },
            .{ .upto = 300, .level = .{ .value = 8, .tick_rate = 15 } }, // score ~100
            .{ .upto = 350, .level = .{ .value = 9, .tick_rate = 12 } },
            .{ .upto = 400, .level = .{ .value = 10, .tick_rate = 12 } },
            .{ .upto = 450, .level = .{ .value = 11, .tick_rate = 10 } },
            .{ .upto = 500, .level = .{ .value = 12, .tick_rate = 10 } },
            .{ .upto = 600, .level = .{ .value = 13, .tick_rate = 8 } },
            .{ .upto = 700, .level = .{ .value = 14, .tick_rate = 8 } },
            .{ .upto = 800, .level = .{ .value = 15, .tick_rate = 6 } },
        };
        for (thresholds) |entry| {
            if (piece_count <= entry.upto) return entry.level;
        }
        // Past the last threshold the speed caps out.
        return .{ .value = 16, .tick_rate = 5 };
    }
};
const Game = struct {
// Locked cells of the playfield, row-major (grid_width * grid_height).
grid: [grid_width * grid_height]Square,
// Cell offsets of the falling piece, relative to its origin (x, y).
squares: [4]Pos,
rng: std.rand.DefaultPrng,
state: State,
// Current piece type and the one shown in the preview panel.
t: Type,
next_type: Type,
r: Rotation,
// Frames elapsed since the last gravity step.
tick: i32,
// Frame countdowns used to debounce held keys / input after game over.
freeze_down: i32,
freeze_input: i32,
freeze_space: i32,
// Grid position of the falling piece's origin.
x: i32,
y: i32,
score: usize,
piece_count: usize,
// Rows cleared since the last score update (multi-row clears score more).
rows_this_tick: usize,
level: Level,
/// Create a fresh game on the start screen: empty board, OS-seeded RNG,
/// and the first two pieces already rolled.
pub fn init() Game {
    // grid: every cell starts empty
    var grid: [grid_width * grid_height]Square = undefined;
    for (&grid) |*item| {
        item.* = Square{ .color = rl.Color.white, .active = false };
    }
    // rng: seeded from the OS CSPRNG
    var buf: [8]u8 = undefined;
    std.crypto.random.bytes(buf[0..]);
    const seed = std.mem.readIntLittle(u64, buf[0..8]);
    var r = std.rand.DefaultPrng.init(seed);
    // squares: roll the first piece and the preview piece
    const t = random_type(&r);
    const next_type = random_type(&r);
    var squares = Game.get_squares(t, Rotation.A);
    return Game{
        .grid = grid,
        .squares = squares,
        .rng = r,
        .state = State.StartScreen,
        .t = t,
        .next_type = next_type,
        .r = Rotation.A,
        .tick = 0,
        .freeze_down = 0,
        .freeze_input = 0,
        .freeze_space = 0,
        // Spawn near the horizontal center, at the top row.
        .x = 4,
        .y = 0,
        .score = 0,
        .piece_count = 1,
        .rows_this_tick = 0,
        .level = Level.get_level(1),
    };
}
/// Returns true if any key was pressed this frame; used by the start and
/// game-over screens to continue.
fn anykey(self: *Game) bool {
    _ = self;
    const k = rl.getKeyPressed();
    if (k != rl.KeyboardKey.key_null) {
        return true;
    } else {
        // Seems like some keys don't register with GetKeyPressed, so
        // checking for them manually here.
        // (Fix: the original checked key_down twice and never key_up,
        // so releasing the rotate key was missed.)
        if (rl.isKeyReleased(rl.KeyboardKey.key_down) or
            rl.isKeyReleased(rl.KeyboardKey.key_left) or
            rl.isKeyReleased(rl.KeyboardKey.key_right) or
            rl.isKeyReleased(rl.KeyboardKey.key_up) or
            rl.isKeyReleased(rl.KeyboardKey.key_enter))
        {
            return true;
        }
    }
    return false;
}
/// Per-frame update: drives the state machine, handles input while
/// playing, and applies gravity every `level.tick_rate` frames.
pub fn update(self: *Game) void {
    switch (self.state) {
        State.StartScreen => {
            if (self.anykey()) {
                // NOTE(review): freeze_space is armed here but never read
                // anywhere in this file — confirm whether it is still needed.
                self.freeze_space = 30;
                self.state = State.Play;
            }
        },
        State.GameOver => {
            // freeze_input prevents key mashing from instantly skipping
            // the game-over screen (see piece_reset).
            if (self.anykey() and self.freeze_input == 0) {
                self.reset();
                self.piece_reset();
                self.tick = 0;
                // TODO: Add High Score screen.
                self.score = 0;
                self.rows_this_tick = 0;
                self.state = State.Play;
            }
        },
        State.Play => {
            if (rl.isKeyReleased(rl.KeyboardKey.key_escape)) {
                self.state = State.Pause;
                return;
            }
            // Horizontal movement (arrows or WASD).
            if (rl.isKeyPressed(rl.KeyboardKey.key_right) or rl.isKeyPressed(rl.KeyboardKey.key_d)) {
                self.move_right();
            }
            if (rl.isKeyPressed(rl.KeyboardKey.key_left) or rl.isKeyPressed(rl.KeyboardKey.key_a)) {
                self.move_left();
            }
            // Soft drop: holding down repeats; once a piece lands,
            // freeze_down throttles input so the next piece isn't
            // immediately slammed too.
            if (rl.isKeyDown(rl.KeyboardKey.key_down) or rl.isKeyPressed(rl.KeyboardKey.key_s)) {
                if (self.freeze_down <= 0) {
                    const moved = self.move_down();
                    if (!moved) {
                        self.freeze_down = 60;
                    }
                }
            }
            if (rl.isKeyReleased(rl.KeyboardKey.key_down) or rl.isKeyReleased(rl.KeyboardKey.key_s)) {
                self.freeze_down = 0;
            }
            // Hard drop.
            if (rl.isKeyPressed(rl.KeyboardKey.key_right_control) or rl.isKeyPressed(rl.KeyboardKey.key_space)) {
                const moved = self.drop();
                if (!moved) {
                    self.freeze_down = 60;
                }
            }
            if (rl.isKeyPressed(rl.KeyboardKey.key_up) or rl.isKeyPressed(rl.KeyboardKey.key_w)) {
                self.rotate();
            }
            // Gravity step + row clearing + scoring.
            if (self.tick >= self.level.tick_rate) {
                _ = self.move_down();
                self.remove_full_rows();
                self.tick = 0;
                self.update_score();
                self.update_level();
            }
            self.tick += 1;
        },
        State.Pause => {
            if (rl.isKeyReleased(rl.KeyboardKey.key_escape)) {
                self.state = State.Play;
            }
        },
    }
    // Tick down the debounce counters once per frame.
    if (self.freeze_down > 0) {
        self.freeze_down -= 1;
    }
    if (self.freeze_space > 0) {
        self.freeze_space -= 1;
    }
    if (self.freeze_input > 0) {
        self.freeze_input -= 1;
    }
}
/// Award points for the rows cleared this tick: clearing several rows at
/// once is worth disproportionately more (0/1/3/5/8 for 0-4 rows).
fn update_score(self: *Game) void {
    const bonuses = [_]usize{ 0, 1, 3, 5, 8 };
    const cleared = self.rows_this_tick;
    // More than 4 rows shouldn't be possible; award a sentinel bonus.
    self.score += if (cleared < bonuses.len) bonuses[cleared] else 100;
    self.rows_this_tick = 0;
}
/// Re-derive the difficulty level from the piece count, logging whenever
/// the level actually changes.
fn update_level(self: *Game) void {
    const old_level = self.level;
    self.level = Level.get_level(self.piece_count);
    if (old_level.value != self.level.value) {
        print("level: {}, speed: {}\n", .{ self.level.value, self.level.tick_rate });
    }
}
/// True if every cell in row `y` is occupied. Out-of-range rows are
/// reported and treated as not full.
fn row_is_full(self: Game, y: i32) bool {
    // Fix: the bound was previously checked against self.grid.len (the
    // total cell count, 200) instead of grid_height (the row count, 20),
    // so genuinely out-of-range rows slipped through to the scan below.
    if (y >= grid_height or y < 0) {
        print("Row index out of bounds {}", .{y});
        return false;
    }
    var x: i32 = 0;
    return while (x < grid_width) : (x += 1) {
        if (!self.get_active(x, y)) {
            break false;
        }
    } else true;
}
/// Copy row y1 into row y2 (both active flags and colors). A negative y1
/// represents an empty row above the grid and clears y2 instead.
fn copy_row(self: *Game, y1: i32, y2: i32) void {
    if (y1 == y2) {
        print("Invalid copy, {} must not equal {}\n", .{ y1, y2 });
        return;
    }
    if (y2 < 0 or y1 >= grid_height or y2 >= grid_height) {
        print("Invalid copy, {} or {} is out of bounds\n", .{ y1, y2 });
        return;
    }
    var x: i32 = 0;
    while (x < grid_width) : (x += 1) {
        if (y1 < 0) {
            // Source is above the grid: clear the destination cell.
            self.set_active_state(x, y2, false);
            self.set_grid_color(x, y2, rl.Color.white);
        } else {
            self.set_active_state(x, y2, self.get_active(x, y1));
            self.set_grid_color(x, y2, self.get_grid_color(x, y1));
        }
    }
}
/// Shift rows downward after a clear: copies src_y into dst_y, then every
/// row above src_y down by the same distance, walking to the top of the
/// grid. Rows sourced from above the grid (y1 < 0) come out empty.
fn copy_rows(self: *Game, src_y: i32, dst_y: i32) void {
    // Starting at dest row, copy everything above, but starting at dest
    if (src_y >= dst_y) {
        print("{} must be less than {}\n", .{ src_y, dst_y });
        return;
    }
    var y1: i32 = src_y;
    var y2: i32 = dst_y;
    while (y2 > -1) {
        self.copy_row(y1, y2);
        y1 -= 1;
        y2 -= 1;
    }
}
/// Scan the board bottom-up, collapsing any full rows and counting them in
/// rows_this_tick (consumed later by update_score).
pub fn remove_full_rows(self: *Game) void {
    // Remove full rows
    var y: i32 = grid_height - 1;
    // cp_y trails y, skipping past consecutive full rows so a multi-row
    // clear is collapsed in a single copy.
    var cp_y: i32 = y;
    while (y > -1) {
        if (self.row_is_full(y)) {
            while (self.row_is_full(cp_y)) {
                self.rows_this_tick += 1;
                cp_y -= 1;
            }
            self.copy_rows(cp_y, y);
            cp_y = y;
        }
        y -= 1;
        cp_y -= 1;
    }
}
/// Whether the cell at (x, y) is occupied. Coordinates outside the left,
/// right, or bottom edges read as solid (walls/floor); above the grid
/// (y < 0) reads as empty so pieces can spawn partially off-screen.
pub fn get_active(self: Game, x: i32, y: i32) bool {
    // Fix: the x >= grid_width guard was missing; without it the flat
    // index below silently wraps into the first cells of the next row.
    if (x < 0 or x >= grid_width) {
        return true;
    }
    if (y < 0) {
        return false;
    }
    const index: usize = @as(usize, @intCast(y)) * @as(usize, @intCast(grid_width)) + @as(usize, @intCast(x));
    if (index >= self.grid.len) {
        return true;
    }
    return self.grid[index].active;
}
/// Color of the cell at (x, y). Out-of-range cells return light gray
/// (walls/floor); above the grid returns white.
pub fn get_grid_color(self: Game, x: i32, y: i32) rl.Color {
    // Fix: the x >= grid_width guard was missing; without it the flat
    // index below silently wraps into the next row (see get_active).
    if (x < 0 or x >= grid_width) {
        return rl.Color.light_gray;
    }
    if (y < 0) {
        return rl.Color.white;
    }
    const index: usize = @as(usize, @intCast(y)) * @as(usize, @intCast(grid_width)) + @as(usize, @intCast(x));
    if (index >= self.grid.len) {
        return rl.Color.light_gray;
    }
    return self.grid[index].color;
}
/// Mark the cell at (x, y) occupied or free; silently ignores coordinates
/// outside the grid.
pub fn set_active_state(self: *Game, x: i32, y: i32, state: bool) void {
    // Fix: added the x >= grid_width guard; previously such an x wrapped
    // the flat index into the next row and corrupted the wrong cell.
    if (x < 0 or x >= grid_width or y < 0) {
        return;
    }
    const index: usize = @as(usize, @intCast(y)) * @as(usize, @intCast(grid_width)) + @as(usize, @intCast(x));
    if (index >= self.grid.len) {
        return;
    }
    self.grid[index].active = state;
}
/// Set the color of the cell at (x, y); silently ignores coordinates
/// outside the grid.
fn set_grid_color(self: *Game, x: i32, y: i32, color: rl.Color) void {
    // Fix: added the x >= grid_width guard; previously such an x wrapped
    // the flat index into the next row and recolored the wrong cell.
    if (x < 0 or x >= grid_width or y < 0) {
        return;
    }
    const index: usize = @as(usize, @intCast(y)) * @as(usize, @intCast(grid_width)) + @as(usize, @intCast(x));
    if (index >= self.grid.len) {
        return;
    }
    self.grid[index].color = color;
}
/// Clear the whole board and restart the piece counter for a new game.
pub fn reset(self: *Game) void {
    self.piece_count = 0;
    for (&self.grid) |*cell| {
        cell.* = Square{ .color = rl.Color.white, .active = false };
    }
}
/// Spawn the next piece at the top of the board and roll a new preview
/// piece. If the spawn position already collides, the board has topped
/// out: switch to GameOver and briefly freeze input.
pub fn piece_reset(self: *Game) void {
    self.piece_count += 1;
    self.y = 0;
    self.x = 4;
    self.t = self.next_type;
    self.next_type = random_type(&self.rng);
    self.r = Rotation.A;
    self.squares = Game.get_squares(self.t, self.r);
    if (self.check_collision(self.squares)) {
        self.state = State.GameOver;
        self.freeze_input = 60; // Keep player from mashing keys at end and skipping the game over screen.
    }
}
/// Color used when locking the current piece into the grid.
/// NOTE(review): currently identical to piece_color(self.t); consider
/// delegating if the two must stay in sync.
fn piece_shade(self: *Game) rl.Color {
    return switch (self.t) {
        .Cube => rgb(241, 211, 90),
        .Long => rgb(83, 179, 219),
        .Z => rgb(233, 154, 56),
        .S => rgb(96, 150, 71),
        .T => rgb(195, 58, 47),
        .L => rgb(92, 205, 162),
        .J => rgb(231, 111, 124),
    };
}
/// Translucent variant of the piece color (alpha 175), used for the
/// landing-preview "ghost".
fn piece_ghost(self: *Game) rl.Color {
    return switch (self.t) {
        .Cube => rgba(241, 211, 90, 175),
        .Long => rgba(83, 179, 219, 175),
        .Z => rgba(233, 154, 56, 175),
        .S => rgba(96, 150, 71, 175),
        .T => rgba(195, 58, 47, 175),
        .L => rgba(92, 205, 162, 175),
        .J => rgba(231, 111, 124, 175),
    };
}
/// Render one frame: the board, the falling piece with its ghost, the
/// score, the next-piece preview, and any state overlay text.
pub fn draw(self: *Game) void {
    rl.clearBackground(BorderColor);
    // Board cells: locked pieces keep their stored color; empty cells get
    // a two-tone inset look (highlight border + darker interior).
    var y: i32 = 0;
    var upper_left_y: i32 = 0;
    while (y < grid_height) {
        var x: i32 = 0;
        var upper_left_x: i32 = margin;
        while (x < grid_width) {
            if (self.get_active(x, y)) {
                rl.drawRectangle(upper_left_x, upper_left_y, grid_cell_size, grid_cell_size, self.get_grid_color(x, y));
            } else {
                rl.drawRectangle(upper_left_x, upper_left_y, grid_cell_size, grid_cell_size, BackgroundHiLightColor);
                rl.drawRectangle(upper_left_x + 1, upper_left_y + 1, grid_cell_size - 2, grid_cell_size - 2, BackgroundColor);
            }
            upper_left_x += grid_cell_size;
            x += 1;
        }
        upper_left_y += grid_cell_size;
        y += 1;
    }
    if (self.state != State.StartScreen) {
        // Draw falling piece and ghost
        const ghost_square_offset = self.get_ghost_square_offset();
        for (self.squares) |pos| {
            // Draw ghost
            rl.drawRectangle((self.x + pos.x) * grid_cell_size + margin, (self.y + ghost_square_offset + pos.y) * grid_cell_size, grid_cell_size, grid_cell_size, self.piece_ghost());
            // Draw shape
            rl.drawRectangle((self.x + pos.x) * grid_cell_size + margin, (self.y + pos.y) * grid_cell_size, grid_cell_size, grid_cell_size, piece_color(self.t));
        }
    }
    // Side panel starts to the right of the playfield.
    const right_bar = margin + (10 * grid_cell_size) + margin;
    var draw_height = margin; // Track where to start drawing the next item
    // Draw score
    rl.drawText("Score:", right_bar, draw_height, 20, rl.Color.light_gray);
    draw_height += 20;
    var score_text_buf = [_]u8{0} ** 21; // 20 for max usize + 1 for null byte
    const score_text = std.fmt.bufPrintZ(score_text_buf[0..], "{}", .{self.score}) catch unreachable;
    rl.drawText(score_text, right_bar, draw_height, 20, rl.Color.light_gray);
    draw_height += 20;
    // Draw next piece
    draw_height += margin;
    rl.drawRectangle(right_bar, draw_height, piece_preview_width, piece_preview_width, BackgroundColor);
    if (self.state != State.StartScreen) {
        // The Long piece is previewed in its B (vertical) rotation.
        const next_squares = switch (self.next_type) {
            Type.Long => Game.get_squares(self.next_type, Rotation.B),
            else => Game.get_squares(self.next_type, Rotation.A),
        };
        // Compute the piece's bounding box so it can be centered.
        var max_x: i32 = 0;
        var min_x: i32 = 0;
        var max_y: i32 = 0;
        var min_y: i32 = 0;
        for (next_squares) |pos| {
            min_x = @min(min_x, pos.x);
            max_x = @max(max_x, pos.x);
            min_y = @min(min_y, pos.y);
            max_y = @max(max_y, pos.y);
        }
        const height = (max_y - min_y + 1) * grid_cell_size;
        const width = (max_x - min_x + 1) * grid_cell_size;
        // offset to add to each local pos so that 0,0 is in upper left.
        const x_offset = min_x * -1;
        const y_offset = min_y * -1;
        const x_pixel_offset = @divFloor(piece_preview_width - width, 2);
        const y_pixel_offset = @divFloor(piece_preview_width - height, 2);
        for (next_squares) |pos| {
            rl.drawRectangle(right_bar + x_pixel_offset + ((pos.x + x_offset) * grid_cell_size), draw_height + y_pixel_offset + ((pos.y + y_offset) * grid_cell_size), grid_cell_size, grid_cell_size, piece_color(self.next_type));
        }
    }
    draw_height += piece_preview_width;
    if (self.state == State.Pause or self.state == State.GameOver or self.state == State.StartScreen) {
        // Partially transparent background to give text better contrast if drawn over the grid
        rl.drawRectangle(0, (screen_height / 2) - 70, screen_width, 110, rgba(3, 2, 1, 100));
    }
    if (self.state == State.Pause) {
        rl.drawText("PAUSED", 75, screen_height / 2 - 50, 50, rl.Color.white);
        rl.drawText("Press ESCAPE to unpause", 45, screen_height / 2, 20, rl.Color.light_gray);
    }
    if (self.state == State.GameOver) {
        rl.drawText("GAME OVER", 45, screen_height / 2 - 50, 42, rl.Color.white);
        rl.drawText("Press any key to continue", 41, screen_height / 2, 20, rl.Color.light_gray);
    }
    if (self.state == State.StartScreen) {
        rl.drawText("TETRIS", 75, screen_height / 2 - 50, 50, rl.Color.white);
        rl.drawText("Press any key to continue", 41, screen_height / 2, 20, rl.Color.light_gray);
    }
}
/// Lookup table of the four cell offsets (relative to the piece origin)
/// for every piece type and rotation. Offsets may be negative; bounds are
/// enforced by the collision checks, not here.
pub fn get_squares(t: Type, r: Rotation) [4]Pos {
    return switch (t) {
        // The cube looks the same in every rotation.
        Type.Cube => [_]Pos{
            p(0, 0), p(1, 0), p(0, 1), p(1, 1),
        },
        // Long, Z and S only have two distinct orientations.
        Type.Long => switch (r) {
            Rotation.A, Rotation.C => [_]Pos{
                p(-1, 0), p(0, 0), p(1, 0), p(2, 0),
            },
            Rotation.B, Rotation.D => [_]Pos{
                p(0, -1), p(0, 0), p(0, 1), p(0, 2),
            },
        },
        Type.Z => switch (r) {
            Rotation.A, Rotation.C => [_]Pos{
                p(-1, 0), p(0, 0), p(0, 1), p(1, 1),
            },
            Rotation.B, Rotation.D => [_]Pos{
                p(0, -1), p(-1, 0), p(0, 0), p(-1, 1),
            },
        },
        Type.S => switch (r) {
            Rotation.A, Rotation.C => [_]Pos{
                p(0, 0), p(1, 0), p(-1, 1), p(0, 1),
            },
            Rotation.B, Rotation.D => [_]Pos{
                p(0, -1), p(0, 0), p(1, 0), p(1, 1),
            },
        },
        Type.T => switch (r) {
            Rotation.A => [_]Pos{
                p(0, -1), p(-1, 0), p(0, 0), p(1, 0),
            },
            Rotation.B => [_]Pos{
                p(0, -1), p(0, 0), p(1, 0), p(0, 1),
            },
            Rotation.C => [_]Pos{
                p(-1, 0), p(0, 0), p(1, 0), p(0, 1),
            },
            Rotation.D => [_]Pos{
                p(0, -1), p(-1, 0), p(0, 0), p(0, 1),
            },
        },
        Type.L => switch (r) {
            Rotation.A => [_]Pos{
                p(0, -1), p(0, 0), p(0, 1), p(1, 1),
            },
            Rotation.B => [_]Pos{
                p(-1, 0), p(0, 0), p(1, 0), p(-1, 1),
            },
            Rotation.C => [_]Pos{
                p(-1, -1), p(0, -1), p(0, 0), p(0, 1),
            },
            Rotation.D => [_]Pos{
                p(1, -1), p(-1, 0), p(0, 0), p(1, 0),
            },
        },
        Type.J => switch (r) {
            Rotation.A => [_]Pos{
                p(0, -1), p(0, 0), p(-1, 1), p(0, 1),
            },
            Rotation.B => [_]Pos{
                p(-1, -1), p(-1, 0), p(0, 0), p(1, 0),
            },
            Rotation.C => [_]Pos{
                p(0, -1), p(1, -1), p(0, 0), p(0, 1),
            },
            Rotation.D => [_]Pos{
                p(-1, 0), p(0, 0), p(1, 0), p(1, 1),
            },
        },
    };
}
/// Number of rows the current piece can still fall before colliding;
/// used to position the translucent landing-preview ghost.
pub fn get_ghost_square_offset(self: *Game) i32 {
    var offset: i32 = 0;
    // Walk down to the first offset that collides, then step back one.
    while (!self.check_collision_offset(0, offset, self.squares)) : (offset += 1) {}
    return offset - 1;
}
/// Rotate the falling piece to its next rotation state, with a simple
/// wall-kick: if the rotated shape collides, try shifting sideways by up
/// to two columns before giving up (leaving the rotation unchanged).
pub fn rotate(self: *Game) void {
    const r = switch (self.r) {
        Rotation.A => Rotation.B,
        Rotation.B => Rotation.C,
        Rotation.C => Rotation.D,
        Rotation.D => Rotation.A,
    };
    const squares = Game.get_squares(self.t, r);
    if (self.check_collision(squares)) {
        // Try moving left or right by one or two squares. This helps when trying
        // to rotate when right next to the wall or another block. Esp noticable
        // on the 4x1 (Long) type.
        const x_offsets = [_]i32{ 1, -1, 2, -2 };
        for (x_offsets) |x_offset| {
            if (!self.check_collision_offset(x_offset, 0, squares)) {
                self.x += x_offset;
                self.squares = squares;
                self.r = r;
                return;
            }
        }
    } else {
        self.squares = squares;
        self.r = r;
    }
}
/// True if `squares`, placed at the piece's current origin, would overlap
/// a wall, the floor, or an occupied cell.
pub fn check_collision(self: *Game, squares: [4]Pos) bool {
    for (squares) |sq| {
        const gx = self.x + sq.x;
        const gy = self.y + sq.y;
        if (gx < 0 or gx >= grid_width or gy >= grid_height or self.get_active(gx, gy)) {
            return true;
        }
    }
    return false;
}
/// Like check_collision, but tests the shape displaced by
/// (offset_x, offset_y) from the current origin.
fn check_collision_offset(self: *Game, offset_x: i32, offset_y: i32, squares: [4]Pos) bool {
    for (squares) |sq| {
        const gx = self.x + sq.x + offset_x;
        const gy = self.y + sq.y + offset_y;
        if (gx < 0 or gx >= grid_width or gy >= grid_height or self.get_active(gx, gy)) {
            return true;
        }
    }
    return false;
}
/// Shift the falling piece one column to the right, if nothing blocks it.
pub fn move_right(self: *Game) void {
    for (self.squares) |sq| {
        const nx = self.x + sq.x + 1;
        const ny = self.y + sq.y;
        if (nx >= grid_width or self.get_active(nx, ny)) return;
    }
    self.x += 1;
}
/// Shift the falling piece one column to the left, if nothing blocks it.
pub fn move_left(self: *Game) void {
    for (self.squares) |sq| {
        const nx = self.x + sq.x - 1;
        const ny = self.y + sq.y;
        if (nx < 0 or self.get_active(nx, ny)) return;
    }
    self.x -= 1;
}
/// Whether the falling piece can descend one more row without hitting the
/// floor or a locked cell.
fn can_move_down(self: *Game) bool {
    for (self.squares) |sq| {
        const below_y = self.y + sq.y + 1;
        if (below_y >= grid_height or self.get_active(self.x + sq.x, below_y)) {
            return false;
        }
    }
    return true;
}
/// Hard drop: move the piece down as far as it will go. Returns true if
/// it moved at all; if it could not move, locks the piece in place and
/// spawns the next one (returning false), matching move_down's contract.
pub fn drop(self: *Game) bool {
    var moved = false;
    while (self.can_move_down()) {
        self.y += 1;
        moved = true;
    }
    if (moved) return true;
    // Could not move at all: delegate the landing (lock cells + spawn the
    // next piece) to move_down, which previously duplicated this logic.
    // can_move_down() is false here, so move_down takes its landing path
    // and returns false — identical behavior, single code path.
    return self.move_down();
}
/// Advance the piece one row. If it cannot fall, lock its cells into the
/// grid (active + color) and spawn the next piece via piece_reset.
/// Returns true only if the piece actually moved.
pub fn move_down(self: *Game) bool {
    if (self.can_move_down()) {
        self.y += 1;
        return true;
    } else {
        for (self.squares) |pos| {
            self.set_active_state(self.x + pos.x, self.y + pos.y, true);
            self.set_grid_color(self.x + pos.x, self.y + pos.y, self.piece_shade());
        }
        self.piece_reset();
        return false;
    }
}
};
/// Program entry point: create the window, configure raylib, and run the
/// frame loop (update + draw) until the window is closed.
pub fn main() anyerror!void {
    // Initialization
    var game = Game.init();
    rl.initWindow(screen_width, screen_height, "Tetris");
    defer rl.closeWindow();
    // Default is Escape, but we want to use that for pause instead
    rl.setExitKey(rl.KeyboardKey.key_f4);
    // Set the game to run at 60 frames-per-second
    rl.setTargetFPS(60);
    // Solves blurry font on high resolution displays
    rl.setTextureFilter(rl.getFontDefault().texture, @intFromEnum(rl.TextureFilter.texture_filter_point));
    // Main game loop
    while (!rl.windowShouldClose()) // Detect window close button or ESC key
    {
        game.update();
        rl.beginDrawing();
        game.draw();
        rl.endDrawing();
    }
}
| https://raw.githubusercontent.com/jlafayette/zig-tetris/9858d8d098d854d2d61190c3dd9a2b3842076d60/src/main.zig |
const std = @import("std");
const input_file = @embedFile("input.txt");
// I really should calculate these based on the amount of fall-off room
// required, but they are just hard-coded based on the input data.
const GRID_WIDTH = 500;
const GRID_HEIGHT = 160;
const GRID_WIDTH_OFFSET = 300; // All x coords are up around ~500, which wastes a lot of space in the array - this just slides everything forward
const DROP_X = 500 - GRID_WIDTH_OFFSET;
/// Advent of code - Day 14
///
/// Part 1 - Plot the rocks on a grid and then particle deposition sand until no more come to rest
/// Part 2 - Add a floor and perform the particle deposition until the sand blocks the opening
///
pub fn main() !void {
    const timer = std.time.Timer;
    var t = try timer.start();
    // Deepest rock row seen while parsing (max y); the part-2 floor sits
    // two rows below it.
    var highest_y: u32 = 0;
    var grid_1 = try buildStartingGrid(input_file[0..], &highest_y);
    const result_1 = sandSimulation(&grid_1, DROP_X);
    // Part 2 re-parses into a fresh grid, then adds a solid floor.
    var grid_2 = try buildStartingGrid(input_file[0..], &highest_y);
    //Add floor
    var x: u32 = 0;
    while (x < GRID_WIDTH) : (x += 1) {
        grid_2.set((highest_y + 2) * GRID_WIDTH + x);
    }
    const result_2 = sandSimulation(&grid_2, DROP_X);
    std.debug.print("Part 1: {}, Part 2: {} ms: {d:.5}\n", .{ result_1, result_2, @intToFloat(f64, t.read()) / 1000000.0 });
}
/// Parse the input of straight line segments and fill them in as rock in the grid.
/// Each path is formatted as `498,4 -> 498,6 -> 496,6`; consecutive points
/// share either an x or a y, so segments are always axis-aligned.
/// Also records the deepest rock row seen into `highest_y`.
fn buildStartingGrid(data: []const u8, highest_y: *u32) !std.StaticBitSet(GRID_WIDTH * GRID_HEIGHT) {
    var grid = std.StaticBitSet(GRID_WIDTH * GRID_HEIGHT).initEmpty();
    var line_it = std.mem.tokenize(u8, data, "\n");
    while (line_it.next()) |line| {
        var i: usize = 0;
        var j: usize = 0;
        // Previous point of this path (null before the first point).
        var last_x_opt: ?u32 = null;
        var last_y: u32 = undefined;
        while (true) {
            //Skip to the start of the next digit or finish if we reach the end
            // (isDigit is a helper defined elsewhere in this file)
            while (i < line.len and isDigit(line[i]) == false) : (i += 1) {}
            if (i >= line.len) {
                break;
            }
            j = i;
            //Read the X co-ord
            while (isDigit(line[j])) : (j += 1) {}
            const x = try std.fmt.parseUnsigned(u32, line[i..j], 10);
            i = j + 1;
            j = i;
            //Read the Y co-ord
            while (j < line.len and isDigit(line[j])) : (j += 1) {}
            const y = try std.fmt.parseUnsigned(u32, line[i..j], 10);
            i = j + 1;
            j = i;
            highest_y.* = std.math.max(highest_y.*, y);
            //Try fill in the line between points
            // Note: subtracting GRID_WIDTH_OFFSET from the flat index is
            // equivalent to shifting x left by the offset within a row.
            if (last_x_opt) |last_x| {
                if (last_y == y) {
                    //Fill x (horizontal segment)
                    var xmin = std.math.min(last_x, x);
                    var xmax = std.math.max(last_x, x);
                    while (xmin <= xmax) : (xmin += 1) {
                        const idx = (y * GRID_WIDTH + xmin) - GRID_WIDTH_OFFSET;
                        grid.set(idx);
                    }
                } else {
                    //Fill y (vertical segment)
                    var ymin = std.math.min(last_y, y);
                    var ymax = std.math.max(last_y, y);
                    while (ymin <= ymax) : (ymin += 1) {
                        const idx = (ymin * GRID_WIDTH + x) - GRID_WIDTH_OFFSET;
                        grid.set(idx);
                    }
                }
            }
            last_x_opt = x;
            last_y = y;
        }
    }
    return grid;
}
/// Deposit a particle of sand at 500,0 and have it come to rest and then deposit another
/// The next sand particle falls once the previous has come to rest
/// Sand falls down first, then down to the left and then down to the right
/// We stop the simulation once there is:
/// 1. no more sand coming to rest
/// 2. blocked the 500,0 opening
/// and return the number of deposits to get to those states
///
/// Rather than calculating the full path each time we store the path in a stack and pick up where the other one left of
///
/// `start_x` is the (offset-adjusted) drop column; `grid` bits mark occupied
/// cells (rock or settled sand) and are mutated in place.
fn sandSimulation(grid: *std.StaticBitSet(GRID_WIDTH * GRID_HEIGHT), start_x: u32) u32 {
    // Stack of flattened grid indices along the last particle's fall path.
    // stack[0] is always the drop point, so popping to 0 restarts from the top.
    var stack: [1000]u32 = undefined;
    var stack_head: u32 = 1;
    stack[0] = @intCast(u32, 0 * GRID_WIDTH + start_x);
    // Number of particles that have come to rest so far.
    var ticks: u32 = 0;
    // NOTE(review): `n` counts loop iterations but is never read - candidate
    // for removal.
    var n: u32 = 0;
    while (true) : (n += 1) {
        // Resume the new particle from the deepest still-open point of the
        // previous particle's path instead of re-tracing from the top.
        stack_head -= 1;
        const prev_idx = stack[stack_head];
        var y = prev_idx / GRID_WIDTH;
        var x = prev_idx % GRID_WIDTH;
        while (true) {
            //Try down first
            const y_down = y + 1;
            if (y_down >= GRID_HEIGHT) {
                // Fell off the bottom: no more sand can come to rest (part 1).
                return ticks;
            }
            const idx_down = y_down * GRID_WIDTH + x;
            if (grid.isSet(idx_down) == false) {
                y = y_down;
                stack_head += 1;
                stack[stack_head] = idx_down;
                continue;
            }
            //Next try down and to the left
            const x_left = x - 1;
            const idx_downleft = y_down * GRID_WIDTH + x_left;
            if (grid.isSet(idx_downleft) == false) {
                y = y_down;
                x = x_left;
                stack_head += 1;
                stack[stack_head] = idx_downleft;
                continue;
            }
            //Next try down and to the right
            const x_right = x + 1;
            const idx_downright = y_down * GRID_WIDTH + x_right;
            if (grid.isSet(idx_downright) == false) {
                y = y_down;
                x = x_right;
                stack_head += 1;
                stack[stack_head] = idx_downright;
                continue;
            }
            //Nowhere else to go - just come to rest and start the next deposit
            const idx = y * GRID_WIDTH + x;
            grid.set(idx);
            ticks += 1;
            if (x == start_x and y == 0) {
                //Blocked the opening
                // Sand settled at the drop point itself (part 2 end state).
                return ticks;
            }
            break;
        }
    }
    // NOTE(review): the outer loop only exits via the returns above, so this
    // line looks unreachable - confirm against the Zig version in use.
    return ticks;
}
/// Reports whether `c` is one of the ASCII characters '0' through '9'.
///
inline fn isDigit(c: u8) bool {
    return switch (c) {
        '0'...'9' => true,
        else => false,
    };
}
| https://raw.githubusercontent.com/RevDownie/AdventOfCodeSolutions/6bc8950f93d7b60088587809ba752a5640b74a01/2022/Day14/day14.zig |
// Locale metadata for the Faker "English (Bork)" pseudo-locale.
// Human-readable locale name.
pub const title = "English (Bork)";
// Locale identifier used for lookups (language + variant).
pub const code = "en_BORK";
pub const variant = "BORK";
pub const language = "en";
// Name of the locale in the locale's own language.
pub const endonym = "English (Bork)";
// Text direction: left-to-right.
pub const dir = "ltr";
// ISO 15924 script code (Latin).
pub const script = "Latn";
const std = @import("std");
const sdl = @cImport(@cInclude("SDL2/SDL.h"));
const mysdl = @import("./sdl.zig");
const CPU = @import("./cpu.zig").CPU;
const CPUCallback = @import("./cpu.zig").CPUCallback;
// ref: https://gist.github.com/wkjagt/9043907
// 6502 machine code blob executed by the emulated CPU in main(). Taken from
// the referenced gist; it reads the random byte this program writes at $FE
// and the last keypress written at $FF (see handle_user_input/Game.call),
// and draws into the video memory at $0200-$05FF. Do not edit by hand.
const game_code = [_]u8{
    0x20, 0x06, 0x06, 0x20, 0x38, 0x06, 0x20, 0x0d, 0x06, 0x20, 0x2a, 0x06, 0x60, 0xa9, 0x02, 0x85,
    0x02, 0xa9, 0x04, 0x85, 0x03, 0xa9, 0x11, 0x85, 0x10, 0xa9, 0x10, 0x85, 0x12, 0xa9, 0x0f, 0x85,
    0x14, 0xa9, 0x04, 0x85, 0x11, 0x85, 0x13, 0x85, 0x15, 0x60, 0xa5, 0xfe, 0x85, 0x00, 0xa5, 0xfe,
    0x29, 0x03, 0x18, 0x69, 0x02, 0x85, 0x01, 0x60, 0x20, 0x4d, 0x06, 0x20, 0x8d, 0x06, 0x20, 0xc3,
    0x06, 0x20, 0x19, 0x07, 0x20, 0x20, 0x07, 0x20, 0x2d, 0x07, 0x4c, 0x38, 0x06, 0xa5, 0xff, 0xc9,
    0x77, 0xf0, 0x0d, 0xc9, 0x64, 0xf0, 0x14, 0xc9, 0x73, 0xf0, 0x1b, 0xc9, 0x61, 0xf0, 0x22, 0x60,
    0xa9, 0x04, 0x24, 0x02, 0xd0, 0x26, 0xa9, 0x01, 0x85, 0x02, 0x60, 0xa9, 0x08, 0x24, 0x02, 0xd0,
    0x1b, 0xa9, 0x02, 0x85, 0x02, 0x60, 0xa9, 0x01, 0x24, 0x02, 0xd0, 0x10, 0xa9, 0x04, 0x85, 0x02,
    0x60, 0xa9, 0x02, 0x24, 0x02, 0xd0, 0x05, 0xa9, 0x08, 0x85, 0x02, 0x60, 0x60, 0x20, 0x94, 0x06,
    0x20, 0xa8, 0x06, 0x60, 0xa5, 0x00, 0xc5, 0x10, 0xd0, 0x0d, 0xa5, 0x01, 0xc5, 0x11, 0xd0, 0x07,
    0xe6, 0x03, 0xe6, 0x03, 0x20, 0x2a, 0x06, 0x60, 0xa2, 0x02, 0xb5, 0x10, 0xc5, 0x10, 0xd0, 0x06,
    0xb5, 0x11, 0xc5, 0x11, 0xf0, 0x09, 0xe8, 0xe8, 0xe4, 0x03, 0xf0, 0x06, 0x4c, 0xaa, 0x06, 0x4c,
    0x35, 0x07, 0x60, 0xa6, 0x03, 0xca, 0x8a, 0xb5, 0x10, 0x95, 0x12, 0xca, 0x10, 0xf9, 0xa5, 0x02,
    0x4a, 0xb0, 0x09, 0x4a, 0xb0, 0x19, 0x4a, 0xb0, 0x1f, 0x4a, 0xb0, 0x2f, 0xa5, 0x10, 0x38, 0xe9,
    0x20, 0x85, 0x10, 0x90, 0x01, 0x60, 0xc6, 0x11, 0xa9, 0x01, 0xc5, 0x11, 0xf0, 0x28, 0x60, 0xe6,
    0x10, 0xa9, 0x1f, 0x24, 0x10, 0xf0, 0x1f, 0x60, 0xa5, 0x10, 0x18, 0x69, 0x20, 0x85, 0x10, 0xb0,
    0x01, 0x60, 0xe6, 0x11, 0xa9, 0x06, 0xc5, 0x11, 0xf0, 0x0c, 0x60, 0xc6, 0x10, 0xa5, 0x10, 0x29,
    0x1f, 0xc9, 0x1f, 0xf0, 0x01, 0x60, 0x4c, 0x35, 0x07, 0xa0, 0x00, 0xa5, 0xfe, 0x91, 0x00, 0x60,
    0xa6, 0x03, 0xa9, 0x00, 0x81, 0x10, 0xa2, 0x00, 0xa9, 0x01, 0x81, 0x10, 0x60, 0xa2, 0x00, 0xea,
    0xea, 0xca, 0xd0, 0xfb, 0x60,
};
/// Entry point: sets up SDL, loads the embedded 6502 program into the
/// emulated CPU, and runs it with a per-step callback that feeds input,
/// randomness, and pushes the 32x32 frame to the screen.
pub fn main() !void {
    // SDL must be initialized before any window/renderer calls.
    if (sdl.SDL_Init(sdl.SDL_INIT_EVERYTHING) != 0) {
        sdl.SDL_Log("Unable to initialize SDL: %s", sdl.SDL_GetError());
        return error.SDLInitializationFailed;
    }
    defer sdl.SDL_Quit();
    // 32x32 logical pixels scaled up 10x for the window.
    const window = sdl.SDL_CreateWindow("Game", sdl.SDL_WINDOWPOS_CENTERED, sdl.SDL_WINDOWPOS_CENTERED, 32 * 10, 32 * 10, 0) orelse {
        sdl.SDL_Log("Unable to initialize window: %s", sdl.SDL_GetError());
        return error.SDLInitializationFailed;
    };
    defer sdl.SDL_DestroyWindow(window);
    const renderer = sdl.SDL_CreateRenderer(window, -1, sdl.SDL_RENDERER_ACCELERATED) orelse {
        sdl.SDL_Log("Unable to initialize renderer: %s", sdl.SDL_GetError());
        return error.SDLInitializationFailed;
    };
    defer sdl.SDL_DestroyRenderer(renderer);
    // Streaming texture that receives the emulator's 32x32 RGB frame.
    const texture = sdl.SDL_CreateTexture(renderer, sdl.SDL_PIXELFORMAT_RGB24, sdl.SDL_TEXTUREACCESS_STREAMING, 32, 32) orelse {
        sdl.SDL_Log("Unable to initialize renderer: %s", sdl.SDL_GetError());
        return error.SDLInitializationFailed;
    };
    defer sdl.SDL_DestroyTexture(texture);
    // Per-step CPU callback context: polls input, supplies the random byte
    // the game reads at $FE, and redraws when video memory changed.
    const Game = struct {
        state: [(32 * 3 * 32)]u8, // last RGB frame uploaded to the texture
        renderer: ?*sdl.SDL_Renderer,
        texture: ?*sdl.SDL_Texture,
        pub fn init(
            r: ?*sdl.SDL_Renderer,
            t: ?*sdl.SDL_Texture,
        ) @This() {
            return @This(){
                .state = [_]u8{0} ** (32 * 3 * 32),
                .renderer = r,
                .texture = t,
            };
        }
        pub fn call(ctx: *anyopaque, cpu: *CPU) void {
            const self: *@This() = @ptrCast(@alignCast(ctx));
            handle_user_input(cpu);
            // Fresh random byte (1-16) at $FE on every step.
            cpu.mem_write(0xFE, std.crypto.random.intRangeAtMost(u8, 1, 16));
            // Only touch SDL when at least one pixel actually changed.
            if (read_screen_state(cpu, &self.state)) {
                if (sdl.SDL_UpdateTexture(self.texture, null, &self.state, 32 * 3) == 0 and
                    sdl.SDL_RenderCopy(self.renderer, self.texture, null, null) == 0)
                {
                    sdl.SDL_RenderPresent(self.renderer);
                }
            }
            // Throttle emulation speed; std.time.sleep takes nanoseconds.
            std.time.sleep(70000);
        }
        pub fn callback(self: *@This()) CPUCallback {
            return CPUCallback{ .ptr = @ptrCast(self), .impl = &.{ .call = call } };
        }
    };
    var game = Game.init(renderer, texture);
    var cpu = CPU.init();
    cpu.load(&game_code);
    cpu.reset();
    cpu.run_with_callback(game.callback());
}
/// Polls at most one pending SDL event and maps it onto the emulated machine:
/// window close / ESC terminates the process, and WASD key presses are
/// written to memory location $FF for the game code to read.
fn handle_user_input(cpu: *CPU) void {
    var event: sdl.SDL_Event = undefined;
    // Nothing queued: leave the last keypress in place.
    if (sdl.SDL_PollEvent(&event) == 0) return;
    if (event.type == sdl.SDL_QUIT) {
        std.process.exit(0);
    }
    if (event.type != sdl.SDL_KEYDOWN) return;
    switch (event.key.keysym.sym) {
        sdl.SDLK_ESCAPE => {
            std.log.debug("keydown escape", .{});
            std.process.exit(0);
        },
        sdl.SDLK_w => {
            std.log.debug("keydown w", .{});
            cpu.mem_write(0xff, 0x77);
        },
        sdl.SDLK_s => {
            std.log.debug("keydown s", .{});
            cpu.mem_write(0xff, 0x73);
        },
        sdl.SDLK_a => {
            std.log.debug("keydown a", .{});
            cpu.mem_write(0xff, 0x61);
        },
        sdl.SDLK_d => {
            std.log.debug("keydown d", .{});
            cpu.mem_write(0xff, 0x64);
        },
        else => {},
    }
}
/// Maps a byte from the emulated video memory to a display color.
/// 0 is black and 1 is white; values 2-7 and their 7-apart twins 9-14 pick
/// from a small palette, and anything else renders as cyan.
fn color(byte: u8) mysdl.Color {
    if (byte == 0) return mysdl.Color.black;
    if (byte == 1) return mysdl.Color.white;
    if (byte == 2 or byte == 9) return mysdl.Color.gray;
    if (byte == 3 or byte == 10) return mysdl.Color.red;
    if (byte == 4 or byte == 11) return mysdl.Color.green;
    if (byte == 5 or byte == 12) return mysdl.Color.blue;
    if (byte == 6 or byte == 13) return mysdl.Color.magenta;
    if (byte == 7 or byte == 14) return mysdl.Color.yellow;
    return mysdl.Color.cyan;
}
/// Copies the emulator's 32x32 video memory (addresses $0200-$05FF) into
/// `frame` as packed RGB triples, one byte of video memory per pixel.
/// Returns true when at least one channel changed since the previous call,
/// i.e. the texture needs re-uploading.
fn read_screen_state(cpu: *CPU, frame: *[32 * 3 * 32]u8) bool {
    var changed = false;
    var addr: u16 = 0x0200;
    var out: usize = 0;
    while (addr < 0x0600) : (addr += 1) {
        const rgb = color(cpu.mem_read(addr)).rgb();
        var channel: usize = 0;
        while (channel < 3) : (channel += 1) {
            if (frame[out + channel] != rgb[channel]) {
                frame[out + channel] = rgb[channel];
                changed = true;
            }
        }
        out += 3;
    }
    return changed;
}
| https://raw.githubusercontent.com/LeafChage/fc.zig/df876f264bec0fcd49ef0c446a8747c81c9c407c/src/main.zig |
// There is a generic CRC implementation "Crc()" which can be paramterized via
// the Algorithm struct for a plethora of uses.
//
// The primary interface for all of the standard CRC algorithms is the
// generated file "crc.zig", which uses the implementation code here to define
// many standard CRCs.
const std = @import("std");
/// Parameter set that fully specifies a CRC variant with a `W`-bit checksum,
/// following the conventional Rocksoft parameter model (polynomial, initial
/// value, input/output reflection, final XOR).
pub fn Algorithm(comptime W: type) type {
    return struct {
        polynomial: W,
        initial: W,
        reflect_input: bool,
        reflect_output: bool,
        xor_output: W,
    };
}
/// Returns a CRC type for checksum width `W` implementing `algorithm`,
/// processing input one byte at a time via a comptime-built 256-entry table.
pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type {
    return struct {
        const Self = @This();
        // Working register type; sub-byte widths are computed inside a u8.
        const I = if (@bitSizeOf(W) < 8) u8 else W;
        // One table entry per possible input byte, built at compile time.
        const lookup_table = blk: {
            @setEvalBranchQuota(2500);
            // Reflected algorithms bit-reverse the polynomial and shift the
            // register right; otherwise the polynomial is left-aligned in I
            // and the register shifts left.
            const poly = if (algorithm.reflect_input)
                @bitReverse(@as(I, algorithm.polynomial)) >> (@bitSizeOf(I) - @bitSizeOf(W))
            else
                @as(I, algorithm.polynomial) << (@bitSizeOf(I) - @bitSizeOf(W));
            var table: [256]I = undefined;
            for (&table, 0..) |*e, i| {
                var crc: I = i;
                if (algorithm.reflect_input) {
                    var j: usize = 0;
                    while (j < 8) : (j += 1) {
                        // Branch-free conditional XOR: poly is applied only
                        // when the bit shifted out was set.
                        crc = (crc >> 1) ^ ((crc & 1) * poly);
                    }
                } else {
                    crc <<= @bitSizeOf(I) - 8;
                    var j: usize = 0;
                    while (j < 8) : (j += 1) {
                        crc = (crc << 1) ^ (((crc >> (@bitSizeOf(I) - 1)) & 1) * poly);
                    }
                }
                e.* = crc;
            }
            break :blk table;
        };
        // Running CRC register.
        crc: I,
        /// Begins a new checksum with the algorithm's initial value,
        /// aligned/reflected to match the register orientation.
        pub fn init() Self {
            const initial = if (algorithm.reflect_input)
                @bitReverse(@as(I, algorithm.initial)) >> (@bitSizeOf(I) - @bitSizeOf(W))
            else
                @as(I, algorithm.initial) << (@bitSizeOf(I) - @bitSizeOf(W));
            return Self{ .crc = initial };
        }
        inline fn tableEntry(index: I) I {
            return lookup_table[@as(u8, @intCast(index & 0xFF))];
        }
        /// Folds `bytes` into the running checksum.
        pub fn update(self: *Self, bytes: []const u8) void {
            var i: usize = 0;
            if (@bitSizeOf(I) <= 8) {
                // Register is exactly one byte: the table lookup replaces it.
                while (i < bytes.len) : (i += 1) {
                    self.crc = tableEntry(self.crc ^ bytes[i]);
                }
            } else if (algorithm.reflect_input) {
                // Reflected: consume input at the low end, shift right.
                while (i < bytes.len) : (i += 1) {
                    const table_index = self.crc ^ bytes[i];
                    self.crc = tableEntry(table_index) ^ (self.crc >> 8);
                }
            } else {
                // Non-reflected: consume input at the high end, shift left.
                while (i < bytes.len) : (i += 1) {
                    const table_index = (self.crc >> (@bitSizeOf(I) - 8)) ^ bytes[i];
                    self.crc = tableEntry(table_index) ^ (self.crc << 8);
                }
            }
        }
        /// Finalizes and returns the checksum; `self` is left unchanged.
        pub fn final(self: Self) W {
            var c = self.crc;
            if (algorithm.reflect_input != algorithm.reflect_output) {
                c = @bitReverse(c);
            }
            if (!algorithm.reflect_output) {
                // Move the W significant bits down from the top of I.
                c >>= @bitSizeOf(I) - @bitSizeOf(W);
            }
            return @as(W, @intCast(c ^ algorithm.xor_output));
        }
        /// One-shot convenience: checksum of `bytes` in a single call.
        pub fn hash(bytes: []const u8) W {
            var c = Self.init();
            c.update(bytes);
            return c.final();
        }
    };
}
/// Deprecated legacy API: referencing any of these intentionally produces a
/// compile error directing users to the generic `Crc` implementations.
pub const Polynomial = enum(u32) {
    IEEE = @compileError("use Crc with algorithm .Crc32IsoHdlc"),
    Castagnoli = @compileError("use Crc with algorithm .Crc32Iscsi"),
    Koopman = @compileError("use Crc with algorithm .Crc32Koopman"),
    _,
};
pub const Crc32WithPoly = @compileError("use Crc instead");
pub const Crc32SmallWithPoly = @compileError("use Crc instead");
| https://raw.githubusercontent.com/ziglang/zig-bootstrap/ec2dca85a340f134d2fcfdc9007e91f9abed6996/zig/lib/std/hash/crc/impl.zig |
// There is a generic CRC implementation "Crc()" which can be paramterized via
// the Algorithm struct for a plethora of uses.
//
// The primary interface for all of the standard CRC algorithms is the
// generated file "crc.zig", which uses the implementation code here to define
// many standard CRCs.
const std = @import("std");
/// Parameter set that fully specifies a CRC variant with a `W`-bit checksum,
/// following the conventional Rocksoft parameter model (polynomial, initial
/// value, input/output reflection, final XOR).
pub fn Algorithm(comptime W: type) type {
    return struct {
        polynomial: W,
        initial: W,
        reflect_input: bool,
        reflect_output: bool,
        xor_output: W,
    };
}
/// Returns a CRC type for checksum width `W` implementing `algorithm`,
/// processing input one byte at a time via a comptime-built 256-entry table.
pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type {
    return struct {
        const Self = @This();
        // Working register type; sub-byte widths are computed inside a u8.
        const I = if (@bitSizeOf(W) < 8) u8 else W;
        // One table entry per possible input byte, built at compile time.
        const lookup_table = blk: {
            @setEvalBranchQuota(2500);
            // Reflected algorithms bit-reverse the polynomial and shift the
            // register right; otherwise the polynomial is left-aligned in I
            // and the register shifts left.
            const poly = if (algorithm.reflect_input)
                @bitReverse(@as(I, algorithm.polynomial)) >> (@bitSizeOf(I) - @bitSizeOf(W))
            else
                @as(I, algorithm.polynomial) << (@bitSizeOf(I) - @bitSizeOf(W));
            var table: [256]I = undefined;
            for (&table, 0..) |*e, i| {
                var crc: I = i;
                if (algorithm.reflect_input) {
                    var j: usize = 0;
                    while (j < 8) : (j += 1) {
                        // Branch-free conditional XOR: poly is applied only
                        // when the bit shifted out was set.
                        crc = (crc >> 1) ^ ((crc & 1) * poly);
                    }
                } else {
                    crc <<= @bitSizeOf(I) - 8;
                    var j: usize = 0;
                    while (j < 8) : (j += 1) {
                        crc = (crc << 1) ^ (((crc >> (@bitSizeOf(I) - 1)) & 1) * poly);
                    }
                }
                e.* = crc;
            }
            break :blk table;
        };
        // Running CRC register.
        crc: I,
        /// Begins a new checksum with the algorithm's initial value,
        /// aligned/reflected to match the register orientation.
        pub fn init() Self {
            const initial = if (algorithm.reflect_input)
                @bitReverse(@as(I, algorithm.initial)) >> (@bitSizeOf(I) - @bitSizeOf(W))
            else
                @as(I, algorithm.initial) << (@bitSizeOf(I) - @bitSizeOf(W));
            return Self{ .crc = initial };
        }
        inline fn tableEntry(index: I) I {
            return lookup_table[@as(u8, @intCast(index & 0xFF))];
        }
        /// Folds `bytes` into the running checksum.
        pub fn update(self: *Self, bytes: []const u8) void {
            var i: usize = 0;
            if (@bitSizeOf(I) <= 8) {
                // Register is exactly one byte: the table lookup replaces it.
                while (i < bytes.len) : (i += 1) {
                    self.crc = tableEntry(self.crc ^ bytes[i]);
                }
            } else if (algorithm.reflect_input) {
                // Reflected: consume input at the low end, shift right.
                while (i < bytes.len) : (i += 1) {
                    const table_index = self.crc ^ bytes[i];
                    self.crc = tableEntry(table_index) ^ (self.crc >> 8);
                }
            } else {
                // Non-reflected: consume input at the high end, shift left.
                while (i < bytes.len) : (i += 1) {
                    const table_index = (self.crc >> (@bitSizeOf(I) - 8)) ^ bytes[i];
                    self.crc = tableEntry(table_index) ^ (self.crc << 8);
                }
            }
        }
        /// Finalizes and returns the checksum; `self` is left unchanged.
        pub fn final(self: Self) W {
            var c = self.crc;
            if (algorithm.reflect_input != algorithm.reflect_output) {
                c = @bitReverse(c);
            }
            if (!algorithm.reflect_output) {
                // Move the W significant bits down from the top of I.
                c >>= @bitSizeOf(I) - @bitSizeOf(W);
            }
            return @as(W, @intCast(c ^ algorithm.xor_output));
        }
        /// One-shot convenience: checksum of `bytes` in a single call.
        pub fn hash(bytes: []const u8) W {
            var c = Self.init();
            c.update(bytes);
            return c.final();
        }
    };
}
/// Deprecated legacy API: referencing any of these intentionally produces a
/// compile error directing users to the generic `Crc` implementations.
pub const Polynomial = enum(u32) {
    IEEE = @compileError("use Crc with algorithm .Crc32IsoHdlc"),
    Castagnoli = @compileError("use Crc with algorithm .Crc32Iscsi"),
    Koopman = @compileError("use Crc with algorithm .Crc32Koopman"),
    _,
};
pub const Crc32WithPoly = @compileError("use Crc instead");
pub const Crc32SmallWithPoly = @compileError("use Crc instead");
| https://raw.githubusercontent.com/cyberegoorg/cetech1-zig/7438a7b157a4047261d161c06248b54fe9d822eb/lib/std/hash/crc/impl.zig |
// There is a generic CRC implementation "Crc()" which can be paramterized via
// the Algorithm struct for a plethora of uses.
//
// The primary interface for all of the standard CRC algorithms is the
// generated file "crc.zig", which uses the implementation code here to define
// many standard CRCs.
const std = @import("std");
/// Parameter set that fully specifies a CRC variant with a `W`-bit checksum,
/// following the conventional Rocksoft parameter model (polynomial, initial
/// value, input/output reflection, final XOR).
pub fn Algorithm(comptime W: type) type {
    return struct {
        polynomial: W,
        initial: W,
        reflect_input: bool,
        reflect_output: bool,
        xor_output: W,
    };
}
/// Returns a CRC type for checksum width `W` implementing `algorithm`,
/// processing input one byte at a time via a comptime-built 256-entry table.
pub fn Crc(comptime W: type, comptime algorithm: Algorithm(W)) type {
    return struct {
        const Self = @This();
        // Working register type; sub-byte widths are computed inside a u8.
        const I = if (@bitSizeOf(W) < 8) u8 else W;
        // One table entry per possible input byte, built at compile time.
        const lookup_table = blk: {
            @setEvalBranchQuota(2500);
            // Reflected algorithms bit-reverse the polynomial and shift the
            // register right; otherwise the polynomial is left-aligned in I
            // and the register shifts left.
            const poly = if (algorithm.reflect_input)
                @bitReverse(@as(I, algorithm.polynomial)) >> (@bitSizeOf(I) - @bitSizeOf(W))
            else
                @as(I, algorithm.polynomial) << (@bitSizeOf(I) - @bitSizeOf(W));
            var table: [256]I = undefined;
            for (&table, 0..) |*e, i| {
                var crc: I = i;
                if (algorithm.reflect_input) {
                    var j: usize = 0;
                    while (j < 8) : (j += 1) {
                        // Branch-free conditional XOR: poly is applied only
                        // when the bit shifted out was set.
                        crc = (crc >> 1) ^ ((crc & 1) * poly);
                    }
                } else {
                    crc <<= @bitSizeOf(I) - 8;
                    var j: usize = 0;
                    while (j < 8) : (j += 1) {
                        crc = (crc << 1) ^ (((crc >> (@bitSizeOf(I) - 1)) & 1) * poly);
                    }
                }
                e.* = crc;
            }
            break :blk table;
        };
        // Running CRC register.
        crc: I,
        /// Begins a new checksum with the algorithm's initial value,
        /// aligned/reflected to match the register orientation.
        pub fn init() Self {
            const initial = if (algorithm.reflect_input)
                @bitReverse(@as(I, algorithm.initial)) >> (@bitSizeOf(I) - @bitSizeOf(W))
            else
                @as(I, algorithm.initial) << (@bitSizeOf(I) - @bitSizeOf(W));
            return Self{ .crc = initial };
        }
        inline fn tableEntry(index: I) I {
            return lookup_table[@as(u8, @intCast(index & 0xFF))];
        }
        /// Folds `bytes` into the running checksum.
        pub fn update(self: *Self, bytes: []const u8) void {
            var i: usize = 0;
            if (@bitSizeOf(I) <= 8) {
                // Register is exactly one byte: the table lookup replaces it.
                while (i < bytes.len) : (i += 1) {
                    self.crc = tableEntry(self.crc ^ bytes[i]);
                }
            } else if (algorithm.reflect_input) {
                // Reflected: consume input at the low end, shift right.
                while (i < bytes.len) : (i += 1) {
                    const table_index = self.crc ^ bytes[i];
                    self.crc = tableEntry(table_index) ^ (self.crc >> 8);
                }
            } else {
                // Non-reflected: consume input at the high end, shift left.
                while (i < bytes.len) : (i += 1) {
                    const table_index = (self.crc >> (@bitSizeOf(I) - 8)) ^ bytes[i];
                    self.crc = tableEntry(table_index) ^ (self.crc << 8);
                }
            }
        }
        /// Finalizes and returns the checksum; `self` is left unchanged.
        pub fn final(self: Self) W {
            var c = self.crc;
            if (algorithm.reflect_input != algorithm.reflect_output) {
                c = @bitReverse(c);
            }
            if (!algorithm.reflect_output) {
                // Move the W significant bits down from the top of I.
                c >>= @bitSizeOf(I) - @bitSizeOf(W);
            }
            return @as(W, @intCast(c ^ algorithm.xor_output));
        }
        /// One-shot convenience: checksum of `bytes` in a single call.
        pub fn hash(bytes: []const u8) W {
            var c = Self.init();
            c.update(bytes);
            return c.final();
        }
    };
}
/// Deprecated legacy API: referencing any of these intentionally produces a
/// compile error directing users to the generic `Crc` implementations.
pub const Polynomial = enum(u32) {
    IEEE = @compileError("use Crc with algorithm .Crc32IsoHdlc"),
    Castagnoli = @compileError("use Crc with algorithm .Crc32Iscsi"),
    Koopman = @compileError("use Crc with algorithm .Crc32Koopman"),
    _,
};
pub const Crc32WithPoly = @compileError("use Crc instead");
pub const Crc32SmallWithPoly = @compileError("use Crc instead");
| https://raw.githubusercontent.com/kassane/zig-mos-bootstrap/19aac4779b9e93b0e833402c26c93cfc13bb94e2/zig/lib/std/hash/crc/impl.zig |
const expect = @import("std").testing.expect;
test "if statement" {
    // Same check as the canonical example, with the branches flipped
    // via negation of the condition.
    const a = true;
    var x: u16 = 0;
    if (!a) {
        x += 2;
    } else {
        x += 1;
    }
    try expect(x == 1);
}
| https://raw.githubusercontent.com/Sobeston/zig.guide/a49e8a20cd5b3699fc1510f3c9c2c5a698869358/website/versioned_docs/version-0.11/01-language-basics/03.expect.zig |
const expect = @import("std").testing.expect;
test "if statement" {
    // An if is an expression in Zig: select the increment, then apply it.
    const a = true;
    var x: u16 = 0;
    const delta: u16 = if (a) 1 else 2;
    x += delta;
    try expect(x == 1);
}
| https://raw.githubusercontent.com/wolffshots/ziglearn/5743d7a6748bf38207e5395ea0cc7b5dbdeaf230/if.zig |
const expect = @import("std").testing.expect;
test "if statement" {
    // Compact form: fold the branch directly into the addition.
    const a = true;
    var x: u16 = 0;
    x += if (a) 1 else 2;
    try expect(x == 1);
}
| https://raw.githubusercontent.com/Nathan3-14/zig/0a8078ca3940dacec6fb9630ce38cd838fca54a8/files/if_statments.zig |
const expect = @import("std").testing.expect;
test "if statement" {
    // Two independent guarded additions; exactly one of them fires.
    const a = true;
    var x: u16 = 0;
    if (a) {
        x += 1;
    }
    if (!a) {
        x += 2;
    }
    try expect(x == 1);
}
| https://raw.githubusercontent.com/axsaucedo/zigplayground/21bf2ed32945ed42d01c0b99d5678b99ef9eaf84/test_if_simple.zig |
const std = @import("std.zig");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const math = std.math;
/// Binary search over `items`, which must be sorted ascending with respect to
/// `compareFn`. Returns the index of an element comparing equal to `key`, or
/// null when none exists. With duplicate keys, any matching index may be
/// returned.
pub fn binarySearch(
    comptime T: type,
    key: T,
    items: []const T,
    context: anytype,
    comptime compareFn: fn (context: @TypeOf(context), lhs: T, rhs: T) math.Order,
) ?usize {
    // Invariant: a match, if present, lies in the half-open window [lo, hi).
    var lo: usize = 0;
    var hi: usize = items.len;
    while (lo < hi) {
        // Written as lo + (hi - lo) / 2 so the sum can never overflow usize.
        const mid = lo + (hi - lo) / 2;
        const ord = compareFn(context, key, items[mid]);
        if (ord == .eq) return mid;
        if (ord == .gt) {
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }
    return null;
}
test "binarySearch" {
    // Comparators for the cases below; both simply forward to math.order.
    const S = struct {
        fn order_u32(context: void, lhs: u32, rhs: u32) math.Order {
            _ = context;
            return math.order(lhs, rhs);
        }
        fn order_i32(context: void, lhs: i32, rhs: i32) math.Order {
            _ = context;
            return math.order(lhs, rhs);
        }
    };
    // An empty slice never contains the key.
    try testing.expectEqual(
        @as(?usize, null),
        binarySearch(u32, 1, &[_]u32{}, {}, S.order_u32),
    );
    // Single-element hit and misses on either side of the element.
    try testing.expectEqual(
        @as(?usize, 0),
        binarySearch(u32, 1, &[_]u32{1}, {}, S.order_u32),
    );
    try testing.expectEqual(
        @as(?usize, null),
        binarySearch(u32, 1, &[_]u32{0}, {}, S.order_u32),
    );
    try testing.expectEqual(
        @as(?usize, null),
        binarySearch(u32, 0, &[_]u32{1}, {}, S.order_u32),
    );
    // Hits at the last and first positions of larger slices.
    try testing.expectEqual(
        @as(?usize, 4),
        binarySearch(u32, 5, &[_]u32{ 1, 2, 3, 4, 5 }, {}, S.order_u32),
    );
    try testing.expectEqual(
        @as(?usize, 0),
        binarySearch(u32, 2, &[_]u32{ 2, 4, 8, 16, 32, 64 }, {}, S.order_u32),
    );
    // Signed element types work the same way.
    try testing.expectEqual(
        @as(?usize, 1),
        binarySearch(i32, -4, &[_]i32{ -7, -4, 0, 9, 10 }, {}, S.order_i32),
    );
    try testing.expectEqual(
        @as(?usize, 3),
        binarySearch(i32, 98, &[_]i32{ -100, -25, 2, 98, 99, 100 }, {}, S.order_i32),
    );
}
/// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. O(1) memory (no allocator required).
/// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. O(1) memory (no allocator required).
pub fn insertionSort(
    comptime T: type,
    items: []T,
    context: anytype,
    comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) void {
    // Grow a sorted prefix one element at a time: lift items[idx] out, slide
    // every strictly-greater prefix element up one slot, then drop the lifted
    // value into the hole. Using strict lessThan keeps the sort stable.
    var idx: usize = 1;
    while (idx < items.len) : (idx += 1) {
        const value = items[idx];
        var hole = idx;
        while (hole > 0 and lessThan(context, value, items[hole - 1])) {
            items[hole] = items[hole - 1];
            hole -= 1;
        }
        items[hole] = value;
    }
}
// Half-open interval [start, end) of indices into the array being sorted.
const Range = struct {
    start: usize,
    end: usize,
    fn init(start: usize, end: usize) Range {
        return Range{
            .start = start,
            .end = end,
        };
    }
    // Number of indices covered by the range.
    fn length(self: Range) usize {
        return self.end - self.start;
    }
};
// Yields successive subranges of the array for each level of the bottom-up
// merge sort. Range lengths that don't divide evenly are spread out with an
// integer numerator/denominator accumulator (no floating point), so each
// level tiles the whole array with ranges whose lengths differ by at most 1.
const Iterator = struct {
    size: usize, // total number of items being iterated over
    power_of_two: usize, // largest power of two <= size
    numerator: usize, // fractional part of the current position
    decimal: usize, // integer part of the current position
    denominator: usize, // number of base-level ranges
    decimal_step: usize, // integer part of the current range length
    numerator_step: usize, // fractional part of the current range length
    // `size2` is the item count; `min_level` is the base range size
    // (the sort below passes 4).
    fn init(size2: usize, min_level: usize) Iterator {
        const power_of_two = math.floorPowerOfTwo(usize, size2);
        const denominator = power_of_two / min_level;
        return Iterator{
            .numerator = 0,
            .decimal = 0,
            .size = size2,
            .power_of_two = power_of_two,
            .denominator = denominator,
            .decimal_step = size2 / denominator,
            .numerator_step = size2 % denominator,
        };
    }
    // Restart iteration at the beginning of the array for the current level.
    fn begin(self: *Iterator) void {
        self.numerator = 0;
        self.decimal = 0;
    }
    // Next subrange at this level, carrying the fractional remainder forward.
    fn nextRange(self: *Iterator) Range {
        const start = self.decimal;
        self.decimal += self.decimal_step;
        self.numerator += self.numerator_step;
        if (self.numerator >= self.denominator) {
            self.numerator -= self.denominator;
            self.decimal += 1;
        }
        return Range{
            .start = start,
            .end = self.decimal,
        };
    }
    // True once the whole array has been covered at this level.
    fn finished(self: *Iterator) bool {
        return self.decimal >= self.size;
    }
    // Double the range length (move up one merge level); returns false once
    // a single range would span the entire array.
    fn nextLevel(self: *Iterator) bool {
        self.decimal_step += self.decimal_step;
        self.numerator_step += self.numerator_step;
        if (self.numerator_step >= self.denominator) {
            self.numerator_step -= self.denominator;
            self.decimal_step += 1;
        }
        return (self.decimal_step < self.size);
    }
    // Integer part of the current range length.
    fn length(self: *Iterator) usize {
        return self.decimal_step;
    }
};
// Bookkeeping for the in-place merge's internal-buffer extraction in `sort`
// below (a WikiSort port): presumably `count` unique values are pulled from
// index `from` toward index `to`, with `range` recording the span they came
// from -- see the referenced WikiSort implementation for the full protocol.
const Pull = struct {
    from: usize,
    to: usize,
    count: usize,
    range: Range,
};
/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. O(1) memory (no allocator required).
/// Currently implemented as block sort.
pub fn sort(
comptime T: type,
items: []T,
context: anytype,
comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) void {
// Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
var cache: [512]T = undefined;
if (items.len < 4) {
if (items.len == 3) {
// hard coded insertion sort
if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
if (lessThan(context, items[2], items[1])) {
mem.swap(T, &items[1], &items[2]);
if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
}
} else if (items.len == 2) {
if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
}
return;
}
// sort groups of 4-8 items at a time using an unstable sorting network,
// but keep track of the original item orders to force it to be stable
// http://pages.ripco.net/~jgamble/nw.html
var iterator = Iterator.init(items.len, 4);
while (!iterator.finished()) {
var order = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7 };
const range = iterator.nextRange();
const sliced_items = items[range.start..];
switch (range.length()) {
8 => {
swap(T, sliced_items, context, lessThan, &order, 0, 1);
swap(T, sliced_items, context, lessThan, &order, 2, 3);
swap(T, sliced_items, context, lessThan, &order, 4, 5);
swap(T, sliced_items, context, lessThan, &order, 6, 7);
swap(T, sliced_items, context, lessThan, &order, 0, 2);
swap(T, sliced_items, context, lessThan, &order, 1, 3);
swap(T, sliced_items, context, lessThan, &order, 4, 6);
swap(T, sliced_items, context, lessThan, &order, 5, 7);
swap(T, sliced_items, context, lessThan, &order, 1, 2);
swap(T, sliced_items, context, lessThan, &order, 5, 6);
swap(T, sliced_items, context, lessThan, &order, 0, 4);
swap(T, sliced_items, context, lessThan, &order, 3, 7);
swap(T, sliced_items, context, lessThan, &order, 1, 5);
swap(T, sliced_items, context, lessThan, &order, 2, 6);
swap(T, sliced_items, context, lessThan, &order, 1, 4);
swap(T, sliced_items, context, lessThan, &order, 3, 6);
swap(T, sliced_items, context, lessThan, &order, 2, 4);
swap(T, sliced_items, context, lessThan, &order, 3, 5);
swap(T, sliced_items, context, lessThan, &order, 3, 4);
},
7 => {
swap(T, sliced_items, context, lessThan, &order, 1, 2);
swap(T, sliced_items, context, lessThan, &order, 3, 4);
swap(T, sliced_items, context, lessThan, &order, 5, 6);
swap(T, sliced_items, context, lessThan, &order, 0, 2);
swap(T, sliced_items, context, lessThan, &order, 3, 5);
swap(T, sliced_items, context, lessThan, &order, 4, 6);
swap(T, sliced_items, context, lessThan, &order, 0, 1);
swap(T, sliced_items, context, lessThan, &order, 4, 5);
swap(T, sliced_items, context, lessThan, &order, 2, 6);
swap(T, sliced_items, context, lessThan, &order, 0, 4);
swap(T, sliced_items, context, lessThan, &order, 1, 5);
swap(T, sliced_items, context, lessThan, &order, 0, 3);
swap(T, sliced_items, context, lessThan, &order, 2, 5);
swap(T, sliced_items, context, lessThan, &order, 1, 3);
swap(T, sliced_items, context, lessThan, &order, 2, 4);
swap(T, sliced_items, context, lessThan, &order, 2, 3);
},
6 => {
swap(T, sliced_items, context, lessThan, &order, 1, 2);
swap(T, sliced_items, context, lessThan, &order, 4, 5);
swap(T, sliced_items, context, lessThan, &order, 0, 2);
swap(T, sliced_items, context, lessThan, &order, 3, 5);
swap(T, sliced_items, context, lessThan, &order, 0, 1);
swap(T, sliced_items, context, lessThan, &order, 3, 4);
swap(T, sliced_items, context, lessThan, &order, 2, 5);
swap(T, sliced_items, context, lessThan, &order, 0, 3);
swap(T, sliced_items, context, lessThan, &order, 1, 4);
swap(T, sliced_items, context, lessThan, &order, 2, 4);
swap(T, sliced_items, context, lessThan, &order, 1, 3);
swap(T, sliced_items, context, lessThan, &order, 2, 3);
},
5 => {
swap(T, sliced_items, context, lessThan, &order, 0, 1);
swap(T, sliced_items, context, lessThan, &order, 3, 4);
swap(T, sliced_items, context, lessThan, &order, 2, 4);
swap(T, sliced_items, context, lessThan, &order, 2, 3);
swap(T, sliced_items, context, lessThan, &order, 1, 4);
swap(T, sliced_items, context, lessThan, &order, 0, 3);
swap(T, sliced_items, context, lessThan, &order, 0, 2);
swap(T, sliced_items, context, lessThan, &order, 1, 3);
swap(T, sliced_items, context, lessThan, &order, 1, 2);
},
4 => {
swap(T, sliced_items, context, lessThan, &order, 0, 1);
swap(T, sliced_items, context, lessThan, &order, 2, 3);
swap(T, sliced_items, context, lessThan, &order, 0, 2);
swap(T, sliced_items, context, lessThan, &order, 1, 3);
swap(T, sliced_items, context, lessThan, &order, 1, 2);
},
else => {},
}
}
if (items.len < 8) return;
// then merge sort the higher levels, which can be 8-15, 16-31, 32-63, 64-127, etc.
while (true) {
// if every A and B block will fit into the cache, use a special branch specifically for merging with the cache
// (we use < rather than <= since the block size might be one more than iterator.length())
if (iterator.length() < cache.len) {
// if four subarrays fit into the cache, it's faster to merge both pairs of subarrays into the cache,
// then merge the two merged subarrays from the cache back into the original array
if ((iterator.length() + 1) * 4 <= cache.len and iterator.length() * 4 <= items.len) {
iterator.begin();
while (!iterator.finished()) {
// merge A1 and B1 into the cache
var A1 = iterator.nextRange();
var B1 = iterator.nextRange();
var A2 = iterator.nextRange();
var B2 = iterator.nextRange();
if (lessThan(context, items[B1.end - 1], items[A1.start])) {
// the two ranges are in reverse order, so copy them in reverse order into the cache
mem.copy(T, cache[B1.length()..], items[A1.start..A1.end]);
mem.copy(T, cache[0..], items[B1.start..B1.end]);
} else if (lessThan(context, items[B1.start], items[A1.end - 1])) {
// these two ranges weren't already in order, so merge them into the cache
mergeInto(T, items, A1, B1, context, lessThan, cache[0..]);
} else {
// if A1, B1, A2, and B2 are all in order, skip doing anything else
if (!lessThan(context, items[B2.start], items[A2.end - 1]) and !lessThan(context, items[A2.start], items[B1.end - 1])) continue;
// copy A1 and B1 into the cache in the same order
mem.copy(T, cache[0..], items[A1.start..A1.end]);
mem.copy(T, cache[A1.length()..], items[B1.start..B1.end]);
}
A1 = Range.init(A1.start, B1.end);
// merge A2 and B2 into the cache
if (lessThan(context, items[B2.end - 1], items[A2.start])) {
// the two ranges are in reverse order, so copy them in reverse order into the cache
mem.copy(T, cache[A1.length() + B2.length() ..], items[A2.start..A2.end]);
mem.copy(T, cache[A1.length()..], items[B2.start..B2.end]);
} else if (lessThan(context, items[B2.start], items[A2.end - 1])) {
// these two ranges weren't already in order, so merge them into the cache
mergeInto(T, items, A2, B2, context, lessThan, cache[A1.length()..]);
} else {
// copy A2 and B2 into the cache in the same order
mem.copy(T, cache[A1.length()..], items[A2.start..A2.end]);
mem.copy(T, cache[A1.length() + A2.length() ..], items[B2.start..B2.end]);
}
A2 = Range.init(A2.start, B2.end);
// merge A1 and A2 from the cache into the items
const A3 = Range.init(0, A1.length());
const B3 = Range.init(A1.length(), A1.length() + A2.length());
if (lessThan(context, cache[B3.end - 1], cache[A3.start])) {
// the two ranges are in reverse order, so copy them in reverse order into the items
mem.copy(T, items[A1.start + A2.length() ..], cache[A3.start..A3.end]);
mem.copy(T, items[A1.start..], cache[B3.start..B3.end]);
} else if (lessThan(context, cache[B3.start], cache[A3.end - 1])) {
// these two ranges weren't already in order, so merge them back into the items
mergeInto(T, cache[0..], A3, B3, context, lessThan, items[A1.start..]);
} else {
// copy A3 and B3 into the items in the same order
mem.copy(T, items[A1.start..], cache[A3.start..A3.end]);
mem.copy(T, items[A1.start + A1.length() ..], cache[B3.start..B3.end]);
}
}
// we merged two levels at the same time, so we're done with this level already
// (iterator.nextLevel() is called again at the bottom of this outer merge loop)
_ = iterator.nextLevel();
} else {
iterator.begin();
while (!iterator.finished()) {
var A = iterator.nextRange();
var B = iterator.nextRange();
if (lessThan(context, items[B.end - 1], items[A.start])) {
// the two ranges are in reverse order, so a simple rotation should fix it
mem.rotate(T, items[A.start..B.end], A.length());
} else if (lessThan(context, items[B.start], items[A.end - 1])) {
// these two ranges weren't already in order, so we'll need to merge them!
mem.copy(T, cache[0..], items[A.start..A.end]);
mergeExternal(T, items, A, B, context, lessThan, cache[0..]);
}
}
}
} else {
// this is where the in-place merge logic starts!
// 1. pull out two internal buffers each containing √A unique values
// 1a. adjust block_size and buffer_size if we couldn't find enough unique values
// 2. loop over the A and B subarrays within this level of the merge sort
// 3. break A and B into blocks of size 'block_size'
// 4. "tag" each of the A blocks with values from the first internal buffer
// 5. roll the A blocks through the B blocks and drop/rotate them where they belong
// 6. merge each A block with any B values that follow, using the cache or the second internal buffer
// 7. sort the second internal buffer if it exists
// 8. redistribute the two internal buffers back into the items
var block_size: usize = math.sqrt(iterator.length());
var buffer_size = iterator.length() / block_size + 1;
// as an optimization, we really only need to pull out the internal buffers once for each level of merges
// after that we can reuse the same buffers over and over, then redistribute it when we're finished with this level
var A: Range = undefined;
var B: Range = undefined;
var index: usize = 0;
var last: usize = 0;
var count: usize = 0;
var find: usize = 0;
var start: usize = 0;
var pull_index: usize = 0;
var pull = [_]Pull{
Pull{
.from = 0,
.to = 0,
.count = 0,
.range = Range.init(0, 0),
},
Pull{
.from = 0,
.to = 0,
.count = 0,
.range = Range.init(0, 0),
},
};
var buffer1 = Range.init(0, 0);
var buffer2 = Range.init(0, 0);
// find two internal buffers of size 'buffer_size' each
find = buffer_size + buffer_size;
var find_separately = false;
if (block_size <= cache.len) {
// if every A block fits into the cache then we won't need the second internal buffer,
// so we really only need to find 'buffer_size' unique values
find = buffer_size;
} else if (find > iterator.length()) {
// we can't fit both buffers into the same A or B subarray, so find two buffers separately
find = buffer_size;
find_separately = true;
}
// we need to find either a single contiguous space containing 2√A unique values (which will be split up into two buffers of size √A each),
// or we need to find one buffer of < 2√A unique values, and a second buffer of √A unique values,
// OR if we couldn't find that many unique values, we need the largest possible buffer we can get
// in the case where it couldn't find a single buffer of at least √A unique values,
// all of the Merge steps must be replaced by a different merge algorithm (MergeInPlace)
iterator.begin();
while (!iterator.finished()) {
A = iterator.nextRange();
B = iterator.nextRange();
// just store information about where the values will be pulled from and to,
// as well as how many values there are, to create the two internal buffers
// check A for the number of unique values we need to fill an internal buffer
// these values will be pulled out to the start of A
last = A.start;
count = 1;
while (count < find) : ({
last = index;
count += 1;
}) {
index = findLastForward(T, items, items[last], Range.init(last + 1, A.end), context, lessThan, find - count);
if (index == A.end) break;
}
index = last;
if (count >= buffer_size) {
// keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer
pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
.to = A.start,
};
pull_index = 1;
if (count == buffer_size + buffer_size) {
// we were able to find a single contiguous section containing 2√A unique values,
// so this section can be used to contain both of the internal buffers we'll need
buffer1 = Range.init(A.start, A.start + buffer_size);
buffer2 = Range.init(A.start + buffer_size, A.start + count);
break;
} else if (find == buffer_size + buffer_size) {
// we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values,
// so we still need to find a second separate buffer of at least √A unique values
buffer1 = Range.init(A.start, A.start + count);
find = buffer_size;
} else if (block_size <= cache.len) {
// we found the first and only internal buffer that we need, so we're done!
buffer1 = Range.init(A.start, A.start + count);
break;
} else if (find_separately) {
// found one buffer, but now find the other one
buffer1 = Range.init(A.start, A.start + count);
find_separately = false;
} else {
// we found a second buffer in an 'A' subarray containing √A unique values, so we're done!
buffer2 = Range.init(A.start, A.start + count);
break;
}
} else if (pull_index == 0 and count > buffer1.length()) {
// keep track of the largest buffer we were able to find
buffer1 = Range.init(A.start, A.start + count);
pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
.to = A.start,
};
}
// check B for the number of unique values we need to fill an internal buffer
// these values will be pulled out to the end of B
last = B.end - 1;
count = 1;
while (count < find) : ({
last = index - 1;
count += 1;
}) {
index = findFirstBackward(T, items, items[last], Range.init(B.start, last), context, lessThan, find - count);
if (index == B.start) break;
}
index = last;
if (count >= buffer_size) {
// keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer
pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
.to = B.end,
};
pull_index = 1;
if (count == buffer_size + buffer_size) {
// we were able to find a single contiguous section containing 2√A unique values,
// so this section can be used to contain both of the internal buffers we'll need
buffer1 = Range.init(B.end - count, B.end - buffer_size);
buffer2 = Range.init(B.end - buffer_size, B.end);
break;
} else if (find == buffer_size + buffer_size) {
// we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values,
// so we still need to find a second separate buffer of at least √A unique values
buffer1 = Range.init(B.end - count, B.end);
find = buffer_size;
} else if (block_size <= cache.len) {
// we found the first and only internal buffer that we need, so we're done!
buffer1 = Range.init(B.end - count, B.end);
break;
} else if (find_separately) {
// found one buffer, but now find the other one
buffer1 = Range.init(B.end - count, B.end);
find_separately = false;
} else {
// buffer2 will be pulled out from a 'B' subarray, so if the first buffer was pulled out from the corresponding 'A' subarray,
// we need to adjust the end point for that A subarray so it knows to stop redistributing its values before reaching buffer2
if (pull[0].range.start == A.start) pull[0].range.end -= pull[1].count;
// we found a second buffer in an 'B' subarray containing √A unique values, so we're done!
buffer2 = Range.init(B.end - count, B.end);
break;
}
} else if (pull_index == 0 and count > buffer1.length()) {
// keep track of the largest buffer we were able to find
buffer1 = Range.init(B.end - count, B.end);
pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
.to = B.end,
};
}
}
// pull out the two ranges so we can use them as internal buffers
pull_index = 0;
while (pull_index < 2) : (pull_index += 1) {
const length = pull[pull_index].count;
if (pull[pull_index].to < pull[pull_index].from) {
// we're pulling the values out to the left, which means the start of an A subarray
index = pull[pull_index].from;
count = 1;
while (count < length) : (count += 1) {
index = findFirstBackward(T, items, items[index - 1], Range.init(pull[pull_index].to, pull[pull_index].from - (count - 1)), context, lessThan, length - count);
const range = Range.init(index + 1, pull[pull_index].from + 1);
mem.rotate(T, items[range.start..range.end], range.length() - count);
pull[pull_index].from = index + count;
}
} else if (pull[pull_index].to > pull[pull_index].from) {
// we're pulling values out to the right, which means the end of a B subarray
index = pull[pull_index].from + 1;
count = 1;
while (count < length) : (count += 1) {
index = findLastForward(T, items, items[index], Range.init(index, pull[pull_index].to), context, lessThan, length - count);
const range = Range.init(pull[pull_index].from, index - 1);
mem.rotate(T, items[range.start..range.end], count);
pull[pull_index].from = index - 1 - count;
}
}
}
// adjust block_size and buffer_size based on the values we were able to pull out
buffer_size = buffer1.length();
block_size = iterator.length() / buffer_size + 1;
// the first buffer NEEDS to be large enough to tag each of the evenly sized A blocks,
// so this was originally here to test the math for adjusting block_size above
// assert((iterator.length() + 1)/block_size <= buffer_size);
// now that the two internal buffers have been created, it's time to merge each A+B combination at this level of the merge sort!
iterator.begin();
while (!iterator.finished()) {
A = iterator.nextRange();
B = iterator.nextRange();
// remove any parts of A or B that are being used by the internal buffers
start = A.start;
if (start == pull[0].range.start) {
if (pull[0].from > pull[0].to) {
A.start += pull[0].count;
// if the internal buffer takes up the entire A or B subarray, then there's nothing to merge
// this only happens for very small subarrays, like √4 = 2, 2 * (2 internal buffers) = 4,
// which also only happens when cache.len is small or 0 since it'd otherwise use MergeExternal
if (A.length() == 0) continue;
} else if (pull[0].from < pull[0].to) {
B.end -= pull[0].count;
if (B.length() == 0) continue;
}
}
if (start == pull[1].range.start) {
if (pull[1].from > pull[1].to) {
A.start += pull[1].count;
if (A.length() == 0) continue;
} else if (pull[1].from < pull[1].to) {
B.end -= pull[1].count;
if (B.length() == 0) continue;
}
}
if (lessThan(context, items[B.end - 1], items[A.start])) {
// the two ranges are in reverse order, so a simple rotation should fix it
mem.rotate(T, items[A.start..B.end], A.length());
} else if (lessThan(context, items[A.end], items[A.end - 1])) {
// these two ranges weren't already in order, so we'll need to merge them!
var findA: usize = undefined;
// break the remainder of A into blocks. firstA is the uneven-sized first A block
var blockA = Range.init(A.start, A.end);
var firstA = Range.init(A.start, A.start + blockA.length() % block_size);
// swap the first value of each A block with the value in buffer1
var indexA = buffer1.start;
index = firstA.end;
while (index < blockA.end) : ({
indexA += 1;
index += block_size;
}) {
mem.swap(T, &items[indexA], &items[index]);
}
// start rolling the A blocks through the B blocks!
// whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well
var lastA = firstA;
var lastB = Range.init(0, 0);
var blockB = Range.init(B.start, B.start + math.min(block_size, B.length()));
blockA.start += firstA.length();
indexA = buffer1.start;
// if the first unevenly sized A block fits into the cache, copy it there for when we go to Merge it
// otherwise, if the second buffer is available, block swap the contents into that
if (lastA.length() <= cache.len) {
mem.copy(T, cache[0..], items[lastA.start..lastA.end]);
} else if (buffer2.length() > 0) {
blockSwap(T, items, lastA.start, buffer2.start, lastA.length());
}
if (blockA.length() > 0) {
while (true) {
// if there's a previous B block and the first value of the minimum A block is <= the last value of the previous B block,
// then drop that minimum A block behind. or if there are no B blocks left then keep dropping the remaining A blocks.
if ((lastB.length() > 0 and !lessThan(context, items[lastB.end - 1], items[indexA])) or blockB.length() == 0) {
// figure out where to split the previous B block, and rotate it at the split
const B_split = binaryFirst(T, items, items[indexA], lastB, context, lessThan);
const B_remaining = lastB.end - B_split;
// swap the minimum A block to the beginning of the rolling A blocks
var minA = blockA.start;
findA = minA + block_size;
while (findA < blockA.end) : (findA += block_size) {
if (lessThan(context, items[findA], items[minA])) {
minA = findA;
}
}
blockSwap(T, items, blockA.start, minA, block_size);
// swap the first item of the previous A block back with its original value, which is stored in buffer1
mem.swap(T, &items[blockA.start], &items[indexA]);
indexA += 1;
// locally merge the previous A block with the B values that follow it
// if lastA fits into the external cache we'll use that (with MergeExternal),
// or if the second internal buffer exists we'll use that (with MergeInternal),
// or failing that we'll use a strictly in-place merge algorithm (MergeInPlace)
if (lastA.length() <= cache.len) {
mergeExternal(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan, cache[0..]);
} else if (buffer2.length() > 0) {
mergeInternal(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan, buffer2);
} else {
mergeInPlace(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan);
}
if (buffer2.length() > 0 or block_size <= cache.len) {
// copy the previous A block into the cache or buffer2, since that's where we need it to be when we go to merge it anyway
if (block_size <= cache.len) {
mem.copy(T, cache[0..], items[blockA.start .. blockA.start + block_size]);
} else {
blockSwap(T, items, blockA.start, buffer2.start, block_size);
}
// this is equivalent to rotating, but faster
// the area normally taken up by the A block is either the contents of buffer2, or data we don't need anymore since we memcopied it
// either way, we don't need to retain the order of those items, so instead of rotating we can just block swap B to where it belongs
blockSwap(T, items, B_split, blockA.start + block_size - B_remaining, B_remaining);
} else {
// we are unable to use the 'buffer2' trick to speed up the rotation operation since buffer2 doesn't exist, so perform a normal rotation
mem.rotate(T, items[B_split .. blockA.start + block_size], blockA.start - B_split);
}
// update the range for the remaining A blocks, and the range remaining from the B block after it was split
lastA = Range.init(blockA.start - B_remaining, blockA.start - B_remaining + block_size);
lastB = Range.init(lastA.end, lastA.end + B_remaining);
// if there are no more A blocks remaining, this step is finished!
blockA.start += block_size;
if (blockA.length() == 0) break;
} else if (blockB.length() < block_size) {
// move the last B block, which is unevenly sized, to before the remaining A blocks, by using a rotation
// the cache is disabled here since it might contain the contents of the previous A block
mem.rotate(T, items[blockA.start..blockB.end], blockB.start - blockA.start);
lastB = Range.init(blockA.start, blockA.start + blockB.length());
blockA.start += blockB.length();
blockA.end += blockB.length();
blockB.end = blockB.start;
} else {
// roll the leftmost A block to the end by swapping it with the next B block
blockSwap(T, items, blockA.start, blockB.start, block_size);
lastB = Range.init(blockA.start, blockA.start + block_size);
blockA.start += block_size;
blockA.end += block_size;
blockB.start += block_size;
if (blockB.end > B.end - block_size) {
blockB.end = B.end;
} else {
blockB.end += block_size;
}
}
}
}
// merge the last A block with the remaining B values
if (lastA.length() <= cache.len) {
mergeExternal(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan, cache[0..]);
} else if (buffer2.length() > 0) {
mergeInternal(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan, buffer2);
} else {
mergeInPlace(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan);
}
}
}
// when we're finished with this merge step we should have the one or two internal buffers left over, where the second buffer is all jumbled up
// insertion sort the second buffer, then redistribute the buffers back into the items using the opposite process used for creating the buffer
// while an unstable sort like quicksort could be applied here, in benchmarks it was consistently slightly slower than a simple insertion sort,
// even for tens of millions of items. this may be because insertion sort is quite fast when the data is already somewhat sorted, like it is here
insertionSort(T, items[buffer2.start..buffer2.end], context, lessThan);
pull_index = 0;
while (pull_index < 2) : (pull_index += 1) {
var unique = pull[pull_index].count * 2;
if (pull[pull_index].from > pull[pull_index].to) {
// the values were pulled out to the left, so redistribute them back to the right
var buffer = Range.init(pull[pull_index].range.start, pull[pull_index].range.start + pull[pull_index].count);
while (buffer.length() > 0) {
index = findFirstForward(T, items, items[buffer.start], Range.init(buffer.end, pull[pull_index].range.end), context, lessThan, unique);
const amount = index - buffer.end;
mem.rotate(T, items[buffer.start..index], buffer.length());
buffer.start += (amount + 1);
buffer.end += amount;
unique -= 2;
}
} else if (pull[pull_index].from < pull[pull_index].to) {
// the values were pulled out to the right, so redistribute them back to the left
var buffer = Range.init(pull[pull_index].range.end - pull[pull_index].count, pull[pull_index].range.end);
while (buffer.length() > 0) {
index = findLastBackward(T, items, items[buffer.end - 1], Range.init(pull[pull_index].range.start, buffer.start), context, lessThan, unique);
const amount = buffer.start - index;
mem.rotate(T, items[index..buffer.end], amount);
buffer.start -= amount;
buffer.end -= (amount + 1);
unique -= 2;
}
}
}
}
// double the size of each A and B subarray that will be merged in the next level
if (!iterator.nextLevel()) break;
}
}
// merge operation without a buffer
// Merges the adjacent sorted ranges A_arg and B_arg of `items` in place using
// binary search + rotation only (no scratch memory). Stable.
fn mergeInPlace(
    comptime T: type,
    items: []T,
    A_arg: Range,
    B_arg: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
) void {
    if (A_arg.length() == 0 or B_arg.length() == 0) return;
    // this just repeatedly binary searches into B and rotates A into position.
    // the paper suggests using the 'rotation-based Hwang and Lin algorithm' here,
    // but I decided to stick with this because it had better situational performance
    //
    // (Hwang and Lin is designed for merging subarrays of very different sizes,
    // but WikiSort almost always uses subarrays that are roughly the same size)
    //
    // normally this is incredibly suboptimal, but this function is only called
    // when none of the A or B blocks in any subarray contained 2√A unique values,
    // which places a hard limit on the number of times this will ACTUALLY need
    // to binary search and rotate.
    //
    // according to my analysis the worst case is √A rotations performed on √A items
    // once the constant factors are removed, which ends up being O(n)
    //
    // again, this is NOT a general-purpose solution – it only works well in this case!
    // kind of like how the O(n^2) insertion sort is used in some places
    var A = A_arg;
    var B = B_arg;
    while (true) {
        // find the first place in B where the first item in A needs to be inserted
        const mid = binaryFirst(T, items, items[A.start], B, context, lessThan);
        // rotate A into place
        const amount = mid - A.end;
        mem.rotate(T, items[A.start..mid], A.length());
        // if A was rotated to the very end of B, the merge is complete
        if (B.end == mid) break;
        // calculate the new A and B ranges
        B.start = mid;
        A = Range.init(A.start + amount, B.start);
        // skip over any leading items of A that are already in position
        A.start = binaryLast(T, items, items[A.start], A, context, lessThan);
        if (A.length() == 0) break;
    }
}
// merge operation using an internal buffer
// Merges A and B within `items`, where the contents of A have previously been
// block-swapped into `buffer` (see the blockSwap calls at the call sites).
// Values are placed by swapping, so `buffer` ends up holding its original
// contents in a scrambled order rather than losing them.
fn mergeInternal(
    comptime T: type,
    items: []T,
    A: Range,
    B: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    buffer: Range,
) void {
    // whenever we find a value to add to the final array, swap it with the value that's already in that spot
    // when this algorithm is finished, 'buffer' will contain its original contents, but in a different order
    var A_count: usize = 0; // how many A values (read from `buffer`) have been placed
    var B_count: usize = 0; // how many B values have been placed
    var insert: usize = 0; // next output slot, relative to A.start
    if (B.length() > 0 and A.length() > 0) {
        while (true) {
            // `!lessThan(B, A)` keeps equal elements in A-first order, preserving stability
            if (!lessThan(context, items[B.start + B_count], items[buffer.start + A_count])) {
                mem.swap(T, &items[A.start + insert], &items[buffer.start + A_count]);
                A_count += 1;
                insert += 1;
                if (A_count >= A.length()) break;
            } else {
                mem.swap(T, &items[A.start + insert], &items[B.start + B_count]);
                B_count += 1;
                insert += 1;
                if (B_count >= B.length()) break;
            }
        }
    }
    // swap the remainder of A into the final array
    blockSwap(T, items, buffer.start + A_count, A.start + insert, A.length() - A_count);
}
/// Swaps the `block_size` items starting at `start1` with the ones starting at `start2`.
/// The two blocks are assumed not to overlap.
fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_size: usize) void {
    var offset: usize = 0;
    while (offset < block_size) : (offset += 1) {
        const held = items[start1 + offset];
        items[start1 + offset] = items[start2 + offset];
        items[start2 + offset] = held;
    }
}
// combine a linear search with a binary search to reduce the number of comparisons in situations
// where have some idea as to how many unique values there are and where the next value might be
// Scans forward in strides of `range.length()/unique`, then binary-searches the
// last stride for the first position whose item is not less than `value`.
fn findFirstForward(
    comptime T: type,
    items: []T,
    value: T,
    range: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    unique: usize,
) usize {
    if (range.length() == 0) return range.start;
    // stride length; at least 1 so the loop always advances
    const skip = math.max(range.length() / unique, @as(usize, 1));
    var index = range.start + skip;
    while (lessThan(context, items[index - 1], value)) : (index += skip) {
        if (index >= range.end - skip) {
            // nearing the end of the range: finish with a binary search over the tail
            return binaryFirst(T, items, value, Range.init(index, range.end), context, lessThan);
        }
    }
    // the answer lies within the last stride we stepped over
    return binaryFirst(T, items, value, Range.init(index - skip, index), context, lessThan);
}
// Like findFirstForward, but scans backward from the end of `range` in strides,
// then binary-searches the final stride for the first position whose item is
// not less than `value`.
fn findFirstBackward(
    comptime T: type,
    items: []T,
    value: T,
    range: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    unique: usize,
) usize {
    if (range.length() == 0) return range.start;
    // stride length; at least 1 so the loop always advances
    const skip = math.max(range.length() / unique, @as(usize, 1));
    var index = range.end - skip;
    while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) {
        if (index < range.start + skip) {
            // nearing the start of the range: finish with a binary search over the head
            return binaryFirst(T, items, value, Range.init(range.start, index), context, lessThan);
        }
    }
    // the answer lies within the last stride we stepped over
    return binaryFirst(T, items, value, Range.init(index, index + skip), context, lessThan);
}
// Scans forward in strides, then binary-searches the last stride for the first
// position whose item is strictly greater than `value` (i.e. one past the last
// item <= value).
fn findLastForward(
    comptime T: type,
    items: []T,
    value: T,
    range: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    unique: usize,
) usize {
    if (range.length() == 0) return range.start;
    // stride length; at least 1 so the loop always advances
    const skip = math.max(range.length() / unique, @as(usize, 1));
    var index = range.start + skip;
    while (!lessThan(context, value, items[index - 1])) : (index += skip) {
        if (index >= range.end - skip) {
            // nearing the end of the range: finish with a binary search over the tail
            return binaryLast(T, items, value, Range.init(index, range.end), context, lessThan);
        }
    }
    // the answer lies within the last stride we stepped over
    return binaryLast(T, items, value, Range.init(index - skip, index), context, lessThan);
}
// Like findLastForward, but scans backward from the end of `range` in strides,
// then binary-searches the final stride for the first position whose item is
// strictly greater than `value`.
fn findLastBackward(
    comptime T: type,
    items: []T,
    value: T,
    range: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    unique: usize,
) usize {
    if (range.length() == 0) return range.start;
    // stride length; at least 1 so the loop always advances
    const skip = math.max(range.length() / unique, @as(usize, 1));
    var index = range.end - skip;
    while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) {
        if (index < range.start + skip) {
            // nearing the start of the range: finish with a binary search over the head
            return binaryLast(T, items, value, Range.init(range.start, index), context, lessThan);
        }
    }
    // the answer lies within the last stride we stepped over
    return binaryLast(T, items, value, Range.init(index, index + skip), context, lessThan);
}
/// Binary search: returns the index of the first item in `range` that is not
/// less than `value`, or `range.end` if every item is less (or the range is empty).
fn binaryFirst(
    comptime T: type,
    items: []T,
    value: T,
    range: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
) usize {
    if (range.start >= range.end) return range.end;
    var lo = range.start;
    var remaining = range.length();
    // Halve the window each step; when the probed item is still < value,
    // advance `lo` past it (the odd element, if any, goes with the right half).
    while (remaining > 0) {
        const half = remaining / 2;
        const parity = remaining % 2;
        if (lessThan(context, items[lo + half], value)) {
            lo += half + parity;
        }
        remaining = half;
    }
    return lo;
}
/// Binary search: returns the index of the first item in `range` that is
/// strictly greater than `value` (one past the last item <= value), or
/// `range.end` if no such item exists (or the range is empty).
fn binaryLast(
    comptime T: type,
    items: []T,
    value: T,
    range: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
) usize {
    if (range.start >= range.end) return range.end;
    var lo = range.start;
    var remaining = range.length();
    // Halve the window each step; while the probed item is <= value,
    // advance `lo` past it (the odd element, if any, goes with the right half).
    while (remaining > 0) {
        const half = remaining / 2;
        const parity = remaining % 2;
        if (!lessThan(context, value, items[lo + half])) {
            lo += half + parity;
        }
        remaining = half;
    }
    return lo;
}
/// Merges the sorted ranges A and B of `from` into `into`, starting at index 0.
/// Stable: on ties the A element is emitted first. `into` must have room for
/// A.length() + B.length() items.
fn mergeInto(
    comptime T: type,
    from: []T,
    A: Range,
    B: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    into: []T,
) void {
    var a = A.start;
    var b = B.start;
    var out: usize = 0;
    while (true) {
        if (lessThan(context, from[b], from[a])) {
            // B's head is strictly smaller: emit it
            into[out] = from[b];
            out += 1;
            b += 1;
            if (b == B.end) {
                // B exhausted: bulk-copy the rest of A
                mem.copy(T, into[out..], from[a..A.end]);
                return;
            }
        } else {
            // A's head is <= B's head: emit it (keeps equal elements stable)
            into[out] = from[a];
            out += 1;
            a += 1;
            if (a == A.end) {
                // A exhausted: bulk-copy the rest of B
                mem.copy(T, into[out..], from[b..B.end]);
                return;
            }
        }
    }
}
/// Merges range B of `items` with the contents of A, which have already been
/// copied into `cache` (A fits into the cache, so use that instead of the
/// internal buffer). Output is written back into `items` starting at A.start.
/// Stable: on ties the cached A element wins.
fn mergeExternal(
    comptime T: type,
    items: []T,
    A: Range,
    B: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    cache: []T,
) void {
    var a: usize = 0; // read cursor into `cache` (holds A's values)
    var b = B.start; // read cursor into B within `items`
    var dest = A.start; // write cursor into `items`
    const a_end = A.length();
    const b_end = B.end;
    // merge until either side runs out
    while (a < a_end and b < b_end) {
        if (lessThan(context, items[b], cache[a])) {
            items[dest] = items[b];
            b += 1;
        } else {
            items[dest] = cache[a];
            a += 1;
        }
        dest += 1;
    }
    // copy the remainder of A into the final array
    // (if B ran out first; a no-op when A was exhausted)
    mem.copy(T, items[dest..], cache[a..a_end]);
}
/// Conditionally swaps items[x] and items[y] (and their entries in `order`)
/// so that the pair ends up sorted. `order` records original positions so
/// that equal items are kept in their original relative order (stability).
fn swap(
    comptime T: type,
    items: []T,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
    order: *[8]u8,
    x: usize,
    y: usize,
) void {
    // Short-circuit: only when the items compare equal do we consult `order`
    // to decide whether the earlier-positioned one is currently second.
    const strictly_out_of_order = lessThan(context, items[y], items[x]);
    if (strictly_out_of_order or (order[x] > order[y] and !lessThan(context, items[x], items[y]))) {
        mem.swap(T, &items[x], &items[y]);
        mem.swap(u8, &order[x], &order[y]);
    }
}
/// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, comptime asc(u8))`.
/// Returns a strict less-than comparator, producing an ascending sort order.
pub fn asc(comptime T: type) fn (void, T, T) bool {
    const Closure = struct {
        fn compare(context: void, lhs: T, rhs: T) bool {
            _ = context;
            return lhs < rhs;
        }
    };
    return Closure.compare;
}
/// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, comptime desc(u8))`.
/// Returns a strict greater-than comparator, producing a descending sort order.
pub fn desc(comptime T: type) fn (void, T, T) bool {
    const Closure = struct {
        fn compare(context: void, lhs: T, rhs: T) bool {
            _ = context;
            return lhs > rhs;
        }
    };
    return Closure.compare;
}
// Verify stability of insertionSort both at runtime and at comptime.
test "stable sort" {
    try testStableSort();
    comptime try testStableSort();
}
// Sorts two permutations of (id, value) pairs by value and checks that pairs
// with equal values keep their original id order (i.e. the sort is stable).
fn testStableSort() !void {
    var expected = [_]IdAndValue{
        IdAndValue{ .id = 0, .value = 0 },
        IdAndValue{ .id = 1, .value = 0 },
        IdAndValue{ .id = 2, .value = 0 },
        IdAndValue{ .id = 0, .value = 1 },
        IdAndValue{ .id = 1, .value = 1 },
        IdAndValue{ .id = 2, .value = 1 },
        IdAndValue{ .id = 0, .value = 2 },
        IdAndValue{ .id = 1, .value = 2 },
        IdAndValue{ .id = 2, .value = 2 },
    };
    var cases = [_][9]IdAndValue{
        // values grouped in ascending id order
        [_]IdAndValue{
            IdAndValue{ .id = 0, .value = 0 },
            IdAndValue{ .id = 0, .value = 1 },
            IdAndValue{ .id = 0, .value = 2 },
            IdAndValue{ .id = 1, .value = 0 },
            IdAndValue{ .id = 1, .value = 1 },
            IdAndValue{ .id = 1, .value = 2 },
            IdAndValue{ .id = 2, .value = 0 },
            IdAndValue{ .id = 2, .value = 1 },
            IdAndValue{ .id = 2, .value = 2 },
        },
        // values in descending order within each id group
        [_]IdAndValue{
            IdAndValue{ .id = 0, .value = 2 },
            IdAndValue{ .id = 0, .value = 1 },
            IdAndValue{ .id = 0, .value = 0 },
            IdAndValue{ .id = 1, .value = 2 },
            IdAndValue{ .id = 1, .value = 1 },
            IdAndValue{ .id = 1, .value = 0 },
            IdAndValue{ .id = 2, .value = 2 },
            IdAndValue{ .id = 2, .value = 1 },
            IdAndValue{ .id = 2, .value = 0 },
        },
    };
    for (cases) |*case| {
        insertionSort(IdAndValue, (case.*)[0..], {}, cmpByValue);
        // both ids and values must match the expected stable ordering
        for (case.*) |item, i| {
            try testing.expect(item.id == expected[i].id);
            try testing.expect(item.value == expected[i].value);
        }
    }
}
// Test record pairing an original position (`id`) with a sort key (`value`);
// used by the stability tests to detect reordering of equal keys.
const IdAndValue = struct {
    id: usize,
    value: i32,
};
/// Comparator for IdAndValue that orders ascending by `value` only,
/// deliberately ignoring `id` so stability can be observed.
fn cmpByValue(context: void, a: IdAndValue, b: IdAndValue) bool {
    _ = context;
    return a.value < b.value;
}
// Pre-instantiated comparators shared by the tests below.
const asc_u8 = asc(u8);
const asc_i32 = asc(i32);
const desc_u8 = desc(u8);
const desc_i32 = desc(i32);
// Table-driven ascending-sort tests: each case is a {input, expected} pair.
test "sort" {
    const u8cases = [_][]const []const u8{
        &[_][]const u8{
            "",
            "",
        },
        &[_][]const u8{
            "a",
            "a",
        },
        &[_][]const u8{
            "az",
            "az",
        },
        &[_][]const u8{
            "za",
            "az",
        },
        &[_][]const u8{
            "asdf",
            "adfs",
        },
        &[_][]const u8{
            "one",
            "eno",
        },
    };
    for (u8cases) |case| {
        // copy the input into a mutable buffer, sort it, compare to expected
        var buf: [8]u8 = undefined;
        const slice = buf[0..case[0].len];
        mem.copy(u8, slice, case[0]);
        sort(u8, slice, {}, asc_u8);
        try testing.expect(mem.eql(u8, slice, case[1]));
    }
    const i32cases = [_][]const []const i32{
        &[_][]const i32{
            &[_]i32{},
            &[_]i32{},
        },
        &[_][]const i32{
            &[_]i32{1},
            &[_]i32{1},
        },
        &[_][]const i32{
            &[_]i32{ 0, 1 },
            &[_]i32{ 0, 1 },
        },
        &[_][]const i32{
            &[_]i32{ 1, 0 },
            &[_]i32{ 0, 1 },
        },
        &[_][]const i32{
            &[_]i32{ 1, -1, 0 },
            &[_]i32{ -1, 0, 1 },
        },
        &[_][]const i32{
            &[_]i32{ 2, 1, 3 },
            &[_]i32{ 1, 2, 3 },
        },
    };
    for (i32cases) |case| {
        // same shape as the u8 cases, but exercising a signed element type
        var buf: [8]i32 = undefined;
        const slice = buf[0..case[0].len];
        mem.copy(i32, slice, case[0]);
        sort(i32, slice, {}, asc_i32);
        try testing.expect(mem.eql(i32, slice, case[1]));
    }
}
// Table-driven descending-sort tests using the desc(i32) comparator.
test "sort descending" {
    const rev_cases = [_][]const []const i32{
        &[_][]const i32{
            &[_]i32{},
            &[_]i32{},
        },
        &[_][]const i32{
            &[_]i32{1},
            &[_]i32{1},
        },
        &[_][]const i32{
            &[_]i32{ 0, 1 },
            &[_]i32{ 1, 0 },
        },
        &[_][]const i32{
            &[_]i32{ 1, 0 },
            &[_]i32{ 1, 0 },
        },
        &[_][]const i32{
            &[_]i32{ 1, -1, 0 },
            &[_]i32{ 1, 0, -1 },
        },
        &[_][]const i32{
            &[_]i32{ 2, 1, 3 },
            &[_]i32{ 3, 2, 1 },
        },
    };
    for (rev_cases) |case| {
        // copy the input into a mutable buffer, sort descending, compare
        var buf: [8]i32 = undefined;
        const slice = buf[0..case[0].len];
        mem.copy(i32, slice, case[0]);
        sort(i32, slice, {}, desc_i32);
        try testing.expect(mem.eql(i32, slice, case[1]));
    }
}
// Small ascending-sort smoke test on a fixed 5-element array.
test "another sort case" {
    var arr = [_]i32{ 5, 3, 1, 2, 4 };
    sort(i32, arr[0..], {}, asc_i32);
    try testing.expect(mem.eql(i32, &arr, &[_]i32{ 1, 2, 3, 4, 5 }));
}
// Runs the randomized stability check several times with a fixed seed,
// so failures are reproducible.
test "sort fuzz testing" {
    var prng = std.rand.DefaultPrng.init(0x12345678);
    const random = prng.random();
    const test_case_count = 10;
    var i: usize = 0;
    while (i < test_case_count) : (i += 1) {
        try fuzzTest(random);
    }
}
// NOTE(review): backing storage, presumably for a FixedBufferAllocator used by
// tests elsewhere in the file — it is not referenced in this portion; confirm before removing.
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
// Builds a random array of (id, value) pairs, sorts it by value, and asserts
// the result is sorted AND stable (equal values keep ascending id order).
fn fuzzTest(rng: std.rand.Random) !void {
    const array_size = rng.intRangeLessThan(usize, 0, 1000);
    var array = try testing.allocator.alloc(IdAndValue, array_size);
    defer testing.allocator.free(array);
    // populate with random data; id records the original position
    for (array) |*item, index| {
        item.id = index;
        item.value = rng.intRangeLessThan(i32, 0, 100);
    }
    sort(IdAndValue, array, {}, cmpByValue);
    var index: usize = 1;
    while (index < array.len) : (index += 1) {
        if (array[index].value == array[index - 1].value) {
            // equal values: original order (ascending id) must be preserved
            try testing.expect(array[index].id > array[index - 1].id);
        } else {
            // otherwise values must be strictly ascending
            try testing.expect(array[index].value > array[index - 1].value);
        }
    }
}
/// Returns the index of the smallest element in `items` according to
/// `lessThan`, or null if the slice is empty. On ties the first (lowest)
/// index wins.
pub fn argMin(
    comptime T: type,
    items: []const T,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
) ?usize {
    if (items.len == 0) {
        return null;
    }
    var best_index: usize = 0;
    var best = items[0];
    var i: usize = 1;
    while (i < items.len) : (i += 1) {
        // strict comparison keeps the earliest index on equal elements
        if (lessThan(context, items[i], best)) {
            best = items[i];
            best_index = i;
        }
    }
    return best_index;
}
// argMin: empty slice yields null; ties resolve to the first index;
// the last case uses a descending comparator to find the max instead.
test "argMin" {
    try testing.expectEqual(@as(?usize, null), argMin(i32, &[_]i32{}, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMin(i32, &[_]i32{1}, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMin(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 3), argMin(i32, &[_]i32{ 9, 3, 8, 2, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMin(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMin(i32, &[_]i32{ -10, 1, 10 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 3), argMin(i32, &[_]i32{ 6, 3, 5, 7, 6 }, {}, desc_i32));
}
/// Returns the smallest element of `items` according to `lessThan`,
/// or null if the slice is empty.
pub fn min(
    comptime T: type,
    items: []const T,
    context: anytype,
    comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) ?T {
    // Delegate to argMin; a null index means the slice was empty.
    if (argMin(T, items, context, lessThan)) |index| {
        return items[index];
    }
    return null;
}
test "min" {
    // Mirrors the argMin cases but checks the returned value rather than
    // the index; desc_i32 inverts the order, so min yields the max value.
    try testing.expectEqual(@as(?i32, null), min(i32, &[_]i32{}, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 1), min(i32, &[_]i32{1}, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 1), min(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 2), min(i32, &[_]i32{ 9, 3, 8, 2, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 1), min(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, -10), min(i32, &[_]i32{ -10, 1, 10 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 7), min(i32, &[_]i32{ 6, 3, 5, 7, 6 }, {}, desc_i32));
}
/// Returns the index of the largest element in `items` according to
/// `lessThan`, or `null` when the slice is empty. When several elements
/// compare equal, the index of the first one is returned.
pub fn argMax(
    comptime T: type,
    items: []const T,
    context: anytype,
    comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) ?usize {
    if (items.len == 0) {
        return null;
    }
    var best = items[0];
    var best_index: usize = 0;
    var idx: usize = 1;
    while (idx < items.len) : (idx += 1) {
        // Strict comparison: equal elements never replace the current
        // maximum, so the first occurrence wins.
        if (lessThan(context, best, items[idx])) {
            best = items[idx];
            best_index = idx;
        }
    }
    return best_index;
}
test "argMax" {
    // Golden cases: empty slice yields null; ties resolve to the first
    // (lowest) index; desc_i32 inverts the order, so argMax finds the min.
    try testing.expectEqual(@as(?usize, null), argMax(i32, &[_]i32{}, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMax(i32, &[_]i32{1}, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 4), argMax(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMax(i32, &[_]i32{ 9, 3, 8, 2, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMax(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 2), argMax(i32, &[_]i32{ -10, 1, 10 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 1), argMax(i32, &[_]i32{ 6, 3, 5, 7, 6 }, {}, desc_i32));
}
/// Returns the largest element of `items` according to `lessThan`,
/// or `null` when the slice is empty.
pub fn max(
    comptime T: type,
    items: []const T,
    context: anytype,
    comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) ?T {
    if (argMax(T, items, context, lessThan)) |best_index| {
        return items[best_index];
    }
    return null;
}
test "max" {
    // Mirrors the argMax cases but checks the returned value rather than
    // the index; desc_i32 inverts the order, so max yields the min value.
    try testing.expectEqual(@as(?i32, null), max(i32, &[_]i32{}, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 1), max(i32, &[_]i32{1}, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 5), max(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 9), max(i32, &[_]i32{ 9, 3, 8, 2, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 1), max(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 10), max(i32, &[_]i32{ -10, 1, 10 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 3), max(i32, &[_]i32{ 6, 3, 5, 7, 6 }, {}, desc_i32));
}
/// Returns true when `items` is in nondescending order with respect to
/// `lessThan` (equal neighbours are allowed). Empty and single-element
/// slices are trivially sorted.
pub fn isSorted(
    comptime T: type,
    items: []const T,
    context: anytype,
    comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) bool {
    var idx: usize = 0;
    while (idx + 1 < items.len) : (idx += 1) {
        // A strictly descending adjacent pair proves the slice is unsorted.
        if (lessThan(context, items[idx + 1], items[idx])) {
            return false;
        }
    }
    return true;
}
test "isSorted" {
    // Ascending and descending comparators over i32 and u8 inputs; all-equal
    // runs count as sorted under both orders because the check is strict.
    try testing.expect(isSorted(i32, &[_]i32{}, {}, asc_i32));
    try testing.expect(isSorted(i32, &[_]i32{10}, {}, asc_i32));
    try testing.expect(isSorted(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32));
    try testing.expect(isSorted(i32, &[_]i32{ -10, 1, 1, 1, 10 }, {}, asc_i32));
    try testing.expect(isSorted(i32, &[_]i32{}, {}, desc_i32));
    try testing.expect(isSorted(i32, &[_]i32{-20}, {}, desc_i32));
    try testing.expect(isSorted(i32, &[_]i32{ 3, 2, 1, 0, -1 }, {}, desc_i32));
    try testing.expect(isSorted(i32, &[_]i32{ 10, -10 }, {}, desc_i32));
    try testing.expect(isSorted(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32));
    try testing.expect(isSorted(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, desc_i32));
    try testing.expectEqual(false, isSorted(i32, &[_]i32{ 5, 4, 3, 2, 1 }, {}, asc_i32));
    try testing.expectEqual(false, isSorted(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, desc_i32));
    try testing.expect(isSorted(u8, "abcd", {}, asc_u8));
    try testing.expect(isSorted(u8, "zyxw", {}, desc_u8));
    try testing.expectEqual(false, isSorted(u8, "abcd", {}, desc_u8));
    try testing.expectEqual(false, isSorted(u8, "zyxw", {}, asc_u8));
    try testing.expect(isSorted(u8, "ffff", {}, asc_u8));
    try testing.expect(isSorted(u8, "ffff", {}, desc_u8));
}
| https://raw.githubusercontent.com/kraxli/dev_tools_mswindows/1d1a8f61299e4b7ba356fae3a37af0ddc8daf356/zig-windows-x86_64-0.9.1/lib/std/sort.zig |
const std = @import("std.zig");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const math = std.math;
/// Searches `items` — which must already be sorted with respect to
/// `compareFn` — for `key` using binary search. Returns the index of an
/// element that compares `.eq` to `key`, or `null` when none exists. If
/// several elements match, any one of their indices may be returned.
pub fn binarySearch(
    comptime T: type,
    key: T,
    items: []const T,
    context: anytype,
    comptime compareFn: fn (context: @TypeOf(context), lhs: T, rhs: T) math.Order,
) ?usize {
    var low: usize = 0;
    var high: usize = items.len;
    while (low < high) {
        // Midpoint computed without risking overflow of `low + high`.
        const mid = low + (high - low) / 2;
        const ordering = compareFn(context, key, items[mid]);
        if (ordering == .eq) return mid;
        if (ordering == .gt) {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    return null;
}
test "binarySearch" {
    // Exercises empty input, single-element hits and misses, and hits at
    // the first, last and interior positions for both u32 and i32.
    const S = struct {
        fn order_u32(context: void, lhs: u32, rhs: u32) math.Order {
            _ = context;
            return math.order(lhs, rhs);
        }
        fn order_i32(context: void, lhs: i32, rhs: i32) math.Order {
            _ = context;
            return math.order(lhs, rhs);
        }
    };
    try testing.expectEqual(
        @as(?usize, null),
        binarySearch(u32, 1, &[_]u32{}, {}, S.order_u32),
    );
    try testing.expectEqual(
        @as(?usize, 0),
        binarySearch(u32, 1, &[_]u32{1}, {}, S.order_u32),
    );
    try testing.expectEqual(
        @as(?usize, null),
        binarySearch(u32, 1, &[_]u32{0}, {}, S.order_u32),
    );
    try testing.expectEqual(
        @as(?usize, null),
        binarySearch(u32, 0, &[_]u32{1}, {}, S.order_u32),
    );
    try testing.expectEqual(
        @as(?usize, 4),
        binarySearch(u32, 5, &[_]u32{ 1, 2, 3, 4, 5 }, {}, S.order_u32),
    );
    try testing.expectEqual(
        @as(?usize, 0),
        binarySearch(u32, 2, &[_]u32{ 2, 4, 8, 16, 32, 64 }, {}, S.order_u32),
    );
    try testing.expectEqual(
        @as(?usize, 1),
        binarySearch(i32, -4, &[_]i32{ -7, -4, 0, 9, 10 }, {}, S.order_i32),
    );
    try testing.expectEqual(
        @as(?usize, 3),
        binarySearch(i32, 98, &[_]i32{ -100, -25, 2, 98, 99, 100 }, {}, S.order_i32),
    );
}
/// Stable in-place insertion sort. O(n) best case (already sorted input),
/// O(pow(n, 2)) worst case. O(1) memory — no allocator required.
pub fn insertionSort(
    comptime T: type,
    items: []T,
    context: anytype,
    comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) void {
    var idx: usize = 1;
    while (idx < items.len) : (idx += 1) {
        const value = items[idx];
        // Shift the sorted prefix right until the insertion point is found;
        // the strict comparison keeps equal elements in order (stability).
        var hole = idx;
        while (hole > 0 and lessThan(context, value, items[hole - 1])) : (hole -= 1) {
            items[hole] = items[hole - 1];
        }
        items[hole] = value;
    }
}
/// A half-open interval [start, end) of indices into the items being sorted.
const Range = struct {
    start: usize,
    end: usize,
    /// Construct the range [start, end).
    fn init(start: usize, end: usize) Range {
        return Range{
            .start = start,
            .end = end,
        };
    }
    /// Number of indices covered by the range.
    fn length(self: Range) usize {
        return self.end - self.start;
    }
};
/// Yields successive contiguous ranges of the items for one level of the
/// merge sort, and doubles the range length when moving to the next level.
/// Since the item count is rarely an exact power of two, boundaries are
/// tracked with fixed-point arithmetic: `decimal` is the integer part of
/// the current position, while `numerator`/`denominator` accumulate the
/// fractional remainder, so the ranges produced within a level differ in
/// length by at most one.
const Iterator = struct {
    size: usize,
    power_of_two: usize,
    numerator: usize,
    decimal: usize,
    denominator: usize,
    decimal_step: usize,
    numerator_step: usize,
    fn init(size2: usize, min_level: usize) Iterator {
        const power_of_two = math.floorPowerOfTwo(usize, size2);
        const denominator = power_of_two / min_level;
        return Iterator{
            .numerator = 0,
            .decimal = 0,
            .size = size2,
            .power_of_two = power_of_two,
            .denominator = denominator,
            // Integer and fractional parts of size2 / denominator: each of
            // the `denominator` ranges gets decimal_step items, plus one
            // extra whenever the fractional carry overflows.
            .decimal_step = size2 / denominator,
            .numerator_step = size2 % denominator,
        };
    }
    /// Rewind to the first range of the current level.
    fn begin(self: *Iterator) void {
        self.numerator = 0;
        self.decimal = 0;
    }
    /// Return the next range at this level, advancing the position by
    /// decimal_step and carrying the fractional remainder into `decimal`.
    fn nextRange(self: *Iterator) Range {
        const start = self.decimal;
        self.decimal += self.decimal_step;
        self.numerator += self.numerator_step;
        if (self.numerator >= self.denominator) {
            self.numerator -= self.denominator;
            self.decimal += 1;
        }
        return Range{
            .start = start,
            .end = self.decimal,
        };
    }
    /// True once the current level's ranges have covered all items.
    fn finished(self: *Iterator) bool {
        return self.decimal >= self.size;
    }
    /// Double the step (merging ranges pairwise for the next level).
    /// Returns false when a single range would span all of the items,
    /// i.e. the sort is on its final level.
    fn nextLevel(self: *Iterator) bool {
        self.decimal_step += self.decimal_step;
        self.numerator_step += self.numerator_step;
        if (self.numerator_step >= self.denominator) {
            self.numerator_step -= self.denominator;
            self.decimal_step += 1;
        }
        return (self.decimal_step < self.size);
    }
    /// Base length of ranges at the current level (the integer part of the
    /// step; actual ranges may be one longer due to the fractional carry).
    fn length(self: *Iterator) usize {
        return self.decimal_step;
    }
};
/// Bookkeeping for "pulling out" unique values into an internal buffer
/// during the block merge in `sort`: `count` values are gathered around
/// index `from` and rotated toward index `to`, within the A+B subarray
/// described by `range`. NOTE(review): semantics inferred from the usage
/// in `sort` below — confirm against the WikiSort reference implementation.
const Pull = struct {
    from: usize,
    to: usize,
    count: usize,
    range: Range,
};
/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. O(1) memory (no allocator required).
/// Currently implemented as block sort.
pub fn sort(
comptime T: type,
items: []T,
context: anytype,
comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) void {
// Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
var cache: [512]T = undefined;
if (items.len < 4) {
if (items.len == 3) {
// hard coded insertion sort
if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
if (lessThan(context, items[2], items[1])) {
mem.swap(T, &items[1], &items[2]);
if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
}
} else if (items.len == 2) {
if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
}
return;
}
// sort groups of 4-8 items at a time using an unstable sorting network,
// but keep track of the original item orders to force it to be stable
// http://pages.ripco.net/~jgamble/nw.html
var iterator = Iterator.init(items.len, 4);
while (!iterator.finished()) {
var order = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7 };
const range = iterator.nextRange();
const sliced_items = items[range.start..];
switch (range.length()) {
8 => {
swap(T, sliced_items, context, lessThan, &order, 0, 1);
swap(T, sliced_items, context, lessThan, &order, 2, 3);
swap(T, sliced_items, context, lessThan, &order, 4, 5);
swap(T, sliced_items, context, lessThan, &order, 6, 7);
swap(T, sliced_items, context, lessThan, &order, 0, 2);
swap(T, sliced_items, context, lessThan, &order, 1, 3);
swap(T, sliced_items, context, lessThan, &order, 4, 6);
swap(T, sliced_items, context, lessThan, &order, 5, 7);
swap(T, sliced_items, context, lessThan, &order, 1, 2);
swap(T, sliced_items, context, lessThan, &order, 5, 6);
swap(T, sliced_items, context, lessThan, &order, 0, 4);
swap(T, sliced_items, context, lessThan, &order, 3, 7);
swap(T, sliced_items, context, lessThan, &order, 1, 5);
swap(T, sliced_items, context, lessThan, &order, 2, 6);
swap(T, sliced_items, context, lessThan, &order, 1, 4);
swap(T, sliced_items, context, lessThan, &order, 3, 6);
swap(T, sliced_items, context, lessThan, &order, 2, 4);
swap(T, sliced_items, context, lessThan, &order, 3, 5);
swap(T, sliced_items, context, lessThan, &order, 3, 4);
},
7 => {
swap(T, sliced_items, context, lessThan, &order, 1, 2);
swap(T, sliced_items, context, lessThan, &order, 3, 4);
swap(T, sliced_items, context, lessThan, &order, 5, 6);
swap(T, sliced_items, context, lessThan, &order, 0, 2);
swap(T, sliced_items, context, lessThan, &order, 3, 5);
swap(T, sliced_items, context, lessThan, &order, 4, 6);
swap(T, sliced_items, context, lessThan, &order, 0, 1);
swap(T, sliced_items, context, lessThan, &order, 4, 5);
swap(T, sliced_items, context, lessThan, &order, 2, 6);
swap(T, sliced_items, context, lessThan, &order, 0, 4);
swap(T, sliced_items, context, lessThan, &order, 1, 5);
swap(T, sliced_items, context, lessThan, &order, 0, 3);
swap(T, sliced_items, context, lessThan, &order, 2, 5);
swap(T, sliced_items, context, lessThan, &order, 1, 3);
swap(T, sliced_items, context, lessThan, &order, 2, 4);
swap(T, sliced_items, context, lessThan, &order, 2, 3);
},
6 => {
swap(T, sliced_items, context, lessThan, &order, 1, 2);
swap(T, sliced_items, context, lessThan, &order, 4, 5);
swap(T, sliced_items, context, lessThan, &order, 0, 2);
swap(T, sliced_items, context, lessThan, &order, 3, 5);
swap(T, sliced_items, context, lessThan, &order, 0, 1);
swap(T, sliced_items, context, lessThan, &order, 3, 4);
swap(T, sliced_items, context, lessThan, &order, 2, 5);
swap(T, sliced_items, context, lessThan, &order, 0, 3);
swap(T, sliced_items, context, lessThan, &order, 1, 4);
swap(T, sliced_items, context, lessThan, &order, 2, 4);
swap(T, sliced_items, context, lessThan, &order, 1, 3);
swap(T, sliced_items, context, lessThan, &order, 2, 3);
},
5 => {
swap(T, sliced_items, context, lessThan, &order, 0, 1);
swap(T, sliced_items, context, lessThan, &order, 3, 4);
swap(T, sliced_items, context, lessThan, &order, 2, 4);
swap(T, sliced_items, context, lessThan, &order, 2, 3);
swap(T, sliced_items, context, lessThan, &order, 1, 4);
swap(T, sliced_items, context, lessThan, &order, 0, 3);
swap(T, sliced_items, context, lessThan, &order, 0, 2);
swap(T, sliced_items, context, lessThan, &order, 1, 3);
swap(T, sliced_items, context, lessThan, &order, 1, 2);
},
4 => {
swap(T, sliced_items, context, lessThan, &order, 0, 1);
swap(T, sliced_items, context, lessThan, &order, 2, 3);
swap(T, sliced_items, context, lessThan, &order, 0, 2);
swap(T, sliced_items, context, lessThan, &order, 1, 3);
swap(T, sliced_items, context, lessThan, &order, 1, 2);
},
else => {},
}
}
if (items.len < 8) return;
// then merge sort the higher levels, which can be 8-15, 16-31, 32-63, 64-127, etc.
while (true) {
// if every A and B block will fit into the cache, use a special branch specifically for merging with the cache
// (we use < rather than <= since the block size might be one more than iterator.length())
if (iterator.length() < cache.len) {
// if four subarrays fit into the cache, it's faster to merge both pairs of subarrays into the cache,
// then merge the two merged subarrays from the cache back into the original array
if ((iterator.length() + 1) * 4 <= cache.len and iterator.length() * 4 <= items.len) {
iterator.begin();
while (!iterator.finished()) {
// merge A1 and B1 into the cache
var A1 = iterator.nextRange();
var B1 = iterator.nextRange();
var A2 = iterator.nextRange();
var B2 = iterator.nextRange();
if (lessThan(context, items[B1.end - 1], items[A1.start])) {
// the two ranges are in reverse order, so copy them in reverse order into the cache
mem.copy(T, cache[B1.length()..], items[A1.start..A1.end]);
mem.copy(T, cache[0..], items[B1.start..B1.end]);
} else if (lessThan(context, items[B1.start], items[A1.end - 1])) {
// these two ranges weren't already in order, so merge them into the cache
mergeInto(T, items, A1, B1, context, lessThan, cache[0..]);
} else {
// if A1, B1, A2, and B2 are all in order, skip doing anything else
if (!lessThan(context, items[B2.start], items[A2.end - 1]) and !lessThan(context, items[A2.start], items[B1.end - 1])) continue;
// copy A1 and B1 into the cache in the same order
mem.copy(T, cache[0..], items[A1.start..A1.end]);
mem.copy(T, cache[A1.length()..], items[B1.start..B1.end]);
}
A1 = Range.init(A1.start, B1.end);
// merge A2 and B2 into the cache
if (lessThan(context, items[B2.end - 1], items[A2.start])) {
// the two ranges are in reverse order, so copy them in reverse order into the cache
mem.copy(T, cache[A1.length() + B2.length() ..], items[A2.start..A2.end]);
mem.copy(T, cache[A1.length()..], items[B2.start..B2.end]);
} else if (lessThan(context, items[B2.start], items[A2.end - 1])) {
// these two ranges weren't already in order, so merge them into the cache
mergeInto(T, items, A2, B2, context, lessThan, cache[A1.length()..]);
} else {
// copy A2 and B2 into the cache in the same order
mem.copy(T, cache[A1.length()..], items[A2.start..A2.end]);
mem.copy(T, cache[A1.length() + A2.length() ..], items[B2.start..B2.end]);
}
A2 = Range.init(A2.start, B2.end);
// merge A1 and A2 from the cache into the items
const A3 = Range.init(0, A1.length());
const B3 = Range.init(A1.length(), A1.length() + A2.length());
if (lessThan(context, cache[B3.end - 1], cache[A3.start])) {
// the two ranges are in reverse order, so copy them in reverse order into the items
mem.copy(T, items[A1.start + A2.length() ..], cache[A3.start..A3.end]);
mem.copy(T, items[A1.start..], cache[B3.start..B3.end]);
} else if (lessThan(context, cache[B3.start], cache[A3.end - 1])) {
// these two ranges weren't already in order, so merge them back into the items
mergeInto(T, cache[0..], A3, B3, context, lessThan, items[A1.start..]);
} else {
// copy A3 and B3 into the items in the same order
mem.copy(T, items[A1.start..], cache[A3.start..A3.end]);
mem.copy(T, items[A1.start + A1.length() ..], cache[B3.start..B3.end]);
}
}
// we merged two levels at the same time, so we're done with this level already
// (iterator.nextLevel() is called again at the bottom of this outer merge loop)
_ = iterator.nextLevel();
} else {
iterator.begin();
while (!iterator.finished()) {
var A = iterator.nextRange();
var B = iterator.nextRange();
if (lessThan(context, items[B.end - 1], items[A.start])) {
// the two ranges are in reverse order, so a simple rotation should fix it
mem.rotate(T, items[A.start..B.end], A.length());
} else if (lessThan(context, items[B.start], items[A.end - 1])) {
// these two ranges weren't already in order, so we'll need to merge them!
mem.copy(T, cache[0..], items[A.start..A.end]);
mergeExternal(T, items, A, B, context, lessThan, cache[0..]);
}
}
}
} else {
// this is where the in-place merge logic starts!
// 1. pull out two internal buffers each containing √A unique values
// 1a. adjust block_size and buffer_size if we couldn't find enough unique values
// 2. loop over the A and B subarrays within this level of the merge sort
// 3. break A and B into blocks of size 'block_size'
// 4. "tag" each of the A blocks with values from the first internal buffer
// 5. roll the A blocks through the B blocks and drop/rotate them where they belong
// 6. merge each A block with any B values that follow, using the cache or the second internal buffer
// 7. sort the second internal buffer if it exists
// 8. redistribute the two internal buffers back into the items
var block_size: usize = math.sqrt(iterator.length());
var buffer_size = iterator.length() / block_size + 1;
// as an optimization, we really only need to pull out the internal buffers once for each level of merges
// after that we can reuse the same buffers over and over, then redistribute it when we're finished with this level
var A: Range = undefined;
var B: Range = undefined;
var index: usize = 0;
var last: usize = 0;
var count: usize = 0;
var find: usize = 0;
var start: usize = 0;
var pull_index: usize = 0;
var pull = [_]Pull{
Pull{
.from = 0,
.to = 0,
.count = 0,
.range = Range.init(0, 0),
},
Pull{
.from = 0,
.to = 0,
.count = 0,
.range = Range.init(0, 0),
},
};
var buffer1 = Range.init(0, 0);
var buffer2 = Range.init(0, 0);
// find two internal buffers of size 'buffer_size' each
find = buffer_size + buffer_size;
var find_separately = false;
if (block_size <= cache.len) {
// if every A block fits into the cache then we won't need the second internal buffer,
// so we really only need to find 'buffer_size' unique values
find = buffer_size;
} else if (find > iterator.length()) {
// we can't fit both buffers into the same A or B subarray, so find two buffers separately
find = buffer_size;
find_separately = true;
}
// we need to find either a single contiguous space containing 2√A unique values (which will be split up into two buffers of size √A each),
// or we need to find one buffer of < 2√A unique values, and a second buffer of √A unique values,
// OR if we couldn't find that many unique values, we need the largest possible buffer we can get
// in the case where it couldn't find a single buffer of at least √A unique values,
// all of the Merge steps must be replaced by a different merge algorithm (MergeInPlace)
iterator.begin();
while (!iterator.finished()) {
A = iterator.nextRange();
B = iterator.nextRange();
// just store information about where the values will be pulled from and to,
// as well as how many values there are, to create the two internal buffers
// check A for the number of unique values we need to fill an internal buffer
// these values will be pulled out to the start of A
last = A.start;
count = 1;
while (count < find) : ({
last = index;
count += 1;
}) {
index = findLastForward(T, items, items[last], Range.init(last + 1, A.end), context, lessThan, find - count);
if (index == A.end) break;
}
index = last;
if (count >= buffer_size) {
// keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer
pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
.to = A.start,
};
pull_index = 1;
if (count == buffer_size + buffer_size) {
// we were able to find a single contiguous section containing 2√A unique values,
// so this section can be used to contain both of the internal buffers we'll need
buffer1 = Range.init(A.start, A.start + buffer_size);
buffer2 = Range.init(A.start + buffer_size, A.start + count);
break;
} else if (find == buffer_size + buffer_size) {
// we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values,
// so we still need to find a second separate buffer of at least √A unique values
buffer1 = Range.init(A.start, A.start + count);
find = buffer_size;
} else if (block_size <= cache.len) {
// we found the first and only internal buffer that we need, so we're done!
buffer1 = Range.init(A.start, A.start + count);
break;
} else if (find_separately) {
// found one buffer, but now find the other one
buffer1 = Range.init(A.start, A.start + count);
find_separately = false;
} else {
// we found a second buffer in an 'A' subarray containing √A unique values, so we're done!
buffer2 = Range.init(A.start, A.start + count);
break;
}
} else if (pull_index == 0 and count > buffer1.length()) {
// keep track of the largest buffer we were able to find
buffer1 = Range.init(A.start, A.start + count);
pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
.to = A.start,
};
}
// check B for the number of unique values we need to fill an internal buffer
// these values will be pulled out to the end of B
last = B.end - 1;
count = 1;
while (count < find) : ({
last = index - 1;
count += 1;
}) {
index = findFirstBackward(T, items, items[last], Range.init(B.start, last), context, lessThan, find - count);
if (index == B.start) break;
}
index = last;
if (count >= buffer_size) {
// keep track of the range within the items where we'll need to "pull out" these values to create the internal buffe
pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
.to = B.end,
};
pull_index = 1;
if (count == buffer_size + buffer_size) {
// we were able to find a single contiguous section containing 2√A unique values,
// so this section can be used to contain both of the internal buffers we'll need
buffer1 = Range.init(B.end - count, B.end - buffer_size);
buffer2 = Range.init(B.end - buffer_size, B.end);
break;
} else if (find == buffer_size + buffer_size) {
// we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values,
// so we still need to find a second separate buffer of at least √A unique values
buffer1 = Range.init(B.end - count, B.end);
find = buffer_size;
} else if (block_size <= cache.len) {
// we found the first and only internal buffer that we need, so we're done!
buffer1 = Range.init(B.end - count, B.end);
break;
} else if (find_separately) {
// found one buffer, but now find the other one
buffer1 = Range.init(B.end - count, B.end);
find_separately = false;
} else {
// buffer2 will be pulled out from a 'B' subarray, so if the first buffer was pulled out from the corresponding 'A' subarray,
// we need to adjust the end point for that A subarray so it knows to stop redistributing its values before reaching buffer2
if (pull[0].range.start == A.start) pull[0].range.end -= pull[1].count;
// we found a second buffer in an 'B' subarray containing √A unique values, so we're done!
buffer2 = Range.init(B.end - count, B.end);
break;
}
} else if (pull_index == 0 and count > buffer1.length()) {
// keep track of the largest buffer we were able to find
buffer1 = Range.init(B.end - count, B.end);
pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
.to = B.end,
};
}
}
// pull out the two ranges so we can use them as internal buffers
pull_index = 0;
while (pull_index < 2) : (pull_index += 1) {
const length = pull[pull_index].count;
if (pull[pull_index].to < pull[pull_index].from) {
// we're pulling the values out to the left, which means the start of an A subarray
index = pull[pull_index].from;
count = 1;
while (count < length) : (count += 1) {
index = findFirstBackward(T, items, items[index - 1], Range.init(pull[pull_index].to, pull[pull_index].from - (count - 1)), context, lessThan, length - count);
const range = Range.init(index + 1, pull[pull_index].from + 1);
mem.rotate(T, items[range.start..range.end], range.length() - count);
pull[pull_index].from = index + count;
}
} else if (pull[pull_index].to > pull[pull_index].from) {
// we're pulling values out to the right, which means the end of a B subarray
index = pull[pull_index].from + 1;
count = 1;
while (count < length) : (count += 1) {
index = findLastForward(T, items, items[index], Range.init(index, pull[pull_index].to), context, lessThan, length - count);
const range = Range.init(pull[pull_index].from, index - 1);
mem.rotate(T, items[range.start..range.end], count);
pull[pull_index].from = index - 1 - count;
}
}
}
// adjust block_size and buffer_size based on the values we were able to pull out
buffer_size = buffer1.length();
block_size = iterator.length() / buffer_size + 1;
// the first buffer NEEDS to be large enough to tag each of the evenly sized A blocks,
// so this was originally here to test the math for adjusting block_size above
// assert((iterator.length() + 1)/block_size <= buffer_size);
// now that the two internal buffers have been created, it's time to merge each A+B combination at this level of the merge sort!
iterator.begin();
while (!iterator.finished()) {
A = iterator.nextRange();
B = iterator.nextRange();
// remove any parts of A or B that are being used by the internal buffers
start = A.start;
if (start == pull[0].range.start) {
if (pull[0].from > pull[0].to) {
A.start += pull[0].count;
// if the internal buffer takes up the entire A or B subarray, then there's nothing to merge
// this only happens for very small subarrays, like √4 = 2, 2 * (2 internal buffers) = 4,
// which also only happens when cache.len is small or 0 since it'd otherwise use MergeExternal
if (A.length() == 0) continue;
} else if (pull[0].from < pull[0].to) {
B.end -= pull[0].count;
if (B.length() == 0) continue;
}
}
if (start == pull[1].range.start) {
if (pull[1].from > pull[1].to) {
A.start += pull[1].count;
if (A.length() == 0) continue;
} else if (pull[1].from < pull[1].to) {
B.end -= pull[1].count;
if (B.length() == 0) continue;
}
}
if (lessThan(context, items[B.end - 1], items[A.start])) {
// the two ranges are in reverse order, so a simple rotation should fix it
mem.rotate(T, items[A.start..B.end], A.length());
} else if (lessThan(context, items[A.end], items[A.end - 1])) {
// these two ranges weren't already in order, so we'll need to merge them!
var findA: usize = undefined;
// break the remainder of A into blocks. firstA is the uneven-sized first A block
var blockA = Range.init(A.start, A.end);
var firstA = Range.init(A.start, A.start + blockA.length() % block_size);
// swap the first value of each A block with the value in buffer1
var indexA = buffer1.start;
index = firstA.end;
while (index < blockA.end) : ({
indexA += 1;
index += block_size;
}) {
mem.swap(T, &items[indexA], &items[index]);
}
// start rolling the A blocks through the B blocks!
// whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well
var lastA = firstA;
var lastB = Range.init(0, 0);
var blockB = Range.init(B.start, B.start + math.min(block_size, B.length()));
blockA.start += firstA.length();
indexA = buffer1.start;
// if the first unevenly sized A block fits into the cache, copy it there for when we go to Merge it
// otherwise, if the second buffer is available, block swap the contents into that
if (lastA.length() <= cache.len) {
mem.copy(T, cache[0..], items[lastA.start..lastA.end]);
} else if (buffer2.length() > 0) {
blockSwap(T, items, lastA.start, buffer2.start, lastA.length());
}
if (blockA.length() > 0) {
while (true) {
// if there's a previous B block and the first value of the minimum A block is <= the last value of the previous B block,
// then drop that minimum A block behind. or if there are no B blocks left then keep dropping the remaining A blocks.
if ((lastB.length() > 0 and !lessThan(context, items[lastB.end - 1], items[indexA])) or blockB.length() == 0) {
// figure out where to split the previous B block, and rotate it at the split
const B_split = binaryFirst(T, items, items[indexA], lastB, context, lessThan);
const B_remaining = lastB.end - B_split;
// swap the minimum A block to the beginning of the rolling A blocks
var minA = blockA.start;
findA = minA + block_size;
while (findA < blockA.end) : (findA += block_size) {
if (lessThan(context, items[findA], items[minA])) {
minA = findA;
}
}
blockSwap(T, items, blockA.start, minA, block_size);
// swap the first item of the previous A block back with its original value, which is stored in buffer1
mem.swap(T, &items[blockA.start], &items[indexA]);
indexA += 1;
// locally merge the previous A block with the B values that follow it
// if lastA fits into the external cache we'll use that (with MergeExternal),
// or if the second internal buffer exists we'll use that (with MergeInternal),
// or failing that we'll use a strictly in-place merge algorithm (MergeInPlace)
if (lastA.length() <= cache.len) {
mergeExternal(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan, cache[0..]);
} else if (buffer2.length() > 0) {
mergeInternal(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan, buffer2);
} else {
mergeInPlace(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan);
}
if (buffer2.length() > 0 or block_size <= cache.len) {
// copy the previous A block into the cache or buffer2, since that's where we need it to be when we go to merge it anyway
if (block_size <= cache.len) {
mem.copy(T, cache[0..], items[blockA.start .. blockA.start + block_size]);
} else {
blockSwap(T, items, blockA.start, buffer2.start, block_size);
}
// this is equivalent to rotating, but faster
// the area normally taken up by the A block is either the contents of buffer2, or data we don't need anymore since we memcopied it
// either way, we don't need to retain the order of those items, so instead of rotating we can just block swap B to where it belongs
blockSwap(T, items, B_split, blockA.start + block_size - B_remaining, B_remaining);
} else {
// we are unable to use the 'buffer2' trick to speed up the rotation operation since buffer2 doesn't exist, so perform a normal rotation
mem.rotate(T, items[B_split .. blockA.start + block_size], blockA.start - B_split);
}
// update the range for the remaining A blocks, and the range remaining from the B block after it was split
lastA = Range.init(blockA.start - B_remaining, blockA.start - B_remaining + block_size);
lastB = Range.init(lastA.end, lastA.end + B_remaining);
// if there are no more A blocks remaining, this step is finished!
blockA.start += block_size;
if (blockA.length() == 0) break;
} else if (blockB.length() < block_size) {
// move the last B block, which is unevenly sized, to before the remaining A blocks, by using a rotation
// the cache is disabled here since it might contain the contents of the previous A block
mem.rotate(T, items[blockA.start..blockB.end], blockB.start - blockA.start);
lastB = Range.init(blockA.start, blockA.start + blockB.length());
blockA.start += blockB.length();
blockA.end += blockB.length();
blockB.end = blockB.start;
} else {
// roll the leftmost A block to the end by swapping it with the next B block
blockSwap(T, items, blockA.start, blockB.start, block_size);
lastB = Range.init(blockA.start, blockA.start + block_size);
blockA.start += block_size;
blockA.end += block_size;
blockB.start += block_size;
if (blockB.end > B.end - block_size) {
blockB.end = B.end;
} else {
blockB.end += block_size;
}
}
}
}
// merge the last A block with the remaining B values
if (lastA.length() <= cache.len) {
mergeExternal(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan, cache[0..]);
} else if (buffer2.length() > 0) {
mergeInternal(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan, buffer2);
} else {
mergeInPlace(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan);
}
}
}
// when we're finished with this merge step we should have the one or two internal buffers left over, where the second buffer is all jumbled up
// insertion sort the second buffer, then redistribute the buffers back into the items using the opposite process used for creating the buffer
// while an unstable sort like quicksort could be applied here, in benchmarks it was consistently slightly slower than a simple insertion sort,
// even for tens of millions of items. this may be because insertion sort is quite fast when the data is already somewhat sorted, like it is here
insertionSort(T, items[buffer2.start..buffer2.end], context, lessThan);
pull_index = 0;
while (pull_index < 2) : (pull_index += 1) {
var unique = pull[pull_index].count * 2;
if (pull[pull_index].from > pull[pull_index].to) {
// the values were pulled out to the left, so redistribute them back to the right
var buffer = Range.init(pull[pull_index].range.start, pull[pull_index].range.start + pull[pull_index].count);
while (buffer.length() > 0) {
index = findFirstForward(T, items, items[buffer.start], Range.init(buffer.end, pull[pull_index].range.end), context, lessThan, unique);
const amount = index - buffer.end;
mem.rotate(T, items[buffer.start..index], buffer.length());
buffer.start += (amount + 1);
buffer.end += amount;
unique -= 2;
}
} else if (pull[pull_index].from < pull[pull_index].to) {
// the values were pulled out to the right, so redistribute them back to the left
var buffer = Range.init(pull[pull_index].range.end - pull[pull_index].count, pull[pull_index].range.end);
while (buffer.length() > 0) {
index = findLastBackward(T, items, items[buffer.end - 1], Range.init(pull[pull_index].range.start, buffer.start), context, lessThan, unique);
const amount = buffer.start - index;
mem.rotate(T, items[index..buffer.end], amount);
buffer.start -= amount;
buffer.end -= (amount + 1);
unique -= 2;
}
}
}
}
// double the size of each A and B subarray that will be merged in the next level
if (!iterator.nextLevel()) break;
}
}
// merge operation without a buffer
/// Merges the two adjacent sorted ranges `A_arg` and `B_arg` of `items`
/// in place, using no auxiliary storage: only binary search plus rotation.
/// Stable. No-op when either range is empty.
fn mergeInPlace(
    comptime T: type,
    items: []T,
    A_arg: Range,
    B_arg: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
) void {
    if (A_arg.length() == 0 or B_arg.length() == 0) return;

    // this just repeatedly binary searches into B and rotates A into position.
    // the paper suggests using the 'rotation-based Hwang and Lin algorithm' here,
    // but I decided to stick with this because it had better situational performance
    //
    // (Hwang and Lin is designed for merging subarrays of very different sizes,
    // but WikiSort almost always uses subarrays that are roughly the same size)
    //
    // normally this is incredibly suboptimal, but this function is only called
    // when none of the A or B blocks in any subarray contained 2√A unique values,
    // which places a hard limit on the number of times this will ACTUALLY need
    // to binary search and rotate.
    //
    // according to my analysis the worst case is √A rotations performed on √A items
    // once the constant factors are removed, which ends up being O(n)
    //
    // again, this is NOT a general-purpose solution – it only works well in this case!
    // kind of like how the O(n^2) insertion sort is used in some places
    var A = A_arg;
    var B = B_arg;

    while (true) {
        // find the first place in B where the first item in A needs to be inserted
        const mid = binaryFirst(T, items, items[A.start], B, context, lessThan);

        // rotate A into place
        const amount = mid - A.end;
        mem.rotate(T, items[A.start..mid], A.length());
        if (B.end == mid) break;

        // calculate the new A and B ranges
        B.start = mid;
        A = Range.init(A.start + amount, B.start);
        // skip over items already in their final position (a run equal to A's head)
        A.start = binaryLast(T, items, items[A.start], A, context, lessThan);
        if (A.length() == 0) break;
    }
}
// merge operation using an internal buffer
/// Merges sorted range B with range A — whose contents currently live in
/// `buffer` — using swaps only. When finished, `buffer` holds its original
/// contents, though possibly permuted. Stable: ties take the A element first.
fn mergeInternal(
    comptime T: type,
    items: []T,
    A: Range,
    B: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    buffer: Range,
) void {
    // whenever we find a value to add to the final array, swap it with the value that's already in that spot
    // when this algorithm is finished, 'buffer' will contain its original contents, but in a different order
    var A_count: usize = 0;
    var B_count: usize = 0;
    var insert: usize = 0;

    if (B.length() > 0 and A.length() > 0) {
        while (true) {
            if (!lessThan(context, items[B.start + B_count], items[buffer.start + A_count])) {
                mem.swap(T, &items[A.start + insert], &items[buffer.start + A_count]);
                A_count += 1;
                insert += 1;
                if (A_count >= A.length()) break;
            } else {
                mem.swap(T, &items[A.start + insert], &items[B.start + B_count]);
                B_count += 1;
                insert += 1;
                if (B_count >= B.length()) break;
            }
        }
    }

    // swap the remainder of A into the final array
    blockSwap(T, items, buffer.start + A_count, A.start + insert, A.length() - A_count);
}
/// Swaps the `block_size` elements beginning at `start1` with the
/// `block_size` elements beginning at `start2`. The two blocks are assumed
/// not to overlap.
fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_size: usize) void {
    var offset: usize = 0;
    while (offset < block_size) : (offset += 1) {
        const tmp = items[start1 + offset];
        items[start1 + offset] = items[start2 + offset];
        items[start2 + offset] = tmp;
    }
}
// combine a linear search with a binary search to reduce the number of comparisons in situations
// where have some idea as to how many unique values there are and where the next value might be
/// Returns the index of the first element in `range` that is >= `value`,
/// galloping forward in steps of roughly range.length()/unique before
/// finishing with a binary search over the last step.
fn findFirstForward(
    comptime T: type,
    items: []T,
    value: T,
    range: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    unique: usize,
) usize {
    if (range.length() == 0) return range.start;
    // step size, at least 1 so the loop always advances
    const skip = math.max(range.length() / unique, @as(usize, 1));

    var index = range.start + skip;
    while (lessThan(context, items[index - 1], value)) : (index += skip) {
        if (index >= range.end - skip) {
            // next step would overrun the range; binary search the remainder
            return binaryFirst(T, items, value, Range.init(index, range.end), context, lessThan);
        }
    }

    return binaryFirst(T, items, value, Range.init(index - skip, index), context, lessThan);
}
/// Returns the index of the first element in `range` that is >= `value`,
/// galloping backward from range.end before finishing with a binary search.
/// Produces the same result as binaryFirst over the whole range.
fn findFirstBackward(
    comptime T: type,
    items: []T,
    value: T,
    range: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    unique: usize,
) usize {
    if (range.length() == 0) return range.start;
    // step size, at least 1 so the loop always advances
    const skip = math.max(range.length() / unique, @as(usize, 1));

    var index = range.end - skip;
    while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) {
        if (index < range.start + skip) {
            // next step would underrun the range; binary search the remainder
            return binaryFirst(T, items, value, Range.init(range.start, index), context, lessThan);
        }
    }

    return binaryFirst(T, items, value, Range.init(index, index + skip), context, lessThan);
}
/// Returns the index one past the last element in `range` that is <= `value`
/// (i.e. the upper bound), galloping forward before a final binary search.
fn findLastForward(
    comptime T: type,
    items: []T,
    value: T,
    range: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    unique: usize,
) usize {
    if (range.length() == 0) return range.start;
    // step size, at least 1 so the loop always advances
    const skip = math.max(range.length() / unique, @as(usize, 1));

    var index = range.start + skip;
    while (!lessThan(context, value, items[index - 1])) : (index += skip) {
        if (index >= range.end - skip) {
            // next step would overrun the range; binary search the remainder
            return binaryLast(T, items, value, Range.init(index, range.end), context, lessThan);
        }
    }

    return binaryLast(T, items, value, Range.init(index - skip, index), context, lessThan);
}
/// Returns the index one past the last element in `range` that is <= `value`
/// (the upper bound), galloping backward from range.end before a final
/// binary search. Produces the same result as binaryLast over the whole range.
fn findLastBackward(
    comptime T: type,
    items: []T,
    value: T,
    range: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    unique: usize,
) usize {
    if (range.length() == 0) return range.start;
    // step size, at least 1 so the loop always advances
    const skip = math.max(range.length() / unique, @as(usize, 1));

    var index = range.end - skip;
    while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) {
        if (index < range.start + skip) {
            // next step would underrun the range; binary search the remainder
            return binaryLast(T, items, value, Range.init(range.start, index), context, lessThan);
        }
    }

    return binaryLast(T, items, value, Range.init(index, index + skip), context, lessThan);
}
/// Binary search: returns the index of the first element in `range` that is
/// not less than `value` (the lower bound), or `range.end` when every element
/// is less than `value`. `range` must already be sorted by `lessThan`.
fn binaryFirst(
    comptime T: type,
    items: []T,
    value: T,
    range: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
) usize {
    var curr = range.start;
    var size = range.length();
    if (range.start >= range.end) return range.end;
    while (size > 0) {
        // `offset` absorbs the remainder of odd sizes so halving converges exactly
        const offset = size % 2;

        size /= 2;
        const mid = items[curr + size];
        if (lessThan(context, mid, value)) {
            curr += size + offset;
        }
    }
    return curr;
}
/// Binary search: returns the index one past the last element in `range`
/// that is <= `value` (the upper bound), or `range.end` when every element
/// is <= `value`. `range` must already be sorted by `lessThan`.
fn binaryLast(
    comptime T: type,
    items: []T,
    value: T,
    range: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
) usize {
    var curr = range.start;
    var size = range.length();
    if (range.start >= range.end) return range.end;
    while (size > 0) {
        // `offset` absorbs the remainder of odd sizes so halving converges exactly
        const offset = size % 2;

        size /= 2;
        const mid = items[curr + size];
        if (!lessThan(context, value, mid)) {
            curr += size + offset;
        }
    }
    return curr;
}
/// Merges the sorted ranges A and B of `from` into `into`, starting at
/// index 0. Stable: ties take the A element first. `into` must have room
/// for A.length() + B.length() items; both ranges must be non-empty.
fn mergeInto(
    comptime T: type,
    from: []T,
    A: Range,
    B: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    into: []T,
) void {
    var A_index: usize = A.start;
    var B_index: usize = B.start;
    const A_last = A.end;
    const B_last = B.end;
    var insert_index: usize = 0;

    while (true) {
        if (!lessThan(context, from[B_index], from[A_index])) {
            into[insert_index] = from[A_index];
            A_index += 1;
            insert_index += 1;
            if (A_index == A_last) {
                // copy the remainder of B into the final array
                mem.copy(T, into[insert_index..], from[B_index..B_last]);
                break;
            }
        } else {
            into[insert_index] = from[B_index];
            B_index += 1;
            insert_index += 1;
            if (B_index == B_last) {
                // copy the remainder of A into the final array
                mem.copy(T, into[insert_index..], from[A_index..A_last]);
                break;
            }
        }
    }
}
/// Merges range A — whose contents have previously been copied into `cache` —
/// with range B (still in `items`), writing the merged result back into
/// `items` starting at A.start. Stable: ties take the A element first.
fn mergeExternal(
    comptime T: type,
    items: []T,
    A: Range,
    B: Range,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), T, T) bool,
    cache: []T,
) void {
    // A fits into the cache, so use that instead of the internal buffer
    var A_index: usize = 0;
    var B_index: usize = B.start;
    var insert_index: usize = A.start;
    const A_last = A.length();
    const B_last = B.end;

    if (B.length() > 0 and A.length() > 0) {
        while (true) {
            if (!lessThan(context, items[B_index], cache[A_index])) {
                items[insert_index] = cache[A_index];
                A_index += 1;
                insert_index += 1;
                if (A_index == A_last) break;
            } else {
                items[insert_index] = items[B_index];
                B_index += 1;
                insert_index += 1;
                if (B_index == B_last) break;
            }
        }
    }

    // copy the remainder of A into the final array
    mem.copy(T, items[insert_index..], cache[A_index..A_last]);
}
/// Sorting-network helper: conditionally swaps items[x] and items[y] (and
/// their entries in `order`) so the pair ends up in sorted order. `order`
/// tracks original positions and is used to break ties, keeping the network
/// stable for equal elements.
fn swap(
    comptime T: type,
    items: []T,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
    order: *[8]u8,
    x: usize,
    y: usize,
) void {
    // swap when y < x, or when they compare equal but y originally came first
    if (lessThan(context, items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(context, items[x], items[y]))) {
        mem.swap(T, &items[x], &items[y]);
        mem.swap(u8, &(order.*)[x], &(order.*)[y]);
    }
}
/// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, comptime asc(u8))`.
/// The returned function orders values ascending via `<`.
pub fn asc(comptime T: type) fn (void, T, T) bool {
    return struct {
        fn compare(context: void, lhs: T, rhs: T) bool {
            _ = context;
            return lhs < rhs;
        }
    }.compare;
}
/// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, comptime desc(u8))`.
/// The returned function orders values descending via `>`.
pub fn desc(comptime T: type) fn (void, T, T) bool {
    return struct {
        fn compare(context: void, lhs: T, rhs: T) bool {
            _ = context;
            return lhs > rhs;
        }
    }.compare;
}
// Runs the stability checks both at runtime and at compile time.
test "stable sort" {
    try testStableSort();
    comptime try testStableSort();
}
/// Sorts two permutations of the same records by `value` and verifies that
/// records with equal `value` retain their original relative order (stability).
fn testStableSort() !void {
    // result expected for both inputs: ordered by value, ties in input order
    var expected = [_]IdAndValue{
        IdAndValue{ .id = 0, .value = 0 },
        IdAndValue{ .id = 1, .value = 0 },
        IdAndValue{ .id = 2, .value = 0 },
        IdAndValue{ .id = 0, .value = 1 },
        IdAndValue{ .id = 1, .value = 1 },
        IdAndValue{ .id = 2, .value = 1 },
        IdAndValue{ .id = 0, .value = 2 },
        IdAndValue{ .id = 1, .value = 2 },
        IdAndValue{ .id = 2, .value = 2 },
    };
    var cases = [_][9]IdAndValue{
        [_]IdAndValue{
            IdAndValue{ .id = 0, .value = 0 },
            IdAndValue{ .id = 0, .value = 1 },
            IdAndValue{ .id = 0, .value = 2 },
            IdAndValue{ .id = 1, .value = 0 },
            IdAndValue{ .id = 1, .value = 1 },
            IdAndValue{ .id = 1, .value = 2 },
            IdAndValue{ .id = 2, .value = 0 },
            IdAndValue{ .id = 2, .value = 1 },
            IdAndValue{ .id = 2, .value = 2 },
        },
        [_]IdAndValue{
            IdAndValue{ .id = 0, .value = 2 },
            IdAndValue{ .id = 0, .value = 1 },
            IdAndValue{ .id = 0, .value = 0 },
            IdAndValue{ .id = 1, .value = 2 },
            IdAndValue{ .id = 1, .value = 1 },
            IdAndValue{ .id = 1, .value = 0 },
            IdAndValue{ .id = 2, .value = 2 },
            IdAndValue{ .id = 2, .value = 1 },
            IdAndValue{ .id = 2, .value = 0 },
        },
    };
    for (cases) |*case| {
        insertionSort(IdAndValue, (case.*)[0..], {}, cmpByValue);
        for (case.*) |item, i| {
            try testing.expect(item.id == expected[i].id);
            try testing.expect(item.value == expected[i].value);
        }
    }
}
/// Test helper: a value paired with its original position (`id`), used to
/// verify that sorts are stable.
const IdAndValue = struct {
    id: usize,
    value: i32,
};
/// Orders IdAndValue records by their `value` field, ascending
/// (equivalent to applying asc(i32) to the two values).
fn cmpByValue(context: void, a: IdAndValue, b: IdAndValue) bool {
    _ = context;
    return a.value < b.value;
}
// Pre-instantiated comparators shared by the tests below.
const asc_u8 = asc(u8);
const asc_i32 = asc(i32);
const desc_u8 = desc(u8);
const desc_i32 = desc(i32);
// Table-driven check of sort() for u8 and i32: each case is an
// { input, expected-ascending-result } pair.
test "sort" {
    const u8cases = [_][]const []const u8{
        &[_][]const u8{
            "",
            "",
        },
        &[_][]const u8{
            "a",
            "a",
        },
        &[_][]const u8{
            "az",
            "az",
        },
        &[_][]const u8{
            "za",
            "az",
        },
        &[_][]const u8{
            "asdf",
            "adfs",
        },
        &[_][]const u8{
            "one",
            "eno",
        },
    };

    for (u8cases) |case| {
        // sort a mutable copy of the input, then compare with the expectation
        var buf: [8]u8 = undefined;
        const slice = buf[0..case[0].len];
        mem.copy(u8, slice, case[0]);
        sort(u8, slice, {}, asc_u8);
        try testing.expect(mem.eql(u8, slice, case[1]));
    }

    const i32cases = [_][]const []const i32{
        &[_][]const i32{
            &[_]i32{},
            &[_]i32{},
        },
        &[_][]const i32{
            &[_]i32{1},
            &[_]i32{1},
        },
        &[_][]const i32{
            &[_]i32{ 0, 1 },
            &[_]i32{ 0, 1 },
        },
        &[_][]const i32{
            &[_]i32{ 1, 0 },
            &[_]i32{ 0, 1 },
        },
        &[_][]const i32{
            &[_]i32{ 1, -1, 0 },
            &[_]i32{ -1, 0, 1 },
        },
        &[_][]const i32{
            &[_]i32{ 2, 1, 3 },
            &[_]i32{ 1, 2, 3 },
        },
    };

    for (i32cases) |case| {
        var buf: [8]i32 = undefined;
        const slice = buf[0..case[0].len];
        mem.copy(i32, slice, case[0]);
        sort(i32, slice, {}, asc_i32);
        try testing.expect(mem.eql(i32, slice, case[1]));
    }
}
// Same table-driven shape as "sort", but with the descending comparator.
test "sort descending" {
    const rev_cases = [_][]const []const i32{
        &[_][]const i32{
            &[_]i32{},
            &[_]i32{},
        },
        &[_][]const i32{
            &[_]i32{1},
            &[_]i32{1},
        },
        &[_][]const i32{
            &[_]i32{ 0, 1 },
            &[_]i32{ 1, 0 },
        },
        &[_][]const i32{
            &[_]i32{ 1, 0 },
            &[_]i32{ 1, 0 },
        },
        &[_][]const i32{
            &[_]i32{ 1, -1, 0 },
            &[_]i32{ 1, 0, -1 },
        },
        &[_][]const i32{
            &[_]i32{ 2, 1, 3 },
            &[_]i32{ 3, 2, 1 },
        },
    };

    for (rev_cases) |case| {
        var buf: [8]i32 = undefined;
        const slice = buf[0..case[0].len];
        mem.copy(i32, slice, case[0]);
        sort(i32, slice, {}, desc_i32);
        try testing.expect(mem.eql(i32, slice, case[1]));
    }
}
// Simple smoke test on a small unsorted array.
test "another sort case" {
    var arr = [_]i32{ 5, 3, 1, 2, 4 };
    sort(i32, arr[0..], {}, asc_i32);

    try testing.expect(mem.eql(i32, &arr, &[_]i32{ 1, 2, 3, 4, 5 }));
}
// Runs several randomized rounds with a fixed seed, so failures reproduce.
test "sort fuzz testing" {
    var prng = std.rand.DefaultPrng.init(0x12345678);
    const random = prng.random();
    const test_case_count = 10;
    var i: usize = 0;
    while (i < test_case_count) : (i += 1) {
        try fuzzTest(random);
    }
}
// Scratch memory for tests; appears unused in this portion of the file —
// fuzzTest below allocates through testing.allocator instead.
var fixed_buffer_mem: [100 * 1024]u8 = undefined;

/// Sorts a randomly sized, randomly filled array of IdAndValue and verifies
/// the result is both ordered by value and stable (ids ascending within ties).
fn fuzzTest(rng: std.rand.Random) !void {
    const array_size = rng.intRangeLessThan(usize, 0, 1000);
    var array = try testing.allocator.alloc(IdAndValue, array_size);
    defer testing.allocator.free(array);
    // populate with random data
    for (array) |*item, index| {
        item.id = index;
        item.value = rng.intRangeLessThan(i32, 0, 100);
    }
    sort(IdAndValue, array, {}, cmpByValue);

    var index: usize = 1;
    while (index < array.len) : (index += 1) {
        if (array[index].value == array[index - 1].value) {
            // equal values must keep their original relative order (stability)
            try testing.expect(array[index].id > array[index - 1].id);
        } else {
            try testing.expect(array[index].value > array[index - 1].value);
        }
    }
}
/// Returns the index of the smallest element in `items`, or null when the
/// slice is empty. When several elements compare equal, the index of the
/// first occurrence is returned.
pub fn argMin(
    comptime T: type,
    items: []const T,
    context: anytype,
    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
) ?usize {
    if (items.len == 0) return null;

    var best_index: usize = 0;
    var index: usize = 1;
    while (index < items.len) : (index += 1) {
        // strict less-than, so earlier duplicates keep winning
        if (lessThan(context, items[index], items[best_index])) {
            best_index = index;
        }
    }

    return best_index;
}
// Covers: empty slice, single element, already sorted, unsorted,
// all-equal (first wins), negatives, and a descending comparator.
test "argMin" {
    try testing.expectEqual(@as(?usize, null), argMin(i32, &[_]i32{}, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMin(i32, &[_]i32{1}, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMin(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 3), argMin(i32, &[_]i32{ 9, 3, 8, 2, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMin(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMin(i32, &[_]i32{ -10, 1, 10 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 3), argMin(i32, &[_]i32{ 6, 3, 5, 7, 6 }, {}, desc_i32));
}
/// Returns the smallest element of `items` according to `lessThan`,
/// or null when the slice is empty.
pub fn min(
    comptime T: type,
    items: []const T,
    context: anytype,
    comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) ?T {
    if (argMin(T, items, context, lessThan)) |index| {
        return items[index];
    }
    return null;
}
// Mirrors the "argMin" cases but checks the returned value instead of the index.
test "min" {
    try testing.expectEqual(@as(?i32, null), min(i32, &[_]i32{}, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 1), min(i32, &[_]i32{1}, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 1), min(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 2), min(i32, &[_]i32{ 9, 3, 8, 2, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 1), min(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, -10), min(i32, &[_]i32{ -10, 1, 10 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 7), min(i32, &[_]i32{ 6, 3, 5, 7, 6 }, {}, desc_i32));
}
/// Returns the index of the largest element in `items`, or null when the
/// slice is empty. When several elements compare equal, the index of the
/// first occurrence is returned.
pub fn argMax(
    comptime T: type,
    items: []const T,
    context: anytype,
    comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) ?usize {
    if (items.len == 0) return null;

    var best_index: usize = 0;
    var index: usize = 1;
    while (index < items.len) : (index += 1) {
        // only a strictly greater element replaces the current best,
        // so earlier duplicates keep winning
        if (lessThan(context, items[best_index], items[index])) {
            best_index = index;
        }
    }

    return best_index;
}
// Covers: empty slice, single element, sorted, unsorted, all-equal
// (first wins), negatives, and a descending comparator.
test "argMax" {
    try testing.expectEqual(@as(?usize, null), argMax(i32, &[_]i32{}, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMax(i32, &[_]i32{1}, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 4), argMax(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMax(i32, &[_]i32{ 9, 3, 8, 2, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 0), argMax(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 2), argMax(i32, &[_]i32{ -10, 1, 10 }, {}, asc_i32));
    try testing.expectEqual(@as(?usize, 1), argMax(i32, &[_]i32{ 6, 3, 5, 7, 6 }, {}, desc_i32));
}
/// Returns the largest element of `items` according to `lessThan`,
/// or null when the slice is empty.
pub fn max(
    comptime T: type,
    items: []const T,
    context: anytype,
    comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) ?T {
    if (argMax(T, items, context, lessThan)) |index| {
        return items[index];
    }
    return null;
}
// Mirrors the "argMax" cases but checks the returned value instead of the index.
test "max" {
    try testing.expectEqual(@as(?i32, null), max(i32, &[_]i32{}, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 1), max(i32, &[_]i32{1}, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 5), max(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 9), max(i32, &[_]i32{ 9, 3, 8, 2, 5 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 1), max(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 10), max(i32, &[_]i32{ -10, 1, 10 }, {}, asc_i32));
    try testing.expectEqual(@as(?i32, 3), max(i32, &[_]i32{ 6, 3, 5, 7, 6 }, {}, desc_i32));
}
/// Returns true when `items` is sorted with respect to `lessThan`
/// (no element is strictly less than its predecessor). Empty and
/// single-element slices are trivially sorted.
pub fn isSorted(
    comptime T: type,
    items: []const T,
    context: anytype,
    comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool,
) bool {
    if (items.len < 2) return true;

    // `i` indexes into items[1..], so items[i] is the previous element
    for (items[1..]) |item, i| {
        if (lessThan(context, item, items[i])) {
            return false;
        }
    }

    return true;
}
// Checks ascending and descending order detection, including the all-equal
// case, which counts as sorted under both comparators.
test "isSorted" {
    try testing.expect(isSorted(i32, &[_]i32{}, {}, asc_i32));
    try testing.expect(isSorted(i32, &[_]i32{10}, {}, asc_i32));
    try testing.expect(isSorted(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, asc_i32));
    try testing.expect(isSorted(i32, &[_]i32{ -10, 1, 1, 1, 10 }, {}, asc_i32));

    try testing.expect(isSorted(i32, &[_]i32{}, {}, desc_i32));
    try testing.expect(isSorted(i32, &[_]i32{-20}, {}, desc_i32));
    try testing.expect(isSorted(i32, &[_]i32{ 3, 2, 1, 0, -1 }, {}, desc_i32));
    try testing.expect(isSorted(i32, &[_]i32{ 10, -10 }, {}, desc_i32));

    try testing.expect(isSorted(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, asc_i32));
    try testing.expect(isSorted(i32, &[_]i32{ 1, 1, 1, 1, 1 }, {}, desc_i32));

    try testing.expectEqual(false, isSorted(i32, &[_]i32{ 5, 4, 3, 2, 1 }, {}, asc_i32));
    try testing.expectEqual(false, isSorted(i32, &[_]i32{ 1, 2, 3, 4, 5 }, {}, desc_i32));

    try testing.expect(isSorted(u8, "abcd", {}, asc_u8));
    try testing.expect(isSorted(u8, "zyxw", {}, desc_u8));

    try testing.expectEqual(false, isSorted(u8, "abcd", {}, desc_u8));
    try testing.expectEqual(false, isSorted(u8, "zyxw", {}, asc_u8));

    try testing.expect(isSorted(u8, "ffff", {}, asc_u8));
    try testing.expect(isSorted(u8, "ffff", {}, desc_u8));
}
| https://raw.githubusercontent.com/jamesmintram/jimzos/8eb52e7efffb1a97eca4899ff72549f96ed3460b/tools/ext2fs/data/test3/sort.zig |
// BITCOUNT key [start end]
/// Builds a Redis BITCOUNT command: count the set bits in the string value
/// stored at `key`, optionally restricted to a byte range.
pub const BITCOUNT = struct {
    //! ```
    //! const cmd = BITCOUNT.init("test", BITCOUNT.Bounds{ .Slice = .{ .start = -2, .end = -1 } });
    //! ```
    // Key holding the string to inspect.
    key: []const u8,
    // Which part of the value to count; defaults to the whole string.
    bounds: Bounds = .FullString,

    const Self = @This();

    /// Convenience constructor.
    pub fn init(key: []const u8, bounds: Bounds) BITCOUNT {
        return .{ .key = key, .bounds = bounds };
    }

    /// Rejects commands Redis would refuse: an empty key name.
    pub fn validate(self: Self) !void {
        if (self.key.len == 0) return error.EmptyKeyName;
    }

    /// Hook consumed by the command serializer (discovered by decl name).
    pub const RedisCommand = struct {
        pub fn serialize(self: BITCOUNT, comptime rootSerializer: type, msg: anytype) !void {
            return rootSerializer.serializeCommand(msg, .{ "BITCOUNT", self.key, self.bounds });
        }
    };

    /// Optional [start end] byte range; per Redis semantics, negative offsets
    /// index from the end of the string (see the example above).
    pub const Bounds = union(enum) {
        FullString,
        Slice: struct {
            start: isize,
            end: isize,
        },

        /// Hook consumed by the argument serializer: FullString contributes
        /// no arguments, Slice contributes its two offsets.
        pub const RedisArguments = struct {
            pub fn count(self: Bounds) usize {
                return switch (self) {
                    .FullString => 0,
                    .Slice => 2,
                };
            }

            pub fn serialize(self: Bounds, comptime rootSerializer: type, msg: anytype) !void {
                switch (self) {
                    .FullString => {},
                    .Slice => |slice| {
                        try rootSerializer.serializeArgument(msg, isize, slice.start);
                        try rootSerializer.serializeArgument(msg, isize, slice.end);
                    },
                }
            }
        };
    };
};
// Compile-time smoke test: the init() call from the container doc example builds.
test "example" {
    _ = BITCOUNT.init("test", BITCOUNT.Bounds{ .Slice = .{ .start = -2, .end = -1 } });
}
// Verifies that serializing the BITCOUNT struct produces byte-identical
// output to serializing the equivalent raw argument tuple.
test "serializer" {
    const std = @import("std");
    const serializer = @import("../../serializer.zig").CommandSerializer;

    var correctBuf: [1000]u8 = undefined;
    var correctMsg = std.io.fixedBufferStream(correctBuf[0..]);

    var testBuf: [1000]u8 = undefined;
    var testMsg = std.io.fixedBufferStream(testBuf[0..]);

    {
        correctMsg.reset();
        testMsg.reset();

        try serializer.serializeCommand(
            testMsg.writer(),
            BITCOUNT.init("mykey", BITCOUNT.Bounds{ .Slice = .{ .start = 1, .end = 10 } }),
        );
        try serializer.serializeCommand(
            correctMsg.writer(),
            .{ "BITCOUNT", "mykey", 1, 10 },
        );

        try std.testing.expectEqualSlices(u8, correctMsg.getWritten(), testMsg.getWritten());
    }
}
| https://raw.githubusercontent.com/MarcoPolo/zig-libp2p/478d9b60c99069e5bc557b8f60c36ae72dedc83a/interop/okredis/src/commands/strings/bitcount.zig |
//! Futex is a mechanism used to block (`wait`) and unblock (`wake`) threads using a 32bit memory address as hints.
//! Blocking a thread is acknowledged only if the 32bit memory address is equal to a given value.
//! This check helps avoid block/unblock deadlocks which occur if a `wake()` happens before a `wait()`.
//! Using Futex, other Thread synchronization primitives can be built which efficiently wait for cross-thread events or signals.
const std = @import("../std.zig");
const builtin = @import("builtin");
const Futex = @This();
const os = std.os;
const assert = std.debug.assert;
const testing = std.testing;
const Atomic = std.atomic.Atomic;
/// Checks if `ptr` still contains the value `expect` and, if so, blocks the caller until either:
/// - The value at `ptr` is no longer equal to `expect`.
/// - The caller is unblocked by a matching `wake()`.
/// - The caller is unblocked spuriously ("at random").
///
/// The checking of `ptr` and `expect`, along with blocking the caller, is done atomically
/// and totally ordered (sequentially consistent) with respect to other wait()/wake() calls on the same `ptr`.
pub fn wait(ptr: *const Atomic(u32), expect: u32) void {
    @setCold(true); // blocking is assumed to be off the hot path

    Impl.wait(ptr, expect, null) catch |err| switch (err) {
        error.Timeout => unreachable, // null timeout meant to wait forever
    };
}
/// Checks if `ptr` still contains the value `expect` and, if so, blocks the caller until either:
/// - The value at `ptr` is no longer equal to `expect`.
/// - The caller is unblocked by a matching `wake()`.
/// - The caller is unblocked spuriously ("at random").
/// - The caller blocks for longer than the given timeout. In which case, `error.Timeout` is returned.
///
/// The checking of `ptr` and `expect`, along with blocking the caller, is done atomically
/// and totally ordered (sequentially consistent) with respect to other wait()/wake() calls on the same `ptr`.
pub fn timedWait(ptr: *const Atomic(u32), expect: u32, timeout_ns: u64) error{Timeout}!void {
    @setCold(true); // blocking is assumed to be off the hot path

    // Avoid calling into the OS for no-op timeouts.
    if (timeout_ns == 0) {
        // a zero timeout still requires the value check to be performed
        if (ptr.load(.SeqCst) != expect) return;
        return error.Timeout;
    }

    return Impl.wait(ptr, expect, timeout_ns);
}
/// Unblocks at most `max_waiters` callers blocked in a `wait()` call on `ptr`.
pub fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
    @setCold(true); // waking is assumed to be off the hot path

    // Avoid calling into the OS if there's nothing to wake up.
    if (max_waiters == 0) {
        return;
    }

    Impl.wake(ptr, max_waiters);
}
/// Platform-specific blocking/unblocking backend, selected at compile time.
/// Every Impl exposes the same `wait`/`wake` method pair.
const Impl = if (builtin.single_threaded)
    SingleThreadedImpl
else if (builtin.os.tag == .windows)
    WindowsImpl
else if (builtin.os.tag.isDarwin())
    DarwinImpl
else if (builtin.os.tag == .linux)
    LinuxImpl
else if (builtin.os.tag == .freebsd)
    FreebsdImpl
else if (builtin.os.tag == .openbsd)
    OpenbsdImpl
else if (builtin.os.tag == .dragonfly)
    DragonflyImpl
else if (builtin.target.isWasm())
    WasmImpl
else if (std.Thread.use_pthreads)
    PosixImpl
else
    UnsupportedImpl;
/// We can't do @compileError() in the `Impl` switch statement above as its eagerly evaluated.
/// So instead, we @compileError() on the methods themselves for platforms which don't support futex.
const UnsupportedImpl = struct {
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        return unsupported(.{ ptr, expect, timeout });
    }

    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        return unsupported(.{ ptr, max_waiters });
    }

    // Funneling the arguments through keeps the method signatures identical
    // to the other Impls while still failing compilation when actually used.
    fn unsupported(unused: anytype) noreturn {
        _ = unused;
        @compileError("Unsupported operating system " ++ @tagName(builtin.target.os.tag));
    }
};
const SingleThreadedImpl = struct {
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        if (ptr.loadUnchecked() != expect) {
            return;
        }

        // No other thread exists to change the value or wake us, so a wait
        // without a timeout can never complete.
        if (timeout) |delay| {
            std.time.sleep(delay);
            return error.Timeout;
        }

        unreachable; // deadlock detected
    }

    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        // Single-threaded: there is nobody to wake up.
        _ = ptr;
        _ = max_waiters;
    }
};
// We use WaitOnAddress through NtDll instead of API-MS-Win-Core-Synch-l1-2-0.dll
// as it's generally already a linked target and is autoloaded into all processes anyway.
const WindowsImpl = struct {
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        var timeout_value: os.windows.LARGE_INTEGER = undefined;
        var timeout_ptr: ?*const os.windows.LARGE_INTEGER = null;

        // NTDLL functions work with time in units of 100 nanoseconds.
        // Positive values are absolute deadlines while negative values are relative durations.
        if (timeout) |delay| {
            timeout_value = @as(os.windows.LARGE_INTEGER, @intCast(delay / 100));
            timeout_value = -timeout_value; // negate: we pass a relative duration
            timeout_ptr = &timeout_value;
        }

        const rc = os.windows.ntdll.RtlWaitOnAddress(
            @as(?*const anyopaque, @ptrCast(ptr)),
            @as(?*const anyopaque, @ptrCast(&expect)),
            @sizeOf(@TypeOf(expect)),
            timeout_ptr, // null means wait forever
        );

        switch (rc) {
            .SUCCESS => {},
            .TIMEOUT => {
                assert(timeout != null);
                return error.Timeout;
            },
            else => unreachable,
        }
    }

    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        const address = @as(?*const anyopaque, @ptrCast(ptr));
        assert(max_waiters != 0); // zero is filtered out by wake() above

        switch (max_waiters) {
            1 => os.windows.ntdll.RtlWakeAddressSingle(address),
            else => os.windows.ntdll.RtlWakeAddressAll(address),
        }
    }
};
const DarwinImpl = struct {
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        // Darwin XNU 7195.50.7.100.1 introduced __ulock_wait2 and migrated code paths (notably pthread_cond_t) towards it:
        // https://github.com/apple/darwin-xnu/commit/d4061fb0260b3ed486147341b72468f836ed6c8f#diff-08f993cc40af475663274687b7c326cc6c3031e0db3ac8de7b24624610616be6
        //
        // This XNU version appears to correspond to 11.0.1:
        // https://kernelshaman.blogspot.com/2021/01/building-xnu-for-macos-big-sur-1101.html
        //
        // ulock_wait() uses 32-bit micro-second timeouts where 0 = INFINITE or no-timeout
        // ulock_wait2() uses 64-bit nano-second timeouts (with the same convention)
        const supports_ulock_wait2 = builtin.target.os.version_range.semver.min.major >= 11;

        var timeout_ns: u64 = 0;
        if (timeout) |delay| {
            assert(delay != 0); // handled by timedWait()
            timeout_ns = delay;
        }

        // If we're using `__ulock_wait` and `timeout` is too big to fit inside a `u32` count of
        // micro-seconds (around 70min), we'll request a shorter timeout. This is fine (users
        // should handle spurious wakeups), but we need to remember that we did so, so that
        // we don't return `Timeout` incorrectly. If that happens, we set this variable to
        // true so that we know to ignore the ETIMEDOUT result.
        var timeout_overflowed = false;

        const addr = @as(*const anyopaque, @ptrCast(ptr));
        const flags = os.darwin.UL_COMPARE_AND_WAIT | os.darwin.ULF_NO_ERRNO;
        const status = blk: {
            if (supports_ulock_wait2) {
                break :blk os.darwin.__ulock_wait2(flags, addr, expect, timeout_ns, 0);
            }

            const timeout_us = std.math.cast(u32, timeout_ns / std.time.ns_per_us) orelse overflow: {
                timeout_overflowed = true;
                break :overflow std.math.maxInt(u32);
            };

            break :blk os.darwin.__ulock_wait(flags, addr, expect, timeout_us);
        };

        if (status >= 0) return;
        switch (@as(std.os.E, @enumFromInt(-status))) {
            // Wait was interrupted by the OS or other spurious signalling.
            .INTR => {},
            // Address of the futex was paged out. This is unlikely, but possible in theory, and
            // pthread/libdispatch on darwin bother to handle it. In this case we'll return
            // without waiting, but the caller should retry anyway.
            .FAULT => {},
            // Only report Timeout if we didn't have to cap the timeout
            .TIMEDOUT => {
                assert(timeout != null);
                if (!timeout_overflowed) return error.Timeout;
            },
            else => unreachable,
        }
    }

    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        var flags: u32 = os.darwin.UL_COMPARE_AND_WAIT | os.darwin.ULF_NO_ERRNO;
        if (max_waiters > 1) {
            flags |= os.darwin.ULF_WAKE_ALL;
        }

        while (true) {
            const addr = @as(*const anyopaque, @ptrCast(ptr));
            const status = os.darwin.__ulock_wake(flags, addr, 0);

            if (status >= 0) return;
            switch (@as(std.os.E, @enumFromInt(-status))) {
                .INTR => continue, // spurious wake()
                .FAULT => unreachable, // __ulock_wake doesn't generate EFAULT according to darwin pthread_cond_t
                .NOENT => return, // nothing was woken up
                .ALREADY => unreachable, // only for ULF_WAKE_THREAD
                else => unreachable,
            }
        }
    }
};
// https://man7.org/linux/man-pages/man2/futex.2.html
const LinuxImpl = struct {
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        // `ts` is left undefined unless a timeout was requested; the kernel
        // only reads it when a non-null pointer is passed below.
        var ts: os.timespec = undefined;
        if (timeout) |timeout_ns| {
            ts.tv_sec = @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
            ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
        }

        const rc = os.linux.futex_wait(
            @as(*const i32, @ptrCast(&ptr.value)),
            os.linux.FUTEX.PRIVATE_FLAG | os.linux.FUTEX.WAIT,
            @as(i32, @bitCast(expect)),
            if (timeout != null) &ts else null,
        );

        switch (os.linux.getErrno(rc)) {
            .SUCCESS => {}, // notified by `wake()`
            .INTR => {}, // spurious wakeup
            .AGAIN => {}, // ptr.* != expect
            .TIMEDOUT => {
                assert(timeout != null);
                return error.Timeout;
            },
            .INVAL => {}, // possibly timeout overflow
            .FAULT => unreachable, // ptr was invalid
            else => unreachable,
        }
    }

    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        const rc = os.linux.futex_wake(
            @as(*const i32, @ptrCast(&ptr.value)),
            os.linux.FUTEX.PRIVATE_FLAG | os.linux.FUTEX.WAKE,
            // futex_wake takes an i32 count; clamp larger requests
            std.math.cast(i32, max_waiters) orelse std.math.maxInt(i32),
        );

        switch (os.linux.getErrno(rc)) {
            .SUCCESS => {}, // successful wake up
            .INVAL => {}, // invalid futex_wait() on ptr done elsewhere
            .FAULT => {}, // pointer became invalid while doing the wake
            else => unreachable,
        }
    }
};
// https://www.freebsd.org/cgi/man.cgi?query=_umtx_op&sektion=2&n=1
const FreebsdImpl = struct {
    /// Block via _umtx_op(UMTX_OP_WAIT_UINT_PRIVATE) until woken, `ptr.* != expect`,
    /// or the optional relative timeout (nanoseconds) expires.
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        // _umtx_op takes the timeout struct size in `uaddr` and a pointer to it in `uaddr2`;
        // both stay zero/null when no timeout was requested.
        var tm_size: usize = 0;
        var tm: os.freebsd._umtx_time = undefined;
        var tm_ptr: ?*const os.freebsd._umtx_time = null;

        if (timeout) |timeout_ns| {
            tm_ptr = &tm;
            tm_size = @sizeOf(@TypeOf(tm));

            tm._flags = 0; // use relative time not UMTX_ABSTIME
            tm._clockid = os.CLOCK.MONOTONIC;
            tm._timeout.tv_sec = @as(@TypeOf(tm._timeout.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
            tm._timeout.tv_nsec = @as(@TypeOf(tm._timeout.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
        }

        const rc = os.freebsd._umtx_op(
            @intFromPtr(&ptr.value),
            @intFromEnum(os.freebsd.UMTX_OP.WAIT_UINT_PRIVATE),
            @as(c_ulong, expect),
            tm_size,
            @intFromPtr(tm_ptr),
        );

        switch (os.errno(rc)) {
            .SUCCESS => {},
            .FAULT => unreachable, // one of the args points to invalid memory
            .INVAL => unreachable, // arguments should be correct
            .TIMEDOUT => {
                assert(timeout != null);
                return error.Timeout;
            },
            .INTR => {}, // spurious wake
            else => unreachable,
        }
    }

    /// Wake up to `max_waiters` threads blocked in `wait()` on `ptr`.
    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        const rc = os.freebsd._umtx_op(
            @intFromPtr(&ptr.value),
            @intFromEnum(os.freebsd.UMTX_OP.WAKE_PRIVATE),
            @as(c_ulong, max_waiters),
            0, // there is no timeout struct
            0, // there is no timeout struct pointer
        );

        switch (os.errno(rc)) {
            .SUCCESS => {},
            .FAULT => {}, // it's ok if the ptr doesn't point to valid memory
            .INVAL => unreachable, // arguments should be correct
            else => unreachable,
        }
    }
};
// https://man.openbsd.org/futex.2
const OpenbsdImpl = struct {
    /// Block via futex(2) until woken, `ptr.* != expect`,
    /// or the optional relative timeout (nanoseconds) expires.
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        // futex(2) takes a *relative* timespec; only filled in when a timeout was requested.
        var ts: os.timespec = undefined;
        if (timeout) |timeout_ns| {
            ts.tv_sec = @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
            ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
        }

        const rc = os.openbsd.futex(
            @as(*const volatile u32, @ptrCast(&ptr.value)),
            os.openbsd.FUTEX_WAIT | os.openbsd.FUTEX_PRIVATE_FLAG,
            @as(c_int, @bitCast(expect)),
            if (timeout != null) &ts else null,
            null, // FUTEX_WAIT takes no requeue address
        );

        switch (os.errno(rc)) {
            .SUCCESS => {}, // woken up by wake
            .NOSYS => unreachable, // the futex operation shouldn't be invalid
            .FAULT => unreachable, // ptr was invalid
            .AGAIN => {}, // ptr != expect
            .INVAL => unreachable, // invalid timeout
            .TIMEDOUT => {
                assert(timeout != null);
                return error.Timeout;
            },
            .INTR => {}, // spurious wake from signal
            .CANCELED => {}, // spurious wake from signal with SA_RESTART
            else => unreachable,
        }
    }

    /// Wake up to `max_waiters` threads blocked in `wait()` on `ptr`.
    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        const rc = os.openbsd.futex(
            @as(*const volatile u32, @ptrCast(&ptr.value)),
            os.openbsd.FUTEX_WAKE | os.openbsd.FUTEX_PRIVATE_FLAG,
            // The wake count is signed; clamp larger requests to maxInt(c_int).
            std.math.cast(c_int, max_waiters) orelse std.math.maxInt(c_int),
            null, // FUTEX_WAKE takes no timeout ptr
            null, // FUTEX_WAKE takes no requeue address
        );

        // returns number of threads woken up.
        assert(rc >= 0);
    }
};
// https://man.dragonflybsd.org/?command=umtx&section=2
const DragonflyImpl = struct {
    /// Block via umtx_sleep() until woken, `ptr.* != expect`,
    /// or the optional relative timeout (nanoseconds) expires.
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        // Dragonfly uses a scheme where 0 timeout means wait until signaled or spurious wake.
        // Its reporting of timeouts is also unreliable so we use an external timing source (Timer) instead.
        var timeout_us: c_int = 0;
        var timeout_overflowed = false;
        var sleep_timer: std.time.Timer = undefined;

        if (timeout) |delay| {
            assert(delay != 0); // handled by timedWait().
            // umtx_sleep takes micro-seconds in a c_int; cap on overflow and remember we did.
            timeout_us = std.math.cast(c_int, delay / std.time.ns_per_us) orelse blk: {
                timeout_overflowed = true;
                break :blk std.math.maxInt(c_int);
            };

            // Only need to record the start time if we can provide somewhat accurate error.Timeout's
            if (!timeout_overflowed) {
                sleep_timer = std.time.Timer.start() catch unreachable;
            }
        }

        const value = @as(c_int, @bitCast(expect));
        const addr = @as(*const volatile c_int, @ptrCast(&ptr.value));
        const rc = os.dragonfly.umtx_sleep(addr, value, timeout_us);

        switch (os.errno(rc)) {
            .SUCCESS => {},
            .BUSY => {}, // ptr != expect
            .AGAIN => { // maybe timed out, or paged out, or hit 2s kernel refresh
                if (timeout) |timeout_ns| {
                    // Report error.Timeout only if we know the timeout duration has passed.
                    // If not, there's not much choice other than treating it as a spurious wake.
                    if (!timeout_overflowed and sleep_timer.read() >= timeout_ns) {
                        return error.Timeout;
                    }
                }
            },
            .INTR => {}, // spurious wake
            .INVAL => unreachable, // invalid timeout
            else => unreachable,
        }
    }

    /// Wake up to `max_waiters` threads blocked in `wait()` on `ptr`.
    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        // A count of zero means wake all waiters.
        assert(max_waiters != 0);
        // Requests larger than maxInt(c_int) simply wake everyone (count 0).
        const to_wake = std.math.cast(c_int, max_waiters) orelse 0;

        // https://man.dragonflybsd.org/?command=umtx&section=2
        // > umtx_wakeup() will generally return 0 unless the address is bad.
        // We are fine with the address being bad (e.g. for Semaphore.post() where Semaphore.wait() frees the Semaphore)
        const addr = @as(*const volatile c_int, @ptrCast(&ptr.value));
        _ = os.dragonfly.umtx_wakeup(addr, to_wake);
    }
};
const WasmImpl = struct {
    /// Block via `memory.atomic.wait32` until notified, `ptr.* != expect`,
    /// or the optional timeout (nanoseconds) expires.
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        if (!comptime std.Target.wasm.featureSetHas(builtin.target.cpu.features, .atomics)) {
            @compileError("WASI target missing cpu feature 'atomics'");
        }
        // wait32 takes a signed 64-bit nanosecond timeout; -1 means wait forever.
        const to: i64 = if (timeout) |to| @intCast(to) else -1;
        const result = asm (
            \\local.get %[ptr]
            \\local.get %[expected]
            \\local.get %[timeout]
            \\memory.atomic.wait32 0
            \\local.set %[ret]
            : [ret] "=r" (-> u32),
            : [ptr] "r" (&ptr.value),
              [expected] "r" (@as(i32, @bitCast(expect))),
              [timeout] "r" (to),
        );
        switch (result) {
            0 => {}, // ok: woken by a notify
            1 => {}, // loaded value != expect, so no wait happened
            2 => return error.Timeout,
            else => unreachable,
        }
    }

    /// Wake up to `max_waiters` threads via `memory.atomic.notify`.
    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        if (!comptime std.Target.wasm.featureSetHas(builtin.target.cpu.features, .atomics)) {
            @compileError("WASI target missing cpu feature 'atomics'");
        }
        assert(max_waiters != 0);
        const woken_count = asm (
            \\local.get %[ptr]
            \\local.get %[waiters]
            \\memory.atomic.notify 0
            \\local.set %[ret]
            : [ret] "=r" (-> u32),
            : [ptr] "r" (&ptr.value),
              [waiters] "r" (max_waiters),
        );
        _ = woken_count; // can be 0 when linker flag 'shared-memory' is not enabled
    }
};
/// Modified version of linux's futex and Go's sema to implement userspace wait queues with pthread:
/// https://code.woboq.org/linux/linux/kernel/futex.c.html
/// https://go.dev/src/runtime/sema.go
const PosixImpl = struct {
    /// A one-shot, single-waiter event built from a pthread mutex + condition variable.
    /// Exactly one thread may wait() and set() must be called at most once.
    const Event = struct {
        cond: std.c.pthread_cond_t,
        mutex: std.c.pthread_mutex_t,
        state: enum { empty, waiting, notified },

        fn init(self: *Event) void {
            // Use static init instead of pthread_cond/mutex_init() since this is generally faster.
            self.cond = .{};
            self.mutex = .{};
            self.state = .empty;
        }

        fn deinit(self: *Event) void {
            // Some platforms reportedly give EINVAL for statically initialized pthread types.
            const rc = std.c.pthread_cond_destroy(&self.cond);
            assert(rc == .SUCCESS or rc == .INVAL);

            const rm = std.c.pthread_mutex_destroy(&self.mutex);
            assert(rm == .SUCCESS or rm == .INVAL);

            self.* = undefined;
        }

        /// Block until set() is called, or return error.Timeout after the
        /// optional timeout (in nanoseconds) expires.
        fn wait(self: *Event, timeout: ?u64) error{Timeout}!void {
            assert(std.c.pthread_mutex_lock(&self.mutex) == .SUCCESS);
            defer assert(std.c.pthread_mutex_unlock(&self.mutex) == .SUCCESS);

            // Early return if the event was already set.
            if (self.state == .notified) {
                return;
            }

            // Compute the absolute timeout if one was specified.
            // POSIX requires that REALTIME is used by default for the pthread timedwait functions.
            // This can be changed with pthread_condattr_setclock, but it's an extension and may not be available everywhere.
            var ts: os.timespec = undefined;
            if (timeout) |timeout_ns| {
                os.clock_gettime(os.CLOCK.REALTIME, &ts) catch unreachable;
                ts.tv_sec +|= @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
                ts.tv_nsec += @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));

                // Normalize the timespec when the nanosecond component overflows a second.
                if (ts.tv_nsec >= std.time.ns_per_s) {
                    ts.tv_sec +|= 1;
                    ts.tv_nsec -= std.time.ns_per_s;
                }
            }

            // Start waiting on the event - there can be only one thread waiting.
            assert(self.state == .empty);
            self.state = .waiting;

            while (true) {
                // Block using either pthread_cond_wait or pthread_cond_timewait if there's an absolute timeout.
                const rc = blk: {
                    if (timeout == null) break :blk std.c.pthread_cond_wait(&self.cond, &self.mutex);
                    break :blk std.c.pthread_cond_timedwait(&self.cond, &self.mutex, &ts);
                };

                // After waking up, check if the event was set.
                if (self.state == .notified) {
                    return;
                }

                assert(self.state == .waiting);
                switch (rc) {
                    .SUCCESS => {}, // spurious condvar wakeup: loop and re-check the state.
                    .TIMEDOUT => {
                        // If timed out, reset the event to avoid the set() thread doing an unnecessary signal().
                        self.state = .empty;
                        return error.Timeout;
                    },
                    .INVAL => unreachable, // cond, mutex, and potentially ts should all be valid
                    .PERM => unreachable, // mutex is locked when cond_*wait() functions are called
                    else => unreachable,
                }
            }
        }

        /// Mark the event as set and wake up the waiter, if any.
        /// Must be called at most once per Event.
        fn set(self: *Event) void {
            assert(std.c.pthread_mutex_lock(&self.mutex) == .SUCCESS);
            defer assert(std.c.pthread_mutex_unlock(&self.mutex) == .SUCCESS);

            // Make sure that multiple calls to set() were not done on the same Event.
            const old_state = self.state;
            assert(old_state != .notified);

            // Mark the event as set and wake up the waiting thread if there was one.
            // This must be done while holding the mutex as the wait() thread could deallocate
            // the condition variable once it observes the new state, potentially causing a UAF if done unlocked.
            self.state = .notified;
            if (old_state == .waiting) {
                assert(std.c.pthread_cond_signal(&self.cond) == .SUCCESS);
            }
        }
    };
    // Treap keyed by futex address; each node is the head of that address' wait queue.
    const Treap = std.Treap(usize, std.math.order);

    /// One blocked thread. Lives on the waiting thread's stack; linked into a
    /// per-address FIFO queue whose head is stored in the bucket's treap.
    const Waiter = struct {
        node: Treap.Node, // treap linkage; only meaningful for the queue head.
        prev: ?*Waiter, // previous waiter in the queue (null for the head).
        next: ?*Waiter, // next waiter in the queue (null for the tail).
        tail: ?*Waiter, // only valid on the queue head: current tail of the queue.
        is_queued: bool, // true while linked into a wait queue.
        event: Event, // per-waiter event used to block/unblock the thread.
    };
// An unordered set of Waiters
const WaitList = struct {
top: ?*Waiter = null,
len: usize = 0,
fn push(self: *WaitList, waiter: *Waiter) void {
waiter.next = self.top;
self.top = waiter;
self.len += 1;
}
fn pop(self: *WaitList) ?*Waiter {
const waiter = self.top orelse return null;
self.top = waiter.next;
self.len -= 1;
return waiter;
}
};
    /// Per-address FIFO wait queues, indexed by a treap keyed on the futex address.
    /// Only the queue head is linked into the treap; the rest hang off its prev/next/tail links.
    const WaitQueue = struct {
        /// Append `waiter` to the wait queue for `address`, creating the queue if needed.
        fn insert(treap: *Treap, address: usize, waiter: *Waiter) void {
            // prepare the waiter to be inserted.
            waiter.next = null;
            waiter.is_queued = true;

            // Find the wait queue entry associated with the address.
            // If there isn't a wait queue on the address, this waiter creates the queue.
            var entry = treap.getEntryFor(address);
            const entry_node = entry.node orelse {
                waiter.prev = null;
                waiter.tail = waiter;
                entry.set(&waiter.node);
                return;
            };

            // There's a wait queue on the address; get the queue head and tail.
            const head = @fieldParentPtr(Waiter, "node", entry_node);
            const tail = head.tail orelse unreachable;

            // Push the waiter to the tail by replacing it and linking to the previous tail.
            head.tail = waiter;
            tail.next = waiter;
            waiter.prev = tail;
        }

        /// Dequeue up to `max_waiters` waiters (FIFO order) from the queue for
        /// `address` and return them; dequeued waiters have `is_queued = false`.
        fn remove(treap: *Treap, address: usize, max_waiters: usize) WaitList {
            // Find the wait queue associated with this address and get the head/tail if any.
            var entry = treap.getEntryFor(address);
            var queue_head = if (entry.node) |node| @fieldParentPtr(Waiter, "node", node) else null;
            const queue_tail = if (queue_head) |head| head.tail else null;

            // Once we're done updating the head, fix its tail pointer and update the treap's queue head as well.
            defer entry.set(blk: {
                const new_head = queue_head orelse break :blk null;
                new_head.tail = queue_tail;
                break :blk &new_head.node;
            });

            var removed = WaitList{};
            while (removed.len < max_waiters) {
                // dequeue and collect waiters from their wait queue.
                const waiter = queue_head orelse break;
                queue_head = waiter.next;
                removed.push(waiter);

                // When dequeueing, we must mark is_queued as false.
                // This ensures that a waiter which calls tryRemove() returns false.
                assert(waiter.is_queued);
                waiter.is_queued = false;
            }

            return removed;
        }

        /// Unlink `waiter` from the queue for `address` if it is still queued.
        /// Returns false if a remove() already dequeued it (a wake() thread now owns it).
        fn tryRemove(treap: *Treap, address: usize, waiter: *Waiter) bool {
            if (!waiter.is_queued) {
                return false;
            }

            queue_remove: {
                // Find the wait queue associated with the address.
                var entry = blk: {
                    // A waiter without a previous link means it's the queue head that's in the treap so we can avoid lookup.
                    if (waiter.prev == null) {
                        assert(waiter.node.key == address);
                        break :blk treap.getEntryForExisting(&waiter.node);
                    }
                    break :blk treap.getEntryFor(address);
                };

                // The queue head and tail must exist if we're removing a queued waiter.
                const head = @fieldParentPtr(Waiter, "node", entry.node orelse unreachable);
                const tail = head.tail orelse unreachable;

                // A waiter with a previous link is never the head of the queue.
                if (waiter.prev) |prev| {
                    assert(waiter != head);
                    prev.next = waiter.next;

                    // A waiter with both a previous and next link is in the middle.
                    // We only need to update the surrounding waiter's links to remove it.
                    if (waiter.next) |next| {
                        assert(waiter != tail);
                        next.prev = waiter.prev;
                        break :queue_remove;
                    }

                    // A waiter with a previous but no next link means it's the tail of the queue.
                    // In that case, we need to update the head's tail reference.
                    assert(waiter == tail);
                    head.tail = waiter.prev;
                    break :queue_remove;
                }

                // A waiter with no previous link means it's the head of the queue.
                // We must replace (or remove) the head waiter reference in the treap.
                assert(waiter == head);
                entry.set(blk: {
                    const new_head = waiter.next orelse break :blk null;
                    new_head.tail = head.tail;
                    break :blk &new_head.node;
                });
            }

            // Mark the waiter as successfully removed.
            waiter.is_queued = false;
            return true;
        }
    };
    /// A hash-table bucket holding the wait queues for all addresses that map to it.
    const Bucket = struct {
        // Cache-line aligned to avoid false sharing between adjacent buckets.
        mutex: std.c.pthread_mutex_t align(std.atomic.cache_line) = .{},
        pending: Atomic(usize) = Atomic(usize).init(0),
        treap: Treap = .{},

        // Global array of buckets that addresses map to.
        // Bucket array size is pretty much arbitrary here, but it must be a power of two for fibonacci hashing.
        var buckets = [_]Bucket{.{}} ** @bitSizeOf(usize);

        // https://github.com/Amanieu/parking_lot/blob/1cf12744d097233316afa6c8b7d37389e4211756/core/src/parking_lot.rs#L343-L353
        fn from(address: usize) *Bucket {
            // The upper `@bitSizeOf(usize)` bits of the fibonacci golden ratio.
            // Hashing this via (h * k) >> (64 - b) where k=golden-ratio and b=bitsize-of-array
            // evenly lays out h=hash values over the bit range even when the hash has poor entropy (identity-hash for pointers).
            const max_multiplier_bits = @bitSizeOf(usize);
            const fibonacci_multiplier = 0x9E3779B97F4A7C15 >> (64 - max_multiplier_bits);

            const max_bucket_bits = @ctz(buckets.len);
            comptime assert(std.math.isPowerOfTwo(buckets.len));

            const index = (address *% fibonacci_multiplier) >> (max_multiplier_bits - max_bucket_bits);
            return &buckets[index];
        }
    };
const Address = struct {
fn from(ptr: *const Atomic(u32)) usize {
// Get the alignment of the pointer.
const alignment = @alignOf(Atomic(u32));
comptime assert(std.math.isPowerOfTwo(alignment));
// Make sure the pointer is aligned,
// then cut off the zero bits from the alignment to get the unique address.
const addr = @intFromPtr(ptr);
assert(addr & (alignment - 1) == 0);
return addr >> @ctz(@as(usize, alignment));
}
};
    /// Block the caller until `ptr`'s value changes from `expect`, a wake() arrives,
    /// or the optional timeout (in nanoseconds) expires with `error.Timeout`.
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        const address = Address.from(ptr);
        const bucket = Bucket.from(address);

        // Announce that there's a waiter in the bucket before checking the ptr/expect condition.
        // If the announcement is reordered after the ptr check, the waiter could deadlock:
        //
        // - T1: checks ptr == expect which is true
        // - T2: updates ptr to != expect
        // - T2: does Futex.wake(), sees no pending waiters, exits
        // - T1: bumps pending waiters (was reordered after the ptr == expect check)
        // - T1: goes to sleep and misses both the ptr change and T2's wake up
        //
        // SeqCst as Acquire barrier to ensure the announcement happens before the ptr check below.
        // SeqCst as shared modification order to form a happens-before edge with the fence(.SeqCst)+load() in wake().
        var pending = bucket.pending.fetchAdd(1, .SeqCst);
        assert(pending < std.math.maxInt(usize));

        // If the wait gets cancelled, remove the pending count we previously added.
        // This is done outside the mutex lock to keep the critical section short in case of contention.
        var cancelled = false;
        defer if (cancelled) {
            pending = bucket.pending.fetchSub(1, .Monotonic);
            assert(pending > 0);
        };

        var waiter: Waiter = undefined;
        {
            assert(std.c.pthread_mutex_lock(&bucket.mutex) == .SUCCESS);
            defer assert(std.c.pthread_mutex_unlock(&bucket.mutex) == .SUCCESS);

            // The value check must happen under the bucket lock to serialize with wake().
            cancelled = ptr.load(.Monotonic) != expect;
            if (cancelled) {
                return;
            }

            waiter.event.init();
            WaitQueue.insert(&bucket.treap, address, &waiter);
        }

        defer {
            assert(!waiter.is_queued);
            waiter.event.deinit();
        }

        waiter.event.wait(timeout) catch {
            // If we fail to cancel after a timeout, it means a wake() thread dequeued us and will wake us up.
            // We must wait until the event is set as that's a signal that the wake() thread won't access the waiter memory anymore.
            // If we return early without waiting, the waiter on the stack would be invalidated and the wake() thread risks a UAF.
            defer if (!cancelled) waiter.event.wait(null) catch unreachable;

            assert(std.c.pthread_mutex_lock(&bucket.mutex) == .SUCCESS);
            defer assert(std.c.pthread_mutex_unlock(&bucket.mutex) == .SUCCESS);

            cancelled = WaitQueue.tryRemove(&bucket.treap, address, &waiter);
            if (cancelled) {
                return error.Timeout;
            }
        };
    }
    /// Wake up to `max_waiters` threads blocked in wait() on `ptr`.
    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        const address = Address.from(ptr);
        const bucket = Bucket.from(address);

        // Quick check if there's even anything to wake up.
        // The change to the ptr's value must happen before we check for pending waiters.
        // If not, the wake() thread could miss a sleeping waiter and have it deadlock:
        //
        // - T2: p = has pending waiters (reordered before the ptr update)
        // - T1: bump pending waiters
        // - T1: if ptr == expected: sleep()
        // - T2: update ptr != expected
        // - T2: p is false from earlier so doesn't wake (T1 missed ptr update and T2 missed T1 sleeping)
        //
        // What we really want here is a Release load, but that doesn't exist under the C11 memory model.
        // We could instead do `bucket.pending.fetchAdd(0, Release) == 0` which achieves effectively the same thing,
        // but the RMW operation unconditionally marks the cache-line as modified for others causing unnecessary fetching/contention.
        //
        // Instead we opt to do a full-fence + load instead which avoids taking ownership of the cache-line.
        // fence(SeqCst) effectively converts the ptr update to SeqCst and the pending load to SeqCst: creating a Store-Load barrier.
        //
        // The pending count increment in wait() must also now use SeqCst for the update + this pending load
        // to be in the same modification order as our load isn't using Release/Acquire to guarantee it.
        bucket.pending.fence(.SeqCst);
        if (bucket.pending.load(.Monotonic) == 0) {
            return;
        }

        // Keep a list of all the waiters notified and wake them up outside the mutex critical section.
        var notified = WaitList{};
        defer if (notified.len > 0) {
            const pending = bucket.pending.fetchSub(notified.len, .Monotonic);
            assert(pending >= notified.len);

            while (notified.pop()) |waiter| {
                assert(!waiter.is_queued);
                waiter.event.set();
            }
        };

        assert(std.c.pthread_mutex_lock(&bucket.mutex) == .SUCCESS);
        defer assert(std.c.pthread_mutex_unlock(&bucket.mutex) == .SUCCESS);

        // Another pending check again to avoid the WaitQueue lookup if not necessary.
        if (bucket.pending.load(.Monotonic) > 0) {
            notified = WaitQueue.remove(&bucket.treap, address, max_waiters);
        }
    }
};
test "Futex - smoke test" {
    var value = Atomic(u32).init(0);

    // Mismatched expect values must return immediately rather than block.
    Futex.wait(&value, 0xdeadbeef);
    Futex.timedWait(&value, 0xdeadbeef, 0) catch {};

    // Matching expect values must block until the timeout expires.
    try testing.expectError(error.Timeout, Futex.timedWait(&value, 0, 0));
    try testing.expectError(error.Timeout, Futex.timedWait(&value, 0, std.time.ns_per_ms));

    // Waking any number of (non-existent) waiters is harmless.
    for ([_]u32{ 0, 1, std.math.maxInt(u32) }) |count| {
        Futex.wake(&value, count);
    }
}
test "Futex - signaling" {
    // This test requires spawning threads
    if (builtin.single_threaded) {
        return error.SkipZigTest;
    }

    const num_threads = 4;
    const num_iterations = 4;

    // Each Paddle waits for its own value to be bumped, then bumps the next
    // paddle's value, forming a ring of wake-ups.
    const Paddle = struct {
        value: Atomic(u32) = Atomic(u32).init(0),
        current: u32 = 0,

        fn hit(self: *@This()) void {
            _ = self.value.fetchAdd(1, .Release);
            Futex.wake(&self.value, 1);
        }

        fn run(self: *@This(), hit_to: *@This()) !void {
            while (self.current < num_iterations) {
                // Wait for the value to change from hit()
                var new_value: u32 = undefined;
                while (true) {
                    new_value = self.value.load(.Acquire);
                    if (new_value != self.current) break;
                    Futex.wait(&self.value, self.current);
                }

                // change the internal "current" value
                try testing.expectEqual(new_value, self.current + 1);
                self.current = new_value;

                // hit the next paddle
                hit_to.hit();
            }
        }
    };

    var paddles = [_]Paddle{.{}} ** num_threads;
    var threads = [_]std.Thread{undefined} ** num_threads;

    // Create a circle of paddles which hit each other
    for (&threads, 0..) |*t, i| {
        const paddle = &paddles[i];
        const hit_to = &paddles[(i + 1) % paddles.len];
        t.* = try std.Thread.spawn(.{}, Paddle.run, .{ paddle, hit_to });
    }

    // Hit the first paddle and wait for them all to complete by hitting each other for num_iterations.
    paddles[0].hit();
    for (threads) |t| t.join();
    for (paddles) |p| try testing.expectEqual(p.current, num_iterations);
}
test "Futex - broadcasting" {
    // This test requires spawning threads
    if (builtin.single_threaded) {
        return error.SkipZigTest;
    }

    const num_threads = 4;
    const num_iterations = 4;

    // A simple one-shot barrier: the last thread to arrive broadcasts to the rest.
    const Barrier = struct {
        count: Atomic(u32) = Atomic(u32).init(num_threads),
        futex: Atomic(u32) = Atomic(u32).init(0),

        fn wait(self: *@This()) !void {
            // Decrement the counter.
            // Release ensures stuff before this barrier.wait() happens before the last one.
            const count = self.count.fetchSub(1, .Release);
            try testing.expect(count <= num_threads);
            try testing.expect(count > 0);

            // First counter to reach zero wakes all other threads.
            // Acquire for the last counter ensures stuff before previous barrier.wait()s happened before it.
            // Release on futex update ensures stuff before all barrier.wait()'s happens before they all return.
            if (count - 1 == 0) {
                _ = self.count.load(.Acquire); // TODO: could be fence(Acquire) if not for TSAN
                self.futex.store(1, .Release);
                Futex.wake(&self.futex, num_threads - 1);
                return;
            }

            // Other threads wait until last counter wakes them up.
            // Acquire on futex synchronizes with last barrier count to ensure stuff before all barrier.wait()'s happen before us.
            while (self.futex.load(.Acquire) == 0) {
                Futex.wait(&self.futex, 0);
            }
        }
    };

    const Broadcast = struct {
        barriers: [num_iterations]Barrier = [_]Barrier{.{}} ** num_iterations,
        threads: [num_threads]std.Thread = undefined,

        fn run(self: *@This()) !void {
            for (&self.barriers) |*barrier| {
                try barrier.wait();
            }
        }
    };

    var broadcast = Broadcast{};
    for (&broadcast.threads) |*t| t.* = try std.Thread.spawn(.{}, Broadcast.run, .{&broadcast});
    for (broadcast.threads) |t| t.join();
}
/// Deadline is used to wait efficiently for a pointer's value to change using Futex and a fixed timeout.
///
/// Futex's timedWait() api uses a relative duration which suffers from over-waiting
/// when used in a loop which is often required due to the possibility of spurious wakeups.
///
/// Deadline instead converts the relative timeout to an absolute one so that multiple calls
/// to Futex timedWait() can block for and report more accurate error.Timeouts.
pub const Deadline = struct {
    timeout: ?u64,
    started: std.time.Timer,

    /// Create the deadline to expire after the given amount of time in nanoseconds passes.
    /// Pass in `null` to have the deadline call `Futex.wait()` and never expire.
    pub fn init(expires_in_ns: ?u64) Deadline {
        var self = Deadline{
            .timeout = expires_in_ns,
            .started = undefined,
        };
        // A Timer is only needed (and required to be supported) when there's
        // an actual deadline to report error.Timeout against.
        if (self.timeout != null) {
            self.started = std.time.Timer.start() catch unreachable;
        }
        return self;
    }

    /// Wait until either:
    /// - the `ptr`'s value changes from `expect`.
    /// - `Futex.wake()` is called on the `ptr`.
    /// - A spurious wake occurs.
    /// - The deadline expires; In which case `error.Timeout` is returned.
    pub fn wait(self: *Deadline, ptr: *const Atomic(u32), expect: u32) error{Timeout}!void {
        @setCold(true);

        // No deadline configured: block indefinitely.
        const timeout_ns = self.timeout orelse {
            return Futex.wait(ptr, expect);
        };

        // Derive the remaining relative timeout from the absolute deadline,
        // saturating to zero once the deadline has already passed.
        const remaining_ns = std.math.sub(u64, timeout_ns, self.started.read()) catch 0;
        return Futex.timedWait(ptr, expect, remaining_ns);
    }
};
test "Futex - Deadline" {
    var deadline = Deadline.init(100 * std.time.ns_per_ms);
    var futex_word = Atomic(u32).init(0);

    // Nothing ever wakes the futex word, so the deadline must eventually expire.
    var expired = false;
    while (!expired) {
        deadline.wait(&futex_word, 0) catch {
            expired = true;
        };
    }
}
| https://raw.githubusercontent.com/mundusnine/FoundryTools_linux_x64/98e738bf92a416b255c9d11b78e8033071b52672/lib/std/Thread/Futex.zig |
//! Futex is a mechanism used to block (`wait`) and unblock (`wake`) threads using a 32bit memory address as hints.
//! Blocking a thread is acknowledged only if the 32bit memory address is equal to a given value.
//! This check helps avoid block/unblock deadlocks which occur if a `wake()` happens before a `wait()`.
//! Using Futex, other Thread synchronization primitives can be built which efficiently wait for cross-thread events or signals.
const std = @import("../std.zig");
const builtin = @import("builtin");
const Futex = @This();
const os = std.os;
const assert = std.debug.assert;
const testing = std.testing;
const Atomic = std.atomic.Atomic;
/// Checks if `ptr` still contains the value `expect` and, if so, blocks the caller until either:
/// - The value at `ptr` is no longer equal to `expect`.
/// - The caller is unblocked by a matching `wake()`.
/// - The caller is unblocked spuriously ("at random").
///
/// The checking of `ptr` and `expect`, along with blocking the caller, is done atomically
/// and totally ordered (sequentially consistent) with respect to other wait()/wake() calls on the same `ptr`.
pub fn wait(ptr: *const Atomic(u32), expect: u32) void {
    @setCold(true);

    // With a null timeout, the only error (Timeout) can never occur.
    Impl.wait(ptr, expect, null) catch unreachable;
}
/// Checks if `ptr` still contains the value `expect` and, if so, blocks the caller until either:
/// - The value at `ptr` is no longer equal to `expect`.
/// - The caller is unblocked by a matching `wake()`.
/// - The caller is unblocked spuriously ("at random").
/// - The caller blocks for longer than the given timeout. In which case, `error.Timeout` is returned.
///
/// The checking of `ptr` and `expect`, along with blocking the caller, is done atomically
/// and totally ordered (sequentially consistent) with respect to other wait()/wake() calls on the same `ptr`.
pub fn timedWait(ptr: *const Atomic(u32), expect: u32, timeout_ns: u64) error{Timeout}!void {
    @setCold(true);

    // A no-op (zero) timeout never needs to enter the OS: a single check suffices.
    if (timeout_ns == 0) {
        if (ptr.load(.SeqCst) == expect) return error.Timeout;
        return;
    }

    return Impl.wait(ptr, expect, timeout_ns);
}
/// Unblocks at most `max_waiters` callers blocked in a `wait()` call on `ptr`.
pub fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
    @setCold(true);

    // Waking zero waiters is a no-op; skip the OS call entirely.
    if (max_waiters != 0) {
        Impl.wake(ptr, max_waiters);
    }
}
// Select the platform-specific futex implementation at comptime.
// Order matters: single_threaded must be checked before any OS tag, and the
// wasm/pthread fallbacks come after all OS-specific implementations.
const Impl = if (builtin.single_threaded)
    SingleThreadedImpl
else if (builtin.os.tag == .windows)
    WindowsImpl
else if (builtin.os.tag.isDarwin())
    DarwinImpl
else if (builtin.os.tag == .linux)
    LinuxImpl
else if (builtin.os.tag == .freebsd)
    FreebsdImpl
else if (builtin.os.tag == .openbsd)
    OpenbsdImpl
else if (builtin.os.tag == .dragonfly)
    DragonflyImpl
else if (builtin.target.isWasm())
    WasmImpl
else if (std.Thread.use_pthreads)
    PosixImpl
else
    UnsupportedImpl;
/// We can't do @compileError() in the `Impl` switch statement above as its eagerly evaluated.
/// So instead, we @compileError() on the methods themselves for platforms which don't support futex.
const UnsupportedImpl = struct {
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        return unsupported(.{ ptr, expect, timeout });
    }

    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        return unsupported(.{ ptr, max_waiters });
    }

    fn unsupported(args: anytype) noreturn {
        _ = args;
        @compileError("Unsupported operating system " ++ @tagName(builtin.target.os.tag));
    }
};
const SingleThreadedImpl = struct {
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        // The value already changed; nothing to wait for.
        if (ptr.loadUnchecked() != expect) {
            return;
        }

        // There are no other threads which could wake us up,
        // so waiting without a timeout would never return.
        const delay = timeout orelse unreachable; // deadlock detected

        std.time.sleep(delay);
        return error.Timeout;
    }

    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        // There are no other threads to possibly wake up
        _ = ptr;
        _ = max_waiters;
    }
};
// We use WaitOnAddress through NtDll instead of API-MS-Win-Core-Synch-l1-2-0.dll
// as it's generally already a linked target and is autoloaded into all processes anyway.
const WindowsImpl = struct {
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        // NTDLL functions work with time in units of 100 nanoseconds.
        // Positive values are absolute deadlines while negative values are relative durations.
        var timeout_value: os.windows.LARGE_INTEGER = undefined;
        var timeout_ptr: ?*const os.windows.LARGE_INTEGER = null;
        if (timeout) |delay| {
            timeout_value = -@as(os.windows.LARGE_INTEGER, @intCast(delay / 100));
            timeout_ptr = &timeout_value;
        }

        const rc = os.windows.ntdll.RtlWaitOnAddress(
            @as(?*const anyopaque, @ptrCast(ptr)),
            @as(?*const anyopaque, @ptrCast(&expect)),
            @sizeOf(@TypeOf(expect)),
            timeout_ptr,
        );

        switch (rc) {
            .SUCCESS => {},
            .TIMEOUT => {
                assert(timeout != null);
                return error.Timeout;
            },
            else => unreachable,
        }
    }

    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        const address = @as(?*const anyopaque, @ptrCast(ptr));
        assert(max_waiters != 0);

        // There's no NT api to wake an arbitrary count: it's one or everyone.
        if (max_waiters == 1) {
            os.windows.ntdll.RtlWakeAddressSingle(address);
        } else {
            os.windows.ntdll.RtlWakeAddressAll(address);
        }
    }
};
const DarwinImpl = struct {
    /// Block via __ulock_wait/__ulock_wait2 until woken, `ptr.* != expect`,
    /// or the optional timeout (in nanoseconds) expires.
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        // Darwin XNU 7195.50.7.100.1 introduced __ulock_wait2 and migrated code paths (notably pthread_cond_t) towards it:
        // https://github.com/apple/darwin-xnu/commit/d4061fb0260b3ed486147341b72468f836ed6c8f#diff-08f993cc40af475663274687b7c326cc6c3031e0db3ac8de7b24624610616be6
        //
        // This XNU version appears to correspond to 11.0.1:
        // https://kernelshaman.blogspot.com/2021/01/building-xnu-for-macos-big-sur-1101.html
        //
        // ulock_wait() uses 32-bit micro-second timeouts where 0 = INFINITE or no-timeout
        // ulock_wait2() uses 64-bit nano-second timeouts (with the same convention)
        const supports_ulock_wait2 = builtin.target.os.version_range.semver.min.major >= 11;

        // 0 means "no timeout" for both syscalls, hence the non-optional default.
        var timeout_ns: u64 = 0;
        if (timeout) |delay| {
            assert(delay != 0); // handled by timedWait()
            timeout_ns = delay;
        }

        // If we're using `__ulock_wait` and `timeout` is too big to fit inside a `u32` count of
        // micro-seconds (around 70min), we'll request a shorter timeout. This is fine (users
        // should handle spurious wakeups), but we need to remember that we did so, so that
        // we don't return `Timeout` incorrectly. If that happens, we set this variable to
        // true so that we we know to ignore the ETIMEDOUT result.
        var timeout_overflowed = false;

        const addr = @as(*const anyopaque, @ptrCast(ptr));
        const flags = os.darwin.UL_COMPARE_AND_WAIT | os.darwin.ULF_NO_ERRNO;
        const status = blk: {
            if (supports_ulock_wait2) {
                break :blk os.darwin.__ulock_wait2(flags, addr, expect, timeout_ns, 0);
            }

            const timeout_us = std.math.cast(u32, timeout_ns / std.time.ns_per_us) orelse overflow: {
                timeout_overflowed = true;
                break :overflow std.math.maxInt(u32);
            };

            break :blk os.darwin.__ulock_wait(flags, addr, expect, timeout_us);
        };

        // With ULF_NO_ERRNO, a negative return value is -errno.
        if (status >= 0) return;
        switch (@as(std.os.E, @enumFromInt(-status))) {
            // Wait was interrupted by the OS or other spurious signalling.
            .INTR => {},
            // Address of the futex was paged out. This is unlikely, but possible in theory, and
            // pthread/libdispatch on darwin bother to handle it. In this case we'll return
            // without waiting, but the caller should retry anyway.
            .FAULT => {},
            // Only report Timeout if we didn't have to cap the timeout
            .TIMEDOUT => {
                assert(timeout != null);
                if (!timeout_overflowed) return error.Timeout;
            },
            else => unreachable,
        }
    }
fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
var flags: u32 = os.darwin.UL_COMPARE_AND_WAIT | os.darwin.ULF_NO_ERRNO;
if (max_waiters > 1) {
flags |= os.darwin.ULF_WAKE_ALL;
}
while (true) {
const addr = @as(*const anyopaque, @ptrCast(ptr));
const status = os.darwin.__ulock_wake(flags, addr, 0);
if (status >= 0) return;
switch (@as(std.os.E, @enumFromInt(-status))) {
.INTR => continue, // spurious wake()
.FAULT => unreachable, // __ulock_wake doesn't generate EFAULT according to darwin pthread_cond_t
.NOENT => return, // nothing was woken up
.ALREADY => unreachable, // only for ULF_WAKE_THREAD
else => unreachable,
}
}
}
};
// Futex backend for Linux using the futex(2) syscall directly.
// https://man7.org/linux/man-pages/man2/futex.2.html
const LinuxImpl = struct {
    /// Block until the futex word at `ptr` is woken or no longer holds `expect`,
    /// or until `timeout` nanoseconds (if provided) elapse.
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        var ts: os.timespec = undefined;
        if (timeout) |timeout_ns| {
            ts.tv_sec = @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
            ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
        }

        const rc = os.linux.futex_wait(
            @as(*const i32, @ptrCast(&ptr.value)),
            // PRIVATE_FLAG: this futex is process-local, skipping cross-process bookkeeping.
            os.linux.FUTEX.PRIVATE_FLAG | os.linux.FUTEX.WAIT,
            @as(i32, @bitCast(expect)),
            if (timeout != null) &ts else null,
        );

        switch (os.linux.getErrno(rc)) {
            .SUCCESS => {}, // notified by `wake()`
            .INTR => {}, // spurious wakeup
            .AGAIN => {}, // ptr.* != expect
            .TIMEDOUT => {
                assert(timeout != null);
                return error.Timeout;
            },
            .INVAL => {}, // possibly timeout overflow
            .FAULT => unreachable, // ptr was invalid
            else => unreachable,
        }
    }

    /// Wake up to `max_waiters` threads blocked in `wait()` on `ptr`.
    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        const rc = os.linux.futex_wake(
            @as(*const i32, @ptrCast(&ptr.value)),
            os.linux.FUTEX.PRIVATE_FLAG | os.linux.FUTEX.WAKE,
            // futex_wake takes an i32 count; saturate huge requests to "wake everyone".
            std.math.cast(i32, max_waiters) orelse std.math.maxInt(i32),
        );

        switch (os.linux.getErrno(rc)) {
            .SUCCESS => {}, // successful wake up
            .INVAL => {}, // invalid futex_wait() on ptr done elsewhere
            .FAULT => {}, // pointer became invalid while doing the wake
            else => unreachable,
        }
    }
};
// Futex backend for FreeBSD using the _umtx_op(2) syscall.
// https://www.freebsd.org/cgi/man.cgi?query=_umtx_op&sektion=2&n=1
const FreebsdImpl = struct {
    /// Block until the u32 at `ptr` is woken or no longer holds `expect`,
    /// or until `timeout` nanoseconds (if provided) elapse.
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        var tm_size: usize = 0;
        var tm: os.freebsd._umtx_time = undefined;
        var tm_ptr: ?*const os.freebsd._umtx_time = null;

        if (timeout) |timeout_ns| {
            tm_ptr = &tm;
            tm_size = @sizeOf(@TypeOf(tm));

            tm._flags = 0; // use relative time not UMTX_ABSTIME
            tm._clockid = os.CLOCK.MONOTONIC;
            tm._timeout.tv_sec = @as(@TypeOf(tm._timeout.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
            tm._timeout.tv_nsec = @as(@TypeOf(tm._timeout.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
        }

        const rc = os.freebsd._umtx_op(
            @intFromPtr(&ptr.value),
            @intFromEnum(os.freebsd.UMTX_OP.WAIT_UINT_PRIVATE),
            @as(c_ulong, expect),
            tm_size,
            @intFromPtr(tm_ptr),
        );

        switch (os.errno(rc)) {
            .SUCCESS => {},
            .FAULT => unreachable, // one of the args points to invalid memory
            .INVAL => unreachable, // arguments should be correct
            .TIMEDOUT => {
                assert(timeout != null);
                return error.Timeout;
            },
            .INTR => {}, // spurious wake
            else => unreachable,
        }
    }

    /// Wake up to `max_waiters` threads blocked in `wait()` on `ptr`.
    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        const rc = os.freebsd._umtx_op(
            @intFromPtr(&ptr.value),
            @intFromEnum(os.freebsd.UMTX_OP.WAKE_PRIVATE),
            @as(c_ulong, max_waiters),
            0, // there is no timeout struct
            0, // there is no timeout struct pointer
        );

        switch (os.errno(rc)) {
            .SUCCESS => {},
            .FAULT => {}, // it's ok if the ptr doesn't point to valid memory
            .INVAL => unreachable, // arguments should be correct
            else => unreachable,
        }
    }
};
// Futex backend for OpenBSD using its futex(2) syscall.
// https://man.openbsd.org/futex.2
const OpenbsdImpl = struct {
    /// Block until the u32 at `ptr` is woken or no longer holds `expect`,
    /// or until `timeout` nanoseconds (if provided) elapse.
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        var ts: os.timespec = undefined;
        if (timeout) |timeout_ns| {
            ts.tv_sec = @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
            ts.tv_nsec = @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));
        }

        const rc = os.openbsd.futex(
            @as(*const volatile u32, @ptrCast(&ptr.value)),
            os.openbsd.FUTEX_WAIT | os.openbsd.FUTEX_PRIVATE_FLAG,
            @as(c_int, @bitCast(expect)),
            if (timeout != null) &ts else null,
            null, // FUTEX_WAIT takes no requeue address
        );

        switch (os.errno(rc)) {
            .SUCCESS => {}, // woken up by wake
            .NOSYS => unreachable, // the futex operation shouldn't be invalid
            .FAULT => unreachable, // ptr was invalid
            .AGAIN => {}, // ptr != expect
            .INVAL => unreachable, // invalid timeout
            .TIMEDOUT => {
                assert(timeout != null);
                return error.Timeout;
            },
            .INTR => {}, // spurious wake from signal
            .CANCELED => {}, // spurious wake from signal with SA_RESTART
            else => unreachable,
        }
    }

    /// Wake up to `max_waiters` threads blocked in `wait()` on `ptr`.
    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        const rc = os.openbsd.futex(
            @as(*const volatile u32, @ptrCast(&ptr.value)),
            os.openbsd.FUTEX_WAKE | os.openbsd.FUTEX_PRIVATE_FLAG,
            // The count argument is a c_int; saturate huge requests.
            std.math.cast(c_int, max_waiters) orelse std.math.maxInt(c_int),
            null, // FUTEX_WAKE takes no timeout ptr
            null, // FUTEX_WAKE takes no requeue address
        );

        // returns number of threads woken up.
        assert(rc >= 0);
    }
};
// Futex backend for DragonflyBSD using the umtx_sleep/umtx_wakeup syscalls.
// https://man.dragonflybsd.org/?command=umtx&section=2
const DragonflyImpl = struct {
    /// Block until the u32 at `ptr` is woken or no longer holds `expect`,
    /// or until `timeout` nanoseconds (if provided) elapse.
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        // Dragonfly uses a scheme where 0 timeout means wait until signaled or spurious wake.
        // Its reporting of timeouts is also unreliable so we use an external timing source (Timer) instead.
        var timeout_us: c_int = 0;
        var timeout_overflowed = false;

        var sleep_timer: std.time.Timer = undefined;
        if (timeout) |delay| {
            assert(delay != 0); // handled by timedWait().
            timeout_us = std.math.cast(c_int, delay / std.time.ns_per_us) orelse blk: {
                timeout_overflowed = true;
                break :blk std.math.maxInt(c_int);
            };

            // Only need to record the start time if we can provide somewhat accurate error.Timeout's
            if (!timeout_overflowed) {
                sleep_timer = std.time.Timer.start() catch unreachable;
            }
        }

        const value = @as(c_int, @bitCast(expect));
        const addr = @as(*const volatile c_int, @ptrCast(&ptr.value));
        const rc = os.dragonfly.umtx_sleep(addr, value, timeout_us);

        switch (os.errno(rc)) {
            .SUCCESS => {},
            .BUSY => {}, // ptr != expect
            .AGAIN => { // maybe timed out, or paged out, or hit 2s kernel refresh
                if (timeout) |timeout_ns| {
                    // Report error.Timeout only if we know the timeout duration has passed.
                    // If not, there's not much choice other than treating it as a spurious wake.
                    if (!timeout_overflowed and sleep_timer.read() >= timeout_ns) {
                        return error.Timeout;
                    }
                }
            },
            .INTR => {}, // spurious wake
            .INVAL => unreachable, // invalid timeout
            else => unreachable,
        }
    }

    /// Wake up to `max_waiters` threads blocked in `wait()` on `ptr`.
    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        // A count of zero means wake all waiters.
        assert(max_waiters != 0);
        // If max_waiters doesn't fit in a c_int, fall back to 0 (= wake everyone).
        const to_wake = std.math.cast(c_int, max_waiters) orelse 0;

        // https://man.dragonflybsd.org/?command=umtx&section=2
        // > umtx_wakeup() will generally return 0 unless the address is bad.
        // We are fine with the address being bad (e.g. for Semaphore.post() where Semaphore.wait() frees the Semaphore)
        const addr = @as(*const volatile c_int, @ptrCast(&ptr.value));
        _ = os.dragonfly.umtx_wakeup(addr, to_wake);
    }
};
// Futex backend for WebAssembly using the threads proposal's
// memory.atomic.wait32/notify instructions (requires the 'atomics' feature).
const WasmImpl = struct {
    /// Block until the u32 at `ptr` is notified or no longer holds `expect`,
    /// or until `timeout` nanoseconds (if provided) elapse.
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        if (!comptime std.Target.wasm.featureSetHas(builtin.target.cpu.features, .atomics)) {
            @compileError("WASI target missing cpu feature 'atomics'");
        }
        // memory.atomic.wait32 takes an i64 nanosecond timeout; -1 means "wait forever".
        const to: i64 = if (timeout) |to| @intCast(to) else -1;
        const result = asm (
            \\local.get %[ptr]
            \\local.get %[expected]
            \\local.get %[timeout]
            \\memory.atomic.wait32 0
            \\local.set %[ret]
            : [ret] "=r" (-> u32),
            : [ptr] "r" (&ptr.value),
              [expected] "r" (@as(i32, @bitCast(expect))),
              [timeout] "r" (to),
        );
        switch (result) {
            0 => {}, // ok
            1 => {}, // expected != loaded
            2 => return error.Timeout,
            else => unreachable,
        }
    }

    /// Notify up to `max_waiters` threads blocked in `wait()` on `ptr`.
    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        if (!comptime std.Target.wasm.featureSetHas(builtin.target.cpu.features, .atomics)) {
            @compileError("WASI target missing cpu feature 'atomics'");
        }
        assert(max_waiters != 0);
        const woken_count = asm (
            \\local.get %[ptr]
            \\local.get %[waiters]
            \\memory.atomic.notify 0
            \\local.set %[ret]
            : [ret] "=r" (-> u32),
            : [ptr] "r" (&ptr.value),
              [waiters] "r" (max_waiters),
        );
        _ = woken_count; // can be 0 when linker flag 'shared-memory' is not enabled
    }
};
/// Modified version of linux's futex and Go's sema to implement userspace wait queues with pthread:
/// https://code.woboq.org/linux/linux/kernel/futex.c.html
/// https://go.dev/src/runtime/sema.go
const PosixImpl = struct {
    /// A one-shot, single-waiter event built from a pthread mutex + condvar.
    const Event = struct {
        cond: std.c.pthread_cond_t,
        mutex: std.c.pthread_mutex_t,
        // empty: no waiter and not yet set; waiting: one thread is blocked in wait();
        // notified: set() was called (sticky until deinit).
        state: enum { empty, waiting, notified },

        fn init(self: *Event) void {
            // Use static init instead of pthread_cond/mutex_init() since this is generally faster.
            self.cond = .{};
            self.mutex = .{};
            self.state = .empty;
        }

        fn deinit(self: *Event) void {
            // Some platforms reportedly give EINVAL for statically initialized pthread types.
            const rc = std.c.pthread_cond_destroy(&self.cond);
            assert(rc == .SUCCESS or rc == .INVAL);

            const rm = std.c.pthread_mutex_destroy(&self.mutex);
            assert(rm == .SUCCESS or rm == .INVAL);

            self.* = undefined;
        }

        /// Block until set() is called or the relative `timeout` (ns) elapses.
        fn wait(self: *Event, timeout: ?u64) error{Timeout}!void {
            assert(std.c.pthread_mutex_lock(&self.mutex) == .SUCCESS);
            defer assert(std.c.pthread_mutex_unlock(&self.mutex) == .SUCCESS);

            // Early return if the event was already set.
            if (self.state == .notified) {
                return;
            }

            // Compute the absolute timeout if one was specified.
            // POSIX requires that REALTIME is used by default for the pthread timedwait functions.
            // This can be changed with pthread_condattr_setclock, but it's an extension and may not be available everywhere.
            var ts: os.timespec = undefined;
            if (timeout) |timeout_ns| {
                os.clock_gettime(os.CLOCK.REALTIME, &ts) catch unreachable;
                // Saturating add on tv_sec guards against overflowing the deadline.
                ts.tv_sec +|= @as(@TypeOf(ts.tv_sec), @intCast(timeout_ns / std.time.ns_per_s));
                ts.tv_nsec += @as(@TypeOf(ts.tv_nsec), @intCast(timeout_ns % std.time.ns_per_s));

                // Normalize the nanoseconds field back into [0, 1s).
                if (ts.tv_nsec >= std.time.ns_per_s) {
                    ts.tv_sec +|= 1;
                    ts.tv_nsec -= std.time.ns_per_s;
                }
            }

            // Start waiting on the event - there can be only one thread waiting.
            assert(self.state == .empty);
            self.state = .waiting;

            while (true) {
                // Block using either pthread_cond_wait or pthread_cond_timewait if there's an absolute timeout.
                const rc = blk: {
                    if (timeout == null) break :blk std.c.pthread_cond_wait(&self.cond, &self.mutex);
                    break :blk std.c.pthread_cond_timedwait(&self.cond, &self.mutex, &ts);
                };

                // After waking up, check if the event was set.
                if (self.state == .notified) {
                    return;
                }

                assert(self.state == .waiting);
                switch (rc) {
                    .SUCCESS => {},
                    .TIMEDOUT => {
                        // If timed out, reset the event to avoid the set() thread doing an unnecessary signal().
                        self.state = .empty;
                        return error.Timeout;
                    },
                    .INVAL => unreachable, // cond, mutex, and potentially ts should all be valid
                    .PERM => unreachable, // mutex is locked when cond_*wait() functions are called
                    else => unreachable,
                }
            }
        }

        /// Mark the event as notified and wake the waiter if one is blocked.
        /// Must be called at most once per Event (asserted below).
        fn set(self: *Event) void {
            assert(std.c.pthread_mutex_lock(&self.mutex) == .SUCCESS);
            defer assert(std.c.pthread_mutex_unlock(&self.mutex) == .SUCCESS);

            // Make sure that multiple calls to set() were not done on the same Event.
            const old_state = self.state;
            assert(old_state != .notified);

            // Mark the event as set and wake up the waiting thread if there was one.
            // This must be done while the mutex is held as the wait() thread could deallocate
            // the condition variable once it observes the new state, potentially causing a UAF if done unlocked.
            self.state = .notified;
            if (old_state == .waiting) {
                assert(std.c.pthread_cond_signal(&self.cond) == .SUCCESS);
            }
        }
    };

    // Treap keyed by futex address; each node is the head of a per-address wait queue.
    const Treap = std.Treap(usize, std.math.order);

    // One stack-allocated record per blocked thread. Doubly linked within its
    // per-address queue; the queue head additionally caches the queue tail.
    const Waiter = struct {
        node: Treap.Node,
        prev: ?*Waiter,
        next: ?*Waiter,
        tail: ?*Waiter,
        is_queued: bool,
        event: Event,
    };

    // An unordered set of Waiters
    const WaitList = struct {
        top: ?*Waiter = null,
        len: usize = 0,

        fn push(self: *WaitList, waiter: *Waiter) void {
            waiter.next = self.top;
            self.top = waiter;
            self.len += 1;
        }

        fn pop(self: *WaitList) ?*Waiter {
            const waiter = self.top orelse return null;
            self.top = waiter.next;
            self.len -= 1;
            return waiter;
        }
    };

    // FIFO queue of waiters per futex address, stored in the bucket's treap.
    // All operations assume the owning bucket's mutex is held by the caller.
    const WaitQueue = struct {
        /// Append `waiter` to the tail of the queue for `address`, creating the queue if needed.
        fn insert(treap: *Treap, address: usize, waiter: *Waiter) void {
            // prepare the waiter to be inserted.
            waiter.next = null;
            waiter.is_queued = true;

            // Find the wait queue entry associated with the address.
            // If there isn't a wait queue on the address, this waiter creates the queue.
            var entry = treap.getEntryFor(address);
            const entry_node = entry.node orelse {
                waiter.prev = null;
                waiter.tail = waiter;
                entry.set(&waiter.node);
                return;
            };

            // There's a wait queue on the address; get the queue head and tail.
            const head = @fieldParentPtr(Waiter, "node", entry_node);
            const tail = head.tail orelse unreachable;

            // Push the waiter to the tail by replacing it and linking to the previous tail.
            head.tail = waiter;
            tail.next = waiter;
            waiter.prev = tail;
        }

        /// Dequeue up to `max_waiters` waiters from the front of the queue for `address`.
        fn remove(treap: *Treap, address: usize, max_waiters: usize) WaitList {
            // Find the wait queue associated with this address and get the head/tail if any.
            var entry = treap.getEntryFor(address);
            var queue_head = if (entry.node) |node| @fieldParentPtr(Waiter, "node", node) else null;
            const queue_tail = if (queue_head) |head| head.tail else null;

            // Once we're done updating the head, fix its tail pointer and update the treap's queue head as well.
            defer entry.set(blk: {
                const new_head = queue_head orelse break :blk null;
                new_head.tail = queue_tail;
                break :blk &new_head.node;
            });

            var removed = WaitList{};
            while (removed.len < max_waiters) {
                // dequeue and collect waiters from their wait queue.
                const waiter = queue_head orelse break;
                queue_head = waiter.next;
                removed.push(waiter);

                // When dequeueing, we must mark is_queued as false.
                // This ensures that a waiter which calls tryRemove() returns false.
                assert(waiter.is_queued);
                waiter.is_queued = false;
            }

            return removed;
        }

        /// Remove a specific `waiter` from its queue (used on timeout).
        /// Returns false if a concurrent remove() already dequeued it.
        fn tryRemove(treap: *Treap, address: usize, waiter: *Waiter) bool {
            if (!waiter.is_queued) {
                return false;
            }

            queue_remove: {
                // Find the wait queue associated with the address.
                var entry = blk: {
                    // A waiter without a previous link means it's the queue head that's in the treap so we can avoid lookup.
                    if (waiter.prev == null) {
                        assert(waiter.node.key == address);
                        break :blk treap.getEntryForExisting(&waiter.node);
                    }
                    break :blk treap.getEntryFor(address);
                };

                // The queue head and tail must exist if we're removing a queued waiter.
                const head = @fieldParentPtr(Waiter, "node", entry.node orelse unreachable);
                const tail = head.tail orelse unreachable;

                // A waiter with a previous link is never the head of the queue.
                if (waiter.prev) |prev| {
                    assert(waiter != head);
                    prev.next = waiter.next;

                    // A waiter with both a previous and next link is in the middle.
                    // We only need to update the surrounding waiter's links to remove it.
                    if (waiter.next) |next| {
                        assert(waiter != tail);
                        next.prev = waiter.prev;
                        break :queue_remove;
                    }

                    // A waiter with a previous but no next link means it's the tail of the queue.
                    // In that case, we need to update the head's tail reference.
                    assert(waiter == tail);
                    head.tail = waiter.prev;
                    break :queue_remove;
                }

                // A waiter with no previous link means it's the head of the queue.
                // We must replace (or remove) the head waiter reference in the treap.
                assert(waiter == head);
                entry.set(blk: {
                    const new_head = waiter.next orelse break :blk null;
                    new_head.tail = head.tail;
                    break :blk &new_head.node;
                });
            }

            // Mark the waiter as successfully removed.
            waiter.is_queued = false;
            return true;
        }
    };

    // A lock-sharded slot of the global wait-queue table; futex addresses hash to buckets.
    const Bucket = struct {
        // align to the cache line to avoid false sharing between adjacent buckets.
        mutex: std.c.pthread_mutex_t align(std.atomic.cache_line) = .{},
        pending: Atomic(usize) = Atomic(usize).init(0),
        treap: Treap = .{},

        // Global array of buckets that addresses map to.
        // Bucket array size is pretty much arbitrary here, but it must be a power of two for fibonacci hashing.
        var buckets = [_]Bucket{.{}} ** @bitSizeOf(usize);

        // https://github.com/Amanieu/parking_lot/blob/1cf12744d097233316afa6c8b7d37389e4211756/core/src/parking_lot.rs#L343-L353
        fn from(address: usize) *Bucket {
            // The upper `@bitSizeOf(usize)` bits of the fibonacci golden ratio.
            // Hashing this via (h * k) >> (64 - b) where k=golden-ration and b=bitsize-of-array
            // evenly lays out h=hash values over the bit range even when the hash has poor entropy (identity-hash for pointers).
            const max_multiplier_bits = @bitSizeOf(usize);
            const fibonacci_multiplier = 0x9E3779B97F4A7C15 >> (64 - max_multiplier_bits);

            const max_bucket_bits = @ctz(buckets.len);
            comptime assert(std.math.isPowerOfTwo(buckets.len));

            const index = (address *% fibonacci_multiplier) >> (max_multiplier_bits - max_bucket_bits);
            return &buckets[index];
        }
    };

    // Derives a unique hashable key from a futex word's pointer.
    const Address = struct {
        fn from(ptr: *const Atomic(u32)) usize {
            // Get the alignment of the pointer.
            const alignment = @alignOf(Atomic(u32));
            comptime assert(std.math.isPowerOfTwo(alignment));

            // Make sure the pointer is aligned,
            // then cut off the zero bits from the alignment to get the unique address.
            const addr = @intFromPtr(ptr);
            assert(addr & (alignment - 1) == 0);
            return addr >> @ctz(@as(usize, alignment));
        }
    };

    /// Block the calling thread until `ptr` is woken, no longer holds `expect`,
    /// or the relative `timeout` (ns) elapses.
    fn wait(ptr: *const Atomic(u32), expect: u32, timeout: ?u64) error{Timeout}!void {
        const address = Address.from(ptr);
        const bucket = Bucket.from(address);

        // Announce that there's a waiter in the bucket before checking the ptr/expect condition.
        // If the announcement is reordered after the ptr check, the waiter could deadlock:
        //
        // - T1: checks ptr == expect which is true
        // - T2: updates ptr to != expect
        // - T2: does Futex.wake(), sees no pending waiters, exits
        // - T1: bumps pending waiters (was reordered after the ptr == expect check)
        // - T1: goes to sleep and misses both the ptr change and T2's wake up
        //
        // SeqCst as Acquire barrier to ensure the announcement happens before the ptr check below.
        // SeqCst as shared modification order to form a happens-before edge with the fence(.SeqCst)+load() in wake().
        var pending = bucket.pending.fetchAdd(1, .SeqCst);
        assert(pending < std.math.maxInt(usize));

        // If the wait gets cancelled, remove the pending count we previously added.
        // This is done outside the mutex lock to keep the critical section short in case of contention.
        var cancelled = false;
        defer if (cancelled) {
            pending = bucket.pending.fetchSub(1, .Monotonic);
            assert(pending > 0);
        };

        var waiter: Waiter = undefined;
        {
            assert(std.c.pthread_mutex_lock(&bucket.mutex) == .SUCCESS);
            defer assert(std.c.pthread_mutex_unlock(&bucket.mutex) == .SUCCESS);

            // Re-check the condition under the lock: if the value changed we never enqueue.
            cancelled = ptr.load(.Monotonic) != expect;
            if (cancelled) {
                return;
            }

            waiter.event.init();
            WaitQueue.insert(&bucket.treap, address, &waiter);
        }

        defer {
            assert(!waiter.is_queued);
            waiter.event.deinit();
        }

        waiter.event.wait(timeout) catch {
            // If we fail to cancel after a timeout, it means a wake() thread dequeued us and will wake us up.
            // We must wait until the event is set as that's a signal that the wake() thread won't access the waiter memory anymore.
            // If we return early without waiting, the waiter on the stack would be invalidated and the wake() thread risks a UAF.
            defer if (!cancelled) waiter.event.wait(null) catch unreachable;

            assert(std.c.pthread_mutex_lock(&bucket.mutex) == .SUCCESS);
            defer assert(std.c.pthread_mutex_unlock(&bucket.mutex) == .SUCCESS);

            cancelled = WaitQueue.tryRemove(&bucket.treap, address, &waiter);
            if (cancelled) {
                return error.Timeout;
            }
        };
    }

    /// Wake up to `max_waiters` threads blocked in `wait()` on `ptr`.
    fn wake(ptr: *const Atomic(u32), max_waiters: u32) void {
        const address = Address.from(ptr);
        const bucket = Bucket.from(address);

        // Quick check if there's even anything to wake up.
        // The change to the ptr's value must happen before we check for pending waiters.
        // If not, the wake() thread could miss a sleeping waiter and have it deadlock:
        //
        // - T2: p = has pending waiters (reordered before the ptr update)
        // - T1: bump pending waiters
        // - T1: if ptr == expected: sleep()
        // - T2: update ptr != expected
        // - T2: p is false from earlier so doesn't wake (T1 missed ptr update and T2 missed T1 sleeping)
        //
        // What we really want here is a Release load, but that doesn't exist under the C11 memory model.
        // We could instead do `bucket.pending.fetchAdd(0, Release) == 0` which achieves effectively the same thing,
        // but the RMW operation unconditionally marks the cache-line as modified for others causing unnecessary fetching/contention.
        //
        // Instead we opt to do a full-fence + load instead which avoids taking ownership of the cache-line.
        // fence(SeqCst) effectively converts the ptr update to SeqCst and the pending load to SeqCst: creating a Store-Load barrier.
        //
        // The pending count increment in wait() must also now use SeqCst for the update + this pending load
        // to be in the same modification order as our load isn't using Release/Acquire to guarantee it.
        bucket.pending.fence(.SeqCst);
        if (bucket.pending.load(.Monotonic) == 0) {
            return;
        }

        // Keep a list of all the waiters notified and wake then up outside the mutex critical section.
        var notified = WaitList{};
        defer if (notified.len > 0) {
            const pending = bucket.pending.fetchSub(notified.len, .Monotonic);
            assert(pending >= notified.len);

            while (notified.pop()) |waiter| {
                assert(!waiter.is_queued);
                waiter.event.set();
            }
        };

        assert(std.c.pthread_mutex_lock(&bucket.mutex) == .SUCCESS);
        defer assert(std.c.pthread_mutex_unlock(&bucket.mutex) == .SUCCESS);

        // Another pending check again to avoid the WaitQueue lookup if not necessary.
        if (bucket.pending.load(.Monotonic) > 0) {
            notified = WaitQueue.remove(&bucket.treap, address, max_waiters);
        }
    }
};
test "Futex - smoke test" {
    var value = Atomic(u32).init(0);

    // Try waits with invalid values: value != expect, so these return immediately.
    Futex.wait(&value, 0xdeadbeef);
    Futex.timedWait(&value, 0xdeadbeef, 0) catch {};

    // Try timeout waits: value == expect, so each wait runs out its timeout.
    try testing.expectError(error.Timeout, Futex.timedWait(&value, 0, 0));
    try testing.expectError(error.Timeout, Futex.timedWait(&value, 0, std.time.ns_per_ms));

    // Try wakes
    Futex.wake(&value, 0);
    Futex.wake(&value, 1);
    Futex.wake(&value, std.math.maxInt(u32));
}
test "Futex - signaling" {
    // This test requires spawning threads
    if (builtin.single_threaded) {
        return error.SkipZigTest;
    }

    const num_threads = 4;
    const num_iterations = 4;

    // Each Paddle futex-waits for its value to be bumped by the previous
    // paddle's hit(), then "hits" (bumps and wakes) the next paddle in the ring.
    const Paddle = struct {
        value: Atomic(u32) = Atomic(u32).init(0),
        current: u32 = 0,

        fn hit(self: *@This()) void {
            _ = self.value.fetchAdd(1, .Release);
            Futex.wake(&self.value, 1);
        }

        fn run(self: *@This(), hit_to: *@This()) !void {
            while (self.current < num_iterations) {
                // Wait for the value to change from hit()
                var new_value: u32 = undefined;
                while (true) {
                    new_value = self.value.load(.Acquire);
                    if (new_value != self.current) break;
                    Futex.wait(&self.value, self.current);
                }

                // change the internal "current" value
                try testing.expectEqual(new_value, self.current + 1);
                self.current = new_value;

                // hit the next paddle
                hit_to.hit();
            }
        }
    };

    var paddles = [_]Paddle{.{}} ** num_threads;
    var threads = [_]std.Thread{undefined} ** num_threads;

    // Create a circle of paddles which hit each other
    for (&threads, 0..) |*t, i| {
        const paddle = &paddles[i];
        const hit_to = &paddles[(i + 1) % paddles.len];
        t.* = try std.Thread.spawn(.{}, Paddle.run, .{ paddle, hit_to });
    }

    // Hit the first paddle and wait for them all to complete by hitting each other for num_iterations.
    paddles[0].hit();
    for (threads) |t| t.join();
    for (paddles) |p| try testing.expectEqual(p.current, num_iterations);
}
test "Futex - broadcasting" {
    // This test requires spawning threads
    if (builtin.single_threaded) {
        return error.SkipZigTest;
    }

    const num_threads = 4;
    const num_iterations = 4;

    // A reusable-per-instance barrier: the last thread to arrive broadcasts
    // a futex wake to everyone blocked on `futex`.
    const Barrier = struct {
        count: Atomic(u32) = Atomic(u32).init(num_threads),
        futex: Atomic(u32) = Atomic(u32).init(0),

        fn wait(self: *@This()) !void {
            // Decrement the counter.
            // Release ensures stuff before this barrier.wait() happens before the last one.
            const count = self.count.fetchSub(1, .Release);
            try testing.expect(count <= num_threads);
            try testing.expect(count > 0);

            // First counter to reach zero wakes all other threads.
            // Acquire for the last counter ensures stuff before previous barrier.wait()s happened before it.
            // Release on futex update ensures stuff before all barrier.wait()'s happens before they all return.
            if (count - 1 == 0) {
                _ = self.count.load(.Acquire); // TODO: could be fence(Acquire) if not for TSAN
                self.futex.store(1, .Release);
                Futex.wake(&self.futex, num_threads - 1);
                return;
            }

            // Other threads wait until last counter wakes them up.
            // Acquire on futex synchronizes with last barrier count to ensure stuff before all barrier.wait()'s happen before us.
            while (self.futex.load(.Acquire) == 0) {
                Futex.wait(&self.futex, 0);
            }
        }
    };

    // Each thread passes through num_iterations barriers in lock-step.
    const Broadcast = struct {
        barriers: [num_iterations]Barrier = [_]Barrier{.{}} ** num_iterations,
        threads: [num_threads]std.Thread = undefined,

        fn run(self: *@This()) !void {
            for (&self.barriers) |*barrier| {
                try barrier.wait();
            }
        }
    };

    var broadcast = Broadcast{};
    for (&broadcast.threads) |*t| t.* = try std.Thread.spawn(.{}, Broadcast.run, .{&broadcast});
    for (broadcast.threads) |t| t.join();
}
/// Deadline waits efficiently for a pointer's value to change using Futex and a fixed timeout.
///
/// Futex's timedWait() api uses a relative duration which suffers from over-waiting
/// when used in a loop which is often required due to the possibility of spurious wakeups.
///
/// Deadline instead converts the relative timeout to an absolute one so that multiple calls
/// to Futex timedWait() can block for and report more accurate error.Timeouts.
pub const Deadline = struct {
    timeout: ?u64,
    started: std.time.Timer,

    /// Create the deadline to expire after the given amount of time in nanoseconds passes.
    /// Pass in `null` to have the deadline call `Futex.wait()` and never expire.
    pub fn init(expires_in_ns: ?u64) Deadline {
        // A monotonic Timer (required for somewhat accurate error.Timeout reporting)
        // is only started when an actual expiry was requested; otherwise `started`
        // is left undefined and never read by wait().
        if (expires_in_ns == null) {
            return .{ .timeout = null, .started = undefined };
        }
        return .{
            .timeout = expires_in_ns,
            .started = std.time.Timer.start() catch unreachable,
        };
    }

    /// Wait until either:
    /// - the `ptr`'s value changes from `expect`.
    /// - `Futex.wake()` is called on the `ptr`.
    /// - A spurious wake occurs.
    /// - The deadline expires; In which case `error.Timeout` is returned.
    pub fn wait(self: *Deadline, ptr: *const Atomic(u32), expect: u32) error{Timeout}!void {
        @setCold(true);

        // No expiry was configured: block indefinitely.
        const total_ns = self.timeout orelse {
            return Futex.wait(ptr, expect);
        };

        // Convert the fixed deadline back into the remaining relative timeout.
        // If we have already waited past it, saturate to zero so that timedWait()
        // reports error.Timeout immediately.
        const spent_ns = self.started.read();
        const remaining_ns = std.math.sub(u64, total_ns, spent_ns) catch 0;
        return Futex.timedWait(ptr, expect, remaining_ns);
    }
};
test "Futex - Deadline" {
    // The futex word is never woken, so wait() only returns cleanly on
    // spurious wakeups until the 100ms deadline expires with error.Timeout,
    // which breaks the loop.
    var deadline = Deadline.init(100 * std.time.ns_per_ms);
    var futex_word = Atomic(u32).init(0);
    while (true) {
        deadline.wait(&futex_word, 0) catch break;
    }
}
| https://raw.githubusercontent.com/matpx/daydream/018ad0c7caaf796d8a04b882fcbed39ccb7c9cd8/toolchain/zig/lib/std/Thread/Futex.zig |
//
// Now that we've seen how methods work, let's see if we can help
// our elephants out a bit more with some Elephant methods.
//
const std = @import("std");
/// An elephant in a singly-linked chain: each elephant optionally points at
/// the next one via `tail` and remembers whether it has been visited.
const Elephant = struct {
    letter: u8,
    tail: ?*Elephant = null,
    visited: bool = false,

    // New Elephant methods!

    /// Returns the next elephant in the chain.
    /// Asserts that a tail exists (`.?` means "orelse unreachable").
    pub fn getTail(self: *Elephant) *Elephant {
        return self.tail.?; // Remember, this means "orelse unreachable"
    }

    /// True when this elephant links to another one.
    pub fn hasTail(self: *Elephant) bool {
        return (self.tail != null);
    }

    /// Marks this elephant as visited.
    pub fn visit(self: *Elephant) void {
        self.visited = true;
    }

    /// Prints this elephant's letter followed by 'v' if it was [v]isited.
    pub fn print(self: *Elephant) void {
        // `v` is never mutated after initialization, so it must be `const`
        // (recent Zig compilers reject a `var` that is never mutated).
        const v: u8 = if (self.visited) 'v' else ' ';
        std.debug.print("{u}{u} ", .{ self.letter, v });
    }
};
pub fn main() void {
    // Three elephants, chained A -> B -> C through their tail pointers.
    var a = Elephant{ .letter = 'A' };
    var b = Elephant{ .letter = 'B' };
    var c = Elephant{ .letter = 'C' };

    a.tail = &b;
    b.tail = &c;

    // Walk the whole chain starting from the first elephant.
    visitElephants(&a);
    std.debug.print("\n", .{});
}
// Visits every elephant exactly once, starting at `first_elephant` and
// following each tail pointer until an elephant with no tail is reached.
fn visitElephants(first_elephant: *Elephant) void {
    var current = first_elephant;
    while (true) {
        current.print();
        current.visit();

        // Stop at the end of the chain; otherwise advance via the
        // getTail() accessor (equivalent to `current.tail.?`).
        if (!current.hasTail()) break;
        current = current.getTail();
    }
}
// Zig's enums can also have methods! This comment originally asked
// if anyone could find instances of enum methods in the wild. The
// first five pull requests were accepted and here they are:
//
// 1) drforester - I found one in the Zig source:
// https://github.com/ziglang/zig/blob/041212a41cfaf029dc3eb9740467b721c76f406c/src/Compilation.zig#L2495
//
// 2) bbuccianti - I found one!
// https://github.com/ziglang/zig/blob/6787f163eb6db2b8b89c2ea6cb51d63606487e12/lib/std/debug.zig#L477
//
// 3) GoldsteinE - Found many, here's one
// https://github.com/ziglang/zig/blob/ce14bc7176f9e441064ffdde2d85e35fd78977f2/lib/std/target.zig#L65
//
// 4) SpencerCDixon - Love this language so far :-)
// https://github.com/ziglang/zig/blob/a502c160cd51ce3de80b3be945245b7a91967a85/src/zir.zig#L530
//
// 5) tomkun - here's another enum method
// https://github.com/ziglang/zig/blob/4ca1f4ec2e3ae1a08295bc6ed03c235cb7700ab9/src/codegen/aarch64.zig#L24
| https://raw.githubusercontent.com/tomial/ziglings/c49f10c08770775b596309d3e4f54d774c327b95/exercises/048_methods2.zig |
//
// Now that we've seen how methods work, let's see if we can help
// our elephants out a bit more with some Elephant methods.
//
const std = @import("std");
/// An elephant in a singly-linked chain: each elephant optionally points at
/// the next one via `tail` and remembers whether it has been visited.
const Elephant = struct {
    letter: u8,
    tail: ?*Elephant = null,
    visited: bool = false,

    // New Elephant methods!

    /// Returns the next elephant in the chain.
    /// Asserts that a tail exists (`.?` means "orelse unreachable").
    pub fn getTail(self: *Elephant) *Elephant {
        return self.tail.?; // Remember, this means "orelse unreachable"
    }

    /// True when this elephant links to another one.
    pub fn hasTail(self: *Elephant) bool {
        return (self.tail != null);
    }

    /// Marks this elephant as visited.
    pub fn visit(self: *Elephant) void {
        self.visited = true;
    }

    /// Prints this elephant's letter followed by 'v' if it was [v]isited.
    pub fn print(self: *Elephant) void {
        // `v` is never mutated after initialization, so it must be `const`
        // (recent Zig compilers reject a `var` that is never mutated).
        const v: u8 = if (self.visited) 'v' else ' ';
        std.debug.print("{u}{u} ", .{ self.letter, v });
    }
};
pub fn main() void {
    // Three elephants, chained A -> B -> C through their tail pointers.
    var a = Elephant{ .letter = 'A' };
    var b = Elephant{ .letter = 'B' };
    var c = Elephant{ .letter = 'C' };

    a.tail = &b;
    b.tail = &c;

    // Walk the whole chain starting from the first elephant.
    visitElephants(&a);
    std.debug.print("\n", .{});
}
// This function visits all elephants once, starting with the
// first elephant and following the tails to the next elephant.
/// Visits every elephant exactly once, starting with `first_elephant`
/// and following each tail to the next elephant.
fn visitElephants(first_elephant: *Elephant) void {
    var e = first_elephant;
    while (true) {
        e.print();
        e.visit();

        // Use the method rather than unwrapping the optional by hand:
        // getTail() already encapsulates `tail.?`.
        e = if (e.hasTail()) e.getTail() else break;
    }
}
// Zig's enums can also have methods! This comment originally asked
// if anyone could find instances of enum methods in the wild. The
// first five pull requests were accepted and here they are:
//
// 1) drforester - I found one in the Zig source:
// https://github.com/ziglang/zig/blob/041212a41cfaf029dc3eb9740467b721c76f406c/src/Compilation.zig#L2495
//
// 2) bbuccianti - I found one!
// https://github.com/ziglang/zig/blob/6787f163eb6db2b8b89c2ea6cb51d63606487e12/lib/std/debug.zig#L477
//
// 3) GoldsteinE - Found many, here's one
// https://github.com/ziglang/zig/blob/ce14bc7176f9e441064ffdde2d85e35fd78977f2/lib/std/target.zig#L65
//
// 4) SpencerCDixon - Love this language so far :-)
// https://github.com/ziglang/zig/blob/a502c160cd51ce3de80b3be945245b7a91967a85/src/zir.zig#L530
//
// 5) tomkun - here's another enum method
// https://github.com/ziglang/zig/blob/4ca1f4ec2e3ae1a08295bc6ed03c235cb7700ab9/src/codegen/aarch64.zig#L24
| https://raw.githubusercontent.com/Hiten-Tandon/my-ziglings-attempt/798ec87da4f2a3f6ff8f04703b706d4f0f8def17/exercises/048_methods2.zig |
//
// Being able to group values together lets us turn this:
//
// point1_x = 3;
// point1_y = 16;
// point1_z = 27;
// point2_x = 7;
// point2_y = 13;
// point2_z = 34;
//
// into this:
//
// point1 = Point{ .x=3, .y=16, .z=27 };
// point2 = Point{ .x=7, .y=13, .z=34 };
//
// The Point above is an example of a "struct" (short for "structure").
// Here's how that struct type could have been defined:
//
// const Point = struct{ x: u32, y: u32, z: u32 };
//
// Let's store something fun with a struct: a roleplaying character!
//
const std = @import("std");
// We'll use an enum to specify the character class.
/// The playable character classes.
const Class = enum {
    wizard,
    thief,
    bard,
    warrior,
};
// Please add a new property to this struct called "health" and make
// it a u8 integer type.
/// A role-playing character. `main()` below creates one with 100 health.
const Character = struct {
    class: Class, // which Class this character belongs to
    gold: u32, // currency carried
    experience: u32, // accumulated experience points
    health: u8, // hit points (0-255)
};
pub fn main() void {
    // Glorp starts out with 100 health.
    var glorp_the_wise = Character{
        .class = .wizard, // enum literal; type inferred from the field
        .gold = 20,
        .experience = 10,
        .health = 100,
    };

    // Glorp gains some gold...
    glorp_the_wise.gold += 5;
    // ...and takes a punch!
    glorp_the_wise.health -= 10;

    const hp = glorp_the_wise.health;
    const gold = glorp_the_wise.gold;
    std.debug.print("Your wizard has {} health and {} gold.\n", .{ hp, gold });
}
| https://raw.githubusercontent.com/mhanberg/ziglings/b57ff44f37d088f253e44f38ae16ab0c00e42aee/exercises/037_structs.zig |
//
// Being able to group values together lets us turn this:
//
// point1_x = 3;
// point1_y = 16;
// point1_z = 27;
// point2_x = 7;
// point2_y = 13;
// point2_z = 34;
//
// into this:
//
// point1 = Point{ .x=3, .y=16, .z=27 };
// point2 = Point{ .x=7, .y=13, .z=34 };
//
// The Point above is an example of a "struct" (short for "structure").
// Here's how that struct type could have been defined:
//
// const Point = struct{ x: u32, y: u32, z: u32 };
//
// Let's store something fun with a struct: a roleplaying character!
//
const std = @import("std");
// We'll use an enum to specify the character class.
/// The playable character classes.
const Class = enum {
    wizard,
    thief,
    bard,
    warrior,
};
// Please add a new property to this struct called "health" and make
// it a u8 integer type.
/// A role-playing character. `main()` below creates one with 100 health.
const Character = struct {
    class: Class, // which Class this character belongs to
    gold: u32, // currency carried
    experience: u32, // accumulated experience points
    health: u8, // hit points (0-255)
};
pub fn main() void {
    // Glorp starts out with 100 health.
    var glorp_the_wise = Character{
        .class = .wizard, // enum literal; type inferred from the field
        .gold = 20,
        .experience = 10,
        .health = 100,
    };

    // Glorp gains some gold...
    glorp_the_wise.gold += 5;
    // ...and takes a punch!
    glorp_the_wise.health -= 10;

    const hp = glorp_the_wise.health;
    const gold = glorp_the_wise.gold;
    std.debug.print("Your wizard has {} health and {} gold.\n", .{ hp, gold });
}
| https://raw.githubusercontent.com/Steven0351/ziglings/345445f187efcfca0898d1622931d8ab45fa8935/exercises/037_structs.zig |
//
// Being able to group values together lets us turn this:
//
// point1_x = 3;
// point1_y = 16;
// point1_z = 27;
// point2_x = 7;
// point2_y = 13;
// point2_z = 34;
//
// into this:
//
// point1 = Point{ .x=3, .y=16, .z=27 };
// point2 = Point{ .x=7, .y=13, .z=34 };
//
// The Point above is an example of a "struct" (short for "structure").
// Here's how that struct type could have been defined:
//
// const Point = struct{ x: u32, y: u32, z: u32 };
//
// Let's store something fun with a struct: a roleplaying character!
//
const std = @import("std");
// We'll use an enum to specify the character class.
/// The playable character classes.
const Class = enum {
    wizard,
    thief,
    bard,
    warrior,
};
// Please add a new property to this struct called "health" and make
// it a u8 integer type.
/// A role-playing character. `main()` below creates one with 100 health.
const Character = struct {
    class: Class, // which Class this character belongs to
    gold: u32, // currency carried
    experience: u32, // accumulated experience points
    health: u8, // hit points (0-255)
};
pub fn main() void {
    // Glorp starts out with 100 health.
    var glorp_the_wise = Character{
        .class = .wizard, // enum literal; type inferred from the field
        .gold = 20,
        .experience = 10,
        .health = 100,
    };

    // Glorp gains some gold...
    glorp_the_wise.gold += 5;
    // ...and takes a punch!
    glorp_the_wise.health -= 10;

    const hp = glorp_the_wise.health;
    const gold = glorp_the_wise.gold;
    std.debug.print("Your wizard has {} health and {} gold.\n", .{ hp, gold });
}
| https://raw.githubusercontent.com/matthewsimo/ziglings/76b194775858a18b011a0f38206ae16024d2a221/exercises/037_structs.zig |
//
// Being able to group values together lets us turn this:
//
// point1_x = 3;
// point1_y = 16;
// point1_z = 27;
// point2_x = 7;
// point2_y = 13;
// point2_z = 34;
//
// into this:
//
// point1 = Point{ .x=3, .y=16, .z=27 };
// point2 = Point{ .x=7, .y=13, .z=34 };
//
// The Point above is an example of a "struct" (short for "structure").
// Here's how that struct type could have been defined:
//
// const Point = struct{ x: u32, y: u32, z: u32 };
//
// Let's store something fun with a struct: a roleplaying character!
//
const std = @import("std");
// We'll use an enum to specify the character class.
/// The playable character classes.
const Class = enum {
    wizard,
    thief,
    bard,
    warrior,
};
// Please add a new property to this struct called "health" and make
// it a u8 integer type.
/// A role-playing character. `main()` below creates one with 100 health.
const Character = struct {
    class: Class, // which Class this character belongs to
    gold: u32, // currency carried
    experience: u32, // accumulated experience points
    health: u8, // hit points (0-255)
};
pub fn main() void {
    // Glorp starts out with 100 health.
    var glorp_the_wise = Character{
        .class = .wizard, // enum literal; type inferred from the field
        .gold = 20,
        .experience = 10,
        .health = 100,
    };

    // Glorp gains some gold...
    glorp_the_wise.gold += 5;
    // ...and takes a punch!
    glorp_the_wise.health -= 10;

    const hp = glorp_the_wise.health;
    const gold = glorp_the_wise.gold;
    std.debug.print("Your wizard has {} health and {} gold.\n", .{ hp, gold });
}
| https://raw.githubusercontent.com/saggit/ziglings/d2ccc075df5fb1f0efe5e615c5b35d129267bfde/exercises/037_structs.zig |
const common = @import("../common.zig");
const jsFree = common.jsFree;
const jsCreateClass = common.jsCreateClass;
const Classes = common.Classes;
const toJSBool = common.toJSBool;
const Undefined = common.Undefined;
const True = common.True;
const object = @import("../object.zig");
const Object = object.Object;
const getObjectValue = object.getObjectValue;
const AsyncFunction = @import("../function.zig").AsyncFunction;
const WritableStream = @import("writable.zig").WritableStream;
const Array = @import("../array.zig").Array;
// https://github.com/cloudflare/workers-types/blob/master/index.d.ts#L989
/// Options for ReadableStream.pipeTo, mirroring the Streams-spec
/// StreamPipeOptions dictionary.
/// https://github.com/cloudflare/workers-types/blob/master/index.d.ts#L989
pub const PipeToOptions = struct {
    preventClose: ?bool = null,
    preventAbort: ?bool = null,
    preventCancel: ?bool = null,
    // NOTE: This exists but will prob never be implemented.
    // signal: ?AbortSignal = null,

    /// Builds the JS options object. Caller owns the returned Object
    /// and must free it.
    pub fn toObject (self: *const PipeToOptions) Object {
        const obj = Object.new();
        // Fix: the keys must match the spec option names. The old code wrote
        // every flag under "ignoreMethod" and silently dropped preventAbort /
        // preventCancel.
        if (self.preventClose) |v| obj.setID("preventClose", toJSBool(v));
        if (self.preventAbort) |v| obj.setID("preventAbort", toJSBool(v));
        if (self.preventCancel) |v| obj.setID("preventCancel", toJSBool(v));
        return obj;
    }
};
// TODO: Plenty of functions/structs not implemented here yet.
// https://developers.cloudflare.com/workers/runtime-apis/streams/readablestream/
// https://github.com/cloudflare/workers-types/blob/master/index.d.ts#L1155
/// Thin wrapper around a JS-side ReadableStream, referenced by heap id.
pub const ReadableStream = struct {
    id: u32, // handle into the JS object heap

    /// Wraps an existing JS heap pointer.
    pub fn init (ptr: u32) ReadableStream {
        return ReadableStream{ .id = ptr };
    }

    // TODO: Support inputs
    /// Creates a new, empty JS ReadableStream.
    pub fn new () ReadableStream {
        return ReadableStream{ .id = jsCreateClass(Classes.ReadableStream.toInt(), Undefined) };
    }

    /// Releases the JS-side handle.
    pub fn free (self: ReadableStream) void {
        jsFree(self.id);
    }

    /// Reads the JS `locked` property (true while a reader holds the stream).
    pub fn locked (self: *const ReadableStream) bool {
        const jsPtr = getObjectValue(self.id, "locked");
        return jsPtr == True;
    }

    /// Invokes the stream's JS `cancel()` method.
    /// NOTE(review): called with no arguments here while pipeTo passes
    /// `args.id` to the same AsyncFunction.call — confirm which arity
    /// AsyncFunction.call actually has.
    pub fn cancel (self: *const ReadableStream) void {
        const func = AsyncFunction{ .id = getObjectValue(self.id, "cancel") };
        defer func.free();
        func.call();
    }

    /// Pipes this stream into `destination`, honoring `options`.
    pub fn pipeTo (
        self: *const ReadableStream,
        destination: *const WritableStream,
        options: PipeToOptions
    ) void {
        const optObj = options.toObject();
        defer optObj.free();
        const func = AsyncFunction{ .id = getObjectValue(self.id, "pipeTo") };
        defer func.free();
        // setup args
        const args = Array.new();
        defer args.free();
        // NOTE(review): `destination` is already a pointer, so `&destination`
        // pushes a pointer-to-pointer — verify Array.push expects this and
        // not `destination` itself.
        args.push(&destination);
        args.push(&optObj);
        func.call(args.id);
    }
};
| https://raw.githubusercontent.com/CraigglesO/workers-zig-auth/44c7c7c83e4360457037450dc74a57d5c9e06819/auth/workers-zig/lib/bindings/streams/readable.zig |
const std = @import("std");
/// Build-time configuration for zmath.
pub const Options = struct {
    // When true, prefer bit-identical results across platforms.
    enable_cross_platform_determinism: bool = true,
};
/// The modules produced by `package()`, plus the inputs used to build them.
pub const Package = struct {
    target: std.Build.ResolvedTarget,
    options: Options,
    zmath: *std.Build.Module, // the main zmath module
    zmath_options: *std.Build.Module, // generated build-options module

    /// Adds both modules as imports of `exe`'s root module.
    pub fn link(pkg: Package, exe: *std.Build.Step.Compile) void {
        exe.root_module.addImport("zmath", pkg.zmath);
        exe.root_module.addImport("zmath_options", pkg.zmath_options);
    }
};
/// Builds the zmath package: generates the options module from build flags
/// and creates the zmath module that imports it.
/// NOTE(review): the optimize-mode parameter is accepted but discarded.
pub fn package(
    b: *std.Build,
    target: std.Build.ResolvedTarget,
    _: std.builtin.Mode,
    args: struct {
        options: Options = .{},
    },
) Package {
    const step = b.addOptions();
    step.addOption(
        bool,
        "enable_cross_platform_determinism",
        args.options.enable_cross_platform_determinism,
    );
    // Expose the options as an importable module named "zmath_options".
    const zmath_options = step.createModule();
    const zmath = b.addModule("zmath", .{
        .root_source_file = .{ .path = thisDir() ++ "/src/main.zig" },
        .imports = &.{
            .{ .name = "zmath_options", .module = zmath_options },
        },
    });
    return .{
        .target = target,
        .options = args.options,
        .zmath = zmath,
        .zmath_options = zmath_options,
    };
}
/// Standard build entry point: registers the package (with the
/// enable_cross_platform_determinism flag) plus `test` and `benchmark` steps.
pub fn build(b: *std.Build) void {
    const optimize = b.standardOptimizeOption(.{});
    const target = b.standardTargetOptions(.{});
    // Registered for its side effects (module creation); result unused here.
    _ = package(b, target, optimize, .{ .options = .{
        .enable_cross_platform_determinism = b.option(bool, "enable_cross_platform_determinism", "Whether to enable cross-platform determinism.") orelse true,
    } });

    const test_step = b.step("test", "Run zmath tests");
    test_step.dependOn(runTests(b, optimize, target));

    const benchmark_step = b.step("benchmark", "Run zmath benchmarks");
    benchmark_step.dependOn(runBenchmarks(b, target, optimize));
}
/// Creates the unit-test artifact for src/main.zig and returns the step
/// that runs it.
pub fn runTests(
    b: *std.Build,
    optimize: std.builtin.Mode,
    target: std.Build.ResolvedTarget,
) *std.Build.Step {
    const pkg = package(b, target, optimize, .{});

    const unit_tests = b.addTest(.{
        .name = "zmath-tests",
        .root_source_file = .{ .path = thisDir() ++ "/src/main.zig" },
        .target = target,
        .optimize = optimize,
    });
    // Tests only need the generated options module.
    unit_tests.root_module.addImport("zmath_options", pkg.zmath_options);

    const run_step = b.addRunArtifact(unit_tests);
    return &run_step.step;
}
/// Creates the benchmark executable and returns the step that runs it.
/// The zmath package itself is always built with ReleaseFast here.
pub fn runBenchmarks(
    b: *std.Build,
    target: std.Build.ResolvedTarget,
    optimize: std.builtin.OptimizeMode,
) *std.Build.Step {
    const bench_exe = b.addExecutable(.{
        .name = "zmath-benchmarks",
        .root_source_file = .{ .path = thisDir() ++ "/src/benchmark.zig" },
        .target = target,
        .optimize = optimize,
    });

    const pkg = package(b, target, .ReleaseFast, .{});
    bench_exe.root_module.addImport("zmath", pkg.zmath);

    const run_step = b.addRunArtifact(bench_exe);
    return &run_step.step;
}
/// Directory containing this build script, resolved at comptime so module
/// paths work regardless of the consumer's working directory.
inline fn thisDir() []const u8 {
    return comptime std.fs.path.dirname(@src().file) orelse ".";
}
| https://raw.githubusercontent.com/sarr-io/realityarch/996e9cf6b72a2c9ae969ccb29f145a9563d9e503/libs/zmath/build.zig |
const std = @import("std");
/// Build-time configuration for zmath.
pub const Options = struct {
    // When true, prefer bit-identical results across platforms.
    enable_cross_platform_determinism: bool = true,
};
/// The modules produced by `package()`, plus the inputs used to build them.
pub const Package = struct {
    target: std.Build.ResolvedTarget,
    options: Options,
    zmath: *std.Build.Module, // the main zmath module
    zmath_options: *std.Build.Module, // generated build-options module

    /// Adds both modules as imports of `exe`'s root module.
    pub fn link(pkg: Package, exe: *std.Build.Step.Compile) void {
        exe.root_module.addImport("zmath", pkg.zmath);
        exe.root_module.addImport("zmath_options", pkg.zmath_options);
    }
};
/// Builds the zmath package: generates the options module from build flags
/// and creates the zmath module that imports it.
/// NOTE(review): the optimize-mode parameter is accepted but discarded.
pub fn package(
    b: *std.Build,
    target: std.Build.ResolvedTarget,
    _: std.builtin.Mode,
    args: struct {
        options: Options = .{},
    },
) Package {
    const step = b.addOptions();
    step.addOption(
        bool,
        "enable_cross_platform_determinism",
        args.options.enable_cross_platform_determinism,
    );
    // Expose the options as an importable module named "zmath_options".
    const zmath_options = step.createModule();
    const zmath = b.addModule("zmath", .{
        .root_source_file = .{ .path = thisDir() ++ "/src/main.zig" },
        .imports = &.{
            .{ .name = "zmath_options", .module = zmath_options },
        },
    });
    return .{
        .target = target,
        .options = args.options,
        .zmath = zmath,
        .zmath_options = zmath_options,
    };
}
/// Standard build entry point: registers the package (with the
/// enable_cross_platform_determinism flag) plus `test` and `benchmark` steps.
pub fn build(b: *std.Build) void {
    const optimize = b.standardOptimizeOption(.{});
    const target = b.standardTargetOptions(.{});
    // Registered for its side effects (module creation); result unused here.
    _ = package(b, target, optimize, .{ .options = .{
        .enable_cross_platform_determinism = b.option(bool, "enable_cross_platform_determinism", "Whether to enable cross-platform determinism.") orelse true,
    } });

    const test_step = b.step("test", "Run zmath tests");
    test_step.dependOn(runTests(b, optimize, target));

    const benchmark_step = b.step("benchmark", "Run zmath benchmarks");
    benchmark_step.dependOn(runBenchmarks(b, target, optimize));
}
/// Creates the unit-test artifact for src/main.zig and returns the step
/// that runs it.
pub fn runTests(
    b: *std.Build,
    optimize: std.builtin.Mode,
    target: std.Build.ResolvedTarget,
) *std.Build.Step {
    const tests = b.addTest(.{
        .name = "zmath-tests",
        .root_source_file = .{ .path = thisDir() ++ "/src/main.zig" },
        .target = target,
        .optimize = optimize,
    });
    // Tests only need the generated options module.
    const zmath_pkg = package(b, target, optimize, .{});
    tests.root_module.addImport("zmath_options", zmath_pkg.zmath_options);
    return &b.addRunArtifact(tests).step;
}
/// Creates the benchmark executable and returns the step that runs it.
/// The zmath package itself is always built with ReleaseFast here.
pub fn runBenchmarks(
    b: *std.Build,
    target: std.Build.ResolvedTarget,
    optimize: std.builtin.OptimizeMode,
) *std.Build.Step {
    const exe = b.addExecutable(.{
        .name = "zmath-benchmarks",
        .root_source_file = .{ .path = thisDir() ++ "/src/benchmark.zig" },
        .target = target,
        .optimize = optimize,
    });
    const zmath_pkg = package(b, target, .ReleaseFast, .{});
    exe.root_module.addImport("zmath", zmath_pkg.zmath);
    return &b.addRunArtifact(exe).step;
}
/// Directory containing this build script, resolved at comptime so module
/// paths work regardless of the consumer's working directory.
inline fn thisDir() []const u8 {
    return comptime std.fs.path.dirname(@src().file) orelse ".";
}
| https://raw.githubusercontent.com/freakmangd/zentig_ecs/709736c26d00d5ad919b454609a2d566e8b92e36/deps/zmath/build.zig |
const std = @import("std");
const utils = @import("utils.zig");
const res = @import("res.zig");
const SourceBytes = @import("literals.zig").SourceBytes;
// https://learn.microsoft.com/en-us/windows/win32/menurc/about-resource-files
/// Every resource type a .rc script can declare, plus pseudo-types that can
/// only be referenced by ordinal number (the `*_num` tags).
pub const Resource = enum {
    accelerators,
    bitmap,
    cursor,
    dialog,
    dialogex,
    /// As far as I can tell, this is undocumented; the most I could find was this:
    /// https://www.betaarchive.com/wiki/index.php/Microsoft_KB_Archive/91697
    dlginclude,
    /// Undocumented, basically works exactly like RCDATA
    dlginit,
    font,
    html,
    icon,
    menu,
    menuex,
    messagetable,
    plugplay, // Obsolete
    rcdata,
    stringtable,
    /// Undocumented
    toolbar,
    user_defined,
    versioninfo,
    vxd, // Obsolete

    // Types that are treated as a user-defined type when encountered, but have
    // special meaning without the Visual Studio GUI. We match the Win32 RC compiler
    // behavior by acting as if these keyword don't exist when compiling the .rc
    // (thereby treating them as user-defined).
    //textinclude, // A special resource that is interpreted by Visual C++.
    //typelib, // A special resource that is used with the /TLBID and /TLBOUT linker options

    // Types that can only be specified by numbers, they don't have keywords
    cursor_num,
    icon_num,
    string_num,
    anicursor_num,
    aniicon_num,
    fontdir_num,
    manifest_num,

    // Case-insensitive keyword -> tag lookup table.
    const map = std.ComptimeStringMapWithEql(Resource, .{
        .{ "ACCELERATORS", .accelerators },
        .{ "BITMAP", .bitmap },
        .{ "CURSOR", .cursor },
        .{ "DIALOG", .dialog },
        .{ "DIALOGEX", .dialogex },
        .{ "DLGINCLUDE", .dlginclude },
        .{ "DLGINIT", .dlginit },
        .{ "FONT", .font },
        .{ "HTML", .html },
        .{ "ICON", .icon },
        .{ "MENU", .menu },
        .{ "MENUEX", .menuex },
        .{ "MESSAGETABLE", .messagetable },
        .{ "PLUGPLAY", .plugplay },
        .{ "RCDATA", .rcdata },
        .{ "STRINGTABLE", .stringtable },
        .{ "TOOLBAR", .toolbar },
        .{ "VERSIONINFO", .versioninfo },
        .{ "VXD", .vxd },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);

    /// Maps a resource-type token to a tag. Numeric tokens below 256 are
    /// interpreted as RT ordinals; anything unrecognized is `user_defined`.
    pub fn fromString(bytes: SourceBytes) Resource {
        const maybe_ordinal = res.NameOrOrdinal.maybeOrdinalFromString(bytes);
        if (maybe_ordinal) |ordinal| {
            if (ordinal.ordinal >= 256) return .user_defined;
            return fromRT(@enumFromInt(ordinal.ordinal));
        }
        return map.get(bytes.slice) orelse .user_defined;
    }

    // TODO: Some comptime validation that RT <-> Resource conversion is synced?
    /// Maps a predefined RT ordinal to the corresponding tag.
    pub fn fromRT(rt: res.RT) Resource {
        return switch (rt) {
            .ACCELERATOR => .accelerators,
            .ANICURSOR => .anicursor_num,
            .ANIICON => .aniicon_num,
            .BITMAP => .bitmap,
            .CURSOR => .cursor_num,
            .DIALOG => .dialog,
            .DLGINCLUDE => .dlginclude,
            .DLGINIT => .dlginit,
            .FONT => .font,
            .FONTDIR => .fontdir_num,
            .GROUP_CURSOR => .cursor,
            .GROUP_ICON => .icon,
            .HTML => .html,
            .ICON => .icon_num,
            .MANIFEST => .manifest_num,
            .MENU => .menu,
            .MESSAGETABLE => .messagetable,
            .PLUGPLAY => .plugplay,
            .RCDATA => .rcdata,
            .STRING => .string_num,
            .TOOLBAR => .toolbar,
            .VERSION => .versioninfo,
            .VXD => .vxd,
            _ => .user_defined,
        };
    }

    /// True for types whose data block may be given as inline raw data.
    pub fn canUseRawData(resource: Resource) bool {
        return switch (resource) {
            .user_defined,
            .html,
            .plugplay, // Obsolete
            .rcdata,
            .vxd, // Obsolete
            .manifest_num,
            .dlginit,
            => true,
            else => false,
        };
    }

    /// Human-readable name for error messages; numeric-only types render as
    /// "<ordinal> (<name>)".
    pub fn nameForErrorDisplay(resource: Resource) []const u8 {
        return switch (resource) {
            // zig fmt: off
            .accelerators, .bitmap, .cursor, .dialog, .dialogex, .dlginclude, .dlginit, .font,
            .html, .icon, .menu, .menuex, .messagetable, .plugplay, .rcdata, .stringtable,
            .toolbar, .versioninfo, .vxd => @tagName(resource),
            // zig fmt: on
            .user_defined => "user-defined",
            .cursor_num => std.fmt.comptimePrint("{d} (cursor)", .{@intFromEnum(res.RT.CURSOR)}),
            .icon_num => std.fmt.comptimePrint("{d} (icon)", .{@intFromEnum(res.RT.ICON)}),
            .string_num => std.fmt.comptimePrint("{d} (string)", .{@intFromEnum(res.RT.STRING)}),
            .anicursor_num => std.fmt.comptimePrint("{d} (anicursor)", .{@intFromEnum(res.RT.ANICURSOR)}),
            .aniicon_num => std.fmt.comptimePrint("{d} (aniicon)", .{@intFromEnum(res.RT.ANIICON)}),
            .fontdir_num => std.fmt.comptimePrint("{d} (fontdir)", .{@intFromEnum(res.RT.FONTDIR)}),
            .manifest_num => std.fmt.comptimePrint("{d} (manifest)", .{@intFromEnum(res.RT.MANIFEST)}),
        };
    }
};
/// https://learn.microsoft.com/en-us/windows/win32/menurc/stringtable-resource#parameters
/// https://learn.microsoft.com/en-us/windows/win32/menurc/dialog-resource#parameters
/// https://learn.microsoft.com/en-us/windows/win32/menurc/dialogex-resource#parameters
pub const OptionalStatements = enum {
    characteristics,
    language,
    version,

    // DIALOG
    caption,
    class,
    exstyle,
    font,
    menu,
    style,

    // Optional statements valid for any resource.
    pub const map = std.ComptimeStringMapWithEql(OptionalStatements, .{
        .{ "CHARACTERISTICS", .characteristics },
        .{ "LANGUAGE", .language },
        .{ "VERSION", .version },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);

    // Additional optional statements valid only for DIALOG/DIALOGEX.
    pub const dialog_map = std.ComptimeStringMapWithEql(OptionalStatements, .{
        .{ "CAPTION", .caption },
        .{ "CLASS", .class },
        .{ "EXSTYLE", .exstyle },
        .{ "FONT", .font },
        .{ "MENU", .menu },
        .{ "STYLE", .style },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
/// Control statement keywords valid inside DIALOG/DIALOGEX blocks.
pub const Control = enum {
    auto3state,
    autocheckbox,
    autoradiobutton,
    checkbox,
    combobox,
    control,
    ctext,
    defpushbutton,
    edittext,
    hedit,
    iedit,
    groupbox,
    icon,
    listbox,
    ltext,
    pushbox,
    pushbutton,
    radiobutton,
    rtext,
    scrollbar,
    state3,
    userbutton,

    // Case-insensitive keyword -> tag lookup table.
    pub const map = std.ComptimeStringMapWithEql(Control, .{
        .{ "AUTO3STATE", .auto3state },
        .{ "AUTOCHECKBOX", .autocheckbox },
        .{ "AUTORADIOBUTTON", .autoradiobutton },
        .{ "CHECKBOX", .checkbox },
        .{ "COMBOBOX", .combobox },
        .{ "CONTROL", .control },
        .{ "CTEXT", .ctext },
        .{ "DEFPUSHBUTTON", .defpushbutton },
        .{ "EDITTEXT", .edittext },
        .{ "HEDIT", .hedit },
        .{ "IEDIT", .iedit },
        .{ "GROUPBOX", .groupbox },
        .{ "ICON", .icon },
        .{ "LISTBOX", .listbox },
        .{ "LTEXT", .ltext },
        .{ "PUSHBOX", .pushbox },
        .{ "PUSHBUTTON", .pushbutton },
        .{ "RADIOBUTTON", .radiobutton },
        .{ "RTEXT", .rtext },
        .{ "SCROLLBAR", .scrollbar },
        .{ "STATE3", .state3 },
        .{ "USERBUTTON", .userbutton },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);

    /// Whether this control's statement takes a leading text parameter;
    /// the edit-style controls, list/combo boxes and scrollbars do not.
    pub fn hasTextParam(control: Control) bool {
        switch (control) {
            .scrollbar, .listbox, .iedit, .hedit, .edittext, .combobox => return false,
            else => return true,
        }
    }
};
pub const ControlClass = struct {
    /// Case-insensitive keyword lookup for control class tokens.
    pub const map = std.ComptimeStringMapWithEql(res.ControlClass, .{
        .{ "BUTTON", .button },
        .{ "EDIT", .edit },
        .{ "STATIC", .static },
        .{ "LISTBOX", .listbox },
        .{ "SCROLLBAR", .scrollbar },
        .{ "COMBOBOX", .combobox },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);

    /// Like `map.get` but works on WTF16 strings, for use with parsed
    /// string literals ("BUTTON", or even "\x42UTTON")
    pub fn fromWideString(str: []const u16) ?res.ControlClass {
        const utf16Literal = std.unicode.utf8ToUtf16LeStringLiteral;
        // Same candidates as `map`, as comptime-known UTF-16 literals.
        const candidates = .{
            .{ utf16Literal("BUTTON"), res.ControlClass.button },
            .{ utf16Literal("EDIT"), res.ControlClass.edit },
            .{ utf16Literal("STATIC"), res.ControlClass.static },
            .{ utf16Literal("LISTBOX"), res.ControlClass.listbox },
            .{ utf16Literal("SCROLLBAR"), res.ControlClass.scrollbar },
            .{ utf16Literal("COMBOBOX"), res.ControlClass.combobox },
        };
        inline for (candidates) |pair| {
            if (ascii.eqlIgnoreCaseW(str, pair[0])) return pair[1];
        }
        return null;
    }
};
const ascii = struct {
    /// Compares ASCII values case-insensitively, non-ASCII values are compared directly.
    ///
    /// Fix: the old version lowered `b_c` via `@intCast` whenever `a_c` was
    /// ASCII, which trips a safety check (cast out of range) when `a_c < 128`
    /// but `b_c >= 128`. Such a pair can never be equal, so compare directly
    /// (and thus return false) unless BOTH code units are ASCII.
    pub fn eqlIgnoreCaseW(a: []const u16, b: []const u16) bool {
        if (a.len != b.len) return false;
        for (a, b) |a_c, b_c| {
            if (a_c < 128 and b_c < 128) {
                if (std.ascii.toLower(@intCast(a_c)) != std.ascii.toLower(@intCast(b_c))) return false;
            } else {
                if (a_c != b_c) return false;
            }
        }
        return true;
    }
};
/// Item keywords valid inside MENU/MENUEX blocks.
pub const MenuItem = enum {
    menuitem,
    popup,

    pub const map = std.ComptimeStringMapWithEql(MenuItem, .{
        .{ "MENUITEM", .menuitem },
        .{ "POPUP", .popup },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);

    /// Case-insensitive check for `MENUITEM SEPARATOR`.
    pub fn isSeparator(bytes: []const u8) bool {
        return std.ascii.eqlIgnoreCase(bytes, "SEPARATOR");
    }

    /// Option keywords that may follow a MENUITEM/POPUP statement.
    pub const Option = enum {
        checked,
        grayed,
        help,
        inactive,
        menubarbreak,
        menubreak,

        pub const map = std.ComptimeStringMapWithEql(Option, .{
            .{ "CHECKED", .checked },
            .{ "GRAYED", .grayed },
            .{ "HELP", .help },
            .{ "INACTIVE", .inactive },
            .{ "MENUBARBREAK", .menubarbreak },
            .{ "MENUBREAK", .menubreak },
        }, std.comptime_string_map.eqlAsciiIgnoreCase);
    };
};
/// Item keywords valid inside TOOLBAR blocks.
pub const ToolbarButton = enum {
    button,
    separator,

    pub const map = std.ComptimeStringMapWithEql(ToolbarButton, .{
        .{ "BUTTON", .button },
        .{ "SEPARATOR", .separator },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
/// Fixed-info statement keywords valid inside VERSIONINFO blocks.
pub const VersionInfo = enum {
    file_version,
    product_version,
    file_flags_mask,
    file_flags,
    file_os,
    file_type,
    file_subtype,

    pub const map = std.ComptimeStringMapWithEql(VersionInfo, .{
        .{ "FILEVERSION", .file_version },
        .{ "PRODUCTVERSION", .product_version },
        .{ "FILEFLAGSMASK", .file_flags_mask },
        .{ "FILEFLAGS", .file_flags },
        .{ "FILEOS", .file_os },
        .{ "FILETYPE", .file_type },
        .{ "FILESUBTYPE", .file_subtype },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
/// BLOCK/VALUE keywords valid inside a VERSIONINFO body.
pub const VersionBlock = enum {
    block,
    value,

    pub const map = std.ComptimeStringMapWithEql(VersionBlock, .{
        .{ "BLOCK", .block },
        .{ "VALUE", .value },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
/// Keywords that are be the first token in a statement and (if so) dictate how the rest
/// of the statement is parsed.
pub const TopLevelKeywords = enum {
    language,
    version,
    characteristics,
    stringtable,

    pub const map = std.ComptimeStringMapWithEql(TopLevelKeywords, .{
        .{ "LANGUAGE", .language },
        .{ "VERSION", .version },
        .{ "CHARACTERISTICS", .characteristics },
        .{ "STRINGTABLE", .stringtable },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
/// Memory/loading attribute keywords accepted after any resource type.
pub const CommonResourceAttributes = enum {
    preload,
    loadoncall,
    fixed,
    moveable,
    discardable,
    pure,
    impure,
    shared,
    nonshared,

    pub const map = std.ComptimeStringMapWithEql(CommonResourceAttributes, .{
        .{ "PRELOAD", .preload },
        .{ "LOADONCALL", .loadoncall },
        .{ "FIXED", .fixed },
        .{ "MOVEABLE", .moveable },
        .{ "DISCARDABLE", .discardable },
        .{ "PURE", .pure },
        .{ "IMPURE", .impure },
        .{ "SHARED", .shared },
        .{ "NONSHARED", .nonshared },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
/// Type/option keywords valid on ACCELERATORS entries.
pub const AcceleratorTypeAndOptions = enum {
    virtkey,
    ascii,
    noinvert,
    alt,
    shift,
    control,

    pub const map = std.ComptimeStringMapWithEql(AcceleratorTypeAndOptions, .{
        .{ "VIRTKEY", .virtkey },
        .{ "ASCII", .ascii },
        .{ "NOINVERT", .noinvert },
        .{ "ALT", .alt },
        .{ "SHIFT", .shift },
        .{ "CONTROL", .control },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
| https://raw.githubusercontent.com/beingofexistence13/multiversal-lang/dd769e3fc6182c23ef43ed4479614f43f29738c9/zig/src/resinator/rc.zig |
const std = @import("std");
const utils = @import("utils.zig");
const res = @import("res.zig");
const SourceBytes = @import("literals.zig").SourceBytes;
// https://learn.microsoft.com/en-us/windows/win32/menurc/about-resource-files
/// Every resource type a .rc script can declare, plus pseudo-types that can
/// only be referenced by ordinal number (the `*_num` tags).
pub const Resource = enum {
    accelerators,
    bitmap,
    cursor,
    dialog,
    dialogex,
    /// As far as I can tell, this is undocumented; the most I could find was this:
    /// https://www.betaarchive.com/wiki/index.php/Microsoft_KB_Archive/91697
    dlginclude,
    /// Undocumented, basically works exactly like RCDATA
    dlginit,
    font,
    html,
    icon,
    menu,
    menuex,
    messagetable,
    plugplay, // Obsolete
    rcdata,
    stringtable,
    /// Undocumented
    toolbar,
    user_defined,
    versioninfo,
    vxd, // Obsolete

    // Types that are treated as a user-defined type when encountered, but have
    // special meaning without the Visual Studio GUI. We match the Win32 RC compiler
    // behavior by acting as if these keyword don't exist when compiling the .rc
    // (thereby treating them as user-defined).
    //textinclude, // A special resource that is interpreted by Visual C++.
    //typelib, // A special resource that is used with the /TLBID and /TLBOUT linker options

    // Types that can only be specified by numbers, they don't have keywords
    cursor_num,
    icon_num,
    string_num,
    anicursor_num,
    aniicon_num,
    fontdir_num,
    manifest_num,

    // Case-insensitive keyword -> tag lookup table.
    const map = std.ComptimeStringMapWithEql(Resource, .{
        .{ "ACCELERATORS", .accelerators },
        .{ "BITMAP", .bitmap },
        .{ "CURSOR", .cursor },
        .{ "DIALOG", .dialog },
        .{ "DIALOGEX", .dialogex },
        .{ "DLGINCLUDE", .dlginclude },
        .{ "DLGINIT", .dlginit },
        .{ "FONT", .font },
        .{ "HTML", .html },
        .{ "ICON", .icon },
        .{ "MENU", .menu },
        .{ "MENUEX", .menuex },
        .{ "MESSAGETABLE", .messagetable },
        .{ "PLUGPLAY", .plugplay },
        .{ "RCDATA", .rcdata },
        .{ "STRINGTABLE", .stringtable },
        .{ "TOOLBAR", .toolbar },
        .{ "VERSIONINFO", .versioninfo },
        .{ "VXD", .vxd },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);

    /// Maps a resource-type token to a tag. Numeric tokens below 256 are
    /// interpreted as RT ordinals; anything unrecognized is `user_defined`.
    pub fn fromString(bytes: SourceBytes) Resource {
        const maybe_ordinal = res.NameOrOrdinal.maybeOrdinalFromString(bytes);
        if (maybe_ordinal) |ordinal| {
            if (ordinal.ordinal >= 256) return .user_defined;
            return fromRT(@enumFromInt(ordinal.ordinal));
        }
        return map.get(bytes.slice) orelse .user_defined;
    }

    // TODO: Some comptime validation that RT <-> Resource conversion is synced?
    /// Maps a predefined RT ordinal to the corresponding tag.
    pub fn fromRT(rt: res.RT) Resource {
        return switch (rt) {
            .ACCELERATOR => .accelerators,
            .ANICURSOR => .anicursor_num,
            .ANIICON => .aniicon_num,
            .BITMAP => .bitmap,
            .CURSOR => .cursor_num,
            .DIALOG => .dialog,
            .DLGINCLUDE => .dlginclude,
            .DLGINIT => .dlginit,
            .FONT => .font,
            .FONTDIR => .fontdir_num,
            .GROUP_CURSOR => .cursor,
            .GROUP_ICON => .icon,
            .HTML => .html,
            .ICON => .icon_num,
            .MANIFEST => .manifest_num,
            .MENU => .menu,
            .MESSAGETABLE => .messagetable,
            .PLUGPLAY => .plugplay,
            .RCDATA => .rcdata,
            .STRING => .string_num,
            .TOOLBAR => .toolbar,
            .VERSION => .versioninfo,
            .VXD => .vxd,
            _ => .user_defined,
        };
    }

    /// True for types whose data block may be given as inline raw data.
    pub fn canUseRawData(resource: Resource) bool {
        return switch (resource) {
            .user_defined,
            .html,
            .plugplay, // Obsolete
            .rcdata,
            .vxd, // Obsolete
            .manifest_num,
            .dlginit,
            => true,
            else => false,
        };
    }

    /// Human-readable name for error messages; numeric-only types render as
    /// "<ordinal> (<name>)".
    pub fn nameForErrorDisplay(resource: Resource) []const u8 {
        return switch (resource) {
            // zig fmt: off
            .accelerators, .bitmap, .cursor, .dialog, .dialogex, .dlginclude, .dlginit, .font,
            .html, .icon, .menu, .menuex, .messagetable, .plugplay, .rcdata, .stringtable,
            .toolbar, .versioninfo, .vxd => @tagName(resource),
            // zig fmt: on
            .user_defined => "user-defined",
            .cursor_num => std.fmt.comptimePrint("{d} (cursor)", .{@intFromEnum(res.RT.CURSOR)}),
            .icon_num => std.fmt.comptimePrint("{d} (icon)", .{@intFromEnum(res.RT.ICON)}),
            .string_num => std.fmt.comptimePrint("{d} (string)", .{@intFromEnum(res.RT.STRING)}),
            .anicursor_num => std.fmt.comptimePrint("{d} (anicursor)", .{@intFromEnum(res.RT.ANICURSOR)}),
            .aniicon_num => std.fmt.comptimePrint("{d} (aniicon)", .{@intFromEnum(res.RT.ANIICON)}),
            .fontdir_num => std.fmt.comptimePrint("{d} (fontdir)", .{@intFromEnum(res.RT.FONTDIR)}),
            .manifest_num => std.fmt.comptimePrint("{d} (manifest)", .{@intFromEnum(res.RT.MANIFEST)}),
        };
    }
};
/// https://learn.microsoft.com/en-us/windows/win32/menurc/stringtable-resource#parameters
/// https://learn.microsoft.com/en-us/windows/win32/menurc/dialog-resource#parameters
/// https://learn.microsoft.com/en-us/windows/win32/menurc/dialogex-resource#parameters
pub const OptionalStatements = enum {
    characteristics,
    language,
    version,

    // DIALOG
    caption,
    class,
    exstyle,
    font,
    menu,
    style,

    // Optional statements valid for any resource.
    pub const map = std.ComptimeStringMapWithEql(OptionalStatements, .{
        .{ "CHARACTERISTICS", .characteristics },
        .{ "LANGUAGE", .language },
        .{ "VERSION", .version },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);

    // Additional optional statements valid only for DIALOG/DIALOGEX.
    pub const dialog_map = std.ComptimeStringMapWithEql(OptionalStatements, .{
        .{ "CAPTION", .caption },
        .{ "CLASS", .class },
        .{ "EXSTYLE", .exstyle },
        .{ "FONT", .font },
        .{ "MENU", .menu },
        .{ "STYLE", .style },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
/// Control statement keywords valid inside DIALOG/DIALOGEX blocks.
pub const Control = enum {
    auto3state,
    autocheckbox,
    autoradiobutton,
    checkbox,
    combobox,
    control,
    ctext,
    defpushbutton,
    edittext,
    hedit,
    iedit,
    groupbox,
    icon,
    listbox,
    ltext,
    pushbox,
    pushbutton,
    radiobutton,
    rtext,
    scrollbar,
    state3,
    userbutton,

    // Case-insensitive keyword -> tag lookup table.
    pub const map = std.ComptimeStringMapWithEql(Control, .{
        .{ "AUTO3STATE", .auto3state },
        .{ "AUTOCHECKBOX", .autocheckbox },
        .{ "AUTORADIOBUTTON", .autoradiobutton },
        .{ "CHECKBOX", .checkbox },
        .{ "COMBOBOX", .combobox },
        .{ "CONTROL", .control },
        .{ "CTEXT", .ctext },
        .{ "DEFPUSHBUTTON", .defpushbutton },
        .{ "EDITTEXT", .edittext },
        .{ "HEDIT", .hedit },
        .{ "IEDIT", .iedit },
        .{ "GROUPBOX", .groupbox },
        .{ "ICON", .icon },
        .{ "LISTBOX", .listbox },
        .{ "LTEXT", .ltext },
        .{ "PUSHBOX", .pushbox },
        .{ "PUSHBUTTON", .pushbutton },
        .{ "RADIOBUTTON", .radiobutton },
        .{ "RTEXT", .rtext },
        .{ "SCROLLBAR", .scrollbar },
        .{ "STATE3", .state3 },
        .{ "USERBUTTON", .userbutton },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);

    /// Whether this control's statement takes a leading text parameter;
    /// the edit-style controls, list/combo boxes and scrollbars do not.
    pub fn hasTextParam(control: Control) bool {
        switch (control) {
            .scrollbar, .listbox, .iedit, .hedit, .edittext, .combobox => return false,
            else => return true,
        }
    }
};
pub const ControlClass = struct {
    // Case-insensitive keyword lookup for control class tokens.
    pub const map = std.ComptimeStringMapWithEql(res.ControlClass, .{
        .{ "BUTTON", .button },
        .{ "EDIT", .edit },
        .{ "STATIC", .static },
        .{ "LISTBOX", .listbox },
        .{ "SCROLLBAR", .scrollbar },
        .{ "COMBOBOX", .combobox },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);

    /// Like `map.get` but works on WTF16 strings, for use with parsed
    /// string literals ("BUTTON", or even "\x42UTTON").
    /// Returns null when the string matches no known class.
    pub fn fromWideString(str: []const u16) ?res.ControlClass {
        const utf16Literal = std.unicode.utf8ToUtf16LeStringLiteral;
        return if (ascii.eqlIgnoreCaseW(str, utf16Literal("BUTTON")))
            .button
        else if (ascii.eqlIgnoreCaseW(str, utf16Literal("EDIT")))
            .edit
        else if (ascii.eqlIgnoreCaseW(str, utf16Literal("STATIC")))
            .static
        else if (ascii.eqlIgnoreCaseW(str, utf16Literal("LISTBOX")))
            .listbox
        else if (ascii.eqlIgnoreCaseW(str, utf16Literal("SCROLLBAR")))
            .scrollbar
        else if (ascii.eqlIgnoreCaseW(str, utf16Literal("COMBOBOX")))
            .combobox
        else
            null;
    }
};
const ascii = struct {
    /// Compares UTF-16 code units case-insensitively for the ASCII range;
    /// any code unit >= 128 on either side must match exactly.
    ///
    /// Fix: the previous version cast `b_c` to u8 whenever `a_c < 128`,
    /// which is safety-checked illegal behavior when `b_c > 255` (b_c is u16).
    /// Both code units must be ASCII before the `toLower` comparison; a
    /// mixed ASCII/non-ASCII pair can never be a case-insensitive match,
    /// so falling through to the exact comparison returns false as intended.
    pub fn eqlIgnoreCaseW(a: []const u16, b: []const u16) bool {
        if (a.len != b.len) return false;
        for (a, b) |a_c, b_c| {
            if (a_c < 128 and b_c < 128) {
                if (std.ascii.toLower(@intCast(a_c)) != std.ascii.toLower(@intCast(b_c))) return false;
            } else {
                if (a_c != b_c) return false;
            }
        }
        return true;
    }
};
/// Item keywords inside a MENU/MENUEX resource body.
pub const MenuItem = enum {
    menuitem,
    popup,

    /// Case-insensitive keyword lookup.
    pub const map = std.ComptimeStringMapWithEql(MenuItem, .{
        .{ "MENUITEM", .menuitem },
        .{ "POPUP", .popup },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);

    /// True if `bytes` is the SEPARATOR keyword (case-insensitive).
    pub fn isSeparator(bytes: []const u8) bool {
        return std.ascii.eqlIgnoreCase(bytes, "SEPARATOR");
    }

    /// Option flags that can follow a menu item.
    pub const Option = enum {
        checked,
        grayed,
        help,
        inactive,
        menubarbreak,
        menubreak,

        /// Case-insensitive keyword lookup.
        pub const map = std.ComptimeStringMapWithEql(Option, .{
            .{ "CHECKED", .checked },
            .{ "GRAYED", .grayed },
            .{ "HELP", .help },
            .{ "INACTIVE", .inactive },
            .{ "MENUBARBREAK", .menubarbreak },
            .{ "MENUBREAK", .menubreak },
        }, std.comptime_string_map.eqlAsciiIgnoreCase);
    };
};
/// Entry keywords inside a TOOLBAR resource body.
pub const ToolbarButton = enum {
    button,
    separator,

    /// Case-insensitive keyword lookup.
    pub const map = std.ComptimeStringMapWithEql(ToolbarButton, .{
        .{ "BUTTON", .button },
        .{ "SEPARATOR", .separator },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
/// Fixed-info statement keywords inside a VERSIONINFO resource
/// (FILEVERSION, PRODUCTVERSION, FILEFLAGS, ...).
pub const VersionInfo = enum {
    file_version,
    product_version,
    file_flags_mask,
    file_flags,
    file_os,
    file_type,
    file_subtype,

    /// Case-insensitive keyword lookup.
    pub const map = std.ComptimeStringMapWithEql(VersionInfo, .{
        .{ "FILEVERSION", .file_version },
        .{ "PRODUCTVERSION", .product_version },
        .{ "FILEFLAGSMASK", .file_flags_mask },
        .{ "FILEFLAGS", .file_flags },
        .{ "FILEOS", .file_os },
        .{ "FILETYPE", .file_type },
        .{ "FILESUBTYPE", .file_subtype },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
/// BLOCK/VALUE node keywords inside a VERSIONINFO resource body.
pub const VersionBlock = enum {
    block,
    value,

    /// Case-insensitive keyword lookup.
    pub const map = std.ComptimeStringMapWithEql(VersionBlock, .{
        .{ "BLOCK", .block },
        .{ "VALUE", .value },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
/// Keywords that can be the first token in a statement and (if so) dictate how
/// the rest of the statement is parsed.
pub const TopLevelKeywords = enum {
    language,
    version,
    characteristics,
    stringtable,

    /// Case-insensitive keyword lookup.
    pub const map = std.ComptimeStringMapWithEql(TopLevelKeywords, .{
        .{ "LANGUAGE", .language },
        .{ "VERSION", .version },
        .{ "CHARACTERISTICS", .characteristics },
        .{ "STRINGTABLE", .stringtable },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
/// Memory/loading attribute keywords that can follow a resource's type
/// (PRELOAD, MOVEABLE, DISCARDABLE, ...).
pub const CommonResourceAttributes = enum {
    preload,
    loadoncall,
    fixed,
    moveable,
    discardable,
    pure,
    impure,
    shared,
    nonshared,

    /// Case-insensitive keyword lookup.
    pub const map = std.ComptimeStringMapWithEql(CommonResourceAttributes, .{
        .{ "PRELOAD", .preload },
        .{ "LOADONCALL", .loadoncall },
        .{ "FIXED", .fixed },
        .{ "MOVEABLE", .moveable },
        .{ "DISCARDABLE", .discardable },
        .{ "PURE", .pure },
        .{ "IMPURE", .impure },
        .{ "SHARED", .shared },
        .{ "NONSHARED", .nonshared },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
/// Type and option keywords for entries in an ACCELERATORS resource.
pub const AcceleratorTypeAndOptions = enum {
    virtkey,
    ascii,
    noinvert,
    alt,
    shift,
    control,

    /// Case-insensitive keyword lookup.
    pub const map = std.ComptimeStringMapWithEql(AcceleratorTypeAndOptions, .{
        .{ "VIRTKEY", .virtkey },
        .{ "ASCII", .ascii },
        .{ "NOINVERT", .noinvert },
        .{ "ALT", .alt },
        .{ "SHIFT", .shift },
        .{ "CONTROL", .control },
    }, std.comptime_string_map.eqlAsciiIgnoreCase);
};
| https://raw.githubusercontent.com/2lambda123/ziglang-zig-bootstrap/f56dc0fd298f41c8cc2a4f76a9648111e6c75503/zig/src/resinator/rc.zig |
// Ported from musl, which is licensed under the MIT license:
// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
//
// https://git.musl-libc.org/cgit/musl/tree/src/math/sinhf.c
// https://git.musl-libc.org/cgit/musl/tree/src/math/sinh.c
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;
const expo2 = @import("expo2.zig").expo2;
const maxInt = std.math.maxInt;
/// Returns the hyperbolic sine of x.
///
/// Special Cases:
///  - sinh(+-0)   = +-0
///  - sinh(+-inf) = +-inf
///  - sinh(nan)   = nan
pub fn sinh(x: anytype) @TypeOf(x) {
    return switch (@TypeOf(x)) {
        f32 => sinh32(x),
        f64 => sinh64(x),
        else => @compileError("sinh not implemented for " ++ @typeName(@TypeOf(x))),
    };
}
// sinh(x) = (exp(x) - 1 / exp(x)) / 2
//         = (exp(x) - 1 + (exp(x) - 1) / exp(x)) / 2
//         = x + x^3 / 6 + o(x^5)
fn sinh32(x: f32) f32 {
    // Raw bits of x; ux is |x| as bits, ax is |x| as a float.
    const u = @as(u32, @bitCast(x));
    const ux = u & 0x7FFFFFFF;
    const ax = @as(f32, @bitCast(ux));

    // sinh(+-0) = +-0, sinh(nan) = nan
    if (x == 0.0 or math.isNan(x)) {
        return x;
    }

    // h = +-0.5, carrying the sign of x (sign bit is u >> 31).
    var h: f32 = 0.5;
    if (u >> 31 != 0) {
        h = -h;
    }

    // |x| < log(FLT_MAX): expm1 cannot overflow here
    if (ux < 0x42B17217) {
        const t = math.expm1(ax);
        // |x| < 1.0
        if (ux < 0x3F800000) {
            // |x| < 0x1p-12: sinh(x) ~= x to single precision
            if (ux < 0x3F800000 - (12 << 23)) {
                return x;
            } else {
                return h * (2 * t - t * t / (t + 1));
            }
        }
        return h * (t + t / (t + 1));
    }

    // |x| > log(FLT_MAX) or nan: scaled exponential avoids premature overflow
    return 2 * h * expo2(ax);
}
/// Double-precision hyperbolic sine, same expansion as sinh32:
/// sinh(x) = (exp(x) - 1 + (exp(x) - 1) / exp(x)) / 2 = x + x^3/6 + o(x^5)
fn sinh64(x: f64) f64 {
    const u = @as(u64, @bitCast(x));
    // High word of |x| (sign cleared) used for magnitude range checks.
    const w = @as(u32, @intCast(u >> 32)) & (maxInt(u32) >> 1);
    const ax = @as(f64, @bitCast(u & (maxInt(u64) >> 1)));

    // sinh(+-0) = +-0, sinh(nan) = nan
    if (x == 0.0 or math.isNan(x)) {
        return x;
    }

    // h = +-0.5 with the sign of x. Fixed: declared as f64 (was f32);
    // this is the double-precision path (musl uses `double h` here) and
    // all products below are f64.
    var h: f64 = 0.5;
    if (u >> 63 != 0) {
        h = -h;
    }

    // |x| < log(DBL_MAX) — comment fixed (previously said FLT_MAX):
    // 0x40862E42 is the high word of log(DBL_MAX), so expm1 cannot overflow.
    if (w < 0x40862E42) {
        const t = math.expm1(ax);
        // |x| < 1.0
        if (w < 0x3FF00000) {
            // |x| < 0x1p-26: sinh(x) ~= x to double precision
            if (w < 0x3FF00000 - (26 << 20)) {
                return x;
            } else {
                return h * (2 * t - t * t / (t + 1));
            }
        }
        // NOTE: |x| > log(0x1p26) + eps could be h * exp(x)
        return h * (t + t / (t + 1));
    }

    // |x| > log(DBL_MAX) or nan: scaled exponential avoids premature overflow
    return 2 * h * expo2(ax);
}
// The generic dispatcher must route to the width-specific implementation.
test "math.sinh" {
    try expect(sinh(@as(f32, 1.5)) == sinh32(1.5));
    try expect(sinh(@as(f64, 1.5)) == sinh64(1.5));
}

// Spot-check values with an absolute tolerance; sinh is odd, so each
// positive case is mirrored with a negated input/result.
test "math.sinh32" {
    const epsilon = 0.000001;
    try expect(math.approxEqAbs(f32, sinh32(0.0), 0.0, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(0.2), 0.201336, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(0.8923), 1.015512, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(1.5), 2.129279, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-0.0), -0.0, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-0.2), -0.201336, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-0.8923), -1.015512, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-1.5), -2.129279, epsilon));
}

test "math.sinh64" {
    const epsilon = 0.000001;
    try expect(math.approxEqAbs(f64, sinh64(0.0), 0.0, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(0.2), 0.201336, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(0.8923), 1.015512, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(1.5), 2.129279, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-0.0), -0.0, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-0.2), -0.201336, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-0.8923), -1.015512, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-1.5), -2.129279, epsilon));
}

// Special cases: signed zero is preserved, infinities map to themselves,
// and nan propagates.
test "math.sinh32.special" {
    try expect(sinh32(0.0) == 0.0);
    try expect(sinh32(-0.0) == -0.0);
    try expect(math.isPositiveInf(sinh32(math.inf(f32))));
    try expect(math.isNegativeInf(sinh32(-math.inf(f32))));
    try expect(math.isNan(sinh32(math.nan(f32))));
}

test "math.sinh64.special" {
    try expect(sinh64(0.0) == 0.0);
    try expect(sinh64(-0.0) == -0.0);
    try expect(math.isPositiveInf(sinh64(math.inf(f64))));
    try expect(math.isNegativeInf(sinh64(-math.inf(f64))));
    try expect(math.isNan(sinh64(math.nan(f64))));
}
| https://raw.githubusercontent.com/beingofexistence13/multiversal-lang/dd769e3fc6182c23ef43ed4479614f43f29738c9/zig/lib/std/math/sinh.zig |
// Ported from musl, which is licensed under the MIT license:
// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
//
// https://git.musl-libc.org/cgit/musl/tree/src/math/sinhf.c
// https://git.musl-libc.org/cgit/musl/tree/src/math/sinh.c
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;
const expo2 = @import("expo2.zig").expo2;
const maxInt = std.math.maxInt;
/// Returns the hyperbolic sine of x.
///
/// Special Cases:
///  - sinh(+-0)   = +-0
///  - sinh(+-inf) = +-inf
///  - sinh(nan)   = nan
pub fn sinh(x: anytype) @TypeOf(x) {
    return switch (@TypeOf(x)) {
        f32 => sinh32(x),
        f64 => sinh64(x),
        else => @compileError("sinh not implemented for " ++ @typeName(@TypeOf(x))),
    };
}
// sinh(x) = (exp(x) - 1 / exp(x)) / 2
//         = (exp(x) - 1 + (exp(x) - 1) / exp(x)) / 2
//         = x + x^3 / 6 + o(x^5)
fn sinh32(x: f32) f32 {
    // Raw bits of x; ux is |x| as bits, ax is |x| as a float.
    const u = @as(u32, @bitCast(x));
    const ux = u & 0x7FFFFFFF;
    const ax = @as(f32, @bitCast(ux));

    // sinh(+-0) = +-0, sinh(nan) = nan
    if (x == 0.0 or math.isNan(x)) {
        return x;
    }

    // h = +-0.5, carrying the sign of x (sign bit is u >> 31).
    var h: f32 = 0.5;
    if (u >> 31 != 0) {
        h = -h;
    }

    // |x| < log(FLT_MAX): expm1 cannot overflow here
    if (ux < 0x42B17217) {
        const t = math.expm1(ax);
        // |x| < 1.0
        if (ux < 0x3F800000) {
            // |x| < 0x1p-12: sinh(x) ~= x to single precision
            if (ux < 0x3F800000 - (12 << 23)) {
                return x;
            } else {
                return h * (2 * t - t * t / (t + 1));
            }
        }
        return h * (t + t / (t + 1));
    }

    // |x| > log(FLT_MAX) or nan: scaled exponential avoids premature overflow
    return 2 * h * expo2(ax);
}
/// Double-precision hyperbolic sine, same expansion as sinh32:
/// sinh(x) = (exp(x) - 1 + (exp(x) - 1) / exp(x)) / 2 = x + x^3/6 + o(x^5)
fn sinh64(x: f64) f64 {
    const u = @as(u64, @bitCast(x));
    // High word of |x| (sign cleared) used for magnitude range checks.
    const w = @as(u32, @intCast(u >> 32)) & (maxInt(u32) >> 1);
    const ax = @as(f64, @bitCast(u & (maxInt(u64) >> 1)));

    // sinh(+-0) = +-0, sinh(nan) = nan
    if (x == 0.0 or math.isNan(x)) {
        return x;
    }

    // h = +-0.5 with the sign of x. Fixed: declared as f64 (was f32);
    // this is the double-precision path (musl uses `double h` here) and
    // all products below are f64.
    var h: f64 = 0.5;
    if (u >> 63 != 0) {
        h = -h;
    }

    // |x| < log(DBL_MAX) — comment fixed (previously said FLT_MAX):
    // 0x40862E42 is the high word of log(DBL_MAX), so expm1 cannot overflow.
    if (w < 0x40862E42) {
        const t = math.expm1(ax);
        // |x| < 1.0
        if (w < 0x3FF00000) {
            // |x| < 0x1p-26: sinh(x) ~= x to double precision
            if (w < 0x3FF00000 - (26 << 20)) {
                return x;
            } else {
                return h * (2 * t - t * t / (t + 1));
            }
        }
        // NOTE: |x| > log(0x1p26) + eps could be h * exp(x)
        return h * (t + t / (t + 1));
    }

    // |x| > log(DBL_MAX) or nan: scaled exponential avoids premature overflow
    return 2 * h * expo2(ax);
}
// The generic dispatcher must route to the width-specific implementation.
test "math.sinh" {
    try expect(sinh(@as(f32, 1.5)) == sinh32(1.5));
    try expect(sinh(@as(f64, 1.5)) == sinh64(1.5));
}

// Spot-check values with an absolute tolerance; sinh is odd, so each
// positive case is mirrored with a negated input/result.
test "math.sinh32" {
    const epsilon = 0.000001;
    try expect(math.approxEqAbs(f32, sinh32(0.0), 0.0, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(0.2), 0.201336, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(0.8923), 1.015512, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(1.5), 2.129279, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-0.0), -0.0, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-0.2), -0.201336, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-0.8923), -1.015512, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-1.5), -2.129279, epsilon));
}

test "math.sinh64" {
    const epsilon = 0.000001;
    try expect(math.approxEqAbs(f64, sinh64(0.0), 0.0, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(0.2), 0.201336, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(0.8923), 1.015512, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(1.5), 2.129279, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-0.0), -0.0, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-0.2), -0.201336, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-0.8923), -1.015512, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-1.5), -2.129279, epsilon));
}

// Special cases: signed zero is preserved, infinities map to themselves,
// and nan propagates.
test "math.sinh32.special" {
    try expect(sinh32(0.0) == 0.0);
    try expect(sinh32(-0.0) == -0.0);
    try expect(math.isPositiveInf(sinh32(math.inf(f32))));
    try expect(math.isNegativeInf(sinh32(-math.inf(f32))));
    try expect(math.isNan(sinh32(math.nan(f32))));
}

test "math.sinh64.special" {
    try expect(sinh64(0.0) == 0.0);
    try expect(sinh64(-0.0) == -0.0);
    try expect(math.isPositiveInf(sinh64(math.inf(f64))));
    try expect(math.isNegativeInf(sinh64(-math.inf(f64))));
    try expect(math.isNan(sinh64(math.nan(f64))));
}
| https://raw.githubusercontent.com/mundusnine/FoundryTools_windows_x64/b64cdb7e56db28eb710a05a089aed0daff8bc8be/lib/std/math/sinh.zig |
// Ported from musl, which is licensed under the MIT license:
// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
//
// https://git.musl-libc.org/cgit/musl/tree/src/math/sinhf.c
// https://git.musl-libc.org/cgit/musl/tree/src/math/sinh.c
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;
const expo2 = @import("expo2.zig").expo2;
const maxInt = std.math.maxInt;
/// Returns the hyperbolic sine of x.
///
/// Special Cases:
///  - sinh(+-0)   = +-0
///  - sinh(+-inf) = +-inf
///  - sinh(nan)   = nan
pub fn sinh(x: anytype) @TypeOf(x) {
    return switch (@TypeOf(x)) {
        f32 => sinh32(x),
        f64 => sinh64(x),
        else => @compileError("sinh not implemented for " ++ @typeName(@TypeOf(x))),
    };
}
// sinh(x) = (exp(x) - 1 / exp(x)) / 2
//         = (exp(x) - 1 + (exp(x) - 1) / exp(x)) / 2
//         = x + x^3 / 6 + o(x^5)
fn sinh32(x: f32) f32 {
    // Raw bits of x; ux is |x| as bits, ax is |x| as a float.
    const u = @as(u32, @bitCast(x));
    const ux = u & 0x7FFFFFFF;
    const ax = @as(f32, @bitCast(ux));

    // sinh(+-0) = +-0, sinh(nan) = nan
    if (x == 0.0 or math.isNan(x)) {
        return x;
    }

    // h = +-0.5, carrying the sign of x (sign bit is u >> 31).
    var h: f32 = 0.5;
    if (u >> 31 != 0) {
        h = -h;
    }

    // |x| < log(FLT_MAX): expm1 cannot overflow here
    if (ux < 0x42B17217) {
        const t = math.expm1(ax);
        // |x| < 1.0
        if (ux < 0x3F800000) {
            // |x| < 0x1p-12: sinh(x) ~= x to single precision
            if (ux < 0x3F800000 - (12 << 23)) {
                return x;
            } else {
                return h * (2 * t - t * t / (t + 1));
            }
        }
        return h * (t + t / (t + 1));
    }

    // |x| > log(FLT_MAX) or nan: scaled exponential avoids premature overflow
    return 2 * h * expo2(ax);
}
/// Double-precision hyperbolic sine, same expansion as sinh32:
/// sinh(x) = (exp(x) - 1 + (exp(x) - 1) / exp(x)) / 2 = x + x^3/6 + o(x^5)
fn sinh64(x: f64) f64 {
    const u = @as(u64, @bitCast(x));
    // High word of |x| (sign cleared) used for magnitude range checks.
    const w = @as(u32, @intCast(u >> 32)) & (maxInt(u32) >> 1);
    const ax = @as(f64, @bitCast(u & (maxInt(u64) >> 1)));

    // sinh(+-0) = +-0, sinh(nan) = nan
    if (x == 0.0 or math.isNan(x)) {
        return x;
    }

    // h = +-0.5 with the sign of x. Fixed: declared as f64 (was f32);
    // this is the double-precision path (musl uses `double h` here) and
    // all products below are f64.
    var h: f64 = 0.5;
    if (u >> 63 != 0) {
        h = -h;
    }

    // |x| < log(DBL_MAX) — comment fixed (previously said FLT_MAX):
    // 0x40862E42 is the high word of log(DBL_MAX), so expm1 cannot overflow.
    if (w < 0x40862E42) {
        const t = math.expm1(ax);
        // |x| < 1.0
        if (w < 0x3FF00000) {
            // |x| < 0x1p-26: sinh(x) ~= x to double precision
            if (w < 0x3FF00000 - (26 << 20)) {
                return x;
            } else {
                return h * (2 * t - t * t / (t + 1));
            }
        }
        // NOTE: |x| > log(0x1p26) + eps could be h * exp(x)
        return h * (t + t / (t + 1));
    }

    // |x| > log(DBL_MAX) or nan: scaled exponential avoids premature overflow
    return 2 * h * expo2(ax);
}
// The generic dispatcher must route to the width-specific implementation.
test "math.sinh" {
    try expect(sinh(@as(f32, 1.5)) == sinh32(1.5));
    try expect(sinh(@as(f64, 1.5)) == sinh64(1.5));
}

// Spot-check values with an absolute tolerance; sinh is odd, so each
// positive case is mirrored with a negated input/result.
test "math.sinh32" {
    const epsilon = 0.000001;
    try expect(math.approxEqAbs(f32, sinh32(0.0), 0.0, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(0.2), 0.201336, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(0.8923), 1.015512, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(1.5), 2.129279, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-0.0), -0.0, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-0.2), -0.201336, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-0.8923), -1.015512, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-1.5), -2.129279, epsilon));
}

test "math.sinh64" {
    const epsilon = 0.000001;
    try expect(math.approxEqAbs(f64, sinh64(0.0), 0.0, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(0.2), 0.201336, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(0.8923), 1.015512, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(1.5), 2.129279, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-0.0), -0.0, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-0.2), -0.201336, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-0.8923), -1.015512, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-1.5), -2.129279, epsilon));
}

// Special cases: signed zero is preserved, infinities map to themselves,
// and nan propagates.
test "math.sinh32.special" {
    try expect(sinh32(0.0) == 0.0);
    try expect(sinh32(-0.0) == -0.0);
    try expect(math.isPositiveInf(sinh32(math.inf(f32))));
    try expect(math.isNegativeInf(sinh32(-math.inf(f32))));
    try expect(math.isNan(sinh32(math.nan(f32))));
}

test "math.sinh64.special" {
    try expect(sinh64(0.0) == 0.0);
    try expect(sinh64(-0.0) == -0.0);
    try expect(math.isPositiveInf(sinh64(math.inf(f64))));
    try expect(math.isNegativeInf(sinh64(-math.inf(f64))));
    try expect(math.isNan(sinh64(math.nan(f64))));
}
| https://raw.githubusercontent.com/mundusnine/FoundryTools_linux_x64/98e738bf92a416b255c9d11b78e8033071b52672/lib/std/math/sinh.zig |
// Ported from musl, which is licensed under the MIT license:
// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
//
// https://git.musl-libc.org/cgit/musl/tree/src/math/sinhf.c
// https://git.musl-libc.org/cgit/musl/tree/src/math/sinh.c
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;
const expo2 = @import("expo2.zig").expo2;
const maxInt = std.math.maxInt;
/// Returns the hyperbolic sine of x.
///
/// Special Cases:
///  - sinh(+-0)   = +-0
///  - sinh(+-inf) = +-inf
///  - sinh(nan)   = nan
pub fn sinh(x: anytype) @TypeOf(x) {
    return switch (@TypeOf(x)) {
        f32 => sinh32(x),
        f64 => sinh64(x),
        else => @compileError("sinh not implemented for " ++ @typeName(@TypeOf(x))),
    };
}
// sinh(x) = (exp(x) - 1 / exp(x)) / 2
//         = (exp(x) - 1 + (exp(x) - 1) / exp(x)) / 2
//         = x + x^3 / 6 + o(x^5)
fn sinh32(x: f32) f32 {
    // Raw bits of x; ux is |x| as bits, ax is |x| as a float.
    const u = @as(u32, @bitCast(x));
    const ux = u & 0x7FFFFFFF;
    const ax = @as(f32, @bitCast(ux));

    // sinh(+-0) = +-0, sinh(nan) = nan
    if (x == 0.0 or math.isNan(x)) {
        return x;
    }

    // h = +-0.5, carrying the sign of x (sign bit is u >> 31).
    var h: f32 = 0.5;
    if (u >> 31 != 0) {
        h = -h;
    }

    // |x| < log(FLT_MAX): expm1 cannot overflow here
    if (ux < 0x42B17217) {
        const t = math.expm1(ax);
        // |x| < 1.0
        if (ux < 0x3F800000) {
            // |x| < 0x1p-12: sinh(x) ~= x to single precision
            if (ux < 0x3F800000 - (12 << 23)) {
                return x;
            } else {
                return h * (2 * t - t * t / (t + 1));
            }
        }
        return h * (t + t / (t + 1));
    }

    // |x| > log(FLT_MAX) or nan: scaled exponential avoids premature overflow
    return 2 * h * expo2(ax);
}
/// Double-precision hyperbolic sine, same expansion as sinh32:
/// sinh(x) = (exp(x) - 1 + (exp(x) - 1) / exp(x)) / 2 = x + x^3/6 + o(x^5)
fn sinh64(x: f64) f64 {
    const u = @as(u64, @bitCast(x));
    // High word of |x| (sign cleared) used for magnitude range checks.
    const w = @as(u32, @intCast(u >> 32)) & (maxInt(u32) >> 1);
    const ax = @as(f64, @bitCast(u & (maxInt(u64) >> 1)));

    // sinh(+-0) = +-0, sinh(nan) = nan
    if (x == 0.0 or math.isNan(x)) {
        return x;
    }

    // h = +-0.5 with the sign of x. Fixed: declared as f64 (was f32);
    // this is the double-precision path (musl uses `double h` here) and
    // all products below are f64.
    var h: f64 = 0.5;
    if (u >> 63 != 0) {
        h = -h;
    }

    // |x| < log(DBL_MAX) — comment fixed (previously said FLT_MAX):
    // 0x40862E42 is the high word of log(DBL_MAX), so expm1 cannot overflow.
    if (w < 0x40862E42) {
        const t = math.expm1(ax);
        // |x| < 1.0
        if (w < 0x3FF00000) {
            // |x| < 0x1p-26: sinh(x) ~= x to double precision
            if (w < 0x3FF00000 - (26 << 20)) {
                return x;
            } else {
                return h * (2 * t - t * t / (t + 1));
            }
        }
        // NOTE: |x| > log(0x1p26) + eps could be h * exp(x)
        return h * (t + t / (t + 1));
    }

    // |x| > log(DBL_MAX) or nan: scaled exponential avoids premature overflow
    return 2 * h * expo2(ax);
}
// The generic dispatcher must route to the width-specific implementation.
test "math.sinh" {
    try expect(sinh(@as(f32, 1.5)) == sinh32(1.5));
    try expect(sinh(@as(f64, 1.5)) == sinh64(1.5));
}

// Spot-check values with an absolute tolerance; sinh is odd, so each
// positive case is mirrored with a negated input/result.
test "math.sinh32" {
    const epsilon = 0.000001;
    try expect(math.approxEqAbs(f32, sinh32(0.0), 0.0, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(0.2), 0.201336, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(0.8923), 1.015512, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(1.5), 2.129279, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-0.0), -0.0, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-0.2), -0.201336, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-0.8923), -1.015512, epsilon));
    try expect(math.approxEqAbs(f32, sinh32(-1.5), -2.129279, epsilon));
}

test "math.sinh64" {
    const epsilon = 0.000001;
    try expect(math.approxEqAbs(f64, sinh64(0.0), 0.0, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(0.2), 0.201336, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(0.8923), 1.015512, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(1.5), 2.129279, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-0.0), -0.0, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-0.2), -0.201336, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-0.8923), -1.015512, epsilon));
    try expect(math.approxEqAbs(f64, sinh64(-1.5), -2.129279, epsilon));
}

// Special cases: signed zero is preserved, infinities map to themselves,
// and nan propagates.
test "math.sinh32.special" {
    try expect(sinh32(0.0) == 0.0);
    try expect(sinh32(-0.0) == -0.0);
    try expect(math.isPositiveInf(sinh32(math.inf(f32))));
    try expect(math.isNegativeInf(sinh32(-math.inf(f32))));
    try expect(math.isNan(sinh32(math.nan(f32))));
}

test "math.sinh64.special" {
    try expect(sinh64(0.0) == 0.0);
    try expect(sinh64(-0.0) == -0.0);
    try expect(math.isPositiveInf(sinh64(math.inf(f64))));
    try expect(math.isNegativeInf(sinh64(-math.inf(f64))));
    try expect(math.isNan(sinh64(math.nan(f64))));
}
| https://raw.githubusercontent.com/matpx/daydream/018ad0c7caaf796d8a04b882fcbed39ccb7c9cd8/toolchain/zig/lib/std/math/sinh.zig |
const std = @import("std");
/// Formats "One for X, one for me." into `buffer`, where X is `name` or
/// "you" when `name` is null. Returns the written slice (a view into
/// `buffer`). Errors with `error.NoSpaceLeft` if `buffer` is too small —
/// the error set is narrowed from `anyerror` to the precise
/// `std.fmt.BufPrintError` so callers can handle it exhaustively.
pub fn twoFer(buffer: []u8, name: ?[]const u8) std.fmt.BufPrintError![]u8 {
    const recipient = name orelse "you";
    return std.fmt.bufPrint(buffer, "One for {s}, one for me.", .{recipient});
}
| https://raw.githubusercontent.com/emanuel-bs/exercism-12in23/d5521f9712907b22aaa4b42623c3e7c34edd7a38/zig/two-fer/two_fer.zig |
const std = @import("std");
/// Formats "One for X, one for me." into `buffer`, where X is `name` or
/// "you" when `name` is null. Returns the written slice (a view into
/// `buffer`). Errors with `error.NoSpaceLeft` if `buffer` is too small —
/// the error set is narrowed from `anyerror` to the precise
/// `std.fmt.BufPrintError` so callers can handle it exhaustively.
pub fn twoFer(buffer: []u8, name: ?[]const u8) std.fmt.BufPrintError![]u8 {
    const recipient = name orelse "you";
    return std.fmt.bufPrint(buffer, "One for {s}, one for me.", .{recipient});
}
| https://raw.githubusercontent.com/alexwheezy/Exercism/d1bef009856cb121de80fec80322f60cdbc8034a/zig/two-fer/two_fer.zig |
const std = @import("std");
/// Formats "One for X, one for me." into `buffer`, where X is `name` or
/// "you" when `name` is null. Returns the written slice (a view into
/// `buffer`). Errors with `error.NoSpaceLeft` if `buffer` is too small —
/// the error set is narrowed from `anyerror` to the precise
/// `std.fmt.BufPrintError` so callers can handle it exhaustively.
pub fn twoFer(buffer: []u8, name: ?[]const u8) std.fmt.BufPrintError![]u8 {
    const recipient = name orelse "you";
    return std.fmt.bufPrint(buffer, "One for {s}, one for me.", .{recipient});
}
| https://raw.githubusercontent.com/binhtran432k/exercism-learning/7f52068bf630aa54226acc132464c9035669b4c4/zig/two-fer/two_fer.zig |
const std = @import("std");
/// Formats "One for X, one for me." into `buffer`, where X is `name` or
/// "you" when `name` is null. Returns the written slice (a view into
/// `buffer`). Errors with `error.NoSpaceLeft` if `buffer` is too small —
/// the error set is narrowed from `anyerror` to the precise
/// `std.fmt.BufPrintError` so callers can handle it exhaustively.
pub fn twoFer(buffer: []u8, name: ?[]const u8) std.fmt.BufPrintError![]u8 {
    const recipient = name orelse "you";
    return std.fmt.bufPrint(buffer, "One for {s}, one for me.", .{recipient});
}
| https://raw.githubusercontent.com/stackcats/exercism/2d8f8e5494f04a4ce53eefd9fcb36bee5128ba43/zig/two_fer.zig |
// Convenience re-exports: surface the UEFI table types from their
// implementation files (tables/boot_services.zig, tables/runtime_services.zig,
// tables/system_table.zig, ...) under a single namespace.
pub const AllocateType = @import("tables/boot_services.zig").AllocateType;
pub const BootServices = @import("tables/boot_services.zig").BootServices;
pub const ConfigurationTable = @import("tables/configuration_table.zig").ConfigurationTable;
// `align(8)` preserves the alignment declared at the definition site.
pub const global_variable align(8) = @import("tables/runtime_services.zig").global_variable;
pub const LocateSearchType = @import("tables/boot_services.zig").LocateSearchType;
pub const MemoryDescriptor = @import("tables/boot_services.zig").MemoryDescriptor;
pub const MemoryType = @import("tables/boot_services.zig").MemoryType;
pub const OpenProtocolAttributes = @import("tables/boot_services.zig").OpenProtocolAttributes;
pub const ProtocolInformationEntry = @import("tables/boot_services.zig").ProtocolInformationEntry;
pub const ResetType = @import("tables/runtime_services.zig").ResetType;
pub const RuntimeServices = @import("tables/runtime_services.zig").RuntimeServices;
pub const SystemTable = @import("tables/system_table.zig").SystemTable;
pub const TableHeader = @import("tables/table_header.zig").TableHeader;
pub const TimerDelay = @import("tables/boot_services.zig").TimerDelay;
| https://raw.githubusercontent.com/natanalt/zig-x86_16/1b38fc3ef5e539047c76604ffe71b81e246f1a1e/lib/std/os/uefi/tables.zig |
// Convenience re-exports: surface the UEFI table types from their
// implementation files (tables/boot_services.zig, tables/runtime_services.zig,
// tables/system_table.zig, ...) under a single namespace.
pub const AllocateType = @import("tables/boot_services.zig").AllocateType;
pub const BootServices = @import("tables/boot_services.zig").BootServices;
pub const ConfigurationTable = @import("tables/configuration_table.zig").ConfigurationTable;
// `align(8)` preserves the alignment declared at the definition site.
pub const global_variable align(8) = @import("tables/runtime_services.zig").global_variable;
pub const LocateSearchType = @import("tables/boot_services.zig").LocateSearchType;
pub const MemoryDescriptor = @import("tables/boot_services.zig").MemoryDescriptor;
pub const MemoryType = @import("tables/boot_services.zig").MemoryType;
pub const OpenProtocolAttributes = @import("tables/boot_services.zig").OpenProtocolAttributes;
pub const ProtocolInformationEntry = @import("tables/boot_services.zig").ProtocolInformationEntry;
pub const ResetType = @import("tables/runtime_services.zig").ResetType;
pub const RuntimeServices = @import("tables/runtime_services.zig").RuntimeServices;
pub const SystemTable = @import("tables/system_table.zig").SystemTable;
pub const TableHeader = @import("tables/table_header.zig").TableHeader;
pub const TimerDelay = @import("tables/boot_services.zig").TimerDelay;
| https://raw.githubusercontent.com/kraxli/dev_tools_mswindows/1d1a8f61299e4b7ba356fae3a37af0ddc8daf356/zig-windows-x86_64-0.9.1/lib/std/os/uefi/tables.zig |
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std");
const builtin = std.builtin;
const page_size = std.mem.page_size;
// Re-export the C tokenizer/parser helpers under this namespace.
pub const tokenizer = @import("c/tokenizer.zig");
pub const Token = tokenizer.Token;
pub const Tokenizer = tokenizer.Tokenizer;
pub const parse = @import("c/parse.zig").parse;
pub const ast = @import("c/ast.zig");
pub const builtins = @import("c/builtins.zig");

// Reference the tokenizer module so its tests run with this module's tests.
test {
    _ = tokenizer;
}
// OS-independent C type/constant definitions.
pub usingnamespace @import("os/bits.zig");

// Per-target libc declarations, selected at comptime from the target OS tag;
// targets with no dedicated file get an empty namespace.
pub usingnamespace switch (std.Target.current.os.tag) {
    .linux => @import("c/linux.zig"),
    .windows => @import("c/windows.zig"),
    .macos, .ios, .tvos, .watchos => @import("c/darwin.zig"),
    .freebsd, .kfreebsd => @import("c/freebsd.zig"),
    .netbsd => @import("c/netbsd.zig"),
    .dragonfly => @import("c/dragonfly.zig"),
    .openbsd => @import("c/openbsd.zig"),
    .haiku => @import("c/haiku.zig"),
    .hermit => @import("c/hermit.zig"),
    .solaris => @import("c/solaris.zig"),
    .fuchsia => @import("c/fuchsia.zig"),
    .minix => @import("c/minix.zig"),
    .emscripten => @import("c/emscripten.zig"),
    else => struct {},
};
/// Translates a libc return code into an errno value: 0 when `rc` indicates
/// success (anything other than -1), otherwise the thread-local errno read
/// via `_errno()`.
pub fn getErrno(rc: anytype) c_int {
    return if (rc == -1) _errno().* else 0;
}
/// The return type is `type` to force comptime function call execution.
/// TODO: https://github.com/ziglang/zig/issues/425
/// If not linking libc, returns struct{pub const ok = false;}
/// If linking musl libc, returns struct{pub const ok = true;}
/// If linking gnu libc (glibc), the `ok` value will be true if the target
/// version is greater than or equal to `glibc_version`.
/// If linking a libc other than these, returns `false`.
pub fn versionCheck(glibc_version: builtin.Version) type {
    return struct {
        pub const ok = blk: {
            // No libc at all: the symbol cannot exist.
            if (!builtin.link_libc) break :blk false;
            // musl ships every symbol regardless of version.
            if (std.Target.current.abi.isMusl()) break :blk true;
            if (std.Target.current.isGnuLibC()) {
                // glibc: compare the target's glibc version against the
                // minimum required by the caller.
                const ver = std.Target.current.os.version_range.linux.glibc;
                const order = ver.order(glibc_version);
                break :blk switch (order) {
                    .gt, .eq => true,
                    .lt => false,
                };
            } else {
                // Some other libc: assume the symbol is unavailable.
                break :blk false;
            }
        };
    };
}
// Environment and stdio.
pub extern "c" var environ: [*:null]?[*:0]u8;
pub extern "c" fn fopen(noalias filename: [*:0]const u8, noalias modes: [*:0]const u8) ?*FILE;
pub extern "c" fn fclose(stream: *FILE) c_int;
pub extern "c" fn fwrite(noalias ptr: [*]const u8, size_of_type: usize, item_count: usize, noalias stream: *FILE) usize;
pub extern "c" fn fread(noalias ptr: [*]u8, size_of_type: usize, item_count: usize, noalias stream: *FILE) usize;
pub extern "c" fn printf(format: [*:0]const u8, ...) c_int;

// Process termination and signals.
pub extern "c" fn abort() noreturn;
pub extern "c" fn exit(code: c_int) noreturn;
pub extern "c" fn _exit(code: c_int) noreturn;
pub extern "c" fn isatty(fd: fd_t) c_int;
pub extern "c" fn close(fd: fd_t) c_int;
pub extern "c" fn lseek(fd: fd_t, offset: off_t, whence: c_int) off_t;
pub extern "c" fn open(path: [*:0]const u8, oflag: c_uint, ...) c_int;
pub extern "c" fn openat(fd: c_int, path: [*:0]const u8, oflag: c_uint, ...) c_int;
pub extern "c" fn ftruncate(fd: c_int, length: off_t) c_int;
pub extern "c" fn raise(sig: c_int) c_int;

// File-descriptor reads/writes, including vectored and positional variants.
pub extern "c" fn read(fd: fd_t, buf: [*]u8, nbyte: usize) isize;
pub extern "c" fn readv(fd: c_int, iov: [*]const iovec, iovcnt: c_uint) isize;
pub extern "c" fn pread(fd: fd_t, buf: [*]u8, nbyte: usize, offset: u64) isize;
pub extern "c" fn preadv(fd: c_int, iov: [*]const iovec, iovcnt: c_uint, offset: u64) isize;
pub extern "c" fn writev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint) isize;
pub extern "c" fn pwritev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint, offset: u64) isize;
pub extern "c" fn write(fd: fd_t, buf: [*]const u8, nbyte: usize) isize;
pub extern "c" fn pwrite(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: u64) isize;

// Memory mapping and protection.
pub extern "c" fn mmap(addr: ?*align(page_size) c_void, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: u64) *c_void;
pub extern "c" fn munmap(addr: *align(page_size) c_void, len: usize) c_int;
pub extern "c" fn mprotect(addr: *align(page_size) c_void, len: usize, prot: c_uint) c_int;

// Filesystem path operations.
pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: c_int) c_int;
pub extern "c" fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: c_int) c_int;
pub extern "c" fn unlink(path: [*:0]const u8) c_int;
pub extern "c" fn unlinkat(dirfd: fd_t, path: [*:0]const u8, flags: c_uint) c_int;
pub extern "c" fn getcwd(buf: [*]u8, size: usize) ?[*]u8;

// Process creation and management.
pub extern "c" fn waitpid(pid: c_int, stat_loc: *c_uint, options: c_uint) c_int;
pub extern "c" fn fork() c_int;
pub extern "c" fn access(path: [*:0]const u8, mode: c_uint) c_int;
pub extern "c" fn faccessat(dirfd: fd_t, path: [*:0]const u8, mode: c_uint, flags: c_uint) c_int;
pub extern "c" fn pipe(fds: *[2]fd_t) c_int;
pub extern "c" fn mkdir(path: [*:0]const u8, mode: c_uint) c_int;
pub extern "c" fn mkdirat(dirfd: fd_t, path: [*:0]const u8, mode: u32) c_int;
pub extern "c" fn symlink(existing: [*:0]const u8, new: [*:0]const u8) c_int;
pub extern "c" fn symlinkat(oldpath: [*:0]const u8, newdirfd: fd_t, newpath: [*:0]const u8) c_int;
pub extern "c" fn rename(old: [*:0]const u8, new: [*:0]const u8) c_int;
pub extern "c" fn renameat(olddirfd: fd_t, old: [*:0]const u8, newdirfd: fd_t, new: [*:0]const u8) c_int;
pub extern "c" fn chdir(path: [*:0]const u8) c_int;
pub extern "c" fn fchdir(fd: fd_t) c_int;
pub extern "c" fn execve(path: [*:0]const u8, argv: [*:null]const ?[*:0]const u8, envp: [*:null]const ?[*:0]const u8) c_int;
pub extern "c" fn dup(fd: fd_t) c_int;
pub extern "c" fn dup2(old_fd: fd_t, new_fd: fd_t) c_int;
pub extern "c" fn readlink(noalias path: [*:0]const u8, noalias buf: [*]u8, bufsize: usize) isize;
pub extern "c" fn readlinkat(dirfd: fd_t, noalias path: [*:0]const u8, noalias buf: [*]u8, bufsize: usize) isize;
// Darwin exposes these symbols under decorated names ($DARWIN_EXTSN /
// leading-underscore aliases defined elsewhere); alias them here so the
// rest of the module can use the plain names on every target.
pub usingnamespace switch (builtin.os.tag) {
    .macos, .ios, .watchos, .tvos => struct {
        pub const realpath = @"realpath$DARWIN_EXTSN";
        pub const fstatat = _fstatat;
    },
    else => struct {
        pub extern "c" fn realpath(noalias file_name: [*:0]const u8, noalias resolved_name: [*]u8) ?[*:0]u8;
        pub extern "c" fn fstatat(dirfd: fd_t, path: [*:0]const u8, stat_buf: *libc_stat, flags: u32) c_int;
    },
};
// Miscellaneous filesystem/terminal/system declarations.
pub extern "c" fn rmdir(path: [*:0]const u8) c_int;
pub extern "c" fn getenv(name: [*:0]const u8) ?[*:0]u8;
pub extern "c" fn sysctl(name: [*]const c_int, namelen: c_uint, oldp: ?*c_void, oldlenp: ?*usize, newp: ?*c_void, newlen: usize) c_int;
pub extern "c" fn sysctlbyname(name: [*:0]const u8, oldp: ?*c_void, oldlenp: ?*usize, newp: ?*c_void, newlen: usize) c_int;
pub extern "c" fn sysctlnametomib(name: [*:0]const u8, mibp: ?*c_int, sizep: ?*usize) c_int;
pub extern "c" fn tcgetattr(fd: fd_t, termios_p: *termios) c_int;
pub extern "c" fn tcsetattr(fd: fd_t, optional_action: TCSA, termios_p: *const termios) c_int;
pub extern "c" fn fcntl(fd: fd_t, cmd: c_int, ...) c_int;
pub extern "c" fn flock(fd: fd_t, operation: c_int) c_int;
pub extern "c" fn ioctl(fd: fd_t, request: c_int, ...) c_int;
pub extern "c" fn uname(buf: *utsname) c_int;
pub extern "c" fn gethostname(name: [*]u8, len: usize) c_int;

// Socket API.
pub extern "c" fn shutdown(socket: fd_t, how: c_int) c_int;
pub extern "c" fn bind(socket: fd_t, address: ?*const sockaddr, address_len: socklen_t) c_int;
pub extern "c" fn socketpair(domain: c_uint, sock_type: c_uint, protocol: c_uint, sv: *[2]fd_t) c_int;
pub extern "c" fn listen(sockfd: fd_t, backlog: c_uint) c_int;
pub extern "c" fn getsockname(sockfd: fd_t, noalias addr: *sockaddr, noalias addrlen: *socklen_t) c_int;
pub extern "c" fn connect(sockfd: fd_t, sock_addr: *const sockaddr, addrlen: socklen_t) c_int;
pub extern "c" fn accept(sockfd: fd_t, noalias addr: ?*sockaddr, noalias addrlen: ?*socklen_t) c_int;
pub extern "c" fn accept4(sockfd: fd_t, noalias addr: ?*sockaddr, noalias addrlen: ?*socklen_t, flags: c_uint) c_int;
pub extern "c" fn getsockopt(sockfd: fd_t, level: u32, optname: u32, noalias optval: ?*c_void, noalias optlen: *socklen_t) c_int;
pub extern "c" fn setsockopt(sockfd: fd_t, level: u32, optname: u32, optval: ?*const c_void, optlen: socklen_t) c_int;
pub extern "c" fn send(sockfd: fd_t, buf: *const c_void, len: usize, flags: u32) isize;
pub extern "c" fn sendto(
    sockfd: fd_t,
    buf: *const c_void,
    len: usize,
    flags: u32,
    dest_addr: ?*const sockaddr,
    addrlen: socklen_t,
) isize;
// Fixed: recv/recvfrom were declared `pub extern fn` without the "c"
// library annotation, inconsistent with every other libc declaration in
// this file (the annotation matters for libc import linkage on some targets).
pub extern "c" fn recv(sockfd: fd_t, arg1: ?*c_void, arg2: usize, arg3: c_int) isize;
pub extern "c" fn recvfrom(
    sockfd: fd_t,
    noalias buf: *c_void,
    len: usize,
    flags: u32,
    noalias src_addr: ?*sockaddr,
    noalias addrlen: ?*socklen_t,
) isize;
// Per-OS overrides: some libcs version their symbols (NetBSD renames
// them, e.g. __clock_gettime50) or need slightly different prototypes,
// so the correct set of declarations is selected at comptime and merged
// into this namespace.
pub usingnamespace switch (builtin.os.tag) {
    // NetBSD versions its libc symbols; map the portable names onto the
    // current versioned entry points.
    .netbsd => struct {
        pub const clock_getres = __clock_getres50;
        pub const clock_gettime = __clock_gettime50;
        pub const fstat = __fstat50;
        pub const getdents = __getdents30;
        pub const getrusage = __getrusage50;
        pub const gettimeofday = __gettimeofday50;
        pub const nanosleep = __nanosleep50;
        pub const sched_yield = __libc_thr_yield;
        pub const sigaction = __sigaction14;
        pub const sigaltstack = __sigaltstack14;
        pub const sigprocmask = __sigprocmask14;
        pub const stat = __stat50;
    },
    // Darwin platforms: mostly standard prototypes, with a few symbols
    // remapped (fstat -> _fstat) and some still to be handled (see XXX).
    .macos, .ios, .watchos, .tvos => struct {
        // XXX: close -> close$NOCANCEL
        // XXX: getdirentries -> _getdirentries64
        pub extern "c" fn clock_getres(clk_id: c_int, tp: *timespec) c_int;
        pub extern "c" fn clock_gettime(clk_id: c_int, tp: *timespec) c_int;
        pub const fstat = _fstat;
        pub extern "c" fn getrusage(who: c_int, usage: *rusage) c_int;
        pub extern "c" fn gettimeofday(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
        pub extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int;
        pub extern "c" fn sched_yield() c_int;
        pub extern "c" fn sigaction(sig: c_int, noalias act: ?*const Sigaction, noalias oact: ?*Sigaction) c_int;
        pub extern "c" fn sigprocmask(how: c_int, noalias set: ?*const sigset_t, noalias oset: ?*sigset_t) c_int;
        pub extern "c" fn socket(domain: c_uint, sock_type: c_uint, protocol: c_uint) c_int;
        pub extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: *libc_stat) c_int;
    },
    .windows => struct {
        // TODO: copied the else case and removed the socket function (because its in ws2_32)
        // need to verify which of these is actually supported on windows
        pub extern "c" fn clock_getres(clk_id: c_int, tp: *timespec) c_int;
        pub extern "c" fn clock_gettime(clk_id: c_int, tp: *timespec) c_int;
        pub extern "c" fn fstat(fd: fd_t, buf: *libc_stat) c_int;
        pub extern "c" fn getrusage(who: c_int, usage: *rusage) c_int;
        pub extern "c" fn gettimeofday(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
        pub extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int;
        pub extern "c" fn sched_yield() c_int;
        pub extern "c" fn sigaction(sig: c_int, noalias act: ?*const Sigaction, noalias oact: ?*Sigaction) c_int;
        pub extern "c" fn sigprocmask(how: c_int, noalias set: ?*const sigset_t, noalias oset: ?*sigset_t) c_int;
        pub extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: *libc_stat) c_int;
    },
    // Default case: plain POSIX prototypes.
    else => struct {
        pub extern "c" fn clock_getres(clk_id: c_int, tp: *timespec) c_int;
        pub extern "c" fn clock_gettime(clk_id: c_int, tp: *timespec) c_int;
        pub extern "c" fn fstat(fd: fd_t, buf: *libc_stat) c_int;
        pub extern "c" fn getrusage(who: c_int, usage: *rusage) c_int;
        pub extern "c" fn gettimeofday(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
        pub extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int;
        pub extern "c" fn sched_yield() c_int;
        pub extern "c" fn sigaction(sig: c_int, noalias act: ?*const Sigaction, noalias oact: ?*Sigaction) c_int;
        pub extern "c" fn sigprocmask(how: c_int, noalias set: ?*const sigset_t, noalias oset: ?*sigset_t) c_int;
        pub extern "c" fn socket(domain: c_uint, sock_type: c_uint, protocol: c_uint) c_int;
        pub extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: *libc_stat) c_int;
    },
};
// ---------------------------------------------------------------------------
// Miscellaneous libc bindings: signals, identity, memory, timestamps,
// pthreads, semaphores, kqueue, name resolution and polling.
// ---------------------------------------------------------------------------
pub extern "c" fn kill(pid: pid_t, sig: c_int) c_int;
pub extern "c" fn getdirentries(fd: fd_t, buf_ptr: [*]u8, nbytes: usize, basep: *i64) isize;
// Process credential manipulation (set{u,g}id and friends).
pub extern "c" fn setuid(uid: uid_t) c_int;
pub extern "c" fn setgid(gid: gid_t) c_int;
pub extern "c" fn seteuid(euid: uid_t) c_int;
pub extern "c" fn setegid(egid: gid_t) c_int;
pub extern "c" fn setreuid(ruid: uid_t, euid: uid_t) c_int;
pub extern "c" fn setregid(rgid: gid_t, egid: gid_t) c_int;
pub extern "c" fn setresuid(ruid: uid_t, euid: uid_t, suid: uid_t) c_int;
pub extern "c" fn setresgid(rgid: gid_t, egid: gid_t, sgid: gid_t) c_int;
// libc heap allocator (used when linking libc instead of Zig's own allocators).
pub extern "c" fn malloc(usize) ?*c_void;
pub extern "c" fn realloc(?*c_void, usize) ?*c_void;
pub extern "c" fn free(?*c_void) void;
// File timestamp updates, at second/microsecond/nanosecond granularity.
pub extern "c" fn futimes(fd: fd_t, times: *[2]timeval) c_int;
pub extern "c" fn utimes(path: [*:0]const u8, times: *[2]timeval) c_int;
pub extern "c" fn utimensat(dirfd: fd_t, pathname: [*:0]const u8, times: *[2]timespec, flags: u32) c_int;
pub extern "c" fn futimens(fd: fd_t, times: *const [2]timespec) c_int;
// POSIX threads: creation, attributes, joining, fork handlers and TLS keys.
pub extern "c" fn pthread_create(noalias newthread: *pthread_t, noalias attr: ?*const pthread_attr_t, start_routine: fn (?*c_void) callconv(.C) ?*c_void, noalias arg: ?*c_void) c_int;
pub extern "c" fn pthread_attr_init(attr: *pthread_attr_t) c_int;
pub extern "c" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *c_void, stacksize: usize) c_int;
pub extern "c" fn pthread_attr_setstacksize(attr: *pthread_attr_t, stacksize: usize) c_int;
pub extern "c" fn pthread_attr_setguardsize(attr: *pthread_attr_t, guardsize: usize) c_int;
pub extern "c" fn pthread_attr_destroy(attr: *pthread_attr_t) c_int;
pub extern "c" fn pthread_self() pthread_t;
pub extern "c" fn pthread_join(thread: pthread_t, arg_return: ?*?*c_void) c_int;
pub extern "c" fn pthread_atfork(
    prepare: ?fn () callconv(.C) void,
    parent: ?fn () callconv(.C) void,
    child: ?fn () callconv(.C) void,
) c_int;
pub extern "c" fn pthread_key_create(key: *pthread_key_t, destructor: ?fn (value: *c_void) callconv(.C) void) c_int;
pub extern "c" fn pthread_key_delete(key: pthread_key_t) c_int;
pub extern "c" fn pthread_getspecific(key: pthread_key_t) ?*c_void;
pub extern "c" fn pthread_setspecific(key: pthread_key_t, value: ?*c_void) c_int;
// POSIX unnamed semaphores.
pub extern "c" fn sem_init(sem: *sem_t, pshared: c_int, value: c_uint) c_int;
pub extern "c" fn sem_destroy(sem: *sem_t) c_int;
pub extern "c" fn sem_post(sem: *sem_t) c_int;
pub extern "c" fn sem_wait(sem: *sem_t) c_int;
pub extern "c" fn sem_trywait(sem: *sem_t) c_int;
pub extern "c" fn sem_timedwait(sem: *sem_t, abs_timeout: *const timespec) c_int;
pub extern "c" fn sem_getvalue(sem: *sem_t, sval: *c_int) c_int;
// BSD kqueue event notification interface.
pub extern "c" fn kqueue() c_int;
pub extern "c" fn kevent(
    kq: c_int,
    changelist: [*]const Kevent,
    nchanges: c_int,
    eventlist: [*]Kevent,
    nevents: c_int,
    timeout: ?*const timespec,
) c_int;
// Name resolution; note these return an EAI error code, not errno.
pub extern "c" fn getaddrinfo(
    noalias node: ?[*:0]const u8,
    noalias service: ?[*:0]const u8,
    noalias hints: ?*const addrinfo,
    noalias res: **addrinfo,
) EAI;
pub extern "c" fn freeaddrinfo(res: *addrinfo) void;
pub extern "c" fn getnameinfo(
    noalias addr: *const sockaddr,
    addrlen: socklen_t,
    noalias host: [*]u8,
    hostlen: socklen_t,
    noalias serv: [*]u8,
    servlen: socklen_t,
    flags: u32,
) EAI;
pub extern "c" fn gai_strerror(errcode: EAI) [*:0]const u8;
// I/O multiplexing.
pub extern "c" fn poll(fds: [*]pollfd, nfds: nfds_t, timeout: c_int) c_int;
pub extern "c" fn ppoll(fds: [*]pollfd, nfds: nfds_t, timeout: ?*const timespec, sigmask: ?*const sigset_t) c_int;
// DNS message name decompression (resolver library).
pub extern "c" fn dn_expand(
    msg: [*:0]const u8,
    eomorig: [*:0]const u8,
    comp_dn: [*:0]const u8,
    exp_dn: [*:0]u8,
    length: c_int,
) c_int;
// pthread mutexes, condition variables and rwlocks. The static
// initializers rely on the corresponding Zig struct defaults matching the
// C static-initializer bit patterns.
pub const PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{};
pub extern "c" fn pthread_mutex_lock(mutex: *pthread_mutex_t) c_int;
pub extern "c" fn pthread_mutex_unlock(mutex: *pthread_mutex_t) c_int;
pub extern "c" fn pthread_mutex_trylock(mutex: *pthread_mutex_t) c_int;
pub extern "c" fn pthread_mutex_destroy(mutex: *pthread_mutex_t) c_int;
pub const PTHREAD_COND_INITIALIZER = pthread_cond_t{};
pub extern "c" fn pthread_cond_wait(noalias cond: *pthread_cond_t, noalias mutex: *pthread_mutex_t) c_int;
pub extern "c" fn pthread_cond_timedwait(noalias cond: *pthread_cond_t, noalias mutex: *pthread_mutex_t, noalias abstime: *const timespec) c_int;
pub extern "c" fn pthread_cond_signal(cond: *pthread_cond_t) c_int;
pub extern "c" fn pthread_cond_broadcast(cond: *pthread_cond_t) c_int;
pub extern "c" fn pthread_cond_destroy(cond: *pthread_cond_t) c_int;
pub extern "c" fn pthread_rwlock_destroy(rwl: *pthread_rwlock_t) callconv(.C) c_int;
pub extern "c" fn pthread_rwlock_rdlock(rwl: *pthread_rwlock_t) callconv(.C) c_int;
pub extern "c" fn pthread_rwlock_wrlock(rwl: *pthread_rwlock_t) callconv(.C) c_int;
pub extern "c" fn pthread_rwlock_tryrdlock(rwl: *pthread_rwlock_t) callconv(.C) c_int;
pub extern "c" fn pthread_rwlock_trywrlock(rwl: *pthread_rwlock_t) callconv(.C) c_int;
pub extern "c" fn pthread_rwlock_unlock(rwl: *pthread_rwlock_t) callconv(.C) c_int;
// Opaque handle types whose layout is owned by libc.
pub const pthread_t = *opaque {};
pub const FILE = opaque {};
// Dynamic loading (dlfcn.h).
pub extern "c" fn dlopen(path: [*:0]const u8, mode: c_int) ?*c_void;
pub extern "c" fn dlclose(handle: *c_void) c_int;
pub extern "c" fn dlsym(handle: ?*c_void, symbol: [*:0]const u8) ?*c_void;
// Filesystem synchronization primitives.
pub extern "c" fn sync() void;
pub extern "c" fn syncfs(fd: c_int) c_int;
pub extern "c" fn fsync(fd: c_int) c_int;
pub extern "c" fn fdatasync(fd: c_int) c_int;
pub extern "c" fn prctl(option: c_int, ...) c_int;
// Resource limits.
pub extern "c" fn getrlimit(resource: rlimit_resource, rlim: *rlimit) c_int;
pub extern "c" fn setrlimit(resource: rlimit_resource, rlim: *const rlimit) c_int;
pub extern "c" fn fmemopen(noalias buf: ?*c_void, size: usize, noalias mode: [*:0]const u8) ?*FILE;
// System logging (syslog.h).
pub extern "c" fn syslog(priority: c_int, message: [*:0]const u8, ...) void;
pub extern "c" fn openlog(ident: [*:0]const u8, logopt: c_int, facility: c_int) void;
pub extern "c" fn closelog() void;
pub extern "c" fn setlogmask(maskpri: c_int) c_int;
// C's max_align_t equivalent: a type whose alignment is the strictest
// fundamental alignment on the target ABI.
pub const max_align_t = if (std.Target.current.abi == .msvc)
    f64
else if (std.Target.current.isDarwin())
    c_longdouble
else
    extern struct {
        a: c_longlong,
        b: c_longdouble,
    };
| https://raw.githubusercontent.com/creationix/zig-toolset/9ad208cd93d1f05eb772deff4af24f58eb42386f/zig-linux-x86_64-0.8.0-dev.1860+1fada3746/lib/std/c.zig |
const std = @import("std");
/// Build script for the "day-1" executable: sets up the install, `run`,
/// and `test` steps using the pre-0.10 std.build API.
pub fn build(b: *std.build.Builder) void {
    // Let the person invoking `zig build` choose the compilation target;
    // the default is the native host, and no restrictions are imposed.
    const build_target = b.standardTargetOptions(.{});

    // Likewise, let the invoker pick one of Debug, ReleaseSafe,
    // ReleaseFast, or ReleaseSmall.
    const build_mode = b.standardReleaseOptions();

    // The main executable artifact, installed by the default step.
    const day1_exe = b.addExecutable("day-1", "src/main.zig");
    day1_exe.setTarget(build_target);
    day1_exe.setBuildMode(build_mode);
    day1_exe.install();

    // `zig build run` — install first, then execute, forwarding any
    // command-line arguments after `--` to the program.
    const run_artifact = day1_exe.run();
    run_artifact.step.dependOn(b.getInstallStep());
    if (b.args) |forwarded_args| {
        run_artifact.addArgs(forwarded_args);
    }
    const run_step = b.step("run", "Run the app");
    run_step.dependOn(&run_artifact.step);

    // `zig build test` — compile and run the unit tests in src/main.zig.
    const unit_tests = b.addTest("src/main.zig");
    unit_tests.setTarget(build_target);
    unit_tests.setBuildMode(build_mode);
    const test_step = b.step("test", "Run unit tests");
    test_step.dependOn(&unit_tests.step);
}
| https://raw.githubusercontent.com/JoeyMckenzie/advent-of-code/05fffbcbaa2a5112c6fa740b55466ecacb70f956/2022/zig/day-1/build.zig |
//
// Now let's create a function that takes a parameter. Here's an
// example that takes two parameters. As you can see, parameters
// are declared just like any other types ("name": "type"):
//
// fn myFunction(number: u8, is_lucky: bool) {
// ...
// }
//
const std = @import("std");
/// Print the first four powers of two, computed via twoToThe().
pub fn main() void {
    // Compute each power up front, then emit them on a single line.
    const first = twoToThe(1);
    const second = twoToThe(2);
    const third = twoToThe(3);
    const fourth = twoToThe(4);
    std.debug.print("Powers of two: {} {} {} {}\n", .{ first, second, third, fourth });
}
// Please give this function the correct input parameter(s).
// You'll need to figure out the parameter name and type that we're
// expecting. The output type has already been specified for you.
//
// Raise 2 to the given power.
//
// The original exercise left the parameter list as `???`, which does not
// compile; the body uses `my_number`, so declare it here with the same
// u32 type the power computation expects.
fn twoToThe(my_number: u32) u32 {
    // std.math.pow(type, a, b) takes a numeric type and two numbers
    // of that type and returns "a to the power of b" as that same
    // numeric type.
    return std.math.pow(u32, 2, my_number);
}
| https://raw.githubusercontent.com/thomastanck/ziglings/2de5b69c3fb491cd4327a31d60258bef41e33bf7/exercises/019_functions2.zig |
const std = @import("std");
const isNan = std.math.isNan;
const isInf = std.math.isInf;
const copysign = std.math.copysign;
/// Returns a complex-number type with real and imaginary parts of type T.
/// Declared `extern` so its layout matches C's `_Complex T` (real part
/// first), allowing it to cross the C ABI boundary.
pub fn Complex(comptime T: type) type {
    return extern struct {
        real: T,
        imag: T,
    };
}
/// Complex multiplication: computes (a + ib) * (c + id) and returns it as
/// a Complex(T). Implementation based on Annex G of C17 Standard (N2176):
/// when the naive formula yields NaN in both parts, the special-case paths
/// below recover a correctly signed infinite result if an operand was
/// infinite (or the partial products overflowed).
pub inline fn mulc3(comptime T: type, a_in: T, b_in: T, c_in: T, d_in: T) Complex(T) {
    // Mutable copies: the recovery paths may rewrite the operands.
    var a = a_in;
    var b = b_in;
    var c = c_in;
    var d = d_in;
    // Partial products of the textbook formula
    // (a + ib)(c + id) = (ac - bd) + i(ad + bc).
    const ac = a * c;
    const bd = b * d;
    const ad = a * d;
    const bc = b * c;
    const zero: T = 0.0;
    const one: T = 1.0;
    var z = Complex(T){
        .real = ac - bd,
        .imag = ad + bc,
    };
    // Only if BOTH parts are NaN might an infinity have been lost
    // (e.g. inf * 0 -> NaN); otherwise the naive result stands.
    if (isNan(z.real) and isNan(z.imag)) {
        var recalc: bool = false;
        if (isInf(a) or isInf(b)) { // (a + ib) is infinite
            // "Box" the infinity (+/-inf goes to +/-1, all finite values go to 0)
            a = copysign(if (isInf(a)) one else zero, a);
            b = copysign(if (isInf(b)) one else zero, b);
            // Replace NaNs in the other factor with (signed) 0
            if (isNan(c)) c = copysign(zero, c);
            if (isNan(d)) d = copysign(zero, d);
            recalc = true;
        }
        if (isInf(c) or isInf(d)) { // (c + id) is infinite
            // "Box" the infinity (+/-inf goes to +/-1, all finite values go to 0)
            c = copysign(if (isInf(c)) one else zero, c);
            d = copysign(if (isInf(d)) one else zero, d);
            // Replace NaNs in the other factor with (signed) 0
            if (isNan(a)) a = copysign(zero, a);
            if (isNan(b)) b = copysign(zero, b);
            recalc = true;
        }
        if (!recalc and (isInf(ac) or isInf(bd) or isInf(ad) or isInf(bc))) {
            // Recover infinities from overflow by changing NaNs to 0
            if (isNan(a)) a = copysign(zero, a);
            if (isNan(b)) b = copysign(zero, b);
            if (isNan(c)) c = copysign(zero, c);
            if (isNan(d)) d = copysign(zero, d);
            recalc = true;
        }
        if (recalc) {
            // Redo the multiply with boxed operands and scale by infinity
            // so the sign of each infinite part comes out right.
            return .{
                .real = std.math.inf(T) * (a * c - b * d),
                .imag = std.math.inf(T) * (a * d + b * c),
            };
        }
    }
    return z;
}
| https://raw.githubusercontent.com/beingofexistence13/multiversal-lang/dd769e3fc6182c23ef43ed4479614f43f29738c9/zig/lib/compiler_rt/mulc3.zig |
const std = @import("std");
const isNan = std.math.isNan;
const isInf = std.math.isInf;
const copysign = std.math.copysign;
/// Returns a complex-number type with real and imaginary parts of type T.
/// Declared `extern` so its layout matches C's `_Complex T` (real part
/// first), allowing it to cross the C ABI boundary.
pub fn Complex(comptime T: type) type {
    return extern struct {
        real: T,
        imag: T,
    };
}
/// Complex multiplication: computes (a + ib) * (c + id) and returns it as
/// a Complex(T). Implementation based on Annex G of C17 Standard (N2176):
/// when the naive formula yields NaN in both parts, the special-case paths
/// below recover a correctly signed infinite result if an operand was
/// infinite (or the partial products overflowed).
pub inline fn mulc3(comptime T: type, a_in: T, b_in: T, c_in: T, d_in: T) Complex(T) {
    // Mutable copies: the recovery paths may rewrite the operands.
    var a = a_in;
    var b = b_in;
    var c = c_in;
    var d = d_in;
    // Partial products of the textbook formula
    // (a + ib)(c + id) = (ac - bd) + i(ad + bc).
    const ac = a * c;
    const bd = b * d;
    const ad = a * d;
    const bc = b * c;
    const zero: T = 0.0;
    const one: T = 1.0;
    var z = Complex(T){
        .real = ac - bd,
        .imag = ad + bc,
    };
    // Only if BOTH parts are NaN might an infinity have been lost
    // (e.g. inf * 0 -> NaN); otherwise the naive result stands.
    if (isNan(z.real) and isNan(z.imag)) {
        var recalc: bool = false;
        if (isInf(a) or isInf(b)) { // (a + ib) is infinite
            // "Box" the infinity (+/-inf goes to +/-1, all finite values go to 0)
            a = copysign(if (isInf(a)) one else zero, a);
            b = copysign(if (isInf(b)) one else zero, b);
            // Replace NaNs in the other factor with (signed) 0
            if (isNan(c)) c = copysign(zero, c);
            if (isNan(d)) d = copysign(zero, d);
            recalc = true;
        }
        if (isInf(c) or isInf(d)) { // (c + id) is infinite
            // "Box" the infinity (+/-inf goes to +/-1, all finite values go to 0)
            c = copysign(if (isInf(c)) one else zero, c);
            d = copysign(if (isInf(d)) one else zero, d);
            // Replace NaNs in the other factor with (signed) 0
            if (isNan(a)) a = copysign(zero, a);
            if (isNan(b)) b = copysign(zero, b);
            recalc = true;
        }
        if (!recalc and (isInf(ac) or isInf(bd) or isInf(ad) or isInf(bc))) {
            // Recover infinities from overflow by changing NaNs to 0
            if (isNan(a)) a = copysign(zero, a);
            if (isNan(b)) b = copysign(zero, b);
            if (isNan(c)) c = copysign(zero, c);
            if (isNan(d)) d = copysign(zero, d);
            recalc = true;
        }
        if (recalc) {
            // Redo the multiply with boxed operands and scale by infinity
            // so the sign of each infinite part comes out right.
            return .{
                .real = std.math.inf(T) * (a * c - b * d),
                .imag = std.math.inf(T) * (a * d + b * c),
            };
        }
    }
    return z;
}
| https://raw.githubusercontent.com/mundusnine/FoundryTools_windows_x64/b64cdb7e56db28eb710a05a089aed0daff8bc8be/lib/compiler_rt/mulc3.zig |
const std = @import("std");
const isNan = std.math.isNan;
const isInf = std.math.isInf;
const copysign = std.math.copysign;
/// Returns a complex-number type with real and imaginary parts of type T.
/// Declared `extern` so its layout matches C's `_Complex T` (real part
/// first), allowing it to cross the C ABI boundary.
pub fn Complex(comptime T: type) type {
    return extern struct {
        real: T,
        imag: T,
    };
}
/// Complex multiplication: computes (a + ib) * (c + id) and returns it as
/// a Complex(T). Implementation based on Annex G of C17 Standard (N2176):
/// when the naive formula yields NaN in both parts, the special-case paths
/// below recover a correctly signed infinite result if an operand was
/// infinite (or the partial products overflowed).
pub inline fn mulc3(comptime T: type, a_in: T, b_in: T, c_in: T, d_in: T) Complex(T) {
    // Mutable copies: the recovery paths may rewrite the operands.
    var a = a_in;
    var b = b_in;
    var c = c_in;
    var d = d_in;
    // Partial products of the textbook formula
    // (a + ib)(c + id) = (ac - bd) + i(ad + bc).
    const ac = a * c;
    const bd = b * d;
    const ad = a * d;
    const bc = b * c;
    const zero: T = 0.0;
    const one: T = 1.0;
    var z = Complex(T){
        .real = ac - bd,
        .imag = ad + bc,
    };
    // Only if BOTH parts are NaN might an infinity have been lost
    // (e.g. inf * 0 -> NaN); otherwise the naive result stands.
    if (isNan(z.real) and isNan(z.imag)) {
        var recalc: bool = false;
        if (isInf(a) or isInf(b)) { // (a + ib) is infinite
            // "Box" the infinity (+/-inf goes to +/-1, all finite values go to 0)
            a = copysign(if (isInf(a)) one else zero, a);
            b = copysign(if (isInf(b)) one else zero, b);
            // Replace NaNs in the other factor with (signed) 0
            if (isNan(c)) c = copysign(zero, c);
            if (isNan(d)) d = copysign(zero, d);
            recalc = true;
        }
        if (isInf(c) or isInf(d)) { // (c + id) is infinite
            // "Box" the infinity (+/-inf goes to +/-1, all finite values go to 0)
            c = copysign(if (isInf(c)) one else zero, c);
            d = copysign(if (isInf(d)) one else zero, d);
            // Replace NaNs in the other factor with (signed) 0
            if (isNan(a)) a = copysign(zero, a);
            if (isNan(b)) b = copysign(zero, b);
            recalc = true;
        }
        if (!recalc and (isInf(ac) or isInf(bd) or isInf(ad) or isInf(bc))) {
            // Recover infinities from overflow by changing NaNs to 0
            if (isNan(a)) a = copysign(zero, a);
            if (isNan(b)) b = copysign(zero, b);
            if (isNan(c)) c = copysign(zero, c);
            if (isNan(d)) d = copysign(zero, d);
            recalc = true;
        }
        if (recalc) {
            // Redo the multiply with boxed operands and scale by infinity
            // so the sign of each infinite part comes out right.
            return .{
                .real = std.math.inf(T) * (a * c - b * d),
                .imag = std.math.inf(T) * (a * d + b * c),
            };
        }
    }
    return z;
}
| https://raw.githubusercontent.com/mundusnine/FoundryTools_linux_x64/98e738bf92a416b255c9d11b78e8033071b52672/lib/compiler_rt/mulc3.zig |
const std = @import("std");
const isNan = std.math.isNan;
const isInf = std.math.isInf;
const copysign = std.math.copysign;
/// Returns a complex-number type with real and imaginary parts of type T.
/// Declared `extern` so its layout matches C's `_Complex T` (real part
/// first), allowing it to cross the C ABI boundary.
pub fn Complex(comptime T: type) type {
    return extern struct {
        real: T,
        imag: T,
    };
}
/// Complex multiplication: computes (a + ib) * (c + id) and returns it as
/// a Complex(T). Implementation based on Annex G of C17 Standard (N2176):
/// when the naive formula yields NaN in both parts, the special-case paths
/// below recover a correctly signed infinite result if an operand was
/// infinite (or the partial products overflowed).
pub inline fn mulc3(comptime T: type, a_in: T, b_in: T, c_in: T, d_in: T) Complex(T) {
    // Mutable copies: the recovery paths may rewrite the operands.
    var a = a_in;
    var b = b_in;
    var c = c_in;
    var d = d_in;
    // Partial products of the textbook formula
    // (a + ib)(c + id) = (ac - bd) + i(ad + bc).
    const ac = a * c;
    const bd = b * d;
    const ad = a * d;
    const bc = b * c;
    const zero: T = 0.0;
    const one: T = 1.0;
    var z = Complex(T){
        .real = ac - bd,
        .imag = ad + bc,
    };
    // Only if BOTH parts are NaN might an infinity have been lost
    // (e.g. inf * 0 -> NaN); otherwise the naive result stands.
    if (isNan(z.real) and isNan(z.imag)) {
        var recalc: bool = false;
        if (isInf(a) or isInf(b)) { // (a + ib) is infinite
            // "Box" the infinity (+/-inf goes to +/-1, all finite values go to 0)
            a = copysign(if (isInf(a)) one else zero, a);
            b = copysign(if (isInf(b)) one else zero, b);
            // Replace NaNs in the other factor with (signed) 0
            if (isNan(c)) c = copysign(zero, c);
            if (isNan(d)) d = copysign(zero, d);
            recalc = true;
        }
        if (isInf(c) or isInf(d)) { // (c + id) is infinite
            // "Box" the infinity (+/-inf goes to +/-1, all finite values go to 0)
            c = copysign(if (isInf(c)) one else zero, c);
            d = copysign(if (isInf(d)) one else zero, d);
            // Replace NaNs in the other factor with (signed) 0
            if (isNan(a)) a = copysign(zero, a);
            if (isNan(b)) b = copysign(zero, b);
            recalc = true;
        }
        if (!recalc and (isInf(ac) or isInf(bd) or isInf(ad) or isInf(bc))) {
            // Recover infinities from overflow by changing NaNs to 0
            if (isNan(a)) a = copysign(zero, a);
            if (isNan(b)) b = copysign(zero, b);
            if (isNan(c)) c = copysign(zero, c);
            if (isNan(d)) d = copysign(zero, d);
            recalc = true;
        }
        if (recalc) {
            // Redo the multiply with boxed operands and scale by infinity
            // so the sign of each infinite part comes out right.
            return .{
                .real = std.math.inf(T) * (a * c - b * d),
                .imag = std.math.inf(T) * (a * d + b * c),
            };
        }
    }
    return z;
}
| https://raw.githubusercontent.com/matpx/daydream/018ad0c7caaf796d8a04b882fcbed39ccb7c9cd8/toolchain/zig/lib/compiler_rt/mulc3.zig |
//! This file is auto-generated by tools/update_cpu_features.zig.
const std = @import("../std.zig");
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
/// AArch64 CPU feature flags, mirroring LLVM's AArch64 subtarget
/// features. The set and ordering are produced by
/// tools/update_cpu_features.zig — do not edit by hand; regenerate
/// instead. Field names are snake_case translations of the LLVM
/// feature names (see all_features below for descriptions).
pub const Feature = enum {
    a510,
    a65,
    a710,
    a76,
    a78,
    a78c,
    aes,
    aggressive_fma,
    alternate_sextload_cvt_f32_pattern,
    altnzcv,
    am,
    amvs,
    arith_bcc_fusion,
    arith_cbz_fusion,
    ascend_store_address,
    b16b16,
    balance_fp_ops,
    bf16,
    brbe,
    bti,
    call_saved_x10,
    call_saved_x11,
    call_saved_x12,
    call_saved_x13,
    call_saved_x14,
    call_saved_x15,
    call_saved_x18,
    call_saved_x8,
    call_saved_x9,
    ccdp,
    ccidx,
    ccpp,
    chk,
    clrbhb,
    cmp_bcc_fusion,
    complxnum,
    contextidr_el2,
    cortex_r82,
    crc,
    crypto,
    cssc,
    custom_cheap_as_move,
    d128,
    disable_latency_sched_heuristic,
    dit,
    dotprod,
    ecv,
    el2vmsa,
    el3,
    enable_select_opt,
    ete,
    exynos_cheap_as_move,
    f32mm,
    f64mm,
    fgt,
    fix_cortex_a53_835769,
    flagm,
    fmv,
    force_32bit_jump_tables,
    fp16fml,
    fp_armv8,
    fptoint,
    fullfp16,
    fuse_address,
    fuse_addsub_2reg_const1,
    fuse_adrp_add,
    fuse_aes,
    fuse_arith_logic,
    fuse_crypto_eor,
    fuse_csel,
    fuse_literals,
    gcs,
    harden_sls_blr,
    harden_sls_nocomdat,
    harden_sls_retbr,
    hbc,
    hcx,
    i8mm,
    ite,
    jsconv,
    lor,
    ls64,
    lse,
    lse128,
    lse2,
    lsl_fast,
    mec,
    mops,
    mpam,
    mte,
    neon,
    nmi,
    no_bti_at_return_twice,
    no_neg_immediates,
    no_sve_fp_ld1r,
    no_zcz_fp,
    nv,
    outline_atomics,
    pan,
    pan_rwv,
    pauth,
    perfmon,
    predictable_select_expensive,
    predres,
    prfm_slc_target,
    rand,
    ras,
    rasv2,
    rcpc,
    rcpc3,
    rcpc_immo,
    rdm,
    reserve_x1,
    reserve_x10,
    reserve_x11,
    reserve_x12,
    reserve_x13,
    reserve_x14,
    reserve_x15,
    reserve_x18,
    reserve_x2,
    reserve_x20,
    reserve_x21,
    reserve_x22,
    reserve_x23,
    reserve_x24,
    reserve_x25,
    reserve_x26,
    reserve_x27,
    reserve_x28,
    reserve_x3,
    reserve_x30,
    reserve_x4,
    reserve_x5,
    reserve_x6,
    reserve_x7,
    reserve_x9,
    rme,
    sb,
    sel2,
    sha2,
    sha3,
    slow_misaligned_128store,
    slow_paired_128,
    slow_strqro_store,
    sm4,
    sme,
    sme2,
    sme2p1,
    sme_f16f16,
    sme_f64f64,
    sme_i16i64,
    spe,
    spe_eef,
    specres2,
    specrestrict,
    ssbs,
    strict_align,
    sve,
    sve2,
    sve2_aes,
    sve2_bitperm,
    sve2_sha3,
    sve2_sm4,
    sve2p1,
    tagged_globals,
    the,
    tlb_rmi,
    tme,
    tpidr_el1,
    tpidr_el2,
    tpidr_el3,
    tpidrro_el0,
    tracev8_4,
    trbe,
    uaops,
    use_experimental_zeroing_pseudos,
    use_postra_scheduler,
    use_reciprocal_square_root,
    use_scalar_inc_vl,
    v8_1a,
    v8_2a,
    v8_3a,
    v8_4a,
    v8_5a,
    v8_6a,
    v8_7a,
    v8_8a,
    v8_9a,
    v8a,
    v8r,
    v9_1a,
    v9_2a,
    v9_3a,
    v9_4a,
    v9a,
    vh,
    wfxt,
    xs,
    zcm,
    zcz,
    zcz_fp_workaround,
    zcz_gp,
};
// Convenience re-exports of the generic feature-set helpers from
// std.Target.Cpu.Feature, specialized to this architecture's Feature enum.
pub const featureSet = CpuFeature.feature_set_fns(Feature).featureSet;
pub const featureSetHas = CpuFeature.feature_set_fns(Feature).featureSetHas;
pub const featureSetHasAny = CpuFeature.feature_set_fns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.feature_set_fns(Feature).featureSetHasAll;
pub const all_features = blk: {
@setEvalBranchQuota(2000);
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.a510)] = .{
.llvm_name = "a510",
.description = "Cortex-A510 ARM processors",
.dependencies = featureSet(&[_]Feature{
.fuse_adrp_add,
.fuse_aes,
.use_postra_scheduler,
}),
};
result[@intFromEnum(Feature.a65)] = .{
.llvm_name = "a65",
.description = "Cortex-A65 ARM processors",
.dependencies = featureSet(&[_]Feature{
.enable_select_opt,
.fuse_address,
.fuse_adrp_add,
.fuse_aes,
.fuse_literals,
.predictable_select_expensive,
}),
};
result[@intFromEnum(Feature.a710)] = .{
.llvm_name = "a710",
.description = "Cortex-A710 ARM processors",
.dependencies = featureSet(&[_]Feature{
.cmp_bcc_fusion,
.enable_select_opt,
.fuse_adrp_add,
.fuse_aes,
.lsl_fast,
.predictable_select_expensive,
.use_postra_scheduler,
}),
};
result[@intFromEnum(Feature.a76)] = .{
.llvm_name = "a76",
.description = "Cortex-A76 ARM processors",
.dependencies = featureSet(&[_]Feature{
.enable_select_opt,
.fuse_adrp_add,
.fuse_aes,
.lsl_fast,
.predictable_select_expensive,
}),
};
result[@intFromEnum(Feature.a78)] = .{
.llvm_name = "a78",
.description = "Cortex-A78 ARM processors",
.dependencies = featureSet(&[_]Feature{
.cmp_bcc_fusion,
.enable_select_opt,
.fuse_adrp_add,
.fuse_aes,
.lsl_fast,
.predictable_select_expensive,
.use_postra_scheduler,
}),
};
result[@intFromEnum(Feature.a78c)] = .{
.llvm_name = "a78c",
.description = "Cortex-A78C ARM processors",
.dependencies = featureSet(&[_]Feature{
.cmp_bcc_fusion,
.enable_select_opt,
.fuse_adrp_add,
.fuse_aes,
.lsl_fast,
.predictable_select_expensive,
.use_postra_scheduler,
}),
};
result[@intFromEnum(Feature.aes)] = .{
.llvm_name = "aes",
.description = "Enable AES support (FEAT_AES, FEAT_PMULL)",
.dependencies = featureSet(&[_]Feature{
.neon,
}),
};
result[@intFromEnum(Feature.aggressive_fma)] = .{
.llvm_name = "aggressive-fma",
.description = "Enable Aggressive FMA for floating-point.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.alternate_sextload_cvt_f32_pattern)] = .{
.llvm_name = "alternate-sextload-cvt-f32-pattern",
.description = "Use alternative pattern for sextload convert to f32",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.altnzcv)] = .{
.llvm_name = "altnzcv",
.description = "Enable alternative NZCV format for floating point comparisons (FEAT_FlagM2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.am)] = .{
.llvm_name = "am",
.description = "Enable v8.4-A Activity Monitors extension (FEAT_AMUv1)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.amvs)] = .{
.llvm_name = "amvs",
.description = "Enable v8.6-A Activity Monitors Virtualization support (FEAT_AMUv1p1)",
.dependencies = featureSet(&[_]Feature{
.am,
}),
};
result[@intFromEnum(Feature.arith_bcc_fusion)] = .{
.llvm_name = "arith-bcc-fusion",
.description = "CPU fuses arithmetic+bcc operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.arith_cbz_fusion)] = .{
.llvm_name = "arith-cbz-fusion",
.description = "CPU fuses arithmetic + cbz/cbnz operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ascend_store_address)] = .{
.llvm_name = "ascend-store-address",
.description = "Schedule vector stores by ascending address",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.b16b16)] = .{
.llvm_name = "b16b16",
.description = "Enable SVE2.1 or SME2.1 non-widening BFloat16 to BFloat16 instructions (FEAT_B16B16)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.balance_fp_ops)] = .{
.llvm_name = "balance-fp-ops",
.description = "balance mix of odd and even D-registers for fp multiply(-accumulate) ops",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.bf16)] = .{
.llvm_name = "bf16",
.description = "Enable BFloat16 Extension (FEAT_BF16)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.brbe)] = .{
.llvm_name = "brbe",
.description = "Enable Branch Record Buffer Extension (FEAT_BRBE)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.bti)] = .{
.llvm_name = "bti",
.description = "Enable Branch Target Identification (FEAT_BTI)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x10)] = .{
.llvm_name = "call-saved-x10",
.description = "Make X10 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x11)] = .{
.llvm_name = "call-saved-x11",
.description = "Make X11 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x12)] = .{
.llvm_name = "call-saved-x12",
.description = "Make X12 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x13)] = .{
.llvm_name = "call-saved-x13",
.description = "Make X13 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x14)] = .{
.llvm_name = "call-saved-x14",
.description = "Make X14 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x15)] = .{
.llvm_name = "call-saved-x15",
.description = "Make X15 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x18)] = .{
.llvm_name = "call-saved-x18",
.description = "Make X18 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x8)] = .{
.llvm_name = "call-saved-x8",
.description = "Make X8 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x9)] = .{
.llvm_name = "call-saved-x9",
.description = "Make X9 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ccdp)] = .{
.llvm_name = "ccdp",
.description = "Enable v8.5 Cache Clean to Point of Deep Persistence (FEAT_DPB2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ccidx)] = .{
.llvm_name = "ccidx",
.description = "Enable v8.3-A Extend of the CCSIDR number of sets (FEAT_CCIDX)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ccpp)] = .{
.llvm_name = "ccpp",
.description = "Enable v8.2 data Cache Clean to Point of Persistence (FEAT_DPB)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.chk)] = .{
.llvm_name = "chk",
.description = "Enable Armv8.0-A Check Feature Status Extension (FEAT_CHK)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.clrbhb)] = .{
.llvm_name = "clrbhb",
.description = "Enable Clear BHB instruction (FEAT_CLRBHB)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.cmp_bcc_fusion)] = .{
.llvm_name = "cmp-bcc-fusion",
.description = "CPU fuses cmp+bcc operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.complxnum)] = .{
.llvm_name = "complxnum",
.description = "Enable v8.3-A Floating-point complex number support (FEAT_FCMA)",
.dependencies = featureSet(&[_]Feature{
.neon,
}),
};
result[@intFromEnum(Feature.contextidr_el2)] = .{
.llvm_name = "CONTEXTIDREL2",
.description = "Enable RW operand Context ID Register (EL2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.cortex_r82)] = .{
.llvm_name = "cortex-r82",
.description = "Cortex-R82 ARM processors",
.dependencies = featureSet(&[_]Feature{
.use_postra_scheduler,
}),
};
result[@intFromEnum(Feature.crc)] = .{
.llvm_name = "crc",
.description = "Enable ARMv8 CRC-32 checksum instructions (FEAT_CRC32)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.crypto)] = .{
.llvm_name = "crypto",
.description = "Enable cryptographic instructions",
.dependencies = featureSet(&[_]Feature{
.aes,
.sha2,
}),
};
result[@intFromEnum(Feature.cssc)] = .{
.llvm_name = "cssc",
.description = "Enable Common Short Sequence Compression (CSSC) instructions (FEAT_CSSC)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.custom_cheap_as_move)] = .{
.llvm_name = "custom-cheap-as-move",
.description = "Use custom handling of cheap instructions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.d128)] = .{
.llvm_name = "d128",
.description = "Enable Armv9.4-A 128-bit Page Table Descriptors, System Registers and Instructions (FEAT_D128, FEAT_LVA3, FEAT_SYSREG128, FEAT_SYSINSTR128)",
.dependencies = featureSet(&[_]Feature{
.lse128,
}),
};
result[@intFromEnum(Feature.disable_latency_sched_heuristic)] = .{
.llvm_name = "disable-latency-sched-heuristic",
.description = "Disable latency scheduling heuristic",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.dit)] = .{
.llvm_name = "dit",
.description = "Enable v8.4-A Data Independent Timing instructions (FEAT_DIT)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.dotprod)] = .{
.llvm_name = "dotprod",
.description = "Enable dot product support (FEAT_DotProd)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ecv)] = .{
.llvm_name = "ecv",
.description = "Enable enhanced counter virtualization extension (FEAT_ECV)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.el2vmsa)] = .{
.llvm_name = "el2vmsa",
.description = "Enable Exception Level 2 Virtual Memory System Architecture",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.el3)] = .{
.llvm_name = "el3",
.description = "Enable Exception Level 3",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.enable_select_opt)] = .{
.llvm_name = "enable-select-opt",
.description = "Enable the select optimize pass for select loop heuristics",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ete)] = .{
.llvm_name = "ete",
.description = "Enable Embedded Trace Extension (FEAT_ETE)",
.dependencies = featureSet(&[_]Feature{
.trbe,
}),
};
result[@intFromEnum(Feature.exynos_cheap_as_move)] = .{
.llvm_name = "exynos-cheap-as-move",
.description = "Use Exynos specific handling of cheap instructions",
.dependencies = featureSet(&[_]Feature{
.custom_cheap_as_move,
}),
};
result[@intFromEnum(Feature.f32mm)] = .{
.llvm_name = "f32mm",
.description = "Enable Matrix Multiply FP32 Extension (FEAT_F32MM)",
.dependencies = featureSet(&[_]Feature{
.sve,
}),
};
result[@intFromEnum(Feature.f64mm)] = .{
.llvm_name = "f64mm",
.description = "Enable Matrix Multiply FP64 Extension (FEAT_F64MM)",
.dependencies = featureSet(&[_]Feature{
.sve,
}),
};
result[@intFromEnum(Feature.fgt)] = .{
.llvm_name = "fgt",
.description = "Enable fine grained virtualization traps extension (FEAT_FGT)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fix_cortex_a53_835769)] = .{
.llvm_name = "fix-cortex-a53-835769",
.description = "Mitigate Cortex-A53 Erratum 835769",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.flagm)] = .{
.llvm_name = "flagm",
.description = "Enable v8.4-A Flag Manipulation Instructions (FEAT_FlagM)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fmv)] = .{
.llvm_name = "fmv",
.description = "Enable Function Multi Versioning support.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.force_32bit_jump_tables)] = .{
.llvm_name = "force-32bit-jump-tables",
.description = "Force jump table entries to be 32-bits wide except at MinSize",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fp16fml)] = .{
.llvm_name = "fp16fml",
.description = "Enable FP16 FML instructions (FEAT_FHM)",
.dependencies = featureSet(&[_]Feature{
.fullfp16,
}),
};
result[@intFromEnum(Feature.fp_armv8)] = .{
.llvm_name = "fp-armv8",
.description = "Enable ARMv8 FP (FEAT_FP)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fptoint)] = .{
.llvm_name = "fptoint",
.description = "Enable FRInt[32|64][Z|X] instructions that round a floating-point number to an integer (in FP format) forcing it to fit into a 32- or 64-bit int (FEAT_FRINTTS)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fullfp16)] = .{
.llvm_name = "fullfp16",
.description = "Full FP16 (FEAT_FP16)",
.dependencies = featureSet(&[_]Feature{
.fp_armv8,
}),
};
result[@intFromEnum(Feature.fuse_address)] = .{
.llvm_name = "fuse-address",
.description = "CPU fuses address generation and memory operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_addsub_2reg_const1)] = .{
.llvm_name = "fuse-addsub-2reg-const1",
.description = "CPU fuses (a + b + 1) and (a - b - 1)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_adrp_add)] = .{
.llvm_name = "fuse-adrp-add",
.description = "CPU fuses adrp+add operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_aes)] = .{
.llvm_name = "fuse-aes",
.description = "CPU fuses AES crypto operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_arith_logic)] = .{
.llvm_name = "fuse-arith-logic",
.description = "CPU fuses arithmetic and logic operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_crypto_eor)] = .{
.llvm_name = "fuse-crypto-eor",
.description = "CPU fuses AES/PMULL and EOR operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_csel)] = .{
.llvm_name = "fuse-csel",
.description = "CPU fuses conditional select operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_literals)] = .{
.llvm_name = "fuse-literals",
.description = "CPU fuses literal generation operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.gcs)] = .{
.llvm_name = "gcs",
.description = "Enable Armv9.4-A Guarded Call Stack Extension",
.dependencies = featureSet(&[_]Feature{
.chk,
}),
};
result[@intFromEnum(Feature.harden_sls_blr)] = .{
.llvm_name = "harden-sls-blr",
.description = "Harden against straight line speculation across BLR instructions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.harden_sls_nocomdat)] = .{
.llvm_name = "harden-sls-nocomdat",
.description = "Generate thunk code for SLS mitigation in the normal text section",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.harden_sls_retbr)] = .{
.llvm_name = "harden-sls-retbr",
.description = "Harden against straight line speculation across RET and BR instructions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.hbc)] = .{
.llvm_name = "hbc",
.description = "Enable Armv8.8-A Hinted Conditional Branches Extension (FEAT_HBC)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.hcx)] = .{
.llvm_name = "hcx",
.description = "Enable Armv8.7-A HCRX_EL2 system register (FEAT_HCX)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.i8mm)] = .{
.llvm_name = "i8mm",
.description = "Enable Matrix Multiply Int8 Extension (FEAT_I8MM)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ite)] = .{
.llvm_name = "ite",
.description = "Enable Armv9.4-A Instrumentation Extension FEAT_ITE",
.dependencies = featureSet(&[_]Feature{
.ete,
}),
};
result[@intFromEnum(Feature.jsconv)] = .{
.llvm_name = "jsconv",
.description = "Enable v8.3-A JavaScript FP conversion instructions (FEAT_JSCVT)",
.dependencies = featureSet(&[_]Feature{
.fp_armv8,
}),
};
result[@intFromEnum(Feature.lor)] = .{
.llvm_name = "lor",
.description = "Enables ARM v8.1 Limited Ordering Regions extension (FEAT_LOR)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ls64)] = .{
.llvm_name = "ls64",
.description = "Enable Armv8.7-A LD64B/ST64B Accelerator Extension (FEAT_LS64, FEAT_LS64_V, FEAT_LS64_ACCDATA)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.lse)] = .{
.llvm_name = "lse",
.description = "Enable ARMv8.1 Large System Extension (LSE) atomic instructions (FEAT_LSE)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.lse128)] = .{
.llvm_name = "lse128",
.description = "Enable Armv9.4-A 128-bit Atomic Instructions (FEAT_LSE128)",
.dependencies = featureSet(&[_]Feature{
.lse,
}),
};
result[@intFromEnum(Feature.lse2)] = .{
.llvm_name = "lse2",
.description = "Enable ARMv8.4 Large System Extension 2 (LSE2) atomicity rules (FEAT_LSE2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.lsl_fast)] = .{
.llvm_name = "lsl-fast",
.description = "CPU has a fastpath logical shift of up to 3 places",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.mec)] = .{
.llvm_name = "mec",
.description = "Enable Memory Encryption Contexts Extension",
.dependencies = featureSet(&[_]Feature{
.rme,
}),
};
result[@intFromEnum(Feature.mops)] = .{
.llvm_name = "mops",
.description = "Enable Armv8.8-A memcpy and memset acceleration instructions (FEAT_MOPS)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.mpam)] = .{
.llvm_name = "mpam",
.description = "Enable v8.4-A Memory system Partitioning and Monitoring extension (FEAT_MPAM)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.mte)] = .{
.llvm_name = "mte",
.description = "Enable Memory Tagging Extension (FEAT_MTE, FEAT_MTE2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.neon)] = .{
.llvm_name = "neon",
.description = "Enable Advanced SIMD instructions (FEAT_AdvSIMD)",
.dependencies = featureSet(&[_]Feature{
.fp_armv8,
}),
};
result[@intFromEnum(Feature.nmi)] = .{
.llvm_name = "nmi",
.description = "Enable Armv8.8-A Non-maskable Interrupts (FEAT_NMI, FEAT_GICv3_NMI)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.no_bti_at_return_twice)] = .{
.llvm_name = "no-bti-at-return-twice",
.description = "Don't place a BTI instruction after a return-twice",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.no_neg_immediates)] = .{
.llvm_name = "no-neg-immediates",
.description = "Convert immediates and instructions to their negated or complemented equivalent when the immediate does not fit in the encoding.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.no_sve_fp_ld1r)] = .{
.llvm_name = "no-sve-fp-ld1r",
.description = "Avoid using LD1RX instructions for FP",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.no_zcz_fp)] = .{
.llvm_name = "no-zcz-fp",
.description = "Has no zero-cycle zeroing instructions for FP registers",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.nv)] = .{
.llvm_name = "nv",
.description = "Enable v8.4-A Nested Virtualization Enchancement (FEAT_NV, FEAT_NV2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.outline_atomics)] = .{
.llvm_name = "outline-atomics",
.description = "Enable out of line atomics to support LSE instructions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.pan)] = .{
.llvm_name = "pan",
.description = "Enables ARM v8.1 Privileged Access-Never extension (FEAT_PAN)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.pan_rwv)] = .{
.llvm_name = "pan-rwv",
.description = "Enable v8.2 PAN s1e1R and s1e1W Variants (FEAT_PAN2)",
.dependencies = featureSet(&[_]Feature{
.pan,
}),
};
result[@intFromEnum(Feature.pauth)] = .{
.llvm_name = "pauth",
.description = "Enable v8.3-A Pointer Authentication extension (FEAT_PAuth)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.perfmon)] = .{
.llvm_name = "perfmon",
.description = "Enable Code Generation for ARMv8 PMUv3 Performance Monitors extension (FEAT_PMUv3)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.predictable_select_expensive)] = .{
.llvm_name = "predictable-select-expensive",
.description = "Prefer likely predicted branches over selects",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.predres)] = .{
.llvm_name = "predres",
.description = "Enable v8.5a execution and data prediction invalidation instructions (FEAT_SPECRES)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.prfm_slc_target)] = .{
.llvm_name = "prfm-slc-target",
.description = "Enable SLC target for PRFM instruction",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.rand)] = .{
.llvm_name = "rand",
.description = "Enable Random Number generation instructions (FEAT_RNG)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ras)] = .{
.llvm_name = "ras",
.description = "Enable ARMv8 Reliability, Availability and Serviceability Extensions (FEAT_RAS, FEAT_RASv1p1)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.rasv2)] = .{
.llvm_name = "rasv2",
.description = "Enable ARMv8.9-A Reliability, Availability and Serviceability Extensions (FEAT_RASv2)",
.dependencies = featureSet(&[_]Feature{
.ras,
}),
};
result[@intFromEnum(Feature.rcpc)] = .{
.llvm_name = "rcpc",
.description = "Enable support for RCPC extension (FEAT_LRCPC)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.rcpc3)] = .{
.llvm_name = "rcpc3",
.description = "Enable Armv8.9-A RCPC instructions for A64 and Advanced SIMD and floating-point instruction set (FEAT_LRCPC3)",
.dependencies = featureSet(&[_]Feature{
.rcpc_immo,
}),
};
result[@intFromEnum(Feature.rcpc_immo)] = .{
.llvm_name = "rcpc-immo",
.description = "Enable v8.4-A RCPC instructions with Immediate Offsets (FEAT_LRCPC2)",
.dependencies = featureSet(&[_]Feature{
.rcpc,
}),
};
result[@intFromEnum(Feature.rdm)] = .{
.llvm_name = "rdm",
.description = "Enable ARMv8.1 Rounding Double Multiply Add/Subtract instructions (FEAT_RDM)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x1)] = .{
.llvm_name = "reserve-x1",
.description = "Reserve X1, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x10)] = .{
.llvm_name = "reserve-x10",
.description = "Reserve X10, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x11)] = .{
.llvm_name = "reserve-x11",
.description = "Reserve X11, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x12)] = .{
.llvm_name = "reserve-x12",
.description = "Reserve X12, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x13)] = .{
.llvm_name = "reserve-x13",
.description = "Reserve X13, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x14)] = .{
.llvm_name = "reserve-x14",
.description = "Reserve X14, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x15)] = .{
.llvm_name = "reserve-x15",
.description = "Reserve X15, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x18)] = .{
.llvm_name = "reserve-x18",
.description = "Reserve X18, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x2)] = .{
.llvm_name = "reserve-x2",
.description = "Reserve X2, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x20)] = .{
.llvm_name = "reserve-x20",
.description = "Reserve X20, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x21)] = .{
.llvm_name = "reserve-x21",
.description = "Reserve X21, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x22)] = .{
.llvm_name = "reserve-x22",
.description = "Reserve X22, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x23)] = .{
.llvm_name = "reserve-x23",
.description = "Reserve X23, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x24)] = .{
.llvm_name = "reserve-x24",
.description = "Reserve X24, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x25)] = .{
.llvm_name = "reserve-x25",
.description = "Reserve X25, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x26)] = .{
.llvm_name = "reserve-x26",
.description = "Reserve X26, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x27)] = .{
.llvm_name = "reserve-x27",
.description = "Reserve X27, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x28)] = .{
.llvm_name = "reserve-x28",
.description = "Reserve X28, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x3)] = .{
.llvm_name = "reserve-x3",
.description = "Reserve X3, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x30)] = .{
.llvm_name = "reserve-x30",
.description = "Reserve X30, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x4)] = .{
.llvm_name = "reserve-x4",
.description = "Reserve X4, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x5)] = .{
.llvm_name = "reserve-x5",
.description = "Reserve X5, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x6)] = .{
.llvm_name = "reserve-x6",
.description = "Reserve X6, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x7)] = .{
.llvm_name = "reserve-x7",
.description = "Reserve X7, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x9)] = .{
.llvm_name = "reserve-x9",
.description = "Reserve X9, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.rme)] = .{
.llvm_name = "rme",
.description = "Enable Realm Management Extension (FEAT_RME)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.sb)] = .{
.llvm_name = "sb",
.description = "Enable v8.5 Speculation Barrier (FEAT_SB)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.sel2)] = .{
.llvm_name = "sel2",
.description = "Enable v8.4-A Secure Exception Level 2 extension (FEAT_SEL2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.sha2)] = .{
.llvm_name = "sha2",
.description = "Enable SHA1 and SHA256 support (FEAT_SHA1, FEAT_SHA256)",
.dependencies = featureSet(&[_]Feature{
.neon,
}),
};
result[@intFromEnum(Feature.sha3)] = .{
.llvm_name = "sha3",
.description = "Enable SHA512 and SHA3 support (FEAT_SHA3, FEAT_SHA512)",
.dependencies = featureSet(&[_]Feature{
.sha2,
}),
};
result[@intFromEnum(Feature.slow_misaligned_128store)] = .{
.llvm_name = "slow-misaligned-128store",
.description = "Misaligned 128 bit stores are slow",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.slow_paired_128)] = .{
.llvm_name = "slow-paired-128",
.description = "Paired 128 bit loads and stores are slow",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.slow_strqro_store)] = .{
.llvm_name = "slow-strqro-store",
.description = "STR of Q register with register offset is slow",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.sm4)] = .{
.llvm_name = "sm4",
.description = "Enable SM3 and SM4 support (FEAT_SM4, FEAT_SM3)",
.dependencies = featureSet(&[_]Feature{
.neon,
}),
};
result[@intFromEnum(Feature.sme)] = .{
.llvm_name = "sme",
.description = "Enable Scalable Matrix Extension (SME) (FEAT_SME)",
.dependencies = featureSet(&[_]Feature{
.bf16,
.use_scalar_inc_vl,
}),
};
result[@intFromEnum(Feature.sme2)] = .{
.llvm_name = "sme2",
.description = "Enable Scalable Matrix Extension 2 (SME2) instructions",
.dependencies = featureSet(&[_]Feature{
.sme,
}),
};
result[@intFromEnum(Feature.sme2p1)] = .{
.llvm_name = "sme2p1",
.description = "Enable Scalable Matrix Extension 2.1 (FEAT_SME2p1) instructions",
.dependencies = featureSet(&[_]Feature{
.sme2,
}),
};
result[@intFromEnum(Feature.sme_f16f16)] = .{
.llvm_name = "sme-f16f16",
.description = "Enable SME2.1 non-widening Float16 instructions (FEAT_SME_F16F16)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.sme_f64f64)] = .{
.llvm_name = "sme-f64f64",
.description = "Enable Scalable Matrix Extension (SME) F64F64 instructions (FEAT_SME_F64F64)",
.dependencies = featureSet(&[_]Feature{
.sme,
}),
};
result[@intFromEnum(Feature.sme_i16i64)] = .{
.llvm_name = "sme-i16i64",
.description = "Enable Scalable Matrix Extension (SME) I16I64 instructions (FEAT_SME_I16I64)",
.dependencies = featureSet(&[_]Feature{
.sme,
}),
};
result[@intFromEnum(Feature.spe)] = .{
.llvm_name = "spe",
.description = "Enable Statistical Profiling extension (FEAT_SPE)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.spe_eef)] = .{
.llvm_name = "spe-eef",
.description = "Enable extra register in the Statistical Profiling Extension (FEAT_SPEv1p2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.specres2)] = .{
.llvm_name = "specres2",
.description = "Enable Speculation Restriction Instruction (FEAT_SPECRES2)",
.dependencies = featureSet(&[_]Feature{
.predres,
}),
};
result[@intFromEnum(Feature.specrestrict)] = .{
.llvm_name = "specrestrict",
.description = "Enable architectural speculation restriction (FEAT_CSV2_2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ssbs)] = .{
.llvm_name = "ssbs",
.description = "Enable Speculative Store Bypass Safe bit (FEAT_SSBS, FEAT_SSBS2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.strict_align)] = .{
.llvm_name = "strict-align",
.description = "Disallow all unaligned memory access",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.sve)] = .{
.llvm_name = "sve",
.description = "Enable Scalable Vector Extension (SVE) instructions (FEAT_SVE)",
.dependencies = featureSet(&[_]Feature{
.fullfp16,
}),
};
result[@intFromEnum(Feature.sve2)] = .{
.llvm_name = "sve2",
.description = "Enable Scalable Vector Extension 2 (SVE2) instructions (FEAT_SVE2)",
.dependencies = featureSet(&[_]Feature{
.sve,
.use_scalar_inc_vl,
}),
};
result[@intFromEnum(Feature.sve2_aes)] = .{
.llvm_name = "sve2-aes",
.description = "Enable AES SVE2 instructions (FEAT_SVE_AES, FEAT_SVE_PMULL128)",
.dependencies = featureSet(&[_]Feature{
.aes,
.sve2,
}),
};
result[@intFromEnum(Feature.sve2_bitperm)] = .{
.llvm_name = "sve2-bitperm",
.description = "Enable bit permutation SVE2 instructions (FEAT_SVE_BitPerm)",
.dependencies = featureSet(&[_]Feature{
.sve2,
}),
};
result[@intFromEnum(Feature.sve2_sha3)] = .{
.llvm_name = "sve2-sha3",
.description = "Enable SHA3 SVE2 instructions (FEAT_SVE_SHA3)",
.dependencies = featureSet(&[_]Feature{
.sha3,
.sve2,
}),
};
result[@intFromEnum(Feature.sve2_sm4)] = .{
.llvm_name = "sve2-sm4",
.description = "Enable SM4 SVE2 instructions (FEAT_SVE_SM4)",
.dependencies = featureSet(&[_]Feature{
.sm4,
.sve2,
}),
};
result[@intFromEnum(Feature.sve2p1)] = .{
.llvm_name = "sve2p1",
.description = "Enable Scalable Vector Extension 2.1 instructions",
.dependencies = featureSet(&[_]Feature{
.sve2,
}),
};
result[@intFromEnum(Feature.tagged_globals)] = .{
.llvm_name = "tagged-globals",
.description = "Use an instruction sequence for taking the address of a global that allows a memory tag in the upper address bits",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.the)] = .{
.llvm_name = "the",
.description = "Enable Armv8.9-A Translation Hardening Extension (FEAT_THE)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tlb_rmi)] = .{
.llvm_name = "tlb-rmi",
.description = "Enable v8.4-A TLB Range and Maintenance Instructions (FEAT_TLBIOS, FEAT_TLBIRANGE)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tme)] = .{
.llvm_name = "tme",
.description = "Enable Transactional Memory Extension (FEAT_TME)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tpidr_el1)] = .{
.llvm_name = "tpidr-el1",
.description = "Permit use of TPIDR_EL1 for the TLS base",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tpidr_el2)] = .{
.llvm_name = "tpidr-el2",
.description = "Permit use of TPIDR_EL2 for the TLS base",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tpidr_el3)] = .{
.llvm_name = "tpidr-el3",
.description = "Permit use of TPIDR_EL3 for the TLS base",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tpidrro_el0)] = .{
.llvm_name = "tpidrro-el0",
.description = "Permit use of TPIDRRO_EL0 for the TLS base",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tracev8_4)] = .{
.llvm_name = "tracev8.4",
.description = "Enable v8.4-A Trace extension (FEAT_TRF)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.trbe)] = .{
.llvm_name = "trbe",
.description = "Enable Trace Buffer Extension (FEAT_TRBE)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.uaops)] = .{
.llvm_name = "uaops",
.description = "Enable v8.2 UAO PState (FEAT_UAO)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.use_experimental_zeroing_pseudos)] = .{
.llvm_name = "use-experimental-zeroing-pseudos",
.description = "Hint to the compiler that the MOVPRFX instruction is merged with destructive operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.use_postra_scheduler)] = .{
.llvm_name = "use-postra-scheduler",
.description = "Schedule again after register allocation",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.use_reciprocal_square_root)] = .{
.llvm_name = "use-reciprocal-square-root",
.description = "Use the reciprocal square root approximation",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.use_scalar_inc_vl)] = .{
.llvm_name = "use-scalar-inc-vl",
.description = "Prefer inc/dec over add+cnt",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.v8_1a)] = .{
.llvm_name = "v8.1a",
.description = "Support ARM v8.1a instructions",
.dependencies = featureSet(&[_]Feature{
.crc,
.lor,
.lse,
.pan,
.rdm,
.v8a,
.vh,
}),
};
result[@intFromEnum(Feature.v8_2a)] = .{
.llvm_name = "v8.2a",
.description = "Support ARM v8.2a instructions",
.dependencies = featureSet(&[_]Feature{
.ccpp,
.pan_rwv,
.ras,
.uaops,
.v8_1a,
}),
};
result[@intFromEnum(Feature.v8_3a)] = .{
.llvm_name = "v8.3a",
.description = "Support ARM v8.3a instructions",
.dependencies = featureSet(&[_]Feature{
.ccidx,
.complxnum,
.jsconv,
.pauth,
.rcpc,
.v8_2a,
}),
};
result[@intFromEnum(Feature.v8_4a)] = .{
.llvm_name = "v8.4a",
.description = "Support ARM v8.4a instructions",
.dependencies = featureSet(&[_]Feature{
.am,
.dit,
.dotprod,
.flagm,
.lse2,
.mpam,
.nv,
.rcpc_immo,
.sel2,
.tlb_rmi,
.tracev8_4,
.v8_3a,
}),
};
result[@intFromEnum(Feature.v8_5a)] = .{
.llvm_name = "v8.5a",
.description = "Support ARM v8.5a instructions",
.dependencies = featureSet(&[_]Feature{
.altnzcv,
.bti,
.ccdp,
.fptoint,
.predres,
.sb,
.specrestrict,
.ssbs,
.v8_4a,
}),
};
result[@intFromEnum(Feature.v8_6a)] = .{
.llvm_name = "v8.6a",
.description = "Support ARM v8.6a instructions",
.dependencies = featureSet(&[_]Feature{
.amvs,
.bf16,
.ecv,
.fgt,
.i8mm,
.v8_5a,
}),
};
result[@intFromEnum(Feature.v8_7a)] = .{
.llvm_name = "v8.7a",
.description = "Support ARM v8.7a instructions",
.dependencies = featureSet(&[_]Feature{
.hcx,
.v8_6a,
.wfxt,
.xs,
}),
};
result[@intFromEnum(Feature.v8_8a)] = .{
.llvm_name = "v8.8a",
.description = "Support ARM v8.8a instructions",
.dependencies = featureSet(&[_]Feature{
.hbc,
.mops,
.nmi,
.v8_7a,
}),
};
result[@intFromEnum(Feature.v8_9a)] = .{
.llvm_name = "v8.9a",
.description = "Support ARM v8.9a instructions",
.dependencies = featureSet(&[_]Feature{
.chk,
.clrbhb,
.cssc,
.prfm_slc_target,
.rasv2,
.specres2,
.v8_8a,
}),
};
result[@intFromEnum(Feature.v8a)] = .{
.llvm_name = "v8a",
.description = "Support ARM v8.0a instructions",
.dependencies = featureSet(&[_]Feature{
.el2vmsa,
.el3,
.neon,
}),
};
result[@intFromEnum(Feature.v8r)] = .{
.llvm_name = "v8r",
.description = "Support ARM v8r instructions",
.dependencies = featureSet(&[_]Feature{
.ccidx,
.ccpp,
.complxnum,
.contextidr_el2,
.crc,
.dit,
.dotprod,
.flagm,
.jsconv,
.lse,
.pan_rwv,
.pauth,
.ras,
.rcpc_immo,
.rdm,
.sel2,
.specrestrict,
.tlb_rmi,
.tracev8_4,
.uaops,
}),
};
result[@intFromEnum(Feature.v9_1a)] = .{
.llvm_name = "v9.1a",
.description = "Support ARM v9.1a instructions",
.dependencies = featureSet(&[_]Feature{
.v8_6a,
.v9a,
}),
};
result[@intFromEnum(Feature.v9_2a)] = .{
.llvm_name = "v9.2a",
.description = "Support ARM v9.2a instructions",
.dependencies = featureSet(&[_]Feature{
.v8_7a,
.v9_1a,
}),
};
result[@intFromEnum(Feature.v9_3a)] = .{
.llvm_name = "v9.3a",
.description = "Support ARM v9.3a instructions",
.dependencies = featureSet(&[_]Feature{
.v8_8a,
.v9_2a,
}),
};
result[@intFromEnum(Feature.v9_4a)] = .{
.llvm_name = "v9.4a",
.description = "Support ARM v9.4a instructions",
.dependencies = featureSet(&[_]Feature{
.v8_9a,
.v9_3a,
}),
};
result[@intFromEnum(Feature.v9a)] = .{
.llvm_name = "v9a",
.description = "Support ARM v9a instructions",
.dependencies = featureSet(&[_]Feature{
.mec,
.sve2,
.v8_5a,
}),
};
result[@intFromEnum(Feature.vh)] = .{
.llvm_name = "vh",
.description = "Enables ARM v8.1 Virtual Host extension (FEAT_VHE)",
.dependencies = featureSet(&[_]Feature{
.contextidr_el2,
}),
};
result[@intFromEnum(Feature.wfxt)] = .{
.llvm_name = "wfxt",
.description = "Enable Armv8.7-A WFET and WFIT instruction (FEAT_WFxT)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.xs)] = .{
.llvm_name = "xs",
.description = "Enable Armv8.7-A limited-TLB-maintenance instruction (FEAT_XS)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.zcm)] = .{
.llvm_name = "zcm",
.description = "Has zero-cycle register moves",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.zcz)] = .{
.llvm_name = "zcz",
.description = "Has zero-cycle zeroing instructions",
.dependencies = featureSet(&[_]Feature{
.zcz_gp,
}),
};
result[@intFromEnum(Feature.zcz_fp_workaround)] = .{
.llvm_name = "zcz-fp-workaround",
.description = "The zero-cycle floating-point zeroing instruction has a bug",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.zcz_gp)] = .{
.llvm_name = "zcz-gp",
.description = "Has zero-cycle zeroing instructions for generic registers",
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
break :blk result;
};
/// Predefined AArch64 CPU models.
///
/// Auto-generated data (see the `//!` header produced by
/// tools/update_cpu_features.zig) — do not edit entries by hand; regenerate
/// instead. Each entry pairs the Zig-side model name with the corresponding
/// LLVM model name (`llvm_name = null` when LLVM has no matching `-mcpu`
/// value) and the baseline `Feature` set enabled for that model.
pub const cpu = struct {
    pub const a64fx = CpuModel{
        .name = "a64fx",
        .llvm_name = "a64fx",
        .features = featureSet(&[_]Feature{
            .aggressive_fma,
            .arith_bcc_fusion,
            .complxnum,
            .perfmon,
            .predictable_select_expensive,
            .sha2,
            .sve,
            .use_postra_scheduler,
            .v8_2a,
        }),
    };
    pub const ampere1 = CpuModel{
        .name = "ampere1",
        .llvm_name = "ampere1",
        .features = featureSet(&[_]Feature{
            .aes,
            .aggressive_fma,
            .arith_bcc_fusion,
            .cmp_bcc_fusion,
            .fuse_address,
            .fuse_aes,
            .fuse_literals,
            .lsl_fast,
            .perfmon,
            .rand,
            .sha3,
            .use_postra_scheduler,
            .v8_6a,
        }),
    };
    pub const ampere1a = CpuModel{
        .name = "ampere1a",
        .llvm_name = "ampere1a",
        .features = featureSet(&[_]Feature{
            .aes,
            .aggressive_fma,
            .arith_bcc_fusion,
            .cmp_bcc_fusion,
            .fuse_address,
            .fuse_aes,
            .fuse_literals,
            .lsl_fast,
            .mte,
            .perfmon,
            .rand,
            .sha3,
            .sm4,
            .use_postra_scheduler,
            .v8_6a,
        }),
    };
    pub const apple_a10 = CpuModel{
        .name = "apple_a10",
        .llvm_name = "apple-a10",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crc,
            .crypto,
            .disable_latency_sched_heuristic,
            .fuse_aes,
            .fuse_crypto_eor,
            .lor,
            .pan,
            .perfmon,
            .rdm,
            .v8a,
            .vh,
            .zcm,
            .zcz,
        }),
    };
    pub const apple_a11 = CpuModel{
        .name = "apple_a11",
        .llvm_name = "apple-a11",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .disable_latency_sched_heuristic,
            .fullfp16,
            .fuse_aes,
            .fuse_crypto_eor,
            .perfmon,
            .v8_2a,
            .zcm,
            .zcz,
        }),
    };
    pub const apple_a12 = CpuModel{
        .name = "apple_a12",
        .llvm_name = "apple-a12",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .disable_latency_sched_heuristic,
            .fullfp16,
            .fuse_aes,
            .fuse_crypto_eor,
            .perfmon,
            .v8_3a,
            .zcm,
            .zcz,
        }),
    };
    pub const apple_a13 = CpuModel{
        .name = "apple_a13",
        .llvm_name = "apple-a13",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .disable_latency_sched_heuristic,
            .fp16fml,
            .fuse_aes,
            .fuse_crypto_eor,
            .perfmon,
            .sha3,
            .v8_4a,
            .zcm,
            .zcz,
        }),
    };
    pub const apple_a14 = CpuModel{
        .name = "apple_a14",
        .llvm_name = "apple-a14",
        .features = featureSet(&[_]Feature{
            .aggressive_fma,
            .alternate_sextload_cvt_f32_pattern,
            .altnzcv,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .ccdp,
            .crypto,
            .disable_latency_sched_heuristic,
            .fp16fml,
            .fptoint,
            .fuse_address,
            .fuse_adrp_add,
            .fuse_aes,
            .fuse_arith_logic,
            .fuse_crypto_eor,
            .fuse_csel,
            .fuse_literals,
            .perfmon,
            .predres,
            .sb,
            .sha3,
            .specrestrict,
            .ssbs,
            .v8_4a,
            .zcm,
            .zcz,
        }),
    };
    pub const apple_a15 = CpuModel{
        .name = "apple_a15",
        .llvm_name = "apple-a15",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .disable_latency_sched_heuristic,
            .fp16fml,
            .fuse_address,
            .fuse_aes,
            .fuse_arith_logic,
            .fuse_crypto_eor,
            .fuse_csel,
            .fuse_literals,
            .perfmon,
            .sha3,
            .v8_6a,
            .zcm,
            .zcz,
        }),
    };
    pub const apple_a16 = CpuModel{
        .name = "apple_a16",
        .llvm_name = "apple-a16",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .disable_latency_sched_heuristic,
            .fp16fml,
            .fuse_address,
            .fuse_aes,
            .fuse_arith_logic,
            .fuse_crypto_eor,
            .fuse_csel,
            .fuse_literals,
            .hcx,
            .perfmon,
            .sha3,
            .v8_6a,
            .zcm,
            .zcz,
        }),
    };
    pub const apple_a7 = CpuModel{
        .name = "apple_a7",
        .llvm_name = "apple-a7",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .disable_latency_sched_heuristic,
            .fuse_aes,
            .fuse_crypto_eor,
            .perfmon,
            .v8a,
            .zcm,
            .zcz,
            .zcz_fp_workaround,
        }),
    };
    pub const apple_a8 = CpuModel{
        .name = "apple_a8",
        .llvm_name = "apple-a8",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .disable_latency_sched_heuristic,
            .fuse_aes,
            .fuse_crypto_eor,
            .perfmon,
            .v8a,
            .zcm,
            .zcz,
            .zcz_fp_workaround,
        }),
    };
    pub const apple_a9 = CpuModel{
        .name = "apple_a9",
        .llvm_name = "apple-a9",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .disable_latency_sched_heuristic,
            .fuse_aes,
            .fuse_crypto_eor,
            .perfmon,
            .v8a,
            .zcm,
            .zcz,
            .zcz_fp_workaround,
        }),
    };
    pub const apple_latest = CpuModel{
        .name = "apple_latest",
        .llvm_name = "apple-latest",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .disable_latency_sched_heuristic,
            .fp16fml,
            .fuse_address,
            .fuse_aes,
            .fuse_arith_logic,
            .fuse_crypto_eor,
            .fuse_csel,
            .fuse_literals,
            .hcx,
            .perfmon,
            .sha3,
            .v8_6a,
            .zcm,
            .zcz,
        }),
    };
    pub const apple_m1 = CpuModel{
        .name = "apple_m1",
        .llvm_name = "apple-m1",
        .features = featureSet(&[_]Feature{
            .aggressive_fma,
            .alternate_sextload_cvt_f32_pattern,
            .altnzcv,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .ccdp,
            .crypto,
            .disable_latency_sched_heuristic,
            .fp16fml,
            .fptoint,
            .fuse_address,
            .fuse_adrp_add,
            .fuse_aes,
            .fuse_arith_logic,
            .fuse_crypto_eor,
            .fuse_csel,
            .fuse_literals,
            .perfmon,
            .predres,
            .sb,
            .sha3,
            .specrestrict,
            .ssbs,
            .v8_4a,
            .zcm,
            .zcz,
        }),
    };
    pub const apple_m2 = CpuModel{
        .name = "apple_m2",
        .llvm_name = "apple-m2",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .disable_latency_sched_heuristic,
            .fp16fml,
            .fuse_address,
            .fuse_aes,
            .fuse_arith_logic,
            .fuse_crypto_eor,
            .fuse_csel,
            .fuse_literals,
            .perfmon,
            .sha3,
            .v8_6a,
            .zcm,
            .zcz,
        }),
    };
    pub const apple_s4 = CpuModel{
        .name = "apple_s4",
        .llvm_name = "apple-s4",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .disable_latency_sched_heuristic,
            .fullfp16,
            .fuse_aes,
            .fuse_crypto_eor,
            .perfmon,
            .v8_3a,
            .zcm,
            .zcz,
        }),
    };
    pub const apple_s5 = CpuModel{
        .name = "apple_s5",
        .llvm_name = "apple-s5",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .disable_latency_sched_heuristic,
            .fullfp16,
            .fuse_aes,
            .fuse_crypto_eor,
            .perfmon,
            .v8_3a,
            .zcm,
            .zcz,
        }),
    };
    pub const carmel = CpuModel{
        .name = "carmel",
        .llvm_name = "carmel",
        .features = featureSet(&[_]Feature{
            .crypto,
            .fullfp16,
            .v8_2a,
        }),
    };
    pub const cortex_a34 = CpuModel{
        .name = "cortex_a34",
        .llvm_name = "cortex-a34",
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .perfmon,
            .v8a,
        }),
    };
    pub const cortex_a35 = CpuModel{
        .name = "cortex_a35",
        .llvm_name = "cortex-a35",
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .perfmon,
            .v8a,
        }),
    };
    pub const cortex_a510 = CpuModel{
        .name = "cortex_a510",
        .llvm_name = "cortex-a510",
        .features = featureSet(&[_]Feature{
            .a510,
            .bf16,
            .ete,
            .fp16fml,
            .i8mm,
            .mte,
            .perfmon,
            .sve2_bitperm,
            .v9a,
        }),
    };
    pub const cortex_a53 = CpuModel{
        .name = "cortex_a53",
        .llvm_name = "cortex-a53",
        .features = featureSet(&[_]Feature{
            .balance_fp_ops,
            .crc,
            .crypto,
            .custom_cheap_as_move,
            .fuse_adrp_add,
            .fuse_aes,
            .perfmon,
            .use_postra_scheduler,
            .v8a,
        }),
    };
    pub const cortex_a55 = CpuModel{
        .name = "cortex_a55",
        .llvm_name = "cortex-a55",
        .features = featureSet(&[_]Feature{
            .crypto,
            .dotprod,
            .fullfp16,
            .fuse_address,
            .fuse_adrp_add,
            .fuse_aes,
            .perfmon,
            .rcpc,
            .use_postra_scheduler,
            .v8_2a,
        }),
    };
    pub const cortex_a57 = CpuModel{
        .name = "cortex_a57",
        .llvm_name = "cortex-a57",
        .features = featureSet(&[_]Feature{
            .balance_fp_ops,
            .crc,
            .crypto,
            .custom_cheap_as_move,
            .enable_select_opt,
            .fuse_adrp_add,
            .fuse_aes,
            .fuse_literals,
            .perfmon,
            .predictable_select_expensive,
            .use_postra_scheduler,
            .v8a,
        }),
    };
    pub const cortex_a65 = CpuModel{
        .name = "cortex_a65",
        .llvm_name = "cortex-a65",
        .features = featureSet(&[_]Feature{
            .a65,
            .crypto,
            .dotprod,
            .fullfp16,
            .perfmon,
            .rcpc,
            .ssbs,
            .v8_2a,
        }),
    };
    pub const cortex_a65ae = CpuModel{
        .name = "cortex_a65ae",
        .llvm_name = "cortex-a65ae",
        .features = featureSet(&[_]Feature{
            .a65,
            .crypto,
            .dotprod,
            .fullfp16,
            .perfmon,
            .rcpc,
            .ssbs,
            .v8_2a,
        }),
    };
    pub const cortex_a710 = CpuModel{
        .name = "cortex_a710",
        .llvm_name = "cortex-a710",
        .features = featureSet(&[_]Feature{
            .a710,
            .bf16,
            .ete,
            .fp16fml,
            .i8mm,
            .mte,
            .perfmon,
            .sve2_bitperm,
            .v9a,
        }),
    };
    pub const cortex_a715 = CpuModel{
        .name = "cortex_a715",
        .llvm_name = "cortex-a715",
        .features = featureSet(&[_]Feature{
            .bf16,
            .cmp_bcc_fusion,
            .enable_select_opt,
            .ete,
            .fp16fml,
            .fuse_adrp_add,
            .fuse_aes,
            .i8mm,
            .lsl_fast,
            .mte,
            .perfmon,
            .predictable_select_expensive,
            .spe,
            .sve2_bitperm,
            .use_postra_scheduler,
            .v9a,
        }),
    };
    pub const cortex_a72 = CpuModel{
        .name = "cortex_a72",
        .llvm_name = "cortex-a72",
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .enable_select_opt,
            .fuse_adrp_add,
            .fuse_aes,
            .fuse_literals,
            .perfmon,
            .predictable_select_expensive,
            .v8a,
        }),
    };
    pub const cortex_a73 = CpuModel{
        .name = "cortex_a73",
        .llvm_name = "cortex-a73",
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .enable_select_opt,
            .fuse_adrp_add,
            .fuse_aes,
            .perfmon,
            .predictable_select_expensive,
            .v8a,
        }),
    };
    pub const cortex_a75 = CpuModel{
        .name = "cortex_a75",
        .llvm_name = "cortex-a75",
        .features = featureSet(&[_]Feature{
            .crypto,
            .dotprod,
            .enable_select_opt,
            .fullfp16,
            .fuse_adrp_add,
            .fuse_aes,
            .perfmon,
            .predictable_select_expensive,
            .rcpc,
            .v8_2a,
        }),
    };
    pub const cortex_a76 = CpuModel{
        .name = "cortex_a76",
        .llvm_name = "cortex-a76",
        .features = featureSet(&[_]Feature{
            .a76,
            .crypto,
            .dotprod,
            .fullfp16,
            .perfmon,
            .rcpc,
            .ssbs,
            .v8_2a,
        }),
    };
    pub const cortex_a76ae = CpuModel{
        .name = "cortex_a76ae",
        .llvm_name = "cortex-a76ae",
        .features = featureSet(&[_]Feature{
            .a76,
            .crypto,
            .dotprod,
            .fullfp16,
            .perfmon,
            .rcpc,
            .ssbs,
            .v8_2a,
        }),
    };
    pub const cortex_a77 = CpuModel{
        .name = "cortex_a77",
        .llvm_name = "cortex-a77",
        .features = featureSet(&[_]Feature{
            .cmp_bcc_fusion,
            .crypto,
            .dotprod,
            .enable_select_opt,
            .fullfp16,
            .fuse_adrp_add,
            .fuse_aes,
            .lsl_fast,
            .perfmon,
            .predictable_select_expensive,
            .rcpc,
            .ssbs,
            .v8_2a,
        }),
    };
    pub const cortex_a78 = CpuModel{
        .name = "cortex_a78",
        .llvm_name = "cortex-a78",
        .features = featureSet(&[_]Feature{
            .a78,
            .crypto,
            .dotprod,
            .fullfp16,
            .perfmon,
            .rcpc,
            .spe,
            .ssbs,
            .v8_2a,
        }),
    };
    pub const cortex_a78c = CpuModel{
        .name = "cortex_a78c",
        .llvm_name = "cortex-a78c",
        .features = featureSet(&[_]Feature{
            .a78c,
            .crypto,
            .dotprod,
            .flagm,
            .fp16fml,
            .pauth,
            .perfmon,
            .rcpc,
            .spe,
            .ssbs,
            .v8_2a,
        }),
    };
    pub const cortex_r82 = CpuModel{
        .name = "cortex_r82",
        .llvm_name = "cortex-r82",
        .features = featureSet(&[_]Feature{
            .cortex_r82,
            .fp16fml,
            .perfmon,
            .predres,
            .sb,
            .ssbs,
            .v8r,
        }),
    };
    pub const cortex_x1 = CpuModel{
        .name = "cortex_x1",
        .llvm_name = "cortex-x1",
        .features = featureSet(&[_]Feature{
            .cmp_bcc_fusion,
            .crypto,
            .dotprod,
            .enable_select_opt,
            .fullfp16,
            .fuse_adrp_add,
            .fuse_aes,
            .lsl_fast,
            .perfmon,
            .predictable_select_expensive,
            .rcpc,
            .spe,
            .ssbs,
            .use_postra_scheduler,
            .v8_2a,
        }),
    };
    pub const cortex_x1c = CpuModel{
        .name = "cortex_x1c",
        .llvm_name = "cortex-x1c",
        .features = featureSet(&[_]Feature{
            .cmp_bcc_fusion,
            .crypto,
            .dotprod,
            .enable_select_opt,
            .flagm,
            .fullfp16,
            .fuse_adrp_add,
            .fuse_aes,
            .lse2,
            .lsl_fast,
            .pauth,
            .perfmon,
            .predictable_select_expensive,
            .rcpc_immo,
            .spe,
            .ssbs,
            .use_postra_scheduler,
            .v8_2a,
        }),
    };
    pub const cortex_x2 = CpuModel{
        .name = "cortex_x2",
        .llvm_name = "cortex-x2",
        .features = featureSet(&[_]Feature{
            .bf16,
            .cmp_bcc_fusion,
            .enable_select_opt,
            .ete,
            .fp16fml,
            .fuse_adrp_add,
            .fuse_aes,
            .i8mm,
            .lsl_fast,
            .mte,
            .perfmon,
            .predictable_select_expensive,
            .sve2_bitperm,
            .use_postra_scheduler,
            .v9a,
        }),
    };
    pub const cortex_x3 = CpuModel{
        .name = "cortex_x3",
        .llvm_name = "cortex-x3",
        .features = featureSet(&[_]Feature{
            .bf16,
            .enable_select_opt,
            .ete,
            .fp16fml,
            .fuse_adrp_add,
            .fuse_aes,
            .i8mm,
            .lsl_fast,
            .mte,
            .perfmon,
            .predictable_select_expensive,
            .spe,
            .sve2_bitperm,
            .use_postra_scheduler,
            .v9a,
        }),
    };
    pub const cyclone = CpuModel{
        .name = "cyclone",
        .llvm_name = "cyclone",
        .features = featureSet(&[_]Feature{
            .alternate_sextload_cvt_f32_pattern,
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .disable_latency_sched_heuristic,
            .fuse_aes,
            .fuse_crypto_eor,
            .perfmon,
            .v8a,
            .zcm,
            .zcz,
            .zcz_fp_workaround,
        }),
    };
    pub const emag = CpuModel{
        .name = "emag",
        .llvm_name = null,
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .perfmon,
            .v8a,
        }),
    };
    pub const exynos_m1 = CpuModel{
        .name = "exynos_m1",
        .llvm_name = null,
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .exynos_cheap_as_move,
            .force_32bit_jump_tables,
            .fuse_aes,
            .perfmon,
            .slow_misaligned_128store,
            .slow_paired_128,
            .use_postra_scheduler,
            .use_reciprocal_square_root,
            .v8a,
        }),
    };
    pub const exynos_m2 = CpuModel{
        .name = "exynos_m2",
        .llvm_name = null,
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .exynos_cheap_as_move,
            .force_32bit_jump_tables,
            .fuse_aes,
            .perfmon,
            .slow_misaligned_128store,
            .slow_paired_128,
            .use_postra_scheduler,
            .v8a,
        }),
    };
    pub const exynos_m3 = CpuModel{
        .name = "exynos_m3",
        .llvm_name = "exynos-m3",
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .exynos_cheap_as_move,
            .force_32bit_jump_tables,
            .fuse_address,
            .fuse_adrp_add,
            .fuse_aes,
            .fuse_csel,
            .fuse_literals,
            .lsl_fast,
            .perfmon,
            .predictable_select_expensive,
            .use_postra_scheduler,
            .v8a,
        }),
    };
    pub const exynos_m4 = CpuModel{
        .name = "exynos_m4",
        .llvm_name = "exynos-m4",
        .features = featureSet(&[_]Feature{
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .dotprod,
            .exynos_cheap_as_move,
            .force_32bit_jump_tables,
            .fullfp16,
            .fuse_address,
            .fuse_adrp_add,
            .fuse_aes,
            .fuse_arith_logic,
            .fuse_csel,
            .fuse_literals,
            .lsl_fast,
            .perfmon,
            .use_postra_scheduler,
            .v8_2a,
            .zcz,
        }),
    };
    pub const exynos_m5 = CpuModel{
        .name = "exynos_m5",
        .llvm_name = "exynos-m5",
        .features = featureSet(&[_]Feature{
            .arith_bcc_fusion,
            .arith_cbz_fusion,
            .crypto,
            .dotprod,
            .exynos_cheap_as_move,
            .force_32bit_jump_tables,
            .fullfp16,
            .fuse_address,
            .fuse_adrp_add,
            .fuse_aes,
            .fuse_arith_logic,
            .fuse_csel,
            .fuse_literals,
            .lsl_fast,
            .perfmon,
            .use_postra_scheduler,
            .v8_2a,
            .zcz,
        }),
    };
    pub const falkor = CpuModel{
        .name = "falkor",
        .llvm_name = "falkor",
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .custom_cheap_as_move,
            .lsl_fast,
            .perfmon,
            .predictable_select_expensive,
            .rdm,
            .slow_strqro_store,
            .use_postra_scheduler,
            .v8a,
            .zcz,
        }),
    };
    pub const generic = CpuModel{
        .name = "generic",
        .llvm_name = "generic",
        .features = featureSet(&[_]Feature{
            .enable_select_opt,
            .ete,
            .fuse_adrp_add,
            .fuse_aes,
            .neon,
            .use_postra_scheduler,
        }),
    };
    pub const kryo = CpuModel{
        .name = "kryo",
        .llvm_name = "kryo",
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .custom_cheap_as_move,
            .lsl_fast,
            .perfmon,
            .predictable_select_expensive,
            .use_postra_scheduler,
            .v8a,
            .zcz,
        }),
    };
    pub const neoverse_512tvb = CpuModel{
        .name = "neoverse_512tvb",
        .llvm_name = "neoverse-512tvb",
        .features = featureSet(&[_]Feature{
            .bf16,
            .ccdp,
            .crypto,
            .enable_select_opt,
            .fp16fml,
            .fuse_adrp_add,
            .fuse_aes,
            .i8mm,
            .lsl_fast,
            .perfmon,
            .predictable_select_expensive,
            .rand,
            .spe,
            .ssbs,
            .sve,
            .use_postra_scheduler,
            .v8_4a,
        }),
    };
    pub const neoverse_e1 = CpuModel{
        .name = "neoverse_e1",
        .llvm_name = "neoverse-e1",
        .features = featureSet(&[_]Feature{
            .crypto,
            .dotprod,
            .fullfp16,
            .fuse_adrp_add,
            .fuse_aes,
            .perfmon,
            .rcpc,
            .ssbs,
            .use_postra_scheduler,
            .v8_2a,
        }),
    };
    pub const neoverse_n1 = CpuModel{
        .name = "neoverse_n1",
        .llvm_name = "neoverse-n1",
        .features = featureSet(&[_]Feature{
            .crypto,
            .dotprod,
            .enable_select_opt,
            .fullfp16,
            .fuse_adrp_add,
            .fuse_aes,
            .lsl_fast,
            .perfmon,
            .predictable_select_expensive,
            .rcpc,
            .spe,
            .ssbs,
            .use_postra_scheduler,
            .v8_2a,
        }),
    };
    pub const neoverse_n2 = CpuModel{
        .name = "neoverse_n2",
        .llvm_name = "neoverse-n2",
        .features = featureSet(&[_]Feature{
            .bf16,
            .crypto,
            .enable_select_opt,
            .ete,
            .fuse_adrp_add,
            .fuse_aes,
            .i8mm,
            .lsl_fast,
            .mte,
            .perfmon,
            .predictable_select_expensive,
            .sve2_bitperm,
            .use_postra_scheduler,
            .v8_5a,
        }),
    };
    pub const neoverse_v1 = CpuModel{
        .name = "neoverse_v1",
        .llvm_name = "neoverse-v1",
        .features = featureSet(&[_]Feature{
            .bf16,
            .ccdp,
            .crypto,
            .enable_select_opt,
            .fp16fml,
            .fuse_adrp_add,
            .fuse_aes,
            .i8mm,
            .lsl_fast,
            .no_sve_fp_ld1r,
            .perfmon,
            .predictable_select_expensive,
            .rand,
            .spe,
            .ssbs,
            .sve,
            .use_postra_scheduler,
            .v8_4a,
        }),
    };
    pub const neoverse_v2 = CpuModel{
        .name = "neoverse_v2",
        .llvm_name = "neoverse-v2",
        .features = featureSet(&[_]Feature{
            .bf16,
            .enable_select_opt,
            .ete,
            .fp16fml,
            .fuse_aes,
            .i8mm,
            .lsl_fast,
            .mte,
            .perfmon,
            .predictable_select_expensive,
            .rand,
            .spe,
            .sve2_bitperm,
            .use_postra_scheduler,
            .v9a,
        }),
    };
    pub const saphira = CpuModel{
        .name = "saphira",
        .llvm_name = "saphira",
        .features = featureSet(&[_]Feature{
            .crypto,
            .custom_cheap_as_move,
            .lsl_fast,
            .perfmon,
            .predictable_select_expensive,
            .spe,
            .use_postra_scheduler,
            .v8_4a,
            .zcz,
        }),
    };
    pub const thunderx = CpuModel{
        .name = "thunderx",
        .llvm_name = "thunderx",
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .perfmon,
            .predictable_select_expensive,
            .use_postra_scheduler,
            .v8a,
        }),
    };
    pub const thunderx2t99 = CpuModel{
        .name = "thunderx2t99",
        .llvm_name = "thunderx2t99",
        .features = featureSet(&[_]Feature{
            .aggressive_fma,
            .arith_bcc_fusion,
            .crypto,
            .predictable_select_expensive,
            .use_postra_scheduler,
            .v8_1a,
        }),
    };
    pub const thunderx3t110 = CpuModel{
        .name = "thunderx3t110",
        .llvm_name = "thunderx3t110",
        .features = featureSet(&[_]Feature{
            .aggressive_fma,
            .arith_bcc_fusion,
            .balance_fp_ops,
            .crypto,
            .perfmon,
            .predictable_select_expensive,
            .strict_align,
            .use_postra_scheduler,
            .v8_3a,
        }),
    };
    pub const thunderxt81 = CpuModel{
        .name = "thunderxt81",
        .llvm_name = "thunderxt81",
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .perfmon,
            .predictable_select_expensive,
            .use_postra_scheduler,
            .v8a,
        }),
    };
    pub const thunderxt83 = CpuModel{
        .name = "thunderxt83",
        .llvm_name = "thunderxt83",
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .perfmon,
            .predictable_select_expensive,
            .use_postra_scheduler,
            .v8a,
        }),
    };
    pub const thunderxt88 = CpuModel{
        .name = "thunderxt88",
        .llvm_name = "thunderxt88",
        .features = featureSet(&[_]Feature{
            .crc,
            .crypto,
            .perfmon,
            .predictable_select_expensive,
            .use_postra_scheduler,
            .v8a,
        }),
    };
    pub const tsv110 = CpuModel{
        .name = "tsv110",
        .llvm_name = "tsv110",
        .features = featureSet(&[_]Feature{
            .crypto,
            .custom_cheap_as_move,
            .dotprod,
            .fp16fml,
            .fuse_aes,
            .perfmon,
            .spe,
            .use_postra_scheduler,
            .v8_2a,
        }),
    };
    pub const xgene1 = CpuModel{
        .name = "xgene1",
        .llvm_name = null,
        .features = featureSet(&[_]Feature{
            .perfmon,
            .v8a,
        }),
    };
};
// Source: https://raw.githubusercontent.com/beingofexistence13/multiversal-lang/dd769e3fc6182c23ef43ed4479614f43f29738c9/zig/lib/std/target/aarch64.zig
//! This file is auto-generated by tools/update_cpu_features.zig.
const std = @import("../std.zig");
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
/// AArch64 subtarget features.
///
/// Auto-generated (see the `//!` file header); do not reorder by hand:
/// the companion `all_features` table is indexed with
/// `@intFromEnum(Feature.x)`, so enum member order is load-bearing.
pub const Feature = enum {
    a510,
    a65,
    a710,
    a76,
    a78,
    a78c,
    aes,
    aggressive_fma,
    alternate_sextload_cvt_f32_pattern,
    altnzcv,
    am,
    amvs,
    arith_bcc_fusion,
    arith_cbz_fusion,
    ascend_store_address,
    b16b16,
    balance_fp_ops,
    bf16,
    brbe,
    bti,
    call_saved_x10,
    call_saved_x11,
    call_saved_x12,
    call_saved_x13,
    call_saved_x14,
    call_saved_x15,
    call_saved_x18,
    call_saved_x8,
    call_saved_x9,
    ccdp,
    ccidx,
    ccpp,
    chk,
    clrbhb,
    cmp_bcc_fusion,
    complxnum,
    contextidr_el2,
    cortex_r82,
    crc,
    crypto,
    cssc,
    custom_cheap_as_move,
    d128,
    disable_latency_sched_heuristic,
    dit,
    dotprod,
    ecv,
    el2vmsa,
    el3,
    enable_select_opt,
    ete,
    exynos_cheap_as_move,
    f32mm,
    f64mm,
    fgt,
    fix_cortex_a53_835769,
    flagm,
    fmv,
    force_32bit_jump_tables,
    fp16fml,
    fp_armv8,
    fptoint,
    fullfp16,
    fuse_address,
    fuse_addsub_2reg_const1,
    fuse_adrp_add,
    fuse_aes,
    fuse_arith_logic,
    fuse_crypto_eor,
    fuse_csel,
    fuse_literals,
    gcs,
    harden_sls_blr,
    harden_sls_nocomdat,
    harden_sls_retbr,
    hbc,
    hcx,
    i8mm,
    ite,
    jsconv,
    lor,
    ls64,
    lse,
    lse128,
    lse2,
    lsl_fast,
    mec,
    mops,
    mpam,
    mte,
    neon,
    nmi,
    no_bti_at_return_twice,
    no_neg_immediates,
    no_sve_fp_ld1r,
    no_zcz_fp,
    nv,
    outline_atomics,
    pan,
    pan_rwv,
    pauth,
    perfmon,
    predictable_select_expensive,
    predres,
    prfm_slc_target,
    rand,
    ras,
    rasv2,
    rcpc,
    rcpc3,
    rcpc_immo,
    rdm,
    reserve_x1,
    reserve_x10,
    reserve_x11,
    reserve_x12,
    reserve_x13,
    reserve_x14,
    reserve_x15,
    reserve_x18,
    reserve_x2,
    reserve_x20,
    reserve_x21,
    reserve_x22,
    reserve_x23,
    reserve_x24,
    reserve_x25,
    reserve_x26,
    reserve_x27,
    reserve_x28,
    reserve_x3,
    reserve_x30,
    reserve_x4,
    reserve_x5,
    reserve_x6,
    reserve_x7,
    reserve_x9,
    rme,
    sb,
    sel2,
    sha2,
    sha3,
    slow_misaligned_128store,
    slow_paired_128,
    slow_strqro_store,
    sm4,
    sme,
    sme2,
    sme2p1,
    sme_f16f16,
    sme_f64f64,
    sme_i16i64,
    spe,
    spe_eef,
    specres2,
    specrestrict,
    ssbs,
    strict_align,
    sve,
    sve2,
    sve2_aes,
    sve2_bitperm,
    sve2_sha3,
    sve2_sm4,
    sve2p1,
    tagged_globals,
    the,
    tlb_rmi,
    tme,
    tpidr_el1,
    tpidr_el2,
    tpidr_el3,
    tpidrro_el0,
    tracev8_4,
    trbe,
    uaops,
    use_experimental_zeroing_pseudos,
    use_postra_scheduler,
    use_reciprocal_square_root,
    use_scalar_inc_vl,
    v8_1a,
    v8_2a,
    v8_3a,
    v8_4a,
    v8_5a,
    v8_6a,
    v8_7a,
    v8_8a,
    v8_9a,
    v8a,
    v8r,
    v9_1a,
    v9_2a,
    v9_3a,
    v9_4a,
    v9a,
    vh,
    wfxt,
    xs,
    zcm,
    zcz,
    zcz_fp_workaround,
    zcz_gp,
};
// Feature-set helpers instantiated for this target's `Feature` enum:
// build a set from a slice of features, and test membership of one,
// any, or all features in a set.
pub const featureSet = CpuFeature.feature_set_fns(Feature).featureSet;
pub const featureSetHas = CpuFeature.feature_set_fns(Feature).featureSetHas;
pub const featureSetHasAny = CpuFeature.feature_set_fns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.feature_set_fns(Feature).featureSetHasAll;
pub const all_features = blk: {
@setEvalBranchQuota(2000);
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.a510)] = .{
.llvm_name = "a510",
.description = "Cortex-A510 ARM processors",
.dependencies = featureSet(&[_]Feature{
.fuse_adrp_add,
.fuse_aes,
.use_postra_scheduler,
}),
};
result[@intFromEnum(Feature.a65)] = .{
.llvm_name = "a65",
.description = "Cortex-A65 ARM processors",
.dependencies = featureSet(&[_]Feature{
.enable_select_opt,
.fuse_address,
.fuse_adrp_add,
.fuse_aes,
.fuse_literals,
.predictable_select_expensive,
}),
};
result[@intFromEnum(Feature.a710)] = .{
.llvm_name = "a710",
.description = "Cortex-A710 ARM processors",
.dependencies = featureSet(&[_]Feature{
.cmp_bcc_fusion,
.enable_select_opt,
.fuse_adrp_add,
.fuse_aes,
.lsl_fast,
.predictable_select_expensive,
.use_postra_scheduler,
}),
};
result[@intFromEnum(Feature.a76)] = .{
.llvm_name = "a76",
.description = "Cortex-A76 ARM processors",
.dependencies = featureSet(&[_]Feature{
.enable_select_opt,
.fuse_adrp_add,
.fuse_aes,
.lsl_fast,
.predictable_select_expensive,
}),
};
result[@intFromEnum(Feature.a78)] = .{
.llvm_name = "a78",
.description = "Cortex-A78 ARM processors",
.dependencies = featureSet(&[_]Feature{
.cmp_bcc_fusion,
.enable_select_opt,
.fuse_adrp_add,
.fuse_aes,
.lsl_fast,
.predictable_select_expensive,
.use_postra_scheduler,
}),
};
result[@intFromEnum(Feature.a78c)] = .{
.llvm_name = "a78c",
.description = "Cortex-A78C ARM processors",
.dependencies = featureSet(&[_]Feature{
.cmp_bcc_fusion,
.enable_select_opt,
.fuse_adrp_add,
.fuse_aes,
.lsl_fast,
.predictable_select_expensive,
.use_postra_scheduler,
}),
};
result[@intFromEnum(Feature.aes)] = .{
.llvm_name = "aes",
.description = "Enable AES support (FEAT_AES, FEAT_PMULL)",
.dependencies = featureSet(&[_]Feature{
.neon,
}),
};
result[@intFromEnum(Feature.aggressive_fma)] = .{
.llvm_name = "aggressive-fma",
.description = "Enable Aggressive FMA for floating-point.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.alternate_sextload_cvt_f32_pattern)] = .{
.llvm_name = "alternate-sextload-cvt-f32-pattern",
.description = "Use alternative pattern for sextload convert to f32",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.altnzcv)] = .{
.llvm_name = "altnzcv",
.description = "Enable alternative NZCV format for floating point comparisons (FEAT_FlagM2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.am)] = .{
.llvm_name = "am",
.description = "Enable v8.4-A Activity Monitors extension (FEAT_AMUv1)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.amvs)] = .{
.llvm_name = "amvs",
.description = "Enable v8.6-A Activity Monitors Virtualization support (FEAT_AMUv1p1)",
.dependencies = featureSet(&[_]Feature{
.am,
}),
};
result[@intFromEnum(Feature.arith_bcc_fusion)] = .{
.llvm_name = "arith-bcc-fusion",
.description = "CPU fuses arithmetic+bcc operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.arith_cbz_fusion)] = .{
.llvm_name = "arith-cbz-fusion",
.description = "CPU fuses arithmetic + cbz/cbnz operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ascend_store_address)] = .{
.llvm_name = "ascend-store-address",
.description = "Schedule vector stores by ascending address",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.b16b16)] = .{
.llvm_name = "b16b16",
.description = "Enable SVE2.1 or SME2.1 non-widening BFloat16 to BFloat16 instructions (FEAT_B16B16)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.balance_fp_ops)] = .{
.llvm_name = "balance-fp-ops",
.description = "balance mix of odd and even D-registers for fp multiply(-accumulate) ops",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.bf16)] = .{
.llvm_name = "bf16",
.description = "Enable BFloat16 Extension (FEAT_BF16)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.brbe)] = .{
.llvm_name = "brbe",
.description = "Enable Branch Record Buffer Extension (FEAT_BRBE)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.bti)] = .{
.llvm_name = "bti",
.description = "Enable Branch Target Identification (FEAT_BTI)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x10)] = .{
.llvm_name = "call-saved-x10",
.description = "Make X10 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x11)] = .{
.llvm_name = "call-saved-x11",
.description = "Make X11 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x12)] = .{
.llvm_name = "call-saved-x12",
.description = "Make X12 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x13)] = .{
.llvm_name = "call-saved-x13",
.description = "Make X13 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x14)] = .{
.llvm_name = "call-saved-x14",
.description = "Make X14 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x15)] = .{
.llvm_name = "call-saved-x15",
.description = "Make X15 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x18)] = .{
.llvm_name = "call-saved-x18",
.description = "Make X18 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x8)] = .{
.llvm_name = "call-saved-x8",
.description = "Make X8 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.call_saved_x9)] = .{
.llvm_name = "call-saved-x9",
.description = "Make X9 callee saved.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ccdp)] = .{
.llvm_name = "ccdp",
.description = "Enable v8.5 Cache Clean to Point of Deep Persistence (FEAT_DPB2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ccidx)] = .{
.llvm_name = "ccidx",
.description = "Enable v8.3-A Extend of the CCSIDR number of sets (FEAT_CCIDX)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ccpp)] = .{
.llvm_name = "ccpp",
.description = "Enable v8.2 data Cache Clean to Point of Persistence (FEAT_DPB)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.chk)] = .{
.llvm_name = "chk",
.description = "Enable Armv8.0-A Check Feature Status Extension (FEAT_CHK)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.clrbhb)] = .{
.llvm_name = "clrbhb",
.description = "Enable Clear BHB instruction (FEAT_CLRBHB)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.cmp_bcc_fusion)] = .{
.llvm_name = "cmp-bcc-fusion",
.description = "CPU fuses cmp+bcc operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.complxnum)] = .{
.llvm_name = "complxnum",
.description = "Enable v8.3-A Floating-point complex number support (FEAT_FCMA)",
.dependencies = featureSet(&[_]Feature{
.neon,
}),
};
result[@intFromEnum(Feature.contextidr_el2)] = .{
.llvm_name = "CONTEXTIDREL2",
.description = "Enable RW operand Context ID Register (EL2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.cortex_r82)] = .{
.llvm_name = "cortex-r82",
.description = "Cortex-R82 ARM processors",
.dependencies = featureSet(&[_]Feature{
.use_postra_scheduler,
}),
};
result[@intFromEnum(Feature.crc)] = .{
.llvm_name = "crc",
.description = "Enable ARMv8 CRC-32 checksum instructions (FEAT_CRC32)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.crypto)] = .{
.llvm_name = "crypto",
.description = "Enable cryptographic instructions",
.dependencies = featureSet(&[_]Feature{
.aes,
.sha2,
}),
};
result[@intFromEnum(Feature.cssc)] = .{
.llvm_name = "cssc",
.description = "Enable Common Short Sequence Compression (CSSC) instructions (FEAT_CSSC)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.custom_cheap_as_move)] = .{
.llvm_name = "custom-cheap-as-move",
.description = "Use custom handling of cheap instructions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.d128)] = .{
.llvm_name = "d128",
.description = "Enable Armv9.4-A 128-bit Page Table Descriptors, System Registers and Instructions (FEAT_D128, FEAT_LVA3, FEAT_SYSREG128, FEAT_SYSINSTR128)",
.dependencies = featureSet(&[_]Feature{
.lse128,
}),
};
result[@intFromEnum(Feature.disable_latency_sched_heuristic)] = .{
.llvm_name = "disable-latency-sched-heuristic",
.description = "Disable latency scheduling heuristic",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.dit)] = .{
.llvm_name = "dit",
.description = "Enable v8.4-A Data Independent Timing instructions (FEAT_DIT)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.dotprod)] = .{
.llvm_name = "dotprod",
.description = "Enable dot product support (FEAT_DotProd)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ecv)] = .{
.llvm_name = "ecv",
.description = "Enable enhanced counter virtualization extension (FEAT_ECV)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.el2vmsa)] = .{
.llvm_name = "el2vmsa",
.description = "Enable Exception Level 2 Virtual Memory System Architecture",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.el3)] = .{
.llvm_name = "el3",
.description = "Enable Exception Level 3",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.enable_select_opt)] = .{
.llvm_name = "enable-select-opt",
.description = "Enable the select optimize pass for select loop heuristics",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ete)] = .{
.llvm_name = "ete",
.description = "Enable Embedded Trace Extension (FEAT_ETE)",
.dependencies = featureSet(&[_]Feature{
.trbe,
}),
};
result[@intFromEnum(Feature.exynos_cheap_as_move)] = .{
.llvm_name = "exynos-cheap-as-move",
.description = "Use Exynos specific handling of cheap instructions",
.dependencies = featureSet(&[_]Feature{
.custom_cheap_as_move,
}),
};
result[@intFromEnum(Feature.f32mm)] = .{
.llvm_name = "f32mm",
.description = "Enable Matrix Multiply FP32 Extension (FEAT_F32MM)",
.dependencies = featureSet(&[_]Feature{
.sve,
}),
};
result[@intFromEnum(Feature.f64mm)] = .{
.llvm_name = "f64mm",
.description = "Enable Matrix Multiply FP64 Extension (FEAT_F64MM)",
.dependencies = featureSet(&[_]Feature{
.sve,
}),
};
result[@intFromEnum(Feature.fgt)] = .{
.llvm_name = "fgt",
.description = "Enable fine grained virtualization traps extension (FEAT_FGT)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fix_cortex_a53_835769)] = .{
.llvm_name = "fix-cortex-a53-835769",
.description = "Mitigate Cortex-A53 Erratum 835769",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.flagm)] = .{
.llvm_name = "flagm",
.description = "Enable v8.4-A Flag Manipulation Instructions (FEAT_FlagM)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fmv)] = .{
.llvm_name = "fmv",
.description = "Enable Function Multi Versioning support.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.force_32bit_jump_tables)] = .{
.llvm_name = "force-32bit-jump-tables",
.description = "Force jump table entries to be 32-bits wide except at MinSize",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fp16fml)] = .{
.llvm_name = "fp16fml",
.description = "Enable FP16 FML instructions (FEAT_FHM)",
.dependencies = featureSet(&[_]Feature{
.fullfp16,
}),
};
result[@intFromEnum(Feature.fp_armv8)] = .{
.llvm_name = "fp-armv8",
.description = "Enable ARMv8 FP (FEAT_FP)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fptoint)] = .{
.llvm_name = "fptoint",
.description = "Enable FRInt[32|64][Z|X] instructions that round a floating-point number to an integer (in FP format) forcing it to fit into a 32- or 64-bit int (FEAT_FRINTTS)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fullfp16)] = .{
.llvm_name = "fullfp16",
.description = "Full FP16 (FEAT_FP16)",
.dependencies = featureSet(&[_]Feature{
.fp_armv8,
}),
};
result[@intFromEnum(Feature.fuse_address)] = .{
.llvm_name = "fuse-address",
.description = "CPU fuses address generation and memory operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_addsub_2reg_const1)] = .{
.llvm_name = "fuse-addsub-2reg-const1",
.description = "CPU fuses (a + b + 1) and (a - b - 1)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_adrp_add)] = .{
.llvm_name = "fuse-adrp-add",
.description = "CPU fuses adrp+add operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_aes)] = .{
.llvm_name = "fuse-aes",
.description = "CPU fuses AES crypto operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_arith_logic)] = .{
.llvm_name = "fuse-arith-logic",
.description = "CPU fuses arithmetic and logic operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_crypto_eor)] = .{
.llvm_name = "fuse-crypto-eor",
.description = "CPU fuses AES/PMULL and EOR operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_csel)] = .{
.llvm_name = "fuse-csel",
.description = "CPU fuses conditional select operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.fuse_literals)] = .{
.llvm_name = "fuse-literals",
.description = "CPU fuses literal generation operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.gcs)] = .{
.llvm_name = "gcs",
.description = "Enable Armv9.4-A Guarded Call Stack Extension",
.dependencies = featureSet(&[_]Feature{
.chk,
}),
};
result[@intFromEnum(Feature.harden_sls_blr)] = .{
.llvm_name = "harden-sls-blr",
.description = "Harden against straight line speculation across BLR instructions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.harden_sls_nocomdat)] = .{
.llvm_name = "harden-sls-nocomdat",
.description = "Generate thunk code for SLS mitigation in the normal text section",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.harden_sls_retbr)] = .{
.llvm_name = "harden-sls-retbr",
.description = "Harden against straight line speculation across RET and BR instructions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.hbc)] = .{
.llvm_name = "hbc",
.description = "Enable Armv8.8-A Hinted Conditional Branches Extension (FEAT_HBC)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.hcx)] = .{
.llvm_name = "hcx",
.description = "Enable Armv8.7-A HCRX_EL2 system register (FEAT_HCX)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.i8mm)] = .{
.llvm_name = "i8mm",
.description = "Enable Matrix Multiply Int8 Extension (FEAT_I8MM)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ite)] = .{
.llvm_name = "ite",
.description = "Enable Armv9.4-A Instrumentation Extension FEAT_ITE",
.dependencies = featureSet(&[_]Feature{
.ete,
}),
};
result[@intFromEnum(Feature.jsconv)] = .{
.llvm_name = "jsconv",
.description = "Enable v8.3-A JavaScript FP conversion instructions (FEAT_JSCVT)",
.dependencies = featureSet(&[_]Feature{
.fp_armv8,
}),
};
result[@intFromEnum(Feature.lor)] = .{
.llvm_name = "lor",
.description = "Enables ARM v8.1 Limited Ordering Regions extension (FEAT_LOR)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ls64)] = .{
.llvm_name = "ls64",
.description = "Enable Armv8.7-A LD64B/ST64B Accelerator Extension (FEAT_LS64, FEAT_LS64_V, FEAT_LS64_ACCDATA)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.lse)] = .{
.llvm_name = "lse",
.description = "Enable ARMv8.1 Large System Extension (LSE) atomic instructions (FEAT_LSE)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.lse128)] = .{
.llvm_name = "lse128",
.description = "Enable Armv9.4-A 128-bit Atomic Instructions (FEAT_LSE128)",
.dependencies = featureSet(&[_]Feature{
.lse,
}),
};
result[@intFromEnum(Feature.lse2)] = .{
.llvm_name = "lse2",
.description = "Enable ARMv8.4 Large System Extension 2 (LSE2) atomicity rules (FEAT_LSE2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.lsl_fast)] = .{
.llvm_name = "lsl-fast",
.description = "CPU has a fastpath logical shift of up to 3 places",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.mec)] = .{
.llvm_name = "mec",
.description = "Enable Memory Encryption Contexts Extension",
.dependencies = featureSet(&[_]Feature{
.rme,
}),
};
result[@intFromEnum(Feature.mops)] = .{
.llvm_name = "mops",
.description = "Enable Armv8.8-A memcpy and memset acceleration instructions (FEAT_MOPS)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.mpam)] = .{
.llvm_name = "mpam",
.description = "Enable v8.4-A Memory system Partitioning and Monitoring extension (FEAT_MPAM)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.mte)] = .{
.llvm_name = "mte",
.description = "Enable Memory Tagging Extension (FEAT_MTE, FEAT_MTE2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.neon)] = .{
.llvm_name = "neon",
.description = "Enable Advanced SIMD instructions (FEAT_AdvSIMD)",
.dependencies = featureSet(&[_]Feature{
.fp_armv8,
}),
};
result[@intFromEnum(Feature.nmi)] = .{
.llvm_name = "nmi",
.description = "Enable Armv8.8-A Non-maskable Interrupts (FEAT_NMI, FEAT_GICv3_NMI)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.no_bti_at_return_twice)] = .{
.llvm_name = "no-bti-at-return-twice",
.description = "Don't place a BTI instruction after a return-twice",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.no_neg_immediates)] = .{
.llvm_name = "no-neg-immediates",
.description = "Convert immediates and instructions to their negated or complemented equivalent when the immediate does not fit in the encoding.",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.no_sve_fp_ld1r)] = .{
.llvm_name = "no-sve-fp-ld1r",
.description = "Avoid using LD1RX instructions for FP",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.no_zcz_fp)] = .{
.llvm_name = "no-zcz-fp",
.description = "Has no zero-cycle zeroing instructions for FP registers",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.nv)] = .{
.llvm_name = "nv",
.description = "Enable v8.4-A Nested Virtualization Enchancement (FEAT_NV, FEAT_NV2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.outline_atomics)] = .{
.llvm_name = "outline-atomics",
.description = "Enable out of line atomics to support LSE instructions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.pan)] = .{
.llvm_name = "pan",
.description = "Enables ARM v8.1 Privileged Access-Never extension (FEAT_PAN)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.pan_rwv)] = .{
.llvm_name = "pan-rwv",
.description = "Enable v8.2 PAN s1e1R and s1e1W Variants (FEAT_PAN2)",
.dependencies = featureSet(&[_]Feature{
.pan,
}),
};
result[@intFromEnum(Feature.pauth)] = .{
.llvm_name = "pauth",
.description = "Enable v8.3-A Pointer Authentication extension (FEAT_PAuth)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.perfmon)] = .{
.llvm_name = "perfmon",
.description = "Enable Code Generation for ARMv8 PMUv3 Performance Monitors extension (FEAT_PMUv3)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.predictable_select_expensive)] = .{
.llvm_name = "predictable-select-expensive",
.description = "Prefer likely predicted branches over selects",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.predres)] = .{
.llvm_name = "predres",
.description = "Enable v8.5a execution and data prediction invalidation instructions (FEAT_SPECRES)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.prfm_slc_target)] = .{
.llvm_name = "prfm-slc-target",
.description = "Enable SLC target for PRFM instruction",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.rand)] = .{
.llvm_name = "rand",
.description = "Enable Random Number generation instructions (FEAT_RNG)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ras)] = .{
.llvm_name = "ras",
.description = "Enable ARMv8 Reliability, Availability and Serviceability Extensions (FEAT_RAS, FEAT_RASv1p1)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.rasv2)] = .{
.llvm_name = "rasv2",
.description = "Enable ARMv8.9-A Reliability, Availability and Serviceability Extensions (FEAT_RASv2)",
.dependencies = featureSet(&[_]Feature{
.ras,
}),
};
result[@intFromEnum(Feature.rcpc)] = .{
.llvm_name = "rcpc",
.description = "Enable support for RCPC extension (FEAT_LRCPC)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.rcpc3)] = .{
.llvm_name = "rcpc3",
.description = "Enable Armv8.9-A RCPC instructions for A64 and Advanced SIMD and floating-point instruction set (FEAT_LRCPC3)",
.dependencies = featureSet(&[_]Feature{
.rcpc_immo,
}),
};
result[@intFromEnum(Feature.rcpc_immo)] = .{
.llvm_name = "rcpc-immo",
.description = "Enable v8.4-A RCPC instructions with Immediate Offsets (FEAT_LRCPC2)",
.dependencies = featureSet(&[_]Feature{
.rcpc,
}),
};
result[@intFromEnum(Feature.rdm)] = .{
.llvm_name = "rdm",
.description = "Enable ARMv8.1 Rounding Double Multiply Add/Subtract instructions (FEAT_RDM)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x1)] = .{
.llvm_name = "reserve-x1",
.description = "Reserve X1, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x10)] = .{
.llvm_name = "reserve-x10",
.description = "Reserve X10, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x11)] = .{
.llvm_name = "reserve-x11",
.description = "Reserve X11, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x12)] = .{
.llvm_name = "reserve-x12",
.description = "Reserve X12, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x13)] = .{
.llvm_name = "reserve-x13",
.description = "Reserve X13, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x14)] = .{
.llvm_name = "reserve-x14",
.description = "Reserve X14, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x15)] = .{
.llvm_name = "reserve-x15",
.description = "Reserve X15, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x18)] = .{
.llvm_name = "reserve-x18",
.description = "Reserve X18, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x2)] = .{
.llvm_name = "reserve-x2",
.description = "Reserve X2, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x20)] = .{
.llvm_name = "reserve-x20",
.description = "Reserve X20, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x21)] = .{
.llvm_name = "reserve-x21",
.description = "Reserve X21, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x22)] = .{
.llvm_name = "reserve-x22",
.description = "Reserve X22, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x23)] = .{
.llvm_name = "reserve-x23",
.description = "Reserve X23, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x24)] = .{
.llvm_name = "reserve-x24",
.description = "Reserve X24, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x25)] = .{
.llvm_name = "reserve-x25",
.description = "Reserve X25, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x26)] = .{
.llvm_name = "reserve-x26",
.description = "Reserve X26, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x27)] = .{
.llvm_name = "reserve-x27",
.description = "Reserve X27, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x28)] = .{
.llvm_name = "reserve-x28",
.description = "Reserve X28, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x3)] = .{
.llvm_name = "reserve-x3",
.description = "Reserve X3, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x30)] = .{
.llvm_name = "reserve-x30",
.description = "Reserve X30, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x4)] = .{
.llvm_name = "reserve-x4",
.description = "Reserve X4, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x5)] = .{
.llvm_name = "reserve-x5",
.description = "Reserve X5, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x6)] = .{
.llvm_name = "reserve-x6",
.description = "Reserve X6, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x7)] = .{
.llvm_name = "reserve-x7",
.description = "Reserve X7, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.reserve_x9)] = .{
.llvm_name = "reserve-x9",
.description = "Reserve X9, making it unavailable as a GPR",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.rme)] = .{
.llvm_name = "rme",
.description = "Enable Realm Management Extension (FEAT_RME)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.sb)] = .{
.llvm_name = "sb",
.description = "Enable v8.5 Speculation Barrier (FEAT_SB)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.sel2)] = .{
.llvm_name = "sel2",
.description = "Enable v8.4-A Secure Exception Level 2 extension (FEAT_SEL2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.sha2)] = .{
.llvm_name = "sha2",
.description = "Enable SHA1 and SHA256 support (FEAT_SHA1, FEAT_SHA256)",
.dependencies = featureSet(&[_]Feature{
.neon,
}),
};
result[@intFromEnum(Feature.sha3)] = .{
.llvm_name = "sha3",
.description = "Enable SHA512 and SHA3 support (FEAT_SHA3, FEAT_SHA512)",
.dependencies = featureSet(&[_]Feature{
.sha2,
}),
};
result[@intFromEnum(Feature.slow_misaligned_128store)] = .{
.llvm_name = "slow-misaligned-128store",
.description = "Misaligned 128 bit stores are slow",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.slow_paired_128)] = .{
.llvm_name = "slow-paired-128",
.description = "Paired 128 bit loads and stores are slow",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.slow_strqro_store)] = .{
.llvm_name = "slow-strqro-store",
.description = "STR of Q register with register offset is slow",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.sm4)] = .{
.llvm_name = "sm4",
.description = "Enable SM3 and SM4 support (FEAT_SM4, FEAT_SM3)",
.dependencies = featureSet(&[_]Feature{
.neon,
}),
};
result[@intFromEnum(Feature.sme)] = .{
.llvm_name = "sme",
.description = "Enable Scalable Matrix Extension (SME) (FEAT_SME)",
.dependencies = featureSet(&[_]Feature{
.bf16,
.use_scalar_inc_vl,
}),
};
result[@intFromEnum(Feature.sme2)] = .{
.llvm_name = "sme2",
.description = "Enable Scalable Matrix Extension 2 (SME2) instructions",
.dependencies = featureSet(&[_]Feature{
.sme,
}),
};
result[@intFromEnum(Feature.sme2p1)] = .{
.llvm_name = "sme2p1",
.description = "Enable Scalable Matrix Extension 2.1 (FEAT_SME2p1) instructions",
.dependencies = featureSet(&[_]Feature{
.sme2,
}),
};
result[@intFromEnum(Feature.sme_f16f16)] = .{
.llvm_name = "sme-f16f16",
.description = "Enable SME2.1 non-widening Float16 instructions (FEAT_SME_F16F16)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.sme_f64f64)] = .{
.llvm_name = "sme-f64f64",
.description = "Enable Scalable Matrix Extension (SME) F64F64 instructions (FEAT_SME_F64F64)",
.dependencies = featureSet(&[_]Feature{
.sme,
}),
};
result[@intFromEnum(Feature.sme_i16i64)] = .{
.llvm_name = "sme-i16i64",
.description = "Enable Scalable Matrix Extension (SME) I16I64 instructions (FEAT_SME_I16I64)",
.dependencies = featureSet(&[_]Feature{
.sme,
}),
};
result[@intFromEnum(Feature.spe)] = .{
.llvm_name = "spe",
.description = "Enable Statistical Profiling extension (FEAT_SPE)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.spe_eef)] = .{
.llvm_name = "spe-eef",
.description = "Enable extra register in the Statistical Profiling Extension (FEAT_SPEv1p2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.specres2)] = .{
.llvm_name = "specres2",
.description = "Enable Speculation Restriction Instruction (FEAT_SPECRES2)",
.dependencies = featureSet(&[_]Feature{
.predres,
}),
};
result[@intFromEnum(Feature.specrestrict)] = .{
.llvm_name = "specrestrict",
.description = "Enable architectural speculation restriction (FEAT_CSV2_2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.ssbs)] = .{
.llvm_name = "ssbs",
.description = "Enable Speculative Store Bypass Safe bit (FEAT_SSBS, FEAT_SSBS2)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.strict_align)] = .{
.llvm_name = "strict-align",
.description = "Disallow all unaligned memory access",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.sve)] = .{
.llvm_name = "sve",
.description = "Enable Scalable Vector Extension (SVE) instructions (FEAT_SVE)",
.dependencies = featureSet(&[_]Feature{
.fullfp16,
}),
};
result[@intFromEnum(Feature.sve2)] = .{
.llvm_name = "sve2",
.description = "Enable Scalable Vector Extension 2 (SVE2) instructions (FEAT_SVE2)",
.dependencies = featureSet(&[_]Feature{
.sve,
.use_scalar_inc_vl,
}),
};
result[@intFromEnum(Feature.sve2_aes)] = .{
.llvm_name = "sve2-aes",
.description = "Enable AES SVE2 instructions (FEAT_SVE_AES, FEAT_SVE_PMULL128)",
.dependencies = featureSet(&[_]Feature{
.aes,
.sve2,
}),
};
result[@intFromEnum(Feature.sve2_bitperm)] = .{
.llvm_name = "sve2-bitperm",
.description = "Enable bit permutation SVE2 instructions (FEAT_SVE_BitPerm)",
.dependencies = featureSet(&[_]Feature{
.sve2,
}),
};
result[@intFromEnum(Feature.sve2_sha3)] = .{
.llvm_name = "sve2-sha3",
.description = "Enable SHA3 SVE2 instructions (FEAT_SVE_SHA3)",
.dependencies = featureSet(&[_]Feature{
.sha3,
.sve2,
}),
};
result[@intFromEnum(Feature.sve2_sm4)] = .{
.llvm_name = "sve2-sm4",
.description = "Enable SM4 SVE2 instructions (FEAT_SVE_SM4)",
.dependencies = featureSet(&[_]Feature{
.sm4,
.sve2,
}),
};
result[@intFromEnum(Feature.sve2p1)] = .{
.llvm_name = "sve2p1",
.description = "Enable Scalable Vector Extension 2.1 instructions",
.dependencies = featureSet(&[_]Feature{
.sve2,
}),
};
result[@intFromEnum(Feature.tagged_globals)] = .{
.llvm_name = "tagged-globals",
.description = "Use an instruction sequence for taking the address of a global that allows a memory tag in the upper address bits",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.the)] = .{
.llvm_name = "the",
.description = "Enable Armv8.9-A Translation Hardening Extension (FEAT_THE)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tlb_rmi)] = .{
.llvm_name = "tlb-rmi",
.description = "Enable v8.4-A TLB Range and Maintenance Instructions (FEAT_TLBIOS, FEAT_TLBIRANGE)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tme)] = .{
.llvm_name = "tme",
.description = "Enable Transactional Memory Extension (FEAT_TME)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tpidr_el1)] = .{
.llvm_name = "tpidr-el1",
.description = "Permit use of TPIDR_EL1 for the TLS base",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tpidr_el2)] = .{
.llvm_name = "tpidr-el2",
.description = "Permit use of TPIDR_EL2 for the TLS base",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tpidr_el3)] = .{
.llvm_name = "tpidr-el3",
.description = "Permit use of TPIDR_EL3 for the TLS base",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tpidrro_el0)] = .{
.llvm_name = "tpidrro-el0",
.description = "Permit use of TPIDRRO_EL0 for the TLS base",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.tracev8_4)] = .{
.llvm_name = "tracev8.4",
.description = "Enable v8.4-A Trace extension (FEAT_TRF)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.trbe)] = .{
.llvm_name = "trbe",
.description = "Enable Trace Buffer Extension (FEAT_TRBE)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.uaops)] = .{
.llvm_name = "uaops",
.description = "Enable v8.2 UAO PState (FEAT_UAO)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.use_experimental_zeroing_pseudos)] = .{
.llvm_name = "use-experimental-zeroing-pseudos",
.description = "Hint to the compiler that the MOVPRFX instruction is merged with destructive operations",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.use_postra_scheduler)] = .{
.llvm_name = "use-postra-scheduler",
.description = "Schedule again after register allocation",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.use_reciprocal_square_root)] = .{
.llvm_name = "use-reciprocal-square-root",
.description = "Use the reciprocal square root approximation",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.use_scalar_inc_vl)] = .{
.llvm_name = "use-scalar-inc-vl",
.description = "Prefer inc/dec over add+cnt",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.v8_1a)] = .{
.llvm_name = "v8.1a",
.description = "Support ARM v8.1a instructions",
.dependencies = featureSet(&[_]Feature{
.crc,
.lor,
.lse,
.pan,
.rdm,
.v8a,
.vh,
}),
};
result[@intFromEnum(Feature.v8_2a)] = .{
.llvm_name = "v8.2a",
.description = "Support ARM v8.2a instructions",
.dependencies = featureSet(&[_]Feature{
.ccpp,
.pan_rwv,
.ras,
.uaops,
.v8_1a,
}),
};
result[@intFromEnum(Feature.v8_3a)] = .{
.llvm_name = "v8.3a",
.description = "Support ARM v8.3a instructions",
.dependencies = featureSet(&[_]Feature{
.ccidx,
.complxnum,
.jsconv,
.pauth,
.rcpc,
.v8_2a,
}),
};
result[@intFromEnum(Feature.v8_4a)] = .{
.llvm_name = "v8.4a",
.description = "Support ARM v8.4a instructions",
.dependencies = featureSet(&[_]Feature{
.am,
.dit,
.dotprod,
.flagm,
.lse2,
.mpam,
.nv,
.rcpc_immo,
.sel2,
.tlb_rmi,
.tracev8_4,
.v8_3a,
}),
};
result[@intFromEnum(Feature.v8_5a)] = .{
.llvm_name = "v8.5a",
.description = "Support ARM v8.5a instructions",
.dependencies = featureSet(&[_]Feature{
.altnzcv,
.bti,
.ccdp,
.fptoint,
.predres,
.sb,
.specrestrict,
.ssbs,
.v8_4a,
}),
};
result[@intFromEnum(Feature.v8_6a)] = .{
.llvm_name = "v8.6a",
.description = "Support ARM v8.6a instructions",
.dependencies = featureSet(&[_]Feature{
.amvs,
.bf16,
.ecv,
.fgt,
.i8mm,
.v8_5a,
}),
};
result[@intFromEnum(Feature.v8_7a)] = .{
.llvm_name = "v8.7a",
.description = "Support ARM v8.7a instructions",
.dependencies = featureSet(&[_]Feature{
.hcx,
.v8_6a,
.wfxt,
.xs,
}),
};
result[@intFromEnum(Feature.v8_8a)] = .{
.llvm_name = "v8.8a",
.description = "Support ARM v8.8a instructions",
.dependencies = featureSet(&[_]Feature{
.hbc,
.mops,
.nmi,
.v8_7a,
}),
};
result[@intFromEnum(Feature.v8_9a)] = .{
.llvm_name = "v8.9a",
.description = "Support ARM v8.9a instructions",
.dependencies = featureSet(&[_]Feature{
.chk,
.clrbhb,
.cssc,
.prfm_slc_target,
.rasv2,
.specres2,
.v8_8a,
}),
};
result[@intFromEnum(Feature.v8a)] = .{
.llvm_name = "v8a",
.description = "Support ARM v8.0a instructions",
.dependencies = featureSet(&[_]Feature{
.el2vmsa,
.el3,
.neon,
}),
};
result[@intFromEnum(Feature.v8r)] = .{
.llvm_name = "v8r",
.description = "Support ARM v8r instructions",
.dependencies = featureSet(&[_]Feature{
.ccidx,
.ccpp,
.complxnum,
.contextidr_el2,
.crc,
.dit,
.dotprod,
.flagm,
.jsconv,
.lse,
.pan_rwv,
.pauth,
.ras,
.rcpc_immo,
.rdm,
.sel2,
.specrestrict,
.tlb_rmi,
.tracev8_4,
.uaops,
}),
};
result[@intFromEnum(Feature.v9_1a)] = .{
.llvm_name = "v9.1a",
.description = "Support ARM v9.1a instructions",
.dependencies = featureSet(&[_]Feature{
.v8_6a,
.v9a,
}),
};
result[@intFromEnum(Feature.v9_2a)] = .{
.llvm_name = "v9.2a",
.description = "Support ARM v9.2a instructions",
.dependencies = featureSet(&[_]Feature{
.v8_7a,
.v9_1a,
}),
};
result[@intFromEnum(Feature.v9_3a)] = .{
.llvm_name = "v9.3a",
.description = "Support ARM v9.3a instructions",
.dependencies = featureSet(&[_]Feature{
.v8_8a,
.v9_2a,
}),
};
result[@intFromEnum(Feature.v9_4a)] = .{
.llvm_name = "v9.4a",
.description = "Support ARM v9.4a instructions",
.dependencies = featureSet(&[_]Feature{
.v8_9a,
.v9_3a,
}),
};
result[@intFromEnum(Feature.v9a)] = .{
.llvm_name = "v9a",
.description = "Support ARM v9a instructions",
.dependencies = featureSet(&[_]Feature{
.mec,
.sve2,
.v8_5a,
}),
};
result[@intFromEnum(Feature.vh)] = .{
.llvm_name = "vh",
.description = "Enables ARM v8.1 Virtual Host extension (FEAT_VHE)",
.dependencies = featureSet(&[_]Feature{
.contextidr_el2,
}),
};
result[@intFromEnum(Feature.wfxt)] = .{
.llvm_name = "wfxt",
.description = "Enable Armv8.7-A WFET and WFIT instruction (FEAT_WFxT)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.xs)] = .{
.llvm_name = "xs",
.description = "Enable Armv8.7-A limited-TLB-maintenance instruction (FEAT_XS)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.zcm)] = .{
.llvm_name = "zcm",
.description = "Has zero-cycle register moves",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.zcz)] = .{
.llvm_name = "zcz",
.description = "Has zero-cycle zeroing instructions",
.dependencies = featureSet(&[_]Feature{
.zcz_gp,
}),
};
result[@intFromEnum(Feature.zcz_fp_workaround)] = .{
.llvm_name = "zcz-fp-workaround",
.description = "The zero-cycle floating-point zeroing instruction has a bug",
.dependencies = featureSet(&[_]Feature{}),
};
result[@intFromEnum(Feature.zcz_gp)] = .{
.llvm_name = "zcz-gp",
.description = "Has zero-cycle zeroing instructions for generic registers",
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
break :blk result;
};
pub const cpu = struct {
pub const a64fx = CpuModel{
.name = "a64fx",
.llvm_name = "a64fx",
.features = featureSet(&[_]Feature{
.aggressive_fma,
.arith_bcc_fusion,
.complxnum,
.perfmon,
.predictable_select_expensive,
.sha2,
.sve,
.use_postra_scheduler,
.v8_2a,
}),
};
pub const ampere1 = CpuModel{
.name = "ampere1",
.llvm_name = "ampere1",
.features = featureSet(&[_]Feature{
.aes,
.aggressive_fma,
.arith_bcc_fusion,
.cmp_bcc_fusion,
.fuse_address,
.fuse_aes,
.fuse_literals,
.lsl_fast,
.perfmon,
.rand,
.sha3,
.use_postra_scheduler,
.v8_6a,
}),
};
pub const ampere1a = CpuModel{
.name = "ampere1a",
.llvm_name = "ampere1a",
.features = featureSet(&[_]Feature{
.aes,
.aggressive_fma,
.arith_bcc_fusion,
.cmp_bcc_fusion,
.fuse_address,
.fuse_aes,
.fuse_literals,
.lsl_fast,
.mte,
.perfmon,
.rand,
.sha3,
.sm4,
.use_postra_scheduler,
.v8_6a,
}),
};
pub const apple_a10 = CpuModel{
.name = "apple_a10",
.llvm_name = "apple-a10",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crc,
.crypto,
.disable_latency_sched_heuristic,
.fuse_aes,
.fuse_crypto_eor,
.lor,
.pan,
.perfmon,
.rdm,
.v8a,
.vh,
.zcm,
.zcz,
}),
};
pub const apple_a11 = CpuModel{
.name = "apple_a11",
.llvm_name = "apple-a11",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.disable_latency_sched_heuristic,
.fullfp16,
.fuse_aes,
.fuse_crypto_eor,
.perfmon,
.v8_2a,
.zcm,
.zcz,
}),
};
pub const apple_a12 = CpuModel{
.name = "apple_a12",
.llvm_name = "apple-a12",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.disable_latency_sched_heuristic,
.fullfp16,
.fuse_aes,
.fuse_crypto_eor,
.perfmon,
.v8_3a,
.zcm,
.zcz,
}),
};
pub const apple_a13 = CpuModel{
.name = "apple_a13",
.llvm_name = "apple-a13",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.disable_latency_sched_heuristic,
.fp16fml,
.fuse_aes,
.fuse_crypto_eor,
.perfmon,
.sha3,
.v8_4a,
.zcm,
.zcz,
}),
};
pub const apple_a14 = CpuModel{
.name = "apple_a14",
.llvm_name = "apple-a14",
.features = featureSet(&[_]Feature{
.aggressive_fma,
.alternate_sextload_cvt_f32_pattern,
.altnzcv,
.arith_bcc_fusion,
.arith_cbz_fusion,
.ccdp,
.crypto,
.disable_latency_sched_heuristic,
.fp16fml,
.fptoint,
.fuse_address,
.fuse_adrp_add,
.fuse_aes,
.fuse_arith_logic,
.fuse_crypto_eor,
.fuse_csel,
.fuse_literals,
.perfmon,
.predres,
.sb,
.sha3,
.specrestrict,
.ssbs,
.v8_4a,
.zcm,
.zcz,
}),
};
pub const apple_a15 = CpuModel{
.name = "apple_a15",
.llvm_name = "apple-a15",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.disable_latency_sched_heuristic,
.fp16fml,
.fuse_address,
.fuse_aes,
.fuse_arith_logic,
.fuse_crypto_eor,
.fuse_csel,
.fuse_literals,
.perfmon,
.sha3,
.v8_6a,
.zcm,
.zcz,
}),
};
pub const apple_a16 = CpuModel{
.name = "apple_a16",
.llvm_name = "apple-a16",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.disable_latency_sched_heuristic,
.fp16fml,
.fuse_address,
.fuse_aes,
.fuse_arith_logic,
.fuse_crypto_eor,
.fuse_csel,
.fuse_literals,
.hcx,
.perfmon,
.sha3,
.v8_6a,
.zcm,
.zcz,
}),
};
pub const apple_a7 = CpuModel{
.name = "apple_a7",
.llvm_name = "apple-a7",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.disable_latency_sched_heuristic,
.fuse_aes,
.fuse_crypto_eor,
.perfmon,
.v8a,
.zcm,
.zcz,
.zcz_fp_workaround,
}),
};
pub const apple_a8 = CpuModel{
.name = "apple_a8",
.llvm_name = "apple-a8",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.disable_latency_sched_heuristic,
.fuse_aes,
.fuse_crypto_eor,
.perfmon,
.v8a,
.zcm,
.zcz,
.zcz_fp_workaround,
}),
};
pub const apple_a9 = CpuModel{
.name = "apple_a9",
.llvm_name = "apple-a9",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.disable_latency_sched_heuristic,
.fuse_aes,
.fuse_crypto_eor,
.perfmon,
.v8a,
.zcm,
.zcz,
.zcz_fp_workaround,
}),
};
pub const apple_latest = CpuModel{
.name = "apple_latest",
.llvm_name = "apple-latest",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.disable_latency_sched_heuristic,
.fp16fml,
.fuse_address,
.fuse_aes,
.fuse_arith_logic,
.fuse_crypto_eor,
.fuse_csel,
.fuse_literals,
.hcx,
.perfmon,
.sha3,
.v8_6a,
.zcm,
.zcz,
}),
};
pub const apple_m1 = CpuModel{
.name = "apple_m1",
.llvm_name = "apple-m1",
.features = featureSet(&[_]Feature{
.aggressive_fma,
.alternate_sextload_cvt_f32_pattern,
.altnzcv,
.arith_bcc_fusion,
.arith_cbz_fusion,
.ccdp,
.crypto,
.disable_latency_sched_heuristic,
.fp16fml,
.fptoint,
.fuse_address,
.fuse_adrp_add,
.fuse_aes,
.fuse_arith_logic,
.fuse_crypto_eor,
.fuse_csel,
.fuse_literals,
.perfmon,
.predres,
.sb,
.sha3,
.specrestrict,
.ssbs,
.v8_4a,
.zcm,
.zcz,
}),
};
pub const apple_m2 = CpuModel{
.name = "apple_m2",
.llvm_name = "apple-m2",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.disable_latency_sched_heuristic,
.fp16fml,
.fuse_address,
.fuse_aes,
.fuse_arith_logic,
.fuse_crypto_eor,
.fuse_csel,
.fuse_literals,
.perfmon,
.sha3,
.v8_6a,
.zcm,
.zcz,
}),
};
pub const apple_s4 = CpuModel{
.name = "apple_s4",
.llvm_name = "apple-s4",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.disable_latency_sched_heuristic,
.fullfp16,
.fuse_aes,
.fuse_crypto_eor,
.perfmon,
.v8_3a,
.zcm,
.zcz,
}),
};
pub const apple_s5 = CpuModel{
.name = "apple_s5",
.llvm_name = "apple-s5",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.disable_latency_sched_heuristic,
.fullfp16,
.fuse_aes,
.fuse_crypto_eor,
.perfmon,
.v8_3a,
.zcm,
.zcz,
}),
};
pub const carmel = CpuModel{
.name = "carmel",
.llvm_name = "carmel",
.features = featureSet(&[_]Feature{
.crypto,
.fullfp16,
.v8_2a,
}),
};
pub const cortex_a34 = CpuModel{
.name = "cortex_a34",
.llvm_name = "cortex-a34",
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.perfmon,
.v8a,
}),
};
pub const cortex_a35 = CpuModel{
.name = "cortex_a35",
.llvm_name = "cortex-a35",
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.perfmon,
.v8a,
}),
};
pub const cortex_a510 = CpuModel{
.name = "cortex_a510",
.llvm_name = "cortex-a510",
.features = featureSet(&[_]Feature{
.a510,
.bf16,
.ete,
.fp16fml,
.i8mm,
.mte,
.perfmon,
.sve2_bitperm,
.v9a,
}),
};
pub const cortex_a53 = CpuModel{
.name = "cortex_a53",
.llvm_name = "cortex-a53",
.features = featureSet(&[_]Feature{
.balance_fp_ops,
.crc,
.crypto,
.custom_cheap_as_move,
.fuse_adrp_add,
.fuse_aes,
.perfmon,
.use_postra_scheduler,
.v8a,
}),
};
pub const cortex_a55 = CpuModel{
.name = "cortex_a55",
.llvm_name = "cortex-a55",
.features = featureSet(&[_]Feature{
.crypto,
.dotprod,
.fullfp16,
.fuse_address,
.fuse_adrp_add,
.fuse_aes,
.perfmon,
.rcpc,
.use_postra_scheduler,
.v8_2a,
}),
};
pub const cortex_a57 = CpuModel{
.name = "cortex_a57",
.llvm_name = "cortex-a57",
.features = featureSet(&[_]Feature{
.balance_fp_ops,
.crc,
.crypto,
.custom_cheap_as_move,
.enable_select_opt,
.fuse_adrp_add,
.fuse_aes,
.fuse_literals,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8a,
}),
};
pub const cortex_a65 = CpuModel{
.name = "cortex_a65",
.llvm_name = "cortex-a65",
.features = featureSet(&[_]Feature{
.a65,
.crypto,
.dotprod,
.fullfp16,
.perfmon,
.rcpc,
.ssbs,
.v8_2a,
}),
};
pub const cortex_a65ae = CpuModel{
.name = "cortex_a65ae",
.llvm_name = "cortex-a65ae",
.features = featureSet(&[_]Feature{
.a65,
.crypto,
.dotprod,
.fullfp16,
.perfmon,
.rcpc,
.ssbs,
.v8_2a,
}),
};
pub const cortex_a710 = CpuModel{
.name = "cortex_a710",
.llvm_name = "cortex-a710",
.features = featureSet(&[_]Feature{
.a710,
.bf16,
.ete,
.fp16fml,
.i8mm,
.mte,
.perfmon,
.sve2_bitperm,
.v9a,
}),
};
pub const cortex_a715 = CpuModel{
.name = "cortex_a715",
.llvm_name = "cortex-a715",
.features = featureSet(&[_]Feature{
.bf16,
.cmp_bcc_fusion,
.enable_select_opt,
.ete,
.fp16fml,
.fuse_adrp_add,
.fuse_aes,
.i8mm,
.lsl_fast,
.mte,
.perfmon,
.predictable_select_expensive,
.spe,
.sve2_bitperm,
.use_postra_scheduler,
.v9a,
}),
};
pub const cortex_a72 = CpuModel{
.name = "cortex_a72",
.llvm_name = "cortex-a72",
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.enable_select_opt,
.fuse_adrp_add,
.fuse_aes,
.fuse_literals,
.perfmon,
.predictable_select_expensive,
.v8a,
}),
};
pub const cortex_a73 = CpuModel{
.name = "cortex_a73",
.llvm_name = "cortex-a73",
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.enable_select_opt,
.fuse_adrp_add,
.fuse_aes,
.perfmon,
.predictable_select_expensive,
.v8a,
}),
};
pub const cortex_a75 = CpuModel{
.name = "cortex_a75",
.llvm_name = "cortex-a75",
.features = featureSet(&[_]Feature{
.crypto,
.dotprod,
.enable_select_opt,
.fullfp16,
.fuse_adrp_add,
.fuse_aes,
.perfmon,
.predictable_select_expensive,
.rcpc,
.v8_2a,
}),
};
pub const cortex_a76 = CpuModel{
.name = "cortex_a76",
.llvm_name = "cortex-a76",
.features = featureSet(&[_]Feature{
.a76,
.crypto,
.dotprod,
.fullfp16,
.perfmon,
.rcpc,
.ssbs,
.v8_2a,
}),
};
pub const cortex_a76ae = CpuModel{
.name = "cortex_a76ae",
.llvm_name = "cortex-a76ae",
.features = featureSet(&[_]Feature{
.a76,
.crypto,
.dotprod,
.fullfp16,
.perfmon,
.rcpc,
.ssbs,
.v8_2a,
}),
};
pub const cortex_a77 = CpuModel{
.name = "cortex_a77",
.llvm_name = "cortex-a77",
.features = featureSet(&[_]Feature{
.cmp_bcc_fusion,
.crypto,
.dotprod,
.enable_select_opt,
.fullfp16,
.fuse_adrp_add,
.fuse_aes,
.lsl_fast,
.perfmon,
.predictable_select_expensive,
.rcpc,
.ssbs,
.v8_2a,
}),
};
pub const cortex_a78 = CpuModel{
.name = "cortex_a78",
.llvm_name = "cortex-a78",
.features = featureSet(&[_]Feature{
.a78,
.crypto,
.dotprod,
.fullfp16,
.perfmon,
.rcpc,
.spe,
.ssbs,
.v8_2a,
}),
};
pub const cortex_a78c = CpuModel{
.name = "cortex_a78c",
.llvm_name = "cortex-a78c",
.features = featureSet(&[_]Feature{
.a78c,
.crypto,
.dotprod,
.flagm,
.fp16fml,
.pauth,
.perfmon,
.rcpc,
.spe,
.ssbs,
.v8_2a,
}),
};
pub const cortex_r82 = CpuModel{
.name = "cortex_r82",
.llvm_name = "cortex-r82",
.features = featureSet(&[_]Feature{
.cortex_r82,
.fp16fml,
.perfmon,
.predres,
.sb,
.ssbs,
.v8r,
}),
};
pub const cortex_x1 = CpuModel{
.name = "cortex_x1",
.llvm_name = "cortex-x1",
.features = featureSet(&[_]Feature{
.cmp_bcc_fusion,
.crypto,
.dotprod,
.enable_select_opt,
.fullfp16,
.fuse_adrp_add,
.fuse_aes,
.lsl_fast,
.perfmon,
.predictable_select_expensive,
.rcpc,
.spe,
.ssbs,
.use_postra_scheduler,
.v8_2a,
}),
};
pub const cortex_x1c = CpuModel{
.name = "cortex_x1c",
.llvm_name = "cortex-x1c",
.features = featureSet(&[_]Feature{
.cmp_bcc_fusion,
.crypto,
.dotprod,
.enable_select_opt,
.flagm,
.fullfp16,
.fuse_adrp_add,
.fuse_aes,
.lse2,
.lsl_fast,
.pauth,
.perfmon,
.predictable_select_expensive,
.rcpc_immo,
.spe,
.ssbs,
.use_postra_scheduler,
.v8_2a,
}),
};
pub const cortex_x2 = CpuModel{
.name = "cortex_x2",
.llvm_name = "cortex-x2",
.features = featureSet(&[_]Feature{
.bf16,
.cmp_bcc_fusion,
.enable_select_opt,
.ete,
.fp16fml,
.fuse_adrp_add,
.fuse_aes,
.i8mm,
.lsl_fast,
.mte,
.perfmon,
.predictable_select_expensive,
.sve2_bitperm,
.use_postra_scheduler,
.v9a,
}),
};
pub const cortex_x3 = CpuModel{
.name = "cortex_x3",
.llvm_name = "cortex-x3",
.features = featureSet(&[_]Feature{
.bf16,
.enable_select_opt,
.ete,
.fp16fml,
.fuse_adrp_add,
.fuse_aes,
.i8mm,
.lsl_fast,
.mte,
.perfmon,
.predictable_select_expensive,
.spe,
.sve2_bitperm,
.use_postra_scheduler,
.v9a,
}),
};
pub const cyclone = CpuModel{
.name = "cyclone",
.llvm_name = "cyclone",
.features = featureSet(&[_]Feature{
.alternate_sextload_cvt_f32_pattern,
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.disable_latency_sched_heuristic,
.fuse_aes,
.fuse_crypto_eor,
.perfmon,
.v8a,
.zcm,
.zcz,
.zcz_fp_workaround,
}),
};
pub const emag = CpuModel{
.name = "emag",
.llvm_name = null,
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.perfmon,
.v8a,
}),
};
pub const exynos_m1 = CpuModel{
.name = "exynos_m1",
.llvm_name = null,
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.exynos_cheap_as_move,
.force_32bit_jump_tables,
.fuse_aes,
.perfmon,
.slow_misaligned_128store,
.slow_paired_128,
.use_postra_scheduler,
.use_reciprocal_square_root,
.v8a,
}),
};
pub const exynos_m2 = CpuModel{
.name = "exynos_m2",
.llvm_name = null,
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.exynos_cheap_as_move,
.force_32bit_jump_tables,
.fuse_aes,
.perfmon,
.slow_misaligned_128store,
.slow_paired_128,
.use_postra_scheduler,
.v8a,
}),
};
pub const exynos_m3 = CpuModel{
.name = "exynos_m3",
.llvm_name = "exynos-m3",
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.exynos_cheap_as_move,
.force_32bit_jump_tables,
.fuse_address,
.fuse_adrp_add,
.fuse_aes,
.fuse_csel,
.fuse_literals,
.lsl_fast,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8a,
}),
};
pub const exynos_m4 = CpuModel{
.name = "exynos_m4",
.llvm_name = "exynos-m4",
.features = featureSet(&[_]Feature{
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.dotprod,
.exynos_cheap_as_move,
.force_32bit_jump_tables,
.fullfp16,
.fuse_address,
.fuse_adrp_add,
.fuse_aes,
.fuse_arith_logic,
.fuse_csel,
.fuse_literals,
.lsl_fast,
.perfmon,
.use_postra_scheduler,
.v8_2a,
.zcz,
}),
};
pub const exynos_m5 = CpuModel{
.name = "exynos_m5",
.llvm_name = "exynos-m5",
.features = featureSet(&[_]Feature{
.arith_bcc_fusion,
.arith_cbz_fusion,
.crypto,
.dotprod,
.exynos_cheap_as_move,
.force_32bit_jump_tables,
.fullfp16,
.fuse_address,
.fuse_adrp_add,
.fuse_aes,
.fuse_arith_logic,
.fuse_csel,
.fuse_literals,
.lsl_fast,
.perfmon,
.use_postra_scheduler,
.v8_2a,
.zcz,
}),
};
pub const falkor = CpuModel{
.name = "falkor",
.llvm_name = "falkor",
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.custom_cheap_as_move,
.lsl_fast,
.perfmon,
.predictable_select_expensive,
.rdm,
.slow_strqro_store,
.use_postra_scheduler,
.v8a,
.zcz,
}),
};
pub const generic = CpuModel{
.name = "generic",
.llvm_name = "generic",
.features = featureSet(&[_]Feature{
.enable_select_opt,
.ete,
.fuse_adrp_add,
.fuse_aes,
.neon,
.use_postra_scheduler,
}),
};
pub const kryo = CpuModel{
.name = "kryo",
.llvm_name = "kryo",
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.custom_cheap_as_move,
.lsl_fast,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8a,
.zcz,
}),
};
pub const neoverse_512tvb = CpuModel{
.name = "neoverse_512tvb",
.llvm_name = "neoverse-512tvb",
.features = featureSet(&[_]Feature{
.bf16,
.ccdp,
.crypto,
.enable_select_opt,
.fp16fml,
.fuse_adrp_add,
.fuse_aes,
.i8mm,
.lsl_fast,
.perfmon,
.predictable_select_expensive,
.rand,
.spe,
.ssbs,
.sve,
.use_postra_scheduler,
.v8_4a,
}),
};
pub const neoverse_e1 = CpuModel{
.name = "neoverse_e1",
.llvm_name = "neoverse-e1",
.features = featureSet(&[_]Feature{
.crypto,
.dotprod,
.fullfp16,
.fuse_adrp_add,
.fuse_aes,
.perfmon,
.rcpc,
.ssbs,
.use_postra_scheduler,
.v8_2a,
}),
};
pub const neoverse_n1 = CpuModel{
.name = "neoverse_n1",
.llvm_name = "neoverse-n1",
.features = featureSet(&[_]Feature{
.crypto,
.dotprod,
.enable_select_opt,
.fullfp16,
.fuse_adrp_add,
.fuse_aes,
.lsl_fast,
.perfmon,
.predictable_select_expensive,
.rcpc,
.spe,
.ssbs,
.use_postra_scheduler,
.v8_2a,
}),
};
pub const neoverse_n2 = CpuModel{
.name = "neoverse_n2",
.llvm_name = "neoverse-n2",
.features = featureSet(&[_]Feature{
.bf16,
.crypto,
.enable_select_opt,
.ete,
.fuse_adrp_add,
.fuse_aes,
.i8mm,
.lsl_fast,
.mte,
.perfmon,
.predictable_select_expensive,
.sve2_bitperm,
.use_postra_scheduler,
.v8_5a,
}),
};
pub const neoverse_v1 = CpuModel{
.name = "neoverse_v1",
.llvm_name = "neoverse-v1",
.features = featureSet(&[_]Feature{
.bf16,
.ccdp,
.crypto,
.enable_select_opt,
.fp16fml,
.fuse_adrp_add,
.fuse_aes,
.i8mm,
.lsl_fast,
.no_sve_fp_ld1r,
.perfmon,
.predictable_select_expensive,
.rand,
.spe,
.ssbs,
.sve,
.use_postra_scheduler,
.v8_4a,
}),
};
pub const neoverse_v2 = CpuModel{
.name = "neoverse_v2",
.llvm_name = "neoverse-v2",
.features = featureSet(&[_]Feature{
.bf16,
.enable_select_opt,
.ete,
.fp16fml,
.fuse_aes,
.i8mm,
.lsl_fast,
.mte,
.perfmon,
.predictable_select_expensive,
.rand,
.spe,
.sve2_bitperm,
.use_postra_scheduler,
.v9a,
}),
};
pub const saphira = CpuModel{
.name = "saphira",
.llvm_name = "saphira",
.features = featureSet(&[_]Feature{
.crypto,
.custom_cheap_as_move,
.lsl_fast,
.perfmon,
.predictable_select_expensive,
.spe,
.use_postra_scheduler,
.v8_4a,
.zcz,
}),
};
pub const thunderx = CpuModel{
.name = "thunderx",
.llvm_name = "thunderx",
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8a,
}),
};
pub const thunderx2t99 = CpuModel{
.name = "thunderx2t99",
.llvm_name = "thunderx2t99",
.features = featureSet(&[_]Feature{
.aggressive_fma,
.arith_bcc_fusion,
.crypto,
.predictable_select_expensive,
.use_postra_scheduler,
.v8_1a,
}),
};
pub const thunderx3t110 = CpuModel{
.name = "thunderx3t110",
.llvm_name = "thunderx3t110",
.features = featureSet(&[_]Feature{
.aggressive_fma,
.arith_bcc_fusion,
.balance_fp_ops,
.crypto,
.perfmon,
.predictable_select_expensive,
.strict_align,
.use_postra_scheduler,
.v8_3a,
}),
};
pub const thunderxt81 = CpuModel{
.name = "thunderxt81",
.llvm_name = "thunderxt81",
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8a,
}),
};
pub const thunderxt83 = CpuModel{
.name = "thunderxt83",
.llvm_name = "thunderxt83",
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8a,
}),
};
pub const thunderxt88 = CpuModel{
.name = "thunderxt88",
.llvm_name = "thunderxt88",
.features = featureSet(&[_]Feature{
.crc,
.crypto,
.perfmon,
.predictable_select_expensive,
.use_postra_scheduler,
.v8a,
}),
};
pub const tsv110 = CpuModel{
.name = "tsv110",
.llvm_name = "tsv110",
.features = featureSet(&[_]Feature{
.crypto,
.custom_cheap_as_move,
.dotprod,
.fp16fml,
.fuse_aes,
.perfmon,
.spe,
.use_postra_scheduler,
.v8_2a,
}),
};
pub const xgene1 = CpuModel{
.name = "xgene1",
.llvm_name = null,
.features = featureSet(&[_]Feature{
.perfmon,
.v8a,
}),
};
};
| https://raw.githubusercontent.com/2lambda123/ziglang-zig-bootstrap/f56dc0fd298f41c8cc2a4f76a9648111e6c75503/zig/lib/std/Target/aarch64.zig |
//
// One of the more common uses of 'comptime' function parameters is
// passing a type to a function:
//
// fn foo(comptime MyType: type) void { ... }
//
// In fact, types are ONLY available at compile time, so the
// 'comptime' keyword is required here.
//
// Please take a moment to put on the wizard hat which has been
// provided for you. We're about to use this ability to implement
// a generic function.
//
const print = @import("std").debug.print;
pub fn main() void {
    // Build three fixed-size arrays with different element types and
    // lengths. Both the type and the length are resolved entirely at
    // compile time, so each call produces a distinct array type.
    const bytes = makeSequence(u8, 3); // creates a [3]u8
    const words = makeSequence(u32, 5); // creates a [5]u32
    const longs = makeSequence(i64, 7); // creates a [7]i64
    print("s1={any}, s2={any}, s3={any}\n", .{ bytes, words, longs });
}
// This function is pretty wild because it executes at runtime
// and is part of the final compiled program. The function is
// compiled with unchanging data sizes and types.
//
// And yet it ALSO allows for different sizes and types. This
// seems paradoxical. How could both things be true?
//
// To accomplish this, the Zig compiler actually generates a
// separate copy of the function for every size/type combination!
// So in this case, three different functions will be generated
// for you, each with machine code that handles that specific
// data size and type.
//
// Please fix this function so that the 'size' parameter:
//
// 1) Is guaranteed to be known at compile time.
// 2) Sets the size of the array of type T (which is the
// sequence we're creating and returning).
//
// Builds the sequence [1, 2, ..., size] as a value of type [size]T.
//
// Both parameters must be comptime-known: T because types only exist at
// compile time, and size because it is used in the array type [size]T —
// an array length is part of the type and must be a compile-time value.
// The compiler generates a separate copy of this function for each
// distinct (T, size) pair used by callers.
fn makeSequence(comptime T: type, comptime size: usize) [size]T {
    var sequence: [size]T = undefined;
    var i: usize = 0;
    while (i < size) : (i += 1) {
        sequence[i] = @intCast(T, i) + 1;
    }
    return sequence;
}
| https://raw.githubusercontent.com/arpangreat/my_ziglings/781f88d8c8eb12b15349462e176b7fb7b384925f/exercises/069_comptime4.zig |
//
// One of the more common uses of 'comptime' function parameters is
// passing a type to a function:
//
// fn foo(comptime MyType: type) void { ... }
//
// In fact, types are ONLY available at compile time, so the
// 'comptime' keyword is required here.
//
// Please take a moment to put on the wizard hat which has been
// provided for you. We're about to use this ability to implement
// a generic function.
//
const print = @import("std").debug.print;
pub fn main() void {
    // Build three fixed-size arrays with different element types and
    // lengths. Both the type and the length are resolved entirely at
    // compile time, so each call produces a distinct array type.
    const bytes = makeSequence(u8, 3); // creates a [3]u8
    const words = makeSequence(u32, 5); // creates a [5]u32
    const longs = makeSequence(i64, 7); // creates a [7]i64
    print("s1={any}, s2={any}, s3={any}\n", .{ bytes, words, longs });
}
// This function is pretty wild because it executes at runtime
// and is part of the final compiled program. The function is
// compiled with unchanging data sizes and types.
//
// And yet it ALSO allows for different sizes and types. This
// seems paradoxical. How could both things be true?
//
// To accomplish this, the Zig compiler actually generates a
// separate copy of the function for every size/type combination!
// So in this case, three different functions will be generated
// for you, each with machine code that handles that specific
// data size and type.
//
// Please fix this function so that the 'size' parameter:
//
// 1) Is guaranteed to be known at compile time.
// 2) Sets the size of the array of type T (which is the
// sequence we're creating and returning).
//
// Builds the sequence [1, 2, ..., size] as a value of type [size]T.
//
// Both parameters must be comptime-known: T because types only exist at
// compile time, and size because it is used in the array type [size]T —
// an array length is part of the type and must be a compile-time value.
// The compiler generates a separate copy of this function for each
// distinct (T, size) pair used by callers.
fn makeSequence(comptime T: type, comptime size: usize) [size]T {
    var sequence: [size]T = undefined;
    var i: usize = 0;
    while (i < size) : (i += 1) {
        sequence[i] = @intCast(T, i) + 1;
    }
    return sequence;
}
| https://raw.githubusercontent.com/stdrc/ziglings/68a151a8d7c0d69e8dd6590026758dba8c82150a/exercises/069_comptime4.zig |
//
// One of the more common uses of 'comptime' function parameters is
// passing a type to a function:
//
// fn foo(comptime MyType: type) void { ... }
//
// In fact, types are ONLY available at compile time, so the
// 'comptime' keyword is required here.
//
// Please take a moment to put on the wizard hat which has been
// provided for you. We're about to use this ability to implement
// a generic function.
//
const print = @import("std").debug.print;
pub fn main() void {
    // Build three fixed-size arrays with different element types and
    // lengths. Both the type and the length are resolved entirely at
    // compile time, so each call produces a distinct array type.
    const bytes = makeSequence(u8, 3); // creates a [3]u8
    const words = makeSequence(u32, 5); // creates a [5]u32
    const longs = makeSequence(i64, 7); // creates a [7]i64
    print("s1={any}, s2={any}, s3={any}\n", .{ bytes, words, longs });
}
// This function is pretty wild because it executes at runtime
// and is part of the final compiled program. The function is
// compiled with unchanging data sizes and types.
//
// And yet it ALSO allows for different sizes and types. This
// seems paradoxical. How could both things be true?
//
// To accomplish this, the Zig compiler actually generates a
// separate copy of the function for every size/type combination!
// So in this case, three different functions will be generated
// for you, each with machine code that handles that specific
// data size and type.
//
// Please fix this function so that the 'size' parameter:
//
// 1) Is guaranteed to be known at compile time.
// 2) Sets the size of the array of type T (which is the
// sequence we're creating and returning).
//
// Builds the sequence [1, 2, ..., size] as a value of type [size]T.
//
// Both parameters must be comptime-known: T because types only exist at
// compile time, and size because it is used in the array type [size]T —
// an array length is part of the type and must be a compile-time value.
// The compiler generates a separate copy of this function for each
// distinct (T, size) pair used by callers.
fn makeSequence(comptime T: type, comptime size: usize) [size]T {
    var sequence: [size]T = undefined;
    var i: usize = 0;
    while (i < size) : (i += 1) {
        sequence[i] = @intCast(T, i) + 1;
    }
    return sequence;
}
| https://raw.githubusercontent.com/blotta/ziglings/b5ad06ab1f28683bc6931ab1fa6f3238113df73d/exercises/069_comptime4.zig |
//
// One of the more common uses of 'comptime' function parameters is
// passing a type to a function:
//
// fn foo(comptime MyType: type) void { ... }
//
// In fact, types are ONLY available at compile time, so the
// 'comptime' keyword is required here.
//
// Please take a moment to put on the wizard hat which has been
// provided for you. We're about to use this ability to implement
// a generic function.
//
const print = @import("std").debug.print;
pub fn main() void {
    // Build three fixed-size arrays with different element types and
    // lengths. Both the type and the length are resolved entirely at
    // compile time, so each call produces a distinct array type.
    const bytes = makeSequence(u8, 3); // creates a [3]u8
    const words = makeSequence(u32, 5); // creates a [5]u32
    const longs = makeSequence(i64, 7); // creates a [7]i64
    print("s1={any}, s2={any}, s3={any}\n", .{ bytes, words, longs });
}
// This function is pretty wild because it executes at runtime
// and is part of the final compiled program. The function is
// compiled with unchanging data sizes and types.
//
// And yet it ALSO allows for different sizes and types. This
// seems paradoxical. How could both things be true?
//
// To accomplish this, the Zig compiler actually generates a
// separate copy of the function for every size/type combination!
// So in this case, three different functions will be generated
// for you, each with machine code that handles that specific
// data size and type.
//
// Please fix this function so that the 'size' parameter:
//
// 1) Is guaranteed to be known at compile time.
// 2) Sets the size of the array of type T (which is the
// sequence we're creating and returning).
//
// Builds the sequence [1, 2, ..., size] as a value of type [size]T.
//
// Both parameters must be comptime-known: T because types only exist at
// compile time, and size because it is used in the array type [size]T —
// an array length is part of the type and must be a compile-time value.
// The compiler generates a separate copy of this function for each
// distinct (T, size) pair used by callers.
fn makeSequence(comptime T: type, comptime size: usize) [size]T {
    var sequence: [size]T = undefined;
    var i: usize = 0;
    while (i < size) : (i += 1) {
        sequence[i] = @intCast(T, i) + 1;
    }
    return sequence;
}
| https://raw.githubusercontent.com/marlalain/ziglings/c282e48c41d77dab8f96c0d72082e9679d59a31d/exercises/069_comptime4.zig |
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2021 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const uefi = @import("std").os.uefi;
const Guid = uefi.Guid;
const Event = uefi.Event;
const Status = uefi.Status;
const MacAddress = uefi.protocols.MacAddress;
const ManagedNetworkConfigData = uefi.protocols.ManagedNetworkConfigData;
const SimpleNetworkMode = uefi.protocols.SimpleNetworkMode;
/// Binding for the UEFI packet-level IPv6 interface (EFI_IP6_PROTOCOL).
/// The underscore-prefixed fields are the firmware's function-pointer table;
/// this is an `extern struct`, so the field order is ABI-mandated by the
/// UEFI spec and must not change. The pub methods below are thin wrappers
/// that forward `self` as the first argument to the firmware callbacks.
pub const Ip6Protocol = extern struct {
    _get_mode_data: fn (*const Ip6Protocol, ?*Ip6ModeData, ?*ManagedNetworkConfigData, ?*SimpleNetworkMode) callconv(.C) Status,
    _configure: fn (*const Ip6Protocol, ?*const Ip6ConfigData) callconv(.C) Status,
    _groups: fn (*const Ip6Protocol, bool, ?*const Ip6Address) callconv(.C) Status,
    _routes: fn (*const Ip6Protocol, bool, ?*const Ip6Address, u8, ?*const Ip6Address) callconv(.C) Status,
    _neighbors: fn (*const Ip6Protocol, bool, *const Ip6Address, ?*const MacAddress, u32, bool) callconv(.C) Status,
    _transmit: fn (*const Ip6Protocol, *Ip6CompletionToken) callconv(.C) Status,
    _receive: fn (*const Ip6Protocol, *Ip6CompletionToken) callconv(.C) Status,
    _cancel: fn (*const Ip6Protocol, ?*Ip6CompletionToken) callconv(.C) Status,
    _poll: fn (*const Ip6Protocol) callconv(.C) Status,
    /// Gets the current operational settings for this instance of the EFI IPv6 Protocol driver.
    /// All three out-parameters are optional; pass null for any data you do not need.
    pub fn getModeData(self: *const Ip6Protocol, ip6_mode_data: ?*Ip6ModeData, mnp_config_data: ?*ManagedNetworkConfigData, snp_mode_data: ?*SimpleNetworkMode) Status {
        return self._get_mode_data(self, ip6_mode_data, mnp_config_data, snp_mode_data);
    }
    /// Assign IPv6 address and other configuration parameter to this EFI IPv6 Protocol driver instance.
    /// Passing null for `ip6_config_data` resets the instance (per the UEFI spec).
    pub fn configure(self: *const Ip6Protocol, ip6_config_data: ?*const Ip6ConfigData) Status {
        return self._configure(self, ip6_config_data);
    }
    /// Joins and leaves multicast groups.
    /// `join_flag` selects join (true) vs. leave (false) — confirm against the UEFI spec.
    pub fn groups(self: *const Ip6Protocol, join_flag: bool, group_address: ?*const Ip6Address) Status {
        return self._groups(self, join_flag, group_address);
    }
    /// Adds and deletes routing table entries.
    pub fn routes(self: *const Ip6Protocol, delete_route: bool, destination: ?*const Ip6Address, prefix_length: u8, gateway_address: ?*const Ip6Address) Status {
        return self._routes(self, delete_route, destination, prefix_length, gateway_address);
    }
    /// Add or delete Neighbor cache entries.
    pub fn neighbors(self: *const Ip6Protocol, delete_flag: bool, target_ip6_address: *const Ip6Address, target_link_address: ?*const MacAddress, timeout: u32, override: bool) Status {
        return self._neighbors(self, delete_flag, target_ip6_address, target_link_address, timeout, override);
    }
    /// Places outgoing data packets into the transmit queue.
    /// Asynchronous: completion is signaled through the token's event.
    pub fn transmit(self: *const Ip6Protocol, token: *Ip6CompletionToken) Status {
        return self._transmit(self, token);
    }
    /// Places a receiving request into the receiving queue.
    /// Asynchronous: completion is signaled through the token's event.
    pub fn receive(self: *const Ip6Protocol, token: *Ip6CompletionToken) Status {
        return self._receive(self, token);
    }
    /// Abort an asynchronous transmits or receive request.
    pub fn cancel(self: *const Ip6Protocol, token: ?*Ip6CompletionToken) Status {
        return self._cancel(self, token);
    }
    /// Polls for incoming data packets and processes outgoing data packets.
    pub fn poll(self: *const Ip6Protocol) Status {
        return self._poll(self);
    }
    /// EFI_IP6_PROTOCOL_GUID: 2c8759d5-5c2d-66ef-925f-b66c101957e2.
    pub const guid align(8) = Guid{
        .time_low = 0x2c8759d5,
        .time_mid = 0x5c2d,
        .time_high_and_version = 0x66ef,
        .clock_seq_high_and_reserved = 0x92,
        .clock_seq_low = 0x5f,
        .node = [_]u8{ 0xb6, 0x6c, 0x10, 0x19, 0x57, 0xe2 },
    };
};
/// Operational state snapshot of an IPv6 driver instance (EFI_IP6_MODE_DATA),
/// filled in by Ip6Protocol.getModeData(). Each `*_count`/pointer pair below
/// describes a firmware-owned table with `count` entries (per the UEFI spec).
/// Layout is ABI-fixed (extern struct) — do not reorder fields.
pub const Ip6ModeData = extern struct {
    is_started: bool,
    max_packet_size: u32,
    config_data: Ip6ConfigData,
    is_configured: bool,
    address_count: u32,
    address_list: [*]Ip6AddressInfo,
    group_count: u32,
    group_table: [*]Ip6Address,
    route_count: u32,
    route_table: [*]Ip6RouteTable,
    neighbor_count: u32,
    neighbor_cache: [*]Ip6NeighborCache,
    prefix_count: u32,
    prefix_table: [*]Ip6AddressInfo,
    icmp_type_count: u32,
    icmp_type_list: [*]Ip6IcmpType,
};
/// Instance configuration passed to Ip6Protocol.configure()
/// (EFI_IP6_CONFIG_DATA). Field semantics follow the UEFI spec;
/// layout is ABI-fixed (extern struct) — do not reorder fields.
pub const Ip6ConfigData = extern struct {
    default_protocol: u8,
    accept_any_protocol: bool,
    accept_icmp_errors: bool,
    accept_promiscuous: bool,
    destination_address: Ip6Address,
    station_address: Ip6Address,
    traffic_class: u8,
    hop_limit: u8,
    flow_label: u32,
    // Timeouts — presumably in microseconds per the UEFI spec; confirm
    // before relying on the unit.
    receive_timeout: u32,
    transmit_timeout: u32,
};
/// 128-bit IPv6 address as raw bytes (EFI_IPv6_ADDRESS).
pub const Ip6Address = [16]u8;
/// An IPv6 address together with its prefix length (EFI_IP6_ADDRESS_INFO).
pub const Ip6AddressInfo = extern struct {
    address: Ip6Address,
    prefix_length: u8,
};
/// One IPv6 routing table entry (EFI_IP6_ROUTE_TABLE): destination
/// prefix plus the gateway used to reach it.
pub const Ip6RouteTable = extern struct {
    gateway: Ip6Address,
    destination: Ip6Address,
    prefix_length: u8,
};
/// Neighbor cache entry state (EFI_IP6_NEIGHBOR_STATE). The variants
/// mirror the IPv6 Neighbor Discovery reachability states of RFC 4861.
pub const Ip6NeighborState = extern enum(u32) {
    Incomplete,
    Reachable,
    Stale,
    Delay,
    Probe,
};
/// Neighbor cache entry mapping an IPv6 neighbor to its link-layer (MAC)
/// address, with its current reachability state (EFI_IP6_NEIGHBOR_CACHE).
pub const Ip6NeighborCache = extern struct {
    neighbor: Ip6Address,
    link_address: MacAddress,
    state: Ip6NeighborState,
};
/// ICMPv6 message type/code pair (EFI_IP6_ICMP_TYPE).
pub const Ip6IcmpType = extern struct {
    type: u8,
    code: u8,
};
/// Asynchronous transmit/receive token (EFI_IP6_COMPLETION_TOKEN).
/// `event` is signaled when the operation completes, after which `status`
/// holds the result.
pub const Ip6CompletionToken = extern struct {
    event: Event,
    status: Status,
    // Per the UEFI spec this is a union of receive/transmit data pointers;
    // it is left untyped here pending a proper union definition.
    packet: *c_void, // union TODO
};
| https://raw.githubusercontent.com/creationix/zig-toolset/9ad208cd93d1f05eb772deff4af24f58eb42386f/zig-linux-x86_64-0.8.0-dev.1860+1fada3746/lib/std/os/uefi/protocols/ip6_protocol.zig |
const std = @import("std");
// Langref sample: this program is INTENTIONALLY broken (note the
// `// exe=fail` marker below) — it demonstrates a runtime-checked
// invalid cast. Do not "fix" it.
pub fn main() void {
    var value: i32 = -1; // runtime-known
    _ = &value;
    // @intCast asserts the value fits the destination type; -1 cannot be
    // represented as u32, so safe builds panic here at runtime.
    const unsigned: u32 = @intCast(value);
    std.debug.print("value: {}\n", .{unsigned});
}
// exe=fail
| https://raw.githubusercontent.com/ziglang/zig-bootstrap/ec2dca85a340f134d2fcfdc9007e91f9abed6996/zig/doc/langref/runtime_invalid_cast.zig |
const std = @import("std");

/// Language-reference example: an out-of-range @intCast on a runtime-known
/// value. -1 cannot be represented in u32, so this program panics at runtime
/// in safety-checked builds (hence the `exe=fail` directive below).
pub fn main() void {
    var value: i32 = -1; // runtime-known
    _ = &value; // discard-address idiom: keeps `value` a runtime `var`
    const unsigned: u32 = @intCast(value); // safety panic: cast truncates -1
    std.debug.print("value: {}\n", .{unsigned});
}

// exe=fail
| https://raw.githubusercontent.com/kassane/zig-mos-bootstrap/19aac4779b9e93b0e833402c26c93cfc13bb94e2/zig/doc/langref/runtime_invalid_cast.zig |
const std = @import("std");
const bun = @import("root").bun;
const string = bun.string;
const Output = bun.Output;
const Global = bun.Global;
const Environment = bun.Environment;
const strings = bun.strings;
const MutableString = bun.MutableString;
const stringZ = bun.stringZ;
const default_allocator = bun.default_allocator;
const C = bun.C;
const clap = @import("../src/deps/zig-clap/clap.zig");
const URL = @import("../src/url.zig").URL;
const Headers = @import("../src/bun.js/webcore/response.zig").Headers;
const Method = @import("../src/http/method.zig").Method;
const ColonListType = @import("../src/cli/colon_list_type.zig").ColonListType;
const HeadersTuple = ColonListType(string, noop_resolver);
const path_handler = @import("../src/resolver/resolve_path.zig");
/// Identity resolver for `ColonListType(string, ...)`: header values are
/// taken verbatim, with no transformation or validation.
fn noop_resolver(in: string) !string {
    return in;
}
const VERSION = "0.0.0";

/// Command-line parameter table for the bench tool (consumed by clap).
/// NOTE(review): `--max-redirects` and `--retry` are declared but never read
/// by `Arguments.parse` below — confirm whether they should be wired up.
const params = [_]clap.Param(clap.Help){
    clap.parseParam("-v, --verbose               Show headers & status code") catch unreachable,
    clap.parseParam("-H, --header <STR>...       Add a header") catch unreachable,
    clap.parseParam("-r, --max-redirects <STR>   Maximum number of redirects to follow (default: 128)") catch unreachable,
    clap.parseParam("-b, --body <STR>            HTTP request body as a string") catch unreachable,
    clap.parseParam("-f, --file <STR>            File path to load as body") catch unreachable,
    clap.parseParam("-n, --count <INT>           How many runs? Default 10") catch unreachable,
    clap.parseParam("-t, --timeout <INT>         Max duration per request") catch unreachable,
    // Fix: `--retry` previously also claimed the short flag `-r`, colliding
    // with `--max-redirects` above and making `-r` ambiguous. Retry is now
    // reachable via its long flag only; `-r` keeps its first-declared meaning.
    clap.parseParam("--retry <INT>               Max retry count") catch unreachable,
    clap.parseParam("--no-gzip                   Disable gzip") catch unreachable,
    clap.parseParam("--no-deflate                Disable deflate") catch unreachable,
    clap.parseParam("--no-compression            Disable gzip & deflate") catch unreachable,
    clap.parseParam("--version                   Print the version and exit") catch unreachable,
    clap.parseParam("--turbo                     Skip sending TLS shutdown signals") catch unreachable,
    clap.parseParam("--repeat <INT>              Repeat N times") catch unreachable,
    clap.parseParam("--max-concurrency <INT>     Max concurrent") catch unreachable,
    clap.parseParam("<POS>...                          ") catch unreachable,
};
/// Comptime lookup table mapping a positional CLI argument to an HTTP
/// method. Only upper- and lower-case spellings are accepted (no mixed
/// case); any positional that does not match is treated as the URL.
const MethodNames = std.ComptimeStringMap(Method, .{
    .{ "GET", Method.GET },
    .{ "get", Method.GET },
    .{ "POST", Method.POST },
    .{ "post", Method.POST },
    .{ "PUT", Method.PUT },
    .{ "put", Method.PUT },
    .{ "PATCH", Method.PATCH },
    .{ "patch", Method.PATCH },
    .{ "OPTIONS", Method.OPTIONS },
    .{ "options", Method.OPTIONS },
    .{ "HEAD", Method.HEAD },
    .{ "head", Method.HEAD },
});
// Scratch buffers for resolving `--file` to an absolute, NUL-terminated
// path. Global and `undefined` because they are filled before first use
// in `Arguments.parse` (single-threaded argument parsing).
var file_path_buf: bun.PathBuffer = undefined;
var cwd_buf: bun.PathBuffer = undefined;
/// Parsed command-line configuration for the HTTP bench tool.
pub const Arguments = struct {
    /// Target URL; must be absolute.
    url: URL,
    /// HTTP method, taken from a positional argument (defaults to GET).
    method: Method,
    verbose: bool = false,
    headers: Headers.Entries,
    headers_buf: string,
    /// Request body, from `--body` or the contents of `--file`.
    body: string = "",
    /// If true, skip TLS shutdown signals (`--turbo`).
    turbo: bool = false,
    /// Number of requests per run (`--count`).
    count: usize = 10,
    /// Per-request timeout; 0 disables (`--timeout`).
    timeout: usize = 0,
    /// Extra repetitions of the whole run (`--repeat`); parse() leaves this
    /// at its default — the flag is declared but never read here.
    repeat: usize = 0,
    /// Max simultaneous requests (`--max-concurrency`).
    concurrency: u16 = 32,

    /// Parse process arguments into an `Arguments`.
    /// Exits the process (via Global.exit) on missing/invalid URL, file
    /// errors, or unparseable numeric flags. Memory for `--file` contents
    /// is allocated from `allocator` and never freed (process-lifetime).
    pub fn parse(allocator: std.mem.Allocator) !Arguments {
        var diag = clap.Diagnostic{};
        var args = clap.parse(clap.Help, &params, .{
            .diagnostic = &diag,
            .allocator = allocator,
        }) catch |err| {
            // Report useful error and exit
            diag.report(Output.errorWriter(), err) catch {};
            return err;
        };
        const positionals = args.positionals();
        var raw_args: std.ArrayListUnmanaged(string) = undefined;
        if (positionals.len > 0) {
            // Reuse clap's positional buffer in place (const is cast away via
            // ptr round-trip) so we can swapRemove entries without allocating.
            raw_args = .{ .capacity = positionals.len, .items = @as([*][]const u8, @ptrFromInt(@intFromPtr(positionals.ptr)))[0..positionals.len] };
        } else {
            raw_args = .{};
        }
        if (args.flag("--version")) {
            try Output.writer().writeAll(VERSION);
            Global.exit(0);
        }
        var method = Method.GET;
        var url: URL = .{};
        var body_string: string = args.option("--body") orelse "";
        if (args.option("--file")) |file_path| {
            if (file_path.len > 0) {
                // Resolve relative paths against the cwd into a global buffer,
                // then NUL-terminate for the *Z open call below.
                const cwd = try std.process.getCwd(&cwd_buf);
                var parts = [_]string{file_path};
                const absolute_path = path_handler.joinAbsStringBuf(cwd, &file_path_buf, &parts, .auto);
                file_path_buf[absolute_path.len] = 0;
                file_path_buf[absolute_path.len + 1] = 0;
                const absolute_path_len = absolute_path.len;
                const absolute_path_ = file_path_buf[0..absolute_path_len :0];
                var body_file = std.fs.openFileAbsoluteZ(absolute_path_, .{ .mode = .read_only }) catch |err| {
                    Output.printErrorln("<r><red>{s}<r> opening file {s}", .{ @errorName(err), absolute_path });
                    Global.exit(1);
                };
                const file_contents = body_file.readToEndAlloc(allocator, try body_file.getEndPos()) catch |err| {
                    Output.printErrorln("<r><red>{s}<r> reading file {s}", .{ @errorName(err), absolute_path });
                    Global.exit(1);
                };
                body_string = file_contents;
            }
        }
        {
            // Pull any recognized HTTP method names out of the positionals;
            // whatever remains last is treated as the URL.
            var raw_arg_i: usize = 0;
            while (raw_arg_i < raw_args.items.len) : (raw_arg_i += 1) {
                const arg = raw_args.items[raw_arg_i];
                if (MethodNames.get(arg[0..])) |method_| {
                    method = method_;
                    _ = raw_args.swapRemove(raw_arg_i);
                }
            }
            if (raw_args.items.len == 0) {
                Output.prettyErrorln("<r><red>error<r><d>:<r> <b>Missing URL<r>\n\nExample:\n<r><b>fetch GET https://example.com<r>\n\n<b>fetch example.com/foo<r>\n\n", .{});
                Global.exit(1);
            }
            const url_position = raw_args.items.len - 1;
            url = URL.parse(raw_args.swapRemove(url_position));
            if (!url.isAbsolute()) {
                Output.prettyErrorln("<r><red>error<r><d>:<r> <b>Invalid URL<r>\n\nExample:\n<r><b>fetch GET https://example.com<r>\n\n<b>fetch example.com/foo<r>\n\n", .{});
                Global.exit(1);
            }
        }
        return Arguments{
            .url = url,
            .method = method,
            .verbose = args.flag("--verbose"),
            .headers = .{},
            .headers_buf = "",
            .body = body_string,
            // .keep_alive = !args.flag("--no-keep-alive"),
            .concurrency = std.fmt.parseInt(u16, args.option("--max-concurrency") orelse "32", 10) catch 32,
            .turbo = args.flag("--turbo"),
            .timeout = std.fmt.parseInt(usize, args.option("--timeout") orelse "0", 10) catch |err| {
                Output.prettyErrorln("<r><red>{s}<r> parsing timeout", .{@errorName(err)});
                Global.exit(1);
            },
            .count = std.fmt.parseInt(usize, args.option("--count") orelse "10", 10) catch |err| {
                Output.prettyErrorln("<r><red>{s}<r> parsing count", .{@errorName(err)});
                Global.exit(1);
            },
        };
    }
};
const HTTP = bun.http;
const NetworkThread = HTTP.NetworkThread;

// Held globally so Output.Source (initialized in main) can keep referencing
// the std handles for the lifetime of the process.
var stdout_: std.fs.File = undefined;
var stderr_: std.fs.File = undefined;
/// Entry point: parse arguments, schedule `count` concurrent HTTP requests
/// on the network thread, then drain completions from the channel and print
/// a per-request line plus a success/failure summary. The whole run repeats
/// `repeat + 1` times.
pub fn main() anyerror!void {
    stdout_ = std.io.getStdOut();
    stderr_ = std.io.getStdErr();
    var output_source = Output.Source.init(stdout_, stderr_);
    Output.Source.set(&output_source);
    defer Output.flush();

    const args = try Arguments.parse(default_allocator);

    // Completed requests are delivered back through this channel by the
    // network thread's callback.
    var channel = try default_allocator.create(HTTP.HTTPChannel);
    channel.* = HTTP.HTTPChannel.init();
    try channel.buffer.ensureTotalCapacity(args.count);
    try NetworkThread.init();
    if (args.concurrency > 0) HTTP.AsyncHTTP.max_simultaneous_requests.store(args.concurrency, .Monotonic);

    // One Group per in-flight request: its response buffer plus the channel
    // context handed to AsyncHTTP. Kept alive in `groups` for the whole run.
    const Group = struct {
        response_body: MutableString = undefined,
        context: HTTP.HTTPChannelContext = undefined,
    };
    const Batch = @import("../src/thread_pool.zig").Batch;
    var groups = try default_allocator.alloc(Group, args.count);

    var repeat_i: usize = 0;
    while (repeat_i < args.repeat + 1) : (repeat_i += 1) {
        // Build and schedule all requests for this repetition as one batch.
        var i: usize = 0;
        var batch = Batch{};
        while (i < args.count) : (i += 1) {
            groups[i] = Group{};
            const response_body = &groups[i].response_body;
            response_body.* = try MutableString.init(default_allocator, 1024);
            var ctx = &groups[i].context;
            ctx.* = .{
                .channel = channel,
                .http = try HTTP.AsyncHTTP.init(
                    default_allocator,
                    args.method,
                    args.url,
                    args.headers,
                    args.headers_buf,
                    response_body,
                    "",
                    args.timeout,
                ),
            };
            ctx.http.client.verbose = args.verbose;
            ctx.http.callback = HTTP.HTTPChannelContext.callback;
            ctx.http.schedule(default_allocator, &batch);
        }
        NetworkThread.global.schedule(batch);

        var read_count: usize = 0;
        var success_count: usize = 0;
        var fail_count: usize = 0;
        // NOTE(review): min/max durations are collected below but never
        // reported in the summary — confirm whether that's intentional.
        var min_duration: usize = std.math.maxInt(usize);
        var max_duration: usize = 0;
        var timer = try std.time.Timer.start();
        // Drain completions until every scheduled request has reported back.
        while (read_count < args.count) {
            const http = channel.readItem() catch continue;
            read_count += 1;

            Output.printElapsed(@as(f64, @floatCast(@as(f128, @floatFromInt(http.elapsed)) / std.time.ns_per_ms)));
            if (http.response) |resp| {
                // Only 200 counts as success; anything else is a failure.
                if (resp.status_code == 200) {
                    success_count += 1;
                } else {
                    fail_count += 1;
                }

                max_duration = @max(max_duration, http.elapsed);
                min_duration = @min(min_duration, http.elapsed);

                switch (resp.status_code) {
                    200, 202, 302 => {
                        Output.prettyError(" <r><green>{d}<r>", .{resp.status_code});
                    },
                    else => {
                        Output.prettyError(" <r><red>{d}<r>", .{resp.status_code});
                    },
                }

                if (http.gzip_elapsed > 0) {
                    // Include decompression time when the response was gzipped.
                    Output.prettyError(" <d>{s}<r><d> - {s}<r> <d>({d} bytes, ", .{
                        @tagName(http.client.method),
                        http.client.url.href,
                        http.response_buffer.list.items.len,
                    });
                    Output.printElapsed(@as(f64, @floatCast(@as(f128, @floatFromInt(http.gzip_elapsed)) / std.time.ns_per_ms)));
                    Output.prettyError("<d> gzip)<r>\n", .{});
                } else {
                    Output.prettyError(" <d>{s}<r><d> - {s}<r> <d>({d} bytes)<r>\n", .{
                        @tagName(http.client.method),
                        http.client.url.href,
                        http.response_buffer.list.items.len,
                    });
                }
            } else if (http.err) |err| {
                fail_count += 1;
                Output.printError(" err: {s}\n", .{@errorName(err)});
            } else {
                // Completed with neither a response nor an error: report the
                // client state for debugging.
                fail_count += 1;
                Output.prettyError(" Uh-oh: {s}\n", .{@tagName(http.state.raw)});
            }

            Output.flush();
        }

        Output.prettyErrorln("\n<d>------<r>\n\n", .{});
        Output.prettyErrorln("Success: <b><green>{d}<r>\nFailure: <b><red>{d}<r>\n\n", .{
            success_count,
            fail_count,
        });
        Output.printElapsed(@as(f64, @floatCast(@as(f128, @floatFromInt(timer.read())) / std.time.ns_per_ms)));
        Output.prettyErrorln(" {d} requests", .{
            read_count,
        });
        Output.flush();
    }
}
| https://raw.githubusercontent.com/oven-sh/bun/fab96a74ea13da04459ea7f62663c4d2fd421778/misctools/http_bench.zig |
const std = @import("std.zig");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;
const mem = std.mem;
const math = std.math;
const Allocator = mem.Allocator;
/// A contiguous, growable list of items in memory.
/// This is a wrapper around an array of T values. Initialize with `init`.
///
/// This struct internally stores a `std.mem.Allocator` for memory management.
/// To manually specify an allocator with each function call see `ArrayListUnmanaged`.
pub fn ArrayList(comptime T: type) type {
    // Convenience alias for the natural-alignment (`@alignOf(T)`) variant.
    return ArrayListAligned(T, null);
}
/// A contiguous, growable list of arbitrarily aligned items in memory.
/// This is a wrapper around an array of T values aligned to `alignment`-byte
/// addresses. If the specified alignment is `null`, then `@alignOf(T)` is used.
/// Initialize with `init`.
///
/// This struct internally stores a `std.mem.Allocator` for memory management.
/// To manually specify an allocator with each function call see `ArrayListAlignedUnmanaged`.
pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
if (alignment) |a| {
if (a == @alignOf(T)) {
return ArrayListAligned(T, null);
}
}
return struct {
const Self = @This();
/// Contents of the list. This field is intended to be accessed
/// directly.
///
/// Pointers to elements in this slice are invalidated by various
/// functions of this ArrayList in accordance with the respective
/// documentation. In all cases, "invalidated" means that the memory
/// has been passed to this allocator's resize or free function.
items: Slice,
/// How many T values this list can hold without allocating
/// additional memory.
capacity: usize,
allocator: Allocator,
pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
pub fn SentinelSlice(comptime s: T) type {
return if (alignment) |a| ([:s]align(a) T) else [:s]T;
}
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn init(allocator: Allocator) Self {
return Self{
.items = &[_]T{},
.capacity = 0,
.allocator = allocator,
};
}
/// Initialize with capacity to hold `num` elements.
/// The resulting capacity will equal `num` exactly.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(allocator: Allocator, num: usize) Allocator.Error!Self {
var self = Self.init(allocator);
try self.ensureTotalCapacityPrecise(num);
return self;
}
/// Release all allocated memory.
pub fn deinit(self: Self) void {
if (@sizeOf(T) > 0) {
self.allocator.free(self.allocatedSlice());
}
}
/// ArrayList takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn fromOwnedSlice(allocator: Allocator, slice: Slice) Self {
return Self{
.items = slice,
.capacity = slice.len,
.allocator = allocator,
};
}
/// ArrayList takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn fromOwnedSliceSentinel(allocator: Allocator, comptime sentinel: T, slice: [:sentinel]T) Self {
return Self{
.items = slice,
.capacity = slice.len + 1,
.allocator = allocator,
};
}
/// Initializes an ArrayListUnmanaged with the `items` and `capacity` fields
/// of this ArrayList. Empties this ArrayList.
pub fn moveToUnmanaged(self: *Self) ArrayListAlignedUnmanaged(T, alignment) {
const allocator = self.allocator;
const result = .{ .items = self.items, .capacity = self.capacity };
self.* = init(allocator);
return result;
}
/// The caller owns the returned memory. Empties this ArrayList,
/// Its capacity is cleared, making deinit() safe but unnecessary to call.
pub fn toOwnedSlice(self: *Self) Allocator.Error!Slice {
const allocator = self.allocator;
const old_memory = self.allocatedSlice();
if (allocator.resize(old_memory, self.items.len)) {
const result = self.items;
self.* = init(allocator);
return result;
}
const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
@memcpy(new_memory, self.items);
@memset(self.items, undefined);
self.clearAndFree();
return new_memory;
}
/// The caller owns the returned memory. Empties this ArrayList.
pub fn toOwnedSliceSentinel(self: *Self, comptime sentinel: T) Allocator.Error!SentinelSlice(sentinel) {
// This addition can never overflow because `self.items` can never occupy the whole address space
try self.ensureTotalCapacityPrecise(self.items.len + 1);
self.appendAssumeCapacity(sentinel);
const result = try self.toOwnedSlice();
return result[0 .. result.len - 1 :sentinel];
}
/// Creates a copy of this ArrayList, using the same allocator.
pub fn clone(self: Self) Allocator.Error!Self {
var cloned = try Self.initCapacity(self.allocator, self.capacity);
cloned.appendSliceAssumeCapacity(self.items);
return cloned;
}
/// Insert `item` at index `i`. Moves `list[i .. list.len]` to higher indices to make room.
/// If `i` is equal to the length of the list this operation is equivalent to append.
/// This operation is O(N).
/// Invalidates element pointers if additional memory is needed.
/// Asserts that the index is in bounds or equal to the length.
pub fn insert(self: *Self, i: usize, item: T) Allocator.Error!void {
const dst = try self.addManyAt(i, 1);
dst[0] = item;
}
/// Insert `item` at index `i`. Moves `list[i .. list.len]` to higher indices to make room.
/// If `i` is equal to the length of the list this operation is
/// equivalent to appendAssumeCapacity.
/// This operation is O(N).
/// Asserts that there is enough capacity for the new item.
/// Asserts that the index is in bounds or equal to the length.
pub fn insertAssumeCapacity(self: *Self, i: usize, item: T) void {
assert(self.items.len < self.capacity);
self.items.len += 1;
mem.copyBackwards(T, self.items[i + 1 .. self.items.len], self.items[i .. self.items.len - 1]);
self.items[i] = item;
}
/// Add `count` new elements at position `index`, which have
/// `undefined` values. Returns a slice pointing to the newly allocated
/// elements, which becomes invalid after various `ArrayList`
/// operations.
/// Invalidates pre-existing pointers to elements at and after `index`.
/// Invalidates all pre-existing element pointers if capacity must be
/// increased to accomodate the new elements.
/// Asserts that the index is in bounds or equal to the length.
pub fn addManyAt(self: *Self, index: usize, count: usize) Allocator.Error![]T {
const new_len = try addOrOom(self.items.len, count);
if (self.capacity >= new_len)
return addManyAtAssumeCapacity(self, index, count);
// Here we avoid copying allocated but unused bytes by
// attempting a resize in place, and falling back to allocating
// a new buffer and doing our own copy. With a realloc() call,
// the allocator implementation would pointlessly copy our
// extra capacity.
const new_capacity = growCapacity(self.capacity, new_len);
const old_memory = self.allocatedSlice();
if (self.allocator.resize(old_memory, new_capacity)) {
self.capacity = new_capacity;
return addManyAtAssumeCapacity(self, index, count);
}
// Make a new allocation, avoiding `ensureTotalCapacity` in order
// to avoid extra memory copies.
const new_memory = try self.allocator.alignedAlloc(T, alignment, new_capacity);
const to_move = self.items[index..];
@memcpy(new_memory[0..index], self.items[0..index]);
@memcpy(new_memory[index + count ..][0..to_move.len], to_move);
self.allocator.free(old_memory);
self.items = new_memory[0..new_len];
self.capacity = new_memory.len;
// The inserted elements at `new_memory[index..][0..count]` have
// already been set to `undefined` by memory allocation.
return new_memory[index..][0..count];
}
/// Add `count` new elements at position `index`, which have
/// `undefined` values. Returns a slice pointing to the newly allocated
/// elements, which becomes invalid after various `ArrayList`
/// operations.
/// Asserts that there is enough capacity for the new elements.
/// Invalidates pre-existing pointers to elements at and after `index`, but
/// does not invalidate any before that.
/// Asserts that the index is in bounds or equal to the length.
pub fn addManyAtAssumeCapacity(self: *Self, index: usize, count: usize) []T {
const new_len = self.items.len + count;
assert(self.capacity >= new_len);
const to_move = self.items[index..];
self.items.len = new_len;
mem.copyBackwards(T, self.items[index + count ..], to_move);
const result = self.items[index..][0..count];
@memset(result, undefined);
return result;
}
/// Insert slice `items` at index `i` by moving `list[i .. list.len]` to make room.
/// This operation is O(N).
/// Invalidates pre-existing pointers to elements at and after `index`.
/// Invalidates all pre-existing element pointers if capacity must be
/// increased to accomodate the new elements.
/// Asserts that the index is in bounds or equal to the length.
pub fn insertSlice(
self: *Self,
index: usize,
items: []const T,
) Allocator.Error!void {
const dst = try self.addManyAt(index, items.len);
@memcpy(dst, items);
}
/// Grows or shrinks the list as necessary.
/// Invalidates element pointers if additional capacity is allocated.
/// Asserts that the range is in bounds.
pub fn replaceRange(self: *Self, start: usize, len: usize, new_items: []const T) Allocator.Error!void {
var unmanaged = self.moveToUnmanaged();
defer self.* = unmanaged.toManaged(self.allocator);
return unmanaged.replaceRange(self.allocator, start, len, new_items);
}
/// Grows or shrinks the list as necessary.
/// Never invalidates element pointers.
/// Asserts the capacity is enough for additional items.
pub fn replaceRangeAssumeCapacity(self: *Self, start: usize, len: usize, new_items: []const T) void {
var unmanaged = self.moveToUnmanaged();
defer self.* = unmanaged.toManaged(self.allocator);
return unmanaged.replaceRangeAssumeCapacity(start, len, new_items);
}
/// Extends the list by 1 element. Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
pub fn append(self: *Self, item: T) Allocator.Error!void {
const new_item_ptr = try self.addOne();
new_item_ptr.* = item;
}
/// Extends the list by 1 element.
/// Never invalidates element pointers.
/// Asserts that the list can hold one additional item.
pub fn appendAssumeCapacity(self: *Self, item: T) void {
const new_item_ptr = self.addOneAssumeCapacity();
new_item_ptr.* = item;
}
/// Remove the element at index `i`, shift elements after index
/// `i` forward, and return the removed element.
/// Invalidates element pointers to end of list.
/// This operation is O(N).
/// This preserves item order. Use `swapRemove` if order preservation is not important.
/// Asserts that the index is in bounds.
/// Asserts that the list is not empty.
pub fn orderedRemove(self: *Self, i: usize) T {
const old_item = self.items[i];
self.replaceRangeAssumeCapacity(i, 1, &.{});
return old_item;
}
/// Removes the element at the specified index and returns it.
/// The empty slot is filled from the end of the list.
/// This operation is O(1).
/// This may not preserve item order. Use `orderedRemove` if you need to preserve order.
/// Asserts that the list is not empty.
/// Asserts that the index is in bounds.
pub fn swapRemove(self: *Self, i: usize) T {
if (self.items.len - 1 == i) return self.pop();
const old_item = self.items[i];
self.items[i] = self.pop();
return old_item;
}
/// Append the slice of items to the list. Allocates more
/// memory as necessary.
/// Invalidates element pointers if additional memory is needed.
pub fn appendSlice(self: *Self, items: []const T) Allocator.Error!void {
try self.ensureUnusedCapacity(items.len);
self.appendSliceAssumeCapacity(items);
}
/// Append the slice of items to the list.
/// Never invalidates element pointers.
/// Asserts that the list can hold the additional items.
pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
const old_len = self.items.len;
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
@memcpy(self.items[old_len..][0..items.len], items);
}
/// Append an unaligned slice of items to the list. Allocates more
/// memory as necessary. Only call this function if calling
/// `appendSlice` instead would be a compile error.
/// Invalidates element pointers if additional memory is needed.
pub fn appendUnalignedSlice(self: *Self, items: []align(1) const T) Allocator.Error!void {
try self.ensureUnusedCapacity(items.len);
self.appendUnalignedSliceAssumeCapacity(items);
}
/// Append the slice of items to the list.
/// Never invalidates element pointers.
/// This function is only needed when calling
/// `appendSliceAssumeCapacity` instead would be a compile error due to the
/// alignment of the `items` parameter.
/// Asserts that the list can hold the additional items.
pub fn appendUnalignedSliceAssumeCapacity(self: *Self, items: []align(1) const T) void {
const old_len = self.items.len;
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
@memcpy(self.items[old_len..][0..items.len], items);
}
pub const Writer = if (T != u8)
@compileError("The Writer interface is only defined for ArrayList(u8) " ++
"but the given type is ArrayList(" ++ @typeName(T) ++ ")")
else
std.io.Writer(*Self, Allocator.Error, appendWrite);
/// Initializes a Writer which will append to the list.
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
/// Same as `append` except it returns the number of bytes written, which is always the same
/// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
/// Invalidates element pointers if additional memory is needed.
fn appendWrite(self: *Self, m: []const u8) Allocator.Error!usize {
try self.appendSlice(m);
return m.len;
}
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
/// The function is inline so that a comptime-known `value` parameter will
/// have a more optimal memset codegen in case it has a repeated byte pattern.
pub inline fn appendNTimes(self: *Self, value: T, n: usize) Allocator.Error!void {
const old_len = self.items.len;
try self.resize(try addOrOom(old_len, n));
@memset(self.items[old_len..self.items.len], value);
}
/// Append a value to the list `n` times.
/// Never invalidates element pointers.
/// The function is inline so that a comptime-known `value` parameter will
/// have a more optimal memset codegen in case it has a repeated byte pattern.
/// Asserts that the list can hold the additional items.
pub inline fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
const new_len = self.items.len + n;
assert(new_len <= self.capacity);
@memset(self.items.ptr[self.items.len..new_len], value);
self.items.len = new_len;
}
/// Adjust the list length to `new_len`.
/// Additional elements contain the value `undefined`.
/// Invalidates element pointers if additional memory is needed.
pub fn resize(self: *Self, new_len: usize) Allocator.Error!void {
try self.ensureTotalCapacity(new_len);
self.items.len = new_len;
}
/// Reduce allocated capacity to `new_len`.
/// May invalidate element pointers.
/// Asserts that the new length is less than or equal to the previous length.
pub fn shrinkAndFree(self: *Self, new_len: usize) void {
var unmanaged = self.moveToUnmanaged();
unmanaged.shrinkAndFree(self.allocator, new_len);
self.* = unmanaged.toManaged(self.allocator);
}
/// Reduce length to `new_len`.
/// Invalidates element pointers for the elements `items[new_len..]`.
/// Asserts that the new length is less than or equal to the previous length.
pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
self.items.len = new_len;
}
/// Invalidates all element pointers.
pub fn clearRetainingCapacity(self: *Self) void {
self.items.len = 0;
}
/// Invalidates all element pointers.
pub fn clearAndFree(self: *Self) void {
self.allocator.free(self.allocatedSlice());
self.items.len = 0;
self.capacity = 0;
}
/// If the current capacity is less than `new_capacity`, this function will
/// modify the array so that it can hold at least `new_capacity` items.
/// Invalidates element pointers if additional memory is needed.
pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) Allocator.Error!void {
if (@sizeOf(T) == 0) {
self.capacity = math.maxInt(usize);
return;
}
if (self.capacity >= new_capacity) return;
const better_capacity = growCapacity(self.capacity, new_capacity);
return self.ensureTotalCapacityPrecise(better_capacity);
}
/// If the current capacity is less than `new_capacity`, this function will
/// modify the array so that it can hold exactly `new_capacity` items.
/// Invalidates element pointers if additional memory is needed.
pub fn ensureTotalCapacityPrecise(self: *Self, new_capacity: usize) Allocator.Error!void {
if (@sizeOf(T) == 0) {
self.capacity = math.maxInt(usize);
return;
}
if (self.capacity >= new_capacity) return;
// Here we avoid copying allocated but unused bytes by
// attempting a resize in place, and falling back to allocating
// a new buffer and doing our own copy. With a realloc() call,
// the allocator implementation would pointlessly copy our
// extra capacity.
const old_memory = self.allocatedSlice();
if (self.allocator.resize(old_memory, new_capacity)) {
self.capacity = new_capacity;
} else {
const new_memory = try self.allocator.alignedAlloc(T, alignment, new_capacity);
@memcpy(new_memory[0..self.items.len], self.items);
self.allocator.free(old_memory);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
}
}
/// Modify the array so that it can hold at least `additional_count` **more** items.
/// Invalidates element pointers if additional memory is needed.
pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) Allocator.Error!void {
return self.ensureTotalCapacity(try addOrOom(self.items.len, additional_count));
}
/// Increases the array's length to match the full capacity that is already allocated.
/// The new elements have `undefined` values.
/// Never invalidates element pointers.
pub fn expandToCapacity(self: *Self) void {
self.items.len = self.capacity;
}
/// Increase length by 1, returning pointer to the new item.
/// The returned pointer becomes invalid when the list resized.
pub fn addOne(self: *Self) Allocator.Error!*T {
// This can never overflow because `self.items` can never occupy the whole address space
const newlen = self.items.len + 1;
try self.ensureTotalCapacity(newlen);
return self.addOneAssumeCapacity();
}
/// Increase length by 1, returning pointer to the new item.
/// The returned pointer becomes invalid when the list is resized.
/// Never invalidates element pointers.
/// Asserts that the list can hold one additional item.
pub fn addOneAssumeCapacity(self: *Self) *T {
assert(self.items.len < self.capacity);
self.items.len += 1;
return &self.items[self.items.len - 1];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
/// Resizes list if `self.capacity` is not large enough.
pub fn addManyAsArray(self: *Self, comptime n: usize) Allocator.Error!*[n]T {
const prev_len = self.items.len;
try self.resize(try addOrOom(self.items.len, n));
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// Never invalidates element pointers.
/// The returned pointer becomes invalid when the list is resized.
/// Asserts that the list can hold the additional items.
pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T {
assert(self.items.len + n <= self.capacity);
const prev_len = self.items.len;
self.items.len += n;
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is a slice pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
/// Resizes list if `self.capacity` is not large enough.
pub fn addManyAsSlice(self: *Self, n: usize) Allocator.Error![]T {
const prev_len = self.items.len;
try self.resize(try addOrOom(self.items.len, n));
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is a slice pointing to the newly allocated elements.
/// Never invalidates element pointers.
/// The returned pointer becomes invalid when the list is resized.
/// Asserts that the list can hold the additional items.
pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T {
assert(self.items.len + n <= self.capacity);
const prev_len = self.items.len;
self.items.len += n;
return self.items[prev_len..][0..n];
}
/// Remove and return the last element from the list.
/// Invalidates element pointers to the removed element.
/// Asserts that the list is not empty.
pub fn pop(self: *Self) T {
const val = self.items[self.items.len - 1];
self.items.len -= 1;
return val;
}
/// Remove and return the last element from the list, or
/// return `null` if list is empty.
/// Invalidates element pointers to the removed element, if any.
pub fn popOrNull(self: *Self) ?T {
if (self.items.len == 0) return null;
return self.pop();
}
/// Returns a slice of all the items plus the extra capacity, whose memory
/// contents are `undefined`.
pub fn allocatedSlice(self: Self) Slice {
// `items.len` is the length, not the capacity.
return self.items.ptr[0..self.capacity];
}
/// Returns a slice of only the extra capacity after items.
/// This can be useful for writing directly into an ArrayList.
/// Note that such an operation must be followed up with a direct
/// modification of `self.items.len`.
pub fn unusedCapacitySlice(self: Self) Slice {
return self.allocatedSlice()[self.items.len..];
}
/// Returns the last element from the list.
/// Asserts that the list is not empty.
pub fn getLast(self: Self) T {
const val = self.items[self.items.len - 1];
return val;
}
/// Returns the last element of the list, or `null` if the list is empty.
pub fn getLastOrNull(self: Self) ?T {
    return if (self.items.len == 0) null else self.getLast();
}
};
}
/// An ArrayList, but the allocator is passed as a parameter to the relevant functions
/// rather than stored in the struct itself. The same allocator must be used throughout
/// the entire lifetime of an ArrayListUnmanaged. Initialize directly or with
/// `initCapacity`, and deinitialize with `deinit` or use `toOwnedSlice`.
pub fn ArrayListUnmanaged(comptime T: type) type {
// Convenience wrapper: `null` selects the natural alignment of `T`.
return ArrayListAlignedUnmanaged(T, null);
}
/// A contiguous, growable list of arbitrarily aligned items in memory.
/// This is a wrapper around an array of T values aligned to `alignment`-byte
/// addresses. If the specified alignment is `null`, then `@alignOf(T)` is used.
///
/// Functions that potentially allocate memory accept an `Allocator` parameter.
/// Initialize directly or with `initCapacity`, and deinitialize with `deinit`
/// or use `toOwnedSlice`.
pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) type {
if (alignment) |a| {
if (a == @alignOf(T)) {
// Canonicalize explicit natural alignment to `null` so both spellings
// produce the same type.
return ArrayListAlignedUnmanaged(T, null);
}
}
return struct {
const Self = @This();
/// Contents of the list. This field is intended to be accessed
/// directly.
///
/// Pointers to elements in this slice are invalidated by various
/// functions of this ArrayList in accordance with the respective
/// documentation. In all cases, "invalidated" means that the memory
/// has been passed to an allocator's resize or free function.
items: Slice = &[_]T{},
/// How many T values this list can hold without allocating
/// additional memory.
capacity: usize = 0,
pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
pub fn SentinelSlice(comptime s: T) type {
return if (alignment) |a| ([:s]align(a) T) else [:s]T;
}
/// Initialize with capacity to hold `num` elements.
/// The resulting capacity will equal `num` exactly.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(allocator: Allocator, num: usize) Allocator.Error!Self {
var self = Self{};
try self.ensureTotalCapacityPrecise(allocator, num);
return self;
}
/// Initialize with externally-managed memory. The buffer determines the
/// capacity, and the length is set to zero.
/// When initialized this way, all functions that accept an Allocator
/// argument cause illegal behavior.
pub fn initBuffer(buffer: Slice) Self {
return .{
.items = buffer[0..0],
.capacity = buffer.len,
};
}
/// Release all allocated memory.
pub fn deinit(self: *Self, allocator: Allocator) void {
allocator.free(self.allocatedSlice())
self.* = undefined;
}
/// Convert this list into an analogous memory-managed one.
/// The returned list has ownership of the underlying memory.
pub fn toManaged(self: *Self, allocator: Allocator) ArrayListAligned(T, alignment) {
return .{ .items = self.items, .capacity = self.capacity, .allocator = allocator };
}
/// ArrayListUnmanaged takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn fromOwnedSlice(slice: Slice) Self {
return Self{
.items = slice,
.capacity = slice.len,
};
}
/// ArrayListUnmanaged takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn fromOwnedSliceSentinel(comptime sentinel: T, slice: [:sentinel]T) Self {
return Self{
.items = slice,
// +1: the sentinel slot just past `len` is part of the allocation and
// is usable capacity.
.capacity = slice.len + 1,
};
}
/// The caller owns the returned memory. Empties this ArrayList.
/// Its capacity is cleared, making deinit() safe but unnecessary to call.
pub fn toOwnedSlice(self: *Self, allocator: Allocator) Allocator.Error!Slice {
const old_memory = self.allocatedSlice();
if (allocator.resize(old_memory, self.items.len)) {
const result = self.items;
self.* = .{};
return result;
}
const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
@memcpy(new_memory, self.items);
@memset(self.items, undefined);
self.clearAndFree(allocator);
return new_memory;
}
/// The caller owns the returned memory. ArrayList becomes empty.
pub fn toOwnedSliceSentinel(self: *Self, allocator: Allocator, comptime sentinel: T) Allocator.Error!SentinelSlice(sentinel) {
// This addition can never overflow because `self.items` can never occupy the whole address space
try self.ensureTotalCapacityPrecise(allocator, self.items.len + 1);
self.appendAssumeCapacity(sentinel);
const result = try self.toOwnedSlice(allocator);
return result[0 .. result.len - 1 :sentinel];
}
/// Creates a copy of this ArrayList.
pub fn clone(self: Self, allocator: Allocator) Allocator.Error!Self {
var cloned = try Self.initCapacity(allocator, self.capacity);
cloned.appendSliceAssumeCapacity(self.items);
return cloned;
}
/// Insert `item` at index `i`. Moves `list[i .. list.len]` to higher indices to make room.
/// If `i` is equal to the length of the list this operation is equivalent to append.
/// This operation is O(N).
/// Invalidates element pointers if additional memory is needed.
/// Asserts that the index is in bounds or equal to the length.
pub fn insert(self: *Self, allocator: Allocator, i: usize, item: T) Allocator.Error!void {
const dst = try self.addManyAt(allocator, i, 1);
dst[0] = item;
}
/// Insert `item` at index `i`. Moves `list[i .. list.len]` to higher indices to make room.
/// If `i` is equal to the length of the list this operation is equivalent to append.
/// This operation is O(N).
/// Asserts that the list has capacity for one additional item.
/// Asserts that the index is in bounds or equal to the length.
pub fn insertAssumeCapacity(self: *Self, i: usize, item: T) void {
assert(self.items.len < self.capacity);
self.items.len += 1;
mem.copyBackwards(T, self.items[i + 1 .. self.items.len], self.items[i .. self.items.len - 1]);
self.items[i] = item;
}
/// Add `count` new elements at position `index`, which have
/// `undefined` values. Returns a slice pointing to the newly allocated
/// elements, which becomes invalid after various `ArrayList`
/// operations.
/// Invalidates pre-existing pointers to elements at and after `index`.
/// Invalidates all pre-existing element pointers if capacity must be
/// increased to accommodate the new elements.
/// Asserts that the index is in bounds or equal to the length.
pub fn addManyAt(
self: *Self,
allocator: Allocator,
index: usize,
count: usize,
) Allocator.Error![]T {
// Delegate to the managed implementation, then move the (possibly
// reallocated) buffer back into this unmanaged list on every exit path.
var managed = self.toManaged(allocator);
defer self.* = managed.moveToUnmanaged();
return managed.addManyAt(index, count);
}
/// Add `count` new elements at position `index`, which have
/// `undefined` values. Returns a slice pointing to the newly allocated
/// elements, which becomes invalid after various `ArrayList`
/// operations.
/// Invalidates pre-existing pointers to elements at and after `index`, but
/// does not invalidate any before that.
/// Asserts that the list has capacity for the additional items.
/// Asserts that the index is in bounds or equal to the length.
pub fn addManyAtAssumeCapacity(self: *Self, index: usize, count: usize) []T {
const new_len = self.items.len + count;
assert(self.capacity >= new_len);
const to_move = self.items[index..];
self.items.len = new_len;
mem.copyBackwards(T, self.items[index + count ..], to_move);
const result = self.items[index..][0..count];
@memset(result, undefined);
return result;
}
/// Insert slice `items` at index `i` by moving `list[i .. list.len]` to make room.
/// This operation is O(N).
/// Invalidates pre-existing pointers to elements at and after `index`.
/// Invalidates all pre-existing element pointers if capacity must be
/// increased to accommodate the new elements.
/// Asserts that the index is in bounds or equal to the length.
pub fn insertSlice(
self: *Self,
allocator: Allocator,
index: usize,
items: []const T,
) Allocator.Error!void {
const dst = try self.addManyAt(
allocator,
index,
items.len,
);
@memcpy(dst, items);
}
/// Grows or shrinks the list as necessary.
/// Invalidates element pointers if additional capacity is allocated.
/// Asserts that the range is in bounds.
pub fn replaceRange(
self: *Self,
allocator: Allocator,
start: usize,
len: usize,
new_items: []const T,
) Allocator.Error!void {
const after_range = start + len;
const range = self.items[start..after_range];
if (range.len < new_items.len) {
const first = new_items[0..range.len];
const rest = new_items[range.len..];
@memcpy(range[0..first.len], first);
try self.insertSlice(allocator, after_range, rest);
} else {
self.replaceRangeAssumeCapacity(start, len, new_items);
}
}
/// Grows or shrinks the list as necessary.
/// Never invalidates element pointers.
/// Asserts the capacity is enough for additional items.
pub fn replaceRangeAssumeCapacity(self: *Self, start: usize, len: usize, new_items: []const T) void {
const after_range = start + len;
const range = self.items[start..after_range];
if (range.len == new_items.len)
@memcpy(range[0..new_items.len], new_items)
else if (range.len < new_items.len) {
const first = new_items[0..range.len];
const rest = new_items[range.len..];
@memcpy(range[0..first.len], first);
const dst = self.addManyAtAssumeCapacity(after_range, rest.len);
@memcpy(dst, rest);
} else {
// Shrinking: overwrite the head of the range, slide the tail left,
// then poison and drop the now-unused trailing slots.
const extra = range.len - new_items.len;
@memcpy(range[0..new_items.len], new_items);
std.mem.copyForwards(
T,
self.items[after_range - extra ..],
self.items[after_range..],
);
@memset(self.items[self.items.len - extra ..], undefined);
self.items.len -= extra;
}
}
/// Extend the list by 1 element. Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
pub fn append(self: *Self, allocator: Allocator, item: T) Allocator.Error!void {
const new_item_ptr = try self.addOne(allocator);
new_item_ptr.* = item;
}
/// Extend the list by 1 element.
/// Never invalidates element pointers.
/// Asserts that the list can hold one additional item.
pub fn appendAssumeCapacity(self: *Self, item: T) void {
const new_item_ptr = self.addOneAssumeCapacity();
new_item_ptr.* = item;
}
/// Remove the element at index `i` from the list and return its value.
/// Invalidates pointers to the last element.
/// This operation is O(N).
/// Asserts that the list is not empty.
/// Asserts that the index is in bounds.
pub fn orderedRemove(self: *Self, i: usize) T {
const old_item = self.items[i];
self.replaceRangeAssumeCapacity(i, 1, &.{});
return old_item;
}
/// Removes the element at the specified index and returns it.
/// The empty slot is filled from the end of the list.
/// Invalidates pointers to last element.
/// This operation is O(1).
/// Asserts that the list is not empty.
/// Asserts that the index is in bounds.
pub fn swapRemove(self: *Self, i: usize) T {
if (self.items.len - 1 == i) return self.pop();
const old_item = self.items[i];
self.items[i] = self.pop();
return old_item;
}
/// Append the slice of items to the list. Allocates more
/// memory as necessary.
/// Invalidates element pointers if additional memory is needed.
pub fn appendSlice(self: *Self, allocator: Allocator, items: []const T) Allocator.Error!void {
try self.ensureUnusedCapacity(allocator, items.len);
self.appendSliceAssumeCapacity(items);
}
/// Append the slice of items to the list.
/// Asserts that the list can hold the additional items.
pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
const old_len = self.items.len;
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
@memcpy(self.items[old_len..][0..items.len], items);
}
/// Append the slice of items to the list. Allocates more
/// memory as necessary. Only call this function if a call to `appendSlice` instead would
/// be a compile error.
/// Invalidates element pointers if additional memory is needed.
pub fn appendUnalignedSlice(self: *Self, allocator: Allocator, items: []align(1) const T) Allocator.Error!void {
try self.ensureUnusedCapacity(allocator, items.len);
self.appendUnalignedSliceAssumeCapacity(items);
}
/// Append an unaligned slice of items to the list.
/// Only call this function if a call to `appendSliceAssumeCapacity`
/// instead would be a compile error.
/// Asserts that the list can hold the additional items.
pub fn appendUnalignedSliceAssumeCapacity(self: *Self, items: []align(1) const T) void {
const old_len = self.items.len;
const new_len = old_len + items.len;
assert(new_len <= self.capacity);
self.items.len = new_len;
@memcpy(self.items[old_len..][0..items.len], items);
}
pub const WriterContext = struct {
self: *Self,
allocator: Allocator,
};
pub const Writer = if (T != u8)
@compileError("The Writer interface is only defined for ArrayList(u8) " ++
"but the given type is ArrayList(" ++ @typeName(T) ++ ")")
else
std.io.Writer(WriterContext, Allocator.Error, appendWrite);
/// Initializes a Writer which will append to the list.
pub fn writer(self: *Self, allocator: Allocator) Writer {
return .{ .context = .{ .self = self, .allocator = allocator } };
}
/// Same as `append` except it returns the number of bytes written,
/// which is always the same as `m.len`. The purpose of this function
/// existing is to match `std.io.Writer` API.
/// Invalidates element pointers if additional memory is needed.
fn appendWrite(context: WriterContext, m: []const u8) Allocator.Error!usize {
try context.self.appendSlice(context.allocator, m);
return m.len;
}
pub const FixedWriter = std.io.Writer(*Self, Allocator.Error, appendWriteFixed);
/// Initializes a Writer which will append to the list but will return
/// `error.OutOfMemory` rather than increasing capacity.
pub fn fixedWriter(self: *Self) FixedWriter {
return .{ .context = self };
}
/// The purpose of this function existing is to match `std.io.Writer` API.
fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
const available_capacity = self.capacity - self.items.len;
if (m.len > available_capacity)
return error.OutOfMemory;
self.appendSliceAssumeCapacity(m);
return m.len;
}
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
/// The function is inline so that a comptime-known `value` parameter will
/// have a more optimal memset codegen in case it has a repeated byte pattern.
pub inline fn appendNTimes(self: *Self, allocator: Allocator, value: T, n: usize) Allocator.Error!void {
const old_len = self.items.len;
try self.resize(allocator, try addOrOom(old_len, n));
@memset(self.items[old_len..self.items.len], value);
}
/// Append a value to the list `n` times.
/// Never invalidates element pointers.
/// The function is inline so that a comptime-known `value` parameter will
/// have better memset codegen in case it has a repeated byte pattern.
/// Asserts that the list can hold the additional items.
pub inline fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
const new_len = self.items.len + n;
assert(new_len <= self.capacity);
@memset(self.items.ptr[self.items.len..new_len], value);
self.items.len = new_len;
}
/// Adjust the list length to `new_len`.
/// Additional elements contain the value `undefined`.
/// Invalidates element pointers if additional memory is needed.
pub fn resize(self: *Self, allocator: Allocator, new_len: usize) Allocator.Error!void {
try self.ensureTotalCapacity(allocator, new_len)
self.items.len = new_len;
}
/// Reduce allocated capacity to `new_len`.
/// May invalidate element pointers.
/// Asserts that the new length is less than or equal to the previous length.
pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void {
assert(new_len <= self.items.len);
if (@sizeOf(T) == 0) {
self.items.len = new_len;
return;
}
const old_memory = self.allocatedSlice();
if (allocator.resize(old_memory, new_len)) {
self.capacity = new_len;
self.items.len = new_len;
return;
}
const new_memory = allocator.alignedAlloc(T, alignment, new_len) catch |e| switch (e) {
error.OutOfMemory => {
// No problem, capacity is still correct then.
self.items.len = new_len;
return;
},
};
@memcpy(new_memory, self.items[0..new_len]);
allocator.free(old_memory);
self.items = new_memory;
self.capacity = new_memory.len;
}
/// Reduce length to `new_len`.
/// Invalidates pointers to elements `items[new_len..]`.
/// Keeps capacity the same.
/// Asserts that the new length is less than or equal to the previous length.
pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
self.items.len = new_len;
}
/// Invalidates all element pointers.
pub fn clearRetainingCapacity(self: *Self) void {
self.items.len = 0;
}
/// Invalidates all element pointers.
pub fn clearAndFree(self: *Self, allocator: Allocator) void {
allocator.free(self.allocatedSlice());
self.items.len = 0;
self.capacity = 0;
}
/// If the current capacity is less than `new_capacity`, this function will
/// modify the array so that it can hold at least `new_capacity` items.
/// Invalidates element pointers if additional memory is needed.
pub fn ensureTotalCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
if (self.capacity >= new_capacity) return;
const better_capacity = growCapacity(self.capacity, new_capacity);
return self.ensureTotalCapacityPrecise(allocator, better_capacity);
}
/// If the current capacity is less than `new_capacity`, this function will
/// modify the array so that it can hold exactly `new_capacity` items.
/// Invalidates element pointers if additional memory is needed.
pub fn ensureTotalCapacityPrecise(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
if (@sizeOf(T) == 0) {
self.capacity = math.maxInt(usize);
return;
}
if (self.capacity >= new_capacity) return;
// Here we avoid copying allocated but unused bytes by
// attempting a resize in place, and falling back to allocating
// a new buffer and doing our own copy. With a realloc() call,
// the allocator implementation would pointlessly copy our
// extra capacity.
const old_memory = self.allocatedSlice();
if (allocator.resize(old_memory, new_capacity)) {
self.capacity = new_capacity;
} else {
const new_memory = try allocator.alignedAlloc(T, alignment, new_capacity);
@memcpy(new_memory[0..self.items.len], self.items);
allocator.free(old_memory);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
}
}
/// Modify the array so that it can hold at least `additional_count` **more** items.
/// Invalidates element pointers if additional memory is needed.
pub fn ensureUnusedCapacity(
self: *Self,
allocator: Allocator,
additional_count: usize,
) Allocator.Error!void {
return self.ensureTotalCapacity(allocator, try addOrOom(self.items.len, additional_count));
}
/// Increases the array's length to match the full capacity that is already allocated.
/// The new elements have `undefined` values.
/// Never invalidates element pointers.
pub fn expandToCapacity(self: *Self) void {
self.items.len = self.capacity;
}
/// Increase length by 1, returning pointer to the new item.
/// The returned element pointer becomes invalid when the list is resized.
pub fn addOne(self: *Self, allocator: Allocator) Allocator.Error!*T {
// This can never overflow because `self.items` can never occupy the whole address space
const newlen = self.items.len + 1;
try self.ensureTotalCapacity(allocator, newlen);
return self.addOneAssumeCapacity();
}
/// Increase length by 1, returning pointer to the new item.
/// Never invalidates element pointers.
/// The returned element pointer becomes invalid when the list is resized.
/// Asserts that the list can hold one additional item.
pub fn addOneAssumeCapacity(self: *Self) *T {
assert(self.items.len < self.capacity);
self.items.len += 1;
return &self.items[self.items.len - 1];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
pub fn addManyAsArray(self: *Self, allocator: Allocator, comptime n: usize) Allocator.Error!*[n]T {
const prev_len = self.items.len;
try self.resize(allocator, try addOrOom(self.items.len, n));
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// Never invalidates element pointers.
/// The returned pointer becomes invalid when the list is resized.
/// Asserts that the list can hold the additional items.
pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T {
assert(self.items.len + n <= self.capacity);
const prev_len = self.items.len;
self.items.len += n;
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is a slice pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
/// Resizes list if `self.capacity` is not large enough.
pub fn addManyAsSlice(self: *Self, allocator: Allocator, n: usize) Allocator.Error![]T {
const prev_len = self.items.len;
try self.resize(allocator, try addOrOom(self.items.len, n));
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is a slice pointing to the newly allocated elements.
/// Never invalidates element pointers.
/// The returned pointer becomes invalid when the list is resized.
/// Asserts that the list can hold the additional items.
pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T {
assert(self.items.len + n <= self.capacity);
const prev_len = self.items.len;
self.items.len += n;
return self.items[prev_len..][0..n];
}
/// Remove and return the last element from the list.
/// Invalidates pointers to last element.
/// Asserts that the list is not empty.
pub fn pop(self: *Self) T {
const val = self.items[self.items.len - 1];
self.items.len -= 1;
return val;
}
/// Remove and return the last element from the list.
/// If the list is empty, returns `null`.
/// Invalidates pointers to last element.
pub fn popOrNull(self: *Self) ?T {
if (self.items.len == 0) return null;
return self.pop();
}
/// Returns a slice of all the items plus the extra capacity, whose memory
/// contents are `undefined`.
pub fn allocatedSlice(self: Self) Slice {
return self.items.ptr[0..self.capacity];
}
/// Returns a slice of only the extra capacity after items.
/// This can be useful for writing directly into an ArrayList.
/// Note that such an operation must be followed up with a direct
/// modification of `self.items.len`.
pub fn unusedCapacitySlice(self: Self) Slice {
return self.allocatedSlice()[self.items.len..];
}
/// Return the last element from the list.
/// Asserts that the list is not empty.
pub fn getLast(self: Self) T {
const val = self.items[self.items.len - 1];
return val;
}
/// Return the last element from the list, or
/// return `null` if list is empty.
pub fn getLastOrNull(self: Self) ?T {
if (self.items.len == 0) return null;
return self.getLast();
}
};
}
/// Called when memory growth is necessary. Returns a capacity larger than
/// minimum that grows super-linearly.
fn growCapacity(current: usize, minimum: usize) usize {
    // Grow by roughly 1.5x plus a small constant (so tiny lists do not
    // crawl), saturating at maxInt(usize); repeat until `minimum` is met.
    // At least one growth step is always taken.
    var new = current +| (current / 2 + 8);
    while (new < minimum) {
        new +|= new / 2 + 8;
    }
    return new;
}
/// Integer addition returning `error.OutOfMemory` on overflow.
/// Used for length/capacity arithmetic, where wrap-around would mean the
/// requested size cannot possibly be allocated.
fn addOrOom(a: usize, b: usize) error{OutOfMemory}!usize {
    return std.math.add(usize, a, b) catch error.OutOfMemory;
}
test "init" {
// Both the managed and unmanaged variants start empty with no allocation.
{
var list = ArrayList(i32).init(testing.allocator);
defer list.deinit();
try testing.expect(list.items.len == 0);
try testing.expect(list.capacity == 0);
}
{
const list = ArrayListUnmanaged(i32){};
try testing.expect(list.items.len == 0);
try testing.expect(list.capacity == 0);
}
}
test "initCapacity" {
// initCapacity reserves space without changing the length, for both variants.
const a = testing.allocator;
{
var list = try ArrayList(i8).initCapacity(a, 200);
defer list.deinit();
try testing.expect(list.items.len == 0);
try testing.expect(list.capacity >= 200);
}
{
var list = try ArrayListUnmanaged(i8).initCapacity(a, 200);
defer list.deinit(a);
try testing.expect(list.items.len == 0);
try testing.expect(list.capacity >= 200);
}
}
test "clone" {
const a = testing.allocator;
{
var array = ArrayList(i32).init(a);
try array.append(-1);
try array.append(3);
try array.append(5);
const cloned = try array.clone();
defer cloned.deinit();
try testing.expectEqualSlices(i32, array.items, cloned.items);
try testing.expectEqual(array.allocator, cloned.allocator);
try testing.expect(cloned.capacity >= array.capacity);
// Freeing the original before reading the clone proves the clone owns
// independent memory rather than aliasing the source buffer.
array.deinit();
try testing.expectEqual(@as(i32, -1), cloned.items[0]);
try testing.expectEqual(@as(i32, 3), cloned.items[1]);
try testing.expectEqual(@as(i32, 5), cloned.items[2]);
}
{
var array = ArrayListUnmanaged(i32){};
try array.append(a, -1);
try array.append(a, 3);
try array.append(a, 5);
var cloned = try array.clone(a);
defer cloned.deinit(a);
try testing.expectEqualSlices(i32, array.items, cloned.items);
try testing.expect(cloned.capacity >= array.capacity);
// Same independence check for the unmanaged variant.
array.deinit(a);
try testing.expectEqual(@as(i32, -1), cloned.items[0]);
try testing.expectEqual(@as(i32, 3), cloned.items[1]);
try testing.expectEqual(@as(i32, 5), cloned.items[2]);
}
}
test "basic" {
// Smoke test: append, indexed read, pop, appendSlice (including an empty
// slice), appendUnalignedSlice, and in-place element writes — exercised
// identically for the managed and unmanaged variants.
const a = testing.allocator;
{
var list = ArrayList(i32).init(a);
defer list.deinit();
{
var i: usize = 0;
while (i < 10) : (i += 1) {
list.append(@as(i32, @intCast(i + 1))) catch unreachable;
}
}
{
var i: usize = 0;
while (i < 10) : (i += 1) {
try testing.expect(list.items[i] == @as(i32, @intCast(i + 1)));
}
}
for (list.items, 0..) |v, i| {
try testing.expect(v == @as(i32, @intCast(i + 1)));
}
try testing.expect(list.pop() == 10);
try testing.expect(list.items.len == 9);
list.appendSlice(&[_]i32{ 1, 2, 3 }) catch unreachable;
try testing.expect(list.items.len == 12);
try testing.expect(list.pop() == 3);
try testing.expect(list.pop() == 2);
try testing.expect(list.pop() == 1);
try testing.expect(list.items.len == 9);
var unaligned: [3]i32 align(1) = [_]i32{ 4, 5, 6 };
list.appendUnalignedSlice(&unaligned) catch unreachable;
try testing.expect(list.items.len == 12);
try testing.expect(list.pop() == 6);
try testing.expect(list.pop() == 5);
try testing.expect(list.pop() == 4);
try testing.expect(list.items.len == 9);
list.appendSlice(&[_]i32{}) catch unreachable;
try testing.expect(list.items.len == 9);
// can only set on indices < self.items.len
list.items[7] = 33;
list.items[8] = 42;
try testing.expect(list.pop() == 42);
try testing.expect(list.pop() == 33);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
{
var i: usize = 0;
while (i < 10) : (i += 1) {
list.append(a, @as(i32, @intCast(i + 1))) catch unreachable;
}
}
{
var i: usize = 0;
while (i < 10) : (i += 1) {
try testing.expect(list.items[i] == @as(i32, @intCast(i + 1)));
}
}
for (list.items, 0..) |v, i| {
try testing.expect(v == @as(i32, @intCast(i + 1)));
}
try testing.expect(list.pop() == 10);
try testing.expect(list.items.len == 9);
list.appendSlice(a, &[_]i32{ 1, 2, 3 }) catch unreachable;
try testing.expect(list.items.len == 12);
try testing.expect(list.pop() == 3);
try testing.expect(list.pop() == 2);
try testing.expect(list.pop() == 1);
try testing.expect(list.items.len == 9);
var unaligned: [3]i32 align(1) = [_]i32{ 4, 5, 6 };
list.appendUnalignedSlice(a, &unaligned) catch unreachable;
try testing.expect(list.items.len == 12);
try testing.expect(list.pop() == 6);
try testing.expect(list.pop() == 5);
try testing.expect(list.pop() == 4);
try testing.expect(list.items.len == 9);
list.appendSlice(a, &[_]i32{}) catch unreachable;
try testing.expect(list.items.len == 9);
// can only set on indices < self.items.len
list.items[7] = 33;
list.items[8] = 42;
try testing.expect(list.pop() == 42);
try testing.expect(list.pop() == 33);
}
}
test "appendNTimes" {
// appendNTimes must grow the list by exactly `n` copies of the value.
const a = testing.allocator;
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.appendNTimes(2, 10);
try testing.expectEqual(@as(usize, 10), list.items.len);
for (list.items) |element| {
try testing.expectEqual(@as(i32, 2), element);
}
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.appendNTimes(a, 2, 10);
try testing.expectEqual(@as(usize, 10), list.items.len);
for (list.items) |element| {
try testing.expectEqual(@as(i32, 2), element);
}
}
}
test "appendNTimes with failing allocator" {
// Allocation failure must surface as error.OutOfMemory, not invoke UB.
const a = testing.failing_allocator;
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try testing.expectError(error.OutOfMemory, list.appendNTimes(2, 10));
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try testing.expectError(error.OutOfMemory, list.appendNTimes(a, 2, 10));
}
}
test "orderedRemove" {
// orderedRemove preserves the relative order of the remaining elements,
// whether removing from the middle, the end, the front, or the only item.
const a = testing.allocator;
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.append(1);
try list.append(2);
try list.append(3);
try list.append(4);
try list.append(5);
try list.append(6);
try list.append(7);
//remove from middle
try testing.expectEqual(@as(i32, 4), list.orderedRemove(3));
try testing.expectEqual(@as(i32, 5), list.items[3]);
try testing.expectEqual(@as(usize, 6), list.items.len);
//remove from end
try testing.expectEqual(@as(i32, 7), list.orderedRemove(5));
try testing.expectEqual(@as(usize, 5), list.items.len);
//remove from front
try testing.expectEqual(@as(i32, 1), list.orderedRemove(0));
try testing.expectEqual(@as(i32, 2), list.items[0]);
try testing.expectEqual(@as(usize, 4), list.items.len);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.append(a, 1);
try list.append(a, 2);
try list.append(a, 3);
try list.append(a, 4);
try list.append(a, 5);
try list.append(a, 6);
try list.append(a, 7);
//remove from middle
try testing.expectEqual(@as(i32, 4), list.orderedRemove(3));
try testing.expectEqual(@as(i32, 5), list.items[3]);
try testing.expectEqual(@as(usize, 6), list.items.len);
//remove from end
try testing.expectEqual(@as(i32, 7), list.orderedRemove(5));
try testing.expectEqual(@as(usize, 5), list.items.len);
//remove from front
try testing.expectEqual(@as(i32, 1), list.orderedRemove(0));
try testing.expectEqual(@as(i32, 2), list.items[0]);
try testing.expectEqual(@as(usize, 4), list.items.len);
}
{
// remove last item
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.append(1);
try testing.expectEqual(@as(i32, 1), list.orderedRemove(0));
try testing.expectEqual(@as(usize, 0), list.items.len);
}
{
// remove last item
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.append(a, 1);
try testing.expectEqual(@as(i32, 1), list.orderedRemove(0));
try testing.expectEqual(@as(usize, 0), list.items.len);
}
}
test "swapRemove" {
// swapRemove backfills the removed slot with the current last element,
// so order is not preserved but the operation is O(1).
const a = testing.allocator;
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.append(1);
try list.append(2);
try list.append(3);
try list.append(4);
try list.append(5);
try list.append(6);
try list.append(7);
//remove from middle
try testing.expect(list.swapRemove(3) == 4);
try testing.expect(list.items[3] == 7);
try testing.expect(list.items.len == 6);
//remove from end
try testing.expect(list.swapRemove(5) == 6);
try testing.expect(list.items.len == 5);
//remove from front
try testing.expect(list.swapRemove(0) == 1);
try testing.expect(list.items[0] == 5);
try testing.expect(list.items.len == 4);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.append(a, 1);
try list.append(a, 2);
try list.append(a, 3);
try list.append(a, 4);
try list.append(a, 5);
try list.append(a, 6);
try list.append(a, 7);
//remove from middle
try testing.expect(list.swapRemove(3) == 4);
try testing.expect(list.items[3] == 7);
try testing.expect(list.items.len == 6);
//remove from end
try testing.expect(list.swapRemove(5) == 6);
try testing.expect(list.items.len == 5);
//remove from front
try testing.expect(list.swapRemove(0) == 1);
try testing.expect(list.items[0] == 5);
try testing.expect(list.items.len == 4);
}
}
test "insert" {
// insert shifts later elements up; inserting at the current length is
// equivalent to append (first insert into an empty list).
const a = testing.allocator;
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.insert(0, 1);
try list.append(2);
try list.insert(2, 3);
try list.insert(0, 5);
try testing.expect(list.items[0] == 5);
try testing.expect(list.items[1] == 1);
try testing.expect(list.items[2] == 2);
try testing.expect(list.items[3] == 3);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.insert(a, 0, 1);
try list.append(a, 2);
try list.insert(a, 2, 3);
try list.insert(a, 0, 5);
try testing.expect(list.items[0] == 5);
try testing.expect(list.items[1] == 1);
try testing.expect(list.items[2] == 2);
try testing.expect(list.items[3] == 3);
}
}
test "insertSlice" {
const a = testing.allocator;
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.append(1);
try list.append(2);
try list.append(3);
try list.append(4);
try list.insertSlice(1, &[_]i32{ 9, 8 });
try testing.expect(list.items[0] == 1);
try testing.expect(list.items[1] == 9);
try testing.expect(list.items[2] == 8);
try testing.expect(list.items[3] == 2);
try testing.expect(list.items[4] == 3);
try testing.expect(list.items[5] == 4);
const items = [_]i32{1};
try list.insertSlice(0, items[0..0]);
try testing.expect(list.items.len == 6);
try testing.expect(list.items[0] == 1);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.append(a, 1);
try list.append(a, 2);
try list.append(a, 3);
try list.append(a, 4);
try list.insertSlice(a, 1, &[_]i32{ 9, 8 });
try testing.expect(list.items[0] == 1);
try testing.expect(list.items[1] == 9);
try testing.expect(list.items[2] == 8);
try testing.expect(list.items[3] == 2);
try testing.expect(list.items[4] == 3);
try testing.expect(list.items[5] == 4);
const items = [_]i32{1};
try list.insertSlice(a, 0, items[0..0]);
try testing.expect(list.items.len == 6);
try testing.expect(list.items[0] == 1);
}
}
test "ArrayList.replaceRange" {
const a = testing.allocator;
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
try list.replaceRange(1, 0, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 }, list.items);
}
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
try list.replaceRange(1, 1, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(
i32,
&[_]i32{ 1, 0, 0, 0, 3, 4, 5 },
list.items,
);
}
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
try list.replaceRange(1, 2, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 4, 5 }, list.items);
}
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
try list.replaceRange(1, 3, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 5 }, list.items);
}
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
try list.replaceRange(1, 4, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0 }, list.items);
}
}
test "ArrayList.replaceRangeAssumeCapacity" {
const a = testing.allocator;
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
list.replaceRangeAssumeCapacity(1, 0, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 }, list.items);
}
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
list.replaceRangeAssumeCapacity(1, 1, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(
i32,
&[_]i32{ 1, 0, 0, 0, 3, 4, 5 },
list.items,
);
}
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
list.replaceRangeAssumeCapacity(1, 2, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 4, 5 }, list.items);
}
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
list.replaceRangeAssumeCapacity(1, 3, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 5 }, list.items);
}
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.appendSlice(&[_]i32{ 1, 2, 3, 4, 5 });
list.replaceRangeAssumeCapacity(1, 4, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0 }, list.items);
}
}
test "ArrayListUnmanaged.replaceRange" {
const a = testing.allocator;
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
try list.replaceRange(a, 1, 0, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 }, list.items);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
try list.replaceRange(a, 1, 1, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(
i32,
&[_]i32{ 1, 0, 0, 0, 3, 4, 5 },
list.items,
);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
try list.replaceRange(a, 1, 2, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 4, 5 }, list.items);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
try list.replaceRange(a, 1, 3, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 5 }, list.items);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
try list.replaceRange(a, 1, 4, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0 }, list.items);
}
}
test "ArrayListUnmanaged.replaceRangeAssumeCapacity" {
const a = testing.allocator;
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
list.replaceRangeAssumeCapacity(1, 0, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 }, list.items);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
list.replaceRangeAssumeCapacity(1, 1, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(
i32,
&[_]i32{ 1, 0, 0, 0, 3, 4, 5 },
list.items,
);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
list.replaceRangeAssumeCapacity(1, 2, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 4, 5 }, list.items);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
list.replaceRangeAssumeCapacity(1, 3, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 5 }, list.items);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });
list.replaceRangeAssumeCapacity(1, 4, &[_]i32{ 0, 0, 0 });
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0 }, list.items);
}
}
// Recursive element type for the "ArrayList(T) of struct T" test below:
// each Item owns a managed list of child Items.
const Item = struct {
    integer: i32,
    sub_items: ArrayList(Item),
};
// Same shape using the unmanaged list variant (allocator passed per call).
const ItemUnmanaged = struct {
    integer: i32,
    sub_items: ArrayListUnmanaged(ItemUnmanaged),
};
// Verifies that ArrayList supports a recursive element type: a struct that
// itself contains an ArrayList of the same struct.
test "ArrayList(T) of struct T" {
    const a = std.testing.allocator;
    {
        // Managed variant: child list is initialized with the same allocator.
        var root = Item{ .integer = 1, .sub_items = ArrayList(Item).init(a) };
        defer root.sub_items.deinit();
        try root.sub_items.append(Item{ .integer = 42, .sub_items = ArrayList(Item).init(a) });
        try testing.expect(root.sub_items.items[0].integer == 42);
    }
    {
        // Unmanaged variant: child list starts empty with no stored allocator.
        var root = ItemUnmanaged{ .integer = 1, .sub_items = ArrayListUnmanaged(ItemUnmanaged){} };
        defer root.sub_items.deinit(a);
        try root.sub_items.append(a, ItemUnmanaged{ .integer = 42, .sub_items = ArrayListUnmanaged(ItemUnmanaged){} });
        try testing.expect(root.sub_items.items[0].integer == 42);
    }
}
test "ArrayList(u8) implements writer" {
const a = testing.allocator;
{
var buffer = ArrayList(u8).init(a);
defer buffer.deinit();
const x: i32 = 42;
const y: i32 = 1234;
try buffer.writer().print("x: {}\ny: {}\n", .{ x, y });
try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
}
{
var list = ArrayListAligned(u8, 2).init(a);
defer list.deinit();
const writer = list.writer();
try writer.writeAll("a");
try writer.writeAll("bc");
try writer.writeAll("d");
try writer.writeAll("efg");
try testing.expectEqualSlices(u8, list.items, "abcdefg");
}
}
test "ArrayListUnmanaged(u8) implements writer" {
const a = testing.allocator;
{
var buffer: ArrayListUnmanaged(u8) = .{};
defer buffer.deinit(a);
const x: i32 = 42;
const y: i32 = 1234;
try buffer.writer(a).print("x: {}\ny: {}\n", .{ x, y });
try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
}
{
var list: ArrayListAlignedUnmanaged(u8, 2) = .{};
defer list.deinit(a);
const writer = list.writer(a);
try writer.writeAll("a");
try writer.writeAll("bc");
try writer.writeAll("d");
try writer.writeAll("efg");
try testing.expectEqualSlices(u8, list.items, "abcdefg");
}
}
test "shrink still sets length when resizing is disabled" {
var failing_allocator = testing.FailingAllocator.init(testing.allocator, .{ .resize_fail_index = 0 });
const a = failing_allocator.allocator();
{
var list = ArrayList(i32).init(a);
defer list.deinit();
try list.append(1);
try list.append(2);
try list.append(3);
list.shrinkAndFree(1);
try testing.expect(list.items.len == 1);
}
{
var list = ArrayListUnmanaged(i32){};
defer list.deinit(a);
try list.append(a, 1);
try list.append(a, 2);
try list.append(a, 3);
list.shrinkAndFree(a, 1);
try testing.expect(list.items.len == 1);
}
}
test "shrinkAndFree with a copy" {
    // Make in-place resize fail immediately so shrinkAndFree is forced down
    // its allocate-copy-free fallback path.
    var failing_allocator = testing.FailingAllocator.init(testing.allocator, .{ .resize_fail_index = 0 });
    const a = failing_allocator.allocator();

    var list = ArrayList(i32).init(a);
    defer list.deinit();

    try list.appendNTimes(3, 16);
    list.shrinkAndFree(4);
    // The surviving prefix must have been copied intact.
    try testing.expectEqualSlices(i32, &.{ 3, 3, 3, 3 }, list.items);
}
test "addManyAsArray" {
const a = std.testing.allocator;
{
var list = ArrayList(u8).init(a);
defer list.deinit();
(try list.addManyAsArray(4)).* = "aoeu".*;
try list.ensureTotalCapacity(8);
list.addManyAsArrayAssumeCapacity(4).* = "asdf".*;
try testing.expectEqualSlices(u8, list.items, "aoeuasdf");
}
{
var list = ArrayListUnmanaged(u8){};
defer list.deinit(a);
(try list.addManyAsArray(a, 4)).* = "aoeu".*;
try list.ensureTotalCapacity(a, 8);
list.addManyAsArrayAssumeCapacity(4).* = "asdf".*;
try testing.expectEqualSlices(u8, list.items, "aoeuasdf");
}
}
test "growing memory preserves contents" {
// Shrink the list after every insertion to ensure that a memory growth
// will be triggered in the next operation.
const a = std.testing.allocator;
{
var list = ArrayList(u8).init(a);
defer list.deinit();
(try list.addManyAsArray(4)).* = "abcd".*;
list.shrinkAndFree(4);
try list.appendSlice("efgh");
try testing.expectEqualSlices(u8, list.items, "abcdefgh");
list.shrinkAndFree(8);
try list.insertSlice(4, "ijkl");
try testing.expectEqualSlices(u8, list.items, "abcdijklefgh");
}
{
var list = ArrayListUnmanaged(u8){};
defer list.deinit(a);
(try list.addManyAsArray(a, 4)).* = "abcd".*;
list.shrinkAndFree(a, 4);
try list.appendSlice(a, "efgh");
try testing.expectEqualSlices(u8, list.items, "abcdefgh");
list.shrinkAndFree(a, 8);
try list.insertSlice(a, 4, "ijkl");
try testing.expectEqualSlices(u8, list.items, "abcdijklefgh");
}
}
test "fromOwnedSlice" {
const a = testing.allocator;
{
var orig_list = ArrayList(u8).init(a);
defer orig_list.deinit();
try orig_list.appendSlice("foobar");
const slice = try orig_list.toOwnedSlice();
var list = ArrayList(u8).fromOwnedSlice(a, slice);
defer list.deinit();
try testing.expectEqualStrings(list.items, "foobar");
}
{
var list = ArrayList(u8).init(a);
defer list.deinit();
try list.appendSlice("foobar");
const slice = try list.toOwnedSlice();
var unmanaged = ArrayListUnmanaged(u8).fromOwnedSlice(slice);
defer unmanaged.deinit(a);
try testing.expectEqualStrings(unmanaged.items, "foobar");
}
}
test "fromOwnedSliceSentinel" {
const a = testing.allocator;
{
var orig_list = ArrayList(u8).init(a);
defer orig_list.deinit();
try orig_list.appendSlice("foobar");
const sentinel_slice = try orig_list.toOwnedSliceSentinel(0);
var list = ArrayList(u8).fromOwnedSliceSentinel(a, 0, sentinel_slice);
defer list.deinit();
try testing.expectEqualStrings(list.items, "foobar");
}
{
var list = ArrayList(u8).init(a);
defer list.deinit();
try list.appendSlice("foobar");
const sentinel_slice = try list.toOwnedSliceSentinel(0);
var unmanaged = ArrayListUnmanaged(u8).fromOwnedSliceSentinel(0, sentinel_slice);
defer unmanaged.deinit(a);
try testing.expectEqualStrings(unmanaged.items, "foobar");
}
}
test "toOwnedSliceSentinel" {
const a = testing.allocator;
{
var list = ArrayList(u8).init(a);
defer list.deinit();
try list.appendSlice("foobar");
const result = try list.toOwnedSliceSentinel(0);
defer a.free(result);
try testing.expectEqualStrings(result, mem.sliceTo(result.ptr, 0));
}
{
var list = ArrayListUnmanaged(u8){};
defer list.deinit(a);
try list.appendSlice(a, "foobar");
const result = try list.toOwnedSliceSentinel(a, 0);
defer a.free(result);
try testing.expectEqualStrings(result, mem.sliceTo(result.ptr, 0));
}
}
test "accepts unaligned slices" {
const a = testing.allocator;
{
var list = std.ArrayListAligned(u8, 8).init(a);
defer list.deinit();
try list.appendSlice(&.{ 0, 1, 2, 3 });
try list.insertSlice(2, &.{ 4, 5, 6, 7 });
try list.replaceRange(1, 3, &.{ 8, 9 });
try testing.expectEqualSlices(u8, list.items, &.{ 0, 8, 9, 6, 7, 2, 3 });
}
{
var list = std.ArrayListAlignedUnmanaged(u8, 8){};
defer list.deinit(a);
try list.appendSlice(a, &.{ 0, 1, 2, 3 });
try list.insertSlice(a, 2, &.{ 4, 5, 6, 7 });
try list.replaceRange(a, 1, 3, &.{ 8, 9 });
try testing.expectEqualSlices(u8, list.items, &.{ 0, 8, 9, 6, 7, 2, 3 });
}
}
test "ArrayList(u0)" {
    // Zero-sized elements need no storage, so even the failing allocator
    // (which errors on every allocation) must succeed here.
    const a = testing.failing_allocator;

    var list = ArrayList(u0).init(a);
    defer list.deinit();

    var i: usize = 0;
    while (i < 3) : (i += 1) {
        try list.append(0);
    }
    try testing.expectEqual(@as(usize, 3), list.items.len);

    // Iteration must visit exactly the appended elements, all equal to the
    // sole u0 value.
    for (list.items) |x| {
        try testing.expectEqual(@as(u0, 0), x);
    }
}
// popOrNull on ArrayList(?u32) returns ??u32: the outer optional signals
// "list was empty", the inner optional is the stored element itself.
test "ArrayList(?u32).popOrNull()" {
    const a = testing.allocator;
    var list = ArrayList(?u32).init(a);
    defer list.deinit();
    try list.append(null);
    try list.append(1);
    try list.append(2);
    try testing.expectEqual(list.items.len, 3);
    // Elements come back in LIFO order; `.?` strips the "non-empty" wrapper.
    try testing.expect(list.popOrNull().? == @as(u32, 2));
    try testing.expect(list.popOrNull().? == @as(u32, 1));
    // A stored null element (inner null) ...
    try testing.expect(list.popOrNull().? == null);
    // ... is distinct from the outer null returned once the list is empty.
    try testing.expect(list.popOrNull() == null);
}
test "ArrayList(u32).getLast()" {
    var list = ArrayList(u32).init(testing.allocator);
    defer list.deinit();

    try list.append(2);
    // getLast must be callable through a const view of the list.
    const const_list = list;
    try testing.expectEqual(@as(u32, 2), const_list.getLast());
}
test "ArrayList(u32).getLastOrNull()" {
    var list = ArrayList(u32).init(testing.allocator);
    defer list.deinit();

    // An empty list yields null rather than tripping a safety check.
    try testing.expectEqual(@as(?u32, null), list.getLastOrNull());

    try list.append(2);
    // Must also work through a const view of the list.
    const const_list = list;
    try testing.expectEqual(@as(u32, 2), const_list.getLastOrNull().?);
}
test "return OutOfMemory when capacity would exceed maximum usize integer value" {
const a = testing.allocator;
const new_item: u32 = 42;
const items = &.{ 42, 43 };
{
var list: ArrayListUnmanaged(u32) = .{
.items = undefined,
.capacity = math.maxInt(usize) - 1,
};
list.items.len = math.maxInt(usize) - 1;
try testing.expectError(error.OutOfMemory, list.appendSlice(a, items));
try testing.expectError(error.OutOfMemory, list.appendNTimes(a, new_item, 2));
try testing.expectError(error.OutOfMemory, list.appendUnalignedSlice(a, &.{ new_item, new_item }));
try testing.expectError(error.OutOfMemory, list.addManyAt(a, 0, 2));
try testing.expectError(error.OutOfMemory, list.addManyAsArray(a, 2));
try testing.expectError(error.OutOfMemory, list.addManyAsSlice(a, 2));
try testing.expectError(error.OutOfMemory, list.insertSlice(a, 0, items));
try testing.expectError(error.OutOfMemory, list.ensureUnusedCapacity(a, 2));
}
{
var list: ArrayList(u32) = .{
.items = undefined,
.capacity = math.maxInt(usize) - 1,
.allocator = a,
};
list.items.len = math.maxInt(usize) - 1;
try testing.expectError(error.OutOfMemory, list.appendSlice(items));
try testing.expectError(error.OutOfMemory, list.appendNTimes(new_item, 2));
try testing.expectError(error.OutOfMemory, list.appendUnalignedSlice(&.{ new_item, new_item }));
try testing.expectError(error.OutOfMemory, list.addManyAt(0, 2));
try testing.expectError(error.OutOfMemory, list.addManyAsArray(2));
try testing.expectError(error.OutOfMemory, list.addManyAsSlice(2));
try testing.expectError(error.OutOfMemory, list.insertSlice(0, items));
try testing.expectError(error.OutOfMemory, list.ensureUnusedCapacity(2));
}
}
| https://raw.githubusercontent.com/ziglang/zig/d9bd34fd0533295044ffb4160da41f7873aff905/lib/std/array_list.zig |
const std = @import("std");
const raylib = @import("src/build.zig");
// This has been tested to work with zig 0.12.0
pub fn build(b: *std.Build) !void {
    // Forward the whole build to raylib's own build script; any error it
    // returns propagates unchanged to the zig build runner.
    return raylib.build(b);
}
// expose helper functions to user's build.zig
pub const addRaylib = raylib.addRaylib;
pub const addRaygui = raylib.addRaygui;
| https://raw.githubusercontent.com/raysan5/raylib/33c598123c4b6ef78ff2ac3dcc1094da4dd6efef/build.zig |
const mach = @import("../main.zig");
const math = mach.math;
/// Name this module registers under in mach's module system.
pub const name = .mach_gfx_text_style;
/// Convenience alias for this module's mach module wrapper type.
pub const Mod = mach.Mod(@This());
/// Component schema contributed by this module. Each entry declares the
/// component's value type plus a human-readable description; commented-out
/// entries are planned but not yet implemented (see the TODOs).
pub const components = .{
    // // TODO: ship a default font
    // .font_name = .{ .type = []const u8, .description =
    // \\ Desired font to render text with.
    // \\ TODO(text): this is not currently implemented
    // },
    .font_size = .{ .type = f32, .description =
    \\ Font size in pixels
    \\ e.g. 12 * mach.gfx.px_per_pt for 12pt font size
    },
    // // e.g. mach.gfx.font_weight_normal
    // .font_weight = .{ .type = u16, .description =
    // \\ Font weight
    // \\ TODO(text): this is not currently implemented
    // },
    // // e.g. false
    // .italic = .{ .type = bool, .description =
    // \\ Italic text
    // \\ TODO(text): this is not currently implemented
    // },
    .font_color = .{ .type = math.Vec4, .description =
    \\ Fill color of text
    \\ e.g. vec4(0.0, 0.0, 0.0, 1.0) // black
    \\ e.g. vec4(1.0, 1.0, 1.0, 1.0) // white
    },
    // TODO(text): allow user to specify projection matrix (3d-space flat text etc.)
};
| https://raw.githubusercontent.com/hexops/mach/b72f0e11b6d292c2b60789359a61f7ee6d7dc371/src/gfx/TextStyle.zig |
const builtin = @import("builtin");
const native_arch = builtin.cpu.arch;
const std = @import("std");
pub extern "ole32" fn CoInitializeEx(pvReserved: ?LPVOID, dwCoInit: DWORD) callconv(WINAPI) HRESULT;
pub extern "ole32" fn CoUninitialize() callconv(WINAPI) void;
pub extern "ole32" fn CoTaskMemAlloc(size: SIZE_T) callconv(WINAPI) ?LPVOID;
pub extern "ole32" fn CoTaskMemFree(pv: LPVOID) callconv(WINAPI) void;
pub const COINIT_APARTMENTTHREADED = 0x2;
pub const COINIT_MULTITHREADED = 0x3;
pub const COINIT_DISABLE_OLE1DDE = 0x4;
pub const COINIT_SPEED_OVER_MEMORY = 0x8;
pub const WINAPI: std.builtin.CallingConvention = if (native_arch == .x86) .Stdcall else .C;
pub const BOOL = c_int;
pub const BOOLEAN = BYTE;
pub const BYTE = u8;
pub const CHAR = u8;
pub const UCHAR = u8;
pub const FLOAT = f32;
pub const HANDLE = *anyopaque;
pub const HCRYPTPROV = ULONG_PTR;
pub const ATOM = u16;
pub const HBRUSH = *opaque {};
pub const HCURSOR = *opaque {};
pub const HICON = *opaque {};
pub const HINSTANCE = *opaque {};
pub const HMENU = *opaque {};
pub const HMODULE = *opaque {};
pub const HWND = std.os.windows.HWND;
pub const HDC = *opaque {};
pub const HGLRC = *opaque {};
pub const FARPROC = *opaque {};
pub const INT = c_int;
pub const LPCSTR = [*:0]const CHAR;
pub const LPCVOID = *const anyopaque;
pub const LPSTR = [*:0]CHAR;
pub const LPVOID = *anyopaque;
pub const LPWSTR = [*:0]WCHAR;
pub const LPCWSTR = [*:0]const WCHAR;
pub const PVOID = *anyopaque;
pub const PWSTR = [*:0]WCHAR;
pub const PCWSTR = [*:0]const WCHAR;
pub const SIZE_T = usize;
pub const UINT = c_uint;
pub const UINT_MAX: UINT = 4294967295;
pub const ULONG_PTR = usize;
pub const LONG_PTR = isize;
pub const DWORD_PTR = ULONG_PTR;
pub const WCHAR = u16;
pub const WORD = u16;
pub const DWORD = u32;
pub const DWORD64 = u64;
pub const LARGE_INTEGER = i64;
pub const ULARGE_INTEGER = u64;
pub const USHORT = u16;
pub const SHORT = i16;
pub const ULONG = u32;
pub const LONG = i32;
pub const ULONG64 = u64;
pub const ULONGLONG = u64;
pub const LONGLONG = i64;
pub const HLOCAL = HANDLE;
pub const LANGID = c_ushort;
pub const HRESULT = c_long;
pub const GUID = std.os.windows.GUID;
pub const NTSTATUS = std.os.windows.NTSTATUS;
pub const CRITICAL_SECTION = std.os.windows.CRITICAL_SECTION;
pub const WPARAM = usize;
pub const LPARAM = LONG_PTR;
pub const LRESULT = LONG_PTR;
pub const TRUE = 1;
pub const FALSE = 0;
pub const MAX_PATH = 260;
pub const S_OK = 0;
pub const S_FALSE = @as(HRESULT, @bitCast(@as(c_ulong, 0x00000001)));
pub const E_NOTIMPL = @as(c_long, @bitCast(@as(c_ulong, 0x80004001)));
pub const E_NOINTERFACE = @as(c_long, @bitCast(@as(c_ulong, 0x80004002)));
pub const E_POINTER = @as(c_long, @bitCast(@as(c_ulong, 0x80004003)));
pub const E_ABORT = @as(c_long, @bitCast(@as(c_ulong, 0x80004004)));
pub const E_FAIL = @as(c_long, @bitCast(@as(c_ulong, 0x80004005)));
pub const E_UNEXPECTED = @as(c_long, @bitCast(@as(c_ulong, 0x8000FFFF)));
pub const E_ACCESSDENIED = @as(c_long, @bitCast(@as(c_ulong, 0x80070005)));
pub const E_HANDLE = @as(c_long, @bitCast(@as(c_ulong, 0x80070006)));
pub const E_OUTOFMEMORY = @as(c_long, @bitCast(@as(c_ulong, 0x8007000E)));
pub const E_INVALIDARG = @as(c_long, @bitCast(@as(c_ulong, 0x80070057)));
pub const E_FILE_NOT_FOUND = @as(HRESULT, @bitCast(@as(c_ulong, 0x80070002)));
pub const Error = error{
UNEXPECTED,
NOTIMPL,
OUTOFMEMORY,
INVALIDARG,
POINTER,
HANDLE,
ABORT,
FAIL,
ACCESSDENIED,
};
pub const MiscError = error{
E_FILE_NOT_FOUND,
S_FALSE,
};
pub const ERROR_SUCCESS = @as(LONG, 0);
pub const ERROR_DEVICE_NOT_CONNECTED = @as(LONG, 1167);
pub const ERROR_EMPTY = @as(LONG, 4306);
pub const SEVERITY_SUCCESS = 0;
pub const SEVERITY_ERROR = 1;
/// Builds an HRESULT from its bit fields, mirroring the Windows MAKE_HRESULT
/// macro: severity in bit 31, facility in bits 16-30, code in bits 0-15.
/// NOTE(review): with severity == SEVERITY_ERROR (1), `severity << 31` sets
/// the sign bit of a signed 32-bit LONG; this assumes two's-complement
/// truncating shift semantics on the targeted Zig version. If the safety
/// checker ever objects, hoist the math into u32 and @bitCast the result.
pub fn MAKE_HRESULT(severity: LONG, facility: LONG, value: LONG) HRESULT {
    return @as(HRESULT, (severity << 31) | (facility << 16) | value);
}
pub const GENERIC_READ = 0x80000000;
pub const GENERIC_WRITE = 0x40000000;
pub const GENERIC_EXECUTE = 0x20000000;
pub const GENERIC_ALL = 0x10000000;
pub const CW_USEDEFAULT = @as(i32, @bitCast(@as(u32, 0x80000000)));
pub const RECT = extern struct {
left: LONG,
top: LONG,
right: LONG,
bottom: LONG,
};
pub const POINT = extern struct {
x: LONG,
y: LONG,
};
pub const MINMAXINFO = extern struct {
ptReserved: POINT,
ptMaxSize: POINT,
ptMaxPosition: POINT,
ptMinTrackSize: POINT,
ptMaxTrackSize: POINT,
};
pub extern "user32" fn SetProcessDPIAware() callconv(WINAPI) BOOL;
pub extern "user32" fn GetClientRect(HWND, *RECT) callconv(WINAPI) BOOL;
pub extern "user32" fn SetWindowTextA(hWnd: ?HWND, lpString: LPCSTR) callconv(WINAPI) BOOL;
pub extern "user32" fn GetAsyncKeyState(vKey: c_int) callconv(WINAPI) SHORT;
pub extern "user32" fn GetKeyState(vKey: c_int) callconv(WINAPI) SHORT;
pub extern "user32" fn LoadCursorA(hInstance: ?HINSTANCE, lpCursorName: LPCSTR) callconv(WINAPI) ?HCURSOR;
pub const TME_LEAVE = 0x00000002;
pub const TRACKMOUSEEVENT = extern struct {
cbSize: DWORD,
dwFlags: DWORD,
hwndTrack: ?HWND,
dwHoverTime: DWORD,
};
pub extern "user32" fn TrackMouseEvent(event: *TRACKMOUSEEVENT) callconv(WINAPI) BOOL;
pub extern "user32" fn SetCapture(hWnd: ?HWND) callconv(WINAPI) ?HWND;
pub extern "user32" fn GetCapture() callconv(WINAPI) ?HWND;
pub extern "user32" fn ReleaseCapture() callconv(WINAPI) BOOL;
pub extern "user32" fn GetForegroundWindow() callconv(WINAPI) ?HWND;
pub extern "user32" fn IsChild(hWndParent: ?HWND, hWnd: ?HWND) callconv(WINAPI) BOOL;
pub extern "user32" fn GetCursorPos(point: *POINT) callconv(WINAPI) BOOL;
pub extern "user32" fn ScreenToClient(hWnd: ?HWND, lpPoint: *POINT) callconv(WINAPI) BOOL;
pub extern "user32" fn RegisterClassExA(*const WNDCLASSEXA) callconv(WINAPI) ATOM;
pub extern "user32" fn GetWindowLongPtrA(hWnd: ?HWND, nIndex: INT) callconv(WINAPI) ?*anyopaque;
pub extern "user32" fn SetWindowLongPtrA(hWnd: ?HWND, nIndex: INT, dwNewLong: ?*anyopaque) callconv(WINAPI) LONG_PTR;
pub extern "user32" fn AdjustWindowRectEx(
lpRect: *RECT,
dwStyle: DWORD,
bMenu: BOOL,
dwExStyle: DWORD,
) callconv(WINAPI) BOOL;
pub extern "user32" fn CreateWindowExA(
dwExStyle: DWORD,
lpClassName: ?LPCSTR,
lpWindowName: ?LPCSTR,
dwStyle: DWORD,
X: i32,
Y: i32,
nWidth: i32,
nHeight: i32,
hWindParent: ?HWND,
hMenu: ?HMENU,
hInstance: HINSTANCE,
lpParam: ?LPVOID,
) callconv(WINAPI) ?HWND;
pub extern "user32" fn DestroyWindow(hWnd: HWND) BOOL;
pub extern "user32" fn PostQuitMessage(nExitCode: i32) callconv(WINAPI) void;
pub extern "user32" fn DefWindowProcA(
hWnd: HWND,
Msg: UINT,
wParam: WPARAM,
lParam: LPARAM,
) callconv(WINAPI) LRESULT;
pub const PM_NOREMOVE = 0x0000;
pub const PM_REMOVE = 0x0001;
pub const PM_NOYIELD = 0x0002;
pub extern "user32" fn PeekMessageA(
lpMsg: *MSG,
hWnd: ?HWND,
wMsgFilterMin: UINT,
wMsgFilterMax: UINT,
wRemoveMsg: UINT,
) callconv(WINAPI) BOOL;
pub extern "user32" fn DispatchMessageA(lpMsg: *const MSG) callconv(WINAPI) LRESULT;
pub extern "user32" fn TranslateMessage(lpMsg: *const MSG) callconv(WINAPI) BOOL;
pub const MB_OK = 0x00000000;
pub const MB_ICONHAND = 0x00000010;
pub const MB_ICONERROR = MB_ICONHAND;
pub extern "user32" fn MessageBoxA(
hWnd: ?HWND,
lpText: LPCSTR,
lpCaption: LPCSTR,
uType: UINT,
) callconv(WINAPI) i32;
pub const KNOWNFOLDERID = GUID;
pub const FOLDERID_LocalAppData = GUID.parse("{F1B32785-6FBA-4FCF-9D55-7B8E7F157091}");
pub const FOLDERID_ProgramFiles = GUID.parse("{905e63b6-c1bf-494e-b29c-65b732d3d21a}");
pub const KF_FLAG_DEFAULT = 0;
pub const KF_FLAG_NO_APPCONTAINER_REDIRECTION = 65536;
pub const KF_FLAG_CREATE = 32768;
pub const KF_FLAG_DONT_VERIFY = 16384;
pub const KF_FLAG_DONT_UNEXPAND = 8192;
pub const KF_FLAG_NO_ALIAS = 4096;
pub const KF_FLAG_INIT = 2048;
pub const KF_FLAG_DEFAULT_PATH = 1024;
pub const KF_FLAG_NOT_PARENT_RELATIVE = 512;
pub const KF_FLAG_SIMPLE_IDLIST = 256;
pub const KF_FLAG_ALIAS_ONLY = -2147483648;
pub extern "shell32" fn SHGetKnownFolderPath(
rfid: *const KNOWNFOLDERID,
dwFlags: DWORD,
hToken: ?HANDLE,
ppszPath: *[*:0]WCHAR,
) callconv(WINAPI) HRESULT;
pub const WS_BORDER = 0x00800000;
pub const WS_OVERLAPPED = 0x00000000;
pub const WS_SYSMENU = 0x00080000;
pub const WS_DLGFRAME = 0x00400000;
pub const WS_CAPTION = WS_BORDER | WS_DLGFRAME;
pub const WS_MINIMIZEBOX = 0x00020000;
pub const WS_MAXIMIZEBOX = 0x00010000;
pub const WS_THICKFRAME = 0x00040000;
pub const WS_OVERLAPPEDWINDOW = WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU | WS_THICKFRAME |
WS_MINIMIZEBOX | WS_MAXIMIZEBOX;
pub const WS_VISIBLE = 0x10000000;
pub const WM_MOUSEMOVE = 0x0200;
pub const WM_LBUTTONDOWN = 0x0201;
pub const WM_LBUTTONUP = 0x0202;
pub const WM_LBUTTONDBLCLK = 0x0203;
pub const WM_RBUTTONDOWN = 0x0204;
pub const WM_RBUTTONUP = 0x0205;
pub const WM_RBUTTONDBLCLK = 0x0206;
pub const WM_MBUTTONDOWN = 0x0207;
pub const WM_MBUTTONUP = 0x0208;
pub const WM_MBUTTONDBLCLK = 0x0209;
pub const WM_MOUSEWHEEL = 0x020A;
pub const WM_MOUSELEAVE = 0x02A3;
pub const WM_INPUT = 0x00FF;
pub const WM_KEYDOWN = 0x0100;
pub const WM_KEYUP = 0x0101;
pub const WM_CHAR = 0x0102;
pub const WM_SYSKEYDOWN = 0x0104;
pub const WM_SYSKEYUP = 0x0105;
pub const WM_SETFOCUS = 0x0007;
pub const WM_KILLFOCUS = 0x0008;
pub const WM_CREATE = 0x0001;
pub const WM_DESTROY = 0x0002;
pub const WM_MOVE = 0x0003;
pub const WM_SIZE = 0x0005;
pub const WM_ACTIVATE = 0x0006;
pub const WM_ENABLE = 0x000A;
pub const WM_PAINT = 0x000F;
pub const WM_CLOSE = 0x0010;
pub const WM_QUIT = 0x0012;
pub const WM_GETMINMAXINFO = 0x0024;
pub const SECURITY_ATTRIBUTES = extern struct {
nLength: DWORD,
lpSecurityDescriptor: ?*anyopaque,
bInheritHandle: BOOL,
};
pub extern "kernel32" fn GetModuleHandleA(lpModuleName: ?LPCSTR) callconv(WINAPI) ?HMODULE;
pub extern "kernel32" fn LoadLibraryA(lpLibFileName: LPCSTR) callconv(WINAPI) ?HMODULE;
pub extern "kernel32" fn GetProcAddress(hModule: HMODULE, lpProcName: LPCSTR) callconv(WINAPI) ?FARPROC;
pub extern "kernel32" fn FreeLibrary(hModule: HMODULE) callconv(WINAPI) BOOL;
pub extern "kernel32" fn ExitProcess(exit_code: UINT) callconv(WINAPI) noreturn;
pub extern "kernel32" fn CloseHandle(hObject: HANDLE) callconv(WINAPI) BOOL;
pub const PTHREAD_START_ROUTINE = *const fn (LPVOID) callconv(.C) DWORD;
pub const LPTHREAD_START_ROUTINE = PTHREAD_START_ROUTINE;
pub extern "kernel32" fn CreateThread(
lpThreadAttributes: ?*SECURITY_ATTRIBUTES,
dwStackSize: SIZE_T,
lpStartAddress: LPTHREAD_START_ROUTINE,
lpParameter: ?LPVOID,
dwCreationFlags: DWORD,
lpThreadId: ?*DWORD,
) callconv(WINAPI) ?HANDLE;
pub const EVENT_ALL_ACCESS = 0x1F0003;
pub extern "kernel32" fn CreateEventExA(
lpEventAttributes: ?*SECURITY_ATTRIBUTES,
lpName: LPCSTR,
dwFlags: DWORD,
dwDesiredAccess: DWORD,
) callconv(WINAPI) ?HANDLE;
pub const INFINITE = 4294967295;
pub extern "kernel32" fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) callconv(WINAPI) DWORD;
pub extern "kernel32" fn InitializeCriticalSection(lpCriticalSection: *CRITICAL_SECTION) callconv(WINAPI) void;
pub extern "kernel32" fn EnterCriticalSection(lpCriticalSection: *CRITICAL_SECTION) callconv(WINAPI) void;
pub extern "kernel32" fn LeaveCriticalSection(lpCriticalSection: *CRITICAL_SECTION) callconv(WINAPI) void;
pub extern "kernel32" fn DeleteCriticalSection(lpCriticalSection: *CRITICAL_SECTION) callconv(WINAPI) void;
pub extern "kernel32" fn Sleep(dwMilliseconds: DWORD) void;
pub extern "ntdll" fn RtlGetVersion(lpVersionInformation: *OSVERSIONINFOW) callconv(WINAPI) NTSTATUS;
pub const WNDPROC = *const fn (hwnd: HWND, uMsg: UINT, wParam: WPARAM, lParam: LPARAM) callconv(WINAPI) LRESULT;
pub const MSG = extern struct {
hWnd: ?HWND,
message: UINT,
wParam: WPARAM,
lParam: LPARAM,
time: DWORD,
pt: POINT,
lPrivate: DWORD,
};
pub const WNDCLASSEXA = extern struct {
cbSize: UINT = @sizeOf(WNDCLASSEXA),
style: UINT,
lpfnWndProc: WNDPROC,
cbClsExtra: i32 = 0,
cbWndExtra: i32 = 0,
hInstance: HINSTANCE,
hIcon: ?HICON,
hCursor: ?HCURSOR,
hbrBackground: ?HBRUSH,
lpszMenuName: ?LPCSTR,
lpszClassName: LPCSTR,
hIconSm: ?HICON,
};
pub const OSVERSIONINFOW = extern struct {
dwOSVersionInfoSize: ULONG,
dwMajorVersion: ULONG,
dwMinorVersion: ULONG,
dwBuildNumber: ULONG,
dwPlatformId: ULONG,
szCSDVersion: [128]WCHAR,
};
pub const INT8 = i8;
pub const UINT8 = u8;
pub const UINT16 = c_ushort;
pub const UINT32 = c_uint;
pub const UINT64 = c_ulonglong;
pub const HMONITOR = HANDLE;
pub const REFERENCE_TIME = c_longlong;
pub const LUID = extern struct {
LowPart: DWORD,
HighPart: LONG,
};
pub const VT_UI4 = 19;
pub const VT_I8 = 20;
pub const VT_UI8 = 21;
pub const VT_INT = 22;
pub const VT_UINT = 23;
pub const VARTYPE = u16;
pub const PROPVARIANT = extern struct {
vt: VARTYPE,
wReserved1: WORD = 0,
wReserved2: WORD = 0,
wReserved3: WORD = 0,
u: extern union {
intVal: i32,
uintVal: u32,
hVal: i64,
},
decVal: u64 = 0,
};
comptime {
std.debug.assert(@sizeOf(PROPVARIANT) == 24);
}
/// One notch of mouse-wheel rotation as reported by WM_MOUSEWHEEL.
pub const WHEEL_DELTA = 120;
/// Signed wheel rotation taken from the high word of wParam; a multiple of
/// WHEEL_DELTA, positive when the wheel is rotated away from the user.
pub inline fn GET_WHEEL_DELTA_WPARAM(wparam: WPARAM) i16 {
    return @as(i16, @bitCast(@as(u16, @intCast((wparam >> 16) & 0xffff))));
}
/// Signed x-coordinate from the low word of lParam (mouse messages).
pub inline fn GET_X_LPARAM(lparam: LPARAM) i32 {
    return @as(i32, @intCast(@as(i16, @bitCast(@as(u16, @intCast(lparam & 0xffff))))));
}
/// Signed y-coordinate from the high word of lParam (mouse messages).
pub inline fn GET_Y_LPARAM(lparam: LPARAM) i32 {
    return @as(i32, @intCast(@as(i16, @bitCast(@as(u16, @intCast((lparam >> 16) & 0xffff))))));
}
/// Low 16 bits of a DWORD.
pub inline fn LOWORD(dword: DWORD) WORD {
    return @as(WORD, @bitCast(@as(u16, @intCast(dword & 0xffff))));
}
/// High 16 bits of a DWORD.
pub inline fn HIWORD(dword: DWORD) WORD {
    return @as(WORD, @bitCast(@as(u16, @intCast((dword >> 16) & 0xffff))));
}
/// IID of IUnknown, the root interface of every COM object.
pub const IID_IUnknown = GUID.parse("{00000000-0000-0000-C000-000000000046}");
/// Minimal Zig binding of COM's IUnknown. ABI layout is a single pointer to
/// the interface vtable (`__v`), which is exactly what COM servers hand out.
pub const IUnknown = extern struct {
    __v: *const VTable,
    pub usingnamespace Methods(@This());
    /// Mixin that gives any derived interface type T the three IUnknown
    /// calls. Each forwards through the vtable after casting the receiver
    /// down to IUnknown — valid because every COM vtable begins with these
    /// three slots in this order.
    pub fn Methods(comptime T: type) type {
        return extern struct {
            pub inline fn QueryInterface(self: *T, guid: *const GUID, outobj: ?*?*anyopaque) HRESULT {
                return @as(*const IUnknown.VTable, @ptrCast(self.__v))
                    .QueryInterface(@as(*IUnknown, @ptrCast(self)), guid, outobj);
            }
            pub inline fn AddRef(self: *T) ULONG {
                return @as(*const IUnknown.VTable, @ptrCast(self.__v)).AddRef(@as(*IUnknown, @ptrCast(self)));
            }
            pub inline fn Release(self: *T) ULONG {
                return @as(*const IUnknown.VTable, @ptrCast(self.__v)).Release(@as(*IUnknown, @ptrCast(self)));
            }
        };
    }
    /// Function-pointer table matching the COM IUnknown ABI (stdcall on x86).
    pub const VTable = extern struct {
        QueryInterface: *const fn (*IUnknown, *const GUID, ?*?*anyopaque) callconv(WINAPI) HRESULT,
        AddRef: *const fn (*IUnknown) callconv(WINAPI) ULONG,
        Release: *const fn (*IUnknown) callconv(WINAPI) ULONG,
    };
};
pub extern "kernel32" fn ExitThread(DWORD) callconv(WINAPI) void;
pub extern "kernel32" fn TerminateThread(HANDLE, DWORD) callconv(WINAPI) BOOL;
pub const CLSCTX_INPROC_SERVER = 0x1;
pub extern "ole32" fn CoCreateInstance(
rclsid: *const GUID,
pUnkOuter: ?*IUnknown,
dwClsContext: DWORD,
riid: *const GUID,
ppv: *?*anyopaque,
) callconv(WINAPI) HRESULT;
pub const VK_LBUTTON = 0x01;
pub const VK_RBUTTON = 0x02;
pub const VK_TAB = 0x09;
pub const VK_ESCAPE = 0x1B;
pub const VK_LEFT = 0x25;
pub const VK_UP = 0x26;
pub const VK_RIGHT = 0x27;
pub const VK_DOWN = 0x28;
pub const VK_PRIOR = 0x21;
pub const VK_NEXT = 0x22;
pub const VK_END = 0x23;
pub const VK_HOME = 0x24;
pub const VK_DELETE = 0x2E;
pub const VK_BACK = 0x08;
pub const VK_RETURN = 0x0D;
pub const VK_CONTROL = 0x11;
pub const VK_SHIFT = 0x10;
pub const VK_MENU = 0x12;
pub const VK_SPACE = 0x20;
pub const VK_INSERT = 0x2D;
pub const VK_LSHIFT = 0xA0;
pub const VK_RSHIFT = 0xA1;
pub const VK_LCONTROL = 0xA2;
pub const VK_RCONTROL = 0xA3;
pub const VK_LMENU = 0xA4;
pub const VK_RMENU = 0xA5;
pub const VK_LWIN = 0x5B;
pub const VK_RWIN = 0x5C;
pub const VK_APPS = 0x5D;
pub const VK_OEM_1 = 0xBA;
pub const VK_OEM_PLUS = 0xBB;
pub const VK_OEM_COMMA = 0xBC;
pub const VK_OEM_MINUS = 0xBD;
pub const VK_OEM_PERIOD = 0xBE;
pub const VK_OEM_2 = 0xBF;
pub const VK_OEM_3 = 0xC0;
pub const VK_OEM_4 = 0xDB;
pub const VK_OEM_5 = 0xDC;
pub const VK_OEM_6 = 0xDD;
pub const VK_OEM_7 = 0xDE;
pub const VK_CAPITAL = 0x14;
pub const VK_SCROLL = 0x91;
pub const VK_NUMLOCK = 0x90;
pub const VK_SNAPSHOT = 0x2C;
pub const VK_PAUSE = 0x13;
pub const VK_NUMPAD0 = 0x60;
pub const VK_NUMPAD1 = 0x61;
pub const VK_NUMPAD2 = 0x62;
pub const VK_NUMPAD3 = 0x63;
pub const VK_NUMPAD4 = 0x64;
pub const VK_NUMPAD5 = 0x65;
pub const VK_NUMPAD6 = 0x66;
pub const VK_NUMPAD7 = 0x67;
pub const VK_NUMPAD8 = 0x68;
pub const VK_NUMPAD9 = 0x69;
pub const VK_MULTIPLY = 0x6A;
pub const VK_ADD = 0x6B;
pub const VK_SEPARATOR = 0x6C;
pub const VK_SUBTRACT = 0x6D;
pub const VK_DECIMAL = 0x6E;
pub const VK_DIVIDE = 0x6F;
pub const VK_F1 = 0x70;
pub const VK_F2 = 0x71;
pub const VK_F3 = 0x72;
pub const VK_F4 = 0x73;
pub const VK_F5 = 0x74;
pub const VK_F6 = 0x75;
pub const VK_F7 = 0x76;
pub const VK_F8 = 0x77;
pub const VK_F9 = 0x78;
pub const VK_F10 = 0x79;
pub const VK_F11 = 0x7A;
pub const VK_F12 = 0x7B;
pub const IM_VK_KEYPAD_ENTER = VK_RETURN + 256;
pub const KF_EXTENDED = 0x0100;
pub const GUID_NULL = GUID.parse("{00000000-0000-0000-0000-000000000000}");
| https://raw.githubusercontent.com/zig-gamedev/zig-gamedev/dcef066b6ecc9f9b9de0e23fc9360f6bea1db2aa/libs/zwin32/src/w32.zig |
std.log.debug("{}", .{{{_cursor_}}});
| https://raw.githubusercontent.com/mattn/vim-sonictemplate/9959750bc0c6ac9f00a613c1870d589651a6b317/template/zig/snip-log-debug.zig |
//! This example is ported from : https://github.com/microsoft/Windows-classic-samples/blob/master/Samples/Win7Samples/begin/LearnWin32/Direct2DCircle/cpp/main.cpp
pub const UNICODE = true;
const WINAPI = @import("std").os.windows.WINAPI;
const win32 = struct {
usingnamespace @import("win32").zig;
usingnamespace @import("win32").foundation;
usingnamespace @import("win32").system.system_services;
usingnamespace @import("win32").ui.windows_and_messaging;
usingnamespace @import("win32").graphics.gdi;
usingnamespace @import("win32").graphics.direct2d;
usingnamespace @import("win32").graphics.direct2d.common;
usingnamespace @import("win32").graphics.direct3d9;
usingnamespace @import("win32").graphics.dxgi.common;
usingnamespace @import("win32").system.com;
};
const L = win32.L;
const FAILED = win32.FAILED;
const SUCCEEDED = win32.SUCCEEDED;
const HRESULT = win32.HRESULT;
const HINSTANCE = win32.HINSTANCE;
const HWND = win32.HWND;
const MSG = win32.MSG;
const WPARAM = win32.WPARAM;
const LPARAM = win32.LPARAM;
const LRESULT = win32.LRESULT;
const RECT = win32.RECT;
const D2D_SIZE_U = win32.D2D_SIZE_U;
const D2D_SIZE_F = win32.D2D_SIZE_F;
const SafeReslease = win32.SafeRelease;
const basewin = @import("basewin.zig");
const BaseWindow = basewin.BaseWindow;
// Release one COM reference held in an optional pointer slot and clear the
// slot so the same object cannot be released twice through it.
// No-op when the slot is already empty.
fn SafeRelease(ppT: anytype) void {
    if (ppT.* == null) return;
    const obj = ppT.*.?;
    _ = obj.IUnknown_Release(); // return value intentionally ignored
    ppT.* = null;
}
// Application window: owns the Direct2D factory, the window-bound render
// target, the fill brush and the ellipse geometry that gets drawn.
// Device-dependent resources (target + brush) are created lazily and dropped
// on device loss; see Create/DiscardGraphicsResources below.
const MainWindow = struct {
    base: BaseWindow(@This()) = .{},
    pFactory: ?*win32.ID2D1Factory = null,
    pRenderTarget: ?*win32.ID2D1HwndRenderTarget = null,
    pBrush: ?*win32.ID2D1SolidColorBrush = null,
    ellipse: win32.D2D1_ELLIPSE = undefined,
    // Thin inline forwarders keeping the C++ sample's method names.
    pub fn CalculateLayout(self: *MainWindow) callconv(.Inline) void { MainWindowCalculateLayout(self); }
    pub fn CreateGraphicsResources(self: *MainWindow) callconv(.Inline) HRESULT { return MainWindowCreateGraphicsResources(self); }
    pub fn DiscardGraphicsResources(self: *MainWindow) callconv(.Inline) void { MainWindowDiscardGraphicsResources(self); }
    pub fn OnPaint(self: *MainWindow) callconv(.Inline) void { MainWindowOnPaint(self); }
    pub fn Resize(self: *MainWindow) callconv(.Inline) void { MainWindowResize(self); }
    pub fn ClassName() [*:0]const u16 { return L("Circle Window Class"); }
    pub fn HandleMessage(self: *MainWindow, uMsg: u32, wParam: WPARAM, lParam: LPARAM) LRESULT {
        return MainWindowHandleMessage(self, uMsg, wParam, lParam);
    }
};
// Recalculate drawing layout when the size of the window changes:
// center the ellipse and size it to the largest circle that fits.
fn MainWindowCalculateLayout(self: *MainWindow) void {
    if (self.pRenderTarget) |pRenderTarget| {
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        // TODO: this call is causing a segfault when we return from this function!!!
        // I believe it is caused by this issue: https://github.com/ziglang/zig/issues/1481
        // Zig unable to handle a return type of extern struct { x: f32, y: f32 } for WINAPI
        _ = pRenderTarget;
        //const size: D2D_SIZE_F = pRenderTarget.ID2D1RenderTarget_GetSize();
        // Workaround: assume a fixed 300x300 target instead of querying GetSize.
        const size = D2D_SIZE_F { .width = 300, .height = 300 };
        const x: f32 = size.width / 2;
        const y: f32 = size.height / 2;
        const radius = @min(x, y);
        self.ellipse = D2D1.Ellipse(D2D1.Point2F(x, y), radius, radius);
    }
}
// Lazily creates the device-dependent resources (render target sized to the
// client rect, plus a solid yellow brush) and recomputes the layout.
// No-op (returns S_OK) when the render target already exists.
fn MainWindowCreateGraphicsResources(self: *MainWindow) HRESULT
{
    var hr = win32.S_OK;
    if (self.pRenderTarget == null)
    {
        var rc: RECT = undefined;
        _ = win32.GetClientRect(self.base.m_hwnd.?, &rc);
        const size = D2D_SIZE_U{ .width = @intCast(rc.right), .height = @intCast(rc.bottom) };
        hr = self.pFactory.?.ID2D1Factory_CreateHwndRenderTarget(
            &D2D1.RenderTargetProperties(),
            &D2D1.HwndRenderTargetProperties(self.base.m_hwnd.?, size),
            // TODO: figure out how to cast a COM object to a base type
            @ptrCast(&self.pRenderTarget));
        if (SUCCEEDED(hr))
        {
            // r=1, g=1, b=0 -> yellow fill brush.
            const color = D2D1.ColorF(.{ .r = 1, .g = 1, .b = 0});
            // TODO: how do I do this ptrCast better by using COM base type?
            hr = self.pRenderTarget.?.ID2D1RenderTarget_CreateSolidColorBrush(&color, null, @ptrCast(&self.pBrush));
            if (SUCCEEDED(hr))
            {
                self.CalculateLayout();
            }
        }
    }
    return hr;
}
// Releases the device-dependent resources (render target and brush).
// Safe to call repeatedly: SafeRelease is a no-op on empty slots.
fn MainWindowDiscardGraphicsResources(self: *MainWindow) void
{
    SafeRelease(&self.pRenderTarget);
    SafeRelease(&self.pBrush);
}
// WM_PAINT handler: (re)creates resources if needed, clears to sky blue and
// fills the ellipse. On EndDraw failure or D2DERR_RECREATE_TARGET the device
// resources are discarded so the next paint rebuilds them.
fn MainWindowOnPaint(self: *MainWindow) void
{
    var hr = self.CreateGraphicsResources();
    if (SUCCEEDED(hr))
    {
        var ps : win32.PAINTSTRUCT = undefined;
        _ = win32.BeginPaint(self.base.m_hwnd.?, &ps);
        self.pRenderTarget.?.ID2D1RenderTarget_BeginDraw();
        self.pRenderTarget.?.ID2D1RenderTarget_Clear(&D2D1.ColorFU32(.{ .rgb = D2D1.SkyBlue }));
        // TODO: how do I get a COM interface type to convert to a base type without
        // an explicit cast like this?
        self.pRenderTarget.?.ID2D1RenderTarget_FillEllipse(&self.ellipse, @ptrCast(self.pBrush));
        hr = self.pRenderTarget.?.ID2D1RenderTarget_EndDraw(null, null);
        if (FAILED(hr) or hr == win32.D2DERR_RECREATE_TARGET)
        {
            self.DiscardGraphicsResources();
        }
        _ = win32.EndPaint(self.base.m_hwnd.?, &ps);
    }
}
// WM_SIZE handler: resizes the render target to the new client area,
// recomputes the ellipse layout and invalidates the window to repaint.
fn MainWindowResize(self: *MainWindow) void
{
    if (self.pRenderTarget) |renderTarget|
    {
        var rc: RECT = undefined;
        _ = win32.GetClientRect(self.base.m_hwnd.?, &rc);
        const size = D2D_SIZE_U{ .width = @intCast(rc.right), .height = @intCast(rc.bottom) };
        _ = renderTarget.ID2D1HwndRenderTarget_Resize(&size);
        self.CalculateLayout();
        _ = win32.InvalidateRect(self.base.m_hwnd.?, null, win32.FALSE);
    }
}
// Entry point: creates the window, shows it, and pumps the standard
// GetMessage/TranslateMessage/DispatchMessage loop until WM_QUIT.
pub export fn wWinMain(_: HINSTANCE, __: ?HINSTANCE, ___: [*:0]u16, nCmdShow: u32) callconv(WINAPI) c_int
{
    _ = __;
    _ = ___;
    var win = MainWindow { };
    if (win32.TRUE != win.base.Create(L("Circle"), win32.WS_OVERLAPPEDWINDOW, .{}))
    {
        return 0;
    }
    _ = win32.ShowWindow(win.base.Window(), @bitCast(nCmdShow));
    // Run the message loop.
    var msg : MSG = undefined;
    while (0 != win32.GetMessage(&msg, null, 0, 0))
    {
        _ = win32.TranslateMessage(&msg);
        _ = win32.DispatchMessage(&msg);
    }
    return 0;
}
// Window procedure: creates the D2D factory on WM_CREATE, tears everything
// down on WM_DESTROY, paints/resizes on demand, and defers the rest to
// DefWindowProc.
fn MainWindowHandleMessage(self: *MainWindow, uMsg: u32, wParam: WPARAM, lParam: LPARAM) LRESULT
{
    switch (uMsg)
    {
        win32.WM_CREATE => {
            // TODO: Should I need to case &self.pFactory to **anyopaque? Maybe
            // D2D2CreateFactory probably doesn't have the correct type yet?
            if (FAILED(win32.D2D1CreateFactory(
                win32.D2D1_FACTORY_TYPE_SINGLE_THREADED, win32.IID_ID2D1Factory, null, @ptrCast(&self.pFactory))))
            {
                return -1; // Fail CreateWindowEx.
            }
            return 0;
        },
        win32.WM_DESTROY => {
            self.DiscardGraphicsResources();
            SafeRelease(&self.pFactory);
            win32.PostQuitMessage(0);
            return 0;
        },
        win32.WM_PAINT => {
            self.OnPaint();
            return 0;
        },
        // Other messages not shown...
        win32.WM_SIZE => {
            self.Resize();
            return 0;
        },
        else => {},
    }
    return win32.DefWindowProc(self.base.m_hwnd.?, uMsg, wParam, lParam);
}
// Helper namespace mirroring the C++ `D2D1::` convenience functions used by
// the original sample; win32metadata does not ship these, so they are
// reimplemented here. NOTE(review): several of these should arguably live in
// win32metadata upstream.
const D2D1 = struct {
    // Sky blue as packed 0xRRGGBB (missing from win32metadata).
    pub const SkyBlue = 0x87CEEB;

    // Builds a color from individual float channels; alpha defaults to opaque.
    pub fn ColorF(o: struct { r: f32, g: f32, b: f32, a: f32 = 1 }) win32.D2D1_COLOR_F {
        return win32.D2D1_COLOR_F{ .r = o.r, .g = o.g, .b = o.b, .a = o.a };
    }

    // Builds a color from a packed 0xRRGGBB integer plus a float alpha.
    pub fn ColorFU32(o: struct { rgb: u32, a: f32 = 1 }) win32.D2D1_COLOR_F {
        const red = (o.rgb >> 16) & 0xff;
        const green = (o.rgb >> 8) & 0xff;
        const blue = o.rgb & 0xff;
        return win32.D2D1_COLOR_F{
            .r = @as(f32, @floatFromInt(red)) / 255,
            .g = @as(f32, @floatFromInt(green)) / 255,
            .b = @as(f32, @floatFromInt(blue)) / 255,
            .a = o.a,
        };
    }

    // Convenience constructor for a 2D point.
    pub fn Point2F(x: f32, y: f32) win32.D2D_POINT_2F {
        return win32.D2D_POINT_2F{ .x = x, .y = y };
    }

    // Convenience constructor for an ellipse around `center`.
    pub fn Ellipse(center: win32.D2D_POINT_2F, radiusX: f32, radiusY: f32) win32.D2D1_ELLIPSE {
        return win32.D2D1_ELLIPSE{
            .point = center,
            .radiusX = radiusX,
            .radiusY = radiusY,
        };
    }

    // Default render-target properties: default type/usage/level, unknown
    // pixel format, default (0) DPI.
    pub fn RenderTargetProperties() win32.D2D1_RENDER_TARGET_PROPERTIES {
        return win32.D2D1_RENDER_TARGET_PROPERTIES{
            .type = win32.D2D1_RENDER_TARGET_TYPE_DEFAULT,
            .pixelFormat = PixelFormat(),
            .dpiX = 0,
            .dpiY = 0,
            .usage = win32.D2D1_RENDER_TARGET_USAGE_NONE,
            .minLevel = win32.D2D1_FEATURE_LEVEL_DEFAULT,
        };
    }

    // "Let Direct2D pick" pixel format: unknown DXGI format and alpha mode.
    pub fn PixelFormat() win32.D2D1_PIXEL_FORMAT {
        return win32.D2D1_PIXEL_FORMAT{
            .format = win32.DXGI_FORMAT_UNKNOWN,
            .alphaMode = win32.D2D1_ALPHA_MODE_UNKNOWN,
        };
    }

    // HWND render-target properties with default present options.
    pub fn HwndRenderTargetProperties(hwnd: HWND, size: D2D_SIZE_U) win32.D2D1_HWND_RENDER_TARGET_PROPERTIES {
        return win32.D2D1_HWND_RENDER_TARGET_PROPERTIES{
            .hwnd = hwnd,
            .pixelSize = size,
            .presentOptions = win32.D2D1_PRESENT_OPTIONS_NONE,
        };
    }
};
| https://raw.githubusercontent.com/marlersoft/zigwin32gen/a7ea975694193ef077c34aa3bca5ec3fbdba4712/examples/d2dcircle.zig |
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
const py = @import("pydust.zig");
const Type = @import("pytypes.zig").Type;
const State = @import("discovery.zig").State;
/// A named attribute to install on a module: `ctor` builds the attribute's
/// value (e.g. a class type object) for the given module.
pub const Attribute = struct {
    /// Attribute name as a NUL-terminated string.
    name: [:0]const u8,
    /// Constructor producing the attribute value for a module.
    ctor: fn (module: py.PyModule) py.PyError!py.PyObject,
};
/// Finds the attributes on a module or class definition.
///
/// `attributes` holds one entry per declaration of `definition` that is a
/// type registered with the discovery `State` as a class; each entry carries
/// the declaration name and a constructor that instantiates the class type
/// object on a given module.
pub fn Attributes(comptime definition: type) type {
    return struct {
        // First pass: count matching declarations so the array length is
        // comptime-known.
        const attr_count = blk: {
            var cnt = 0;
            for (@typeInfo(definition).Struct.decls) |decl| {
                const value = @field(definition, decl.name);
                if (@typeInfo(@TypeOf(value)) != .Type) {
                    continue;
                }
                if (State.findDefinition(value)) |def| {
                    if (def.type == .class) {
                        cnt += 1;
                    }
                }
            }
            break :blk cnt;
        };
        // Second pass: fill the array using the exact same filter as the
        // count pass. The `.Type` guard was previously missing here; keeping
        // the two passes identical guarantees `idx` can never disagree with
        // `attr_count`.
        pub const attributes: [attr_count]Attribute = blk: {
            var attrs: [attr_count]Attribute = undefined;
            var idx = 0;
            for (@typeInfo(definition).Struct.decls) |decl| {
                const value = @field(definition, decl.name);
                if (@typeInfo(@TypeOf(value)) != .Type) {
                    continue;
                }
                if (State.findDefinition(value)) |def| {
                    if (def.type == .class) {
                        // Closure capturing the decl name and definition at
                        // comptime; invoked later to create the type object.
                        const Closure = struct {
                            pub fn init(module: py.PyModule) !py.PyObject {
                                const typedef = Type(decl.name ++ "", def.definition);
                                return try typedef.init(module);
                            }
                        };
                        attrs[idx] = .{ .name = decl.name ++ "", .ctor = Closure.init };
                        idx += 1;
                    }
                }
            }
            break :blk attrs;
        };
    };
}
| https://raw.githubusercontent.com/spiraldb/ziggy-pydust/59703a878b8c3dbbbb6198fff4e67dc8d7bf50f0/pydust/src/attributes.zig |
// Forked from https://github.com/gernest/base32
const std = @import("std");
const testing = std.testing;
const log = std.log.scoped(.base32);
/// Standard base32 alphabet from RFC 4648 section 6 ("A".."Z", "2".."7").
const encode_std = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567";
/// "base32hex" alphabet from RFC 4648 section 7.
const encode_hex = "0123456789ABCDEFGHIJKLMNOPQRSTUV";
/// Crockford's base32 alphabet (digits + letters, omitting I, L, O, U).
const crockford_alphabet = "0123456789ABCDEFGHJKMNPQRSTVWXYZ";
/// RFC 4648 standard encoding with '=' padding.
pub const std_encoding = Encoding.initWithPadding(encode_std, std_padding_char);
/// RFC 4648 base32hex encoding with '=' padding.
pub const hex_encoding = Encoding.initWithPadding(encode_hex, std_padding_char);
/// Crockford encoding; never padded.
pub const crockford_encoding = Encoding.initWithPadding(crockford_alphabet, null);
const std_padding_char = '=';
/// A base32 encoding defined by a 32-byte alphabet and an optional padding
/// byte. See RFC 4648 for the standard and hex variants.
pub const Encoding = struct {
    /// Alphabet: maps each 5-bit group (0..31) to its output byte.
    buf: [32]u8,
    /// Reverse map from input byte to 5-bit value; 0xFF marks bytes that are
    /// not in the alphabet (including '\r' and '\n', which therefore decode
    /// as corrupt input).
    decode_map: [256]u8,
    /// Optional padding byte appended to encoded output ('=' for RFC 4648).
    pad_char: ?u8 = null,
    /// Creates an encoding over a 32-byte alphabet, without padding.
    pub fn init(encoder_string: []const u8) Encoding {
        return initWithPadding(encoder_string, null);
    }
    /// Creates an encoding over a 32-byte alphabet with an optional padding
    /// byte. The padding byte must not be '\r' or '\n'.
    pub fn initWithPadding(encoder_string: []const u8, pad_char: ?u8) Encoding {
        std.debug.assert(encoder_string.len == 32);
        if (pad_char) |c| {
            // Fixed: this previously compared against the letter 'r' instead
            // of the carriage return '\r'. The old `c > 0xff` term was
            // dropped because it is always false for a u8.
            std.debug.assert(!(c == '\r' or c == '\n'));
        }
        return Encoding{
            .buf = blk: {
                var a: [32]u8 = undefined;
                std.mem.copy(u8, a[0..], encoder_string);
                break :blk a;
            },
            .decode_map = blk: {
                var a = [_]u8{0xFF} ** 256;
                for (encoder_string) |c, i| {
                    a[@intCast(usize, c)] = @intCast(u8, i);
                }
                break :blk a;
            },
            .pad_char = pad_char,
        };
    }
    /// Number of bytes needed to encode `n` source bytes.
    pub fn encodeLen(self: Encoding, n: usize) usize {
        if (self.pad_char == null) {
            return (n * 8 + 4) / 5;
        }
        return (n + 4) / 5 * 8;
    }
    /// Encodes `source` into `destination` (which must hold at least
    /// `encodeLen(source.len)` bytes) and returns the written slice.
    pub fn encode(
        self: Encoding,
        destination: []u8,
        source: []const u8,
    ) []const u8 {
        var dst = destination;
        var src = source;
        var n: usize = 0;
        while (src.len > 0) {
            // Split up to 5 input bytes into eight 5-bit groups b[0..8];
            // caseN handles input byte N-1 (partial quanta fall through).
            var b = [_]u8{0} ** 8;
            switch (src.len) {
                1 => {
                    case1(b[0..], src);
                },
                2 => {
                    case2(b[0..], src);
                    case1(b[0..], src);
                },
                3 => {
                    case3(b[0..], src);
                    case2(b[0..], src);
                    case1(b[0..], src);
                },
                4 => {
                    case4(b[0..], src);
                    case3(b[0..], src);
                    case2(b[0..], src);
                    case1(b[0..], src);
                },
                else => {
                    b[7] = src[4] & 0x1F;
                    b[6] = src[4] >> 5;
                    case4(b[0..], src);
                    case3(b[0..], src);
                    case2(b[0..], src);
                    case1(b[0..], src);
                },
            }
            // Map the 5-bit groups through the alphabet.
            const size = dst.len;
            if (size >= 8) {
                dst[0] = self.buf[b[0] & 31];
                dst[1] = self.buf[b[1] & 31];
                dst[2] = self.buf[b[2] & 31];
                dst[3] = self.buf[b[3] & 31];
                dst[4] = self.buf[b[4] & 31];
                dst[5] = self.buf[b[5] & 31];
                dst[6] = self.buf[b[6] & 31];
                dst[7] = self.buf[b[7] & 31];
                n += 8;
            } else {
                var i: usize = 0;
                while (i < size) : (i += 1) {
                    dst[i] = self.buf[b[i] & 31];
                }
                n += i;
            }
            if (src.len < 5) {
                // Final partial quantum: overwrite the positions that carry
                // no input bits with padding (when configured).
                if (self.pad_char == null) break;
                dst[7] = self.pad_char.?;
                if (src.len < 4) {
                    dst[6] = self.pad_char.?;
                    dst[5] = self.pad_char.?;
                    if (src.len < 3) {
                        dst[4] = self.pad_char.?;
                        if (src.len < 2) {
                            dst[3] = self.pad_char.?;
                            dst[2] = self.pad_char.?;
                        }
                    }
                }
                break;
            }
            src = src[5..];
            dst = dst[8..];
        }
        return destination[0..n];
    }
    /// Maximum number of bytes decoding `n` encoded bytes can produce.
    pub fn decodeLen(self: Encoding, n: usize) usize {
        if (self.pad_char == null) return n * 5 / 8;
        return n / 8 * 5;
    }
    /// Decodes `source` into `dest` and returns the written slice.
    /// NOTE: newline bytes are NOT skipped; '\r'/'\n' in the input yield
    /// `error.CorruptImput` (error name kept misspelled for compatibility
    /// with existing callers).
    pub fn decode(
        self: Encoding,
        dest: []u8,
        source: []const u8,
    ) ![]const u8 {
        if (dest.len < self.decodeLen(source.len)) {
            return error.NotEnoughSpace;
        }
        var dst = dest;
        var src = source;
        var end: bool = false;
        var n: usize = 0;
        var dsti: usize = 0;
        var olen = src.len;
        while (src.len > 0 and !end) {
            // Gather up to eight 5-bit groups for one 5-byte quantum.
            var dbuf = [_]u8{0} ** 8;
            var dlen: usize = 8;
            var j: usize = 0;
            while (j < 8) {
                if (src.len == 0) {
                    if (self.pad_char != null) {
                        // We have reached the end and are missing padding
                        return error.MissingPadding;
                    }
                    dlen = j;
                    end = true;
                    break;
                }
                const in = src[0];
                src = src[1..];
                if (self.pad_char != null and in == self.pad_char.? and j >= 2 and src.len < 8) {
                    // We've reached the end and there's padding
                    if (src.len + j < 8 - 1) {
                        // not enough padding
                        log.warn("incorrect input at {}\n", .{olen});
                        return error.NotEnoughPadding;
                    }
                    var k: usize = 0;
                    while (k < 8 - 1 - j) : (k += 1) {
                        if (src.len > k and self.pad_char != null and src[k] != self.pad_char.?) {
                            // incorrect padding
                            const pos = olen - src.len + k - 1;
                            log.warn("incorrect input at {}\n", .{pos});
                            return error.IncorrectPadding;
                        }
                    }
                    dlen = j;
                    end = true;
                    // 7, 5 and 2 are not valid padding lengths, and so 1, 3 and 6 are not
                    // valid dlen values. See RFC 4648 Section 6 "Base 32 Encoding" listing
                    // the five valid padding lengths, and Section 9 "Illustrations and
                    // Examples" for an illustration for how the 1st, 3rd and 6th base32
                    // src bytes do not yield enough information to decode a dst byte.
                    if (dlen == 1 or dlen == 3 or dlen == 6) {
                        const pos = olen - src.len - 1;
                        log.warn("incorrect input at {}\n", .{pos});
                        return error.IncorrectPadding;
                    }
                    break;
                }
                dbuf[j] = self.decode_map[in];
                if (dbuf[j] == 0xFF) {
                    // Byte outside the alphabet.
                    const pos = olen - src.len - 1;
                    log.warn("incorrect input at {}\n", .{pos});
                    return error.CorruptImput;
                }
                j += 1;
            }
            // Pack 8x 5-bit source blocks into 5 byte destination
            // quantum
            switch (dlen) {
                8 => {
                    dec8(dst, dsti, dbuf[0..]);
                    n += 1;
                    dec7(dst, dsti, dbuf[0..]);
                    n += 1;
                    dec5(dst, dsti, dbuf[0..]);
                    n += 1;
                    dec4(dst, dsti, dbuf[0..]);
                    n += 1;
                    dec2(dst, dsti, dbuf[0..]);
                    n += 1;
                },
                7 => {
                    dec7(dst, dsti, dbuf[0..]);
                    n += 1;
                    dec5(dst, dsti, dbuf[0..]);
                    n += 1;
                    dec4(dst, dsti, dbuf[0..]);
                    n += 1;
                    dec2(dst, dsti, dbuf[0..]);
                    n += 1;
                },
                5 => {
                    dec5(dst, dsti, dbuf[0..]);
                    n += 1;
                    dec4(dst, dsti, dbuf[0..]);
                    n += 1;
                    dec2(dst, dsti, dbuf[0..]);
                    n += 1;
                },
                4 => {
                    dec4(dst, dsti, dbuf[0..]);
                    n += 1;
                    dec2(dst, dsti, dbuf[0..]);
                    n += 1;
                },
                2 => {
                    dec2(dst, dsti, dbuf[0..]);
                    n += 1;
                },
                else => {},
            }
            dsti += 5;
        }
        return dest[0..n];
    }
};
// Decoded byte 0: all 5 bits of group 0 plus the top 3 bits of group 1.
fn dec2(dst: []u8, dsti: usize, dbuf: []u8) void {
    dst[dsti + 0] = dbuf[0] << 3 | dbuf[1] >> 2;
}
// Decoded byte 1: low 2 bits of group 1, all of group 2, top bit of group 3.
fn dec4(dst: []u8, dsti: usize, dbuf: []u8) void {
    dst[dsti + 1] = dbuf[1] << 6 | dbuf[2] << 1 | dbuf[3] >> 4;
}
// Decoded byte 2: low 4 bits of group 3 plus the top 4 bits of group 4.
fn dec5(dst: []u8, dsti: usize, dbuf: []u8) void {
    dst[dsti + 2] = dbuf[3] << 4 | dbuf[4] >> 1;
}
// Decoded byte 3: low bit of group 4, all of group 5, top 3 bits of group 6.
fn dec7(dst: []u8, dsti: usize, dbuf: []u8) void {
    dst[dsti + 3] = dbuf[4] << 7 | dbuf[5] << 2 | dbuf[6] >> 3;
}
// Decoded byte 4: low 3 bits of group 6 plus all 5 bits of group 7.
fn dec8(dst: []u8, dsti: usize, dbuf: []u8) void {
    dst[dsti + 4] = dbuf[6] << 5 | dbuf[7];
}
// Input byte 0: top 5 bits -> group 0; low 3 bits -> high bits of group 1.
fn case1(b: []u8, src: []const u8) void {
    b[1] |= (src[0] << 2) & 0x1F;
    b[0] = src[0] >> 3;
}
// Input byte 1: top 2 bits -> low bits of group 1; middle 5 -> group 2;
// low bit -> high bit of group 3.
fn case2(b: []u8, src: []const u8) void {
    b[3] |= (src[1] << 4) & 0x1F;
    b[2] = (src[1] >> 1) & 0x1F;
    b[1] = (src[1] >> 6) & 0x1F;
}
// Input byte 2: top 4 bits -> low bits of group 3; low 4 -> high bits of group 4.
fn case3(b: []u8, src: []const u8) void {
    b[4] |= (src[2] << 1) & 0x1F;
    b[3] = (src[2] >> 4) & 0x1F;
}
// Input byte 3: top bit -> low bit of group 4; middle 5 -> group 5;
// low 2 bits -> high bits of group 6.
fn case4(b: []u8, src: []const u8) void {
    b[6] |= (src[3] << 3) & 0x1F;
    b[5] = (src[3] >> 2) & 0x1F;
    b[4] = src[3] >> 7;
}
/// One decoded/encoded fixture for the encode/decode round-trip tests.
const TestPair = struct {
    decoded: []const u8,
    encoded: []const u8,
};
// Test vectors: the first group matches RFC 4648 section 10; the rest are
// Wikipedia-derived examples. NOTE(review): "easure." appears twice — the
// duplicate is harmless.
const pairs = [_]TestPair{
    TestPair{ .decoded = "", .encoded = "" },
    TestPair{ .decoded = "f", .encoded = "MY======" },
    TestPair{ .decoded = "fo", .encoded = "MZXQ====" },
    TestPair{ .decoded = "foo", .encoded = "MZXW6===" },
    TestPair{ .decoded = "foob", .encoded = "MZXW6YQ=" },
    TestPair{ .decoded = "fooba", .encoded = "MZXW6YTB" },
    // Wikipedia examples, converted to base32
    TestPair{ .decoded = "sure.", .encoded = "ON2XEZJO" },
    TestPair{ .decoded = "sure", .encoded = "ON2XEZI=" },
    TestPair{ .decoded = "sur", .encoded = "ON2XE===" },
    TestPair{ .decoded = "su", .encoded = "ON2Q====" },
    TestPair{ .decoded = "leasure.", .encoded = "NRSWC43VOJSS4===" },
    TestPair{ .decoded = "easure.", .encoded = "MVQXG5LSMUXA====" },
    TestPair{ .decoded = "easure.", .encoded = "MVQXG5LSMUXA====" },
    TestPair{ .decoded = "asure.", .encoded = "MFZXK4TFFY======" },
    TestPair{ .decoded = "sure.", .encoded = "ON2XEZJO" },
};
// Encodes every fixture with std_encoding and checks the exact output.
test "Encoding" {
    var buf: [1024]u8 = undefined;
    for (pairs) |ts| {
        const size = std_encoding.encodeLen(ts.decoded.len);
        const result = std_encoding.encode(buf[0..size], ts.decoded);
        try testing.expectEqualSlices(u8, ts.encoded, result);
    }
}
// Decodes every fixture with std_encoding and checks the exact output.
test "Decoding" {
    var buf: [1024]u8 = undefined;
    for (pairs) |ts| {
        const size = std_encoding.decodeLen(ts.encoded.len);
        var result = try std_encoding.decode(buf[0..size], ts.encoded);
        try testing.expectEqualSlices(u8, ts.decoded, result);
    }
}
| https://raw.githubusercontent.com/MarcoPolo/zig-libp2p/478d9b60c99069e5bc557b8f60c36ae72dedc83a/src/multibase/base32.zig |
///
/// タイマドライバシミュレータのシステムコンフィギュレーション記述
///
usingnamespace @import("../../kernel/kernel_cfg.zig");
usingnamespace zig;
///
/// タイマドライバ
///
const sim_timer = @import("sim_timer.zig");
usingnamespace sim_timer.ExternDefs;
///
/// System configuration description: registers the timer driver's
/// initialize/terminate hooks and attaches the interrupt handlers for the
/// high-resolution timer and (when supported) the overrun timer.
///
pub fn configuration(comptime cfg: *CfgData) void {
    // Shared by the high-resolution timer and the overrun timer.
    cfg.ATT_INI(AINI(TA_NULL, 0, _kernel_target_timer_initialize));
    cfg.ATT_TER(ATER(TA_NULL, 0, _kernel_target_timer_terminate));
    // High-resolution timer driver: configure/enable its interrupt and
    // install the handler.
    cfg.CFG_INT(sim_timer.hrt.INTNO_HRT,
                CINT(TA_ENAINT | sim_timer.hrt.INTATR_HRT,
                     sim_timer.hrt.INTPRI_HRT));
    cfg.DEF_INH(sim_timer.hrt.INHNO_HRT,
                DINH(TA_NULL, _kernel_target_hrt_handler));
    // Overrun timer driver (only when the kernel supports overrun handlers).
    if (TOPPERS_SUPPORT_OVRHDR) {
        cfg.CFG_INT(sim_timer.ovrtimer.INTNO_OVRTIMER,
                    CINT(TA_ENAINT | sim_timer.ovrtimer.INTATR_OVRTIMER,
                         sim_timer.ovrtimer.INTPRI_OVRTIMER));
        cfg.DEF_INH(sim_timer.ovrtimer.INHNO_OVRTIMER,
                    DINH(TA_NULL, _kernel_target_ovrtimer_handler));
    }
}
| https://raw.githubusercontent.com/toppers/asp3_in_zig/7413474dc07ab906ac31eb367636ecf9c1cf2580/arch/simtimer/sim_timer_cfg.zig |
/// Interprets the 4-entry partition table at `addr` as a slice of Partition
/// records. NOTE(review): assumes `addr` points at a valid, correctly laid
/// out table (e.g. the MBR partition area) — caller must guarantee this.
pub fn from(addr: usize) []Partition {
    return @intToPtr([*]Partition, addr)[0..4];
}
/// Scans the table in order and returns a pointer to the first entry whose
/// bootable flag is set, or null when no entry is bootable.
pub fn first_bootable(parts: []Partition) ?*Partition {
    var idx: usize = 0;
    while (idx < parts.len) : (idx += 1) {
        const entry = &parts[idx];
        if (entry.is_bootable()) return entry;
    }
    return null;
}
/// One 16-byte partition table entry. Packed so the field layout matches the
/// on-disk format exactly — do not reorder or resize fields.
pub const Partition = packed struct {
    /// Attribute byte; bit 7 is the bootable/active flag (see is_bootable).
    attributes: u8,
    /// 24-bit CHS address of the first sector (as named; not decoded here).
    start_chs: u24,
    /// Partition type identifier byte.
    type: u8,
    /// 24-bit CHS address of the last sector.
    end_chs: u24,
    /// LBA of the first sector.
    start_lba: u32,
    /// Number of sectors in the partition.
    sectors: u32,
    const Self = @This();
    /// True when the bootable/active flag (bit 7 of `attributes`) is set.
    pub fn is_bootable(self: *const Self) bool {
        return self.attributes & (1 << 7) != 0;
    }
};
| https://raw.githubusercontent.com/sajjadium/ctf-archives/b7012a5fc5454f8c74ce534ecf4da521c5b9595e/ctfs/AmateursCTF/2023/pwn/simpleOS/src/common/partitions.zig |
const std = @import("std");
const gl = @import("opengl");
// Forces full semantic analysis of the generated OpenGL binding:
// refAllDecls touches every declaration, and the gl.load call type-checks
// the loader plumbing. Load failures are deliberately ignored.
export fn dummy() void {
    {
        @setEvalBranchQuota(100_000); // Yes, this is necessary. OpenGL gets quite large!
        std.testing.refAllDecls(gl);
    }
    gl.load({}, fakeLoader) catch {};
}
comptime {
    // Export glGetIntegerv under its canonical C symbol name.
    @export(gl.getIntegerv, .{ .name = "glGetIntegerv" });
}
// Adapts the context-less loader signature expected by gl.load to the
// externally injected magic_loader hook.
fn fakeLoader(_: void, name: [:0]const u8) ?gl.FunctionPointer {
    return magic_loader(name.ptr);
}
// Symbol-resolution hook the host must set before dummy() runs; it maps an
// OpenGL function name to its address. NOTE(review): undefined until assigned.
export var magic_loader: *const fn (name: [*:0]const u8) ?gl.FunctionPointer = undefined;
| https://raw.githubusercontent.com/ikskuh/zig-opengl/77a0a4e9221f81b7b61f5f53b8844b94cd6ab383/scripts/tester.zig |
const std = @import("std");
const bc = @import("bytecode.zig");
const lexer = @import("lexer.zig");
const parser = @import("parser.zig");
const sema = @import("sema.zig");
/// Compile takes a program and compiles it into bytecode.
pub const Compiler = struct {
    gpa: std.mem.Allocator,
    // Chunk currently being emitted into.
    chunk: bc.Chunk,
    program: *const parser.Program,
    // Set when compiling a nested function; the root compiler has no parent.
    parent: ?*Compiler = null,
    pub fn init(gpa: std.mem.Allocator, program: *const parser.Program) Compiler {
        return .{ .gpa = gpa, .chunk = bc.Chunk.init(gpa), .program = program };
    }
    /// Emits a constant-load instruction, picking the narrowest operand width
    /// (8/16/32 bits) that fits the constant index `v`.
    fn writeConst(self: *Compiler, v: u32) !void {
        if (v > std.math.maxInt(u16)) {
            try self.chunk.writeOp(.rare);
            try self.chunk.writeRateOp(.const32);
            try self.chunk.writeU32(@intCast(u32, v));
        } else if (v > std.math.maxInt(u8)) {
            try self.chunk.writeOp(.rare);
            try self.chunk.writeRateOp(.const16);
            try self.chunk.writeU16(@intCast(u16, v));
        } else {
            try self.chunk.writeOp(.const8);
            try self.chunk.writeU8(@intCast(u8, v));
        }
    }
    /// Emits a local get/geta/set instruction for slot `v`, again picking the
    /// narrowest operand width that fits.
    fn writeLocal(self: *Compiler, i: enum { get, geta, set }, v: u32) !void {
        if (v > std.math.maxInt(u16)) {
            try self.chunk.writeOp(.rare);
            try self.chunk.writeRateOp(switch (i) {
                .get => .get32,
                .geta => .geta32,
                .set => .set32,
            });
            try self.chunk.writeU32(@intCast(u32, v));
        } else if (v > std.math.maxInt(u8)) {
            try self.chunk.writeOp(.rare);
            try self.chunk.writeRateOp(switch (i) {
                .get => .get16,
                .geta => .geta16,
                .set => .set16,
            });
            try self.chunk.writeU16(@intCast(u16, v));
        } else {
            try self.chunk.writeOp(switch (i) {
                .get => .get8,
                .geta => .geta8,
                .set => .set8,
            });
            try self.chunk.writeU8(@intCast(u8, v));
        }
    }
    /// getLocalGlobal tries to find the local of the identifier by looking first in the current chunk's set of locals
    /// and then finding the top level compiler and looking for it there otherwise.
    /// Current-chunk hits emit `get`; top-level (global) hits emit `geta`.
    fn getLocalOrGlobal(self: *Compiler, ident: []const u8) !void {
        if (self.chunk.getLocal(ident)) |l| return try self.writeLocal(.get, l);
        // Walk to the root compiler and look the identifier up there.
        var last_parent = self;
        var parent = self.parent;
        while (parent) |p| {
            last_parent = p;
            parent = p.parent;
        }
        const local = last_parent.chunk.getLocal(ident) orelse return error.LocalNotFound;
        try self.writeLocal(.geta, local);
    }
    /// Emits bytecode that leaves the expression's value on the stack.
    fn compileExpression(self: *Compiler, expr: *const parser.Expression) !void {
        switch (expr.*) {
            // 1 and -1 have dedicated single-byte opcodes.
            .integer => |i| if (i == 1 or i == -1)
                try self.chunk.writeOp(if (i == 1) .one else .neg_one)
            else
                try self.writeConst(try self.chunk.addConstant(.{ .integer = i })),
            .boolean => |b| try self.chunk.writeOp(if (b) .true else .false),
            .identifier => |n| try self.getLocalOrGlobal(n),
            .binop => |binop| {
                // Operands are pushed left then right; the op consumes both.
                try self.compileExpression(binop.lhs.inner);
                try self.compileExpression(binop.rhs.inner);
                try self.chunk.writeOp(switch (binop.op) {
                    .plus => .add,
                    .minus => .subtract,
                    .multiply => .multiply,
                    .divide => .divide,
                    .apply => unreachable,
                    .eq => .eq,
                    .neq => .neq,
                    .lt => .lt,
                    .lte => .lte,
                    .gt => .gt,
                    .gte => .gte,
                });
            },
            .unaryop => |unaryop| {
                // Fold negate(1) straight into the neg_one opcode.
                if (unaryop.op == .negate and unaryop.e.inner.* == .integer and unaryop.e.inner.integer == 1) {
                    try self.chunk.writeOp(.neg_one);
                    return;
                }
                try self.compileExpression(unaryop.e.inner);
                if (unaryop.op == .negate) try self.chunk.writeOp(.negate);
            },
            .let => |let| {
                // Bind each assignment to a fresh local, evaluate the body,
                // then pop the bindings off the stack again.
                for (let.assignments) |a| {
                    try self.compileExpression(a.inner.expression.inner);
                    const l = try self.chunk.addLocal(a.inner.identifier);
                    try self.writeLocal(.set, l);
                }
                try self.compileExpression(let.in.inner);
                if (let.assignments.len == 1) {
                    self.chunk.popLocals(1);
                    try self.chunk.writeOp(.pop);
                    return;
                }
                // pop_n takes a u8 count, so pop in chunks of at most 255.
                var locals = @intCast(isize, let.assignments.len);
                self.chunk.popLocals(@intCast(u32, locals));
                while (locals > 0) : (locals -= std.math.maxInt(u8)) {
                    try self.chunk.writeOp(.pop_n);
                    try self.chunk.writeU8(@intCast(u8, std.math.min(std.math.maxInt(u8), locals)));
                }
            },
            .function => |f| {
                // Compile the function body in a child compiler with its own
                // chunk, then load the resulting function object as a constant.
                var comp = Compiler{
                    .gpa = self.gpa,
                    .chunk = bc.Chunk.init(self.gpa),
                    .parent = self,
                    .program = undefined,
                };
                var func = try comp.compileFunction("", &f);
                const c = try self.chunk.addConstant(func);
                try self.writeConst(c);
            },
            .apply => |a| {
                // Push callee, then arguments, then call with the arg count.
                try self.compileExpression(a.f.inner);
                for (a.args) |arg| try self.compileExpression(arg.inner);
                try self.chunk.writeOp(.call);
                try self.chunk.writeU8(@intCast(u8, a.args.len));
            },
            .@"if" => |f| {
                // condition; jmpf -> else; then; jmp -> after-else; else.
                try self.compileExpression(f.condition.inner);
                var to_else_jmp = try self.chunk.writeJmp(.jmpf8);
                try self.compileExpression(f.then.inner);
                var to_after_else_jmp = try self.chunk.writeJmp(.jmp8);
                try to_else_jmp.set();
                try self.compileExpression(f.@"else".inner);
                try to_after_else_jmp.set();
            },
        }
    }
    /// Compiles a function body into this compiler's chunk and wraps it in a
    /// heap-allocated function object value.
    fn compileFunction(self: *Compiler, name: []const u8, f: *const parser.Function) error{ OutOfMemory, LocalNotFound }!bc.Value {
        // Parameters occupy the first local slots, in declaration order.
        for (f.params) |param| _ = try self.chunk.addLocal(param.inner.identifier);
        try self.compileExpression(f.body.inner);
        try self.chunk.writeOp(.ret);
        var fun = try self.gpa.create(bc.Object);
        fun.* = .{ .v = .{ .function = .{ .chunk = self.chunk, .arity = @intCast(u32, f.params.len), .name = name } } };
        return bc.Value{ .object = fun };
    }
    /// Compiles every top-level statement of the program into the chunk.
    pub fn compile(self: *Compiler) !void {
        for (self.program.stmts.items) |stmt| {
            switch (stmt.inner) {
                .expression => |e| {
                    // Top-level expression values are discarded.
                    try self.compileExpression(e);
                    try self.chunk.writeOp(.pop);
                },
                .assignment => |a| {
                    const l = try self.chunk.addLocal(a.identifier);
                    try self.compileExpression(a.expression.inner);
                    try self.writeLocal(.set, l);
                },
                // Signatures and type definitions generate no code.
                .signature => {},
                .typedef => {},
            }
        }
    }
    pub fn deinit(self: *Compiler) void {
        self.chunk.deinit();
    }
};
const testing = std.testing;
/// Lexes, parses, normalises and compiles `source`, then asserts that the
/// resulting chunk's disassembly matches `disassembly` exactly.
fn testCompile(source: []const u8, disassembly: []const u8) !void {
    var l = parser.Lexer{ .real = lexer.Lexer.init(source) };
    var p = parser.Parser.init(testing.allocator, l);
    defer p.deinit();
    var program = try p.parse();
    defer program.deinit();
    try sema.normaliseProgram(&program);
    var c = Compiler.init(testing.allocator, &program);
    defer c.deinit();
    try c.compile();
    var al = std.ArrayList(u8).init(testing.allocator);
    defer al.deinit();
    try c.chunk.disassemble(al.writer());
    try std.testing.expectEqualStrings(disassembly, al.items);
}
// Constant loading: 1/-1 use dedicated opcodes, other ints go via the
// constant pool; booleans and assignments to locals.
test "constants" {
    try testCompile("1;-1;",
        \\ONE
        \\POP
        \\NONE
        \\POP
        \\
    );
    try testCompile("10;",
        \\CONST c0 ; 10
        \\POP
        \\
    );
    try testCompile("true;",
        \\TRUE
        \\POP
        \\
    );
    try testCompile("a = true;",
        \\TRUE
        \\SET l0
        \\
    );
}
// Arithmetic: operands are pushed left-to-right before each operator.
test "maths" {
    try testCompile("3*2 + 6/3;",
        \\CONST c0 ; 3
        \\CONST c1 ; 2
        \\MULT
        \\CONST c2 ; 6
        \\CONST c3 ; 3
        \\DIV
        \\ADD
        \\POP
        \\
    );
}
// let..in bindings occupy local slots and are popped after the body.
test "let..in" {
    try testCompile("let a = 1; in a + 2;",
        \\ONE
        \\SET l0
        \\GET l0
        \\CONST c0 ; 2
        \\ADD
        \\POP
        \\POP
        \\
    );
    try testCompile("a = 5; b = let a = 1; in a + 2;",
        \\CONST c0 ; 5
        \\SET l0
        \\ONE
        \\SET l2
        \\GET l2
        \\CONST c1 ; 2
        \\ADD
        \\POP
        \\SET l1
        \\
    );
}
// Functions compile to their own chunk (shown between the ==== separators)
// and are loaded as constants; CALL takes the argument count.
test "function" {
    try testCompile("a = 1; f = fn x y => x + y; b = f 1 1;",
        \\============ ============
        \\GET l0
        \\GET l1
        \\ADD
        \\RET
        \\============================
        \\ONE
        \\SET l0
        \\CONST c0 ; <func >
        \\SET l1
        \\GET l1
        \\ONE
        \\ONE
        \\CALL 2
        \\SET l2
        \\
    );
    try testCompile("f = fn x y => x + y; b = f 1 (f 2 3);",
        \\============ ============
        \\GET l0
        \\GET l1
        \\ADD
        \\RET
        \\============================
        \\CONST c0 ; <func >
        \\SET l0
        \\GET l0
        \\ONE
        \\GET l0
        \\CONST c1 ; 2
        \\CONST c2 ; 3
        \\CALL 2
        \\CALL 2
        \\SET l1
        \\
    );
}
// if/else lowering: JMPF skips the then-branch, JMP skips the else-branch.
test "if/then/elif/else" {
    try testCompile("if true then 1 else -1;",
        \\TRUE
        \\JMPF p6
        \\ONE
        \\JMP p7
        \\NONE
        \\POP
        \\
    );
    try testCompile("if 1 == 2 then 1 else -1;",
        \\ONE
        \\CONST c0 ; 2
        \\EQ
        \\JMPF p9
        \\ONE
        \\JMP pa
        \\NONE
        \\POP
        \\
    );
}
| https://raw.githubusercontent.com/mattyhall/polari/8ea72bbded88d771d3f5450158f35581576ae2da/src/compiler.zig |
//! run 'zig build jsons' to generate bindings for raylib
//! run 'zig build raylib_parser' to build the raylib_parser.exe
/// this needs to be here so the zig compiler won't complain that there is no entry point
/// but actually we are using main() of 'raylib/src/parser/raylib_parser.c'
pub extern fn main() c_int;
| https://raw.githubusercontent.com/ryupold/raylib.zig/bd561b3689bc4e703f46bf1908633abb09680b4b/raylib_parser.zig |
const std = @import("std");
const microzig = @import("microzig");
// Include directories handed to the C compiler for the paho MQTTPacket library.
const include_path = [_][]const u8{
    "csrc/connectivity/paho.mqtt.embedded-c/MQTTPacket/src/",
};
const paho_src_path = "csrc/connectivity/paho.mqtt.embedded-c/MQTTPacket/src/";
// C translation units of the MQTT packet serializer/deserializer.
const source_path = [_][]const u8{
    paho_src_path ++ "MQTTFormat.c",
    paho_src_path ++ "MQTTPacket.c",
    paho_src_path ++ "MQTTSerializePublish.c",
    paho_src_path ++ "MQTTDeserializePublish.c",
    paho_src_path ++ "MQTTConnectClient.c",
    paho_src_path ++ "MQTTSubscribeClient.c",
    paho_src_path ++ "MQTTUnsubscribeClient.c",
};
// Client-side build, optimized, with section GC-friendly codegen flags.
const c_flags = [_][]const u8{ "-DMQTT_CLIENT=1", "-O2", "-fdata-sections", "-ffunction-sections" };
/// Registers the paho MQTTPacket include paths and C sources on the given
/// microzig firmware build step.
pub fn aggregate(exe: *microzig.Firmware) void {
    for (include_path) |path| {
        exe.addIncludePath(.{ .path = path });
    }
    for (source_path) |path| {
        exe.addCSourceFile(.{ .file = .{ .path = path }, .flags = &c_flags });
    }
}
| https://raw.githubusercontent.com/FranciscoLlobet/efm32-freertos-zig/879cb83fe3aa68205e3726b50fc47973ebf0ccd0/build_mqtt.zig |
/// Calculates Md5 hash of each chunk in parallel and then hashes all Md5 hashes to produce
/// the final digest.
/// While this is NOT a correct MD5 hash of the contents, this methodology is used by LLVM/LLD
/// and we will use it too as it seems accepted by Apple OSes.
/// TODO LLD also hashes the output filename to disambiguate between same builds with different
/// output files. Should we also do that?
pub fn calcUuid(
    allocator: Allocator,
    thread_pool: *ThreadPool,
    file: fs.File,
    file_size: u64,
    out: *[Md5.digest_length]u8,
) !void {
    const tracy = trace(@src());
    defer tracy.end();
    // Split the file into 1 MiB chunks; a trailing partial chunk counts too.
    const chunk_size: usize = 1024 * 1024;
    const num_chunks: usize = std.math.cast(usize, @divTrunc(file_size, chunk_size)) orelse return error.Overflow;
    const actual_num_chunks = if (@rem(file_size, chunk_size) > 0) num_chunks + 1 else num_chunks;
    const hashes = try allocator.alloc([Md5.digest_length]u8, actual_num_chunks);
    defer allocator.free(hashes);
    // Hash all chunks in parallel on the shared thread pool.
    var hasher = Hasher(Md5){ .allocator = allocator, .thread_pool = thread_pool };
    try hasher.hash(file, hashes, .{
        .chunk_size = chunk_size,
        .max_file_size = file_size,
    });
    // Concatenate the per-chunk digests and hash the result once more.
    const final_buffer = try allocator.alloc(u8, actual_num_chunks * Md5.digest_length);
    defer allocator.free(final_buffer);
    for (hashes, 0..) |hash, i| {
        @memcpy(final_buffer[i * Md5.digest_length ..][0..Md5.digest_length], &hash);
    }
    Md5.hash(final_buffer, out, .{});
    // Stamp RFC 4122 version/variant bits so the digest is a valid UUID.
    conform(out);
}
// Forces the digest into RFC 4122 UUID shape: the version nibble of byte 6
// becomes 3 (the name-based MD5 UUID version — note the code writes 3, not
// 4/5) and the top bits of byte 8 become the 10xxxxxx RFC 4122 variant.
inline fn conform(out: *[Md5.digest_length]u8) void {
    const version_nibble: u8 = 0x30; // UUID version 3 (MD5 name-based)
    const variant_bits: u8 = 0x80; // RFC 4122 variant (10xxxxxx)
    out[6] = (out[6] & 0x0F) | version_nibble;
    out[8] = (out[8] & 0x3F) | variant_bits;
}
const fs = std.fs;
const mem = std.mem;
const std = @import("std");
const trace = @import("../tracy.zig").trace;
const Allocator = mem.Allocator;
const Md5 = std.crypto.hash.Md5;
const Hasher = @import("hasher.zig").ParallelHasher;
const ThreadPool = std.Thread.Pool;
| https://raw.githubusercontent.com/kubkon/zld/01da87911bd7f53b860d8ba0aebb99eee63e5b76/src/MachO/uuid.zig |
const std = @import("std");
const Ast = std.zig.Ast;
const log = std.log.scoped(.zls_hover);
const ast = @import("../ast.zig");
const types = @import("../lsp.zig");
const offsets = @import("../offsets.zig");
const URI = @import("../uri.zig");
const tracy = @import("tracy");
const Analyser = @import("../analysis.zig");
const DocumentStore = @import("../DocumentStore.zig");
const data = @import("version_data");
/// Produces hover text for `decl_handle` in the requested markup kind.
/// Thin wrapper that seeds an empty doc-comment accumulator and delegates to
/// the recursive worker (which follows alias chains).
fn hoverSymbol(
    analyser: *Analyser,
    arena: std.mem.Allocator,
    decl_handle: Analyser.DeclWithHandle,
    markup_kind: types.MarkupKind,
) error{OutOfMemory}!?[]const u8 {
    var doc_strings = std.ArrayListUnmanaged([]const u8){};
    return hoverSymbolRecursive(analyser, arena, decl_handle, markup_kind, &doc_strings);
}
/// Builds the hover text for `decl_handle`: the declaration's signature,
/// its resolved type, accumulated doc comments, and (for markdown) "Go to"
/// links for every referenced type. Recurses through var-decl aliases,
/// appending each level's doc comments to `doc_strings`.
/// Returns null when no printable signature can be produced.
fn hoverSymbolRecursive(
    analyser: *Analyser,
    arena: std.mem.Allocator,
    decl_handle: Analyser.DeclWithHandle,
    markup_kind: types.MarkupKind,
    doc_strings: *std.ArrayListUnmanaged([]const u8),
) error{OutOfMemory}!?[]const u8 {
    const tracy_zone = tracy.trace(@src());
    defer tracy_zone.end();
    const handle = decl_handle.handle;
    const tree = handle.tree;
    // Types referenced by this declaration; used for the "Go to" links below.
    var type_references = Analyser.ReferencedType.Set.init(arena);
    var reference_collector = Analyser.ReferencedType.Collector.init(&type_references);
    if (try decl_handle.docComments(arena)) |doc|
        try doc_strings.append(arena, doc);
    var is_fn = false;
    // Signature string for the declaration, chosen by declaration kind.
    const def_str = switch (decl_handle.decl) {
        .ast_node => |node| def: {
            // `const foo = bar;` style alias: hover on the aliased declaration instead.
            if (try analyser.resolveVarDeclAlias(.{ .node = node, .handle = handle })) |result| {
                return try hoverSymbolRecursive(analyser, arena, result, markup_kind, doc_strings);
            }
            var buf: [1]Ast.Node.Index = undefined;
            if (tree.fullVarDecl(node)) |var_decl| {
                var struct_init_buf: [2]Ast.Node.Index = undefined;
                var type_node: Ast.Node.Index = 0;
                // Prefer the explicit type annotation; otherwise use the type
                // named by a struct-init initializer (`= SomeType{ ... }`).
                if (var_decl.ast.type_node != 0) {
                    type_node = var_decl.ast.type_node;
                } else if (tree.fullStructInit(&struct_init_buf, var_decl.ast.init_node)) |struct_init| {
                    if (struct_init.ast.type_expr != 0)
                        type_node = struct_init.ast.type_expr;
                }
                if (type_node != 0)
                    try analyser.referencedTypesFromNode(
                        .{ .node = type_node, .handle = handle },
                        &reference_collector,
                    );
                break :def try Analyser.getVariableSignature(arena, tree, var_decl, true);
            } else if (tree.fullFnProto(&buf, node)) |fn_proto| {
                is_fn = true;
                break :def Analyser.getFunctionSignature(tree, fn_proto);
            } else if (tree.fullContainerField(node)) |field| {
                var converted = field;
                converted.convertToNonTupleLike(tree.nodes);
                if (converted.ast.type_expr != 0)
                    try analyser.referencedTypesFromNode(
                        .{ .node = converted.ast.type_expr, .handle = handle },
                        &reference_collector,
                    );
                break :def Analyser.getContainerFieldSignature(tree, field) orelse return null;
            } else {
                break :def Analyser.nodeToString(tree, node) orelse return null;
            }
        },
        .function_parameter => |pay| def: {
            const param = pay.get(tree).?;
            if (param.type_expr != 0) // zero for `anytype` and extern C varargs `...`
                try analyser.referencedTypesFromNode(
                    .{ .node = param.type_expr, .handle = handle },
                    &reference_collector,
                );
            break :def ast.paramSlice(tree, param, false);
        },
        // Payload captures, labels etc. hover as their bare identifier.
        .optional_payload,
        .error_union_payload,
        .error_union_error,
        .for_loop_payload,
        .assign_destructure,
        .switch_payload,
        .label,
        .error_token,
        => tree.tokenSlice(decl_handle.nameToken()),
    };
    var resolved_type_str: []const u8 = "unknown";
    if (try decl_handle.resolveType(analyser)) |resolved_type| {
        if (try resolved_type.docComments(arena)) |doc|
            try doc_strings.append(arena, doc);
        try analyser.referencedTypes(
            resolved_type,
            &reference_collector,
        );
        resolved_type_str = try std.fmt.allocPrint(arena, "{}", .{resolved_type.fmt(analyser, .{ .truncate_container_decls = false })});
    }
    const referenced_types: []const Analyser.ReferencedType = type_references.keys();
    var hover_text = std.ArrayList(u8).init(arena);
    const writer = hover_text.writer();
    if (markup_kind == .markdown) {
        // Markdown: fenced zig code for the signature (+ type unless it's a fn),
        // then docs, then "Go to" links pointing at each referenced type's line.
        if (is_fn) {
            try writer.print("```zig\n{s}\n```", .{def_str});
        } else {
            try writer.print("```zig\n{s}\n```\n```zig\n({s})\n```", .{ def_str, resolved_type_str });
        }
        for (doc_strings.items) |doc|
            try writer.print("\n\n{s}", .{doc});
        if (referenced_types.len > 0)
            try writer.print("\n\n" ++ "Go to ", .{});
        for (referenced_types, 0..) |ref, index| {
            if (index > 0)
                try writer.print(" | ", .{});
            const source_index = offsets.tokenToIndex(ref.handle.tree, ref.token);
            // 1-based line number for the #L<n> fragment.
            const line = 1 + std.mem.count(u8, ref.handle.tree.source[0..source_index], "\n");
            try writer.print("[{s}]({s}#L{d})", .{ ref.str, ref.handle.uri, line });
        }
    } else {
        // Plaintext: signature (+ type) and docs only, no links.
        if (is_fn) {
            try writer.print("{s}", .{def_str});
        } else {
            try writer.print("{s}\n({s})", .{ def_str, resolved_type_str });
        }
        for (doc_strings.items) |doc|
            try writer.print("\n\n{s}", .{doc});
    }
    return hover_text.items;
}
/// Hover for a block label under the cursor.
fn hoverDefinitionLabel(
    analyser: *Analyser,
    arena: std.mem.Allocator,
    handle: *DocumentStore.Handle,
    pos_index: usize,
    markup_kind: types.MarkupKind,
    offset_encoding: offsets.Encoding,
) error{OutOfMemory}!?types.Hover {
    const tracy_zone = tracy.trace(@src());
    defer tracy_zone.end();

    const loc = Analyser.identifierLocFromPosition(pos_index, handle) orelse return null;
    const identifier = offsets.locToSlice(handle.tree.source, loc);
    const label_decl = (try Analyser.getLabelGlobal(pos_index, handle, identifier)) orelse return null;
    const hover_text = (try hoverSymbol(analyser, arena, label_decl, markup_kind)) orelse return null;

    return types.Hover{
        .contents = .{ .MarkupContent = .{
            .kind = markup_kind,
            .value = hover_text,
        } },
        .range = offsets.locToRange(handle.tree.source, loc, offset_encoding),
    };
}
/// Hover for a `@builtin` call: shows the builtin's signature and docs from
/// the bundled version data. For `@cImport`, the translated C source of the
/// matching cimport block is prepended when available.
fn hoverDefinitionBuiltin(
    analyser: *Analyser,
    arena: std.mem.Allocator,
    handle: *DocumentStore.Handle,
    pos_index: usize,
    markup_kind: types.MarkupKind,
    offset_encoding: offsets.Encoding,
) error{OutOfMemory}!?types.Hover {
    _ = analyser; // kept for signature parity with the other hover handlers
    const tracy_zone = tracy.trace(@src());
    defer tracy_zone.end();
    const name_loc = Analyser.identifierLocFromPosition(pos_index, handle) orelse return null;
    const name = offsets.locToSlice(handle.tree.source, name_loc);
    // `name` has no leading '@', the data table entries do — compare past it.
    const builtin = for (data.builtins) |builtin| {
        if (std.mem.eql(u8, builtin.name[1..], name)) {
            break builtin;
        }
    } else return null;
    var contents: std.ArrayListUnmanaged(u8) = .{};
    // fix: `const` — the writer itself is never mutated (matches the
    // `const writer` convention used in hoverSymbolRecursive).
    const writer = contents.writer(arena);
    if (std.mem.eql(u8, name, "cImport")) blk: {
        // Find the cimport whose `@cImport` token span contains the cursor.
        const index = for (handle.cimports.items(.node), 0..) |cimport_node, index| {
            const main_token = handle.tree.nodes.items(.main_token)[cimport_node];
            const cimport_loc = offsets.tokenToLoc(handle.tree, main_token);
            if (cimport_loc.start <= pos_index and pos_index <= cimport_loc.end) break index;
        } else break :blk;
        const source = handle.cimports.items(.source)[index];
        switch (markup_kind) {
            .plaintext => {
                try writer.print(
                    \\{s}
                    \\
                , .{source});
            },
            .markdown => {
                try writer.print(
                    \\```c
                    \\{s}
                    \\```
                    \\
                , .{source});
            },
        }
    }
    switch (markup_kind) {
        .plaintext => {
            try writer.print(
                \\{s}
                \\{s}
            , .{ builtin.signature, builtin.documentation });
        },
        .markdown => {
            try writer.print(
                \\```zig
                \\{s}
                \\```
                \\{s}
            , .{ builtin.signature, builtin.documentation });
        },
    }
    return types.Hover{
        .contents = .{
            .MarkupContent = .{
                .kind = markup_kind,
                .value = contents.items,
            },
        },
        .range = offsets.locToRange(handle.tree.source, name_loc, offset_encoding),
    };
}
/// Hover for an identifier in variable-access position.
fn hoverDefinitionGlobal(
    analyser: *Analyser,
    arena: std.mem.Allocator,
    handle: *DocumentStore.Handle,
    pos_index: usize,
    markup_kind: types.MarkupKind,
    offset_encoding: offsets.Encoding,
) error{OutOfMemory}!?types.Hover {
    const tracy_zone = tracy.trace(@src())
;
    defer tracy_zone.end();

    const loc = Analyser.identifierLocFromPosition(pos_index, handle) orelse return null;
    const identifier = offsets.locToSlice(handle.tree.source, loc);
    const symbol = (try analyser.getSymbolGlobal(pos_index, handle, identifier)) orelse return null;
    const hover_text = (try hoverSymbol(analyser, arena, symbol, markup_kind)) orelse return null;

    return types.Hover{
        .contents = .{ .MarkupContent = .{
            .kind = markup_kind,
            .value = hover_text,
        } },
        .range = offsets.locToRange(handle.tree.source, loc, offset_encoding),
    };
}
/// Hover for an enum literal (`.foo`), resolving it against the expected type.
fn hoverDefinitionEnumLiteral(
    analyser: *Analyser,
    arena: std.mem.Allocator,
    handle: *DocumentStore.Handle,
    source_index: usize,
    markup_kind: types.MarkupKind,
    offset_encoding: offsets.Encoding,
) error{OutOfMemory}!?types.Hover {
    const tracy_zone = tracy.trace(@src());
    defer tracy_zone.end();

    const loc = Analyser.identifierLocFromPosition(source_index, handle) orelse return null;
    const identifier = offsets.locToSlice(handle.tree.source, loc);
    const symbol = (try analyser.getSymbolEnumLiteral(arena, handle, source_index, identifier)) orelse return null;
    const hover_text = (try hoverSymbol(analyser, arena, symbol, markup_kind)) orelse return null;

    return types.Hover{
        .contents = .{ .MarkupContent = .{
            .kind = markup_kind,
            .value = hover_text,
        } },
        .range = offsets.locToRange(handle.tree.source, loc, offset_encoding),
    };
}
/// Hover for `a.b.c` style field access. Several declarations can match;
/// their hover texts are joined with blank lines.
fn hoverDefinitionFieldAccess(
    analyser: *Analyser,
    arena: std.mem.Allocator,
    handle: *DocumentStore.Handle,
    source_index: usize,
    loc: offsets.Loc,
    markup_kind: types.MarkupKind,
    offset_encoding: offsets.Encoding,
) error{OutOfMemory}!?types.Hover {
    const tracy_zone = tracy.trace(@src());
    defer tracy_zone.end();

    const name_loc = Analyser.identifierLocFromPosition(source_index, handle) orelse return null;
    const name = offsets.locToSlice(handle.tree.source, name_loc);
    const held_loc = offsets.locMerge(loc, name_loc);
    const decls = (try analyser.getSymbolFieldAccesses(arena, handle, source_index, held_loc, name)) orelse return null;

    var texts = try std.ArrayListUnmanaged([]const u8).initCapacity(arena, decls.len);
    for (decls) |decl| {
        const text = (try hoverSymbol(analyser, arena, decl, markup_kind)) orelse continue;
        texts.appendAssumeCapacity(text);
    }

    const value = switch (texts.items.len) {
        0 => return null,
        1 => texts.items[0],
        else => try std.mem.join(arena, "\n\n", texts.items),
    };

    return types.Hover{
        .contents = .{ .MarkupContent = .{
            .kind = markup_kind,
            .value = value,
        } },
        .range = offsets.locToRange(handle.tree.source, name_loc, offset_encoding),
    };
}
/// Renders an integer literal's value in binary, octal, decimal, and hex.
/// Returns null for floats, big ints, and literals that fail to parse.
fn hoverNumberLiteral(
    handle: *DocumentStore.Handle,
    token_index: Ast.TokenIndex,
    arena: std.mem.Allocator,
    markup_kind: types.MarkupKind,
) error{OutOfMemory}!?[]const u8 {
    const tree = handle.tree;
    // number literals get tokenized separately from their minus sign
    const is_negative = tree.tokens.items(.tag)[token_index -| 1] == .minus;
    const num_slice = tree.tokenSlice(token_index);
    const parsed = std.zig.parseNumberLiteral(num_slice);
    switch (parsed) {
        .int => |number| switch (markup_kind) {
            // Markdown: a two-column table; [count]/[len] are runtime field
            // widths so the columns line up across rows.
            .markdown => return try std.fmt.allocPrint(arena,
                \\| Base | {[value]s:<[count]} |
                \\| ---- | {[dash]s:-<[count]} |
                \\| BIN | {[sign]s}0b{[number]b:<[len]} |
                \\| OCT | {[sign]s}0o{[number]o:<[len]} |
                \\| DEC | {[sign]s}{[number]d:<[len]} |
                \\| HEX | {[sign]s}0x{[number]X:<[len]} |
            , .{
                .sign = if (is_negative) "-" else "",
                .dash = "-",
                .value = "Value",
                .number = number,
                // bit length of the value plus room for a "0x" prefix and sign
                .count = @bitSizeOf(@TypeOf(number)) - @clz(number) + "0x".len + @intFromBool(is_negative),
                // bit length of the value (width of the widest rendering, BIN)
                .len = @bitSizeOf(@TypeOf(number)) - @clz(number),
            }),
            .plaintext => return try std.fmt.allocPrint(
                arena,
                \\BIN: {[sign]s}0b{[number]b}
                \\OCT: {[sign]s}0o{[number]o}
                \\DEC: {[sign]s}{[number]d}
                \\HEX: {[sign]s}0x{[number]X}
            ,
                .{ .sign = if (is_negative) "-" else "", .number = number },
            ),
        },
        // No multi-base rendering for these.
        .big_int, .float, .failure => return null,
    }
}
/// Hover for a number literal under the cursor.
/// fix: declares the explicit `error{OutOfMemory}` error set like every other
/// hoverDefinition* handler in this file (only hoverNumberLiteral can fail,
/// and it only returns OutOfMemory), instead of an inferred `!`.
fn hoverDefinitionNumberLiteral(
    arena: std.mem.Allocator,
    handle: *DocumentStore.Handle,
    source_index: usize,
    markup_kind: types.MarkupKind,
    offset_encoding: offsets.Encoding,
) error{OutOfMemory}!?types.Hover {
    const tracy_zone = tracy.trace(@src());
    defer tracy_zone.end();
    const tree = handle.tree;
    const token_index = offsets.sourceIndexToTokenIndex(tree, source_index);
    const num_loc = offsets.tokenToLoc(tree, token_index);
    const hover_text = (try hoverNumberLiteral(handle, token_index, arena, markup_kind)) orelse return null;
    return .{
        .contents = .{ .MarkupContent = .{
            .kind = markup_kind,
            .value = hover_text,
        } },
        .range = offsets.locToRange(handle.tree.source, num_loc, offset_encoding),
    };
}
/// Entry point for textDocument/hover: classifies the cursor position and
/// dispatches to the matching renderer. Position contexts with no dedicated
/// renderer (comments, string literals, ...) yield null.
pub fn hover(
    analyser: *Analyser,
    arena: std.mem.Allocator,
    handle: *DocumentStore.Handle,
    source_index: usize,
    markup_kind: types.MarkupKind,
    offset_encoding: offsets.Encoding,
) !?types.Hover {
    const pos_context = try Analyser.getPositionContext(arena, handle.tree.source, source_index, true);
    const response = switch (pos_context) {
        .builtin => try hoverDefinitionBuiltin(analyser, arena, handle, source_index, markup_kind, offset_encoding),
        .var_access => try hoverDefinitionGlobal(analyser, arena, handle, source_index, markup_kind, offset_encoding),
        .field_access => |loc| try hoverDefinitionFieldAccess(analyser, arena, handle, source_index, loc, markup_kind, offset_encoding),
        .label => try hoverDefinitionLabel(analyser, arena, handle, source_index, markup_kind, offset_encoding),
        .enum_literal => try hoverDefinitionEnumLiteral(analyser, arena, handle, source_index, markup_kind, offset_encoding),
        .number_literal => try hoverDefinitionNumberLiteral(arena, handle, source_index, markup_kind, offset_encoding),
        else => null,
    };
    return response;
}
| https://raw.githubusercontent.com/zigtools/zls/c5ceadf362df07aa40b657db166bf6229a5ea1c5/src/features/hover.zig |
// Copyright (C) 2023-2024 Lightpanda (Selecy SAS)
//
// Francis Bouvier <[email protected]>
// Pierre Tachoire <[email protected]>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
// This file makes the glue between mimalloc heap allocation and libdom memory
// management.
// We replace the libdom default usage of allocations with mimalloc heap
// allocation to be able to free all memory used at once, like an arena usage.
const std = @import("std");
const c = @cImport({
@cInclude("mimalloc.h");
});
// Errors returned by create().
const Error = error{
    HeapNotNull,
    HeapNull,
};
// Single process-wide mimalloc heap; null until create() succeeds and again
// after destroy(). NOTE(review): access is unsynchronized — confirm that
// create/destroy and the exported allocators are not called concurrently.
var heap: ?*c.mi_heap_t = null;
/// Creates the process-wide mimalloc heap.
/// Fails if a heap already exists or if mimalloc cannot allocate one.
pub fn create() Error!void {
    if (heap) |_| return Error.HeapNotNull;
    const new_heap = c.mi_heap_new();
    if (new_heap == null) return Error.HeapNull;
    heap = new_heap;
}
/// Destroys the heap, releasing every allocation made through it at once.
/// Safe to call when no heap exists.
pub fn destroy() void {
    const h = heap orelse return;
    c.mi_heap_destroy(h);
    heap = null;
}
/// malloc replacement backed by the mimalloc heap; null before create().
pub export fn m_alloc(size: usize) callconv(.C) ?*anyopaque {
    const h = heap orelse return null;
    return c.mi_heap_malloc(h, size);
}
/// realloc replacement backed by the mimalloc heap; null before create().
pub export fn re_alloc(ptr: ?*anyopaque, size: usize) callconv(.C) ?*anyopaque {
    const h = heap orelse return null;
    return c.mi_heap_realloc(h, ptr, size);
}
/// calloc replacement backed by the mimalloc heap; null before create().
pub export fn c_alloc(nmemb: usize, size: usize) callconv(.C) ?*anyopaque {
    const h = heap orelse return null;
    return c.mi_heap_calloc(h, nmemb, size);
}
/// strdup replacement backed by the mimalloc heap; null before create().
pub export fn str_dup(s: [*c]const u8) callconv(.C) [*c]u8 {
    const h = heap orelse return null;
    return c.mi_heap_strdup(h, s);
}
/// strndup replacement backed by the mimalloc heap; null before create().
pub export fn strn_dup(s: [*c]const u8, size: usize) callconv(.C) [*c]u8 {
    const h = heap orelse return null;
    return c.mi_heap_strndup(h, s, size);
}
/// free replacement: intentionally a no-op. Memory is reclaimed in one shot
/// by destroy(), arena style.
pub export fn f_ree(_: ?*anyopaque) callconv(.C) void {}
| https://raw.githubusercontent.com/lightpanda-io/browser/152a4e5e7fdab3cc5c4509599407740aaae56f25/src/mimalloc.zig |
//! https://learn.microsoft.com/en-us/windows/win32/api/wingdi/ns-wingdi-bitmapinfoheader
//! https://learn.microsoft.com/en-us/previous-versions//dd183376(v=vs.85)
//! https://learn.microsoft.com/en-us/windows/win32/api/wingdi/ns-wingdi-bitmapinfo
//! https://learn.microsoft.com/en-us/windows/win32/api/wingdi/ns-wingdi-bitmapcoreheader
//! https://archive.org/details/mac_Graphics_File_Formats_Second_Edition_1996/page/n607/mode/2up
//! https://learn.microsoft.com/en-us/windows/win32/api/wingdi/ns-wingdi-bitmapv5header
//!
//! Notes:
//! - The Microsoft documentation is incredibly unclear about the color table when the
//! bit depth is >= 16.
//! + For bit depth 24 it says "the bmiColors member of BITMAPINFO is NULL" but also
//! says "the bmiColors color table is used for optimizing colors used on palette-based
//! devices, and must contain the number of entries specified by the bV5ClrUsed member"
//! + For bit depth 16 and 32, it seems to imply that if the compression is BI_BITFIELDS
//! or BI_ALPHABITFIELDS, then the color table *only* consists of the bit masks, but
//! doesn't really say this outright and the Wikipedia article seems to disagree
//! For the purposes of this implementation, color tables can always be present for any
//! bit depth and compression, and the color table follows the header + any optional
//! bit mask fields dictated by the specified compression.
const std = @import("std");
const BitmapHeader = @import("ico.zig").BitmapHeader;
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
// The "BM" magic that opens every Windows bitmap file.
pub const windows_format_id = std.mem.readInt(u16, "BM", native_endian);
// Size of the BITMAPFILEHEADER that precedes the DIB header.
pub const file_header_len = 14;
// Errors that `read` can return while validating the file/DIB headers.
pub const ReadError = error{
    UnexpectedEOF,
    InvalidFileHeader,
    ImpossiblePixelDataOffset,
    UnknownBitmapVersion,
    InvalidBitsPerPixel,
    TooManyColorsInPalette,
    MissingBitfieldMasks,
};
/// Interpreted summary of a bitmap's file header + DIB header, with helpers
/// for computing where the optional bitmasks, color palette, and pixel data
/// live and how large each region should be.
pub const BitmapInfo = struct {
    /// Size in bytes of the DIB header, as read from the file; identifies the bitmap version.
    dib_header_size: u32,
    /// Contains the interpreted number of colors in the palette (e.g.
    /// if the field's value is zero and the bit depth is <= 8, this
    /// will contain the maximum number of colors for the bit depth
    /// rather than the field's value directly).
    colors_in_palette: u32,
    /// 4 for BITMAPINFOHEADER-style files (RGBQUAD), 3 for win2.0 (RGBTRIPLE).
    bytes_per_color_palette_element: u8,
    /// Offset from the start of the file to the pixel data, from the file header.
    pixel_data_offset: u32,
    compression: Compression,
    /// Palette byte length implied by colors_in_palette (what SHOULD be present).
    pub fn getExpectedPaletteByteLen(self: *const BitmapInfo) u64 {
        return @as(u64, self.colors_in_palette) * self.bytes_per_color_palette_element;
    }
    /// Palette byte length actually present in the file (header gap minus bitmasks).
    pub fn getActualPaletteByteLen(self: *const BitmapInfo) u64 {
        return self.getByteLenBetweenHeadersAndPixels() - self.getBitmasksByteLen();
    }
    /// Bytes between the end of the DIB header and the start of pixel data.
    pub fn getByteLenBetweenHeadersAndPixels(self: *const BitmapInfo) u64 {
        return @as(u64, self.pixel_data_offset) - self.dib_header_size - file_header_len;
    }
    /// Byte length of the BITFIELDS masks that trail the header, if any.
    pub fn getBitmasksByteLen(self: *const BitmapInfo) u8 {
        // Only BITMAPINFOHEADER (3.1) has trailing bytes for the BITFIELDS
        // The 2.0 format doesn't have a compression field and 4.0+ has dedicated
        // fields for the masks in the header.
        const dib_version = BitmapHeader.Version.get(self.dib_header_size);
        return switch (dib_version) {
            .@"nt3.1" => switch (self.compression) {
                .BI_BITFIELDS => 12,
                .BI_ALPHABITFIELDS => 16,
                else => 0,
            },
            else => 0,
        };
    }
    /// How many palette bytes are missing relative to the expected length (0 if none).
    pub fn getMissingPaletteByteLen(self: *const BitmapInfo) u64 {
        if (self.getActualPaletteByteLen() >= self.getExpectedPaletteByteLen()) return 0;
        return self.getExpectedPaletteByteLen() - self.getActualPaletteByteLen();
    }
    /// Returns the full byte len of the DIB header + optional bitmasks + color palette
    pub fn getExpectedByteLenBeforePixelData(self: *const BitmapInfo) u64 {
        return @as(u64, self.dib_header_size) + self.getBitmasksByteLen() + self.getExpectedPaletteByteLen();
    }
    /// Returns the full expected byte len
    pub fn getExpectedByteLen(self: *const BitmapInfo, file_size: u64) u64 {
        return self.getExpectedByteLenBeforePixelData() + self.getPixelDataLen(file_size);
    }
    /// Bytes from the pixel data offset to the end of the file.
    pub fn getPixelDataLen(self: *const BitmapInfo, file_size: u64) u64 {
        return file_size - self.pixel_data_offset;
    }
};
/// Reads and validates the BITMAPFILEHEADER and DIB header from `reader`,
/// returning the interpreted BitmapInfo. `max_size` is the total file size,
/// used to sanity-check the pixel data offset. Supports the win2.0 core
/// header and the nt3.1/4.0/5.0 info headers.
pub fn read(reader: anytype, max_size: u64) ReadError!BitmapInfo {
    var bitmap_info: BitmapInfo = undefined;
    const file_header = reader.readBytesNoEof(file_header_len) catch return error.UnexpectedEOF;
    // Bytes 0-1: the "BM" magic.
    const id = std.mem.readInt(u16, file_header[0..2], native_endian);
    if (id != windows_format_id) return error.InvalidFileHeader;
    // Bytes 10-13: offset to pixel data; must lie inside the file and past both headers.
    bitmap_info.pixel_data_offset = std.mem.readInt(u32, file_header[10..14], .little);
    if (bitmap_info.pixel_data_offset > max_size) return error.ImpossiblePixelDataOffset;
    // The first u32 of every DIB header variant is its own size, which
    // identifies the bitmap version.
    bitmap_info.dib_header_size = reader.readInt(u32, .little) catch return error.UnexpectedEOF;
    if (bitmap_info.pixel_data_offset < file_header_len + bitmap_info.dib_header_size) return error.ImpossiblePixelDataOffset;
    const dib_version = BitmapHeader.Version.get(bitmap_info.dib_header_size)
;
    switch (dib_version) {
        .@"nt3.1", .@"nt4.0", .@"nt5.0" => {
            // Re-materialize the header struct: write back the size field we
            // already consumed, then read the remaining fields.
            var dib_header_buf: [@sizeOf(BITMAPINFOHEADER)]u8 align(@alignOf(BITMAPINFOHEADER)) = undefined;
            std.mem.writeInt(u32, dib_header_buf[0..4], bitmap_info.dib_header_size, .little);
            reader.readNoEof(dib_header_buf[4..]) catch return error.UnexpectedEOF;
            var dib_header: *BITMAPINFOHEADER = @ptrCast(&dib_header_buf);
            structFieldsLittleToNative(BITMAPINFOHEADER, dib_header);
            bitmap_info.colors_in_palette = try dib_header.numColorsInTable();
            bitmap_info.bytes_per_color_palette_element = 4;
            bitmap_info.compression = @enumFromInt(dib_header.biCompression);
            // BI_BITFIELDS/BI_ALPHABITFIELDS require mask bytes after the header.
            if (bitmap_info.getByteLenBetweenHeadersAndPixels() < bitmap_info.getBitmasksByteLen()) {
                return error.MissingBitfieldMasks;
            }
        },
        .@"win2.0" => {
            var dib_header_buf: [@sizeOf(BITMAPCOREHEADER)]u8 align(@alignOf(BITMAPCOREHEADER)) = undefined;
            std.mem.writeInt(u32, dib_header_buf[0..4], bitmap_info.dib_header_size, .little);
            reader.readNoEof(dib_header_buf[4..]) catch return error.UnexpectedEOF;
            const dib_header: *BITMAPCOREHEADER = @ptrCast(&dib_header_buf);
            structFieldsLittleToNative(BITMAPCOREHEADER, dib_header);
            // > The size of the color palette is calculated from the BitsPerPixel value.
            // > The color palette has 2, 16, 256, or 0 entries for a BitsPerPixel of
            // > 1, 4, 8, and 24, respectively.
            bitmap_info.colors_in_palette = switch (dib_header.bcBitCount) {
                inline 1, 4, 8 => |bit_count| 1 << bit_count,
                24 => 0,
                else => return error.InvalidBitsPerPixel,
            };
            bitmap_info.bytes_per_color_palette_element = 3;
            // win2.0 has no compression field; treat as uncompressed RGB.
            bitmap_info.compression = .BI_RGB;
        },
        .unknown => return error.UnknownBitmapVersion,
    }
    return bitmap_info;
}
/// https://learn.microsoft.com/en-us/windows/win32/api/wingdi/ns-wingdi-bitmapcoreheader
/// On-disk layout of the Windows 2.0 core DIB header (12 bytes, little-endian
/// in the file; byte-swapped to native order after reading).
pub const BITMAPCOREHEADER = extern struct {
    bcSize: u32,
    bcWidth: u16,
    bcHeight: u16,
    bcPlanes: u16,
    bcBitCount: u16,
};
/// https://learn.microsoft.com/en-us/windows/win32/api/wingdi/ns-wingdi-bitmapinfoheader
/// On-disk layout of the 40-byte nt3.1+ DIB header (little-endian in the
/// file; byte-swapped to native order after reading).
pub const BITMAPINFOHEADER = extern struct {
    // NOTE(review): Microsoft names this field biSize; the bc prefix here
    // appears to be a carryover from BITMAPCOREHEADER.
    bcSize: u32,
    biWidth: i32,
    biHeight: i32,
    biPlanes: u16,
    biBitCount: u16,
    biCompression: u32,
    biSizeImage: u32,
    biXPelsPerMeter: i32,
    biYPelsPerMeter: i32,
    biClrUsed: u32,
    biClrImportant: u32,
    /// Returns error.TooManyColorsInPalette if the number of colors specified
    /// exceeds the number of possible colors referenced in the pixel data (i.e.
    /// if 1 bit is used per pixel, then the color table can't have more than 2 colors
    /// since any more couldn't possibly be indexed in the pixel data)
    ///
    /// Returns error.InvalidBitsPerPixel if the bit depth is not 1, 4, 8, 16, 24, or 32.
    pub fn numColorsInTable(self: BITMAPINFOHEADER) !u32 {
        switch (self.biBitCount) {
            inline 1, 4, 8 => |bit_count| switch (self.biClrUsed) {
                // > If biClrUsed is zero, the array contains the maximum number of
                // > colors for the given bitdepth; that is, 2^biBitCount colors
                0 => return 1 << bit_count,
                // > If biClrUsed is nonzero and the biBitCount member is less than 16,
                // > the biClrUsed member specifies the actual number of colors the
                // > graphics engine or device driver accesses.
                else => {
                    const max_colors = 1 << bit_count;
                    if (self.biClrUsed > max_colors) {
                        return error.TooManyColorsInPalette;
                    }
                    return self.biClrUsed;
                },
            },
            // > If biBitCount is 16 or greater, the biClrUsed member specifies
            // > the size of the color table used to optimize performance of the
            // > system color palettes.
            //
            // Note: Bit depths >= 16 only use the color table 'for optimizing colors
            // used on palette-based devices', but it still makes sense to limit their
            // colors since the pixel data is still limited to this number of colors
            // (i.e. even though the color table is not indexed by the pixel data,
            // the color table having more colors than the pixel data can represent
            // would never make sense and indicates a malformed bitmap).
            inline 16, 24, 32 => |bit_count| {
                const max_colors = 1 << bit_count;
                if (self.biClrUsed > max_colors) {
                    return error.TooManyColorsInPalette;
                }
                return self.biClrUsed;
            },
            else => return error.InvalidBitsPerPixel,
        }
    }
};
/// biCompression values (the Windows BI_* constants). Declared non-exhaustive
/// (`_`) so unrecognized on-disk values can still be stored via @enumFromInt.
pub const Compression = enum(u32) {
    BI_RGB = 0,
    BI_RLE8 = 1,
    BI_RLE4 = 2,
    BI_BITFIELDS = 3,
    BI_JPEG = 4,
    BI_PNG = 5,
    BI_ALPHABITFIELDS = 6,
    BI_CMYK = 11,
    BI_CMYKRLE8 = 12,
    BI_CMYKRLE4 = 13,
    _,
};
/// Byte-swaps every field of `x` from little-endian (file order) to native
/// byte order. A no-op on little-endian targets.
fn structFieldsLittleToNative(comptime T: type, x: *T) void {
    inline for (std.meta.fields(T)) |field| {
        const file_value = @field(x, field.name);
        @field(x, field.name) = std.mem.littleToNative(field.type, file_value);
    }
}
test "read" {
    // Minimal valid 1x1 16bpp nt3.1 bitmap; individual bytes are corrupted
    // below (and restored) to exercise each error path.
    var bmp_data = "BM<\x00\x00\x00\x00\x00\x00\x006\x00\x00\x00(\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01\x00\x10\x00\x00\x00\x00\x00\x06\x00\x00\x00\x12\x0b\x00\x00\x12\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x7f\x00\x00\x00\x00".*;
    var fbs = std.io.fixedBufferStream(&bmp_data);
    {
        // Happy path: header parses and version is detected from the DIB size.
        const bitmap = try read(fbs.reader(), bmp_data.len);
        try std.testing.expectEqual(@as(u32, BitmapHeader.Version.@"nt3.1".len()), bitmap.dib_header_size);
    }
    {
        // Bogus DIB header size -> unknown version.
        fbs.reset();
        bmp_data[file_header_len] = 11;
        try std.testing.expectError(error.UnknownBitmapVersion, read(fbs.reader(), bmp_data.len));
        // restore
        bmp_data[file_header_len] = BitmapHeader.Version.@"nt3.1".len();
    }
    {
        // Corrupt the "BM" magic.
        fbs.reset();
        bmp_data[0] = 'b';
        try std.testing.expectError(error.InvalidFileHeader, read(fbs.reader(), bmp_data.len));
        // restore
        bmp_data[0] = 'B';
    }
    {
        // Truncated mid-DIB-header.
        const cutoff_len = file_header_len + BitmapHeader.Version.@"nt3.1".len() - 1;
        var dib_cutoff_fbs = std.io.fixedBufferStream(bmp_data[0..cutoff_len]);
        try std.testing.expectError(error.UnexpectedEOF, read(dib_cutoff_fbs.reader(), bmp_data.len));
    }
    {
        // Truncated mid-file-header.
        const cutoff_len = file_header_len - 1;
        var bmp_cutoff_fbs = std.io.fixedBufferStream(bmp_data[0..cutoff_len]);
        try std.testing.expectError(error.UnexpectedEOF, read(bmp_cutoff_fbs.reader(), bmp_data.len));
    }
}
| https://raw.githubusercontent.com/squeek502/resinator/e9023319993177ac3d49a8a833522eecedc903f7/src/bmp.zig |
// pub fn checkTransaction(tx: *Transaction, state: *TxValidationState) !bool {
// if (tx.vin.empty()) {
// return false;
// }
// if (tx.vout.empty()) {
// return false;
// }
// return true;
// }
| https://raw.githubusercontent.com/iskyd/zbtc/ce96eca9ef58530ce2fda696adf193e1a1c62454/src/consensus/txcheck.zig |
const std = @import("std");
/// Deprecated: referencing this emits a compile error pointing callers at std.BoundedArray.
pub const Fixbuf = @compileError("Please use std.BoundedArray instead");
/// Returns true if `err` is one of the errors declared in `ErrorSet`.
pub fn errSetContains(comptime ErrorSet: type, err: anyerror) bool {
    const error_fields = comptime std.meta.fields(ErrorSet);
    inline for (error_fields) |field| {
        if (@field(ErrorSet, field.name) == err) return true;
    }
    return false;
}
/// Comptime helper: the declared return type of `func`.
/// NOTE(review): `.BoundFn` only exists in older Zig versions; this file
/// targets such a version.
pub fn ReturnOf(comptime func: anytype) type {
    return switch (@typeInfo(@TypeOf(func))) {
        .Fn, .BoundFn => |fn_info| fn_info.return_type.?,
        // Not a function: comptime-unreachable.
        else => unreachable,
    };
}
/// Comptime helper: the error set of `func`'s return type.
/// `func` must return an error union.
pub fn ErrorOf(comptime func: anytype) type {
    const return_type = ReturnOf(func);
    return switch (@typeInfo(return_type)) {
        .ErrorUnion => |eu_info| eu_info.error_set,
        // Return type is not an error union: comptime-unreachable.
        else => unreachable,
    };
}
/// A single-slot, mutex-guarded mailbox: a producer parks one value of type
/// T with `putOverwrite` and a consumer takes it with `get`/`getWithTimeout`.
pub fn Mailbox(comptime T: type) type {
    return struct {
        const Self = @This();
        value: ?T = null,
        mutex: std.Thread.Mutex = .{},
        reset_event: std.Thread.ResetEvent = .{},
        /// Blocks until a value is posted, then takes it (slot becomes empty).
        pub fn get(self: *Self) T {
            self.reset_event.wait();
            self.mutex.lock();
            defer self.mutex.unlock();
            self.reset_event.reset();
            // Clear the slot on the way out; the unwrap is backed by the event
            // having been set by putOverwrite.
            defer self.value = null;
            return self.value.?;
        }
        /// Like get(), but gives up after `timeout_ns` nanoseconds and
        /// returns null if no value was posted in time.
        pub fn getWithTimeout(self: *Self, timeout_ns: u64) ?T {
            self.reset_event.timedWait(timeout_ns) catch |err| switch (err) {
                // Timeout just means the slot may still be empty; fall through.
                error.Timeout => {},
            };
            self.mutex.lock();
            defer self.mutex.unlock();
            self.reset_event.reset();
            defer self.value = null;
            return self.value;
        }
        /// Posts `value`, silently replacing any value not yet consumed.
        pub fn putOverwrite(self: *Self, value: T) void {
            self.mutex.lock();
            defer self.mutex.unlock();
            self.value = value;
            self.reset_event.set();
        }
    };
}
/// Wraps a std.net.Stream with an absolute read/write deadline: every read
/// and write first polls the fd and fails with error.Timeout once the
/// deadline set at init() has passed.
pub const TimeoutStream = struct {
    underlying_stream: std.net.Stream,
    // Absolute CLOCK_REALTIME deadline; null means "no timeout".
    expiration: ?std.os.timespec = null,
    /// Computes the absolute deadline `duration_ms` from now.
    /// A duration of 0 disables the timeout entirely.
    pub fn init(stream: std.net.Stream, duration_ms: u32) !TimeoutStream {
        if (duration_ms == 0) {
            return TimeoutStream{ .underlying_stream = stream };
        }
        var now: std.os.timespec = undefined;
        std.os.clock_gettime(std.os.CLOCK.REALTIME, &now) catch |err| switch (err) {
            error.UnsupportedClock => unreachable,
            else => |e| return e,
        };
        // Nanosecond part plus the sub-second portion of the duration; may
        // exceed 1s, the carry is folded into tv_sec below via divFloor/mod.
        const raw_ns = now.tv_nsec + @as(i64, duration_ms % 1000) * 1_000_000;
        return TimeoutStream{
            .underlying_stream = stream,
            .expiration = std.os.timespec{
                .tv_sec = now.tv_sec + duration_ms / 1000 + @divFloor(raw_ns, 1_000_000_000),
                .tv_nsec = @mod(raw_ns, 1_000_000_000),
            },
        };
    }
    pub fn close(self: TimeoutStream) void {
        self.underlying_stream.close();
    }
    pub const ReadError = std.net.Stream.ReadError || error{Timeout};
    pub const WriteError = std.net.Stream.WriteError || error{Timeout};
    pub const Reader = std.io.Reader(TimeoutStream, ReadError, read);
    pub const Writer = std.io.Writer(TimeoutStream, WriteError, write);
    pub fn reader(self: TimeoutStream) Reader {
        return .{ .context = self };
    }
    pub fn writer(self: TimeoutStream) Writer {
        return .{ .context = self };
    }
    const PollFdEvents = std.meta.fieldInfo(std.os.pollfd, .events).field_type;
    /// Polls the fd for `events` until ready or the deadline passes.
    /// No-op when no deadline is set.
    fn pollWait(self: TimeoutStream, events: PollFdEvents) !void {
        if (self.expiration) |expiration| {
            var polling = [_]std.os.pollfd{.{
                .fd = self.underlying_stream.handle,
                .events = events,
                .revents = 0,
            }};
            var now: std.os.timespec = undefined;
            std.os.clock_gettime(std.os.CLOCK.REALTIME, &now) catch |err| switch (err) {
                error.UnsupportedClock => unreachable,
                else => |e| return e,
            };
            // Remaining time in ms; a negative result (deadline already
            // passed) fails the u31 cast and becomes error.Timeout.
            const timeout_ms = std.math.cast(u31, (expiration.tv_sec - now.tv_sec) * 1_000 + @divFloor(expiration.tv_nsec - now.tv_nsec, 1_000_000)) orelse return error.Timeout;
            // NOTE(review): NetworkSubsystemFailed is mapped to Timeout here — confirm intent.
            const poll_result = std.os.poll(&polling, timeout_ms) catch |err| return switch (err) {
                error.NetworkSubsystemFailed => error.Timeout,
                else => |e| e,
            };
            // poll() returning 0 means the timeout elapsed with no readiness.
            if (poll_result == 0) {
                return error.Timeout;
            }
        }
    }
    pub fn read(self: TimeoutStream, buffer: []u8) ReadError!usize {
        try self.pollWait(std.os.POLL.IN);
        return self.underlying_stream.read(buffer);
    }
    pub fn write(self: TimeoutStream, buffer: []const u8) WriteError!usize {
        try self.pollWait(std.os.POLL.OUT);
        return self.underlying_stream.write(buffer);
    }
};
| https://raw.githubusercontent.com/fengb/zCord/a6334ebf39773abe7e7272205170d7bed1d93dcd/src/util.zig |
const std = @import("../../../std.zig");
const kern = @import("kern.zig");
const PtRegs = @compileError("TODO missing os bits: PtRegs");
const TcpHdr = @compileError("TODO missing os bits: TcpHdr");
const SkFullSock = @compileError("TODO missing os bits: SkFullSock");
// in BPF, all the helper calls
// TODO: when https://github.com/ziglang/zig/issues/1717 is here, make a nice
// function that uses the Helper enum
//
// Note, these function signatures were created from documentation found in
// '/usr/include/linux/bpf.h'
// Each helper is declared as a function pointer whose address IS the numeric
// helper ID the in-kernel BPF loader/verifier resolves; signatures come from
// the documentation in linux/bpf.h (see comment above).
// Map access helpers.
pub const map_lookup_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) ?*anyopaque, @ptrFromInt(1));
pub const map_update_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(2));
pub const map_delete_elem = @as(*const fn (map: *const kern.MapDef, key: ?*const anyopaque) c_long, @ptrFromInt(3));
// Probing, time, and task-context helpers.
pub const probe_read = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(4));
pub const ktime_get_ns = @as(*const fn () u64, @ptrFromInt(5));
pub const trace_printk = @as(*const fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, @ptrFromInt(6));
pub const get_prandom_u32 = @as(*const fn () u32, @ptrFromInt(7));
pub const get_smp_processor_id = @as(*const fn () u32, @ptrFromInt(8));
// Socket-buffer (skb) manipulation helpers.
pub const skb_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32, flags: u64) c_long, @ptrFromInt(9));
pub const l3_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, @ptrFromInt(10));
pub const l4_csum_replace = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, @ptrFromInt(11));
pub const tail_call = @as(*const fn (ctx: ?*anyopaque, prog_array_map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(12));
pub const clone_redirect = @as(*const fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, @ptrFromInt(13));
pub const get_current_pid_tgid = @as(*const fn () u64, @ptrFromInt(14));
pub const get_current_uid_gid = @as(*const fn () u64, @ptrFromInt(15));
pub const get_current_comm = @as(*const fn (buf: ?*anyopaque, size_of_buf: u32) c_long, @ptrFromInt(16));
pub const get_cgroup_classid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(17));
// Note vlan_proto is big endian
pub const skb_vlan_push = @as(*const fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, @ptrFromInt(18));
pub const skb_vlan_pop = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(19));
pub const skb_get_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(20));
pub const skb_set_tunnel_key = @as(*const fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, @ptrFromInt(21));
pub const perf_event_read = @as(*const fn (map: *const kern.MapDef, flags: u64) u64, @ptrFromInt(22));
pub const redirect = @as(*const fn (ifindex: u32, flags: u64) c_long, @ptrFromInt(23));
pub const get_route_realm = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(24));
pub const perf_event_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(25));
pub const skb_load_bytes = @as(*const fn (skb: ?*anyopaque, offset: u32, to: ?*anyopaque, len: u32) c_long, @ptrFromInt(26));
pub const get_stackid = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64) c_long, @ptrFromInt(27));
// from and to point to __be32
pub const csum_diff = @as(*const fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, @ptrFromInt(28));
pub const skb_get_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(29));
pub const skb_set_tunnel_opt = @as(*const fn (skb: *kern.SkBuff, opt: ?*anyopaque, size: u32) c_long, @ptrFromInt(30));
// proto is __be16
pub const skb_change_proto = @as(*const fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, @ptrFromInt(31));
pub const skb_change_type = @as(*const fn (skb: *kern.SkBuff, skb_type: u32) c_long, @ptrFromInt(32));
pub const skb_under_cgroup = @as(*const fn (skb: *kern.SkBuff, map: ?*const anyopaque, index: u32) c_long, @ptrFromInt(33));
pub const get_hash_recalc = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(34));
pub const get_current_task = @as(*const fn () u64, @ptrFromInt(35));
pub const probe_write_user = @as(*const fn (dst: ?*anyopaque, src: ?*const anyopaque, len: u32) c_long, @ptrFromInt(36));
pub const current_task_under_cgroup = @as(*const fn (map: *const kern.MapDef, index: u32) c_long, @ptrFromInt(37));
pub const skb_change_tail = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(38));
pub const skb_pull_data = @as(*const fn (skb: *kern.SkBuff, len: u32) c_long, @ptrFromInt(39));
pub const csum_update = @as(*const fn (skb: *kern.SkBuff, csum: u32) i64, @ptrFromInt(40));
pub const set_hash_invalid = @as(*const fn (skb: *kern.SkBuff) void, @ptrFromInt(41));
pub const get_numa_node_id = @as(*const fn () c_long, @ptrFromInt(42));
pub const skb_change_head = @as(*const fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, @ptrFromInt(43));
pub const xdp_adjust_head = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(44));
pub const probe_read_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(45));
pub const get_socket_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(46));
pub const get_socket_uid = @as(*const fn (skb: *kern.SkBuff) u32, @ptrFromInt(47));
pub const set_hash = @as(*const fn (skb: *kern.SkBuff, hash: u32) c_long, @ptrFromInt(48));
pub const setsockopt = @as(*const fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(49));
pub const skb_adjust_room = @as(*const fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, @ptrFromInt(50));
pub const redirect_map = @as(*const fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(51));
pub const sk_redirect_map = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(52));
pub const sock_map_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(53));
pub const xdp_adjust_meta = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(54));
pub const perf_event_read_value = @as(*const fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(55));
pub const perf_prog_read_value = @as(*const fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, @ptrFromInt(56));
pub const getsockopt = @as(*const fn (bpf_socket: ?*anyopaque, level: c_int, optname: c_int, optval: ?*anyopaque, optlen: c_int) c_long, @ptrFromInt(57));
pub const override_return = @as(*const fn (regs: *PtRegs, rc: u64) c_long, @ptrFromInt(58));
pub const sock_ops_cb_flags_set = @as(*const fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, @ptrFromInt(59));
pub const msg_redirect_map = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, @ptrFromInt(60));
pub const msg_apply_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(61));
pub const msg_cork_bytes = @as(*const fn (msg: *kern.SkMsgMd, bytes: u32) c_long, @ptrFromInt(62));
pub const msg_pull_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, @ptrFromInt(63));
pub const bind = @as(*const fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, @ptrFromInt(64));
pub const xdp_adjust_tail = @as(*const fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, @ptrFromInt(65));
pub const skb_get_xfrm_state = @as(*const fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, @ptrFromInt(66));
pub const get_stack = @as(*const fn (ctx: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(67));
pub const skb_load_bytes_relative = @as(*const fn (skb: ?*const anyopaque, offset: u32, to: ?*anyopaque, len: u32, start_header: u32) c_long, @ptrFromInt(68));
pub const fib_lookup = @as(*const fn (ctx: ?*anyopaque, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, @ptrFromInt(69));
pub const sock_hash_update = @as(*const fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(70));
pub const msg_redirect_hash = @as(*const fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(71));
pub const sk_redirect_hash = @as(*const fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(72));
pub const lwt_push_encap = @as(*const fn (skb: *kern.SkBuff, typ: u32, hdr: ?*anyopaque, len: u32) c_long, @ptrFromInt(73));
pub const lwt_seg6_store_bytes = @as(*const fn (skb: *kern.SkBuff, offset: u32, from: ?*const anyopaque, len: u32) c_long, @ptrFromInt(74));
pub const lwt_seg6_adjust_srh = @as(*const fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, @ptrFromInt(75));
pub const lwt_seg6_action = @as(*const fn (skb: *kern.SkBuff, action: u32, param: ?*anyopaque, param_len: u32) c_long, @ptrFromInt(76));
pub const rc_repeat = @as(*const fn (ctx: ?*anyopaque) c_long, @ptrFromInt(77));
pub const rc_keydown = @as(*const fn (ctx: ?*anyopaque, protocol: u32, scancode: u64, toggle: u32) c_long, @ptrFromInt(78));
pub const skb_cgroup_id = @as(*const fn (skb: *kern.SkBuff) u64, @ptrFromInt(79));
pub const get_current_cgroup_id = @as(*const fn () u64, @ptrFromInt(80));
pub const get_local_storage = @as(*const fn (map: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(81));
pub const sk_select_reuseport = @as(*const fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*anyopaque, flags: u64) c_long, @ptrFromInt(82));
pub const skb_ancestor_cgroup_id = @as(*const fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, @ptrFromInt(83));
pub const sk_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(84));
pub const sk_lookup_udp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(85));
pub const sk_release = @as(*const fn (sock: *kern.Sock) c_long, @ptrFromInt(86));
pub const map_push_elem = @as(*const fn (map: *const kern.MapDef, value: ?*const anyopaque, flags: u64) c_long, @ptrFromInt(87));
pub const map_pop_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(88));
pub const map_peek_elem = @as(*const fn (map: *const kern.MapDef, value: ?*anyopaque) c_long, @ptrFromInt(89));
pub const msg_push_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(90));
pub const msg_pop_data = @as(*const fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, @ptrFromInt(91));
pub const rc_pointer_rel = @as(*const fn (ctx: ?*anyopaque, rel_x: i32, rel_y: i32) c_long, @ptrFromInt(92));
pub const spin_lock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(93));
pub const spin_unlock = @as(*const fn (lock: *kern.SpinLock) c_long, @ptrFromInt(94));
pub const sk_fullsock = @as(*const fn (sk: *kern.Sock) ?*SkFullSock, @ptrFromInt(95));
pub const tcp_sock = @as(*const fn (sk: *kern.Sock) ?*kern.TcpSock, @ptrFromInt(96));
pub const skb_ecn_set_ce = @as(*const fn (skb: *kern.SkBuff) c_long, @ptrFromInt(97));
pub const get_listener_sock = @as(*const fn (sk: *kern.Sock) ?*kern.Sock, @ptrFromInt(98));
pub const skc_lookup_tcp = @as(*const fn (ctx: ?*anyopaque, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, @ptrFromInt(99));
pub const tcp_check_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, @ptrFromInt(100));
pub const sysctl_get_name = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, @ptrFromInt(101));
pub const sysctl_get_current_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(102));
pub const sysctl_get_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, @ptrFromInt(103));
pub const sysctl_set_new_value = @as(*const fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, @ptrFromInt(104));
pub const strtol = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, @ptrFromInt(105));
pub const strtoul = @as(*const fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, @ptrFromInt(106));
pub const sk_storage_get = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*anyopaque, flags: u64) ?*anyopaque, @ptrFromInt(107));
pub const sk_storage_delete = @as(*const fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, @ptrFromInt(108));
pub const send_signal = @as(*const fn (sig: u32) c_long, @ptrFromInt(109));
pub const tcp_gen_syncookie = @as(*const fn (sk: *kern.Sock, iph: ?*anyopaque, iph_len: u32, th: *TcpHdr, th_len: u32) i64, @ptrFromInt(110));
pub const skb_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(111));
pub const probe_read_user = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(112));
pub const probe_read_kernel = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(113));
pub const probe_read_user_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(114));
pub const probe_read_kernel_str = @as(*const fn (dst: ?*anyopaque, size: u32, unsafe_ptr: ?*const anyopaque) c_long, @ptrFromInt(115));
pub const tcp_send_ack = @as(*const fn (tp: ?*anyopaque, rcv_nxt: u32) c_long, @ptrFromInt(116));
pub const send_signal_thread = @as(*const fn (sig: u32) c_long, @ptrFromInt(117));
pub const jiffies64 = @as(*const fn () u64, @ptrFromInt(118));
pub const read_branch_records = @as(*const fn (ctx: *kern.PerfEventData, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(119));
pub const get_ns_current_pid_tgid = @as(*const fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, @ptrFromInt(120));
pub const xdp_output = @as(*const fn (ctx: ?*anyopaque, map: *const kern.MapDef, flags: u64, data: ?*anyopaque, size: u64) c_long, @ptrFromInt(121));
pub const get_netns_cookie = @as(*const fn (ctx: ?*anyopaque) u64, @ptrFromInt(122));
pub const get_current_ancestor_cgroup_id = @as(*const fn (ancestor_level: c_int) u64, @ptrFromInt(123));
pub const sk_assign = @as(*const fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, @ptrFromInt(124));
pub const ktime_get_boot_ns = @as(*const fn () u64, @ptrFromInt(125));
pub const seq_printf = @as(*const fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const anyopaque, data_len: u32) c_long, @ptrFromInt(126));
pub const seq_write = @as(*const fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, @ptrFromInt(127));
pub const sk_cgroup_id = @as(*const fn (sk: *kern.BpfSock) u64, @ptrFromInt(128));
pub const sk_ancestor_cgroup_id = @as(*const fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, @ptrFromInt(129));
pub const ringbuf_output = @as(*const fn (ringbuf: ?*anyopaque, data: ?*anyopaque, size: u64, flags: u64) c_long, @ptrFromInt(130));
pub const ringbuf_reserve = @as(*const fn (ringbuf: ?*anyopaque, size: u64, flags: u64) ?*anyopaque, @ptrFromInt(131));
pub const ringbuf_submit = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(132));
pub const ringbuf_discard = @as(*const fn (data: ?*anyopaque, flags: u64) void, @ptrFromInt(133));
pub const ringbuf_query = @as(*const fn (ringbuf: ?*anyopaque, flags: u64) u64, @ptrFromInt(134));
pub const csum_level = @as(*const fn (skb: *kern.SkBuff, level: u64) c_long, @ptrFromInt(135));
pub const skc_to_tcp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Tcp6Sock, @ptrFromInt(136));
pub const skc_to_tcp_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpSock, @ptrFromInt(137));
pub const skc_to_tcp_timewait_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpTimewaitSock, @ptrFromInt(138));
pub const skc_to_tcp_request_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.TcpRequestSock, @ptrFromInt(139));
pub const skc_to_udp6_sock = @as(*const fn (sk: ?*anyopaque) ?*kern.Udp6Sock, @ptrFromInt(140));
pub const get_task_stack = @as(*const fn (task: ?*anyopaque, buf: ?*anyopaque, size: u32, flags: u64) c_long, @ptrFromInt(141));
pub const load_hdr_opt = @as(*const fn (?*kern.BpfSockOps, ?*anyopaque, u32, u64) c_long, @ptrFromInt(142));
pub const store_hdr_opt = @as(*const fn (?*kern.BpfSockOps, ?*const anyopaque, u32, u64) c_long, @ptrFromInt(143));
pub const reserve_hdr_opt = @as(*const fn (?*kern.BpfSockOps, u32, u64) c_long, @ptrFromInt(144));
pub const inode_storage_get = @as(*const fn (?*anyopaque, ?*anyopaque, ?*anyopaque, u64) ?*anyopaque, @ptrFromInt(145));
pub const inode_storage_delete = @as(*const fn (?*anyopaque, ?*anyopaque) c_int, @ptrFromInt(146));
pub const d_path = @as(*const fn (?*kern.Path, [*c]u8, u32) c_long, @ptrFromInt(147));
pub const copy_from_user = @as(*const fn (?*anyopaque, u32, ?*const anyopaque) c_long, @ptrFromInt(148));
pub const snprintf_btf = @as(*const fn ([*c]u8, u32, ?*kern.BTFPtr, u32, u64) c_long, @ptrFromInt(149));
pub const seq_printf_btf = @as(*const fn (?*kern.SeqFile, ?*kern.BTFPtr, u32, u64) c_long, @ptrFromInt(150));
pub const skb_cgroup_classid = @as(*const fn (?*kern.SkBuff) u64, @ptrFromInt(151));
pub const redirect_neigh = @as(*const fn (u32, ?*kern.BpfRedirNeigh, c_int, u64) c_long, @ptrFromInt(152));
pub const per_cpu_ptr = @as(*const fn (?*const anyopaque, u32) ?*anyopaque, @ptrFromInt(153));
pub const this_cpu_ptr = @as(*const fn (?*const anyopaque) ?*anyopaque, @ptrFromInt(154));
pub const redirect_peer = @as(*const fn (u32, u64) c_long, @ptrFromInt(155));
pub const task_storage_get = @as(*const fn (?*anyopaque, ?*kern.Task, ?*anyopaque, u64) ?*anyopaque, @ptrFromInt(156));
pub const task_storage_delete = @as(*const fn (?*anyopaque, ?*kern.Task) c_long, @ptrFromInt(157));
pub const get_current_task_btf = @as(*const fn () ?*kern.Task, @ptrFromInt(158));
pub const bprm_opts_set = @as(*const fn (?*kern.BinPrm, u64) c_long, @ptrFromInt(159));
pub const ktime_get_coarse_ns = @as(*const fn () u64, @ptrFromInt(160));
pub const ima_inode_hash = @as(*const fn (?*kern.Inode, ?*anyopaque, u32) c_long, @ptrFromInt(161));
pub const sock_from_file = @as(*const fn (?*kern.File) ?*kern.Socket, @ptrFromInt(162));
pub const check_mtu = @as(*const fn (?*anyopaque, u32, [*c]u32, i32, u64) c_long, @ptrFromInt(163));
pub const for_each_map_elem = @as(*const fn (?*anyopaque, ?*anyopaque, ?*anyopaque, u64) c_long, @ptrFromInt(164));
pub const snprintf = @as(*const fn ([*c]u8, u32, [*c]const u8, [*c]u64, u32) c_long, @ptrFromInt(165));
pub const sys_bpf = @as(*const fn (u32, ?*anyopaque, u32) c_long, @ptrFromInt(166));
pub const btf_find_by_name_kind = @as(*const fn ([*c]u8, c_int, u32, c_int) c_long, @ptrFromInt(167));
pub const sys_close = @as(*const fn (u32) c_long, @ptrFromInt(168));
pub const timer_init = @as(*const fn (?*kern.BpfTimer, ?*anyopaque, u64) c_long, @ptrFromInt(169));
pub const timer_set_callback = @as(*const fn (?*kern.BpfTimer, ?*anyopaque) c_long, @ptrFromInt(170));
pub const timer_start = @as(*const fn (?*kern.BpfTimer, u64, u64) c_long, @ptrFromInt(171));
pub const timer_cancel = @as(*const fn (?*kern.BpfTimer) c_long, @ptrFromInt(172));
pub const get_func_ip = @as(*const fn (?*anyopaque) u64, @ptrFromInt(173));
pub const get_attach_cookie = @as(*const fn (?*anyopaque) u64, @ptrFromInt(174));
pub const task_pt_regs = @as(*const fn (?*kern.Task) c_long, @ptrFromInt(175));
pub const get_branch_snapshot = @as(*const fn (?*anyopaque, u32, u64) c_long, @ptrFromInt(176));
pub const trace_vprintk = @as(*const fn ([*c]const u8, u32, ?*const anyopaque, u32) c_long, @ptrFromInt(177));
pub const skc_to_unix_sock = @as(*const fn (?*anyopaque) ?*kern.UnixSock, @ptrFromInt(178));
pub const kallsyms_lookup_name = @as(*const fn ([*c]const u8, c_int, c_int, [*c]u64) c_long, @ptrFromInt(179));
pub const find_vma = @as(*const fn (?*kern.Task, u64, ?*anyopaque, ?*anyopaque, u64) c_long, @ptrFromInt(180));
pub const loop = @as(*const fn (u32, ?*anyopaque, ?*anyopaque, u64) c_long, @ptrFromInt(181));
pub const strncmp = @as(*const fn ([*c]const u8, u32, [*c]const u8) c_long, @ptrFromInt(182));
pub const get_func_arg = @as(*const fn (?*anyopaque, u32, [*c]u64) c_long, @ptrFromInt(183));
pub const get_func_ret = @as(*const fn (?*anyopaque, [*c]u64) c_long, @ptrFromInt(184));
pub const get_func_arg_cnt = @as(*const fn (?*anyopaque) c_long, @ptrFromInt(185));
pub const get_retval = @as(*const fn () c_int, @ptrFromInt(186));
pub const set_retval = @as(*const fn (c_int) c_int, @ptrFromInt(187));
pub const xdp_get_buff_len = @as(*const fn (?*kern.XdpMd) u64, @ptrFromInt(188));
pub const xdp_load_bytes = @as(*const fn (?*kern.XdpMd, u32, ?*anyopaque, u32) c_long, @ptrFromInt(189));
pub const xdp_store_bytes = @as(*const fn (?*kern.XdpMd, u32, ?*anyopaque, u32) c_long, @ptrFromInt(190));
pub const copy_from_user_task = @as(*const fn (?*anyopaque, u32, ?*const anyopaque, ?*kern.Task, u64) c_long, @ptrFromInt(191));
pub const skb_set_tstamp = @as(*const fn (?*kern.SkBuff, u64, u32) c_long, @ptrFromInt(192));
pub const ima_file_hash = @as(*const fn (?*kern.File, ?*anyopaque, u32) c_long, @ptrFromInt(193));
pub const kptr_xchg = @as(*const fn (?*anyopaque, ?*anyopaque) ?*anyopaque, @ptrFromInt(194));
pub const map_lookup_percpu_elem = @as(*const fn (?*anyopaque, ?*const anyopaque, u32) ?*anyopaque, @ptrFromInt(195));
pub const skc_to_mptcp_sock = @as(*const fn (?*anyopaque) ?*kern.MpTcpSock, @ptrFromInt(196));
pub const dynptr_from_mem = @as(*const fn (?*anyopaque, u32, u64, ?*kern.BpfDynPtr) c_long, @ptrFromInt(197));
pub const ringbuf_reserve_dynptr = @as(*const fn (?*anyopaque, u32, u64, ?*kern.BpfDynPtr) c_long, @ptrFromInt(198));
pub const ringbuf_submit_dynptr = @as(*const fn (?*kern.BpfDynPtr, u64) void, @ptrFromInt(199));
pub const ringbuf_discard_dynptr = @as(*const fn (?*kern.BpfDynPtr, u64) void, @ptrFromInt(200));
pub const dynptr_read = @as(*const fn (?*anyopaque, u32, ?*kern.BpfDynPtr, u32, u64) c_long, @ptrFromInt(201));
pub const dynptr_write = @as(*const fn (?*kern.BpfDynPtr, u32, ?*anyopaque, u32, u64) c_long, @ptrFromInt(202));
pub const dynptr_data = @as(*const fn (?*kern.BpfDynPtr, u32, u32) ?*anyopaque, @ptrFromInt(203));
pub const tcp_raw_gen_syncookie_ipv4 = @as(*const fn (?*kern.IpHdr, ?*TcpHdr, u32) i64, @ptrFromInt(204));
pub const tcp_raw_gen_syncookie_ipv6 = @as(*const fn (?*kern.Ipv6Hdr, ?*TcpHdr, u32) i64, @ptrFromInt(205));
pub const tcp_raw_check_syncookie_ipv4 = @as(*const fn (?*kern.IpHdr, ?*TcpHdr) c_long, @ptrFromInt(206));
pub const tcp_raw_check_syncookie_ipv6 = @as(*const fn (?*kern.Ipv6Hdr, ?*TcpHdr) c_long, @ptrFromInt(207));
pub const ktime_get_tai_ns = @as(*const fn () u64, @ptrFromInt(208));
pub const user_ringbuf_drain = @as(*const fn (?*anyopaque, ?*anyopaque, ?*anyopaque, u64) c_long, @ptrFromInt(209));
| https://raw.githubusercontent.com/ziglang/zig/d9bd34fd0533295044ffb4160da41f7873aff905/lib/std/os/linux/bpf/helpers.zig |
const std = @import("std");
const mem = std.mem;
const net = std.net;
const os = std.os;
const IO = @import("tigerbeetle-io").IO;
const http = @import("http");
/// Returns a small context type used to bridge a callback-based IO operation
/// into this file's suspend/resume style: the suspended frame is stored in
/// `frame`, and the operation's outcome (of type `ResultType`) in `result`.
/// NOTE: relies on the pre-0.11 async primitives (`anyframe`), so this file
/// targets an older Zig compiler.
fn IoOpContext(comptime ResultType: type) type {
    return struct {
        frame: anyframe = undefined,
        result: ResultType = undefined,
    };
}
/// A TCP echo client driven by the tigerbeetle-io event loop. Each blocking
/// step (connect/send/recv/close) is wrapped as a suspending function that
/// submits an IO operation, suspends the current async frame, and is resumed
/// from the operation's completion callback.
/// NOTE: uses `suspend`/`resume`/`@frame()` (pre-0.11 Zig async) and old-style
/// `std.os` constants, so this targets an older Zig release.
const Client = struct {
    io: IO,
    sock: os.socket_t,
    address: std.net.Address,
    send_buf: []u8,
    recv_buf: []u8,
    allocator: mem.Allocator,
    done: bool = false,
    /// Creates the socket, allocates 8 KiB send/receive buffers, and
    /// initializes the IO event loop (queue depth 256). Caller must `deinit`.
    fn init(allocator: mem.Allocator, address: std.net.Address) !Client {
        const sock = try os.socket(address.any.family, os.SOCK_STREAM | os.SOCK_CLOEXEC, 0);
        const send_buf = try allocator.alloc(u8, 8192);
        const recv_buf = try allocator.alloc(u8, 8192);
        return Client{
            .io = try IO.init(256, 0),
            .sock = sock,
            .address = address,
            .send_buf = send_buf,
            .recv_buf = recv_buf,
            .allocator = allocator,
        };
    }
    /// Frees the buffers and tears down the IO loop.
    /// NOTE(review): the socket itself is closed in `start`, not here; if
    /// `start` fails partway the fd may leak — confirm against IO.close semantics.
    pub fn deinit(self: *Client) void {
        self.allocator.free(self.send_buf);
        self.allocator.free(self.recv_buf);
        self.io.deinit();
    }
    /// The client's async body: connect, send a greeting, print what was
    /// sent/echoed back, close the socket, then flag completion so `run`
    /// stops ticking the event loop.
    pub fn start(self: *Client) !void {
        try connect(&self.io, self.sock, self.address);
        var fbs = std.io.fixedBufferStream(self.send_buf);
        var w = fbs.writer();
        std.fmt.format(w, "Hello from client!\n", .{}) catch unreachable;
        const sent = try send(&self.io, self.sock, fbs.getWritten());
        std.debug.print("Sent: {s}", .{self.send_buf[0..sent]});
        const received = try recv(&self.io, self.sock, self.recv_buf);
        std.debug.print("Received: {s}", .{self.recv_buf[0..received]});
        try close(&self.io, self.sock);
        self.done = true;
    }
    /// Drives the event loop until `start` has finished. Each tick processes
    /// completions, which resume the suspended operation frames.
    pub fn run(self: *Client) !void {
        while (!self.done) try self.io.tick();
    }
    const ConnectContext = IoOpContext(IO.ConnectError!void);
    /// Submits a connect and suspends; resumed by `connectCallback` with the result.
    fn connect(io: *IO, sock: os.socket_t, address: std.net.Address) IO.ConnectError!void {
        var ctx: ConnectContext = undefined;
        var completion: IO.Completion = undefined;
        io.connect(*ConnectContext, &ctx, connectCallback, &completion, sock, address);
        suspend {
            // Record our frame *inside* suspend so the callback can resume it.
            ctx.frame = @frame();
        }
        return ctx.result;
    }
    fn connectCallback(
        ctx: *ConnectContext,
        completion: *IO.Completion,
        result: IO.ConnectError!void,
    ) void {
        ctx.result = result;
        resume ctx.frame;
    }
    const SendContext = IoOpContext(IO.SendError!usize);
    /// Submits a send and suspends; returns the number of bytes sent.
    /// MSG_NOSIGNAL is passed on Linux to avoid SIGPIPE on a closed peer.
    fn send(io: *IO, sock: os.socket_t, buffer: []const u8) IO.SendError!usize {
        var ctx: SendContext = undefined;
        var completion: IO.Completion = undefined;
        io.send(
            *SendContext,
            &ctx,
            sendCallback,
            &completion,
            sock,
            buffer,
            if (std.Target.current.os.tag == .linux) os.MSG_NOSIGNAL else 0,
        );
        suspend {
            ctx.frame = @frame();
        }
        return ctx.result;
    }
    fn sendCallback(
        ctx: *SendContext,
        completion: *IO.Completion,
        result: IO.SendError!usize,
    ) void {
        ctx.result = result;
        resume ctx.frame;
    }
    const RecvContext = IoOpContext(IO.RecvError!usize);
    /// Submits a receive and suspends; returns the number of bytes read into `buffer`.
    fn recv(io: *IO, sock: os.socket_t, buffer: []u8) IO.RecvError!usize {
        var ctx: RecvContext = undefined;
        var completion: IO.Completion = undefined;
        io.recv(
            *RecvContext,
            &ctx,
            recvCallback,
            &completion,
            sock,
            buffer,
            if (std.Target.current.os.tag == .linux) os.MSG_NOSIGNAL else 0,
        );
        suspend {
            ctx.frame = @frame();
        }
        return ctx.result;
    }
    fn recvCallback(
        ctx: *RecvContext,
        completion: *IO.Completion,
        result: IO.RecvError!usize,
    ) void {
        ctx.result = result;
        resume ctx.frame;
    }
    const CloseContext = IoOpContext(IO.CloseError!void);
    /// Submits a close for the socket and suspends until it completes.
    fn close(io: *IO, sock: os.socket_t) IO.CloseError!void {
        var ctx: CloseContext = undefined;
        var completion: IO.Completion = undefined;
        io.close(
            *CloseContext,
            &ctx,
            closeCallback,
            &completion,
            sock,
        );
        suspend {
            ctx.frame = @frame();
        }
        return ctx.result;
    }
    fn closeCallback(
        ctx: *CloseContext,
        completion: *IO.Completion,
        result: IO.CloseError!void,
    ) void {
        ctx.result = result;
        resume ctx.frame;
    }
};
/// Entry point: connects a Client to 127.0.0.1:3131, kicks off its async
/// `start` body, and drives the IO event loop until it completes.
/// NOTE: uses the pre-0.11 `async` keyword; the started frame's handle is
/// intentionally discarded because `run` polls `client.done` instead.
pub fn main() anyerror!void {
    const allocator = std.heap.page_allocator;
    const address = try std.net.Address.parseIp4("127.0.0.1", 3131);
    var client = try Client.init(allocator, address);
    defer client.deinit();
    _ = async client.start();
    try client.run();
}
| https://raw.githubusercontent.com/hnakamur/tigerbeetle-io/d0369de079ea822935391cf247f1e42ae1778140/examples/async_tcp_echo_client.zig |
const std = @import("std");
const testing = std.testing;
/// A 26-way trie over lowercase ASCII words ('a'..'z' only; other bytes
/// cause out-of-range indexing). `T` is the per-node value type: each node
/// stores the character that leads to it, and the root holds a sentinel 'X'.
/// All nodes are allocated from an internal arena, so `deinit` frees the
/// whole structure at once.
pub fn Trie(comptime T: type) type {
    const TrieNode = struct {
        /// One slot per letter 'a'..'z'; null means no edge.
        children: [26]?*TN = [1]?*TN{null} ** 26,
        value: T,
        /// True when the path from the root to this node spells a stored word.
        is_word: bool = false,
        const TN = @This();
        pub fn init(alloc: std.mem.Allocator, value: T) !*TN {
            const tn = try alloc.create(TN);
            tn.* = TN{
                .value = value,
            };
            return tn;
        }
    };
    return struct {
        arena: std.heap.ArenaAllocator,
        root: *TrieNode,
        /// Number of distinct words stored.
        num_words: usize = 0,
        /// Number of nodes allocated (excluding the root).
        num_nodes: usize = 0,
        const Self = @This();
        /// `alloc` backs the internal arena; call `deinit` to release everything.
        pub fn init(alloc: std.mem.Allocator) !Self {
            var arena = std.heap.ArenaAllocator.init(alloc);
            const child_alloc = arena.allocator();
            return Self{
                .root = try TrieNode.init(child_alloc, 'X'),
                .arena = arena,
            };
        }
        pub fn deinit(self: *Self) void {
            self.arena.deinit();
        }
        /// Inserts `word` (lowercase ASCII). Re-inserting a word that is
        /// already present leaves `num_words` unchanged.
        pub fn insert_word(self: *Self, word: []const u8) !void {
            var current_node = self.root;
            const alloc = self.arena.allocator();
            for (word, 0..) |char, i| {
                const index = char - 'a';
                if (current_node.children[index]) |child| current_node = child else {
                    const new_node = try TrieNode.init(alloc, char);
                    current_node.children[index] = new_node;
                    current_node = new_node;
                    self.num_nodes += 1;
                }
                // Mark the terminal node, but only count genuinely new words.
                if (i == word.len - 1 and !current_node.is_word) {
                    current_node.is_word = true;
                    self.num_words += 1;
                }
            }
        }
        /// Depth-first print helper: places this node's value at `prefix[len]`,
        /// prints the accumulated prefix (skipping the root sentinel at [0])
        /// when a word ends here, then recurses into children.
        fn print_node(self: Self, node: *TrieNode, prefix: []u8, len: usize) void {
            prefix[len] = node.value;
            if (node.is_word) std.debug.print("{s}\n", .{prefix[1..]});
            for (node.children) |child| {
                if (child) |kid| self.print_node(kid, prefix, len + 1);
            }
            prefix[len] = 0;
        }
        /// Prints every stored word to stderr, one per line (words longer
        /// than 49 characters would overflow the scratch buffer).
        pub fn print_contents(self: Self) !void {
            var buffer = [_]u8{0} ** 50;
            self.print_node(self.root, &buffer, 0);
        }
        /// Returns true iff `word` was previously inserted.
        /// Fix: a missing edge now returns false immediately; previously the
        /// walk stalled on the last matched node, so e.g. "winee" was reported
        /// present whenever "wine" was stored.
        pub fn contains_word(self: Self, word: []const u8) bool {
            var current_node = self.root;
            for (word) |char| {
                const index = char - 'a';
                current_node = current_node.children[index] orelse return false;
            }
            // Empty input ends at the root, whose is_word is always false.
            return current_node.is_word;
        }
    };
}
test "Trie()" {
    // Build a small trie sharing the "wi" prefix and check membership.
    var trie = try Trie(u8).init(testing.allocator);
    defer trie.deinit();
    try trie.insert_word("wife");
    try trie.insert_word("wine");
    try trie.insert_word("wines");
    try trie.print_contents();
    try testing.expectEqual(true, trie.contains_word("wine"));
    try testing.expectEqual(false, trie.contains_word("tree"));
}
| https://raw.githubusercontent.com/tsunaminoai/zigtrie/8d419015af7a73ec34331c282e2c531f5f936620/src/root.zig |
const std = @import("std");
const ascii = std.ascii;
const mem = std.mem;
const expectEqual = std.testing.expectEqual;
test "reads number correctly" {
    // Two digits on one line: first digit is the tens, last is the ones.
    const input = "12";
    try expectEqual(@as(u32, 12), part1Solver(input));
}
test "single digit" {
    // A lone digit serves as both the first and last digit.
    const input = "9";
    try expectEqual(@as(u32, 99), part1Solver(input));
}
test "more than two digits" {
    // Only the first and last digits matter; the middle ones are ignored.
    const input = "1999991";
    try expectEqual(@as(u32, 11), part1Solver(input));
}
test "skip non digits" {
    // Letters surrounding the digits must not affect the result.
    const input = "ab3cdef7ghi";
    try expectEqual(@as(u32, 37), part1Solver(input));
}
test "complex multiline testcase" {
    // The AoC 2023 day 1 sample: 12 + 38 + 15 + 77 = 142.
    const input =
        \\1abc2
        \\pqr3stu8vwx
        \\a1b2c3d4e5f
        \\treb7uchet
    ;
    try expectEqual(@as(u32, 142), part1Solver(input));
}
/// Sums the two-digit "calibration values" of every line in `text`:
/// each line contributes 10 * (its first ASCII digit) + (its last digit).
/// A line with no digits contributes 0; empty lines are skipped entirely.
pub fn part1Solver(text: []const u8) u32 {
    var total: u32 = 0;
    var lines = mem.tokenizeScalar(u8, text, '\n');
    while (lines.next()) |line| {
        // null until the first digit is seen; the last digit defaults to 0.
        var first_digit: ?u32 = null;
        var last_digit: u32 = 0;
        for (line) |c| {
            if (c >= '0' and c <= '9') {
                const digit = c - '0';
                if (first_digit == null) first_digit = digit;
                last_digit = digit;
            }
        }
        total += (first_digit orelse 0) * 10 + last_digit;
    }
    return total;
}
| https://raw.githubusercontent.com/arntj/advent-of-code/d6a234e5bf9dd22ed10e7e215b3c262edfee61ff/2023/01/part1.zig |
const tensor = @import("../tensor.zig");
/// Computes the result shape of multiplying a `t1_shape` (m x k) matrix by a
/// `t2_shape` (k x n) matrix, failing compilation when the inner dimensions
/// do not agree. The product of (m x k) and (k x n) is (m x n), i.e.
/// {t1_shape[0], t2_shape[1]}.
/// Fix: the previous code returned {t2_shape[1], t2_shape[0]} = {n, k},
/// which incorrectly reused the contracted dimension k in the output.
fn buildMatMulShape(comptime t1_shape: []const u8, comptime t2_shape: []const u8) [2]u8 {
    comptime {
        // Inner dimensions must match: columns of t1 == rows of t2.
        if (t1_shape[1] != t2_shape[0]) {
            @panic("Cannot matrix multiply tensors with the current shape");
        }
    }
    const shape = [2]u8{ t1_shape[0], t2_shape[1] };
    return shape;
}
pub fn matrixMultiply(t1: anytype, t2: anytype) tensor.TensorBuilder(&buildMatMulShape(&@TypeOf(t1).shape, &@TypeOf(t2).shape), @TypeOf(t1).Type) {
return tensor.TensorBuilder(&buildMatMulShape(&@TypeOf(t1).shape, &@TypeOf(t2).shape), @TypeOf(t1).Type).init(&[_]f32{ 1, 2, 3, 4, 5, 6 });
}
| https://raw.githubusercontent.com/SilasMarvin/dfdz/873f486a6a243bbe9263354f2617fda947486198/src/tensor_operations/matrixMultiply.zig |
const std = @import("std");
const console = @import("console");
const Token = @import("Token.zig");
const Error = @import("Error.zig");
const Source = @import("Source.zig");
const Ast = @import("Ast.zig");
const Compiler = @import("Compiler");
const Parser = @This();

// Allocator for long-lived parser state (errors, AST, memo tables).
gpa: std.mem.Allocator,
// Scratch allocator; presumably reset between parses — TODO confirm at call sites.
temp_arena: std.mem.Allocator,
// Errors recorded while parsing the current source.
errors: std.ArrayListUnmanaged(Error) = .{},
// Handle of the source currently being parsed.
source_handle: Source.Handle = 0,
// Kind of each token produced by the lexer, indexed by Token.Handle.
token_kinds: []Token.Kind = &.{},
// Cursor: index of the next token to consume.
next_token: Token.Handle = 0,
// Flat AST node storage, indexed by Ast.Handle.
ast: std.ArrayListUnmanaged(Ast) = .{},
// Root of the parsed module (a declaration list), or null when empty/not yet parsed.
module: ?Ast.Handle = null,
// Packrat-style memo tables keyed by the token position (and parse options)
// where a parse attempt started, caching its result and resume position.
expression_memos: std.AutoHashMapUnmanaged(ExpressionMemoKey, AstMemo) = .{},
expression_list_memos: std.AutoHashMapUnmanaged(ExpressionListMemoKey, AstMemo) = .{},
field_init_list_memos: std.AutoHashMapUnmanaged(Token.Handle, AstMemo) = .{},
decl_list_memos: std.AutoHashMapUnmanaged(Token.Handle, AstMemo) = .{},
// Memo key: where an expression parse began plus the options it was parsed with.
const ExpressionMemoKey = struct {
    token_handle: Token.Handle,
    options: ExpressionOptions,
};
const ExpressionListMemoKey = struct {
    token_handle: Token.Handle,
    options: ExpressionListOptions,
};
// Memoized outcome of a parse attempt: the AST node produced (null on
// failure) and the token cursor position to resume from.
const AstMemo = struct {
    ast_handle: ?Ast.Handle,
    next_token: Token.Handle,
};
// Raised to unwind to a synchronization point after a parse error.
const SyncError = error{Sync};
/// Creates an empty Parser; all list/map state starts empty and is populated
/// by `parse`. Call `deinit` to release it.
pub fn init(gpa: std.mem.Allocator, temp_arena: std.mem.Allocator) Parser {
    return .{
        .gpa = gpa,
        .temp_arena = temp_arena,
    };
}
/// Frees all gpa-backed parser state (memo tables, AST storage, errors).
/// Memory from `temp_arena` is not touched here; its owner releases it.
pub fn deinit(self: *Parser) void {
    self.decl_list_memos.deinit(self.gpa);
    self.field_init_list_memos.deinit(self.gpa);
    self.expression_list_memos.deinit(self.gpa);
    self.expression_memos.deinit(self.gpa);
    self.ast.deinit(self.gpa);
    self.errors.deinit(self.gpa);
}
/// Parses one source file's token stream into `self.module`.
/// Resets all per-parse state (errors, AST, memo tables) so the Parser can be
/// reused across sources, then repeatedly parses top-level declarations until
/// EOF. On a parse error it records diagnostics and resynchronizes at the
/// next newline so later declarations can still be parsed.
pub fn parse(self: *Parser, source_handle: Source.Handle, token_kinds: []Token.Kind) void {
    self.errors.clearRetainingCapacity();
    self.ast.clearRetainingCapacity();
    self.module = null;
    self.expression_memos.clearRetainingCapacity();
    self.expression_list_memos.clearRetainingCapacity();
    self.field_init_list_memos.clearRetainingCapacity();
    self.decl_list_memos.clearRetainingCapacity();
    self.source_handle = source_handle;
    self.token_kinds = token_kinds;
    self.next_token = 0;
    while (!self.tryToken(.eof)) {
        const decl_start_token = self.next_token;
        // SyncError from a failed declaration: skip to end of line and retry.
        const maybe_decl = self.tryDeclaration(.{ .allow_anonymous = true }) catch {
            self.syncPastToken(.newline);
            continue;
        };
        if (maybe_decl) |decl_handle| {
            // A declaration must be terminated by a newline; otherwise report
            // the trailing junk and resynchronize.
            if (!self.tryNewline()) {
                self.recordErrorAbs("Failed to parse declaration", self.ast.items[decl_handle].token_handle, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
                self.recordError("Expected end of line", .{});
                self.syncPastToken(.newline);
                continue;
            }
            // Chain declarations into a left-leaning list rooted at `module`.
            if (self.module) |prev| {
                self.module = self.addBinary(.list, decl_start_token, prev, decl_handle);
            } else {
                self.module = decl_handle;
            }
        } else if (self.tryNewline()) {
            // Blank line between declarations: nothing to do.
            continue;
        } else {
            // Not a declaration and not a blank line: report the offending
            // span, splitting the message in two when the span is long
            // (more than ~50 tokens) to keep the primary error readable.
            self.skipLinespace();
            const first = self.next_token;
            self.syncPastToken(.newline);
            var last = self.backtrackToken(self.next_token, .{});
            if (first + 50 < last) {
                self.recordErrorAbs("Expected a declaration", first, Error.FlagSet.initOne(.has_continuation));
                self.recordErrorAbs("End of declaration/assignment/expression", last, Error.FlagSet.initOne(.supplemental));
            } else {
                self.recordErrorAbsRange("Expected a declaration", .{
                    .first = first,
                    .last = last,
                }, .{});
            }
        }
    }
}
/// Try to parse a declaration. Accepted shapes (as evidenced below):
///   name : type                      -> variable, empty initializer
///   name : type = init               -> variable
///   name : type : init               -> constant
///   name := init                     -> variable, inferred type
///   name :: init                     -> constant, inferred type
///   name : mut [= init]              -> mut-inferred type
///   .name : type [= init]            -> struct field declaration
/// Returns null (cursor restored) when the lookahead is not a declaration at
/// all; returns error.Sync after recording diagnostics when it is one but is
/// malformed.
/// NOTE(review): `parse` calls this with `.{ .allow_anonymous = true }`, which
/// this signature does not accept — confirm the intended signature.
fn tryDeclaration(self: *Parser) SyncError!?Ast.Handle {
    const start_token = self.next_token;
    self.skipLinespace();
    // A declaration begins with an identifier, a symbol, or a bare `.`
    // (anonymous/field form, detected later via the token before the name).
    const name_token_handle = self.tryIdentifier() orelse self.trySymbol() orelse blk: {
        if (self.tryToken(.dot)) {
            break :blk self.next_token - 1;
        }
        self.next_token = start_token;
        return null;
    };
    self.skipLinespace();
    if (!self.tryToken(.colon)) {
        // No ':' after the name: this is some other expression, not a
        // declaration. Restore the cursor and bail out quietly.
        self.next_token = start_token;
        return null;
    }
    var ast_kind: Ast.Kind = undefined;
    var type_expr_handle: Ast.Handle = undefined;
    var init_expr_handle: Ast.Handle = undefined;
    var has_init = false;
    self.skipLinespace();
    if (name_token_handle > 0 and self.token_kinds[name_token_handle - 1] == .dot) {
        // `.name : type` — struct field declaration; the type is mandatory.
        ast_kind = .struct_field_declaration;
        if (try self.tryExpression(.{})) |type_expr| {
            type_expr_handle = type_expr;
        } else {
            self.recordErrorAbs("Failed to parse field declaration", name_token_handle, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
            self.recordError("Expected type expression", .{});
            return error.Sync;
        }
        self.skipLinespace();
        if (self.tryToken(.eql)) {
            has_init = true;
        }
    } else if (self.tryToken(.colon)) {
        // `name :: init` — constant with inferred type.
        type_expr_handle = self.addTerminal(.inferred_type, self.next_token - 1);
        ast_kind = .constant_declaration;
        has_init = true;
    } else if (self.tryToken(.eql)) {
        // `name := init` — variable with inferred type.
        type_expr_handle = self.addTerminal(.inferred_type, self.next_token - 1);
        ast_kind = .variable_declaration;
        has_init = true;
    } else {
        // `name : type ...` — explicit type expression (calls disallowed so a
        // following `'`-call isn't swallowed) or the `mut` keyword.
        if (try self.tryExpression(.{ .allow_calls = false })) |type_expr| {
            type_expr_handle = type_expr;
        } else if (self.tryToken(.kw_mut)) {
            type_expr_handle = self.addTerminal(.mut_inferred_type, self.next_token - 1);
        } else {
            self.recordErrorAbs("Failed to parse declaration", name_token_handle, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
            self.recordError("Expected type expression or ':' or '=' followed by initializer expression", .{});
            return error.Sync;
        }
        self.skipLinespace();
        // A trailing `:` makes it a constant, `=` a variable; with neither,
        // it is a variable with no initializer.
        if (self.tryToken(.colon)) {
            ast_kind = .constant_declaration;
            has_init = true;
        } else if (self.tryToken(.eql)) {
            ast_kind = .variable_declaration;
            has_init = true;
        } else {
            ast_kind = .variable_declaration;
        }
    }
    if (has_init) {
        self.skipLinespace();
        init_expr_handle = try self.tryExpression(.{}) orelse {
            self.recordErrorAbs("Failed to parse declaration", name_token_handle, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
            self.recordError("Expected initializer expression", .{});
            return error.Sync;
        };
    } else {
        // No initializer: represent it with an `.empty` placeholder node.
        init_expr_handle = self.addTerminal(.empty, name_token_handle);
    }
    return self.addBinary(ast_kind, name_token_handle, type_expr_handle, init_expr_handle);
}
/// FIXME(review): this function is unfinished. It is declared to return
/// `SyncError!?Ast.Handle` but no code path returns a value (falling off the
/// end of a non-void function is a compile error); `field_ids_list` is parsed
/// and then dropped; `name_token` is explicitly discarded. It also passes
/// `.{ .multi_line = false }` to `expressionList`, whose options struct below
/// declares the field `allow_multi_line` — one of the two must be stale.
/// Left byte-identical pending the intended design.
fn tryUnionFieldDeclaration(self: *Parser) SyncError!?Ast.Handle {
    const start_token = self.next_token;
    // Optional `id0, id1, ... =>` prefix ahead of the field declaration.
    const field_ids_list = try self.expressionList(.{ .multi_line = false });
    if (self.ast.items[field_ids_list].info == .empty) {
        self.next_token = start_token;
    } else {
        self.skipLinespace();
        if (!self.tryToken(.thick_arrow)) {
            self.next_token = start_token;
        }
    }
    if (self.tryDeclarationName()) |name_token| {
        _ = name_token;
        self.skipLinespace();
        if (self.tryToken(.colon)) {
            self.skipLinespace();
        }
    }
}
// Accepted forms (per the original author's note):
//   .symbol
//   .symbol : type_expr
//   . : type_expr
/// FIXME(review): this function appears to contain copy-pasted remnants of
/// `tryDeclaration`. From the `else if (self.tryToken(.dot))` branch onward it
/// references identifiers that are never declared in this scope
/// (`type_expr_handle`, `name_handle`, `token_handle`, `ast_kind`, `has_init`,
/// `init_expr_handle`), so it cannot compile as written; the `maybe_id_list`
/// parameter is also never read. Left byte-identical pending the intended
/// design.
fn tryUnionFieldDeclarationWithoutID(self: *Parser, maybe_id_list: ?Ast.Handle) SyncError!?Ast.Handle {
    const start_token = self.next_token;
    self.skipLinespace();
    if (self.trySymbol() orelse self.tryIdentifier()) |name_handle| {
        self.skipLinespace();
        if (self.tryToken(.colon)) {
            // `.symbol : type_expr`
            self.skipLinespace();
            const type_expr_handle = (try self.tryExpression(.{})) orelse {
                self.recordErrorAbs("Failed to parse field declaration", name_handle, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
                self.recordError("Expected type expression", .{});
                return error.Sync;
            };
            const id_list = self.addTerminal(.empty, start_token);
            return self.addBinary(.union_field_declaration, name_handle, id_list, type_expr_handle);
        } else {
            // Bare `.symbol` — type is inferred.
            const id_list = self.addTerminal(.empty, start_token);
            const type_expr_handle = self.addTerminal(.inferred_type, start_token);
            return self.addBinary(.union_field_declaration, name_handle, id_list, type_expr_handle);
        }
    } else if (self.tryToken(.dot)) {
        self.skipLinespace();
        if (self.tryToken(.colon)) {
            // FIXME(review): `type_expr_handle` and `name_handle` are not in
            // scope in this branch.
            if (try self.tryExpression(.{})) |type_expr| {
                type_expr_handle = type_expr;
            } else {
                self.recordErrorAbs("Failed to parse field declaration", name_handle, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
                self.recordError("Expected type expression", .{});
                return error.Sync;
            }
        }
    }
    // FIXME(review): everything below duplicates `tryDeclaration` logic with
    // undeclared locals (`token_handle`, `ast_kind`, `has_init`,
    // `init_expr_handle`, `type_expr_handle`).
    if (token_handle > 0 and self.token_kinds[token_handle - 1] == .dot) {
        self.skipLinespace();
        if (self.tryToken(.eql)) {
            has_init = true;
        }
    } else {
        if (try self.tryExpression(.{ .allow_calls = false })) |type_expr| {
            type_expr_handle = type_expr;
        } else if (self.tryToken(.kw_mut)) {
            type_expr_handle = self.addTerminal(.mut_inferred_type, self.next_token - 1);
        } else {
            self.recordErrorAbs("Failed to parse declaration", token_handle, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
            self.recordError("Expected type expression or ':' or '=' followed by initializer expression", .{});
            return error.Sync;
        }
        self.skipLinespace();
        if (self.tryToken(.colon)) {
            ast_kind = .constant_declaration;
            has_init = true;
        } else if (self.tryToken(.eql)) {
            ast_kind = .variable_declaration;
            has_init = true;
        } else {
            ast_kind = .variable_declaration;
        }
    }
    if (has_init) {
        self.skipLinespace();
        init_expr_handle = try self.tryExpression(.{}) orelse {
            self.recordErrorAbs("Failed to parse declaration", token_handle, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
            self.recordError("Expected initializer expression", .{});
            return error.Sync;
        };
    } else {
        init_expr_handle = self.addTerminal(.empty, token_handle);
    }
    return self.addBinary(ast_kind, token_handle, type_expr_handle, init_expr_handle);
}
/// Parse a comma-separated list of declarations, memoized per start token in
/// `decl_list_memos`. Returns null (memoized as such) when no declaration is
/// present at the cursor.
/// NOTE(review): after a comma this calls `skipWhitespace` (which, judging by
/// its use in multi-line contexts elsewhere in this file, also crosses
/// newlines) — that seems at odds with the "single line" name. Confirm intent.
fn trySingleLineDeclarationList(self: *Parser) SyncError!?Ast.Handle {
    self.skipLinespace();
    const token_handle = self.next_token;
    if (self.decl_list_memos.get(token_handle)) |memo| {
        self.next_token = memo.next_token;
        return memo.ast_handle;
    }
    var maybe_list: ?Ast.Handle = null;
    while (true) {
        const start_of_decl = self.next_token;
        const decl = try self.tryDeclaration() orelse break;
        // Fold declarations into a left-leaning `.list` chain.
        if (maybe_list) |list| {
            maybe_list = self.addBinary(.list, start_of_decl, list, decl);
        } else {
            maybe_list = decl;
        }
        if (self.tryToken(.comma)) {
            self.skipWhitespace();
        } else {
            break;
        }
    }
    self.decl_list_memos.put(self.gpa, token_handle, .{
        .next_token = self.next_token,
        .ast_handle = maybe_list,
    }) catch @panic("OOM");
    return maybe_list;
}
/// Parse a comma-separated list of expressions on a single line, folding the
/// items into a left-leaning `.list` chain. Returns null when no expression is
/// present at the cursor.
///
/// Fix: the previous implementation was a verbatim copy of
/// `trySingleLineDeclarationList` — it parsed *declarations* via
/// `tryDeclaration` and shared the `decl_list_memos` table, which both
/// contradicts this function's name and could return a memoized declaration
/// list for a position where an expression list was requested. It now parses
/// expressions, skips only linespace after commas (consistent with the
/// single-line mode of `expressionList`), and relies on `tryExpression`'s own
/// per-token memoization instead of a colliding list-level memo table.
fn trySingleLineExpressionList(self: *Parser) SyncError!?Ast.Handle {
    self.skipLinespace();
    var maybe_list: ?Ast.Handle = null;
    while (true) {
        const start_of_expr = self.next_token;
        const expr = (try self.tryExpression(.{})) orelse break;
        if (maybe_list) |list| {
            maybe_list = self.addBinary(.list, start_of_expr, list, expr);
        } else {
            maybe_list = expr;
        }
        if (self.tryToken(.comma)) {
            self.skipLinespace();
        } else {
            break;
        }
    }
    return maybe_list;
}
/// Parse a (possibly multi-line) list of field initializers for a struct or
/// union literal, where each item is an assignment or a plain expression.
/// Always yields a node — an `.empty` terminal when there are no items.
/// Results are memoized per start token in `field_init_list_memos`.
fn fieldInitList(self: *Parser) SyncError!Ast.Handle {
    self.skipWhitespace();
    const first_token = self.next_token;
    if (self.field_init_list_memos.get(first_token)) |memo| {
        self.next_token = memo.next_token;
        return memo.ast_handle.?;
    }
    var accumulated: ?Ast.Handle = null;
    collect: while (true) {
        const item_token = self.next_token;
        const item = (try self.tryAssignmentOrExpression()) orelse break :collect;
        accumulated = if (accumulated) |prev|
            self.addBinary(.list, item_token, prev, item)
        else
            item;
        // Items are separated by commas and/or newlines.
        if (!(self.tryToken(.comma) or self.tryNewline())) break :collect;
        self.skipWhitespace();
    }
    const list = accumulated orelse self.addTerminal(.empty, first_token);
    self.field_init_list_memos.put(self.gpa, first_token, .{
        .next_token = self.next_token,
        .ast_handle = list,
    }) catch @panic("OOM");
    return list;
}
/// Options for `expressionList`. When `allow_multi_line` is set, newlines act
/// as item separators alongside commas; otherwise the list stays on one line.
/// NOTE(review): `tryUnionFieldDeclaration` above passes `.multi_line` —
/// confirm which field name is current.
const ExpressionListOptions = struct {
    allow_multi_line: bool = true,
};
/// Parse a comma- (and, when `allow_multi_line`, newline-) separated list of
/// expressions, folded into a left-leaning `.list` chain. Always yields a
/// node — an `.empty` terminal when no expression is present. Results are
/// memoized per (start token, options) in `expression_list_memos`.
fn expressionList(self: *Parser, options: ExpressionListOptions) SyncError!Ast.Handle {
    if (options.allow_multi_line) {
        self.skipWhitespace();
    } else {
        self.skipLinespace();
    }
    // Fix: the memo key was previously constructed as
    // `ExpressionListOptions{ .token_handle = ..., .options = ... }`, but
    // those fields belong to `ExpressionListMemoKey` (ExpressionListOptions
    // only has `allow_multi_line`), making this a compile error.
    const key = ExpressionListMemoKey{
        .token_handle = self.next_token,
        .options = options,
    };
    const token_handle = self.next_token;
    if (self.expression_list_memos.get(key)) |memo| {
        self.next_token = memo.next_token;
        return memo.ast_handle.?;
    }
    var maybe_list: ?Ast.Handle = null;
    while (true) {
        const item_token_handle = self.next_token;
        const ast_handle = (try self.tryExpression(.{})) orelse break;
        // Fold items into a left-leaning `.list` chain.
        if (maybe_list) |list| {
            maybe_list = self.addBinary(.list, item_token_handle, list, ast_handle);
        } else {
            maybe_list = ast_handle;
        }
        if (options.allow_multi_line) {
            if (self.tryToken(.comma) or self.tryNewline()) {
                self.skipWhitespace();
            } else {
                break;
            }
        } else {
            if (self.tryToken(.comma)) {
                self.skipLinespace();
            } else {
                break;
            }
        }
    }
    const list = maybe_list orelse self.addTerminal(.empty, token_handle);
    self.expression_list_memos.put(self.gpa, key, .{
        .next_token = self.next_token,
        .ast_handle = list,
    }) catch @panic("OOM");
    return list;
}
/// Parse one statement: a `defer`/`errordefer` expression, or otherwise an
/// assignment-or-expression. Returns null when nothing statement-like starts
/// at the cursor.
fn tryStatement(self: *Parser) SyncError!?Ast.Handle {
    const first_token = self.next_token;
    if (self.tryToken(.kw_defer)) {
        self.skipLinespace();
        const expr_handle = (try self.tryExpression(.{})) orelse {
            self.recordErrorAbs("Failed to parse defer expression", first_token, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
            self.recordError("Expected expression", .{});
            return error.Sync;
        };
        return self.addUnary(.defer_expr, first_token, expr_handle);
    }
    if (self.tryToken(.kw_errordefer)) {
        self.skipLinespace();
        const expr_handle = (try self.tryExpression(.{})) orelse {
            self.recordErrorAbs("Failed to parse errordefer expression", first_token, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
            self.recordError("Expected expression", .{});
            return error.Sync;
        };
        return self.addUnary(.errordefer_expr, first_token, expr_handle);
    }
    return self.tryAssignmentOrExpression();
}
/// Parse an expression, optionally followed by `= rhs` to form an
/// `.assignment` node. Returns null when no expression starts at the cursor.
fn tryAssignmentOrExpression(self: *Parser) SyncError!?Ast.Handle {
    const lhs_handle = (try self.tryExpression(.{})) orelse return null;
    self.skipLinespace();
    const eql_token = self.next_token;
    if (!self.tryToken(.eql)) return lhs_handle;
    const rhs_handle = (try self.tryExpression(.{})) orelse {
        self.recordErrorAbs("Failed to parse assignment", eql_token, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
        self.recordError("Expected expression", .{});
        return error.Sync;
    };
    return self.addBinary(.assignment, eql_token, lhs_handle, rhs_handle);
}
/// Options threaded through expression parsing. `allow_calls = false`
/// suppresses the `'` call operators (see `tryOperator`), e.g. while parsing
/// a declaration's type expression.
const ExpressionOptions = struct {
    allow_calls: bool = true,
};
/// Memoized entry point for expression parsing: consult `expression_memos`
/// for this (position, options) pair, otherwise run the Pratt parser from
/// binding power 0 and record the outcome (including null misses).
fn tryExpression(self: *Parser, options: ExpressionOptions) SyncError!?Ast.Handle {
    const memo_key: ExpressionMemoKey = .{
        .token_handle = self.next_token,
        .options = options,
    };
    if (self.expression_memos.get(memo_key)) |hit| {
        self.next_token = hit.next_token;
        return hit.ast_handle;
    }
    const result = try self.tryExpressionPratt(0, options);
    self.expression_memos.put(self.gpa, memo_key, .{
        .next_token = self.next_token,
        .ast_handle = result,
    }) catch @panic("OOM");
    return result;
}
/// Description of a (prefix or infix/suffix) operator produced by
/// `tryPrefixOperator`/`tryOperator` and consumed by `tryExpressionPratt`.
const OperatorInfo = struct {
    /// Token that introduced the operator; becomes the node's token.
    token: Token.Handle,
    // If used, this should be a memoized node to ensure that if the operator is rejected, it won't leak
    other: ?Ast.Handle,
    /// AST node kind to build for this operator.
    kind: Ast.Kind,
    // For prefix operators, this should usually be set to 0xFF.
    left_bp: u8,
    // When null, this must be used as a suffix operator, otherwise it can be a binary operator.
    right_bp: ?u8,
    // When both right_bp and alt_when_suffix are non-null, this may be either an infix or a suffix operator.
    // When it functions as a suffix, these override the values from the outer struct:
    alt_when_suffix: ?struct {
        left_bp: u8,
        kind: Ast.Kind,
    },
};
/// Try to recognize a prefix operator at the cursor and return its
/// `OperatorInfo`, or null (cursor restored) if the next token is not one.
/// The base binding power depends on spacing: linespace after the operator
/// token lowers it (0x40 vs 0xC0), so tightly-bound operators bind tighter.
/// Several keyword operators (`if`, `while`, `for`, ...) also parse their
/// condition/declaration payload here into `info.other`.
fn tryPrefixOperator(self: *Parser, options: ExpressionOptions) SyncError!?OperatorInfo {
    const begin = self.next_token;
    self.skipLinespace();
    const t = self.next_token;
    const kind = self.token_kinds[t];
    if (kind == .eof) {
        self.next_token = begin;
        return null;
    }
    self.next_token += 1;
    // Whitespace-sensitive precedence: a space after the operator weakens it.
    const base_bp: u8 = if (self.tryLinespace()) 0x40 else 0xC0;
    var info = OperatorInfo{
        .token = t,
        .other = null,
        .kind = undefined,
        .left_bp = 0xFF,
        .right_bp = base_bp + 1,
        .alt_when_suffix = null,
    };
    switch (kind) {
        .kw_if => {
            info.right_bp = base_bp - 0x3A;
            info.kind = .if_expr;
            // TODO handle optional unwrapping lists
            info.other = (try self.tryExpression(options)) orelse {
                self.recordErrorAbs("Failed to parse if expression", t, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
                self.recordError("Expected condition expression", .{});
                return error.Sync;
            };
        },
        .kw_while => {
            info.right_bp = base_bp - 0x3A;
            info.kind = .while_expr;
            // TODO handle optional unwrapping lists
            info.other = (try self.tryExpression(options)) orelse {
                self.recordErrorAbs("Failed to parse while expression", t, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
                self.recordError("Expected condition expression", .{});
                return error.Sync;
            };
        },
        .kw_until => {
            info.right_bp = base_bp - 0x3A;
            info.kind = .until_expr;
            info.other = (try self.tryExpression(options)) orelse {
                self.recordErrorAbs("Failed to parse until expression", t, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
                self.recordError("Expected condition expression", .{});
                return error.Sync;
            };
        },
        .kw_repeat => {
            info.right_bp = base_bp - 0x3A;
            // `repeat body while cond` / `repeat body until cond` /
            // `repeat body` (infinite; cursor rewound so the body is re-parsed
            // as this operator's operand).
            if (try self.tryExpression(options)) |loop_expr_handle| {
                self.skipLinespace();
                if (self.tryToken(.kw_while)) {
                    info.other = loop_expr_handle;
                    info.kind = .repeat_while;
                } else if (self.tryToken(.kw_until)) {
                    info.other = loop_expr_handle;
                    info.kind = .repeat_until;
                } else {
                    info.kind = .repeat_infinite;
                    self.next_token = t + 1;
                }
            } else {
                self.recordErrorAbs("Failed to parse repeat expression", t, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
                self.recordError("Expected loop expression", .{});
                return error.Sync;
            }
        },
        .kw_with => {
            info.right_bp = base_bp - 0x3A;
            info.kind = if (self.tryToken(.kw_only)) .with_only else .with_expr;
            // Comma-separated declaration list, folded into `.other`.
            while (true) {
                self.skipLinespace();
                const token_handle = self.next_token;
                const decl_handle = (try self.tryDeclaration()) orelse break;
                if (info.other) |list| {
                    info.other = self.addBinary(.list, token_handle, list, decl_handle);
                } else {
                    info.other = decl_handle;
                }
                if (self.tryToken(.comma)) {
                    self.skipWhitespace();
                } else {
                    break;
                }
            }
            if (info.other == null) {
                self.recordErrorAbs("Failed to parse with-scope expression", t, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
                self.recordError("Expected at least one declaration", .{});
                return error.Sync;
            }
        },
        .kw_for => {
            info.right_bp = base_bp - 0x3A;
            // TODO handle @rev
            info.kind = .for_expr;
            // Comma-separated declaration list, folded into `.other`.
            while (true) {
                self.skipLinespace();
                const token_handle = self.next_token;
                const decl_handle = (try self.tryDeclaration()) orelse break;
                if (info.other) |list| {
                    info.other = self.addBinary(.list, token_handle, list, decl_handle);
                } else {
                    info.other = decl_handle;
                }
                if (self.tryToken(.comma)) {
                    self.skipWhitespace();
                } else {
                    break;
                }
            }
            if (info.other == null) {
                self.recordErrorAbs("Failed to parse for expression", t, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
                self.recordError("Expected at least one declaration", .{});
                return error.Sync;
            }
        },
        // Simple prefix operators: only the binding-power offset and node
        // kind differ.
        .kw_return => { info.right_bp = base_bp - 0x38; info.kind = .return_expr; },
        .kw_break => { info.right_bp = base_bp - 0x38; info.kind = .break_expr; },
        .kw_try => { info.right_bp = base_bp - 0x30; info.kind = .try_expr; },
        .kw_not => { info.right_bp = base_bp - 0x05; info.kind = .logical_not; },
        .kw_mut => { info.kind = .mut_type; },
        .kw_distinct => { info.kind = .distinct_type; },
        .kw_error => { info.kind = .error_type; },
        .tilde => { info.right_bp = base_bp + 0x2D; info.kind = .range_expr_infer_start_exclusive_end; },
        .tilde_tilde => { info.right_bp = base_bp + 0x2D; info.kind = .range_expr_infer_start_inclusive_end; },
        .question => { info.right_bp = base_bp + 0x3D; info.kind = .optional_type; },
        .star => { info.right_bp = base_bp + 0x3D; info.kind = .make_pointer; },
        .dash => { info.right_bp = base_bp + 0x3D; info.kind = .negate; },
        .index_open => {
            // `[len]T` array type or `[]T` slice type, depending on whether a
            // length expression is present.
            info.right_bp = base_bp + 0x3D;
            const index_expr = try self.tryExpression(.{});
            info.kind = if (index_expr) |_| .array_type else .slice_type;
            info.other = index_expr;
            self.skipLinespace();
            try self.closeRegion(t, .index_close, "]", "array/slice type prefix");
        },
        else => {
            self.next_token = begin;
            return null;
        },
    }
    return info;
}
/// Try to recognize an infix/suffix operator at the cursor and return its
/// `OperatorInfo`, or null (cursor restored) if the next token is not one.
/// Binding powers are whitespace-sensitive: linespace before/after the token
/// selects a lower base (0x40) for that side than tight spacing (0xC0).
/// Postfix constructs (literals, indexing) parse their payload here into
/// `info.other` and set `right_bp = null` so they act as suffixes.
fn tryOperator(self: *Parser, options: ExpressionOptions) SyncError!?OperatorInfo {
    const begin = self.next_token;
    const linespace_before = self.tryLinespace();
    const t = self.next_token;
    const kind = self.token_kinds[t];
    if (kind == .eof) {
        self.next_token = begin;
        return null;
    }
    self.next_token += 1;
    const linespace_after = self.tryLinespace();
    const left_base: u8 = if (linespace_before) 0x40 else 0xC0;
    const right_base: u8 = if (linespace_after) 0x40 else 0xC0;
    var info = OperatorInfo{
        .token = t,
        .other = null,
        .kind = undefined,
        .left_bp = left_base,
        .right_bp = right_base + 1,
        .alt_when_suffix = null,
    };
    switch (kind) {
        .octothorpe => { info.left_bp = left_base - 0x3F; info.right_bp = right_base - 0x3E; info.kind = .apply_tag; },
        .kw_else => { info.left_bp = left_base - 0x3B; info.right_bp = right_base - 0x3A; info.kind = .coalesce; },
        .kw_catch => { info.left_bp = left_base - 0x3B; info.right_bp = right_base - 0x3A; info.kind = .catch_expr; },
        .bar => { info.left_bp = left_base - 0x13; info.right_bp = right_base - 0x12; info.kind = .type_sum_operator; },
        .amp => { info.left_bp = left_base - 0x11; info.right_bp = right_base - 0x10; info.kind = .type_product_operator; },
        .kw_or => { info.left_bp = left_base - 0x0A; info.right_bp = right_base - 0x09; info.kind = .logical_or; },
        .kw_and => { info.left_bp = left_base - 0x08; info.right_bp = right_base - 0x07; info.kind = .logical_and; },
        .eql_eql => { info.left_bp = left_base - 0x04; info.right_bp = right_base - 0x03; info.kind = .test_equal; },
        .diamond => { info.left_bp = left_base - 0x04; info.right_bp = right_base - 0x03; info.kind = .test_inequal; },
        .lt => { info.left_bp = left_base - 0x02; info.right_bp = right_base - 0x01; info.kind = .test_less_than; },
        .gt => { info.left_bp = left_base - 0x02; info.right_bp = right_base - 0x01; info.kind = .test_greater_than; },
        .lt_eql => { info.left_bp = left_base - 0x02; info.right_bp = right_base - 0x01; info.kind = .test_less_than_or_equal; },
        .gt_eql => { info.left_bp = left_base - 0x02; info.right_bp = right_base - 0x01; info.kind = .test_greater_than_or_equal; },
        .spaceship => { info.left_bp = left_base - 0x02; info.right_bp = right_base - 0x01; info.kind = .compare; },
        .kw_is => { info.left_bp = left_base - 0x02; info.right_bp = right_base - 0x01; info.kind = .test_active_field; },
        .apostrophe => {
            // The `'` call operator; its direction is decided by spacing
            // asymmetry around the token.
            if (!options.allow_calls) {
                self.next_token = begin;
                return null;
            }
            if (linespace_before == linespace_after) {
                info.kind = .ambiguous_call;
            } else if (linespace_before) {
                info.kind = .suffix_call;
            } else {
                info.kind = .prefix_call;
            }
        },
        .tilde => { info.left_bp = left_base + 0x20; info.right_bp = right_base + 0x21; info.kind = .range_expr_exclusive_end;
            info.alt_when_suffix = .{ .left_bp = left_base + 0x20, .kind = .range_expr_infer_end };
        },
        .tilde_tilde => { info.left_bp = left_base + 0x20; info.right_bp = right_base + 0x21; info.kind = .range_expr_inclusive_end;
            info.alt_when_suffix = .{ .left_bp = left_base + 0x20, .kind = .range_expr_infer_end };
        },
        .kw_as => { info.left_bp = left_base + 0x2E; info.right_bp = right_base + 0x2F; info.kind = .coerce; },
        .kw_in => { info.left_bp = left_base + 0x31; info.right_bp = right_base + 0x30; info.kind = .apply_dim; },
        .plus_plus => { info.left_bp = left_base + 0x32; info.right_bp = right_base + 0x33; info.kind = .array_concat; },
        .star_star => { info.left_bp = left_base + 0x34; info.right_bp = right_base + 0x35; info.kind = .array_repeat; },
        .plus => { info.left_bp = left_base + 0x36; info.right_bp = right_base + 0x37; info.kind = .add; },
        .dash => { info.left_bp = left_base + 0x36; info.right_bp = right_base + 0x37; info.kind = .subtract; },
        .star => { info.left_bp = left_base + 0x38; info.right_bp = right_base + 0x39; info.kind = .multiply;
            info.alt_when_suffix = .{ .left_bp = left_base + 0x3E, .kind = .unmake_pointer };
        },
        .slash => { info.left_bp = left_base + 0x38; info.right_bp = right_base + 0x39; info.kind = .divide_exact; },
        .caret => { info.left_bp = left_base + 0x3B; info.right_bp = right_base + 0x3A; info.kind = .raise_exponent; },
        .dot => { info.left_bp = left_base + 0x3E; info.right_bp = right_base + 0x3F; info.kind = .member_access; },
        .dot_block_open => {
            // `expr.{ ... }` typed struct literal (suffix).
            info.left_bp = left_base + 0x3E;
            info.right_bp = null;
            info.kind = .typed_struct_literal;
            info.other = try self.fieldInitList();
            try self.closeMultiLineRegion(t, .block_close, "}", "struct literal");
        },
        .dot_paren_open => {
            // `expr.( ... )` typed union literal (suffix).
            info.left_bp = left_base + 0x3E;
            info.right_bp = null;
            info.kind = .typed_union_literal;
            info.other = try self.fieldInitList();
            try self.closeMultiLineRegion(t, .paren_close, ")", "union literal");
        },
        .dot_index_open => {
            // `expr.[ ... ]` typed array literal (suffix).
            info.left_bp = left_base + 0x3E;
            info.right_bp = null;
            info.kind = .typed_array_literal;
            info.other = try self.expressionList(.{});
            try self.closeMultiLineRegion(t, .index_close, "]", "array literal");
        },
        .index_open => {
            // `expr[index]` indexed access (suffix).
            if (try self.tryExpression(.{})) |index_expr| {
                info.left_bp = left_base + 0x3E;
                info.right_bp = null;
                info.kind = .indexed_access;
                info.other = index_expr;
                self.skipLinespace();
                try self.closeRegion(t, .index_close, "]", "index expression");
            } else {
                const error_token = self.next_token;
                self.syncPastTokenOrLine(.index_close);
                const end_token = self.backtrackToken(self.next_token, .{});
                self.recordErrorAbsRange("Failed to parse index expression", .{
                    .first = t,
                    .last = end_token,
                }, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
                self.recordErrorAbs("Expected expression", error_token, .{});
                return error.Sync;
            }
        },
        else => {
            self.next_token = begin;
            return null;
        },
    }
    return info;
}
/// Core Pratt (precedence-climbing) expression parser. Parses an optional
/// prefix operator (or a primary expression), then loops consuming
/// infix/suffix operators whose `left_bp` is at least `min_binding_power`,
/// recursing on the right side with the operator's `right_bp`.
/// Also rewrites `a 'f' b` (a suffix call immediately followed by a prefix
/// call) into an `.infix_call` with an `.infix_call_args` pair.
fn tryExpressionPratt(self: *Parser, min_binding_power: u8, options: ExpressionOptions) SyncError!?Ast.Handle {
    const before_prefix_operator = self.next_token;
    var expr = if (try self.tryPrefixOperator(options)) |operator| e: {
        if (operator.left_bp >= min_binding_power) {
            if (try self.tryExpressionPratt(operator.right_bp.?, options)) |right| {
                // Operators that carried a payload (e.g. an `if` condition)
                // become binary nodes; plain prefixes become unary nodes.
                if (operator.other) |left| {
                    break :e self.addBinary(operator.kind, operator.token, left, right);
                } else {
                    break :e self.addUnary(operator.kind, operator.token, right);
                }
            }
        }
        // Prefix operator rejected (too weak, or no operand): rewind fully.
        self.next_token = before_prefix_operator;
        return null;
    } else (try self.tryPrimaryExpression()) orelse return null;
    var is_suffix_call = false;
    while (true) {
        // Remember whether the node built on the previous iteration was a
        // suffix call; a prefix call right after it forms an infix call.
        const expr_is_suffix_call = is_suffix_call;
        is_suffix_call = false;
        const before_operator = self.next_token;
        if (try self.tryOperator(options)) |operator| {
            if (operator.left_bp >= min_binding_power) {
                if (operator.right_bp) |binding_power| {
                    std.debug.assert(operator.other == null);
                    if (try self.tryExpressionPratt(binding_power, options)) |right| {
                        if (operator.kind == .prefix_call and expr_is_suffix_call) {
                            // `a 'f' b`: rewrite the existing suffix_call node
                            // in place into an infix_call.
                            const suffix_call = self.ast.items[expr].info.suffix_call;
                            var args_expr = self.addBinary(.infix_call_args, operator.token, suffix_call.left, right);
                            self.ast.items[expr].info = .{ .infix_call = .{
                                .left = suffix_call.right,
                                .right = args_expr
                            }};
                            continue;
                        }
                        expr = self.addBinary(operator.kind, operator.token, expr, right);
                        is_suffix_call = operator.kind == .suffix_call;
                        continue;
                    } else if (operator.alt_when_suffix) |alt_operator| {
                        // No right operand: fall back to the suffix form if
                        // this operator allows one.
                        if (alt_operator.left_bp >= min_binding_power and try self.tryExpression(options) == null) {
                            expr = self.addUnary(alt_operator.kind, operator.token, expr);
                            continue;
                        }
                    }
                } else if (operator.other) |right| {
                    // Suffix construct carrying a payload (literal, index).
                    expr = self.addBinary(operator.kind, operator.token, expr, right);
                    continue;
                } else {
                    expr = self.addUnary(operator.kind, operator.token, expr);
                    continue;
                }
            } else if (operator.alt_when_suffix) |alt_operator| {
                if (alt_operator.left_bp >= min_binding_power and try self.tryExpression(options) == null) {
                    expr = self.addUnary(alt_operator.kind, operator.token, expr);
                    continue;
                }
            }
        }
        // No acceptable operator here: rewind past it and stop.
        self.next_token = before_operator;
        break;
    }
    return expr;
}
/// Parse a primary (atom-level) expression: identifier, literal, grouped
/// expression, anonymous struct/union/array literal, procedural block,
/// function definition/type, or struct/union type literal. Returns null with
/// the cursor restored when none applies.
fn tryPrimaryExpression(self: *Parser) SyncError!?Ast.Handle {
    const begin = self.next_token;
    self.skipLinespace();
    const token_handle = self.next_token;
    return switch (self.token_kinds[token_handle]) {
        .id => self.consumeTerminal(.id_ref),
        .paren_open => try self.parenExpression(),
        .string_literal, .line_string_literal => self.stringLiteral(),
        .numeric_literal => self.consumeTerminal(.numeric_literal),
        .dot => if (self.trySymbol()) |symbol_token_handle| self.addTerminal(.symbol, symbol_token_handle) else null,
        .block_open => try self.proceduralBlock(),
        .dot_block_open => {
            // `.{ ... }` anonymous struct literal.
            self.next_token += 1;
            const list = try self.fieldInitList();
            try self.closeMultiLineRegion(token_handle, .block_close, "}", "struct literal");
            return self.addUnary(.anonymous_struct_literal, token_handle, list);
        },
        .dot_paren_open => {
            // `.( ... )` anonymous union literal.
            self.next_token += 1;
            const list = try self.fieldInitList();
            try self.closeMultiLineRegion(token_handle, .paren_close, ")", "union literal");
            return self.addUnary(.anonymous_union_literal, token_handle, list);
        },
        .dot_index_open => {
            // `.[ ... ]` anonymous array literal.
            self.next_token += 1;
            const list = try self.expressionList(.{});
            try self.closeMultiLineRegion(token_handle, .index_close, "]", "array literal");
            return self.addUnary(.anonymous_array_literal, token_handle, list);
        },
        // Prefer a full definition (with body); fall back to a bare fn type.
        .kw_fn => try self.tryFunctionDefinition() orelse try self.tryFunctionType(),
        .kw_struct => try self.structTypeLiteral(),
        .kw_union => try self.unionTypeLiteral(),
        .kw_match => {
            // TODO
            unreachable;
        },
        else => {
            self.next_token = begin;
            return null;
        },
    };
}
/// Parse `( expr )` into a `.group` node. The caller has already verified the
/// cursor is on `(`. On a missing inner expression, report the whole span up
/// to the recovered `)` (or end of line) and unwind.
fn parenExpression(self: *Parser) SyncError!Ast.Handle {
    const open_token = self.next_token;
    std.debug.assert(self.token_kinds[open_token] == .paren_open);
    self.next_token += 1;
    self.skipLinespace();
    const maybe_expr = try self.tryExpression(.{});
    if (maybe_expr == null) {
        const error_token = self.next_token;
        self.syncPastTokenOrLine(.paren_close);
        const end_token = self.backtrackToken(self.next_token, .{});
        self.recordErrorAbsRange("Failed to parse parenthesized expression", .{
            .first = open_token,
            .last = end_token,
        }, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
        self.recordErrorAbs("Expected expression", error_token, .{});
        return error.Sync;
    }
    self.skipLinespace();
    try self.closeRegion(open_token, .paren_close, ")", "parenthesized expression");
    return self.addUnary(.group, open_token, maybe_expr.?);
}
/// Parse `{ ... }` into a `.proc_block` node containing a `.list` chain of
/// declarations/statements (or an `.empty` terminal for an empty block).
/// Items are separated by commas and/or newlines. The caller has already
/// verified the cursor is on `{`.
fn proceduralBlock(self: *Parser) SyncError!Ast.Handle {
    const open_token = self.next_token;
    std.debug.assert(self.token_kinds[open_token] == .block_open);
    self.next_token += 1;
    self.skipWhitespace();
    var accumulated: ?Ast.Handle = null;
    collect: while (true) {
        const item_token = self.next_token;
        const item = (try self.tryDeclaration()) orelse (try self.tryStatement()) orelse break :collect;
        accumulated = if (accumulated) |prev|
            self.addBinary(.list, item_token, prev, item)
        else
            item;
        if (!(self.tryToken(.comma) or self.tryNewline())) break :collect;
        self.skipWhitespace();
    }
    try self.closeMultiLineRegion(open_token, .block_close, "}", "procedural block");
    const contents = accumulated orelse self.addTerminal(.empty, open_token);
    return self.addUnary(.proc_block, open_token, contents);
}
/// Report "expected type expression" at the cursor for a function-type parse
/// that started at `fn_begin`, then unwind. Extracted because the identical
/// report appeared verbatim three times inside `tryFunctionType`.
fn failFunctionType(self: *Parser, fn_begin: Token.Handle) SyncError {
    const error_token = self.next_token;
    self.recordErrorAbsRange("Failed to parse function type", .{
        .first = fn_begin,
        .last = error_token,
    }, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
    self.recordErrorAbs("Expected type expression", error_token, .{});
    return error.Sync;
}
/// Parse a bare function type:
///   fn [left_type] ['right_type] [-> result_type]
/// Any omitted part becomes an `.inferred_type` terminal. Returns null when
/// the cursor is not on `fn`. Type expressions are parsed with calls
/// disallowed so a following `'` is read as the signature separator.
fn tryFunctionType(self: *Parser) SyncError!?Ast.Handle {
    self.skipLinespace();
    const fn_begin = self.next_token;
    if (!self.tryToken(.kw_fn)) return null;
    self.skipLinespace();
    var maybe_left: ?Ast.Handle = null;
    var maybe_right: ?Ast.Handle = null;
    if (self.tryToken(.apostrophe)) {
        // `fn 'T` — only a right-hand type.
        self.skipLinespace();
        maybe_right = (try self.tryExpression(.{ .allow_calls = false })) orelse
            return self.failFunctionType(fn_begin);
    } else {
        maybe_left = try self.tryExpression(.{ .allow_calls = false });
        self.skipLinespace();
        if (self.tryToken(.apostrophe)) {
            self.skipLinespace();
            maybe_right = (try self.tryExpression(.{ .allow_calls = false })) orelse
                return self.failFunctionType(fn_begin);
        }
    }
    var maybe_result: ?Ast.Handle = null;
    self.skipLinespace();
    if (self.tryToken(.thin_arrow)) {
        self.skipLinespace();
        maybe_result = (try self.tryExpression(.{ .allow_calls = false })) orelse
            return self.failFunctionType(fn_begin);
    }
    const left = maybe_left orelse self.addTerminal(.inferred_type, fn_begin);
    const right = maybe_right orelse self.addTerminal(.inferred_type, fn_begin);
    const result = maybe_result orelse self.addTerminal(.inferred_type, fn_begin);
    const args = self.addBinary(.fn_sig_args, fn_begin, left, right);
    return self.addBinary(.fn_sig, fn_begin, args, result);
}
/// Parse a function definition:
///   fn [left_decls] ['right_decls] => body_expr
///   fn [left_decls] ['right_decls] [-> result_type] { body_block }
/// Returns null (cursor restored) when no body is present — the caller then
/// retries as a bare function type (see `tryPrimaryExpression`). Diagnostics
/// are only recorded when parameter lists were present, i.e. the input is
/// committed to being a definition.
fn tryFunctionDefinition(self: *Parser) SyncError!?Ast.Handle {
    const begin = self.next_token;
    self.skipLinespace();
    const fn_begin = self.next_token;
    if (!self.tryToken(.kw_fn)) return null;
    self.skipLinespace();
    const maybe_left = try self.trySingleLineDeclarationList();
    var maybe_right: ?Ast.Handle = null;
    self.skipLinespace();
    if (self.tryToken(.apostrophe)) {
        maybe_right = try self.trySingleLineDeclarationList();
    }
    var maybe_result: ?Ast.Handle = null;
    var body: Ast.Handle = undefined;
    self.skipLinespace();
    if (self.tryToken(.thick_arrow)) {
        // `=>` single-expression body.
        self.skipLinespace();
        if (try self.tryExpression(.{})) |expr_handle| {
            body = expr_handle;
        } else if (maybe_left != null or maybe_right != null) {
            const error_token = self.next_token;
            self.recordErrorAbsRange("Failed to parse function definition", .{
                .first = fn_begin,
                .last = error_token,
            }, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
            self.recordErrorAbs("Expected body expression", error_token, .{});
            return error.Sync;
        } else {
            // No params and no body: not a definition after all.
            self.next_token = begin;
            return null;
        }
    } else {
        var after_arrow = self.next_token;
        if (self.tryToken(.thin_arrow)) {
            // `-> result_type` before the body block.
            self.skipLinespace();
            after_arrow = self.next_token;
            if (try self.tryExpression(.{ .allow_calls = false })) |expr_handle| {
                maybe_result = expr_handle;
            } else {
                const error_token = self.next_token;
                self.recordErrorAbsRange("Failed to parse function type", .{
                    .first = fn_begin,
                    .last = error_token,
                }, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
                self.recordErrorAbs("Expected type expression", error_token, .{});
                return error.Sync;
            }
        }
        self.skipLinespace();
        if (self.token_kinds[self.next_token] == .block_open) {
            body = try self.proceduralBlock();
        } else if (maybe_left != null or maybe_right != null) {
            // A `{...}` right after `->` was parsed as the result type —
            // point at it specifically.
            if (maybe_result) |result_handle| {
                if (self.ast.items[result_handle].info == .proc_block) {
                    self.recordErrorAbsRange("Failed to parse function definition", .{
                        .first = fn_begin,
                        .last = after_arrow,
                    }, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
                    self.recordErrorAbs("Expected result type", after_arrow, .{});
                    return error.Sync;
                }
            }
            const error_token = self.next_token;
            self.recordErrorAbsRange("Failed to parse function definition", .{
                .first = fn_begin,
                .last = error_token,
            }, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
            self.recordErrorAbs("Expected body block", error_token, .{});
            return error.Sync;
        } else {
            self.next_token = begin;
            return null;
        }
    }
    // Omitted parameter lists become `.empty`; omitted result becomes
    // `.inferred_type`.
    const left = maybe_left orelse self.addTerminal(.empty, fn_begin);
    const right = maybe_right orelse self.addTerminal(.empty, fn_begin);
    const result = maybe_result orelse self.addTerminal(.inferred_type, fn_begin);
    const args = self.addBinary(.fn_sig_args, fn_begin, left, right);
    const sig = self.addBinary(.fn_sig, fn_begin, args, result);
    return self.addBinary(.fn_def, fn_begin, sig, body);
}
/// Parses `struct { <declarations> }`. next_token must be positioned on the
/// `struct` keyword. Declarations are separated by newlines and accumulate
/// into a left-leaning `.list` chain; parse errors inside the block are
/// recorded and recovery continues at the next line or at the closing brace.
fn structTypeLiteral(self: *Parser) SyncError!Ast.Handle {
    const literal_begin = self.next_token;
    std.debug.assert(self.token_kinds[literal_begin] == .kw_struct);
    self.next_token += 1;
    self.skipLinespace();
    const block_begin = self.next_token;
    if (!self.tryToken(.block_open)) {
        self.recordErrorAbs("Failed to parse struct type literal", literal_begin, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
        self.recordError("Expected '{'", .{});
        return error.Sync;
    }
    var maybe_list: ?Ast.Handle = null;
    while (true) {
        self.skipLinespace();
        const decl_start_token = self.next_token;
        const maybe_decl = self.tryDeclaration() catch {
            // Recovery: skip to end of line or closing brace, then decide
            // whether the struct body is finished or scanning continues.
            self.syncPastTokenOrLine(.block_close);
            if (self.token_kinds[self.backtrackToken(self.next_token, .{})] == .block_close) {
                break;
            } else {
                continue;
            }
        };
        if (maybe_decl) |decl_handle| {
            if (self.tryToken(.block_close)) {
                // Declaration immediately followed by '}': append and stop.
                if (maybe_list) |list| {
                    maybe_list = self.addBinary(.list, decl_start_token, list, decl_handle);
                } else {
                    maybe_list = decl_handle;
                }
                break;
            } else if (!self.tryNewline()) {
                // Trailing junk after a declaration; note it, resync to the
                // next line, and drop this declaration from the list.
                self.recordErrorAbs("Failed to parse declaration", self.ast.items[decl_handle].token_handle, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
                self.recordError("Expected end of line", .{});
                self.syncPastToken(.newline);
                continue;
            }
            if (maybe_list) |list| {
                maybe_list = self.addBinary(.list, decl_start_token, list, decl_handle);
            } else {
                maybe_list = decl_handle;
            }
        } else if (self.tryToken(.block_close)) {
            break;
        } else if (self.tryNewline()) {
            // Blank line inside the block; keep scanning.
            continue;
        } else {
            // Not a declaration at all; report it. Very long bad spans are
            // collapsed into two single-token diagnostics instead of one
            // huge range.
            const first = self.next_token;
            self.syncPastTokenOrLine(.block_close);
            var last = self.backtrackToken(self.next_token, .{});
            if (first + 50 < last) {
                self.recordErrorAbs("Expected a declaration", first, Error.FlagSet.initOne(.has_continuation));
                self.recordErrorAbs("End of declaration/assignment/expression", last, Error.FlagSet.initOne(.supplemental));
            } else {
                self.recordErrorAbsRange("Expected a declaration", .{
                    .first = first,
                    .last = last,
                }, .{});
            }
        }
    }
    const list = maybe_list orelse self.addTerminal(.empty, block_begin);
    return self.addUnary(.struct_type_literal, block_begin, list);
}
/// Placeholder for `union { ... }` literals.
/// TODO: union type literals are not implemented yet; this always yields
/// the null AST handle.
fn unionTypeLiteral(self: *Parser) SyncError!Ast.Handle {
    _ = self;
    return 0;
}
/// Consumes a string literal token. Adjacent `\\`-style line string
/// literals on consecutive lines are merged into a single literal node.
/// Must only be called when the next token is known to be one of the two
/// string literal kinds.
fn stringLiteral(self: *Parser) Ast.Handle {
    const first_token = self.next_token;
    if (self.tryToken(.line_string_literal)) {
        // Greedily absorb continuation lines that hold another line-string
        // segment; anything else rewinds to just before the newline.
        while (true) {
            const checkpoint = self.next_token;
            if (!self.tryToken(.newline)) break;
            self.skipLinespace();
            if (!self.tryToken(.line_string_literal)) {
                self.next_token = checkpoint;
                break;
            }
        }
        return self.addTerminal(.string_literal, first_token);
    }
    if (self.tryToken(.string_literal)) {
        return self.addTerminal(.string_literal, first_token);
    }
    unreachable;
}
/// Tries to consume `.identifier`, returning the identifier's token handle.
/// On failure the token position is restored and null is returned.
fn trySymbol(self: *Parser) ?Token.Handle {
    const checkpoint = self.next_token;
    const found = if (self.tryToken(.dot)) self.tryIdentifier() else null;
    if (found == null) self.next_token = checkpoint;
    return found;
}
/// Consumes an identifier token if present, returning its handle.
fn tryIdentifier(self: *Parser) ?Token.Handle {
    if (!self.tryToken(.id)) return null;
    return self.next_token - 1;
}
/// Consumes the expected closing token of a single-line region. On failure,
/// records a range diagnostic plus a pointed "expected token" diagnostic,
/// resynchronizes past the token or the end of line, and returns error.Sync.
fn closeRegion(self: *Parser, begin_token: Token.Handle, token_kind: Token.Kind, comptime token_str: []const u8, comptime region_str: []const u8) SyncError!void {
    self.skipLinespace();
    if (!self.tryToken(token_kind)) {
        const failure_at = self.next_token;
        self.syncPastTokenOrLine(token_kind);
        const region_end = self.backtrackToken(self.next_token, .{});
        self.recordErrorAbsRange("Failed to parse " ++ region_str, .{
            .first = begin_token,
            .last = region_end,
        }, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
        self.recordErrorAbs("Expected '" ++ token_str ++ "'", failure_at, .{});
        return error.Sync;
    }
}
/// Like closeRegion, but the closing token may appear on a later line.
/// On failure, one of three diagnostic layouts is chosen depending on how
/// far apart (in tokens) the region start, the error position, and the
/// recovered region end are, to keep reported ranges readable.
fn closeMultiLineRegion(self: *Parser, begin_token: Token.Handle, token_kind: Token.Kind, comptime token_str: []const u8, comptime region_str: []const u8) SyncError!void {
    self.skipWhitespace();
    if (self.tryToken(token_kind)) {
        return;
    }
    const error_token = self.next_token;
    self.syncPastToken(token_kind);
    const end_token = self.backtrackToken(self.next_token, .{});
    if (begin_token + 50 < error_token) {
        // Start is far from the error: three single-token diagnostics.
        self.recordErrorAbs("Failed to parse " ++ region_str ++ " starting here", begin_token, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
        self.recordErrorAbs("Expected '" ++ token_str ++ "'", error_token, Error.FlagSet.initOne(.has_continuation));
        self.recordErrorAbs("End of " ++ region_str, end_token, Error.FlagSet.initOne(.supplemental));
    } else if (error_token + 50 < end_token) {
        // Error is far from the recovered end: range for the start, then
        // single-token diagnostics for the error and end positions.
        self.recordErrorAbsRange("Failed to parse " ++ region_str, .{
            .first = begin_token,
            .last = error_token,
        }, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
        self.recordErrorAbs("Expected '" ++ token_str ++ "'", error_token, Error.FlagSet.initOne(.has_continuation));
        self.recordErrorAbs("End of " ++ region_str, end_token, Error.FlagSet.initOne(.supplemental));
    } else {
        // Everything is close together: a single compact range.
        self.recordErrorAbsRange("Failed to parse " ++ region_str, .{
            .first = begin_token,
            .last = end_token,
        }, Error.FlagSet.initMany(&.{ .supplemental, .has_continuation }));
        self.recordErrorAbs("Expected '" ++ token_str ++ "'", error_token, .{});
    }
    return error.Sync;
}
/// Consumes optional linespace followed by a newline (or end of file).
/// Returns false and restores the position when no line break follows.
/// Note: at eof, nothing is consumed but any skipped linespace stays
/// consumed, matching the original behavior.
fn tryNewline(self: *Parser) bool {
    const checkpoint = self.next_token;
    self.skipLinespace();
    const kind = self.token_kinds[self.next_token];
    if (kind == .newline) {
        self.next_token += 1;
        return true;
    }
    if (kind == .eof) return true;
    self.next_token = checkpoint;
    return false;
}
/// Skips any run of linespace/newline/comment tokens; returns true when at
/// least one token was consumed.
fn tryWhitespace(self: *Parser) bool {
    const start = self.next_token;
    self.skipWhitespace();
    return self.next_token != start;
}
/// Advances past any run of linespace, newline, and comment tokens.
fn skipWhitespace(self: *Parser) void {
    while (true) {
        switch (self.token_kinds[self.next_token]) {
            .linespace, .newline, .comment => self.next_token += 1,
            else => return,
        }
    }
}
/// Skips any run of linespace/comment tokens (staying on the current line);
/// returns true when at least one token was consumed.
fn tryLinespace(self: *Parser) bool {
    const start = self.next_token;
    self.skipLinespace();
    return self.next_token != start;
}
/// Advances past any run of linespace and comment tokens without crossing
/// a newline.
fn skipLinespace(self: *Parser) void {
    while (true) {
        switch (self.token_kinds[self.next_token]) {
            .linespace, .comment => self.next_token += 1,
            else => return,
        }
    }
}
/// Consumes the next token if it has the given kind; returns whether it did.
fn tryToken(self: *Parser, kind: Token.Kind) bool {
    if (self.token_kinds[self.next_token] != kind) return false;
    self.next_token += 1;
    return true;
}
/// Error recovery: advances until a token of `kind` or a newline has been
/// consumed, whichever comes first. Balanced {} / [] / () groups are skipped
/// as opaque units so a matching close token inside them is not mistaken
/// for ours. Stops at (without consuming) end of file.
fn syncPastTokenOrLine(self: *Parser, kind: Token.Kind) void {
    while (true) {
        const found = self.token_kinds[self.next_token];
        self.next_token += 1;
        if (found == kind or found == .newline) {
            return;
        }
        switch (found) {
            // Recurse past entire nested regions.
            .block_open, .dot_block_open => self.syncPastToken(.block_close),
            .index_open, .dot_index_open => self.syncPastToken(.index_close),
            .paren_open, .dot_paren_open => self.syncPastToken(.paren_close),
            .eof => {
                // Undo the increment so next_token stays on the eof token.
                self.next_token -= 1;
                return;
            },
            else => {},
        }
    }
}
/// Error recovery: advances until a token of `kind` has been consumed,
/// skipping balanced {} / [] / () groups as opaque units. Unlike
/// syncPastTokenOrLine, newlines do not stop the scan. Stops at (without
/// consuming) end of file.
fn syncPastToken(self: *Parser, kind: Token.Kind) void {
    while (true) {
        const found = self.token_kinds[self.next_token];
        self.next_token += 1;
        if (found == kind) {
            return;
        }
        switch (found) {
            // Recurse past entire nested regions.
            .block_open, .dot_block_open => self.syncPastToken(.block_close),
            .index_open, .dot_index_open => self.syncPastToken(.index_close),
            .paren_open, .dot_paren_open => self.syncPastToken(.paren_close),
            .eof => {
                // Undo the increment so next_token stays on the eof token.
                self.next_token -= 1;
                return;
            },
            else => {},
        }
    }
}
/// Controls which trailing token kinds backtrackToken steps over when
/// locating the last substantive token before a position.
const BacktrackOptions = struct {
    any_one_token: bool = true, // always step back at least one token first
    eof: bool = true, // step back over a trailing eof token
    newline: bool = true, // step back over a trailing newline
    comment: bool = true, // step back over a trailing comment
    linespace: bool = true, // step back over trailing linespace
};
/// Walks backwards from `token_handle`, stepping over at most one token of
/// each enabled trailing kind (eof, then newline, then comment, then
/// linespace), returning the handle of the last substantive token. Never
/// goes below handle 0.
fn backtrackToken(self: *Parser, token_handle: Token.Handle, options: BacktrackOptions) Token.Handle {
    var t = token_handle;
    if (t == 0) return t;
    const token_kinds = self.token_kinds;
    // NOTE(review): `and` binds tighter than `or`, so this condition reads
    // as `any_one_token or (options.eof and token_kinds[t] == .eof)` —
    // with the default any_one_token=true the first step-back always
    // happens. Presumably intentional; confirm before changing.
    if (options.any_one_token or options.eof and token_kinds[t] == .eof) {
        t -= 1;
        if (t == 0) return t;
    }
    if (options.newline and token_kinds[t] == .newline) {
        t -= 1;
        if (t == 0) return t;
    }
    if (options.comment and token_kinds[t] == .comment) {
        t -= 1;
        if (t == 0) return t;
    }
    if (options.linespace and token_kinds[t] == .linespace) {
        t -= 1;
    }
    return t;
}
/// Consumes the current token and wraps it in a terminal AST node of the
/// given kind.
fn consumeTerminal(self: *Parser, kind: Ast.Kind) Ast.Handle {
    self.next_token += 1;
    return self.addTerminal(kind, self.next_token - 1);
}
/// Appends a payload-less AST node of the given kind. `kind` must be a tag
/// whose Ast.Info payload type is void; any other kind falls through to
/// unreachable (the comptime-unrolled switch only emits a return for
/// void-payload tags).
fn addTerminal(self: *Parser, kind: Ast.Kind, token_handle: Token.Handle) Ast.Handle {
    @setEvalBranchQuota(10000);
    switch (kind) {
        // Unrolled over all tags so @unionInit gets a comptime tag name.
        inline else => |k| if (std.meta.FieldType(Ast.Info, k) == void) {
            const info = @unionInit(Ast.Info, @tagName(k), {});
            return self.addAst(token_handle, info);
        },
    }
    unreachable;
}
/// Appends an AST node holding a single child handle. `kind` must be a tag
/// whose Ast.Info payload type is Ast.Handle; otherwise unreachable.
fn addUnary(self: *Parser, kind: Ast.Kind, token_handle: Token.Handle, inner: Ast.Handle) Ast.Handle {
    @setEvalBranchQuota(10000);
    switch (kind) {
        // Unrolled over all tags so @unionInit gets a comptime tag name.
        inline else => |k| if (std.meta.FieldType(Ast.Info, k) == Ast.Handle) {
            const info = @unionInit(Ast.Info, @tagName(k), inner);
            return self.addAst(token_handle, info);
        },
    }
    unreachable;
}
/// Appends an AST node holding a left/right child pair. `kind` must be a
/// tag whose Ast.Info payload type is Ast.Binary; otherwise unreachable.
fn addBinary(self: *Parser, kind: Ast.Kind, token_handle: Token.Handle, left: Ast.Handle, right: Ast.Handle) Ast.Handle {
    @setEvalBranchQuota(10000);
    switch (kind) {
        // Unrolled over all tags so @unionInit gets a comptime tag name.
        inline else => |k| if (std.meta.FieldType(Ast.Info, k) == Ast.Binary) {
            const info = @unionInit(Ast.Info, @tagName(k), .{ .left = left, .right = right });
            return self.addAst(token_handle, info);
        },
    }
    unreachable;
}
/// Appends a node to the AST arena and returns its handle (its index).
/// Allocation failure aborts; parsing has no recovery path for OOM.
fn addAst(self: *Parser, token_handle: Token.Handle, info: Ast.Info) Ast.Handle {
    self.ast.append(self.gpa, .{
        .token_handle = token_handle,
        .info = info,
    }) catch @panic("OOM");
    return @intCast(Ast.Handle, self.ast.items.len - 1);
}
/// Records an error positioned at the current token.
fn recordError(self: *Parser, desc: []const u8, flags: Error.FlagSet) void {
    self.recordErrorAbs(desc, self.next_token, flags);
}
/// Records an error positioned `token_offset` tokens away from the current
/// token (offset may be negative).
fn recordErrorRel(self: *Parser, desc: []const u8, token_offset: i8, flags: Error.FlagSet) void {
    self.recordErrorAbs(desc, @intCast(Token.Handle, @as(i64, self.next_token) + token_offset), flags);
}
/// Records an error attached to a single token. `desc` must outlive the
/// error list; OOM while recording aborts.
fn recordErrorAbs(self: *Parser, desc: []const u8, token: Token.Handle, flags: Error.FlagSet) void {
    self.errors.append(self.gpa, .{
        .flags = flags,
        .desc = desc,
        .context = .{ .token = token },
        .source_handle = self.source_handle,
    }) catch @panic("OOM");
}
/// Records an error attached to an inclusive token range. `desc` must
/// outlive the error list; OOM while recording aborts.
fn recordErrorAbsRange(self: *Parser, desc: []const u8, range: Token.Range, flags: Error.FlagSet) void {
    self.errors.append(self.gpa, .{
        .flags = flags,
        .desc = desc,
        .context = .{ .token_range = range },
        .source_handle = self.source_handle,
    }) catch @panic("OOM");
}
/// State threaded through dump/dumpAst/dumpAstList.
const DumpContext = struct {
    source_text: []const u8, // full source, used to render token text
    token_offsets: []const u32, // byte offset of each token in source_text
    styles: []const console.Style, // rotating styles; empty disables color
    next_style: usize = 0, // index of the next style to apply
};
/// Pretty-prints the parsed module tree (or "Module: null" when parsing
/// produced no module), applying and finally resetting console styles when
/// any are configured.
pub fn dump(self: *Parser, ctx: DumpContext, writer: anytype) !void {
    var mut_ctx = ctx;
    const styled = mut_ctx.styles.len > 0;
    if (styled) {
        try mut_ctx.styles[mut_ctx.next_style].apply(writer);
        mut_ctx.next_style = (mut_ctx.next_style + 1) % mut_ctx.styles.len;
    }
    if (self.module) |module_handle| {
        try self.dumpAst(&mut_ctx, writer, "Module:", module_handle, .{}, .{});
    } else {
        try writer.writeAll("Module: null\n");
    }
    if (styled) try (console.Style{}).apply(writer);
}
/// Singly-linked chain of indentation prefixes; printing walks to the root
/// first so each nesting level contributes its own segment, outermost first.
const DumpPrefix = struct {
    prev: ?*const DumpPrefix = null,
    prefix: []const u8 = "",

    pub fn dump(self: DumpPrefix, writer: anytype) !void {
        if (self.prev) |prev| try prev.dump(writer);
        try writer.writeAll(self.prefix);
    }
};
/// Recursively pretty-prints the AST node `ast_handle` and its children.
/// `first` is the prefix chain for this node's own label line; `extra` is
/// the chain for continuation lines under it.
fn dumpAst(self: *Parser, ctx: *DumpContext, writer: anytype, label: []const u8, ast_handle: Ast.Handle, first: DumpPrefix, extra: DumpPrefix) @TypeOf(writer).Error!void {
    try first.dump(writer);

    // Render the current rotating style (and the reset sequence) into
    // stack buffers so they can be spliced into output and prefix strings.
    var style_buf1: [32]u8 = undefined;
    var style_buf2: [32]u8 = undefined;
    var set_style: []const u8 = "";
    var reset_style: []const u8 = "";
    if (ctx.styles.len > 0) {
        var stream1 = std.io.fixedBufferStream(&style_buf1);
        try ctx.styles[ctx.next_style].apply(stream1.writer());
        set_style = stream1.getWritten();

        var stream2 = std.io.fixedBufferStream(&style_buf2);
        try (console.Style{}).apply(stream2.writer());
        reset_style = stream2.getWritten();

        ctx.next_style = (ctx.next_style + 1) % ctx.styles.len;
    }

    try writer.writeAll(label);
    try writer.writeAll(set_style);

    const ast = self.ast.items[ast_handle];
    const tag = @as(Ast.Kind, ast.info);
    if (label.len > 0) {
        try writer.writeByte(' ');
    }
    try writer.print("{s}", .{ @tagName(tag) });

    // Child prefixes: "*= " marks a child's label line, " | " marks its
    // continuation lines. NOTE(review): the branches below mutate buf2 in
    // place (overwriting the '|' with a space) before dumping a final
    // child, so the continuation bar disappears under it — new_extra.prefix
    // aliases buf2. Confirm this aliasing is intentional before
    // restructuring.
    var buf1: [40]u8 = undefined;
    var buf2: [40]u8 = undefined;
    var new_first = DumpPrefix{ .prev = &extra, .prefix = undefined };
    var new_extra = DumpPrefix{ .prev = &extra, .prefix = undefined };
    new_first.prefix = try std.fmt.bufPrint(&buf1, "{s} *= ", .{ set_style });
    new_extra.prefix = try std.fmt.bufPrint(&buf2, "{s} | ", .{ set_style });

    @setEvalBranchQuota(10000);
    switch (tag) {
        .list => {
            try writer.writeAll(reset_style);
            try writer.writeByte('\n');
            _ = try self.dumpAstList(ctx, writer, 0, true, ast.info.list, new_first, new_extra);
        },
        .id_ref, .symbol, .numeric_literal, .string_literal => {
            // Terminal nodes: show the (escaped) token text inline.
            const token = Token.init(.{
                .kind = self.token_kinds[ast.token_handle],
                .offset = ctx.token_offsets[ast.token_handle],
            }, ctx.source_text);
            try writer.writeByte(' ');
            if (tag == .symbol) {
                try writer.writeByte('.');
            }
            try writer.print("{s}", .{ std.fmt.fmtSliceEscapeUpper(token.text) });
            try writer.writeAll(reset_style);
            try writer.writeByte('\n');
        },
        .struct_field_declaration,
        .union_field_declaration,
        .variable_declaration,
        .constant_declaration => {
            // Declarations: show the declared name, then the node's two
            // children as L/R subtrees.
            const token = Token.init(.{
                .kind = self.token_kinds[ast.token_handle],
                .offset = ctx.token_offsets[ast.token_handle],
            }, ctx.source_text);
            try writer.writeByte(' ');
            if (tag == .struct_field_declaration or tag == .union_field_declaration) {
                try writer.writeByte('.');
            }
            if (token.kind != .dot) {
                try writer.print("{s}", .{ std.fmt.fmtSliceEscapeUpper(token.text) });
            }
            try writer.writeAll(reset_style);
            try writer.writeByte('\n');
            const bin = switch (ast.info) {
                .struct_field_declaration,
                .union_field_declaration,
                .variable_declaration,
                .constant_declaration,
                => |bin| bin,
                else => unreachable,
            };
            try self.dumpAst(ctx, writer, "L:", bin.left, new_first, new_extra);
            buf2[new_extra.prefix.len - 3] = ' ';
            try self.dumpAst(ctx, writer, "R:", bin.right, new_first, new_extra);
        },
        inline else => |k| {
            // Generic fallback, dispatched by the tag's payload type.
            const F = std.meta.FieldType(Ast.Info, k);
            if (F == Ast.Handle) {
                buf2[new_extra.prefix.len - 3] = ' ';
                try self.dumpAst(ctx, writer, " ", @field(ast.info, @tagName(k)), .{}, extra);
            } else if (F == Ast.Binary) {
                try writer.writeAll(reset_style);
                try writer.writeByte('\n');
                const bin = @field(ast.info, @tagName(k));
                try self.dumpAst(ctx, writer, "L:", bin.left, new_first, new_extra);
                buf2[new_extra.prefix.len - 3] = ' ';
                try self.dumpAst(ctx, writer, "R:", bin.right, new_first, new_extra);
            } else if (F == void) {
                try writer.writeAll(reset_style);
                try writer.writeByte('\n');
            } else {
                unreachable;
            }
        },
    }
}
/// Flattens a left-leaning `.list` chain and prints each element with a
/// running index starting at `n`. `is_last` marks the outermost call so the
/// final element can drop the continuation bar from its prefix. Returns the
/// next unused index.
fn dumpAstList(self: *Parser, ctx: *DumpContext, writer: anytype, n: usize, is_last: bool, initial_list: Ast.Binary, first: DumpPrefix, extra: DumpPrefix) @TypeOf(writer).Error!usize {
    var mut_n = n;
    var list: Ast.Binary = initial_list;
    while (true) {
        // Left side: either a nested list (recurse) or a single element.
        switch (self.ast.items[list.left].info) {
            .list => |bin| {
                mut_n = try self.dumpAstList(ctx, writer, mut_n, false, bin, first, extra);
            },
            else => {
                var label_buf: [20]u8 = undefined;
                const label = try std.fmt.bufPrint(&label_buf, "{}:", .{ mut_n });
                try self.dumpAst(ctx, writer, label, list.left, first, extra);
                mut_n += 1;
            },
        }
        // Right side: continue down the chain, or print the final element.
        switch (self.ast.items[list.right].info) {
            .list => |bin| {
                list = bin;
            },
            else => {
                var label_buf: [20]u8 = undefined;
                const label = try std.fmt.bufPrint(&label_buf, "{}:", .{ mut_n });
                // Copy the continuation prefix so it can be edited locally;
                // for the last element of the outermost list, blank out the
                // trailing bar.
                var extra_buf: [40]u8 = undefined;
                const new_extra_prefix = extra_buf[0..extra.prefix.len];
                @memcpy(new_extra_prefix, extra.prefix);
                if (is_last) {
                    new_extra_prefix[new_extra_prefix.len - 3] = ' ';
                }
                var new_extra = DumpPrefix{ .prev = extra.prev, .prefix = new_extra_prefix };
                try self.dumpAst(ctx, writer, label, list.right, first, new_extra);
                mut_n += 1;
                return mut_n;
            },
        }
    }
}
| https://raw.githubusercontent.com/bcrist/footlang/c540286e4bcf006690136728093466a947fc13b1/src/Parser.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const StringHashMap = std.StringHashMap;
const TestAllocator = std.testing.allocator;
const types = @import("types.zig");
const ESVarType = types.ESVarType;
const ESNumber = types.ESNumber;
const ESString = types.ESString;
const ESReference = types.ESReference;
const ESType = types.ESType;
const ESUndefined = types.ESUndefined;
const ESObject = types.ESObject;
const ESObjectImpl = types.ESObjectImpl;
const ESNativeFunction = types.ESNativeFunction;
const UNDEFINED = types.UNDEFINED;
const InvokeAssign = types.InvokeAssign;
const InvokeAdd = types.InvokeAdd;
const Instruction = types.Instruction;
const ESScope = @import("scope.zig").ESScope;
const gc = @import("gc.zig");
const utils = @import("utils.zig");
/// Handles execution of instructions
pub const ESRuntime = struct {
    allocator: Allocator,
    scope: *ESScope,
    objectManager: gc.ESObjectManager,

    /// Creates a runtime with an initialized root scope and standard
    /// library. The returned pointer is owned by the caller; release it
    /// with deinit().
    pub fn init(allocator: Allocator) !*ESRuntime {
        const ptr = try allocator.create(ESRuntime);
        errdefer allocator.destroy(ptr);

        ptr.allocator = allocator;
        ptr.objectManager = gc.ESObjectManager.init(allocator);
        errdefer ptr.objectManager.deinit();

        // Hand the scope a pointer to the manager at its final address
        // inside the runtime struct. The previous version passed the
        // address of a stack-local copy, which left the scope holding a
        // dangling pointer once init() returned.
        const scope = try ESScope.init(allocator, &ptr.objectManager);
        errdefer scope.deinit();
        try scope.initStdLib();
        ptr.scope = scope;

        return ptr;
    }

    /// Executes the root scope's instruction list with no arguments.
    pub fn exec(self: *ESRuntime) !void {
        return try self.scope.exec(utils.EmptyArray);
    }

    /// Releases the scope first (it references the object manager), then
    /// the object manager, then the runtime allocation itself.
    pub fn deinit(self: *ESRuntime) void {
        self.scope.deinit();
        self.objectManager.deinit();
        self.allocator.destroy(self);
    }
};
// Declares a const and assigns it exactly once; execution must succeed.
test "Create var and assign" {
    const runtime = try ESRuntime.init(TestAllocator);
    defer runtime.deinit();

    // Example code:
    // const a = "String!";

    // Hoisted const a;
    try runtime.scope.push( .{ .Declare = .{ .identifier = "a", .type = .CONST } });
    // a = "String!";
    try runtime.scope.push( .{ .Set = .{ .identifier = "a", .value = "\"String!\""} });
    try runtime.exec();
}
// A second Set on a CONST binding must fail with CannotReassignConst.
test "Ensure const cannot be reassigned" {
    const runtime = try ESRuntime.init(TestAllocator);
    defer runtime.deinit();

    // Example code:
    // const a = "String!";
    // a = "Another string!";

    // Hoisted const a;
    try runtime.scope.push( .{ .Declare = .{ .identifier = "a", .type = .CONST } });
    // a = "String!";
    try runtime.scope.push( .{ .Set = .{ .identifier = "a", .value = "\"String!\""} });
    // a = "Another string!";
    try runtime.scope.push( .{ .Set = .{ .identifier = "a", .value = "\"Another string!\""} });
    try std.testing.expectError(error.CannotReassignConst, runtime.exec());
}
// A second Set on a LET binding must succeed.
test "Ensure let can be reassigned" {
    const runtime = try ESRuntime.init(TestAllocator);
    defer runtime.deinit();

    // Example code:
    // let a = "String!";
    // a = "Another string!";

    // Hoisted let a;
    try runtime.scope.push( .{ .Declare = .{ .identifier = "a", .type = .LET } });
    // a = "String!";
    try runtime.scope.push( .{ .Set = .{ .identifier = "a", .value = "\"String!\""} });
    // a = "Another string!";
    try runtime.scope.push( .{ .Set = .{ .identifier = "a", .value = "\"Another string!\""} });
    try runtime.exec();
}
// Builds a two-argument add function in a child scope, invokes it with
// "10" and "20", and checks the stored result is "30".
test "Create and run method" {
    const runtime = try ESRuntime.init(TestAllocator);
    defer runtime.deinit();

    // const add;
    try runtime.scope.push( .{ .Declare = .{ .identifier = "add", .type = .CONST } });
    try runtime.scope.push( .{ .Declare = .{ .identifier = "result", .type = .LET } });

    // add = (a, b) => { return a + b; }
    //
    // note: This can be simplified to:
    //   try fnScope.push( .{ .Read = ":0" });
    //   try fnScope.push( .{ .Add = ":1" });
    //   try fnScope.push( .{ .Return = {} });
    //
    // This however, demonstrates how JS will put the arguments into scope
    // as variables.
    const fnScope = try runtime.scope.createScope();
    // Positional args ":0"/":1" are copied into named locals a and b.
    try fnScope.push( .{ .Declare = .{ .identifier = "a", .type = .CONST } });
    try fnScope.push( .{ .Set = .{ .identifier = "a", .value = ":0" }});
    try fnScope.push( .{ .Declare = .{ .identifier = "b", .type = .CONST } });
    try fnScope.push( .{ .Set = .{ .identifier = "b", .value = ":1" }});
    try fnScope.push( .{ .Read = "a" });
    try fnScope.push( .{ .Add = "b" });
    try fnScope.push( .{ .Return = {} });
    try runtime.scope.push( .{ .SetReference = .{ .identifier = "add", .value = .{
        .Function = .{
            .scope = fnScope,
        },
    }}});

    // var result = add("10", "20");
    try runtime.scope.push( .{ .Read = "add" });
    var args = [_][]const u8 { "10", "20" };
    try runtime.scope.push( .{ .Invoke = .{ .args = &args }});
    try runtime.scope.push( .{ .Write = "result" });

    try runtime.exec();
    var result = try runtime.scope.getValue("result");
    try std.testing.expectEqualStrings("30", result);
}
// A function nested two scopes deep reads `a` from the root scope,
// demonstrating lexical scope lookup through the scope chain.
test "Access vars from upper scope" {
    const runtime = try ESRuntime.init(TestAllocator);
    defer runtime.deinit();

    try runtime.scope.push( .{ .Declare = .{ .identifier = "a", .type = .CONST }});
    try runtime.scope.push( .{ .Declare = .{ .identifier = "funcA", .type = .CONST }});
    try runtime.scope.push( .{ .Declare = .{ .identifier = "ret", .type = .CONST }});
    try runtime.scope.push( .{ .Set = .{ .identifier = "a", .value = "25" }});

    // funcA contains funcB; funcB computes a + 15 using the outer `a`.
    const fnScope = try runtime.scope.createScope();
    try fnScope.push( .{ .Declare = .{ .identifier = "funcB", .type = .CONST }});
    try runtime.scope.push( .{ .SetReference = .{ .identifier = "funcA", .value = .{
        .Function = .{
            .scope = fnScope
        }
    }}});

    const nestedFnScope = try fnScope.createScope();
    try nestedFnScope.push( .{ .Read = "a" });
    try nestedFnScope.push( .{ .Add = "15" });
    try nestedFnScope.push( .{ .Return = {} });
    try fnScope.push( .{ .SetReference = .{ .identifier = "funcB", .value = .{
        .Function = .{
            .scope = nestedFnScope
        }
    }}});
    // funcA's body: return funcB();
    try fnScope.push( .{ .Read = "funcB" });
    try fnScope.push( .{ .Invoke = .{ .args = utils.EmptyArray }});
    try fnScope.push( .{ .Return = {} });

    // ret = funcA();
    try runtime.scope.push( .{ .Read = "funcA" });
    try runtime.scope.push( .{ .Invoke = .{ .args = utils.EmptyArray }});
    try runtime.scope.push( .{ .Write = "ret" });

    try runtime.exec();
    var result = try runtime.scope.getValue("ret");
    try std.testing.expectEqualStrings("40", result);
}
// Free-form scratch test exercising declares, sets, arithmetic on the
// accumulator, console.log, and the debugValue dump helpers.
test "Dev Testbench" {
    const runtime = try ESRuntime.init(TestAllocator);
    defer runtime.deinit();

    // const oneHundred;
    try runtime.scope.push( .{ .Declare = .{ .identifier = "oneHundred", .type = .CONST } });
    // const twoHundred;
    try runtime.scope.push(.{ .Declare = .{ .identifier = "twoHundred", .type = .CONST } });
    // let twoNinetyNine;
    try runtime.scope.push(.{ .Declare = .{ .identifier = "twoNinetyNine", .type = .LET } });

    // Vars can have their values set directly.
    // oneHundred = 100;
    try runtime.scope.push( .{ .Set = .{ .identifier = "oneHundred", .value = "100"} });
    // twoHundred = 200;
    try runtime.scope.push( .{ .Set = .{ .identifier = "twoHundred", .value = "200"} });

    // Vars can also have their values copied from another variable.
    // twoNinetyNine = oneHundred;
    try runtime.scope.push( .{ .Set = .{ .identifier = "twoNinetyNine", .value = "oneHundred"} });

    // twoNinetyNine += twoHundred;
    try runtime.scope.push( .{ .Read = "twoNinetyNine" });
    try runtime.scope.push( .{ .Add = "twoHundred" });
    try runtime.scope.push( .{ .Write = "twoNinetyNine" });
    // twoNinetyNine -= 100;
    try runtime.scope.push( .{ .Read = "twoNinetyNine" });
    try runtime.scope.push( .{ .Add = "-100" });
    try runtime.scope.push( .{ .Write = "twoNinetyNine" });
    // twoNinetyNine += 50 + 49;
    try runtime.scope.push( .{ .Read = "twoNinetyNine" });
    try runtime.scope.push( .{ .Add = "50" });
    try runtime.scope.push( .{ .Add = "49" });
    try runtime.scope.push( .{ .Write = "twoNinetyNine" });

    // An example of what would happen if you performed an operation but
    // did not capture the result; the allocator will get updated but
    // the var will not change.
    // twoNinetyNine - 299;
    try runtime.scope.push( .{ .Read = "twoNinetyNine" });
    try runtime.scope.push( .{ .Add = "-299" });

    // Attempts to call console.log
    try runtime.scope.push( .{ .Read = "console" });
    try runtime.scope.push( .{ .ReadProperty = "log" });
    var args = [_][]const u8 {
        "\nHello from console.log!\n",
    };
    try runtime.scope.push( .{ .Invoke = .{ .args = &args } });

    try runtime.exec();

    // Should result in twoNinetyNine<Number> = 299 being output.
    std.debug.print("== Primitives ==\n",.{});
    try runtime.scope.debugValue("undefined");
    try runtime.scope.debugValue("NaN");
    try runtime.scope.debugValue("Infinity");
    std.debug.print("== Variables in current scope ==\n",.{});
    try runtime.scope.debugValue("oneHundred");
    try runtime.scope.debugValue("twoHundred");
    try runtime.scope.debugValue("twoNinetyNine");
    std.debug.print("== Non-existent variable ==\n",.{});
    try runtime.scope.debugValue("nonExist");
}
} | https://raw.githubusercontent.com/jlandrum/jz/74a66f98d888272b451e2f644baa7aa822d00e7d/lib/runtime.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const Self = @This();
/// Errors produced while validating/parsing an ELF image.
const Error = error{
    InvalidHeader,
};

// `data` is borrowed, not owned; `allocator` is only used to free the
// Self allocation in deinit().
allocator: Allocator,
data: []const u8,
header: std.elf.Elf64_Ehdr,
// Section headers of interest, filled in by parse() when present.
debug_info_sect: ?std.elf.Elf64_Shdr = null,
debug_string_sect: ?std.elf.Elf64_Shdr = null,
debug_abbrev_sect: ?std.elf.Elf64_Shdr = null,
debug_frame: ?std.elf.Elf64_Shdr = null,
eh_frame: ?std.elf.Elf64_Shdr = null,
/// Returns true when `data` begins with the ELF magic bytes "\x7fELF".
/// Compares the 4-byte magic directly: the previous implementation cast
/// the slice pointer to *const Elf64_Ehdr, which demanded 8-byte alignment
/// (@alignCast safety panic on a misaligned byte buffer) and formed a
/// full-header pointer before any length validation.
pub fn isElfFile(data: []const u8) bool {
    return data.len >= 4 and std.mem.eql(u8, data[0..4], "\x7fELF");
}
/// Allocates a Self and captures the file header by value.
/// NOTE(review): assumes the caller (parse) already verified the magic and
/// that data.len covers a full Elf64_Ehdr; the pointer cast additionally
/// requires `data` to satisfy Elf64_Ehdr alignment — confirm callers
/// guarantee suitably aligned buffers.
fn init(allocator: Allocator, data: []const u8) !*Self {
    const self: *Self = try allocator.create(Self);
    self.* = .{
        .allocator = allocator,
        .data = data,
        .header = @as(*const std.elf.Elf64_Ehdr, @ptrCast(@alignCast(data.ptr))).*,
    };
    return self;
}
/// Frees the Self allocation only; `data` is borrowed and remains owned by
/// the caller.
pub fn deinit(self: *Self) void {
    self.allocator.destroy(self);
}
/// Parses an ELF image from `data` and records the locations of the
/// DWARF/unwind-related sections. `data` is borrowed for the lifetime of
/// the returned object; free the result with deinit().
pub fn parse(allocator: Allocator, data: []const u8) !*Self {
    // Validate the length before inspecting the header: the original
    // checked the magic first, but isElfFile/init reinterpret the buffer
    // as an Elf64_Ehdr, which is only safe once a full header is known to
    // be present. The observable error for short inputs is unchanged.
    if (data.len < @sizeOf(std.elf.Elf64_Ehdr)) return Error.InvalidHeader;
    if (!isElfFile(data)) return Error.InvalidHeader;

    const self = try init(allocator, data);
    errdefer allocator.destroy(self);

    // Remember the debug-related PROGBITS sections for later lookups.
    const shdrs = self.getShdrs();
    for (shdrs) |shdr| switch (shdr.sh_type) {
        std.elf.SHT_PROGBITS => {
            const sh_name = self.getShString(@as(u32, @intCast(shdr.sh_name)));
            if (std.mem.eql(u8, sh_name, ".debug_info")) {
                self.debug_info_sect = shdr;
            }
            if (std.mem.eql(u8, sh_name, ".debug_abbrev")) {
                self.debug_abbrev_sect = shdr;
            }
            if (std.mem.eql(u8, sh_name, ".debug_str")) {
                self.debug_string_sect = shdr;
            }
            if (std.mem.eql(u8, sh_name, ".debug_frame")) {
                self.debug_frame = shdr;
            }
            if (std.mem.eql(u8, sh_name, ".eh_frame")) {
                self.eh_frame = shdr;
            }
        },
        else => {},
    };
    return self;
}
/// Bytes of the .debug_info section, or null when the image has none.
pub fn getDebugInfoData(self: *const Self) ?[]const u8 {
    return if (self.debug_info_sect) |shdr| self.getShdrData(shdr) else null;
}
/// Bytes of the .debug_str section, or null when the image has none.
pub fn getDebugStringData(self: *const Self) ?[]const u8 {
    return if (self.debug_string_sect) |shdr| self.getShdrData(shdr) else null;
}
/// Bytes of the .debug_abbrev section, or null when the image has none.
pub fn getDebugAbbrevData(self: *const Self) ?[]const u8 {
    return if (self.debug_abbrev_sect) |shdr| self.getShdrData(shdr) else null;
}
/// Bytes of the .debug_frame section, or null when the image has none.
pub fn getDebugFrameData(self: *const Self) ?[]const u8 {
    return if (self.debug_frame) |shdr| self.getShdrData(shdr) else null;
}
/// Bytes of the .eh_frame section, or null when the image has none.
pub fn getEhFrameData(self: *const Self) ?[]const u8 {
    return if (self.eh_frame) |shdr| self.getShdrData(shdr) else null;
}
/// Linear search of the section header table for a section whose name
/// (looked up in the section-header string table) matches `name` exactly.
pub fn getShdrByName(self: *const Self, name: []const u8) ?std.elf.Elf64_Shdr {
    for (self.getShdrs()) |shdr| {
        if (std.mem.eql(u8, self.getShString(shdr.sh_name), name)) {
            return shdr;
        }
    }
    return null;
}
/// Returns the section header table as a slice view into `data`, located
/// via e_shoff/e_shnum from the file header. NOTE(review): assumes e_shoff
/// and e_shnum are in-bounds and suitably aligned for this image; no
/// validation happens here.
fn getShdrs(self: *const Self) []const std.elf.Elf64_Shdr {
    const shdrs = @as(
        [*]const std.elf.Elf64_Shdr,
        @ptrCast(@alignCast(self.data.ptr + self.header.e_shoff)),
    )[0..self.header.e_shnum];
    return shdrs;
}
/// Returns the file bytes described by `shdr` (sh_offset..sh_offset+sh_size)
/// as a view into `data`. NOTE(review): for SHT_NOBITS sections sh_offset
/// does not describe file contents — callers should only pass sections that
/// occupy file bytes.
fn getShdrData(self: *const Self, shdr: std.elf.Elf64_Shdr) []const u8 {
    return self.data[shdr.sh_offset..][0..shdr.sh_size];
}
/// Returns the null-terminated string at `off` inside the section-header
/// string table (the section indexed by e_shstrndx). Asserts `off` is in
/// range.
fn getShString(self: *const Self, off: u32) []const u8 {
    const shdr = self.getShdrs()[self.header.e_shstrndx];
    const shstrtab = self.getShdrData(shdr);
    std.debug.assert(off < shstrtab.len);
    return std.mem.sliceTo(@as([*:0]const u8, @ptrCast(shstrtab.ptr + off)), 0);
}
/// Maps the header's e_machine to a std.Target.Cpu.Arch, or null when the
/// machine type has no corresponding arch.
pub fn getArch(self: *const Self) ?std.Target.Cpu.Arch {
    return self.header.e_machine.toTargetCpuArch();
}
/// Returns the null-terminated string at `off` within .debug_str.
/// Asserts that the section exists (panics via `.?` otherwise) and that
/// `off` is in range.
pub fn getDwarfString(self: *const Self, off: u64) []const u8 {
    const debug_str = self.getDebugStringData().?;
    std.debug.assert(off < debug_str.len);
    return std.mem.sliceTo(@as([*:0]const u8, @ptrCast(debug_str.ptr + off)), 0);
}
| https://raw.githubusercontent.com/aapen/aapen/c02010f570ec1f69905afe607d2ed4080c2e8edb/tools/build-symtab/src/dwarf/Elf.zig |
const std = @import("std");
const fs = std.fs;
const print = std.debug.print;
// sauce: https://ziglang.cc/zig-cookbook/01-01-read-file-line-by-line.html
/// Reads `fileName` (relative to the current working directory) and prints
/// each line to stderr as "<line number>--<line text>", 1-based.
pub fn fileReader(fileName: []const u8) !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    const file = try fs.cwd().openFile(fileName, .{});
    defer file.close();

    // Wrap the file reader in a buffered reader.
    // Since it's usually faster to read a bunch of bytes at once.
    var buf_reader = std.io.bufferedReader(file.reader());
    const reader = buf_reader.reader();

    var line = std.ArrayList(u8).init(allocator);
    defer line.deinit();
    const writer = line.writer();
    var line_no: usize = 1;
    while (reader.streamUntilDelimiter(writer, '\n', null)) : (line_no += 1) {
        // Clear the line so we can reuse it.
        defer line.clearRetainingCapacity();
        print("{d}--{s}\n", .{ line_no, line.items });
    } else |err| switch (err) {
        error.EndOfStream => {
            // A final line without a trailing newline still has its bytes
            // buffered in `line` when EndOfStream is hit; the previous
            // version silently dropped it.
            if (line.items.len > 0) {
                print("{d}--{s}\n", .{ line_no, line.items });
            }
        },
        else => return err, // Propagate error
    }
}
| https://raw.githubusercontent.com/reillyjodonnell/zig-tokenizer/6b502ebbb1a5fdb31ab16ab0fb77f153ef7d30d2/src/file-reader.zig |
const std = @import("std");
const x = @import("./x.zig");
const common = @import("common.zig");
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = arena.allocator();
const window_width = 400;
const window_height = 400;
/// Connect to the X server, create a 400x400 window with background and
/// foreground graphics contexts, measure a reference glyph, map the
/// window, then loop handling server events. Returns 0 when the server
/// closes the connection cleanly, 1 on a fatal protocol error.
pub fn main() !u8 {
    try x.wsaStartup();
    const conn = try common.connect(allocator);
    defer std.posix.shutdown(conn.sock, .both) catch {};

    // Log the connection-setup payload (for debugging) and grab a
    // pointer to the first screen, which we render to below.
    const screen = blk: {
        const fixed = conn.setup.fixed();
        inline for (@typeInfo(@TypeOf(fixed.*)).Struct.fields) |field| {
            std.log.debug("{s}: {any}", .{field.name, @field(fixed, field.name)});
        }
        std.log.debug("vendor: {s}", .{try conn.setup.getVendorSlice(fixed.vendor_len)});
        const format_list_offset = x.ConnectSetup.getFormatListOffset(fixed.vendor_len);
        const format_list_limit = x.ConnectSetup.getFormatListLimit(format_list_offset, fixed.format_count);
        std.log.debug("fmt list off={} limit={}", .{format_list_offset, format_list_limit});
        const formats = try conn.setup.getFormatList(format_list_offset, format_list_limit);
        for (formats, 0..) |format, i| {
            std.log.debug("format[{}] depth={:3} bpp={:3} scanpad={:3}", .{i, format.depth, format.bits_per_pixel, format.scanline_pad});
        }
        const screen = conn.setup.getFirstScreenPtr(format_list_limit);
        inline for (@typeInfo(@TypeOf(screen.*)).Struct.fields) |field| {
            std.log.debug("SCREEN 0| {s}: {any}", .{field.name, @field(screen, field.name)});
        }
        break :blk screen;
    };

    // TODO: maybe need to call conn.setup.verify or something?

    // The server grants us a range of resource IDs starting at
    // resource_id_base; we hand them out sequentially below
    // (window, then the two GCs).
    const window_id = conn.setup.fixed().resource_id_base;
    {
        var msg_buf: [x.create_window.max_len]u8 = undefined;
        const len = x.create_window.serialize(&msg_buf, .{
            .window_id = window_id,
            .parent_window_id = screen.root,
            .x = 0, .y = 0,
            .width = window_width, .height = window_height,
            .border_width = 0, // TODO: what is this?
            .class = .input_output,
            .visual_id = screen.root_visual,
            .depth = 24, // sure I guess
        }, .{
            //            .bg_pixmap = .copy_from_parent,
            .bg_pixel = 0xaabbccdd,
            //            //.border_pixmap =
            //            .border_pixel = 0x01fa8ec9,
            //            .bit_gravity = .north_west,
            //            .win_gravity = .east,
            //            .backing_store = .when_mapped,
            //            .backing_planes = 0x1234,
            //            .backing_pixel = 0xbbeeeeff,
            //            .override_redirect = true,
            //            .save_under = true,
            // Events we want delivered to the loop below.
            .event_mask =
                x.event.key_press
                | x.event.key_release
                | x.event.button_press
                | x.event.button_release
                | x.event.enter_window
                | x.event.leave_window
                | x.event.pointer_motion
                //                | x.event.pointer_motion_hint WHAT THIS DO?
                //                | x.event.button1_motion  WHAT THIS DO?
                //                | x.event.button2_motion  WHAT THIS DO?
                //                | x.event.button3_motion  WHAT THIS DO?
                //                | x.event.button4_motion  WHAT THIS DO?
                //                | x.event.button5_motion  WHAT THIS DO?
                //                | x.event.button_motion  WHAT THIS DO?
                | x.event.keymap_state
                | x.event.exposure
                ,
            //            .dont_propagate = 1,
        });
        try conn.send(msg_buf[0..len]);
    }

    // Graphics context used (in principle) for background fills.
    const bg_gc_id = window_id + 1;
    {
        var msg_buf: [x.create_gc.max_len]u8 = undefined;
        const len = x.create_gc.serialize(&msg_buf, .{
            .gc_id = bg_gc_id,
            .drawable_id = window_id,
        }, .{
            .foreground = screen.black_pixel,
        });
        try conn.send(msg_buf[0..len]);
    }

    // Graphics context used for foreground drawing (the polyline).
    const fg_gc_id = window_id + 2;
    {
        var msg_buf: [x.create_gc.max_len]u8 = undefined;
        const len = x.create_gc.serialize(&msg_buf, .{
            .gc_id = fg_gc_id,
            .drawable_id = window_id,
        }, .{
            .background = screen.black_pixel,
            .foreground = 0xffaadd,
            //.line_width = 10,
        });
        try conn.send(msg_buf[0..len]);
    }

    // get some font information: ask the server to measure a single
    // 'm'; the reply is consumed below when building font_dims.
    {
        const text_literal = [_]u16 { 'm' };
        const text = x.Slice(u16, [*]const u16) { .ptr = &text_literal, .len = text_literal.len };
        var msg: [x.query_text_extents.getLen(text.len)]u8 = undefined;
        x.query_text_extents.serialize(&msg, fg_gc_id, text);
        try conn.send(&msg);
    }

    // Receive buffer for server messages (double-buffered so a message
    // split across reads is still contiguous in memory).
    const double_buf = try x.DoubleBuffer.init(
        std.mem.alignForward(usize, 1000, std.mem.page_size),
        .{ .memfd_name = "ZigX11DoubleBuffer" },
    );
    defer double_buf.deinit(); // not necessary but good to test
    std.log.info("read buffer capacity is {}", .{double_buf.half_len});
    var buf = double_buf.contiguousReadBuffer();

    // Parse the QueryTextExtents reply sent above into font metrics.
    const font_dims: FontDims = blk: {
        _ = try x.readOneMsg(conn.reader(), @alignCast(buf.nextReadBuffer()));
        switch (x.serverMsgTaggedUnion(@alignCast(buf.double_buffer_ptr))) {
            .reply => |msg_reply| {
                const msg: *x.ServerMsg.QueryTextExtents = @ptrCast(msg_reply);
                break :blk .{
                    .width = @intCast(msg.overall_width),
                    .height = @intCast(msg.font_ascent + msg.font_descent),
                    .font_left = @intCast(msg.overall_left),
                    .font_ascent = msg.font_ascent,
                };
            },
            else => |msg| {
                std.log.err("expected a reply but got {}", .{msg});
                return 1;
            },
        }
    };

    // Make the window visible; the server will follow up with an
    // expose event, which triggers the first render().
    {
        var msg: [x.map_window.len]u8 = undefined;
        x.map_window.serialize(&msg, window_id);
        try conn.send(&msg);
    }

    // Event loop: read from the socket, then drain every complete
    // message currently sitting in the buffer.
    while (true) {
        {
            const recv_buf = buf.nextReadBuffer();
            if (recv_buf.len == 0) {
                std.log.err("buffer size {} not big enough!", .{buf.half_len});
                return 1;
            }
            const len = try x.readSock(conn.sock, recv_buf, 0);
            if (len == 0) {
                std.log.info("X server connection closed", .{});
                return 0;
            }
            buf.reserve(len);
        }
        while (true) {
            const data = buf.nextReservedBuffer();
            // Every X11 server message is at least 32 bytes; stop when
            // we don't yet have a complete message.
            if (data.len < 32)
                break;
            const msg_len = x.parseMsgLen(data[0..32].*);
            if (data.len < msg_len)
                break;
            buf.release(msg_len);
            //buf.resetIfEmpty();
            switch (x.serverMsgTaggedUnion(@alignCast(data.ptr))) {
                .err => |msg| {
                    std.log.err("{}", .{msg});
                    return 1;
                },
                .reply => |msg| {
                    std.log.info("todo: handle a reply message {}", .{msg});
                    return error.TodoHandleReplyMessage;
                },
                .key_press => |msg| {
                    std.log.info("key_press: {}", .{msg.keycode});
                },
                .key_release => |msg| {
                    std.log.info("key_release: {}", .{msg.keycode});
                },
                .button_press => |msg| {
                    std.log.info("button_press: {}", .{msg});
                },
                .button_release => |msg| {
                    std.log.info("button_release: {}", .{msg});
                },
                .enter_notify => |msg| {
                    std.log.info("enter_window: {}", .{msg});
                },
                .leave_notify => |msg| {
                    std.log.info("leave_window: {}", .{msg});
                },
                .motion_notify => |msg| {
                    // too much logging
                    _ = msg;
                    //std.log.info("pointer_motion: {}", .{msg});
                },
                .keymap_notify => |msg| {
                    std.log.info("keymap_state: {}", .{msg});
                },
                .expose => |msg| {
                    std.log.info("expose: {}", .{msg});
                    try render(conn.sock, window_id, bg_gc_id, fg_gc_id, font_dims);
                },
                .mapping_notify => |msg| {
                    std.log.info("mapping_notify: {}", .{msg});
                },
                .no_exposure => |msg| std.debug.panic("unexpected no_exposure {}", .{msg}),
                .unhandled => |msg| {
                    std.log.info("todo: server msg {}", .{msg});
                    return error.UnhandledServerMsg;
                },
                .map_notify,
                .reparent_notify,
                .configure_notify,
                => unreachable, // did not register for these
            }
        }
    }
}
// Font metrics extracted from the QueryTextExtents reply in main().
// Currently only consumed by the commented-out text-drawing code in
// render().
const FontDims = struct {
    width: u8, // overall width in pixels of the measured glyph ('m')
    height: u8, // font_ascent + font_descent
    font_left: i16, // pixels to the left of the text basepoint
    font_ascent: i16, // pixels up from the text basepoint to the top of the text
};
/// Redraw the window: clear it, then draw a three-point polyline with
/// the foreground GC. `bg_gc_id` and `font_dims` are currently unused
/// except by the commented-out experiments kept below for reference.
fn render(sock: std.posix.socket_t, drawable_id: u32, bg_gc_id: u32, fg_gc_id: u32, font_dims: FontDims) !void {
    //    {
    //        var msg: [x.poly_fill_rectangle.getLen(1)]u8 = undefined;
    //        x.poly_fill_rectangle.serialize(&msg, .{
    //            .drawable_id = drawable_id,
    //            .gc_id = bg_gc_id,
    //        }, &[_]x.Rectangle {
    //            .{ .x = 100, .y = 100, .width = 200, .height = 200 },
    //        });
    //        try common.send(sock, &msg);
    //    }
    _ = bg_gc_id;
    {
        var msg: [x.clear_area.len]u8 = undefined;
        // NOTE(review): width/height of 0 appear intended to clear to
        // the window edge (per X11 ClearArea semantics) — i.e. clear
        // the whole window. Confirm against the x.clear_area impl.
        x.clear_area.serialize(&msg, false, drawable_id, .{
            .x = 0, .y = 0, .width = 0, .height = 0,
        });
        try common.send(sock, &msg);
    }
    _ = font_dims;
    {
        var msg: [x.poly_line.getLen(3)]u8 = undefined;
        x.poly_line.serialize(&msg, .{
            .coordinate_mode = .origin,
            .drawable_id = drawable_id,
            .gc_id = fg_gc_id,
        }, &[_]x.Point {
            .{ .x = 10, .y = 10 },
            .{ .x = 110, .y = 10 },
            .{ .x = 55, .y = 55 },
        });
        try common.send(sock, &msg);
    }
    //    {
    //        const text_literal: []const u8 = "Hello X!";
    //        const text = x.Slice(u8, [*]const u8) { .ptr = text_literal.ptr, .len = text_literal.len };
    //        var msg: [x.image_text8.getLen(text.len)]u8 = undefined;
    //
    //        const text_width = font_dims.width * text_literal.len;
    //
    //        x.image_text8.serialize(&msg, .{
    //            .drawable_id = drawable_id,
    //            .gc_id = fg_gc_id,
    //            .x = @divTrunc((window_width - @intCast(i16, text_width)), 2) + font_dims.font_left,
    //            .y = @divTrunc((window_height - @intCast(i16, font_dims.height)), 2) + font_dims.font_ascent,
    //            .text = text,
    //        });
    //        try common.send(sock, &msg);
    //    }
}
| https://raw.githubusercontent.com/marler8997/zigx/f09fd6fa5d593c759c0d9d35db4dfb5a150d366a/graphics.zig |
const std = @import("std");
const Token = @import("Token.zig");
const Error = @import("Error.zig");
const Parser = @import("Parser.zig");
const Source = @This();
// This source's own handle (index into the owning collection —
// presumably assigned by whatever loads sources; confirm at call site).
handle: Handle,
// File name and full file contents; owned by the arena passed to
// deinit() (freed only when that arena is provided).
name: []const u8,
text: []const u8,
// Tokens produced for this source; owned by `gpa`.
token_data: Token.List,

pub const Handle = u32;

/// Release this source's resources. Token storage is always freed via
/// `gpa`; `name` and `text` are freed only when `maybe_arena` is
/// provided (callers that keep the arena alive may pass null).
pub fn deinit(self: Source, gpa: std.mem.Allocator, maybe_arena: ?std.mem.Allocator) void {
    self.token_data.deinit(gpa);
    if (maybe_arena) |arena| {
        arena.free(self.name);
        arena.free(self.text);
    }
}
| https://raw.githubusercontent.com/bcrist/footlang/c540286e4bcf006690136728093466a947fc13b1/src/Source.zig |
const std = @import("std");
const allocator = std.heap.page_allocator;
extern fn yomo_observe_datatag(tag: u32) void;
extern fn yomo_context_tag() u32;
extern fn yomo_context_data(pointer: *const u8, size: u32) u32;
extern fn yomo_context_data_size() u32;
extern fn yomo_write(tag: u32, pointer: *const u8, length: usize) u32;
/// Entry point: only logs a startup line. The actual serverless-function
/// work happens in the exported yomo_* callbacks below, which are
/// invoked by the host runtime.
pub fn main() !void {
    std.log.info("yomo wasm sfn on zig", .{});
}
/// Host-invoked initialization hook. Returns 0 (no failure paths here).
export fn yomo_init() u32 {
    std.log.info("wasm zig sfn init", .{});
    return 0;
}
/// Host-invoked registration hook: declare which data tags this
/// function observes. Here we observe only tag 0x33 (and emit 0x34 in
/// yomo_handler below).
export fn yomo_observe_datatags() void {
    yomo_observe_datatag(0x33);
}
/// Host-invoked data handler: read the incoming payload for the
/// observed tag, uppercase it (ASCII), and write the result back with
/// tag 0x34. Allocation failures are logged and the event is dropped
/// (this export returns void, so errors cannot propagate to the host).
export fn yomo_handler() void {
    // load input data
    const tag = yomo_context_tag();
    const size: u32 = yomo_context_data_size();
    // Nothing to process — and `&input[0]` below would be out of
    // bounds on a zero-length allocation.
    if (size == 0) return;

    // The original code used `catch undefined`, which silently turned
    // an OutOfMemory error into a dereference of undefined memory.
    const input = allocator.alloc(u8, size) catch {
        std.log.err("yomo_handler: failed to allocate {d} bytes for input", .{size});
        return;
    };
    defer allocator.free(input);
    _ = yomo_context_data(&input[0], size);

    std.log.info("wasm zig sfn received {d} bytes with 0x{x}", .{ input.len, tag });

    // process app data: uppercase the ASCII payload
    const output = std.ascii.allocUpperString(allocator, input) catch {
        std.log.err("yomo_handler: failed to allocate {d} bytes for output", .{size});
        return;
    };
    defer allocator.free(output);

    // dump output data
    _ = yomo_write(0x34, &output[0], output.len);
}
| https://raw.githubusercontent.com/yomorun/yomo/ab56d6d9e007be0af15be0d0d2de67bb6207cd7c/example/7-wasm/sfn/zig/src/main.zig |
const hw = if (@import("BuildOptions").DISCOVERY) @import("discovery.zig") else @import("nucleo.zig");
const blinky = if (@import("BuildOptions").DISCOVERY) @import("examples/discovery_blinky.zig") else @import("examples/nucleo_blinky.zig");
/// Board entry point (presumably called from startup/reset code —
/// confirm against the linker/startup files): initialize the selected
/// board's hardware, then run its blinky example.
pub fn zig_entry() void {
    hw.systemInit();
    blinky.blinky();
}
| https://raw.githubusercontent.com/amitfr123/ZigStmBlinky/3f73f48e46a7f5f6d95d37e1ee72c79baccacd35/src/main.zig |
//
// We've absorbed a lot of information about the variations of types
// we can use in Zig. Roughly, in order we have:
//
// u8 single item
// *u8 single-item pointer
// []u8 slice (size known at runtime)
// [5]u8 array of 5 u8s
// [*]u8 many-item pointer (zero or more)
// enum {a, b} set of unique values a and b
// error {e, f} set of unique error values e and f
// struct {y: u8, z: i32} group of values y and z
// union(enum) {a: u8, b: i32} single value either u8 or i32
//
// Values of any of the above types can be assigned as "var" or "const"
// to allow or disallow changes (mutability) via the assigned name:
//
// const a: u8 = 5; // immutable
// var b: u8 = 5; // mutable
//
// We can also make error unions or optional types from any of
// the above:
//
// var a: E!u8 = 5; // can be u8 or error from set E
// var b: ?u8 = 5; // can be u8 or null
//
// Knowing all of this, maybe we can help out a local hermit. He made
// a little Zig program to help him plan his trips through the woods,
// but it has some mistakes.
//
// *************************************************************
// * A NOTE ABOUT THIS EXERCISE *
// * *
// * You do NOT have to read and understand every bit of this *
// * program. This is a very big example. Feel free to skim *
// * through it and then just focus on the few parts that are *
// * actually broken! *
// * *
// *************************************************************
//
const print = @import("std").debug.print;
// The grue is a nod to Zork.
const TripError = error{ Unreachable, EatenByAGrue };

// A location on the map: a name plus its outgoing paths.
const Place = struct {
    name: []const u8,
    // Assigned in main() rather than here: paths contain pointers to
    // places, so assigning them at declaration time would create a
    // dependency loop. That is also why the places below are `var`.
    paths: []const Path = undefined,
};

var a = Place{ .name = "Archer's Point" };
var b = Place{ .name = "Bridge" };
var c = Place{ .name = "Cottage" };
var d = Place{ .name = "Dogwood Grove" };
var e = Place{ .name = "East Pond" };
var f = Place{ .name = "Fox Pond" };
// The hermit's hand-drawn ASCII map
// +---------------------------------------------------+
// | * Archer's Point ~~~~ |
// | ~~~ ~~~~~~~~ |
// | ~~~| |~~~~~~~~~~~~ ~~~~~~~ |
// | Bridge ~~~~~~~~ |
// | ^ ^ ^ |
// | ^ ^ / \ |
// | ^ ^ ^ ^ |_| Cottage |
// | Dogwood Grove |
// | ^ <boat> |
// | ^ ^ ^ ^ ~~~~~~~~~~~~~ ^ ^ |
// | ^ ~~ East Pond ~~~ |
// | ^ ^ ^ ~~~~~~~~~~~~~~ |
// | ~~ ^ |
// | ^ ~~~ <-- short waterfall |
// | ^ ~~~~~ |
// | ~~~~~~~~~~~~~~~~~ |
// | ~~~~ Fox Pond ~~~~~~~ ^ ^ |
// | ^ ~~~~~~~~~~~~~~~ ^ ^ |
// | ~~~~~ |
// +---------------------------------------------------+
//
// We'll be reserving memory in our program based on the number of
// places on the map. Note that we do not have to specify the type of
// this value because we don't actually use it in our program once
// it's compiled! (Don't worry if this doesn't make sense yet.)
// Number of places on the map; sizes the notebook and trip arrays at
// compile time.
const place_count = 6;

// A path connects one place to another in ONE direction, with a
// distance (cost) as judged by the hermit.
const Path = struct {
    from: *const Place,
    to: *const Place,
    dist: u8,
};
// By the way, if the following code seems like a lot of tedious
// manual labor, you're right! One of Zig's killer features is letting
// us write code that runs at compile time to "automate" repetitive
// code (much like macros in other languages), but we haven't learned
// how to do that yet!
// Hand-written adjacency lists, one array of outgoing paths per place.
// (Comptime generation could build these, but the data is small enough
// to keep explicit.) Note the waterfall makes East Pond -> Fox Pond
// one-way: f_paths has no return path to e.
const a_paths = [_]Path{
    Path{
        .from = &a, // from: Archer's Point
        .to = &b, //   to: Bridge
        .dist = 2,
    },
};

const b_paths = [_]Path{
    Path{
        .from = &b, // from: Bridge
        .to = &a, //   to: Archer's Point
        .dist = 2,
    },

    Path{
        .from = &b, // from: Bridge
        .to = &d, //   to: Dogwood Grove
        .dist = 1,
    },
};

const c_paths = [_]Path{
    Path{
        .from = &c, // from: Cottage
        .to = &d, //   to: Dogwood Grove
        .dist = 3,
    },

    Path{
        .from = &c, // from: Cottage
        .to = &e, //   to: East Pond
        .dist = 2,
    },
};

const d_paths = [_]Path{
    Path{
        .from = &d, // from: Dogwood Grove
        .to = &b, //   to: Bridge
        .dist = 1,
    },

    Path{
        .from = &d, // from: Dogwood Grove
        .to = &c, //   to: Cottage
        .dist = 3,
    },

    Path{
        .from = &d, // from: Dogwood Grove
        .to = &f, //   to: Fox Pond
        .dist = 7,
    },
};

const e_paths = [_]Path{
    Path{
        .from = &e, // from: East Pond
        .to = &c, //   to: Cottage
        .dist = 2,
    },

    Path{
        .from = &e, // from: East Pond
        .to = &f, //   to: Fox Pond
        .dist = 1, // (one-way down a short waterfall!)
    },
};

const f_paths = [_]Path{
    Path{
        .from = &f, // from: Fox Pond
        .to = &d, //   to: Dogwood Grove
        .dist = 7,
    },
};
// Once we've plotted the best course through the woods, we'll make a
// "trip" out of it. A trip is a series of Places connected by Paths.
// We use a TripItem union to allow both Places and Paths to be in the
// same array.
// A trip alternates Places and the Paths between them; this tagged
// union lets both kinds of item share a single array.
const TripItem = union(enum) {
    place: *const Place,
    path: *const Path,

    /// Print one trip item: places by name, paths as "--<dist>->".
    fn printMe(self: TripItem) void {
        switch (self) {
            .place => |place| print("{s}", .{place.name}),
            .path => |path| print("--{}->", .{path.dist}),
        }
    }
};
// The Hermit's Notebook is where all the magic happens. A notebook
// entry is a Place discovered on the map along with the Path taken to
// get there and the distance to reach it from the start point. If we
// find a better Path to reach a Place (shorter distance), we update the
// entry. Entries also serve as a "todo" list which is how we keep
// track of which paths to explore next.
const NotebookEntry = struct {
    // The discovered place this entry describes.
    place: *const Place,
    // Where we were, and which path we took, when we found the best
    // route to `place`. Both are null only for the starting entry.
    coming_from: ?*const Place,
    via_path: ?*const Path,
    // Best-known total distance from the start to `place`.
    dist_to_reach: u16,
};
// +------------------------------------------------+
// | ~ Hermit's Notebook ~ |
// +---+----------------+----------------+----------+
// | | Place | From | Distance |
// +---+----------------+----------------+----------+
// | 0 | Archer's Point | null | 0 |
// | 1 | Bridge | Archer's Point | 2 | < next_entry
// | 2 | Dogwood Grove | Bridge | 1 |
// | 3 | | | | < end_of_entries
// | ... |
// +---+----------------+----------------+----------+
//
const HermitsNotebook = struct {
    // Discovered places; the `**` repetition initializes every slot to
    // null without listing them one by one.
    entries: [place_count]?NotebookEntry = .{null} ** place_count,

    // Index of the next entry to explore — the notebook doubles as a
    // FIFO "todo" queue, which is what makes the search breadth-first.
    next_entry: u8 = 0,

    // Marks the start of unused space in `entries`.
    end_of_entries: u8 = 0,

    // Find the entry for `place`, or null if it hasn't been discovered.
    fn getEntry(self: *HermitsNotebook, place: *const Place) ?*NotebookEntry {
        for (&self.entries, 0..) |*entry, i| {
            if (i >= self.end_of_entries) break;
            // `entry` is a *?NotebookEntry; dereference it, unwrap the
            // optional (safe: every slot below end_of_entries is
            // non-null), and return the payload's address as the
            // ?*NotebookEntry this function promises.
            if (place == entry.*.?.place) return &entry.*.?;
        }
        return null;
    }

    // Record a note. A new place is appended to the notebook (which
    // also enqueues it for exploration); a known place is overwritten
    // only if this route is shorter than the one already noted.
    fn checkNote(self: *HermitsNotebook, note: NotebookEntry) void {
        var existing_entry = self.getEntry(note.place);
        if (existing_entry == null) {
            self.entries[self.end_of_entries] = note;
            self.end_of_entries += 1;
        } else if (note.dist_to_reach < existing_entry.?.dist_to_reach) {
            existing_entry.?.* = note;
        }
    }

    // True while unexplored entries remain in the "todo" queue.
    fn hasNextEntry(self: *HermitsNotebook) bool {
        return self.next_entry < self.end_of_entries;
    }

    // Pop the next entry off the "todo" queue.
    fn getNextEntry(self: *HermitsNotebook) *const NotebookEntry {
        defer self.next_entry += 1; // Increment after getting entry
        return &self.entries[self.next_entry].?;
    }

    // After the search completes, collect the route to `dest` into
    // `trip` — caller-owned, so we never return a pointer to stack
    // memory — by walking coming_from pointers back to the start.
    // Items land in REVERSE order, alternating Place, Path, Place, ...
    // which is why the index advances by two per iteration.
    fn getTripTo(self: *HermitsNotebook, trip: []?TripItem, dest: *Place) TripError!void {
        const destination_entry = self.getEntry(dest);

        // No entry means the destination was never reached. (Cannot
        // actually happen on this map, where every place is reachable.)
        if (destination_entry == null) {
            return TripError.Unreachable;
        }

        var current_entry = destination_entry.?;
        var i: u8 = 0;

        while (true) : (i += 2) {
            trip[i] = TripItem{ .place = current_entry.place };

            // An entry "coming from" nowhere is the start; we're done.
            if (current_entry.coming_from == null) break;

            trip[i + 1] = TripItem{ .path = current_entry.via_path.? };

            // Failing to find the entry we came from would mean the
            // notebook is internally inconsistent — should never
            // happen, but fail loudly rather than dereference null.
            const previous_entry = self.getEntry(current_entry.coming_from.?);
            if (previous_entry == null) return TripError.EatenByAGrue;
            current_entry = previous_entry.?;
        }
    }
};
/// Run the shortest-path search from Archer's Point to Fox Pond and
/// print the resulting trip. (Change `start`/`destination` to plan a
/// different trip.)
pub fn main() void {
    const start = &a; // Archer's Point
    const destination = &f; // Fox Pond

    // Hook each Place up to its outgoing paths. Done at runtime rather
    // than in the declarations to avoid the Place<->Path dependency
    // loop noted above.
    a.paths = a_paths[0..];
    b.paths = b_paths[0..];
    c.paths = c_paths[0..];
    d.paths = d_paths[0..];
    e.paths = e_paths[0..];
    f.paths = f_paths[0..];

    // Seed the notebook with the start entry: no origin, distance 0.
    var notebook = HermitsNotebook{};
    var working_note = NotebookEntry{
        .place = start,
        .coming_from = null,
        .via_path = null,
        .dist_to_reach = 0,
    };
    notebook.checkNote(working_note);

    // Breadth-first search: process notebook entries in FIFO order
    // until every reachable place has been explored.
    while (notebook.hasNextEntry()) {
        var place_entry = notebook.getNextEntry();

        // Note every place reachable from here with its cumulative
        // distance from the start; checkNote() keeps only the
        // shortest route per place.
        for (place_entry.place.paths) |*path| {
            working_note = NotebookEntry{
                .place = path.to,
                .coming_from = place_entry.place,
                .via_path = path,
                .dist_to_reach = place_entry.dist_to_reach + path.dist,
            };
            notebook.checkNote(working_note);
        }
    }

    // Worst case a trip visits every place, alternating place/path —
    // hence place_count * 2 slots. main() owns this memory so that
    // getTripTo never hands back a pointer into a dead stack frame.
    var trip = [_]?TripItem{null} ** (place_count * 2);

    notebook.getTripTo(trip[0..], destination) catch |err| {
        print("Oh no! {}\n", .{err});
        return;
    };

    printTrip(trip[0..]);
}
// Trips are stored destination-first with trailing nulls, so walk the
// array from the back, skipping nulls, to print start -> destination.
fn printTrip(trip: []?TripItem) void {
    // trip.len is a usize; narrow it with @intCast so the index type
    // matches the u8 arithmetic used while building the trip.
    var remaining: u8 = @intCast(trip.len);
    while (remaining > 0) {
        remaining -= 1;
        const item = trip[remaining] orelse continue;
        item.printMe();
    }
    print("\n", .{});
}
// Going deeper:
//
// In computer science terms, our map places are "nodes" or "vertices" and
// the paths are "edges". Together, they form a "weighted, directed
// graph". It is "weighted" because each path has a distance (also
// known as a "cost"). It is "directed" because each path goes FROM
// one place TO another place (undirected graphs allow you to travel
// on an edge in either direction).
//
// Since we append new notebook entries at the end of the list and
// then explore each sequentially from the beginning (like a "todo"
// list), we are treating the notebook as a "First In, First Out"
// (FIFO) queue.
//
// Since we examine all closest paths first before trying further ones
// (thanks to the "todo" queue), we are performing a "Breadth-First
// Search" (BFS).
//
// By tracking "lowest cost" paths, we can also say that we're
// performing a "least-cost search".
//
// Even more specifically, the Hermit's Notebook most closely
// resembles the Shortest Path Faster Algorithm (SPFA), attributed to
// Edward F. Moore. By replacing our simple FIFO queue with a
// "priority queue", we would basically have Dijkstra's algorithm. A
// priority queue retrieves items sorted by "weight" (in our case, it
// would keep the paths with the shortest distance at the front of the
// queue). Dijkstra's algorithm is more efficient because longer paths
// can be eliminated more quickly. (Work it out on paper to see why!)
| https://raw.githubusercontent.com/mvolkmann/ziglings-solutions/9375498a857a6aa9278f2d308dd11222efe8f4a1/exercises/058_quiz7.zig |
//! Cairo surface PDF backend.
//! The PDF surface is used to render cairo graphics to Adobe PDF files and is a
//! multi-page vector surface backend.
//! https://www.cairographics.org/manual/cairo-PDF-Surfaces.html
const c = @import("../c.zig");
const Error = @import("../utilities/error_handling.zig").Error;
const enums = @import("../enums.zig");
const PdfMetadata = enums.PdfMetadata;
/// https://www.cairographics.org/manual/cairo-PDF-Surfaces.html#cairo-pdf-surface-add-outline
/// Not yet implemented; panics if called.
pub fn addOutline() void {
    @panic("TODO: to be implemented");
}
/// https://www.cairographics.org/manual/cairo-PDF-Surfaces.html#cairo-pdf-surface-create-for-stream
/// Not yet implemented; panics if called.
pub fn createForStream() void {
    @panic("TODO: to be implemented");
}
/// Create a PDF surface of the specified size in points to be written to filename.
/// https://www.cairographics.org/manual/cairo-PDF-Surfaces.html#cairo-pdf-surface-create
/// NOTE(review): cairo expects a NUL-terminated C string, but `.ptr` on
/// a []const u8 slice only points at NUL-terminated data when the
/// argument is a string literal. Consider `[:0]const u8` — confirm
/// callers before changing the signature.
pub fn create(comptime filename: []const u8, width_pt: f64, height_pt: f64) !*c.struct__cairo_surface {
    // cairo_pdf_surface_create always returns a valid pointer, but it will
    // return a pointer to a "nil" surface if an error such as out of memory
    // occurs. You can use cairo_surface_status() to check for this.
    return c.cairo_pdf_surface_create(filename.ptr, width_pt, height_pt).?;
}
/// https://www.cairographics.org/manual/cairo-PDF-Surfaces.html#cairo-pdf-get-versions
/// Not yet implemented; panics if called.
pub fn getVersions() void {
    @panic("TODO: to be implemented");
}
/// https://www.cairographics.org/manual/cairo-PDF-Surfaces.html#cairo-pdf-surface-restrict-to-version
/// Not yet implemented; panics if called.
pub fn restrictToVersion() void {
    @panic("TODO: to be implemented");
}
/// https://www.cairographics.org/manual/cairo-PDF-Surfaces.html#cairo-pdf-surface-set-metadata
/// Set PDF document metadata (title, author, ...) on the surface.
/// NOTE(review): `char.ptr` assumes the slice's backing data is
/// NUL-terminated (cairo takes a C string); consider [:0]const u8.
pub fn setMetadata(surface: *c.struct__cairo_surface, metadata: PdfMetadata, char: []const u8) void {
    c.cairo_pdf_surface_set_metadata(surface, metadata.toCairoEnum(), char.ptr);
}
/// https://www.cairographics.org/manual/cairo-PDF-Surfaces.html#cairo-pdf-surface-set-page-label
/// Set the label for the current page.
/// NOTE(review): `char.ptr` assumes the slice's backing data is
/// NUL-terminated (cairo takes a C string); consider [:0]const u8.
pub fn setPageLabel(surface: *c.struct__cairo_surface, char: []const u8) void {
    c.cairo_pdf_surface_set_page_label(surface, char.ptr);
}
/// https://www.cairographics.org/manual/cairo-PDF-Surfaces.html#cairo-pdf-surface-set-size
/// Change the page size (in points) for subsequent pages of the PDF.
pub fn setSize(surface: *c.struct__cairo_surface, width_pt: f64, height_pt: f64) void {
    c.cairo_pdf_surface_set_size(surface, width_pt, height_pt);
}
/// https://www.cairographics.org/manual/cairo-PDF-Surfaces.html#cairo-pdf-surface-set-thumbnail-size
/// Not yet implemented; panics if called.
pub fn setThumbnailSize() void {
    @panic("TODO: to be implemented");
}
/// https://www.cairographics.org/manual/cairo-PDF-Surfaces.html#cairo-pdf-version-to-string
/// Not yet implemented; panics if called.
pub fn versionToString() void {
    @panic("TODO: to be implemented");
}
| https://raw.githubusercontent.com/jackdbd/zig-cairo/c2736f512b5a48a5ad38a15973b2879166da1d8a/src/surfaces/pdf.zig |
const std = @import("std");
const builtin = @import("builtin");
const Pkg = std.build.Pkg;
// Vendored Zig package dependencies (pre-package-manager style).
const pkgs = struct {
    // s2s: serialization library, vendored under deps/.
    const s2s = Pkg{
        .name = "s2s",
        .source = .{ .path = "./deps/s2s/s2s.zig" },
        .dependencies = &[_]Pkg{},
    };
};

// Location of the vendored LMDB C sources compiled below.
const LMDB_PATH = "./deps/lmdb/libraries/liblmdb/";
/// Build script (legacy ~0.10-era std.build API): compiles the vendored
/// LMDB C sources into a static library, links it into the recblock
/// executable (named after the target triple), and wires up "run" and
/// "test" steps.
pub fn build(b: *std.build.Builder) void {
    // Standard target options allows the person running `zig build` to choose
    // what target to build for. Here we do not override the defaults, which
    // means any target is allowed, and the default is native. Other options
    // for restricting supported target set are available.
    const target = b.standardTargetOptions(.{});

    // Standard release options allow the person running `zig build` to select
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
    const mode = b.standardReleaseOptions();

    // Add lmdb library for embedded key/value store.
    const cflags = [_][]const u8{ "-pthread", "-std=c2x" };
    const lmdb_sources = [_][]const u8{ LMDB_PATH ++ "mdb.c", LMDB_PATH ++ "midl.c" };

    const lmdb = b.addStaticLibrary("lmdb", null);
    lmdb.setTarget(target);
    lmdb.setBuildMode(mode);
    lmdb.addCSourceFiles(&lmdb_sources, &cflags);
    lmdb.linkLibC();
    lmdb.install();

    // Executable is named "recblock-<target triple>".
    const target_name = target.allocDescription(b.allocator) catch unreachable;
    const exe_name = std.fmt.allocPrint(b.allocator, "{[program]s}-{[target]s}", .{ .program = "recblock", .target = target_name }) catch unreachable;

    const exe = b.addExecutable(exe_name, "src/main.zig");
    exe.setTarget(target);
    exe.setBuildMode(mode);
    exe.addPackage(pkgs.s2s);
    exe.linkLibrary(lmdb);
    exe.addIncludePath(LMDB_PATH);
    exe.install();

    // Release builds: enable size/speed options on both artifacts.
    switch (mode) {
        .Debug => {},
        else => {
            lmdb.link_function_sections = true;
            lmdb.red_zone = true;
            lmdb.want_lto = true;

            exe.link_function_sections = true;
            exe.red_zone = true;
            exe.want_lto = true;
            //FIXME: cross compiling for windows with strip run into issues
            //Wait for self-hosting and if problem still persist,open an issue to track this
            if (!target.isWindows()) {
                lmdb.strip = true;
                exe.strip = true;
            }
        },
    }

    const run_cmd = exe.run();
    run_cmd.step.dependOn(b.getInstallStep());
    if (b.args) |args| {
        run_cmd.addArgs(args);
    }

    const run_step = b.step("run", "Run the app");
    run_step.dependOn(&lmdb.step);
    run_step.dependOn(&run_cmd.step);

    const exe_tests = b.addTest("src/main.zig");
    exe_tests.setTarget(target);
    exe_tests.setBuildMode(mode);
    exe_tests.addPackage(pkgs.s2s);
    exe_tests.linkLibrary(lmdb);
    exe_tests.addIncludePath(LMDB_PATH);

    const test_step = b.step("test", "Run unit tests");
    test_step.dependOn(&lmdb.step);
    test_step.dependOn(&exe_tests.step);
}
| https://raw.githubusercontent.com/Ultra-Code/recblock/20d8a3aea30ea312df21509ae1d69a6e3bbbe116/build.zig |
const mtr = @import("package.zig");
const std = @import("std");
// Identifier for a buffer within the context.
pub const Idx = u64;

// Parameters for constructing a buffer.
pub const ConstructInfo = struct {
    // Byte offset and length — presumably within the backing heap
    // region (see Primitive.allocatedHeapRegion); confirm at call site.
    offset : u64, length : u64,
    usage : mtr.buffer.Usage,
    queueSharing : mtr.queue.SharingUsage,
    // Debug/display label for the buffer.
    label : [:0] const u8,
};
// describes in what contexts the buffer may be accessed for both reading &
// writing
// transfer allows buffers to copy regions of their memory to others
// buffer describes in what context the buffer will be used
//
// Flag set declaring how a buffer may be accessed (transfer source /
// destination, uniform/storage/etc. binding contexts).
pub const Usage = packed struct {
    transferSrc : bool = false,
    transferDst : bool = false,
    transferSrcDst : bool = false,
    bufferUniform : bool = false,
    bufferStorage : bool = false,
    bufferAccelerationStructure : bool = false,
    bufferIndirect : bool = false,
    // no vertex / buffer bit b/c software rasterized; the rasterizer does not
    // use indices/vertices as traditional pipeline would
};
// Any piece of information that exists as an array of elements must be
// referenced to as a buffer. The buffer describes both where this data
// lives, as well as describes it as a form of metadata. An array of elements
// should be contained in an array as large as possible, where allocation
// and deallocation of the entire array is a valid operation.
pub const Primitive = struct {
    // Heap region this buffer's storage was allocated from, plus the
    // byte offset/length of the buffer within it.
    allocatedHeapRegion : mtr.heap.RegionIdx,
    offset : u64,
    length : u64,
    usage : mtr.buffer.Usage,
    queueSharing : mtr.queue.SharingUsage,
    label : [:0] const u8,
    // This buffer's own index within the owning context.
    contextIdx : mtr.buffer.Idx,

    /// Custom std.json serialization: writes every struct field, then
    /// appends the buffer's contents via the owning context's
    /// dumpBufferToWriter before closing the object.
    pub fn jsonStringify(
        self : @This(),
        options : std.json.StringifyOptions,
        outStream : anytype
    ) @TypeOf(outStream).Error ! void {
        try outStream.writeByte('{');
        const structInfo = @typeInfo(@This()).Struct;
        inline for (structInfo.fields) |Field| {
            try mtr.util.json.stringifyVariable(
                Field.name, @field(self, Field.name), options, outStream
            );
            try outStream.writeByte(',');
        }
        try mtr.Context.dumpBufferToWriter(self.contextIdx, outStream);
        try outStream.writeByte('}');
    }
};
// a view into a buffer
/// A sub-range (offset + length, in bytes) into an existing buffer.
pub const View = struct {
  buffer: mtr.buffer.Idx,
  offset: u64,
  length: u64,
};
| https://raw.githubusercontent.com/AODQ/ztoadz/5af950d72deb78b383b0c1c748bf9a3d675b381b/src/mtr/buffer.zig |
const std = @import("std");
const expect = std.testing.expect;
const ArrayList = std.ArrayList;
const Allocator = std.mem.Allocator;
const test_allocator = std.testing.allocator;
const Solver = struct {
    allocator: Allocator,
    // Crate stacks, one ArrayList per column; index 0 is the top crate.
    values: []ArrayList(u8) = undefined,
    // Parsed moves as {count, from-column, to-column}; columns are
    // zero-based after parsing.
    instructions: [][3]u8 = undefined,
    // Reads the puzzle input file: first the crate diagram (until the
    // column-number line), skips the blank separator, then the move list.
    // Lines are assumed to fit in 50 bytes.
    fn parseInput(self: *Solver, input: []const u8) !void {
        var file = try std.fs.cwd().openFile(input, .{});
        defer file.close();
        var buf_reader = std.io.bufferedReader(file.reader());
        var in_stream = buf_reader.reader();
        var buf: [50]u8 = undefined;
        var list = ArrayList(ArrayList(u8)).init(self.allocator);
        defer list.deinit();
        while (try in_stream.readUntilDelimiterOrEof(&buf, '\n')) |line| {
            // The line of column numbers ("1 2 3 ...") ends the diagram.
            if (std.ascii.isDigit(line[1])) {
                break;
            }
            // Crate letters sit at columns 1, 5, 9, ... of the diagram.
            var index: usize = 1;
            while (index < line.len) : (index += 4) {
                const columnIndex = index / 4;
                if (list.items.len <= columnIndex) {
                    try list.append(ArrayList(u8).init(self.allocator));
                }
                const symbol = line[index];
                if (symbol == ' ') {
                    continue;
                }
                if (!std.ascii.isAlphabetic(symbol)) {
                    unreachable;
                }
                try list.items[columnIndex].append(symbol);
            }
        }
        // Skip the blank line between the diagram and the instructions.
        _ = try in_stream.readUntilDelimiterOrEof(&buf, '\n');
        var instructions = ArrayList([3]u8).init(self.allocator);
        defer instructions.deinit();
        while (try in_stream.readUntilDelimiterOrEof(&buf, '\n')) |line| {
            // Format: "move <count> from <from> to <to>"; every other
            // token is a keyword and is discarded.
            var spliterator = std.mem.split(u8, line, " ");
            _ = spliterator.next();
            const count = try std.fmt.parseInt(u8, spliterator.next().?, 10);
            _ = spliterator.next();
            const from = try std.fmt.parseInt(u8, spliterator.next().?, 10) - 1;
            _ = spliterator.next();
            const to = try std.fmt.parseInt(u8, spliterator.next().?, 10) - 1;
            try instructions.append([_]u8{ count, from, to });
        }
        self.values = list.toOwnedSlice();
        self.instructions = instructions.toOwnedSlice();
    }
    // Deep-copies the parsed stacks so partOne and partTwo can each mutate
    // their own copy. Caller frees the slice and deinits each column.
    fn cloneValues(self: *Solver) ![]ArrayList(u8) {
        var list = ArrayList(ArrayList(u8)).init(self.allocator);
        defer list.deinit();
        for (self.values) |_, index| {
            try list.append(try self.values[index].clone());
        }
        return list.toOwnedSlice();
    }
    // Applies every instruction moving crates one at a time (order of the
    // moved crates is reversed) and returns the top crate of each column.
    // Caller owns the returned slice.
    fn partOne(self: *Solver) ![]u8 {
        var values = try self.cloneValues();
        defer self.allocator.free(values);
        defer {
            for (values) |column| {
                column.deinit();
            }
        }
        var count: u8 = 0;
        var from: u8 = 0;
        var to: u8 = 0;
        var i: usize = 0;
        for (self.instructions) |instruction| {
            count = instruction[0];
            from = instruction[1];
            to = instruction[2];
            i = 0;
            while (i < count) : (i += 1) {
                // Pop the top of `from` and push it on top of `to`.
                const symbol = values[from].orderedRemove(0);
                try values[to].insert(0, symbol);
            }
        }
        var fuckingString: []u8 = try self.allocator.alloc(u8, values.len);
        for (values) |column, index| {
            fuckingString[index] = column.items[0];
        }
        return fuckingString;
    }
    // Same as partOne, but each batch of crates is moved through a staging
    // list so the batch's order is preserved. Caller owns the returned
    // slice.
    fn partTwo(self: *Solver) ![]u8 {
        var values = try self.cloneValues();
        defer self.allocator.free(values);
        defer {
            for (values) |column| {
                column.deinit();
            }
        }
        var forpopin = ArrayList(u8).init(self.allocator);
        defer forpopin.deinit();
        var count: u8 = 0;
        var from: u8 = 0;
        var to: u8 = 0;
        var i: usize = 0;
        for (self.instructions) |instruction| {
            count = instruction[0];
            from = instruction[1];
            to = instruction[2];
            i = 0;
            // First pass reverses the batch into the staging list...
            while (i < count) : (i += 1) {
                const symbol = values[from].orderedRemove(0);
                try forpopin.insert(0, symbol);
            }
            i = 0;
            // ...second pass reverses it again onto the target column, so
            // the original order is restored.
            while (i < count) : (i += 1) {
                const symbol = forpopin.orderedRemove(0);
                try values[to].insert(0, symbol);
            }
            forpopin.clearRetainingCapacity();
        }
        var fuckingString: []u8 = try self.allocator.alloc(u8, values.len);
        for (values) |column, index| {
            fuckingString[index] = column.items[0];
        }
        return fuckingString;
    }
    // Returns {partOne answer, partTwo answer}; frees the parsed state, so
    // it may only be called once per parseInput. Caller owns both slices.
    pub fn both(self: *Solver) ![2][]u8 {
        defer self.allocator.free(self.values);
        defer {
            for (self.values) |column| {
                column.deinit();
            }
        }
        defer self.allocator.free(self.instructions);
        return [2][]u8{ try self.partOne(), try self.partTwo() };
    }
};
pub fn main() !void {
    // All solver allocations go through one arena and are released in a
    // single shot. The original never deinitialized the arena; the
    // `defer` below fixes that leak (the per-slice frees further down are
    // then redundant but harmless under an arena).
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    var solver = Solver{ .allocator = arena.allocator() };
    try solver.parseInput("input");
    const stdout = std.io.getStdOut().writer();
    const both = try solver.both();
    defer solver.allocator.free(both[0]);
    defer solver.allocator.free(both[1]);
    try stdout.print("{s}, {s}\n", .{ both[0], both[1] });
}
test "part 1 test" {
var solver = Solver{ .allocator = test_allocator };
try solver.parseInput("test");
const both = try solver.both();
defer test_allocator.free(both[0]);
defer test_allocator.free(both[1]);
try expect(std.mem.eql(u8, both[0], "CMZ"));
}
test "part 1 full" {
var solver = Solver{ .allocator = test_allocator };
try solver.parseInput("input");
const both = try solver.both();
defer test_allocator.free(both[0]);
defer test_allocator.free(both[1]);
try expect(std.mem.eql(u8, both[0], "ZBDRNPMVH"));
}
test "part 2 test" {
var solver = Solver{ .allocator = test_allocator };
try solver.parseInput("test");
const both = try solver.both();
defer test_allocator.free(both[0]);
defer test_allocator.free(both[1]);
try expect(std.mem.eql(u8, both[1], "MCD"));
}
test "part 2 full" {
var solver = Solver{ .allocator = test_allocator };
try solver.parseInput("input");
const both = try solver.both();
defer test_allocator.free(both[0]);
defer test_allocator.free(both[1]);
try expect(std.mem.eql(u8, both[1], "WDLPFNNNB"));
}
| https://raw.githubusercontent.com/graynk/aoc2022/b7da1f3b71ec6684aa902659da2bacec621ff125/day5/src/main.zig |
const std = @import("std");
// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
pub fn build(b: *std.Build) void {
    // Let the `zig build` invoker choose the compilation target; the
    // default is the host machine.
    const target = b.standardTargetOptions(.{});

    // Let the invoker choose Debug/ReleaseSafe/ReleaseFast/ReleaseSmall.
    const optimize = b.standardOptimizeOption(.{});

    // Static library built from the package's root source file.
    const static_lib = b.addStaticLibrary(.{
        .name = "Quaternion",
        .root_source_file = .{ .path = "src/main.zig" },
        .target = target,
        .optimize = optimize,
    });

    // Install the library when the default `install` step runs.
    b.installArtifact(static_lib);

    // Unit-test executable over the same root file; building it does not
    // run it.
    const unit_tests = b.addTest(.{
        .root_source_file = .{ .path = "src/main.zig" },
        .target = target,
        .optimize = optimize,
    });
    const run_unit_tests = b.addRunArtifact(unit_tests);

    // Expose `zig build test` to actually execute the unit tests.
    const test_step = b.step("test", "Run library tests");
    test_step.dependOn(&run_unit_tests.step);
}
| https://raw.githubusercontent.com/quantumshiro/zion/154c60fb3e33e9bd749f8c5bca8116bdaf74c9aa/build.zig |
const std = @import("std");
const mem = std.mem;
const Allocator = std.mem.Allocator;
// Similar to KeyValue with two important differences
// 1 - We don't need to normalize (i.e. lowercase) the names, because they're
// statically defined in code, and presumably, if the param is called "id"
// then the developer will also fetch it as "id"
// 2 - This is populated from Router, and the way router works is that it knows
// the values before it knows the names. The addValue and addNames
// methods reflect how Router uses this.
pub const Params = struct {
    // Number of pairs currently stored; names/values beyond `len` are stale.
    len: usize,
    names: [][]const u8,
    values: [][]const u8,

    const Self = @This();

    /// Allocates storage for up to `max` name/value pairs.
    /// Caller must release with `deinit`, passing the same allocator.
    pub fn init(allocator: Allocator, max: usize) !Self {
        const names = try allocator.alloc([]const u8, max);
        // Fix: free `names` if the second allocation fails; the original
        // leaked it on this error path.
        errdefer allocator.free(names);
        const values = try allocator.alloc([]const u8, max);
        return Self{
            .len = 0,
            .names = names,
            .values = values,
        };
    }

    pub fn deinit(self: *Self, allocator: Allocator) void {
        allocator.free(self.names);
        allocator.free(self.values);
    }

    /// Records a value; silently ignored once capacity is full. Router
    /// discovers values before it knows the matching names (see addNames).
    pub fn addValue(self: *Self, value: []const u8) void {
        const len = self.len;
        const values = self.values;
        if (len == values.len) {
            return;
        }
        values[len] = value;
        self.len = len + 1;
    }

    // It should be impossible for names.len != self.len at this point, but it's
    // a bit dangerous to assume that since self.names is re-used between requests
    // and we don't want to leak anything, so I think enforcing a len of names.len
    // is safer, since names is generally statically defined based on routes setup.
    pub fn addNames(self: *Self, names: [][]const u8) void {
        std.debug.assert(names.len == self.len);
        const n = self.names;
        for (names, 0..) |name, i| {
            n[i] = name;
        }
        self.len = names.len;
    }

    /// Linear scan for `needle`; returns its value, or null if absent.
    /// (Parameter counts are small and fixed at init, hence no hashing.)
    pub fn get(self: *Self, needle: []const u8) ?[]const u8 {
        const names = self.names[0..self.len];
        for (names, 0..) |name, i| {
            if (mem.eql(u8, name, needle)) {
                return self.values[i];
            }
        }
        return null;
    }

    /// Logically clears all pairs; backing arrays are retained for reuse.
    pub fn reset(self: *Self) void {
        self.len = 0;
    }
};
const t = @import("t.zig");
test "params: get" {
var allocator = t.allocator;
var params = try Params.init(allocator, 10);
var names = [_][]const u8{"over", "duncan"};
params.addValue("9000");
params.addValue("idaho");
params.addNames(names[0..]);
try t.expectEqual(@as(?[]const u8, "9000"), params.get("over"));
try t.expectEqual(@as(?[]const u8, "idaho"), params.get("duncan"));
params.reset();
try t.expectEqual(@as(?[]const u8, null), params.get("over"));
try t.expectEqual(@as(?[]const u8, null), params.get("duncan"));
params.addValue("!9000!");
params.addNames(names[0..1]);
try t.expectEqual(@as(?[]const u8, "!9000!"), params.get("over"));
params.deinit(t.allocator);
}
| https://raw.githubusercontent.com/EdamAme-x/Precisock/e8613429c23b78acd00cdea5bb72daaeea253d41/src/params.zig |
//
// Helper functions to convert microsecond to system clock ticks,
// and keep track of executed ticks.
//
const assert = @import("std").debug.assert;
const Clock = @This();
freq_hz: i64,
ticks_to_run: i64 = 0,
overrun_ticks: i64 = 0,
/// Returns the number of system clock ticks corresponding to
/// `micro_seconds`, compensating for ticks overrun in the previous slice.
/// Always returns at least 1 tick.
pub fn ticksToRun(self: *Clock, micro_seconds: u32) u64 {
    assert(micro_seconds > 0);
    const requested: i64 = @divTrunc(self.freq_hz * micro_seconds, 1_000_000);
    self.ticks_to_run = @max(requested - self.overrun_ticks, 1);
    return @intCast(self.ticks_to_run);
}
/// Records how many ticks actually ran; any excess beyond the last
/// requested budget is carried over as overrun for the next slice.
pub fn ticksExecuted(self: *Clock, ticks_executed: u64) void {
    const executed: i64 = @intCast(ticks_executed);
    self.overrun_ticks = @max(executed - self.ticks_to_run, 0);
}
| https://raw.githubusercontent.com/floooh/kc85.zig/3af78174516d5c0aa8ce0fdba7e0cfd145261e4b/src/emu/Clock.zig |
// SDK surface: re-exports the concrete tracing implementation types.
// Entry point that creates tracers.
pub const TracerProvider = @import("sdk/provider.zig").TracerProvider;
// Span that records its data for later export.
pub const RecordingSpan = @import("sdk/span.zig").RecordingSpan;
// Creates and manages spans.
pub const Tracer = @import("sdk/tracer.zig").Tracer;
// Describes the entity producing telemetry.
pub const Resource = @import("sdk/resource.zig").Resource;
// Processor that forwards each span as it ends.
pub const SimpleSpanProcessor = @import("sdk/simple_span_processor.zig").SimpleSpanProcessor;
// Generic span-processor interface.
pub const SpanProcessor = @import("sdk/span_processor.zig").SpanProcessor;
| https://raw.githubusercontent.com/Drumato/opentelemetry-zig/2534edfc257a8c2f00277d53f1937511db6aa2bc/src/otel/sdk.zig |
const common = @import("./common.zig");
const addf3 = @import("./addf3.zig").addf3;
pub const panic = common.panic;
comptime {
    // Publish __addhf3 under the C ABI symbol name that compiler-rt
    // callers expect, with linkage/visibility shared across the runtime.
    @export(__addhf3, .{ .name = "__addhf3", .linkage = common.linkage, .visibility = common.visibility });
}
// Half-precision (f16) addition, delegating to the generic addf3
// soft-float implementation.
fn __addhf3(a: f16, b: f16) callconv(.C) f16 {
    return addf3(f16, a, b);
}
| https://raw.githubusercontent.com/ziglang/zig-bootstrap/ec2dca85a340f134d2fcfdc9007e91f9abed6996/zig/lib/compiler_rt/addhf3.zig |
const micro = @import("microzig");
const mmio = micro.mmio;
pub const devices = struct {
pub const ATtiny214 = struct {
pub const properties = struct {
pub const family = "AVR TINY";
pub const arch = "AVR8X";
};
pub const VectorTable = extern struct {
const Handler = micro.interrupt.Handler;
const unhandled = micro.interrupt.unhandled;
RESET: Handler = unhandled,
CRCSCAN_NMI: Handler = unhandled,
BOD_VLM: Handler = unhandled,
PORTA_PORT: Handler = unhandled,
PORTB_PORT: Handler = unhandled,
reserved5: [1]u16 = undefined,
RTC_CNT: Handler = unhandled,
RTC_PIT: Handler = unhandled,
TCA0_LUNF: Handler = unhandled,
TCA0_HUNF: Handler = unhandled,
TCA0_CMP0: Handler = unhandled,
TCA0_CMP1: Handler = unhandled,
TCA0_CMP2: Handler = unhandled,
TCB0_INT: Handler = unhandled,
TCD0_OVF: Handler = unhandled,
TCD0_TRIG: Handler = unhandled,
AC0_AC: Handler = unhandled,
ADC0_RESRDY: Handler = unhandled,
ADC0_WCOMP: Handler = unhandled,
TWI0_TWIS: Handler = unhandled,
TWI0_TWIM: Handler = unhandled,
SPI0_INT: Handler = unhandled,
USART0_RXC: Handler = unhandled,
USART0_DRE: Handler = unhandled,
USART0_TXC: Handler = unhandled,
NVMCTRL_EE: Handler = unhandled,
};
pub const peripherals = struct {
/// Virtual Ports
pub const VPORTA = @as(*volatile types.peripherals.VPORT, @ptrFromInt(0x0));
/// Virtual Ports
pub const VPORTB = @as(*volatile types.peripherals.VPORT, @ptrFromInt(0x4));
/// Virtual Ports
pub const VPORTC = @as(*volatile types.peripherals.VPORT, @ptrFromInt(0x8));
/// General Purpose IO
pub const GPIO = @as(*volatile types.peripherals.GPIO, @ptrFromInt(0x1c));
/// CPU
pub const CPU = @as(*volatile types.peripherals.CPU, @ptrFromInt(0x34));
/// Reset controller
pub const RSTCTRL = @as(*volatile types.peripherals.RSTCTRL, @ptrFromInt(0x40));
/// Sleep Controller
pub const SLPCTRL = @as(*volatile types.peripherals.SLPCTRL, @ptrFromInt(0x50));
/// Clock controller
pub const CLKCTRL = @as(*volatile types.peripherals.CLKCTRL, @ptrFromInt(0x60));
/// Bod interface
pub const BOD = @as(*volatile types.peripherals.BOD, @ptrFromInt(0x80));
/// Voltage reference
pub const VREF = @as(*volatile types.peripherals.VREF, @ptrFromInt(0xa0));
/// Watch-Dog Timer
pub const WDT = @as(*volatile types.peripherals.WDT, @ptrFromInt(0x100));
/// Interrupt Controller
pub const CPUINT = @as(*volatile types.peripherals.CPUINT, @ptrFromInt(0x110));
/// CRCSCAN
pub const CRCSCAN = @as(*volatile types.peripherals.CRCSCAN, @ptrFromInt(0x120));
/// Real-Time Counter
pub const RTC = @as(*volatile types.peripherals.RTC, @ptrFromInt(0x140));
/// Event System
pub const EVSYS = @as(*volatile types.peripherals.EVSYS, @ptrFromInt(0x180));
/// Configurable Custom Logic
pub const CCL = @as(*volatile types.peripherals.CCL, @ptrFromInt(0x1c0));
/// Port Multiplexer
pub const PORTMUX = @as(*volatile types.peripherals.PORTMUX, @ptrFromInt(0x200));
/// I/O Ports
pub const PORTA = @as(*volatile types.peripherals.PORT, @ptrFromInt(0x400));
/// I/O Ports
pub const PORTB = @as(*volatile types.peripherals.PORT, @ptrFromInt(0x420));
/// Analog to Digital Converter
pub const ADC0 = @as(*volatile types.peripherals.ADC, @ptrFromInt(0x600));
/// Analog Comparator
pub const AC0 = @as(*volatile types.peripherals.AC, @ptrFromInt(0x670));
/// Digital to Analog Converter
pub const DAC0 = @as(*volatile types.peripherals.DAC, @ptrFromInt(0x680));
/// Universal Synchronous and Asynchronous Receiver and Transmitter
pub const USART0 = @as(*volatile types.peripherals.USART, @ptrFromInt(0x800));
/// Two-Wire Interface
pub const TWI0 = @as(*volatile types.peripherals.TWI, @ptrFromInt(0x810));
/// Serial Peripheral Interface
pub const SPI0 = @as(*volatile types.peripherals.SPI, @ptrFromInt(0x820));
/// 16-bit Timer/Counter Type A
pub const TCA0 = @as(*volatile types.peripherals.TCA, @ptrFromInt(0xa00));
/// 16-bit Timer Type B
pub const TCB0 = @as(*volatile types.peripherals.TCB, @ptrFromInt(0xa40));
/// Timer Counter D
pub const TCD0 = @as(*volatile types.peripherals.TCD, @ptrFromInt(0xa80));
/// System Configuration Registers
pub const SYSCFG = @as(*volatile types.peripherals.SYSCFG, @ptrFromInt(0xf01));
/// Non-volatile Memory Controller
pub const NVMCTRL = @as(*volatile types.peripherals.NVMCTRL, @ptrFromInt(0x1000));
/// Signature row
pub const SIGROW = @as(*volatile types.peripherals.SIGROW, @ptrFromInt(0x1100));
/// Fuses
pub const FUSE = @as(*volatile types.peripherals.FUSE, @ptrFromInt(0x1280));
/// Lockbit
pub const LOCKBIT = @as(*volatile types.peripherals.LOCKBIT, @ptrFromInt(0x128a));
/// User Row
pub const USERROW = @as(*volatile types.peripherals.USERROW, @ptrFromInt(0x1300));
};
};
};
pub const types = struct {
pub const peripherals = struct {
/// Analog Comparator
pub const AC = extern struct {
/// Hysteresis Mode select
pub const AC_HYSMODE = enum(u2) {
/// No hysteresis
OFF = 0x0,
/// 10mV hysteresis
@"10mV" = 0x1,
/// 25mV hysteresis
@"25mV" = 0x2,
/// 50mV hysteresis
@"50mV" = 0x3,
};
/// Interrupt Mode select
pub const AC_INTMODE = enum(u2) {
/// Any Edge
BOTHEDGE = 0x0,
/// Negative Edge
NEGEDGE = 0x2,
/// Positive Edge
POSEDGE = 0x3,
_,
};
/// Low Power Mode select
pub const AC_LPMODE = enum(u1) {
/// Low power mode disabled
DIS = 0x0,
/// Low power mode enabled
EN = 0x1,
};
/// Negative Input MUX Selection
pub const AC_MUXNEG = enum(u2) {
/// Negative Pin 0
PIN0 = 0x0,
/// Voltage Reference
VREF = 0x2,
/// DAC output
DAC = 0x3,
_,
};
/// Positive Input MUX Selection
pub const AC_MUXPOS = enum(u2) {
/// Positive Pin 0
PIN0 = 0x0,
_,
};
/// Control A
CTRLA: mmio.Mmio(packed struct(u8) {
/// Enable
ENABLE: u1,
/// Hysteresis Mode
HYSMODE: packed union {
raw: u2,
value: AC_HYSMODE,
},
/// Low Power Mode
LPMODE: packed union {
raw: u1,
value: AC_LPMODE,
},
/// Interrupt Mode
INTMODE: packed union {
raw: u2,
value: AC_INTMODE,
},
/// Output Buffer Enable
OUTEN: u1,
/// Run in Standby Mode
RUNSTDBY: u1,
}),
reserved2: [1]u8,
/// Mux Control A
MUXCTRLA: mmio.Mmio(packed struct(u8) {
/// Negative Input MUX Selection
MUXNEG: packed union {
raw: u2,
value: AC_MUXNEG,
},
reserved3: u1,
/// Positive Input MUX Selection
MUXPOS: packed union {
raw: u2,
value: AC_MUXPOS,
},
reserved7: u2,
/// Invert AC Output
INVERT: u1,
}),
reserved6: [3]u8,
/// Interrupt Control
INTCTRL: mmio.Mmio(packed struct(u8) {
/// Analog Comparator 0 Interrupt Enable
CMP: u1,
padding: u7,
}),
/// Status
STATUS: mmio.Mmio(packed struct(u8) {
/// Analog Comparator Interrupt Flag
CMP: u1,
reserved4: u3,
/// Analog Comparator State
STATE: u1,
padding: u3,
}),
};
/// Analog to Digital Converter
pub const ADC = extern struct {
/// Duty Cycle select
pub const ADC_DUTYCYC = enum(u1) {
/// 50% Duty cycle
DUTY50 = 0x0,
/// 25% Duty cycle
DUTY25 = 0x1,
};
/// ADC Resolution select
pub const ADC_RESSEL = enum(u1) {
/// 10-bit mode
@"10BIT" = 0x0,
/// 8-bit mode
@"8BIT" = 0x1,
};
/// Accumulation Samples select
pub const ADC_SAMPNUM = enum(u3) {
/// 1 ADC sample
ACC1 = 0x0,
/// Accumulate 2 samples
ACC2 = 0x1,
/// Accumulate 4 samples
ACC4 = 0x2,
/// Accumulate 8 samples
ACC8 = 0x3,
/// Accumulate 16 samples
ACC16 = 0x4,
/// Accumulate 32 samples
ACC32 = 0x5,
/// Accumulate 64 samples
ACC64 = 0x6,
_,
};
/// Clock Pre-scaler select
pub const ADC_PRESC = enum(u3) {
/// CLK_PER divided by 2
DIV2 = 0x0,
/// CLK_PER divided by 4
DIV4 = 0x1,
/// CLK_PER divided by 8
DIV8 = 0x2,
/// CLK_PER divided by 16
DIV16 = 0x3,
/// CLK_PER divided by 32
DIV32 = 0x4,
/// CLK_PER divided by 64
DIV64 = 0x5,
/// CLK_PER divided by 128
DIV128 = 0x6,
/// CLK_PER divided by 256
DIV256 = 0x7,
};
/// Reference Selection
pub const ADC_REFSEL = enum(u2) {
/// Internal reference
INTREF = 0x0,
/// VDD
VDDREF = 0x1,
_,
};
/// Automatic Sampling Delay Variation select
pub const ADC_ASDV = enum(u1) {
/// The Automatic Sampling Delay Variation is disabled
ASVOFF = 0x0,
/// The Automatic Sampling Delay Variation is enabled
ASVON = 0x1,
};
/// Initial Delay Selection
pub const ADC_INITDLY = enum(u3) {
/// Delay 0 CLK_ADC cycles
DLY0 = 0x0,
/// Delay 16 CLK_ADC cycles
DLY16 = 0x1,
/// Delay 32 CLK_ADC cycles
DLY32 = 0x2,
/// Delay 64 CLK_ADC cycles
DLY64 = 0x3,
/// Delay 128 CLK_ADC cycles
DLY128 = 0x4,
/// Delay 256 CLK_ADC cycles
DLY256 = 0x5,
_,
};
/// Window Comparator Mode select
pub const ADC_WINCM = enum(u3) {
/// No Window Comparison
NONE = 0x0,
/// Below Window
BELOW = 0x1,
/// Above Window
ABOVE = 0x2,
/// Inside Window
INSIDE = 0x3,
/// Outside Window
OUTSIDE = 0x4,
_,
};
/// Analog Channel Selection Bits
pub const ADC_MUXPOS = enum(u5) {
/// ADC input pin 0
AIN0 = 0x0,
/// ADC input pin 1
AIN1 = 0x1,
/// ADC input pin 2
AIN2 = 0x2,
/// ADC input pin 3
AIN3 = 0x3,
/// ADC input pin 4
AIN4 = 0x4,
/// ADC input pin 5
AIN5 = 0x5,
/// ADC input pin 6
AIN6 = 0x6,
/// ADC input pin 7
AIN7 = 0x7,
/// ADC input pin 8
AIN8 = 0x8,
/// ADC input pin 9
AIN9 = 0x9,
/// ADC input pin 10
AIN10 = 0xa,
/// ADC input pin 11
AIN11 = 0xb,
/// DAC0
DAC0 = 0x1c,
/// Internal Ref
INTREF = 0x1d,
/// Temp sensor
TEMPSENSE = 0x1e,
/// GND
GND = 0x1f,
_,
};
/// Control A
CTRLA: mmio.Mmio(packed struct(u8) {
/// ADC Enable
ENABLE: u1,
/// ADC Freerun mode
FREERUN: u1,
/// ADC Resolution
RESSEL: packed union {
raw: u1,
value: ADC_RESSEL,
},
reserved7: u4,
/// Run standby mode
RUNSTBY: u1,
}),
/// Control B
CTRLB: mmio.Mmio(packed struct(u8) {
/// Accumulation Samples
SAMPNUM: packed union {
raw: u3,
value: ADC_SAMPNUM,
},
padding: u5,
}),
/// Control C
CTRLC: mmio.Mmio(packed struct(u8) {
/// Clock Pre-scaler
PRESC: packed union {
raw: u3,
value: ADC_PRESC,
},
reserved4: u1,
/// Reference Selection
REFSEL: packed union {
raw: u2,
value: ADC_REFSEL,
},
/// Sample Capacitance Selection
SAMPCAP: u1,
padding: u1,
}),
/// Control D
CTRLD: mmio.Mmio(packed struct(u8) {
/// Sampling Delay Selection
SAMPDLY: u4,
/// Automatic Sampling Delay Variation
ASDV: packed union {
raw: u1,
value: ADC_ASDV,
},
/// Initial Delay Selection
INITDLY: packed union {
raw: u3,
value: ADC_INITDLY,
},
}),
/// Control E
CTRLE: mmio.Mmio(packed struct(u8) {
/// Window Comparator Mode
WINCM: packed union {
raw: u3,
value: ADC_WINCM,
},
padding: u5,
}),
/// Sample Control
SAMPCTRL: mmio.Mmio(packed struct(u8) {
/// Sample lenght
SAMPLEN: u5,
padding: u3,
}),
/// Positive mux input
MUXPOS: mmio.Mmio(packed struct(u8) {
/// Analog Channel Selection Bits
MUXPOS: packed union {
raw: u5,
value: ADC_MUXPOS,
},
padding: u3,
}),
reserved8: [1]u8,
/// Command
COMMAND: mmio.Mmio(packed struct(u8) {
/// Start Conversion Operation
STCONV: u1,
padding: u7,
}),
/// Event Control
EVCTRL: mmio.Mmio(packed struct(u8) {
/// Start Event Input Enable
STARTEI: u1,
padding: u7,
}),
/// Interrupt Control
INTCTRL: mmio.Mmio(packed struct(u8) {
/// Result Ready Interrupt Enable
RESRDY: u1,
/// Window Comparator Interrupt Enable
WCMP: u1,
padding: u6,
}),
/// Interrupt Flags
INTFLAGS: mmio.Mmio(packed struct(u8) {
/// Result Ready Flag
RESRDY: u1,
/// Window Comparator Flag
WCMP: u1,
padding: u6,
}),
/// Debug Control
DBGCTRL: mmio.Mmio(packed struct(u8) {
/// Debug run
DBGRUN: u1,
padding: u7,
}),
/// Temporary Data
TEMP: mmio.Mmio(packed struct(u8) {
/// Temporary
TEMP: u8,
}),
reserved16: [2]u8,
/// ADC Accumulator Result
RES: u16,
/// Window comparator low threshold
WINLT: u16,
/// Window comparator high threshold
WINHT: u16,
/// Calibration
CALIB: mmio.Mmio(packed struct(u8) {
/// Duty Cycle
DUTYCYC: packed union {
raw: u1,
value: ADC_DUTYCYC,
},
padding: u7,
}),
};
/// Bod interface
pub const BOD = extern struct {
/// Operation in active mode select
pub const BOD_ACTIVE = enum(u2) {
/// Disabled
DIS = 0x0,
/// Enabled
ENABLED = 0x1,
/// Sampled
SAMPLED = 0x2,
/// Enabled with wakeup halt
ENWAKE = 0x3,
};
/// Sample frequency select
pub const BOD_SAMPFREQ = enum(u1) {
/// 1kHz sampling
@"1KHZ" = 0x0,
/// 125Hz sampling
@"125Hz" = 0x1,
};
/// Operation in sleep mode select
pub const BOD_SLEEP = enum(u2) {
/// Disabled
DIS = 0x0,
/// Enabled
ENABLED = 0x1,
/// Sampled
SAMPLED = 0x2,
_,
};
/// Bod level select
pub const BOD_LVL = enum(u3) {
/// 1.8 V
BODLEVEL0 = 0x0,
/// 2.6 V
BODLEVEL2 = 0x2,
/// 4.2 V
BODLEVEL7 = 0x7,
_,
};
/// Configuration select
pub const BOD_VLMCFG = enum(u2) {
/// Interrupt when supply goes below VLM level
BELOW = 0x0,
/// Interrupt when supply goes above VLM level
ABOVE = 0x1,
/// Interrupt when supply crosses VLM level
CROSS = 0x2,
_,
};
/// voltage level monitor level select
pub const BOD_VLMLVL = enum(u2) {
/// VLM threshold 5% above BOD level
@"5ABOVE" = 0x0,
/// VLM threshold 15% above BOD level
@"15ABOVE" = 0x1,
/// VLM threshold 25% above BOD level
@"25ABOVE" = 0x2,
_,
};
/// Control A
CTRLA: mmio.Mmio(packed struct(u8) {
/// Operation in sleep mode
SLEEP: packed union {
raw: u2,
value: BOD_SLEEP,
},
/// Operation in active mode
ACTIVE: packed union {
raw: u2,
value: BOD_ACTIVE,
},
/// Sample frequency
SAMPFREQ: packed union {
raw: u1,
value: BOD_SAMPFREQ,
},
padding: u3,
}),
/// Control B
CTRLB: mmio.Mmio(packed struct(u8) {
/// Bod level
LVL: packed union {
raw: u3,
value: BOD_LVL,
},
padding: u5,
}),
reserved8: [6]u8,
/// Voltage level monitor Control
VLMCTRLA: mmio.Mmio(packed struct(u8) {
/// voltage level monitor level
VLMLVL: packed union {
raw: u2,
value: BOD_VLMLVL,
},
padding: u6,
}),
/// Voltage level monitor interrupt Control
INTCTRL: mmio.Mmio(packed struct(u8) {
/// voltage level monitor interrrupt enable
VLMIE: u1,
/// Configuration
VLMCFG: packed union {
raw: u2,
value: BOD_VLMCFG,
},
padding: u5,
}),
/// Voltage level monitor interrupt Flags
INTFLAGS: mmio.Mmio(packed struct(u8) {
/// Voltage level monitor interrupt flag
VLMIF: u1,
padding: u7,
}),
/// Voltage level monitor status
STATUS: mmio.Mmio(packed struct(u8) {
/// Voltage level monitor status
VLMS: u1,
padding: u7,
}),
};
/// Configurable Custom Logic
pub const CCL = extern struct {
/// Edge Detection Enable select
pub const CCL_EDGEDET = enum(u1) {
/// Edge detector is disabled
DIS = 0x0,
/// Edge detector is enabled
EN = 0x1,
};
/// Filter Selection
pub const CCL_FILTSEL = enum(u2) {
/// Filter disabled
DISABLE = 0x0,
/// Synchronizer enabled
SYNCH = 0x1,
/// Filter enabled
FILTER = 0x2,
_,
};
/// LUT Input 0 Source Selection
pub const CCL_INSEL0 = enum(u4) {
/// Masked input
MASK = 0x0,
/// Feedback input source
FEEDBACK = 0x1,
/// Linked LUT input source
LINK = 0x2,
/// Event input source 0
EVENT0 = 0x3,
/// Event input source 1
EVENT1 = 0x4,
/// IO pin LUTn-IN0 input source
IO = 0x5,
/// AC0 OUT input source
AC0 = 0x6,
/// TCB0 WO input source
TCB0 = 0x7,
/// TCA0 WO0 input source
TCA0 = 0x8,
/// TCD0 WOA input source
TCD0 = 0x9,
/// USART0 XCK input source
USART0 = 0xa,
/// SPI0 SCK source
SPI0 = 0xb,
_,
};
/// LUT Input 1 Source Selection
pub const CCL_INSEL1 = enum(u4) {
/// Masked input
MASK = 0x0,
/// Feedback input source
FEEDBACK = 0x1,
/// Linked LUT input source
LINK = 0x2,
/// Event input source 0
EVENT0 = 0x3,
/// Event input source 1
EVENT1 = 0x4,
/// IO pin LUTn-N1 input source
IO = 0x5,
/// AC0 OUT input source
AC0 = 0x6,
/// TCB0 WO input source
TCB0 = 0x7,
/// TCA0 WO1 input source
TCA0 = 0x8,
/// TCD0 WOB input source
TCD0 = 0x9,
/// USART0 TXD input source
USART0 = 0xa,
/// SPI0 MOSI input source
SPI0 = 0xb,
_,
};
/// LUT Input 2 Source Selection
pub const CCL_INSEL2 = enum(u4) {
/// Masked input
MASK = 0x0,
/// Feedback input source
FEEDBACK = 0x1,
/// Linked LUT input source
LINK = 0x2,
/// Event input source 0
EVENT0 = 0x3,
/// Event input source 1
EVENT1 = 0x4,
/// IO pin LUTn-IN2 input source
IO = 0x5,
/// AC0 OUT input source
AC0 = 0x6,
/// TCB0 WO input source
TCB0 = 0x7,
/// TCA0 WO2 input source
TCA0 = 0x8,
/// TCD0 WOA input source
TCD0 = 0x9,
/// SPI0 MISO source
SPI0 = 0xb,
_,
};
/// Sequential Selection
pub const CCL_SEQSEL = enum(u3) {
/// Sequential logic disabled
DISABLE = 0x0,
/// D FlipFlop
DFF = 0x1,
/// JK FlipFlop
JK = 0x2,
/// D Latch
LATCH = 0x3,
/// RS Latch
RS = 0x4,
_,
};
/// Control Register A
CTRLA: mmio.Mmio(packed struct(u8) {
/// Enable
ENABLE: u1,
reserved6: u5,
/// Run in Standby
RUNSTDBY: u1,
padding: u1,
}),
/// Sequential Control 0
SEQCTRL0: mmio.Mmio(packed struct(u8) {
/// Sequential Selection
SEQSEL: packed union {
raw: u3,
value: CCL_SEQSEL,
},
padding: u5,
}),
reserved5: [3]u8,
/// LUT Control 0 A
LUT0CTRLA: mmio.Mmio(packed struct(u8) {
/// LUT Enable
ENABLE: u1,
reserved3: u2,
/// Output Enable
OUTEN: u1,
/// Filter Selection
FILTSEL: packed union {
raw: u2,
value: CCL_FILTSEL,
},
/// Clock Source Selection
CLKSRC: u1,
/// Edge Detection Enable
EDGEDET: packed union {
raw: u1,
value: CCL_EDGEDET,
},
}),
/// LUT Control 0 B
LUT0CTRLB: mmio.Mmio(packed struct(u8) {
/// LUT Input 0 Source Selection
INSEL0: packed union {
raw: u4,
value: CCL_INSEL0,
},
/// LUT Input 1 Source Selection
INSEL1: packed union {
raw: u4,
value: CCL_INSEL1,
},
}),
/// LUT Control 0 C
LUT0CTRLC: mmio.Mmio(packed struct(u8) {
/// LUT Input 2 Source Selection
INSEL2: packed union {
raw: u4,
value: CCL_INSEL2,
},
padding: u4,
}),
/// Truth 0
TRUTH0: mmio.Mmio(packed struct(u8) {
/// Truth Table
TRUTH: u8,
}),
/// LUT Control 1 A
LUT1CTRLA: mmio.Mmio(packed struct(u8) {
/// LUT Enable
ENABLE: u1,
reserved3: u2,
/// Output Enable
OUTEN: u1,
/// Filter Selection
FILTSEL: packed union {
raw: u2,
value: CCL_FILTSEL,
},
/// Clock Source Selection
CLKSRC: u1,
/// Edge Detection Enable
EDGEDET: packed union {
raw: u1,
value: CCL_EDGEDET,
},
}),
/// LUT Control 1 B
LUT1CTRLB: mmio.Mmio(packed struct(u8) {
/// LUT Input 0 Source Selection
INSEL0: packed union {
raw: u4,
value: CCL_INSEL0,
},
/// LUT Input 1 Source Selection
INSEL1: packed union {
raw: u4,
value: CCL_INSEL1,
},
}),
/// LUT Control 1 C
LUT1CTRLC: mmio.Mmio(packed struct(u8) {
/// LUT Input 2 Source Selection
INSEL2: packed union {
raw: u4,
value: CCL_INSEL2,
},
padding: u4,
}),
/// Truth 1
TRUTH1: mmio.Mmio(packed struct(u8) {
/// Truth Table
TRUTH: u8,
}),
};
/// Clock controller.
/// Register layout is hardware-defined: field order, `packed struct(u8)`
/// bit positions, and `reservedN` gaps (each pads up to byte offset N)
/// encode the peripheral's register map — do not reorder or resize.
pub const CLKCTRL = extern struct {
    /// Clock select
    pub const CLKCTRL_CLKSEL = enum(u2) {
        /// 20MHz internal oscillator
        OSC20M = 0x0,
        /// 32KHz internal Ultra Low Power oscillator
        OSCULP32K = 0x1,
        /// 32.768kHz external crystal oscillator
        XOSC32K = 0x2,
        /// External clock
        EXTCLK = 0x3,
    };
    /// Prescaler division select
    pub const CLKCTRL_PDIV = enum(u4) {
        /// 2X
        @"2X" = 0x0,
        /// 4X
        @"4X" = 0x1,
        /// 8X
        @"8X" = 0x2,
        /// 16X
        @"16X" = 0x3,
        /// 32X
        @"32X" = 0x4,
        /// 64X
        @"64X" = 0x5,
        /// 6X
        @"6X" = 0x8,
        /// 10X
        @"10X" = 0x9,
        /// 12X
        @"12X" = 0xa,
        /// 24X
        @"24X" = 0xb,
        /// 48X
        @"48X" = 0xc,
        _,
    };
    /// Crystal startup time select
    pub const CLKCTRL_CSUT = enum(u2) {
        /// 1K cycles
        @"1K" = 0x0,
        /// 16K cycles
        @"16K" = 0x1,
        /// 32K cycles
        @"32K" = 0x2,
        /// 64k cycles
        @"64K" = 0x3,
    };
    /// MCLK Control A (byte offset 0)
    MCLKCTRLA: mmio.Mmio(packed struct(u8) {
        /// Clock select
        CLKSEL: packed union {
            raw: u2,
            value: CLKCTRL_CLKSEL,
        },
        reserved7: u5,
        /// System clock out
        CLKOUT: u1,
    }),
    /// MCLK Control B
    MCLKCTRLB: mmio.Mmio(packed struct(u8) {
        /// Prescaler enable
        PEN: u1,
        /// Prescaler division
        PDIV: packed union {
            raw: u4,
            value: CLKCTRL_PDIV,
        },
        padding: u3,
    }),
    /// MCLK Lock
    MCLKLOCK: mmio.Mmio(packed struct(u8) {
        /// Lock enable
        LOCKEN: u1,
        padding: u7,
    }),
    /// MCLK Status
    MCLKSTATUS: mmio.Mmio(packed struct(u8) {
        /// System Oscillator changing
        SOSC: u1,
        reserved4: u3,
        /// 20MHz oscillator status
        OSC20MS: u1,
        /// 32KHz oscillator status
        OSC32KS: u1,
        /// 32.768 kHz Crystal Oscillator status
        XOSC32KS: u1,
        /// External Clock status
        EXTS: u1,
    }),
    reserved16: [12]u8,
    /// OSC20M Control A (byte offset 16)
    OSC20MCTRLA: mmio.Mmio(packed struct(u8) {
        reserved1: u1,
        /// Run standby
        RUNSTDBY: u1,
        padding: u6,
    }),
    /// OSC20M Calibration A
    OSC20MCALIBA: mmio.Mmio(packed struct(u8) {
        /// Calibration
        CAL20M: u6,
        padding: u2,
    }),
    /// OSC20M Calibration B
    OSC20MCALIBB: mmio.Mmio(packed struct(u8) {
        /// Oscillator temperature coefficient
        TEMPCAL20M: u4,
        reserved7: u3,
        /// Lock
        LOCK: u1,
    }),
    reserved24: [5]u8,
    /// OSC32K Control A (byte offset 24)
    OSC32KCTRLA: mmio.Mmio(packed struct(u8) {
        reserved1: u1,
        /// Run standby
        RUNSTDBY: u1,
        padding: u6,
    }),
    reserved28: [3]u8,
    /// XOSC32K Control A (byte offset 28)
    XOSC32KCTRLA: mmio.Mmio(packed struct(u8) {
        /// Enable
        ENABLE: u1,
        /// Run standby
        RUNSTDBY: u1,
        /// Select
        SEL: u1,
        reserved4: u1,
        /// Crystal startup time
        CSUT: packed union {
            raw: u2,
            value: CLKCTRL_CSUT,
        },
        padding: u2,
    }),
};
/// CPU core registers.
/// Register layout is hardware-defined: field order and `reservedN` gaps
/// (padding up to byte offset N) encode the register map — do not reorder.
pub const CPU = extern struct {
    /// CCP signature select.
    /// Writing one of these signatures to CCP briefly unlocks the
    /// corresponding protected operation (see device datasheet).
    pub const CPU_CCP = enum(u8) {
        /// SPM Instruction Protection
        SPM = 0x9d,
        /// IO Register Protection
        IOREG = 0xd8,
        _,
    };
    /// Configuration Change Protection (byte offset 0)
    CCP: mmio.Mmio(packed struct(u8) {
        /// CCP signature
        CCP: packed union {
            raw: u8,
            value: CPU_CCP,
        },
    }),
    reserved9: [8]u8,
    /// Stack Pointer Low (byte offset 9)
    SPL: u8,
    /// Stack Pointer High
    SPH: u8,
    /// Status Register
    SREG: mmio.Mmio(packed struct(u8) {
        /// Carry Flag
        C: u1,
        /// Zero Flag
        Z: u1,
        /// Negative Flag
        N: u1,
        /// Two's Complement Overflow Flag
        V: u1,
        /// N Exclusive Or V Flag
        S: u1,
        /// Half Carry Flag
        H: u1,
        /// Transfer Bit
        T: u1,
        /// Global Interrupt Enable Flag
        I: u1,
    }),
};
/// Interrupt Controller.
/// Register layout is hardware-defined; bit positions inside each
/// `packed struct(u8)` must match the datasheet — do not reorder.
pub const CPUINT = extern struct {
    /// Control A
    CTRLA: mmio.Mmio(packed struct(u8) {
        /// Round-robin Scheduling Enable
        LVL0RR: u1,
        reserved5: u4,
        /// Compact Vector Table
        CVT: u1,
        /// Interrupt Vector Select
        IVSEL: u1,
        padding: u1,
    }),
    /// Status
    STATUS: mmio.Mmio(packed struct(u8) {
        /// Level 0 Interrupt Executing
        LVL0EX: u1,
        /// Level 1 Interrupt Executing
        LVL1EX: u1,
        reserved7: u5,
        /// Non-maskable Interrupt Executing
        NMIEX: u1,
    }),
    /// Interrupt Level 0 Priority
    LVL0PRI: mmio.Mmio(packed struct(u8) {
        /// Interrupt Level Priority
        LVL0PRI: u8,
    }),
    /// Interrupt Level 1 Priority Vector
    LVL1VEC: mmio.Mmio(packed struct(u8) {
        /// Interrupt Vector with High Priority
        LVL1VEC: u8,
    }),
};
/// CRCSCAN — cyclic redundancy check of flash memory.
/// Register layout is hardware-defined; do not reorder fields.
pub const CRCSCAN = extern struct {
    /// CRC Flash Access Mode select
    pub const CRCSCAN_MODE = enum(u2) {
        /// Priority to flash
        PRIORITY = 0x0,
        _,
    };
    /// CRC Source select
    pub const CRCSCAN_SRC = enum(u2) {
        /// CRC on entire flash
        FLASH = 0x0,
        /// CRC on boot and appl section of flash
        APPLICATION = 0x1,
        /// CRC on boot section of flash
        BOOT = 0x2,
        _,
    };
    /// Control A
    CTRLA: mmio.Mmio(packed struct(u8) {
        /// Enable CRC scan
        ENABLE: u1,
        /// Enable NMI Trigger
        NMIEN: u1,
        reserved7: u5,
        /// Reset CRC scan
        RESET: u1,
    }),
    /// Control B
    CTRLB: mmio.Mmio(packed struct(u8) {
        /// CRC Source
        SRC: packed union {
            raw: u2,
            value: CRCSCAN_SRC,
        },
        reserved4: u2,
        /// CRC Flash Access Mode
        MODE: packed union {
            raw: u2,
            value: CRCSCAN_MODE,
        },
        padding: u2,
    }),
    /// Status
    STATUS: mmio.Mmio(packed struct(u8) {
        /// CRC Busy
        BUSY: u1,
        /// CRC Ok
        OK: u1,
        padding: u6,
    }),
};
/// Digital to Analog Converter.
/// Register layout is hardware-defined; do not reorder fields.
pub const DAC = extern struct {
    /// Control Register A
    CTRLA: mmio.Mmio(packed struct(u8) {
        /// DAC Enable
        ENABLE: u1,
        reserved6: u5,
        /// Output Buffer Enable
        OUTEN: u1,
        /// Run in Standby Mode
        RUNSTDBY: u1,
    }),
    /// DATA Register — 8-bit value converted by the DAC
    DATA: u8,
};
/// Event System — routes peripheral events from generators (channels) to
/// users without CPU involvement.
/// Register layout is hardware-defined: field order and `reservedN` gaps
/// (padding up to byte offset N) encode the register map — do not reorder.
/// The ASYNCUSERn selector enums all share the same encoding; they are
/// kept as distinct types because the generator tooling emits one per user.
pub const EVSYS = extern struct {
    /// Asynchronous Channel 0 Generator Selection
    pub const EVSYS_ASYNCCH0 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Configurable Custom Logic LUT0
        CCL_LUT0 = 0x1,
        /// Configurable Custom Logic LUT1
        CCL_LUT1 = 0x2,
        /// Analog Comparator 0 out
        AC0_OUT = 0x3,
        /// Timer/Counter D0 compare B clear
        TCD0_CMPBCLR = 0x4,
        /// Timer/Counter D0 compare A set
        TCD0_CMPASET = 0x5,
        /// Timer/Counter D0 compare B set
        TCD0_CMPBSET = 0x6,
        /// Timer/Counter D0 program event
        TCD0_PROGEV = 0x7,
        /// Real Time Counter overflow
        RTC_OVF = 0x8,
        /// Real Time Counter compare
        RTC_CMP = 0x9,
        /// Asynchronous Event from Pin PA0
        PORTA_PIN0 = 0xa,
        /// Asynchronous Event from Pin PA1
        PORTA_PIN1 = 0xb,
        /// Asynchronous Event from Pin PA2
        PORTA_PIN2 = 0xc,
        /// Asynchronous Event from Pin PA3
        PORTA_PIN3 = 0xd,
        /// Asynchronous Event from Pin PA4
        PORTA_PIN4 = 0xe,
        /// Asynchronous Event from Pin PA5
        PORTA_PIN5 = 0xf,
        /// Asynchronous Event from Pin PA6
        PORTA_PIN6 = 0x10,
        /// Asynchronous Event from Pin PA7
        PORTA_PIN7 = 0x11,
        /// Unified Program and debug interface
        UPDI = 0x12,
        _,
    };
    /// Asynchronous Channel 1 Generator Selection
    pub const EVSYS_ASYNCCH1 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Configurable custom logic LUT0
        CCL_LUT0 = 0x1,
        /// Configurable custom logic LUT1
        CCL_LUT1 = 0x2,
        /// Analog Comparator 0 out
        AC0_OUT = 0x3,
        /// Timer/Counter D0 compare B clear
        TCD0_CMPBCLR = 0x4,
        /// Timer/Counter D0 compare A set
        TCD0_CMPASET = 0x5,
        /// Timer/Counter D0 compare B set
        TCD0_CMPBSET = 0x6,
        /// Timer/Counter D0 program event
        TCD0_PROGEV = 0x7,
        /// Real Time Counter overflow
        RTC_OVF = 0x8,
        /// Real Time Counter compare
        RTC_CMP = 0x9,
        /// Asynchronous Event from Pin PB0
        PORTB_PIN0 = 0xa,
        /// Asynchronous Event from Pin PB1
        PORTB_PIN1 = 0xb,
        /// Asynchronous Event from Pin PB2
        PORTB_PIN2 = 0xc,
        /// Asynchronous Event from Pin PB3
        PORTB_PIN3 = 0xd,
        /// Asynchronous Event from Pin PB4
        PORTB_PIN4 = 0xe,
        /// Asynchronous Event from Pin PB5
        PORTB_PIN5 = 0xf,
        /// Asynchronous Event from Pin PB6
        PORTB_PIN6 = 0x10,
        /// Asynchronous Event from Pin PB7
        PORTB_PIN7 = 0x11,
        _,
    };
    /// Asynchronous Channel 2 Generator Selection
    pub const EVSYS_ASYNCCH2 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Configurable Custom Logic LUT0
        CCL_LUT0 = 0x1,
        /// Configurable Custom Logic LUT1
        CCL_LUT1 = 0x2,
        /// Analog Comparator 0 out
        AC0_OUT = 0x3,
        /// Timer/Counter D0 compare B clear
        TCD0_CMPBCLR = 0x4,
        /// Timer/Counter D0 compare A set
        TCD0_CMPASET = 0x5,
        /// Timer/Counter D0 compare B set
        TCD0_CMPBSET = 0x6,
        /// Timer/Counter D0 program event
        TCD0_PROGEV = 0x7,
        /// Real Time Counter overflow
        RTC_OVF = 0x8,
        /// Real Time Counter compare
        RTC_CMP = 0x9,
        /// Asynchronous Event from Pin PC0
        PORTC_PIN0 = 0xa,
        /// Asynchronous Event from Pin PC1
        PORTC_PIN1 = 0xb,
        /// Asynchronous Event from Pin PC2
        PORTC_PIN2 = 0xc,
        /// Asynchronous Event from Pin PC3
        PORTC_PIN3 = 0xd,
        /// Asynchronous Event from Pin PC4
        PORTC_PIN4 = 0xe,
        /// Asynchronous Event from Pin PC5
        PORTC_PIN5 = 0xf,
        _,
    };
    /// Asynchronous Channel 3 Generator Selection
    pub const EVSYS_ASYNCCH3 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Configurable custom logic LUT0
        CCL_LUT0 = 0x1,
        /// Configurable custom logic LUT1
        CCL_LUT1 = 0x2,
        /// Analog Comparator 0 out
        AC0_OUT = 0x3,
        /// Timer/Counter type D compare B clear
        TCD0_CMPBCLR = 0x4,
        /// Timer/Counter type D compare A set
        TCD0_CMPASET = 0x5,
        /// Timer/Counter type D compare B set
        TCD0_CMPBSET = 0x6,
        /// Timer/Counter type D program event
        TCD0_PROGEV = 0x7,
        /// Real Time Counter overflow
        RTC_OVF = 0x8,
        /// Real Time Counter compare
        RTC_CMP = 0x9,
        /// Periodic Interrupt CLK_RTC div 8192
        PIT_DIV8192 = 0xa,
        /// Periodic Interrupt CLK_RTC div 4096
        PIT_DIV4096 = 0xb,
        /// Periodic Interrupt CLK_RTC div 2048
        PIT_DIV2048 = 0xc,
        /// Periodic Interrupt CLK_RTC div 1024
        PIT_DIV1024 = 0xd,
        /// Periodic Interrupt CLK_RTC div 512
        PIT_DIV512 = 0xe,
        /// Periodic Interrupt CLK_RTC div 256
        PIT_DIV256 = 0xf,
        /// Periodic Interrupt CLK_RTC div 128
        PIT_DIV128 = 0x10,
        /// Periodic Interrupt CLK_RTC div 64
        PIT_DIV64 = 0x11,
        _,
    };
    /// Asynchronous User Ch 0 Input Selection - TCB0
    pub const EVSYS_ASYNCUSER0 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Synchronous Event Channel 0
        SYNCCH0 = 0x1,
        /// Synchronous Event Channel 1
        SYNCCH1 = 0x2,
        /// Asynchronous Event Channel 0
        ASYNCCH0 = 0x3,
        /// Asynchronous Event Channel 1
        ASYNCCH1 = 0x4,
        /// Asynchronous Event Channel 2
        ASYNCCH2 = 0x5,
        /// Asynchronous Event Channel 3
        ASYNCCH3 = 0x6,
        _,
    };
    /// Asynchronous User Ch 1 Input Selection - ADC0
    pub const EVSYS_ASYNCUSER1 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Synchronous Event Channel 0
        SYNCCH0 = 0x1,
        /// Synchronous Event Channel 1
        SYNCCH1 = 0x2,
        /// Asynchronous Event Channel 0
        ASYNCCH0 = 0x3,
        /// Asynchronous Event Channel 1
        ASYNCCH1 = 0x4,
        /// Asynchronous Event Channel 2
        ASYNCCH2 = 0x5,
        /// Asynchronous Event Channel 3
        ASYNCCH3 = 0x6,
        _,
    };
    /// Asynchronous User Ch 2 Input Selection - CCL LUT0 Event 0
    pub const EVSYS_ASYNCUSER2 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Synchronous Event Channel 0
        SYNCCH0 = 0x1,
        /// Synchronous Event Channel 1
        SYNCCH1 = 0x2,
        /// Asynchronous Event Channel 0
        ASYNCCH0 = 0x3,
        /// Asynchronous Event Channel 1
        ASYNCCH1 = 0x4,
        /// Asynchronous Event Channel 2
        ASYNCCH2 = 0x5,
        /// Asynchronous Event Channel 3
        ASYNCCH3 = 0x6,
        _,
    };
    /// Asynchronous User Ch 3 Input Selection - CCL LUT1 Event 0
    pub const EVSYS_ASYNCUSER3 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Synchronous Event Channel 0
        SYNCCH0 = 0x1,
        /// Synchronous Event Channel 1
        SYNCCH1 = 0x2,
        /// Asynchronous Event Channel 0
        ASYNCCH0 = 0x3,
        /// Asynchronous Event Channel 1
        ASYNCCH1 = 0x4,
        /// Asynchronous Event Channel 2
        ASYNCCH2 = 0x5,
        /// Asynchronous Event Channel 3
        ASYNCCH3 = 0x6,
        _,
    };
    /// Asynchronous User Ch 4 Input Selection - CCL LUT0 Event 1
    pub const EVSYS_ASYNCUSER4 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Synchronous Event Channel 0
        SYNCCH0 = 0x1,
        /// Synchronous Event Channel 1
        SYNCCH1 = 0x2,
        /// Asynchronous Event Channel 0
        ASYNCCH0 = 0x3,
        /// Asynchronous Event Channel 1
        ASYNCCH1 = 0x4,
        /// Asynchronous Event Channel 2
        ASYNCCH2 = 0x5,
        /// Asynchronous Event Channel 3
        ASYNCCH3 = 0x6,
        _,
    };
    /// Asynchronous User Ch 5 Input Selection - CCL LUT1 Event 1
    pub const EVSYS_ASYNCUSER5 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Synchronous Event Channel 0
        SYNCCH0 = 0x1,
        /// Synchronous Event Channel 1
        SYNCCH1 = 0x2,
        /// Asynchronous Event Channel 0
        ASYNCCH0 = 0x3,
        /// Asynchronous Event Channel 1
        ASYNCCH1 = 0x4,
        /// Asynchronous Event Channel 2
        ASYNCCH2 = 0x5,
        /// Asynchronous Event Channel 3
        ASYNCCH3 = 0x6,
        _,
    };
    /// Asynchronous User Ch 6 Input Selection - TCD0 Event 0
    pub const EVSYS_ASYNCUSER6 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Synchronous Event Channel 0
        SYNCCH0 = 0x1,
        /// Synchronous Event Channel 1
        SYNCCH1 = 0x2,
        /// Asynchronous Event Channel 0
        ASYNCCH0 = 0x3,
        /// Asynchronous Event Channel 1
        ASYNCCH1 = 0x4,
        /// Asynchronous Event Channel 2
        ASYNCCH2 = 0x5,
        /// Asynchronous Event Channel 3
        ASYNCCH3 = 0x6,
        _,
    };
    /// Asynchronous User Ch 7 Input Selection - TCD0 Event 1
    pub const EVSYS_ASYNCUSER7 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Synchronous Event Channel 0
        SYNCCH0 = 0x1,
        /// Synchronous Event Channel 1
        SYNCCH1 = 0x2,
        /// Asynchronous Event Channel 0
        ASYNCCH0 = 0x3,
        /// Asynchronous Event Channel 1
        ASYNCCH1 = 0x4,
        /// Asynchronous Event Channel 2
        ASYNCCH2 = 0x5,
        /// Asynchronous Event Channel 3
        ASYNCCH3 = 0x6,
        _,
    };
    /// Asynchronous User Ch 8 Input Selection - Event Out 0
    pub const EVSYS_ASYNCUSER8 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Synchronous Event Channel 0
        SYNCCH0 = 0x1,
        /// Synchronous Event Channel 1
        SYNCCH1 = 0x2,
        /// Asynchronous Event Channel 0
        ASYNCCH0 = 0x3,
        /// Asynchronous Event Channel 1
        ASYNCCH1 = 0x4,
        /// Asynchronous Event Channel 2
        ASYNCCH2 = 0x5,
        /// Asynchronous Event Channel 3
        ASYNCCH3 = 0x6,
        _,
    };
    /// Asynchronous User Ch 9 Input Selection - Event Out 1
    pub const EVSYS_ASYNCUSER9 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Synchronous Event Channel 0
        SYNCCH0 = 0x1,
        /// Synchronous Event Channel 1
        SYNCCH1 = 0x2,
        /// Asynchronous Event Channel 0
        ASYNCCH0 = 0x3,
        /// Asynchronous Event Channel 1
        ASYNCCH1 = 0x4,
        /// Asynchronous Event Channel 2
        ASYNCCH2 = 0x5,
        /// Asynchronous Event Channel 3
        ASYNCCH3 = 0x6,
        _,
    };
    /// Asynchronous User Ch 10 Input Selection - Event Out 2
    pub const EVSYS_ASYNCUSER10 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Synchronous Event Channel 0
        SYNCCH0 = 0x1,
        /// Synchronous Event Channel 1
        SYNCCH1 = 0x2,
        /// Asynchronous Event Channel 0
        ASYNCCH0 = 0x3,
        /// Asynchronous Event Channel 1
        ASYNCCH1 = 0x4,
        /// Asynchronous Event Channel 2
        ASYNCCH2 = 0x5,
        /// Asynchronous Event Channel 3
        ASYNCCH3 = 0x6,
        _,
    };
    /// Synchronous Channel 0 Generator Selection
    pub const EVSYS_SYNCCH0 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Timer/Counter B0
        TCB0 = 0x1,
        /// Timer/Counter A0 overflow
        TCA0_OVF_LUNF = 0x2,
        /// Timer/Counter A0 underflow high byte (split mode)
        TCA0_HUNF = 0x3,
        /// Timer/Counter A0 compare 0
        TCA0_CMP0 = 0x4,
        /// Timer/Counter A0 compare 1
        TCA0_CMP1 = 0x5,
        /// Timer/Counter A0 compare 2
        TCA0_CMP2 = 0x6,
        /// Synchronous Event from Pin PC0
        PORTC_PIN0 = 0x7,
        /// Synchronous Event from Pin PC1
        PORTC_PIN1 = 0x8,
        /// Synchronous Event from Pin PC2
        PORTC_PIN2 = 0x9,
        /// Synchronous Event from Pin PC3
        PORTC_PIN3 = 0xa,
        /// Synchronous Event from Pin PC4
        PORTC_PIN4 = 0xb,
        /// Synchronous Event from Pin PC5
        PORTC_PIN5 = 0xc,
        /// Synchronous Event from Pin PA0
        PORTA_PIN0 = 0xd,
        /// Synchronous Event from Pin PA1
        PORTA_PIN1 = 0xe,
        /// Synchronous Event from Pin PA2
        PORTA_PIN2 = 0xf,
        /// Synchronous Event from Pin PA3
        PORTA_PIN3 = 0x10,
        /// Synchronous Event from Pin PA4
        PORTA_PIN4 = 0x11,
        /// Synchronous Event from Pin PA5
        PORTA_PIN5 = 0x12,
        /// Synchronous Event from Pin PA6
        PORTA_PIN6 = 0x13,
        /// Synchronous Event from Pin PA7
        PORTA_PIN7 = 0x14,
        _,
    };
    /// Synchronous Channel 1 Generator Selection
    /// (note: 0x7 is intentionally unassigned here; PORTB pins start at 0x8)
    pub const EVSYS_SYNCCH1 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Timer/Counter B0
        TCB0 = 0x1,
        /// Timer/Counter A0 overflow
        TCA0_OVF_LUNF = 0x2,
        /// Timer/Counter A0 underflow high byte (split mode)
        TCA0_HUNF = 0x3,
        /// Timer/Counter A0 compare 0
        TCA0_CMP0 = 0x4,
        /// Timer/Counter A0 compare 1
        TCA0_CMP1 = 0x5,
        /// Timer/Counter A0 compare 2
        TCA0_CMP2 = 0x6,
        /// Synchronous Event from Pin PB0
        PORTB_PIN0 = 0x8,
        /// Synchronous Event from Pin PB1
        PORTB_PIN1 = 0x9,
        /// Synchronous Event from Pin PB2
        PORTB_PIN2 = 0xa,
        /// Synchronous Event from Pin PB3
        PORTB_PIN3 = 0xb,
        /// Synchronous Event from Pin PB4
        PORTB_PIN4 = 0xc,
        /// Synchronous Event from Pin PB5
        PORTB_PIN5 = 0xd,
        /// Synchronous Event from Pin PB6
        PORTB_PIN6 = 0xe,
        /// Synchronous Event from Pin PB7
        PORTB_PIN7 = 0xf,
        _,
    };
    /// Synchronous User Ch 0 Input Selection - TCA0
    pub const EVSYS_SYNCUSER0 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Synchronous Event Channel 0
        SYNCCH0 = 0x1,
        /// Synchronous Event Channel 1
        SYNCCH1 = 0x2,
        _,
    };
    /// Synchronous User Ch 1 Input Selection - USART0
    pub const EVSYS_SYNCUSER1 = enum(u8) {
        /// Off
        OFF = 0x0,
        /// Synchronous Event Channel 0
        SYNCCH0 = 0x1,
        /// Synchronous Event Channel 1
        SYNCCH1 = 0x2,
        _,
    };
    /// Asynchronous Channel Strobe (byte offset 0)
    ASYNCSTROBE: u8,
    /// Synchronous Channel Strobe
    SYNCSTROBE: u8,
    /// Asynchronous Channel 0 Generator Selection
    ASYNCCH0: mmio.Mmio(packed struct(u8) {
        /// Asynchronous Channel 0 Generator Selection
        ASYNCCH0: packed union {
            raw: u8,
            value: EVSYS_ASYNCCH0,
        },
    }),
    /// Asynchronous Channel 1 Generator Selection
    ASYNCCH1: mmio.Mmio(packed struct(u8) {
        /// Asynchronous Channel 1 Generator Selection
        ASYNCCH1: packed union {
            raw: u8,
            value: EVSYS_ASYNCCH1,
        },
    }),
    /// Asynchronous Channel 2 Generator Selection
    ASYNCCH2: mmio.Mmio(packed struct(u8) {
        /// Asynchronous Channel 2 Generator Selection
        ASYNCCH2: packed union {
            raw: u8,
            value: EVSYS_ASYNCCH2,
        },
    }),
    /// Asynchronous Channel 3 Generator Selection
    ASYNCCH3: mmio.Mmio(packed struct(u8) {
        /// Asynchronous Channel 3 Generator Selection
        ASYNCCH3: packed union {
            raw: u8,
            value: EVSYS_ASYNCCH3,
        },
    }),
    reserved10: [4]u8,
    /// Synchronous Channel 0 Generator Selection (byte offset 10)
    SYNCCH0: mmio.Mmio(packed struct(u8) {
        /// Synchronous Channel 0 Generator Selection
        SYNCCH0: packed union {
            raw: u8,
            value: EVSYS_SYNCCH0,
        },
    }),
    /// Synchronous Channel 1 Generator Selection
    SYNCCH1: mmio.Mmio(packed struct(u8) {
        /// Synchronous Channel 1 Generator Selection
        SYNCCH1: packed union {
            raw: u8,
            value: EVSYS_SYNCCH1,
        },
    }),
    reserved18: [6]u8,
    /// Asynchronous User Ch 0 Input Selection - TCB0 (byte offset 18)
    ASYNCUSER0: mmio.Mmio(packed struct(u8) {
        /// Asynchronous User Ch 0 Input Selection - TCB0
        ASYNCUSER0: packed union {
            raw: u8,
            value: EVSYS_ASYNCUSER0,
        },
    }),
    /// Asynchronous User Ch 1 Input Selection - ADC0
    ASYNCUSER1: mmio.Mmio(packed struct(u8) {
        /// Asynchronous User Ch 1 Input Selection - ADC0
        ASYNCUSER1: packed union {
            raw: u8,
            value: EVSYS_ASYNCUSER1,
        },
    }),
    /// Asynchronous User Ch 2 Input Selection - CCL LUT0 Event 0
    ASYNCUSER2: mmio.Mmio(packed struct(u8) {
        /// Asynchronous User Ch 2 Input Selection - CCL LUT0 Event 0
        ASYNCUSER2: packed union {
            raw: u8,
            value: EVSYS_ASYNCUSER2,
        },
    }),
    /// Asynchronous User Ch 3 Input Selection - CCL LUT1 Event 0
    ASYNCUSER3: mmio.Mmio(packed struct(u8) {
        /// Asynchronous User Ch 3 Input Selection - CCL LUT1 Event 0
        ASYNCUSER3: packed union {
            raw: u8,
            value: EVSYS_ASYNCUSER3,
        },
    }),
    /// Asynchronous User Ch 4 Input Selection - CCL LUT0 Event 1
    ASYNCUSER4: mmio.Mmio(packed struct(u8) {
        /// Asynchronous User Ch 4 Input Selection - CCL LUT0 Event 1
        ASYNCUSER4: packed union {
            raw: u8,
            value: EVSYS_ASYNCUSER4,
        },
    }),
    /// Asynchronous User Ch 5 Input Selection - CCL LUT1 Event 1
    ASYNCUSER5: mmio.Mmio(packed struct(u8) {
        /// Asynchronous User Ch 5 Input Selection - CCL LUT1 Event 1
        ASYNCUSER5: packed union {
            raw: u8,
            value: EVSYS_ASYNCUSER5,
        },
    }),
    /// Asynchronous User Ch 6 Input Selection - TCD0 Event 0
    ASYNCUSER6: mmio.Mmio(packed struct(u8) {
        /// Asynchronous User Ch 6 Input Selection - TCD0 Event 0
        ASYNCUSER6: packed union {
            raw: u8,
            value: EVSYS_ASYNCUSER6,
        },
    }),
    /// Asynchronous User Ch 7 Input Selection - TCD0 Event 1
    ASYNCUSER7: mmio.Mmio(packed struct(u8) {
        /// Asynchronous User Ch 7 Input Selection - TCD0 Event 1
        ASYNCUSER7: packed union {
            raw: u8,
            value: EVSYS_ASYNCUSER7,
        },
    }),
    /// Asynchronous User Ch 8 Input Selection - Event Out 0
    ASYNCUSER8: mmio.Mmio(packed struct(u8) {
        /// Asynchronous User Ch 8 Input Selection - Event Out 0
        ASYNCUSER8: packed union {
            raw: u8,
            value: EVSYS_ASYNCUSER8,
        },
    }),
    /// Asynchronous User Ch 9 Input Selection - Event Out 1
    ASYNCUSER9: mmio.Mmio(packed struct(u8) {
        /// Asynchronous User Ch 9 Input Selection - Event Out 1
        ASYNCUSER9: packed union {
            raw: u8,
            value: EVSYS_ASYNCUSER9,
        },
    }),
    /// Asynchronous User Ch 10 Input Selection - Event Out 2
    ASYNCUSER10: mmio.Mmio(packed struct(u8) {
        /// Asynchronous User Ch 10 Input Selection - Event Out 2
        ASYNCUSER10: packed union {
            raw: u8,
            value: EVSYS_ASYNCUSER10,
        },
    }),
    reserved34: [5]u8,
    /// Synchronous User Ch 0 Input Selection - TCA0 (byte offset 34)
    SYNCUSER0: mmio.Mmio(packed struct(u8) {
        /// Synchronous User Ch 0 Input Selection - TCA0
        SYNCUSER0: packed union {
            raw: u8,
            value: EVSYS_SYNCUSER0,
        },
    }),
    /// Synchronous User Ch 1 Input Selection - USART0
    SYNCUSER1: mmio.Mmio(packed struct(u8) {
        /// Synchronous User Ch 1 Input Selection - USART0
        SYNCUSER1: packed union {
            raw: u8,
            value: EVSYS_SYNCUSER1,
        },
    }),
};
/// Fuses — non-volatile device configuration bytes.
/// Layout mirrors the fuse byte map: field order and the reserved gap
/// encode byte offsets — do not reorder. These bytes are programmed via
/// the programming interface, not at runtime.
pub const FUSE = extern struct {
    /// BOD Operation in Active Mode select
    pub const FUSE_ACTIVE = enum(u2) {
        /// Disabled
        DIS = 0x0,
        /// Enabled
        ENABLED = 0x1,
        /// Sampled
        SAMPLED = 0x2,
        /// Enabled with wake-up halted until BOD is ready
        ENWAKE = 0x3,
    };
    /// BOD Level select
    pub const FUSE_LVL = enum(u3) {
        /// 1.8 V
        BODLEVEL0 = 0x0,
        /// 2.6 V
        BODLEVEL2 = 0x2,
        /// 4.2 V
        BODLEVEL7 = 0x7,
        _,
    };
    /// BOD Sample Frequency select
    pub const FUSE_SAMPFREQ = enum(u1) {
        /// 1kHz sampling frequency
        @"1KHz" = 0x0,
        /// 125Hz sampling frequency
        @"125Hz" = 0x1,
    };
    /// BOD Operation in Sleep Mode select
    pub const FUSE_SLEEP = enum(u2) {
        /// Disabled
        DIS = 0x0,
        /// Enabled
        ENABLED = 0x1,
        /// Sampled
        SAMPLED = 0x2,
        _,
    };
    /// Frequency Select
    pub const FUSE_FREQSEL = enum(u2) {
        /// 16 MHz
        @"16MHZ" = 0x1,
        /// 20 MHz
        @"20MHZ" = 0x2,
        _,
    };
    /// CRC Source select
    pub const FUSE_CRCSRC = enum(u2) {
        /// The CRC is performed on the entire Flash (boot, application code and application data section).
        FLASH = 0x0,
        /// The CRC is performed on the boot section of Flash
        BOOT = 0x1,
        /// The CRC is performed on the boot and application code section of Flash
        BOOTAPP = 0x2,
        /// Disable CRC.
        NOCRC = 0x3,
    };
    /// Reset Pin Configuration select
    pub const FUSE_RSTPINCFG = enum(u2) {
        /// GPIO mode
        GPIO = 0x0,
        /// UPDI mode
        UPDI = 0x1,
        /// Reset mode
        RST = 0x2,
        _,
    };
    /// Startup Time select
    pub const FUSE_SUT = enum(u3) {
        /// 0 ms
        @"0MS" = 0x0,
        /// 1 ms
        @"1MS" = 0x1,
        /// 2 ms
        @"2MS" = 0x2,
        /// 4 ms
        @"4MS" = 0x3,
        /// 8 ms
        @"8MS" = 0x4,
        /// 16 ms
        @"16MS" = 0x5,
        /// 32 ms
        @"32MS" = 0x6,
        /// 64 ms
        @"64MS" = 0x7,
    };
    /// Watchdog Timeout Period select
    pub const FUSE_PERIOD = enum(u4) {
        /// Watchdog timer Off
        OFF = 0x0,
        /// 8 cycles (8ms)
        @"8CLK" = 0x1,
        /// 16 cycles (16ms)
        @"16CLK" = 0x2,
        /// 32 cycles (32ms)
        @"32CLK" = 0x3,
        /// 64 cycles (64ms)
        @"64CLK" = 0x4,
        /// 128 cycles (0.128s)
        @"128CLK" = 0x5,
        /// 256 cycles (0.256s)
        @"256CLK" = 0x6,
        /// 512 cycles (0.512s)
        @"512CLK" = 0x7,
        /// 1K cycles (1.0s)
        @"1KCLK" = 0x8,
        /// 2K cycles (2.0s)
        @"2KCLK" = 0x9,
        /// 4K cycles (4.1s)
        @"4KCLK" = 0xa,
        /// 8K cycles (8.2s)
        @"8KCLK" = 0xb,
        _,
    };
    /// Watchdog Window Timeout Period select
    pub const FUSE_WINDOW = enum(u4) {
        /// Window mode off
        OFF = 0x0,
        /// 8 cycles (8ms)
        @"8CLK" = 0x1,
        /// 16 cycles (16ms)
        @"16CLK" = 0x2,
        /// 32 cycles (32ms)
        @"32CLK" = 0x3,
        /// 64 cycles (64ms)
        @"64CLK" = 0x4,
        /// 128 cycles (0.128s)
        @"128CLK" = 0x5,
        /// 256 cycles (0.256s)
        @"256CLK" = 0x6,
        /// 512 cycles (0.512s)
        @"512CLK" = 0x7,
        /// 1K cycles (1.0s)
        @"1KCLK" = 0x8,
        /// 2K cycles (2.0s)
        @"2KCLK" = 0x9,
        /// 4K cycles (4.1s)
        @"4KCLK" = 0xa,
        /// 8K cycles (8.2s)
        @"8KCLK" = 0xb,
        _,
    };
    /// Watchdog Configuration (byte offset 0)
    WDTCFG: mmio.Mmio(packed struct(u8) {
        /// Watchdog Timeout Period
        PERIOD: packed union {
            raw: u4,
            value: FUSE_PERIOD,
        },
        /// Watchdog Window Timeout Period
        WINDOW: packed union {
            raw: u4,
            value: FUSE_WINDOW,
        },
    }),
    /// BOD Configuration
    BODCFG: mmio.Mmio(packed struct(u8) {
        /// BOD Operation in Sleep Mode
        SLEEP: packed union {
            raw: u2,
            value: FUSE_SLEEP,
        },
        /// BOD Operation in Active Mode
        ACTIVE: packed union {
            raw: u2,
            value: FUSE_ACTIVE,
        },
        /// BOD Sample Frequency
        SAMPFREQ: packed union {
            raw: u1,
            value: FUSE_SAMPFREQ,
        },
        /// BOD Level
        LVL: packed union {
            raw: u3,
            value: FUSE_LVL,
        },
    }),
    /// Oscillator Configuration
    OSCCFG: mmio.Mmio(packed struct(u8) {
        /// Frequency Select
        FREQSEL: packed union {
            raw: u2,
            value: FUSE_FREQSEL,
        },
        reserved7: u5,
        /// Oscillator Lock
        OSCLOCK: u1,
    }),
    reserved4: [1]u8,
    /// TCD0 Configuration (byte offset 4)
    TCD0CFG: mmio.Mmio(packed struct(u8) {
        /// Compare A Default Output Value
        CMPA: u1,
        /// Compare B Default Output Value
        CMPB: u1,
        /// Compare C Default Output Value
        CMPC: u1,
        /// Compare D Default Output Value
        CMPD: u1,
        /// Compare A Output Enable
        CMPAEN: u1,
        /// Compare B Output Enable
        CMPBEN: u1,
        /// Compare C Output Enable
        CMPCEN: u1,
        /// Compare D Output Enable
        CMPDEN: u1,
    }),
    /// System Configuration 0
    SYSCFG0: mmio.Mmio(packed struct(u8) {
        /// EEPROM Save
        EESAVE: u1,
        reserved2: u1,
        /// Reset Pin Configuration
        RSTPINCFG: packed union {
            raw: u2,
            value: FUSE_RSTPINCFG,
        },
        reserved6: u2,
        /// CRC Source
        CRCSRC: packed union {
            raw: u2,
            value: FUSE_CRCSRC,
        },
    }),
    /// System Configuration 1
    SYSCFG1: mmio.Mmio(packed struct(u8) {
        /// Startup Time
        SUT: packed union {
            raw: u3,
            value: FUSE_SUT,
        },
        padding: u5,
    }),
    /// Application Code Section End
    APPEND: u8,
    /// Boot Section End
    BOOTEND: u8,
};
/// General Purpose IO registers — four free-form scratch bytes with no
/// hardware function of their own.
pub const GPIO = extern struct {
    /// General Purpose IO Register 0
    GPIOR0: u8,
    /// General Purpose IO Register 1
    GPIOR1: u8,
    /// General Purpose IO Register 2
    GPIOR2: u8,
    /// General Purpose IO Register 3
    GPIOR3: u8,
};
/// Lockbit — flash/EEPROM access-protection configuration.
/// Only the two listed signature values have defined meaning; any other
/// byte value is device-specific (hence the non-exhaustive enum).
pub const LOCKBIT = extern struct {
    /// Lock Bits select
    pub const LOCKBIT_LB = enum(u8) {
        /// Read and write lock
        RWLOCK = 0x3a,
        /// No locks
        NOLOCK = 0xc5,
        _,
    };
    /// Lock bits
    LOCKBIT: mmio.Mmio(packed struct(u8) {
        /// Lock Bits
        LB: packed union {
            raw: u8,
            value: LOCKBIT_LB,
        },
    }),
};
/// Non-volatile Memory Controller — flash/EEPROM page programming.
/// Register layout is hardware-defined: field order and the reserved gap
/// encode byte offsets — do not reorder.
pub const NVMCTRL = extern struct {
    /// Command select
    pub const NVMCTRL_CMD = enum(u3) {
        /// No Command
        NONE = 0x0,
        /// Write page
        PAGEWRITE = 0x1,
        /// Erase page
        PAGEERASE = 0x2,
        /// Erase and write page
        PAGEERASEWRITE = 0x3,
        /// Page buffer clear
        PAGEBUFCLR = 0x4,
        /// Chip erase
        CHIPERASE = 0x5,
        /// EEPROM erase
        EEERASE = 0x6,
        /// Write fuse (PDI only)
        FUSEWRITE = 0x7,
    };
    /// Control A (byte offset 0)
    CTRLA: mmio.Mmio(packed struct(u8) {
        /// Command
        CMD: packed union {
            raw: u3,
            value: NVMCTRL_CMD,
        },
        padding: u5,
    }),
    /// Control B
    CTRLB: mmio.Mmio(packed struct(u8) {
        /// Application code write protect
        APCWP: u1,
        /// Boot Lock
        BOOTLOCK: u1,
        padding: u6,
    }),
    /// Status
    STATUS: mmio.Mmio(packed struct(u8) {
        /// Flash busy
        FBUSY: u1,
        /// EEPROM busy
        EEBUSY: u1,
        /// Write error
        WRERROR: u1,
        padding: u5,
    }),
    /// Interrupt Control
    INTCTRL: mmio.Mmio(packed struct(u8) {
        /// EEPROM Ready
        EEREADY: u1,
        padding: u7,
    }),
    /// Interrupt Flags
    INTFLAGS: mmio.Mmio(packed struct(u8) {
        /// EEPROM Ready
        EEREADY: u1,
        padding: u7,
    }),
    reserved6: [1]u8,
    /// Data (16-bit register at byte offset 6)
    DATA: u16,
    /// Address (16-bit register at byte offset 8)
    ADDR: u16,
};
/// I/O Ports — shared register layout for a single port instance
/// (presumably one instance each for PORTA/PORTB/PORTC on this device —
/// confirm against the peripheral address map).
/// Register layout is hardware-defined: field order and the reserved gap
/// encode byte offsets — do not reorder. The SET/CLR/TGL registers are
/// write-only strobes that modify DIR/OUT atomically.
pub const PORT = extern struct {
    /// Input/Sense Configuration select
    pub const PORT_ISC = enum(u3) {
        /// Interrupt disabled but input buffer enabled
        INTDISABLE = 0x0,
        /// Sense Both Edges
        BOTHEDGES = 0x1,
        /// Sense Rising Edge
        RISING = 0x2,
        /// Sense Falling Edge
        FALLING = 0x3,
        /// Digital Input Buffer disabled
        INPUT_DISABLE = 0x4,
        /// Sense low Level
        LEVEL = 0x5,
        _,
    };
    /// Data Direction (byte offset 0)
    DIR: u8,
    /// Data Direction Set
    DIRSET: u8,
    /// Data Direction Clear
    DIRCLR: u8,
    /// Data Direction Toggle
    DIRTGL: u8,
    /// Output Value
    OUT: u8,
    /// Output Value Set
    OUTSET: u8,
    /// Output Value Clear
    OUTCLR: u8,
    /// Output Value Toggle
    OUTTGL: u8,
    /// Input Value
    IN: u8,
    /// Interrupt Flags
    INTFLAGS: mmio.Mmio(packed struct(u8) {
        /// Pin Interrupt (one flag bit per pin)
        INT: u8,
    }),
    reserved16: [6]u8,
    /// Pin 0 Control (byte offset 16; one PINnCTRL per pin follows)
    PIN0CTRL: mmio.Mmio(packed struct(u8) {
        /// Input/Sense Configuration
        ISC: packed union {
            raw: u3,
            value: PORT_ISC,
        },
        /// Pullup enable
        PULLUPEN: u1,
        reserved7: u3,
        /// Inverted I/O Enable
        INVEN: u1,
    }),
    /// Pin 1 Control
    PIN1CTRL: mmio.Mmio(packed struct(u8) {
        /// Input/Sense Configuration
        ISC: packed union {
            raw: u3,
            value: PORT_ISC,
        },
        /// Pullup enable
        PULLUPEN: u1,
        reserved7: u3,
        /// Inverted I/O Enable
        INVEN: u1,
    }),
    /// Pin 2 Control
    PIN2CTRL: mmio.Mmio(packed struct(u8) {
        /// Input/Sense Configuration
        ISC: packed union {
            raw: u3,
            value: PORT_ISC,
        },
        /// Pullup enable
        PULLUPEN: u1,
        reserved7: u3,
        /// Inverted I/O Enable
        INVEN: u1,
    }),
    /// Pin 3 Control
    PIN3CTRL: mmio.Mmio(packed struct(u8) {
        /// Input/Sense Configuration
        ISC: packed union {
            raw: u3,
            value: PORT_ISC,
        },
        /// Pullup enable
        PULLUPEN: u1,
        reserved7: u3,
        /// Inverted I/O Enable
        INVEN: u1,
    }),
    /// Pin 4 Control
    PIN4CTRL: mmio.Mmio(packed struct(u8) {
        /// Input/Sense Configuration
        ISC: packed union {
            raw: u3,
            value: PORT_ISC,
        },
        /// Pullup enable
        PULLUPEN: u1,
        reserved7: u3,
        /// Inverted I/O Enable
        INVEN: u1,
    }),
    /// Pin 5 Control
    PIN5CTRL: mmio.Mmio(packed struct(u8) {
        /// Input/Sense Configuration
        ISC: packed union {
            raw: u3,
            value: PORT_ISC,
        },
        /// Pullup enable
        PULLUPEN: u1,
        reserved7: u3,
        /// Inverted I/O Enable
        INVEN: u1,
    }),
    /// Pin 6 Control
    PIN6CTRL: mmio.Mmio(packed struct(u8) {
        /// Input/Sense Configuration
        ISC: packed union {
            raw: u3,
            value: PORT_ISC,
        },
        /// Pullup enable
        PULLUPEN: u1,
        reserved7: u3,
        /// Inverted I/O Enable
        INVEN: u1,
    }),
    /// Pin 7 Control
    PIN7CTRL: mmio.Mmio(packed struct(u8) {
        /// Input/Sense Configuration
        ISC: packed union {
            raw: u3,
            value: PORT_ISC,
        },
        /// Pullup enable
        PULLUPEN: u1,
        reserved7: u3,
        /// Inverted I/O Enable
        INVEN: u1,
    }),
};
/// Port Multiplexer — selects default vs. alternate pin locations for
/// peripheral signals. Register layout is hardware-defined; do not
/// reorder fields or bit positions.
pub const PORTMUX = extern struct {
    /// Configurable Custom Logic LUT0 select
    pub const PORTMUX_LUT0 = enum(u1) {
        /// Default pin
        DEFAULT = 0x0,
        /// Alternate pin
        ALTERNATE = 0x1,
    };
    /// Configurable Custom Logic LUT1 select
    pub const PORTMUX_LUT1 = enum(u1) {
        /// Default pin
        DEFAULT = 0x0,
        /// Alternate pin
        ALTERNATE = 0x1,
    };
    /// Port Multiplexer SPI0 select
    pub const PORTMUX_SPI0 = enum(u1) {
        /// Default pins
        DEFAULT = 0x0,
        /// Alternate pins
        ALTERNATE = 0x1,
    };
    /// Port Multiplexer TWI0 select
    pub const PORTMUX_TWI0 = enum(u1) {
        /// Default pins
        DEFAULT = 0x0,
        /// Alternate pins
        ALTERNATE = 0x1,
    };
    /// Port Multiplexer USART0 select
    pub const PORTMUX_USART0 = enum(u1) {
        /// Default pins
        DEFAULT = 0x0,
        /// Alternate pins
        ALTERNATE = 0x1,
    };
    /// Port Multiplexer TCA0 Output 0 select
    pub const PORTMUX_TCA00 = enum(u1) {
        /// Default pin
        DEFAULT = 0x0,
        /// Alternate pin
        ALTERNATE = 0x1,
    };
    /// Port Multiplexer TCA0 Output 1 select
    pub const PORTMUX_TCA01 = enum(u1) {
        /// Default pin
        DEFAULT = 0x0,
        /// Alternate pin
        ALTERNATE = 0x1,
    };
    /// Port Multiplexer TCA0 Output 2 select
    pub const PORTMUX_TCA02 = enum(u1) {
        /// Default pin
        DEFAULT = 0x0,
        /// Alternate pin
        ALTERNATE = 0x1,
    };
    /// Port Multiplexer TCA0 Output 3 select
    pub const PORTMUX_TCA03 = enum(u1) {
        /// Default pin
        DEFAULT = 0x0,
        /// Alternate pin
        ALTERNATE = 0x1,
    };
    /// Port Multiplexer TCA0 Output 4 select
    pub const PORTMUX_TCA04 = enum(u1) {
        /// Default pin
        DEFAULT = 0x0,
        /// Alternate pin
        ALTERNATE = 0x1,
    };
    /// Port Multiplexer TCA0 Output 5 select
    pub const PORTMUX_TCA05 = enum(u1) {
        /// Default pin
        DEFAULT = 0x0,
        /// Alternate pin
        ALTERNATE = 0x1,
    };
    /// Port Multiplexer TCB select
    pub const PORTMUX_TCB0 = enum(u1) {
        /// Default pin
        DEFAULT = 0x0,
        /// Alternate pin
        ALTERNATE = 0x1,
    };
    /// Port Multiplexer Control A
    CTRLA: mmio.Mmio(packed struct(u8) {
        /// Event Output 0
        EVOUT0: u1,
        /// Event Output 1
        EVOUT1: u1,
        /// Event Output 2
        EVOUT2: u1,
        reserved4: u1,
        /// Configurable Custom Logic LUT0
        LUT0: packed union {
            raw: u1,
            value: PORTMUX_LUT0,
        },
        /// Configurable Custom Logic LUT1
        LUT1: packed union {
            raw: u1,
            value: PORTMUX_LUT1,
        },
        padding: u2,
    }),
    /// Port Multiplexer Control B
    CTRLB: mmio.Mmio(packed struct(u8) {
        /// Port Multiplexer USART0
        USART0: packed union {
            raw: u1,
            value: PORTMUX_USART0,
        },
        reserved2: u1,
        /// Port Multiplexer SPI0
        SPI0: packed union {
            raw: u1,
            value: PORTMUX_SPI0,
        },
        reserved4: u1,
        /// Port Multiplexer TWI0
        TWI0: packed union {
            raw: u1,
            value: PORTMUX_TWI0,
        },
        padding: u3,
    }),
    /// Port Multiplexer Control C
    CTRLC: mmio.Mmio(packed struct(u8) {
        /// Port Multiplexer TCA0 Output 0
        TCA00: packed union {
            raw: u1,
            value: PORTMUX_TCA00,
        },
        /// Port Multiplexer TCA0 Output 1
        TCA01: packed union {
            raw: u1,
            value: PORTMUX_TCA01,
        },
        /// Port Multiplexer TCA0 Output 2
        TCA02: packed union {
            raw: u1,
            value: PORTMUX_TCA02,
        },
        /// Port Multiplexer TCA0 Output 3
        TCA03: packed union {
            raw: u1,
            value: PORTMUX_TCA03,
        },
        /// Port Multiplexer TCA0 Output 4
        TCA04: packed union {
            raw: u1,
            value: PORTMUX_TCA04,
        },
        /// Port Multiplexer TCA0 Output 5
        TCA05: packed union {
            raw: u1,
            value: PORTMUX_TCA05,
        },
        padding: u2,
    }),
    /// Port Multiplexer Control D
    CTRLD: mmio.Mmio(packed struct(u8) {
        /// Port Multiplexer TCB
        TCB0: packed union {
            raw: u1,
            value: PORTMUX_TCB0,
        },
        padding: u7,
    }),
};
/// Reset controller — reports the cause of the last reset and allows
/// triggering a software reset. Bit positions are hardware-defined.
pub const RSTCTRL = extern struct {
    /// Reset Flags
    RSTFR: mmio.Mmio(packed struct(u8) {
        /// Power on Reset flag
        PORF: u1,
        /// Brown out detector Reset flag
        BORF: u1,
        /// External Reset flag
        EXTRF: u1,
        /// Watchdog Reset flag
        WDRF: u1,
        /// Software Reset flag
        SWRF: u1,
        /// UPDI Reset flag
        UPDIRF: u1,
        padding: u2,
    }),
    /// Software Reset
    SWRR: mmio.Mmio(packed struct(u8) {
        /// Software reset enable (writing 1 triggers a software reset)
        SWRE: u1,
        padding: u7,
    }),
};
    /// Real-Time Counter.
    /// Contains both the 16-bit RTC (CNT/PER/CMP) and the Periodic
    /// Interrupt Timer (PIT* registers).
    pub const RTC = extern struct {
        /// Clock Select
        pub const RTC_CLKSEL = enum(u2) {
            /// Internal 32kHz OSC
            INT32K = 0x0,
            /// Internal 1kHz OSC
            INT1K = 0x1,
            /// 32KHz Crystal OSC
            TOSC32K = 0x2,
            /// External Clock
            EXTCLK = 0x3,
        };
        /// Prescaling Factor select
        pub const RTC_PRESCALER = enum(u4) {
            /// RTC Clock / 1
            DIV1 = 0x0,
            /// RTC Clock / 2
            DIV2 = 0x1,
            /// RTC Clock / 4
            DIV4 = 0x2,
            /// RTC Clock / 8
            DIV8 = 0x3,
            /// RTC Clock / 16
            DIV16 = 0x4,
            /// RTC Clock / 32
            DIV32 = 0x5,
            /// RTC Clock / 64
            DIV64 = 0x6,
            /// RTC Clock / 128
            DIV128 = 0x7,
            /// RTC Clock / 256
            DIV256 = 0x8,
            /// RTC Clock / 512
            DIV512 = 0x9,
            /// RTC Clock / 1024
            DIV1024 = 0xa,
            /// RTC Clock / 2048
            DIV2048 = 0xb,
            /// RTC Clock / 4096
            DIV4096 = 0xc,
            /// RTC Clock / 8192
            DIV8192 = 0xd,
            /// RTC Clock / 16384
            DIV16384 = 0xe,
            /// RTC Clock / 32768
            DIV32768 = 0xf,
        };
        /// Period select (PIT interrupt interval in RTC clock cycles)
        pub const RTC_PERIOD = enum(u4) {
            /// Off
            OFF = 0x0,
            /// RTC Clock Cycles 4
            CYC4 = 0x1,
            /// RTC Clock Cycles 8
            CYC8 = 0x2,
            /// RTC Clock Cycles 16
            CYC16 = 0x3,
            /// RTC Clock Cycles 32
            CYC32 = 0x4,
            /// RTC Clock Cycles 64
            CYC64 = 0x5,
            /// RTC Clock Cycles 128
            CYC128 = 0x6,
            /// RTC Clock Cycles 256
            CYC256 = 0x7,
            /// RTC Clock Cycles 512
            CYC512 = 0x8,
            /// RTC Clock Cycles 1024
            CYC1024 = 0x9,
            /// RTC Clock Cycles 2048
            CYC2048 = 0xa,
            /// RTC Clock Cycles 4096
            CYC4096 = 0xb,
            /// RTC Clock Cycles 8192
            CYC8192 = 0xc,
            /// RTC Clock Cycles 16384
            CYC16384 = 0xd,
            /// RTC Clock Cycles 32768
            CYC32768 = 0xe,
            _,
        };
        /// Control A
        CTRLA: mmio.Mmio(packed struct(u8) {
            /// Enable
            RTCEN: u1,
            reserved3: u2,
            /// Prescaling Factor
            PRESCALER: packed union {
                raw: u4,
                value: RTC_PRESCALER,
            },
            /// Run In Standby
            RUNSTDBY: u1,
        }),
        /// Status — busy flags while writes synchronize to the RTC clock domain
        STATUS: mmio.Mmio(packed struct(u8) {
            /// CTRLA Synchronization Busy Flag
            CTRLABUSY: u1,
            /// Count Synchronization Busy Flag
            CNTBUSY: u1,
            /// Period Synchronization Busy Flag
            PERBUSY: u1,
            /// Comparator Synchronization Busy Flag
            CMPBUSY: u1,
            padding: u4,
        }),
        /// Interrupt Control
        INTCTRL: mmio.Mmio(packed struct(u8) {
            /// Overflow Interrupt enable
            OVF: u1,
            /// Compare Match Interrupt enable
            CMP: u1,
            padding: u6,
        }),
        /// Interrupt Flags
        INTFLAGS: mmio.Mmio(packed struct(u8) {
            /// Overflow Interrupt Flag
            OVF: u1,
            /// Compare Match Interrupt
            CMP: u1,
            padding: u6,
        }),
        /// Temporary (holding byte for 16-bit register access)
        TEMP: u8,
        /// Debug control
        DBGCTRL: mmio.Mmio(packed struct(u8) {
            /// Run in debug
            DBGRUN: u1,
            padding: u7,
        }),
        reserved7: [1]u8,
        /// Clock Select
        CLKSEL: mmio.Mmio(packed struct(u8) {
            /// Clock Select
            CLKSEL: packed union {
                raw: u2,
                value: RTC_CLKSEL,
            },
            padding: u6,
        }),
        /// Counter
        CNT: u16,
        /// Period
        PER: u16,
        /// Compare
        CMP: u16,
        reserved16: [2]u8,
        /// PIT Control A
        PITCTRLA: mmio.Mmio(packed struct(u8) {
            /// Enable
            PITEN: u1,
            reserved3: u2,
            /// Period
            PERIOD: packed union {
                raw: u4,
                value: RTC_PERIOD,
            },
            padding: u1,
        }),
        /// PIT Status
        PITSTATUS: mmio.Mmio(packed struct(u8) {
            /// CTRLA Synchronization Busy Flag
            CTRLBUSY: u1,
            padding: u7,
        }),
        /// PIT Interrupt Control
        PITINTCTRL: mmio.Mmio(packed struct(u8) {
            /// Periodic Interrupt
            PI: u1,
            padding: u7,
        }),
        /// PIT Interrupt Flags
        PITINTFLAGS: mmio.Mmio(packed struct(u8) {
            /// Periodic Interrupt
            PI: u1,
            padding: u7,
        }),
        reserved21: [1]u8,
        /// PIT Debug control
        PITDBGCTRL: mmio.Mmio(packed struct(u8) {
            /// Run in debug
            DBGRUN: u1,
            padding: u7,
        }),
    };
    /// Signature row.
    /// Read-only device identification, serial number, and factory
    /// calibration bytes.
    pub const SIGROW = extern struct {
        /// Device ID Byte 0
        DEVICEID0: u8,
        /// Device ID Byte 1
        DEVICEID1: u8,
        /// Device ID Byte 2
        DEVICEID2: u8,
        /// Serial Number Byte 0
        SERNUM0: u8,
        /// Serial Number Byte 1
        SERNUM1: u8,
        /// Serial Number Byte 2
        SERNUM2: u8,
        /// Serial Number Byte 3
        SERNUM3: u8,
        /// Serial Number Byte 4
        SERNUM4: u8,
        /// Serial Number Byte 5
        SERNUM5: u8,
        /// Serial Number Byte 6
        SERNUM6: u8,
        /// Serial Number Byte 7
        SERNUM7: u8,
        /// Serial Number Byte 8
        SERNUM8: u8,
        /// Serial Number Byte 9
        SERNUM9: u8,
        reserved32: [19]u8,
        /// Temperature Sensor Calibration Byte 0
        TEMPSENSE0: u8,
        /// Temperature Sensor Calibration Byte 1
        TEMPSENSE1: u8,
        /// OSC16 error at 3V
        OSC16ERR3V: u8,
        /// OSC16 error at 5V
        OSC16ERR5V: u8,
        /// OSC20 error at 3V
        OSC20ERR3V: u8,
        /// OSC20 error at 5V
        OSC20ERR5V: u8,
    };
    /// Sleep Controller.
    /// Selects and enables the device sleep mode.
    pub const SLPCTRL = extern struct {
        /// Sleep mode select
        pub const SLPCTRL_SMODE = enum(u2) {
            /// Idle mode
            IDLE = 0x0,
            /// Standby Mode
            STDBY = 0x1,
            /// Power-down Mode
            PDOWN = 0x2,
            _,
        };
        /// Control
        CTRLA: mmio.Mmio(packed struct(u8) {
            /// Sleep enable
            SEN: u1,
            /// Sleep mode
            SMODE: packed union {
                raw: u2,
                value: SLPCTRL_SMODE,
            },
            padding: u5,
        }),
    };
    /// Serial Peripheral Interface.
    pub const SPI = extern struct {
        /// Prescaler select (SCK rate relative to the system clock)
        pub const SPI_PRESC = enum(u2) {
            /// System Clock / 4
            DIV4 = 0x0,
            /// System Clock / 16
            DIV16 = 0x1,
            /// System Clock / 64
            DIV64 = 0x2,
            /// System Clock / 128
            DIV128 = 0x3,
        };
        /// SPI Mode select (clock polarity/phase combinations 0-3)
        pub const SPI_MODE = enum(u2) {
            /// SPI Mode 0
            @"0" = 0x0,
            /// SPI Mode 1
            @"1" = 0x1,
            /// SPI Mode 2
            @"2" = 0x2,
            /// SPI Mode 3
            @"3" = 0x3,
        };
        /// Control A
        CTRLA: mmio.Mmio(packed struct(u8) {
            /// Enable Module
            ENABLE: u1,
            /// Prescaler
            PRESC: packed union {
                raw: u2,
                value: SPI_PRESC,
            },
            reserved4: u1,
            /// Enable Double Speed
            CLK2X: u1,
            /// Host Operation Enable
            MASTER: u1,
            /// Data Order Setting
            DORD: u1,
            padding: u1,
        }),
        /// Control B
        CTRLB: mmio.Mmio(packed struct(u8) {
            /// SPI Mode
            MODE: packed union {
                raw: u2,
                value: SPI_MODE,
            },
            /// Client Select Disable
            SSD: u1,
            reserved6: u3,
            /// Buffer Write Mode
            BUFWR: u1,
            /// Buffer Mode Enable
            BUFEN: u1,
        }),
        /// Interrupt Control
        INTCTRL: mmio.Mmio(packed struct(u8) {
            /// Interrupt Enable
            IE: u1,
            reserved4: u3,
            /// Client Select Trigger Interrupt Enable
            SSIE: u1,
            /// Data Register Empty Interrupt Enable
            DREIE: u1,
            /// Transfer Complete Interrupt Enable
            TXCIE: u1,
            /// Receive Complete Interrupt Enable
            RXCIE: u1,
        }),
        /// Interrupt Flags
        INTFLAGS: mmio.Mmio(packed struct(u8) {
            /// Buffer Overflow
            BUFOVF: u1,
            reserved4: u3,
            /// Client Select Trigger Interrupt Flag
            SSIF: u1,
            /// Data Register Empty Interrupt Flag
            DREIF: u1,
            /// Transfer Complete Interrupt Flag
            TXCIF: u1,
            /// Receive Complete Interrupt Flag
            RXCIF: u1,
        }),
        /// Data (read receives, write transmits)
        DATA: u8,
    };
    /// System Configuration Registers.
    pub const SYSCFG = extern struct {
        /// Revision ID (silicon revision, read-only)
        REVID: u8,
        /// External Break
        EXTBRK: mmio.Mmio(packed struct(u8) {
            /// External break enable
            ENEXTBRK: u1,
            padding: u7,
        }),
    };
    /// 16-bit Timer/Counter Type A.
    /// The peripheral has two mutually exclusive register layouts sharing
    /// the same address range: one 16-bit timer (SINGLE) or two 8-bit
    /// timers (SPLIT), selected via CTRLD.SPLITM — hence the extern union.
    pub const TCA = extern union {
        pub const Mode = enum {
            SINGLE,
            SPLIT,
        };
        /// Determine the currently active layout by reading the SPLITM bit
        /// (located at the same offset in both views).
        pub fn get_mode(self: *volatile @This()) Mode {
            {
                const value = self.SINGLE.CTRLD.read().SPLITM;
                switch (value) {
                    0 => return .SINGLE,
                    else => {},
                }
            }
            {
                const value = self.SPLIT.CTRLD.read().SPLITM;
                switch (value) {
                    1 => return .SPLIT,
                    else => {},
                }
            }
            // SPLITM is a single bit, so one of the two branches above
            // always returns.
            unreachable;
        }
        /// Clock Selection
        pub const TCA_SINGLE_CLKSEL = enum(u3) {
            /// System Clock
            DIV1 = 0x0,
            /// System Clock / 2
            DIV2 = 0x1,
            /// System Clock / 4
            DIV4 = 0x2,
            /// System Clock / 8
            DIV8 = 0x3,
            /// System Clock / 16
            DIV16 = 0x4,
            /// System Clock / 64
            DIV64 = 0x5,
            /// System Clock / 256
            DIV256 = 0x6,
            /// System Clock / 1024
            DIV1024 = 0x7,
        };
        /// Waveform generation mode select
        pub const TCA_SINGLE_WGMODE = enum(u3) {
            /// Normal Mode
            NORMAL = 0x0,
            /// Frequency Generation Mode
            FRQ = 0x1,
            /// Single Slope PWM
            SINGLESLOPE = 0x3,
            /// Dual Slope PWM, overflow on TOP
            DSTOP = 0x5,
            /// Dual Slope PWM, overflow on TOP and BOTTOM
            DSBOTH = 0x6,
            /// Dual Slope PWM, overflow on BOTTOM
            DSBOTTOM = 0x7,
            _,
        };
        /// Command select
        pub const TCA_SINGLE_CMD = enum(u2) {
            /// No Command
            NONE = 0x0,
            /// Force Update
            UPDATE = 0x1,
            /// Force Restart
            RESTART = 0x2,
            /// Force Hard Reset
            RESET = 0x3,
        };
        /// Direction select
        pub const TCA_SINGLE_DIR = enum(u1) {
            /// Count up
            UP = 0x0,
            /// Count down
            DOWN = 0x1,
        };
        /// Event Action select
        pub const TCA_SINGLE_EVACT = enum(u2) {
            /// Count on positive edge event
            POSEDGE = 0x0,
            /// Count on any edge event
            ANYEDGE = 0x1,
            /// Count on prescaled clock while event line is 1.
            HIGHLVL = 0x2,
            /// Count on prescaled clock. Event controls count direction. Up-count when event line is 0, down-count when event line is 1.
            UPDOWN = 0x3,
        };
        /// Clock Selection
        pub const TCA_SPLIT_CLKSEL = enum(u3) {
            /// System Clock
            DIV1 = 0x0,
            /// System Clock / 2
            DIV2 = 0x1,
            /// System Clock / 4
            DIV4 = 0x2,
            /// System Clock / 8
            DIV8 = 0x3,
            /// System Clock / 16
            DIV16 = 0x4,
            /// System Clock / 64
            DIV64 = 0x5,
            /// System Clock / 256
            DIV256 = 0x6,
            /// System Clock / 1024
            DIV1024 = 0x7,
        };
        /// Command select
        pub const TCA_SPLIT_CMD = enum(u2) {
            /// No Command
            NONE = 0x0,
            /// Force Update
            UPDATE = 0x1,
            /// Force Restart
            RESTART = 0x2,
            /// Force Hard Reset
            RESET = 0x3,
        };
        /// Register layout when operating as one 16-bit timer.
        SINGLE: extern struct {
            /// Control A
            CTRLA: mmio.Mmio(packed struct(u8) {
                /// Module Enable
                ENABLE: u1,
                /// Clock Selection
                CLKSEL: packed union {
                    raw: u3,
                    value: TCA_SINGLE_CLKSEL,
                },
                padding: u4,
            }),
            /// Control B
            CTRLB: mmio.Mmio(packed struct(u8) {
                /// Waveform generation mode
                WGMODE: packed union {
                    raw: u3,
                    value: TCA_SINGLE_WGMODE,
                },
                /// Auto Lock Update
                ALUPD: u1,
                /// Compare 0 Enable
                CMP0EN: u1,
                /// Compare 1 Enable
                CMP1EN: u1,
                /// Compare 2 Enable
                CMP2EN: u1,
                padding: u1,
            }),
            /// Control C
            CTRLC: mmio.Mmio(packed struct(u8) {
                /// Compare 0 Waveform Output Value
                CMP0OV: u1,
                /// Compare 1 Waveform Output Value
                CMP1OV: u1,
                /// Compare 2 Waveform Output Value
                CMP2OV: u1,
                padding: u5,
            }),
            /// Control D
            CTRLD: mmio.Mmio(packed struct(u8) {
                /// Split Mode Enable
                SPLITM: u1,
                padding: u7,
            }),
            /// Control E Clear (writing 1 clears the corresponding bit)
            CTRLECLR: mmio.Mmio(packed struct(u8) {
                /// Direction
                DIR: u1,
                /// Lock Update
                LUPD: u1,
                /// Command
                CMD: packed union {
                    raw: u2,
                    value: TCA_SINGLE_CMD,
                },
                padding: u4,
            }),
            /// Control E Set (writing 1 sets the corresponding bit)
            CTRLESET: mmio.Mmio(packed struct(u8) {
                /// Direction
                DIR: packed union {
                    raw: u1,
                    value: TCA_SINGLE_DIR,
                },
                /// Lock Update
                LUPD: u1,
                /// Command
                CMD: packed union {
                    raw: u2,
                    value: TCA_SINGLE_CMD,
                },
                padding: u4,
            }),
            /// Control F Clear
            CTRLFCLR: mmio.Mmio(packed struct(u8) {
                /// Period Buffer Valid
                PERBV: u1,
                /// Compare 0 Buffer Valid
                CMP0BV: u1,
                /// Compare 1 Buffer Valid
                CMP1BV: u1,
                /// Compare 2 Buffer Valid
                CMP2BV: u1,
                padding: u4,
            }),
            /// Control F Set
            CTRLFSET: mmio.Mmio(packed struct(u8) {
                /// Period Buffer Valid
                PERBV: u1,
                /// Compare 0 Buffer Valid
                CMP0BV: u1,
                /// Compare 1 Buffer Valid
                CMP1BV: u1,
                /// Compare 2 Buffer Valid
                CMP2BV: u1,
                padding: u4,
            }),
            reserved9: [1]u8,
            /// Event Control
            EVCTRL: mmio.Mmio(packed struct(u8) {
                /// Count on Event Input
                CNTEI: u1,
                /// Event Action
                EVACT: packed union {
                    raw: u2,
                    value: TCA_SINGLE_EVACT,
                },
                padding: u5,
            }),
            /// Interrupt Control
            INTCTRL: mmio.Mmio(packed struct(u8) {
                /// Overflow Interrupt
                OVF: u1,
                reserved4: u3,
                /// Compare 0 Interrupt
                CMP0: u1,
                /// Compare 1 Interrupt
                CMP1: u1,
                /// Compare 2 Interrupt
                CMP2: u1,
                padding: u1,
            }),
            /// Interrupt Flags
            INTFLAGS: mmio.Mmio(packed struct(u8) {
                /// Overflow Interrupt
                OVF: u1,
                reserved4: u3,
                /// Compare 0 Interrupt
                CMP0: u1,
                /// Compare 1 Interrupt
                CMP1: u1,
                /// Compare 2 Interrupt
                CMP2: u1,
                padding: u1,
            }),
            reserved14: [2]u8,
            /// Debug Control
            DBGCTRL: mmio.Mmio(packed struct(u8) {
                /// Debug Run
                DBGRUN: u1,
                padding: u7,
            }),
            /// Temporary data for 16-bit Access
            TEMP: u8,
            reserved32: [16]u8,
            /// Count
            CNT: u16,
            reserved38: [4]u8,
            /// Period
            PER: u16,
            /// Compare 0
            CMP0: u16,
            /// Compare 1
            CMP1: u16,
            /// Compare 2
            CMP2: u16,
            reserved54: [8]u8,
            /// Period Buffer
            PERBUF: u16,
            /// Compare 0 Buffer
            CMP0BUF: u16,
            /// Compare 1 Buffer
            CMP1BUF: u16,
            /// Compare 2 Buffer
            CMP2BUF: u16,
        },
        /// Register layout when operating as two 8-bit timers (low/high).
        SPLIT: extern struct {
            /// Control A
            CTRLA: mmio.Mmio(packed struct(u8) {
                /// Module Enable
                ENABLE: u1,
                /// Clock Selection
                CLKSEL: packed union {
                    raw: u3,
                    value: TCA_SPLIT_CLKSEL,
                },
                padding: u4,
            }),
            /// Control B
            CTRLB: mmio.Mmio(packed struct(u8) {
                /// Low Compare 0 Enable
                LCMP0EN: u1,
                /// Low Compare 1 Enable
                LCMP1EN: u1,
                /// Low Compare 2 Enable
                LCMP2EN: u1,
                reserved4: u1,
                /// High Compare 0 Enable
                HCMP0EN: u1,
                /// High Compare 1 Enable
                HCMP1EN: u1,
                /// High Compare 2 Enable
                HCMP2EN: u1,
                padding: u1,
            }),
            /// Control C
            CTRLC: mmio.Mmio(packed struct(u8) {
                /// Low Compare 0 Output Value
                LCMP0OV: u1,
                /// Low Compare 1 Output Value
                LCMP1OV: u1,
                /// Low Compare 2 Output Value
                LCMP2OV: u1,
                reserved4: u1,
                /// High Compare 0 Output Value
                HCMP0OV: u1,
                /// High Compare 1 Output Value
                HCMP1OV: u1,
                /// High Compare 2 Output Value
                HCMP2OV: u1,
                padding: u1,
            }),
            /// Control D
            CTRLD: mmio.Mmio(packed struct(u8) {
                /// Split Mode Enable
                SPLITM: u1,
                padding: u7,
            }),
            /// Control E Clear
            CTRLECLR: mmio.Mmio(packed struct(u8) {
                reserved2: u2,
                /// Command
                CMD: packed union {
                    raw: u2,
                    value: TCA_SPLIT_CMD,
                },
                padding: u4,
            }),
            /// Control E Set
            CTRLESET: mmio.Mmio(packed struct(u8) {
                reserved2: u2,
                /// Command
                CMD: packed union {
                    raw: u2,
                    value: TCA_SPLIT_CMD,
                },
                padding: u4,
            }),
            reserved10: [4]u8,
            /// Interrupt Control
            INTCTRL: mmio.Mmio(packed struct(u8) {
                /// Low Underflow Interrupt Enable
                LUNF: u1,
                /// High Underflow Interrupt Enable
                HUNF: u1,
                reserved4: u2,
                /// Low Compare 0 Interrupt Enable
                LCMP0: u1,
                /// Low Compare 1 Interrupt Enable
                LCMP1: u1,
                /// Low Compare 2 Interrupt Enable
                LCMP2: u1,
                padding: u1,
            }),
            /// Interrupt Flags
            INTFLAGS: mmio.Mmio(packed struct(u8) {
                /// Low Underflow Interrupt Flag
                LUNF: u1,
                /// High Underflow Interrupt Flag
                HUNF: u1,
                reserved4: u2,
                /// Low Compare 0 Interrupt Flag
                LCMP0: u1,
                /// Low Compare 1 Interrupt Flag
                LCMP1: u1,
                /// Low Compare 2 Interrupt Flag
                LCMP2: u1,
                padding: u1,
            }),
            reserved14: [2]u8,
            /// Debug Control
            DBGCTRL: mmio.Mmio(packed struct(u8) {
                /// Debug Run
                DBGRUN: u1,
                padding: u7,
            }),
            reserved32: [17]u8,
            /// Low Count
            LCNT: u8,
            /// High Count
            HCNT: u8,
            reserved38: [4]u8,
            /// Low Period
            LPER: u8,
            /// High Period
            HPER: u8,
            /// Low Compare
            LCMP0: u8,
            /// High Compare
            HCMP0: u8,
            /// Low Compare
            LCMP1: u8,
            /// High Compare
            HCMP1: u8,
            /// Low Compare
            LCMP2: u8,
            /// High Compare
            HCMP2: u8,
        },
    };
    /// 16-bit Timer Type B.
    pub const TCB = extern struct {
        /// Clock Select
        pub const TCB_CLKSEL = enum(u2) {
            /// CLK_PER (No Prescaling)
            CLKDIV1 = 0x0,
            /// CLK_PER/2 (From Prescaler)
            CLKDIV2 = 0x1,
            /// Use Clock from TCA
            CLKTCA = 0x2,
            _,
        };
        /// Timer Mode select
        pub const TCB_CNTMODE = enum(u3) {
            /// Periodic Interrupt
            INT = 0x0,
            /// Periodic Timeout
            TIMEOUT = 0x1,
            /// Input Capture Event
            CAPT = 0x2,
            /// Input Capture Frequency measurement
            FRQ = 0x3,
            /// Input Capture Pulse-Width measurement
            PW = 0x4,
            /// Input Capture Frequency and Pulse-Width measurement
            FRQPW = 0x5,
            /// Single Shot
            SINGLE = 0x6,
            /// 8-bit PWM
            PWM8 = 0x7,
        };
        /// Control A
        CTRLA: mmio.Mmio(packed struct(u8) {
            /// Enable
            ENABLE: u1,
            /// Clock Select
            CLKSEL: packed union {
                raw: u2,
                value: TCB_CLKSEL,
            },
            reserved4: u1,
            /// Synchronize Update
            SYNCUPD: u1,
            reserved6: u1,
            /// Run Standby
            RUNSTDBY: u1,
            padding: u1,
        }),
        /// Control Register B
        CTRLB: mmio.Mmio(packed struct(u8) {
            /// Timer Mode
            CNTMODE: packed union {
                raw: u3,
                value: TCB_CNTMODE,
            },
            reserved4: u1,
            /// Pin Output Enable
            CCMPEN: u1,
            /// Pin Initial State
            CCMPINIT: u1,
            /// Asynchronous Enable
            ASYNC: u1,
            padding: u1,
        }),
        reserved4: [2]u8,
        /// Event Control
        EVCTRL: mmio.Mmio(packed struct(u8) {
            /// Event Input Enable
            CAPTEI: u1,
            reserved4: u3,
            /// Event Edge
            EDGE: u1,
            reserved6: u1,
            /// Input Capture Noise Cancellation Filter
            FILTER: u1,
            padding: u1,
        }),
        /// Interrupt Control
        INTCTRL: mmio.Mmio(packed struct(u8) {
            /// Capture or Timeout
            CAPT: u1,
            padding: u7,
        }),
        /// Interrupt Flags
        INTFLAGS: mmio.Mmio(packed struct(u8) {
            /// Capture or Timeout
            CAPT: u1,
            padding: u7,
        }),
        /// Status
        STATUS: mmio.Mmio(packed struct(u8) {
            /// Run
            RUN: u1,
            padding: u7,
        }),
        /// Debug Control
        DBGCTRL: mmio.Mmio(packed struct(u8) {
            /// Debug Run
            DBGRUN: u1,
            padding: u7,
        }),
        /// Temporary Value (holding byte for 16-bit register access)
        TEMP: u8,
        /// Count
        CNT: u16,
        /// Compare or Capture
        CCMP: u16,
    };
    /// Timer Counter D.
    pub const TCD = extern struct {
        /// clock select
        pub const TCD_CLKSEL = enum(u2) {
            /// 20 MHz oscillator
            @"20MHZ" = 0x0,
            /// External clock
            EXTCLK = 0x2,
            /// System clock
            SYSCLK = 0x3,
            _,
        };
        /// counter prescaler select
        pub const TCD_CNTPRES = enum(u2) {
            /// Sync clock divided by 1
            DIV1 = 0x0,
            /// Sync clock divided by 4
            DIV4 = 0x1,
            /// Sync clock divided by 32
            DIV32 = 0x2,
            _,
        };
        /// Synchronization prescaler select
        pub const TCD_SYNCPRES = enum(u2) {
            /// Selected clock source divided by 1
            DIV1 = 0x0,
            /// Selected clock source divided by 2
            DIV2 = 0x1,
            /// Selected clock source divided by 4
            DIV4 = 0x2,
            /// Selected clock source divided by 8
            DIV8 = 0x3,
        };
        /// Waveform generation mode select
        pub const TCD_WGMODE = enum(u2) {
            /// One ramp mode
            ONERAMP = 0x0,
            /// Two ramp mode
            TWORAMP = 0x1,
            /// Four ramp mode
            FOURRAMP = 0x2,
            /// Dual slope mode
            DS = 0x3,
        };
        /// Compare C output select
        pub const TCD_CMPCSEL = enum(u1) {
            /// PWM A output
            PWMA = 0x0,
            /// PWM B output
            PWMB = 0x1,
        };
        /// Compare D output select
        pub const TCD_CMPDSEL = enum(u1) {
            /// PWM A output
            PWMA = 0x0,
            /// PWM B output
            PWMB = 0x1,
        };
        /// Dither select
        pub const TCD_DITHERSEL = enum(u2) {
            /// On-time ramp B
            ONTIMEB = 0x0,
            /// On-time ramp A and B
            ONTIMEAB = 0x1,
            /// Dead-time ramp B
            DEADTIMEB = 0x2,
            /// Dead-time ramp A and B
            DEADTIMEAB = 0x3,
        };
        /// Delay prescaler select
        pub const TCD_DLYPRESC = enum(u2) {
            /// No prescaling
            DIV1 = 0x0,
            /// Prescale with 2
            DIV2 = 0x1,
            /// Prescale with 4
            DIV4 = 0x2,
            /// Prescale with 8
            DIV8 = 0x3,
        };
        /// Delay select
        pub const TCD_DLYSEL = enum(u2) {
            /// No delay
            OFF = 0x0,
            /// Input blanking enabled
            INBLANK = 0x1,
            /// Event delay enabled
            EVENT = 0x2,
            _,
        };
        /// Delay trigger select
        pub const TCD_DLYTRIG = enum(u2) {
            /// Compare A set
            CMPASET = 0x0,
            /// Compare A clear
            CMPACLR = 0x1,
            /// Compare B set
            CMPBSET = 0x2,
            /// Compare B clear
            CMPBCLR = 0x3,
        };
        /// Event action select
        pub const TCD_ACTION = enum(u1) {
            /// Event trigger a fault
            FAULT = 0x0,
            /// Event trigger a fault and capture
            CAPTURE = 0x1,
        };
        /// Event config select
        pub const TCD_CFG = enum(u2) {
            /// Neither Filter nor Asynchronous Event is enabled
            NEITHER = 0x0,
            /// Input Capture Noise Cancellation Filter enabled
            FILTER = 0x1,
            /// Asynchronous Event output qualification enabled
            ASYNC = 0x2,
            _,
        };
        /// Edge select
        pub const TCD_EDGE = enum(u1) {
            /// The falling edge or low level of event generates retrigger or fault action
            FALL_LOW = 0x0,
            /// The rising edge or high level of event generates retrigger or fault action
            RISE_HIGH = 0x1,
        };
        /// Input mode select
        pub const TCD_INPUTMODE = enum(u4) {
            /// Input has no actions
            NONE = 0x0,
            /// Stop output, jump to opposite compare cycle and wait
            JMPWAIT = 0x1,
            /// Stop output, execute opposite compare cycle and wait
            EXECWAIT = 0x2,
            /// stop output, execute opposite compare cycle while fault active
            EXECFAULT = 0x3,
            /// Stop all outputs, maintain frequency
            FREQ = 0x4,
            /// Stop all outputs, execute dead time while fault active
            EXECDT = 0x5,
            /// Stop all outputs, jump to next compare cycle and wait
            WAIT = 0x6,
            /// Stop all outputs, wait for software action
            WAITSW = 0x7,
            /// Stop output on edge, jump to next compare cycle
            EDGETRIG = 0x8,
            /// Stop output on edge, maintain frequency
            EDGETRIGFREQ = 0x9,
            /// Stop output at level, maintain frequency
            LVLTRIGFREQ = 0xa,
            _,
        };
        /// Control A
        CTRLA: mmio.Mmio(packed struct(u8) {
            /// Enable
            ENABLE: u1,
            /// Synchronization prescaler
            SYNCPRES: packed union {
                raw: u2,
                value: TCD_SYNCPRES,
            },
            /// counter prescaler
            CNTPRES: packed union {
                raw: u2,
                value: TCD_CNTPRES,
            },
            /// clock select
            CLKSEL: packed union {
                raw: u2,
                value: TCD_CLKSEL,
            },
            padding: u1,
        }),
        /// Control B
        CTRLB: mmio.Mmio(packed struct(u8) {
            /// Waveform generation mode
            WGMODE: packed union {
                raw: u2,
                value: TCD_WGMODE,
            },
            padding: u6,
        }),
        /// Control C
        CTRLC: mmio.Mmio(packed struct(u8) {
            /// Compare output value override
            CMPOVR: u1,
            /// Auto update
            AUPDATE: u1,
            reserved3: u1,
            /// Fifty percent waveform
            FIFTY: u1,
            reserved6: u2,
            /// Compare C output select
            CMPCSEL: packed union {
                raw: u1,
                value: TCD_CMPCSEL,
            },
            /// Compare D output select
            CMPDSEL: packed union {
                raw: u1,
                value: TCD_CMPDSEL,
            },
        }),
        /// Control D
        CTRLD: mmio.Mmio(packed struct(u8) {
            /// Compare A value
            CMPAVAL: u4,
            /// Compare B value
            CMPBVAL: u4,
        }),
        /// Control E
        CTRLE: mmio.Mmio(packed struct(u8) {
            /// Synchronize end of cycle strobe
            SYNCEOC: u1,
            /// Synchronize strobe
            SYNC: u1,
            /// Restart strobe
            RESTART: u1,
            /// Software Capture A Strobe
            SCAPTUREA: u1,
            /// Software Capture B Strobe
            SCAPTUREB: u1,
            reserved7: u2,
            /// Disable at end of cycle
            DISEOC: u1,
        }),
        reserved8: [3]u8,
        /// EVCTRLA
        EVCTRLA: mmio.Mmio(packed struct(u8) {
            /// Trigger event enable
            TRIGEI: u1,
            reserved2: u1,
            /// Event action
            ACTION: packed union {
                raw: u1,
                value: TCD_ACTION,
            },
            reserved4: u1,
            /// Edge select
            EDGE: packed union {
                raw: u1,
                value: TCD_EDGE,
            },
            reserved6: u1,
            /// Event config
            CFG: packed union {
                raw: u2,
                value: TCD_CFG,
            },
        }),
        /// EVCTRLB
        EVCTRLB: mmio.Mmio(packed struct(u8) {
            /// Trigger event enable
            TRIGEI: u1,
            reserved2: u1,
            /// Event action
            ACTION: packed union {
                raw: u1,
                value: TCD_ACTION,
            },
            reserved4: u1,
            /// Edge select
            EDGE: packed union {
                raw: u1,
                value: TCD_EDGE,
            },
            reserved6: u1,
            /// Event config
            CFG: packed union {
                raw: u2,
                value: TCD_CFG,
            },
        }),
        reserved12: [2]u8,
        /// Interrupt Control
        INTCTRL: mmio.Mmio(packed struct(u8) {
            /// Overflow interrupt enable
            OVF: u1,
            reserved2: u1,
            /// Trigger A interrupt enable
            TRIGA: u1,
            /// Trigger B interrupt enable
            TRIGB: u1,
            padding: u4,
        }),
        /// Interrupt Flags
        INTFLAGS: mmio.Mmio(packed struct(u8) {
            /// Overflow interrupt flag
            OVF: u1,
            reserved2: u1,
            /// Trigger A interrupt flag
            TRIGA: u1,
            /// Trigger B interrupt flag
            TRIGB: u1,
            padding: u4,
        }),
        /// Status
        STATUS: mmio.Mmio(packed struct(u8) {
            /// Enable ready
            ENRDY: u1,
            /// Command ready
            CMDRDY: u1,
            reserved6: u4,
            /// PWM activity on A
            PWMACTA: u1,
            /// PWM activity on B
            PWMACTB: u1,
        }),
        reserved16: [1]u8,
        /// Input Control A
        INPUTCTRLA: mmio.Mmio(packed struct(u8) {
            /// Input mode
            INPUTMODE: packed union {
                raw: u4,
                value: TCD_INPUTMODE,
            },
            padding: u4,
        }),
        /// Input Control B
        INPUTCTRLB: mmio.Mmio(packed struct(u8) {
            /// Input mode
            INPUTMODE: packed union {
                raw: u4,
                value: TCD_INPUTMODE,
            },
            padding: u4,
        }),
        /// Fault Control
        FAULTCTRL: mmio.Mmio(packed struct(u8) {
            /// Compare A value
            CMPA: u1,
            /// Compare B value
            CMPB: u1,
            /// Compare C value
            CMPC: u1,
            /// Compare D value
            CMPD: u1,
            /// Compare A enable
            CMPAEN: u1,
            /// Compare B enable
            CMPBEN: u1,
            /// Compare C enable
            CMPCEN: u1,
            /// Compare D enable
            CMPDEN: u1,
        }),
        reserved20: [1]u8,
        /// Delay Control
        DLYCTRL: mmio.Mmio(packed struct(u8) {
            /// Delay select
            DLYSEL: packed union {
                raw: u2,
                value: TCD_DLYSEL,
            },
            /// Delay trigger
            DLYTRIG: packed union {
                raw: u2,
                value: TCD_DLYTRIG,
            },
            /// Delay prescaler
            DLYPRESC: packed union {
                raw: u2,
                value: TCD_DLYPRESC,
            },
            padding: u2,
        }),
        /// Delay value
        DLYVAL: mmio.Mmio(packed struct(u8) {
            /// Delay value
            DLYVAL: u8,
        }),
        reserved24: [2]u8,
        /// Dither Control A
        DITCTRL: mmio.Mmio(packed struct(u8) {
            /// Dither select
            DITHERSEL: packed union {
                raw: u2,
                value: TCD_DITHERSEL,
            },
            padding: u6,
        }),
        /// Dither value
        DITVAL: mmio.Mmio(packed struct(u8) {
            /// Dither value
            DITHER: u4,
            padding: u4,
        }),
        reserved30: [4]u8,
        /// Debug Control
        DBGCTRL: mmio.Mmio(packed struct(u8) {
            /// Debug run
            DBGRUN: u1,
            reserved2: u1,
            /// Fault detection
            FAULTDET: u1,
            padding: u5,
        }),
        reserved34: [3]u8,
        /// Capture A
        CAPTUREA: u16,
        /// Capture B
        CAPTUREB: u16,
        reserved40: [2]u8,
        /// Compare A Set
        CMPASET: mmio.Mmio(packed struct(u16) {
            /// Compare A Set
            CMPASET: u12,
            padding: u4,
        }),
        /// Compare A Clear
        CMPACLR: mmio.Mmio(packed struct(u16) {
            /// Compare A Clear
            CMPACLR: u12,
            padding: u4,
        }),
        /// Compare B Set
        CMPBSET: mmio.Mmio(packed struct(u16) {
            /// Compare B Set
            CMPBSET: u12,
            padding: u4,
        }),
        /// Compare B Clear
        CMPBCLR: mmio.Mmio(packed struct(u16) {
            /// Compare B Clear
            CMPBCLR: u12,
            padding: u4,
        }),
    };
    /// Two-Wire Interface (I2C-compatible).
    /// M*-prefixed registers belong to the host (master) unit, S*-prefixed
    /// registers to the client (slave) unit.
    pub const TWI = extern struct {
        /// SDA Hold Time select
        pub const TWI_SDAHOLD = enum(u2) {
            /// SDA hold time off
            OFF = 0x0,
            /// Typical 50ns hold time
            @"50NS" = 0x1,
            /// Typical 300ns hold time
            @"300NS" = 0x2,
            /// Typical 500ns hold time
            @"500NS" = 0x3,
        };
        /// SDA Setup Time select
        pub const TWI_SDASETUP = enum(u1) {
            /// SDA setup time is 4 clock cycles
            @"4CYC" = 0x0,
            /// SDA setup time is 8 clock cycles
            @"8CYC" = 0x1,
        };
        /// Inactive Bus Timeout select
        pub const TWI_TIMEOUT = enum(u2) {
            /// Bus Timeout Disabled
            DISABLED = 0x0,
            /// 50 Microseconds
            @"50US" = 0x1,
            /// 100 Microseconds
            @"100US" = 0x2,
            /// 200 Microseconds
            @"200US" = 0x3,
        };
        /// Acknowledge Action select
        pub const TWI_ACKACT = enum(u1) {
            /// Send ACK
            ACK = 0x0,
            /// Send NACK
            NACK = 0x1,
        };
        /// Command select (host)
        pub const TWI_MCMD = enum(u2) {
            /// No Action
            NOACT = 0x0,
            /// Issue Repeated Start Condition
            REPSTART = 0x1,
            /// Receive or Transmit Data, depending on DIR
            RECVTRANS = 0x2,
            /// Issue Stop Condition
            STOP = 0x3,
        };
        /// Bus State select
        pub const TWI_BUSSTATE = enum(u2) {
            /// Unknown Bus State
            UNKNOWN = 0x0,
            /// Bus is Idle
            IDLE = 0x1,
            /// This Module Controls The Bus
            OWNER = 0x2,
            /// The Bus is Busy
            BUSY = 0x3,
        };
        /// Command select (client)
        pub const TWI_SCMD = enum(u2) {
            /// No Action
            NOACT = 0x0,
            /// Used To Complete a Transaction
            COMPTRANS = 0x2,
            /// Used in Response to Address/Data Interrupt
            RESPONSE = 0x3,
            _,
        };
        /// Client Address or Stop select
        pub const TWI_AP = enum(u1) {
            /// Stop condition generated APIF
            STOP = 0x0,
            /// Address detection generated APIF
            ADR = 0x1,
        };
        /// Control A
        CTRLA: mmio.Mmio(packed struct(u8) {
            reserved1: u1,
            /// FM Plus Enable
            FMPEN: u1,
            /// SDA Hold Time
            SDAHOLD: packed union {
                raw: u2,
                value: TWI_SDAHOLD,
            },
            /// SDA Setup Time
            SDASETUP: packed union {
                raw: u1,
                value: TWI_SDASETUP,
            },
            padding: u3,
        }),
        reserved2: [1]u8,
        /// Debug Control Register
        DBGCTRL: mmio.Mmio(packed struct(u8) {
            /// Debug Run
            DBGRUN: u1,
            padding: u7,
        }),
        /// Host Control A
        MCTRLA: mmio.Mmio(packed struct(u8) {
            /// Enable TWI Host
            ENABLE: u1,
            /// Smart Mode Enable
            SMEN: u1,
            /// Inactive Bus Timeout
            TIMEOUT: packed union {
                raw: u2,
                value: TWI_TIMEOUT,
            },
            /// Quick Command Enable
            QCEN: u1,
            reserved6: u1,
            /// Write Interrupt Enable
            WIEN: u1,
            /// Read Interrupt Enable
            RIEN: u1,
        }),
        /// Host Control B
        MCTRLB: mmio.Mmio(packed struct(u8) {
            /// Command
            MCMD: packed union {
                raw: u2,
                value: TWI_MCMD,
            },
            /// Acknowledge Action
            ACKACT: packed union {
                raw: u1,
                value: TWI_ACKACT,
            },
            /// Flush
            FLUSH: u1,
            padding: u4,
        }),
        /// Host Status
        MSTATUS: mmio.Mmio(packed struct(u8) {
            /// Bus State
            BUSSTATE: packed union {
                raw: u2,
                value: TWI_BUSSTATE,
            },
            /// Bus Error
            BUSERR: u1,
            /// Arbitration Lost
            ARBLOST: u1,
            /// Received Acknowledge
            RXACK: u1,
            /// Clock Hold
            CLKHOLD: u1,
            /// Write Interrupt Flag
            WIF: u1,
            /// Read Interrupt Flag
            RIF: u1,
        }),
        /// Host Baud Rate Control
        MBAUD: u8,
        /// Host Address
        MADDR: u8,
        /// Host Data
        MDATA: u8,
        /// Client Control A
        SCTRLA: mmio.Mmio(packed struct(u8) {
            /// Enable TWI Client
            ENABLE: u1,
            /// Smart Mode Enable
            SMEN: u1,
            /// Promiscuous Mode Enable
            PMEN: u1,
            reserved5: u2,
            /// Stop Interrupt Enable
            PIEN: u1,
            /// Address/Stop Interrupt Enable
            APIEN: u1,
            /// Data Interrupt Enable
            DIEN: u1,
        }),
        /// Client Control B
        SCTRLB: mmio.Mmio(packed struct(u8) {
            /// Command
            SCMD: packed union {
                raw: u2,
                value: TWI_SCMD,
            },
            /// Acknowledge Action
            ACKACT: packed union {
                raw: u1,
                value: TWI_ACKACT,
            },
            padding: u5,
        }),
        /// Client Status
        SSTATUS: mmio.Mmio(packed struct(u8) {
            /// Client Address or Stop
            AP: packed union {
                raw: u1,
                value: TWI_AP,
            },
            /// Read/Write Direction
            DIR: u1,
            /// Bus Error
            BUSERR: u1,
            /// Collision
            COLL: u1,
            /// Received Acknowledge
            RXACK: u1,
            /// Clock Hold
            CLKHOLD: u1,
            /// Address/Stop Interrupt Flag
            APIF: u1,
            /// Data Interrupt Flag
            DIF: u1,
        }),
        /// Client Address
        SADDR: u8,
        /// Client Data
        SDATA: u8,
        /// Client Address Mask
        SADDRMASK: mmio.Mmio(packed struct(u8) {
            /// Address Enable
            ADDREN: u1,
            /// Address Mask
            ADDRMASK: u7,
        }),
    };
    /// Universal Synchronous and Asynchronous Receiver and Transmitter.
    pub const USART = extern struct {
        /// RS485 Mode internal transmitter select
        pub const USART_RS485 = enum(u2) {
            /// RS485 Mode disabled
            OFF = 0x0,
            /// RS485 Mode External drive
            EXT = 0x1,
            /// RS485 Mode Internal drive
            INT = 0x2,
            _,
        };
        /// Receiver Mode select
        pub const USART_RXMODE = enum(u2) {
            /// Normal mode
            NORMAL = 0x0,
            /// CLK2x mode
            CLK2X = 0x1,
            /// Generic autobaud mode
            GENAUTO = 0x2,
            /// LIN constrained autobaud mode
            LINAUTO = 0x3,
        };
        /// Communication Mode select
        pub const USART_CMODE = enum(u2) {
            /// Asynchronous Mode
            ASYNCHRONOUS = 0x0,
            /// Synchronous Mode
            SYNCHRONOUS = 0x1,
            /// Infrared Communication
            IRCOM = 0x2,
            /// SPI Host Mode
            MSPI = 0x3,
        };
        /// Character Size select
        pub const USART_NORMAL_CHSIZE = enum(u3) {
            /// Character size: 5 bit
            @"5BIT" = 0x0,
            /// Character size: 6 bit
            @"6BIT" = 0x1,
            /// Character size: 7 bit
            @"7BIT" = 0x2,
            /// Character size: 8 bit
            @"8BIT" = 0x3,
            /// Character size: 9 bit read low byte first
            @"9BITL" = 0x6,
            /// Character size: 9 bit read high byte first
            @"9BITH" = 0x7,
            _,
        };
        /// Parity Mode select
        pub const USART_NORMAL_PMODE = enum(u2) {
            /// No Parity
            DISABLED = 0x0,
            /// Even Parity
            EVEN = 0x2,
            /// Odd Parity
            ODD = 0x3,
            _,
        };
        /// Stop Bit Mode select
        pub const USART_NORMAL_SBMODE = enum(u1) {
            /// 1 stop bit
            @"1BIT" = 0x0,
            /// 2 stop bits
            @"2BIT" = 0x1,
        };
        /// Receive Data Low Byte
        RXDATAL: mmio.Mmio(packed struct(u8) {
            /// RX Data
            DATA: u8,
        }),
        /// Receive Data High Byte (9th data bit plus per-frame status)
        RXDATAH: mmio.Mmio(packed struct(u8) {
            /// Receiver Data Register
            DATA8: u1,
            /// Parity Error
            PERR: u1,
            /// Frame Error
            FERR: u1,
            reserved6: u3,
            /// Buffer Overflow
            BUFOVF: u1,
            /// Receive Complete Interrupt Flag
            RXCIF: u1,
        }),
        /// Transmit Data Low Byte
        TXDATAL: mmio.Mmio(packed struct(u8) {
            /// Transmit Data Register
            DATA: u8,
        }),
        /// Transmit Data High Byte
        TXDATAH: mmio.Mmio(packed struct(u8) {
            /// Transmit Data Register (CHSIZE=9bit)
            DATA8: u1,
            padding: u7,
        }),
        /// Status
        STATUS: mmio.Mmio(packed struct(u8) {
            /// Wait For Break
            WFB: u1,
            /// Break Detected Flag
            BDF: u1,
            reserved3: u1,
            /// Inconsistent Sync Field Interrupt Flag
            ISFIF: u1,
            /// Receive Start Interrupt
            RXSIF: u1,
            /// Data Register Empty Flag
            DREIF: u1,
            /// Transmit Interrupt Flag
            TXCIF: u1,
            /// Receive Complete Interrupt Flag
            RXCIF: u1,
        }),
        /// Control A
        CTRLA: mmio.Mmio(packed struct(u8) {
            /// RS485 Mode internal transmitter
            RS485: packed union {
                raw: u2,
                value: USART_RS485,
            },
            /// Auto-baud Error Interrupt Enable
            ABEIE: u1,
            /// Loop-back Mode Enable
            LBME: u1,
            /// Receiver Start Frame Interrupt Enable
            RXSIE: u1,
            /// Data Register Empty Interrupt Enable
            DREIE: u1,
            /// Transmit Complete Interrupt Enable
            TXCIE: u1,
            /// Receive Complete Interrupt Enable
            RXCIE: u1,
        }),
        /// Control B
        CTRLB: mmio.Mmio(packed struct(u8) {
            /// Multi-processor Communication Mode
            MPCM: u1,
            /// Receiver Mode
            RXMODE: packed union {
                raw: u2,
                value: USART_RXMODE,
            },
            /// Open Drain Mode Enable
            ODME: u1,
            /// Start Frame Detection Enable
            SFDEN: u1,
            reserved6: u1,
            /// Transmitter Enable
            TXEN: u1,
            /// Receiver Enable
            RXEN: u1,
        }),
        /// Control C
        CTRLC: mmio.Mmio(packed struct(u8) {
            /// Character Size
            CHSIZE: packed union {
                raw: u3,
                value: USART_NORMAL_CHSIZE,
            },
            /// Stop Bit Mode
            SBMODE: packed union {
                raw: u1,
                value: USART_NORMAL_SBMODE,
            },
            /// Parity Mode
            PMODE: packed union {
                raw: u2,
                value: USART_NORMAL_PMODE,
            },
            /// Communication Mode
            CMODE: packed union {
                raw: u2,
                value: USART_CMODE,
            },
        }),
        /// Baud Rate
        BAUD: u16,
        reserved11: [1]u8,
        /// Debug Control
        DBGCTRL: mmio.Mmio(packed struct(u8) {
            /// Debug Run
            DBGRUN: u1,
            padding: u7,
        }),
        /// Event Control
        EVCTRL: mmio.Mmio(packed struct(u8) {
            /// IrDA Event Input Enable
            IREI: u1,
            padding: u7,
        }),
        /// IRCOM Transmitter Pulse Length Control
        TXPLCTRL: mmio.Mmio(packed struct(u8) {
            /// Transmit pulse length
            TXPL: u8,
        }),
        /// IRCOM Receiver Pulse Length Control
        RXPLCTRL: mmio.Mmio(packed struct(u8) {
            /// Receiver Pulse Length
            RXPL: u7,
            padding: u1,
        }),
    };
    /// User Row.
    /// 32 bytes of user-programmable non-volatile data, exposed as
    /// individually addressable bytes.
    pub const USERROW = extern struct {
        /// User Row Byte 0
        USERROW0: u8,
        /// User Row Byte 1
        USERROW1: u8,
        /// User Row Byte 2
        USERROW2: u8,
        /// User Row Byte 3
        USERROW3: u8,
        /// User Row Byte 4
        USERROW4: u8,
        /// User Row Byte 5
        USERROW5: u8,
        /// User Row Byte 6
        USERROW6: u8,
        /// User Row Byte 7
        USERROW7: u8,
        /// User Row Byte 8
        USERROW8: u8,
        /// User Row Byte 9
        USERROW9: u8,
        /// User Row Byte 10
        USERROW10: u8,
        /// User Row Byte 11
        USERROW11: u8,
        /// User Row Byte 12
        USERROW12: u8,
        /// User Row Byte 13
        USERROW13: u8,
        /// User Row Byte 14
        USERROW14: u8,
        /// User Row Byte 15
        USERROW15: u8,
        /// User Row Byte 16
        USERROW16: u8,
        /// User Row Byte 17
        USERROW17: u8,
        /// User Row Byte 18
        USERROW18: u8,
        /// User Row Byte 19
        USERROW19: u8,
        /// User Row Byte 20
        USERROW20: u8,
        /// User Row Byte 21
        USERROW21: u8,
        /// User Row Byte 22
        USERROW22: u8,
        /// User Row Byte 23
        USERROW23: u8,
        /// User Row Byte 24
        USERROW24: u8,
        /// User Row Byte 25
        USERROW25: u8,
        /// User Row Byte 26
        USERROW26: u8,
        /// User Row Byte 27
        USERROW27: u8,
        /// User Row Byte 28
        USERROW28: u8,
        /// User Row Byte 29
        USERROW29: u8,
        /// User Row Byte 30
        USERROW30: u8,
        /// User Row Byte 31
        USERROW31: u8,
    };
    /// Virtual Ports.
    /// Compact alias of the most-used PORT registers.
    pub const VPORT = extern struct {
        /// Data Direction
        DIR: u8,
        /// Output Value
        OUT: u8,
        /// Input Value
        IN: u8,
        /// Interrupt Flags (one bit per port pin)
        INTFLAGS: mmio.Mmio(packed struct(u8) {
            /// Pin Interrupt
            INT: u8,
        }),
    };
    /// Voltage reference.
    /// Selects and enables internal reference voltages for the ADC and
    /// DAC/AC peripherals.
    pub const VREF = extern struct {
        /// ADC0 reference select
        pub const VREF_ADC0REFSEL = enum(u3) {
            /// Voltage reference at 0.55V
            @"0V55" = 0x0,
            /// Voltage reference at 1.1V
            @"1V1" = 0x1,
            /// Voltage reference at 2.5V
            @"2V5" = 0x2,
            /// Voltage reference at 4.34V
            @"4V34" = 0x3,
            /// Voltage reference at 1.5V
            @"1V5" = 0x4,
            _,
        };
        /// DAC0/AC0 reference select
        pub const VREF_DAC0REFSEL = enum(u3) {
            /// Voltage reference at 0.55V
            @"0V55" = 0x0,
            /// Voltage reference at 1.1V
            @"1V1" = 0x1,
            /// Voltage reference at 2.5V
            @"2V5" = 0x2,
            /// Voltage reference at 4.34V
            @"4V34" = 0x3,
            /// Voltage reference at 1.5V
            @"1V5" = 0x4,
            _,
        };
        /// Control A
        CTRLA: mmio.Mmio(packed struct(u8) {
            /// DAC0/AC0 reference select
            DAC0REFSEL: packed union {
                raw: u3,
                value: VREF_DAC0REFSEL,
            },
            reserved4: u1,
            /// ADC0 reference select
            ADC0REFSEL: packed union {
                raw: u3,
                value: VREF_ADC0REFSEL,
            },
            padding: u1,
        }),
        /// Control B
        CTRLB: mmio.Mmio(packed struct(u8) {
            /// DAC0/AC0 reference enable
            DAC0REFEN: u1,
            /// ADC0 reference enable
            ADC0REFEN: u1,
            padding: u6,
        }),
    };
/// Watch-Dog Timer
pub const WDT = extern struct {
/// Period select
pub const WDT_PERIOD = enum(u4) {
/// Watch-Dog timer Off
OFF = 0x0,
/// 8 cycles (8ms)
@"8CLK" = 0x1,
/// 16 cycles (16ms)
@"16CLK" = 0x2,
/// 32 cycles (32ms)
@"32CLK" = 0x3,
/// 64 cycles (64ms)
@"64CLK" = 0x4,
/// 128 cycles (0.128s)
@"128CLK" = 0x5,
/// 256 cycles (0.256s)
@"256CLK" = 0x6,
/// 512 cycles (0.512s)
@"512CLK" = 0x7,
/// 1K cycles (1.0s)
@"1KCLK" = 0x8,
/// 2K cycles (2.0s)
@"2KCLK" = 0x9,
/// 4K cycles (4.1s)
@"4KCLK" = 0xa,
/// 8K cycles (8.2s)
@"8KCLK" = 0xb,
_,
};
/// Window select
pub const WDT_WINDOW = enum(u4) {
/// Window mode off
OFF = 0x0,
/// 8 cycles (8ms)
@"8CLK" = 0x1,
/// 16 cycles (16ms)
@"16CLK" = 0x2,
/// 32 cycles (32ms)
@"32CLK" = 0x3,
/// 64 cycles (64ms)
@"64CLK" = 0x4,
/// 128 cycles (0.128s)
@"128CLK" = 0x5,
/// 256 cycles (0.256s)
@"256CLK" = 0x6,
/// 512 cycles (0.512s)
@"512CLK" = 0x7,
/// 1K cycles (1.0s)
@"1KCLK" = 0x8,
/// 2K cycles (2.0s)
@"2KCLK" = 0x9,
/// 4K cycles (4.1s)
@"4KCLK" = 0xa,
/// 8K cycles (8.2s)
@"8KCLK" = 0xb,
_,
};
/// Control A
CTRLA: mmio.Mmio(packed struct(u8) {
/// Period
PERIOD: packed union {
raw: u4,
value: WDT_PERIOD,
},
/// Window
WINDOW: packed union {
raw: u4,
value: WDT_WINDOW,
},
}),
/// Status
STATUS: mmio.Mmio(packed struct(u8) {
/// Syncronization busy
SYNCBUSY: u1,
reserved7: u6,
/// Lock enable
LOCK: u1,
}),
};
};
};
| https://raw.githubusercontent.com/burgrp/microzig-avr/f6f7a766fac9f85d2643789b46bc882ff4ade0ed/src/chips/ATtiny214.zig |