code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
const std = @import("std");
const c = @import("c.zig");
usingnamespace @import("entity.zig");
usingnamespace @import("3d_viewport.zig");
/// Rendering state shared between init() and update(): the sokol-gfx clear
/// pass plus pipeline/bindings for the main scene.
/// NOTE(review): main_pipeline / main_bindings are declared but not used in
/// the visible code — presumably consumed by 3d_viewport.zig; confirm.
const State = struct {
    pass_action: c.sg_pass_action,
    main_pipeline: c.sg_pipeline,
    main_bindings: c.sg_bindings,
};
// Global state (makes me think of GLUT from back in the day... might be a cleaner way to deal with this)
var state: State = undefined;
// Timestamp of the previous frame, used by stm_laptime() for delta time.
var last_time: u64 = 0;
// Toggles for the optional demo/test windows.
var show_test_window: bool = false;
var show_another_window: bool = false;
var display_menu: bool = false;
// Set to true once the ImGui DockBuilder layout has been created (first frame).
var dock_builder_initialised = false;
// Camera/viewport state consumed by do_3d_viewport() (from 3d_viewport.zig).
var viewportState : ViewportState = undefined;
// Index into the mesh combo box ("cube" / "suzanne").
var meshIdx : u8 = 0;
// Constants for ImGui docking.
const fullscreen_window_name = "Fullscreen Window";
const main_dockspace_name = "Main DockSpace";
const debugging_window_name = "Debugging";
const entity_properties_window_name = "Entity Properties";
const viewport_window_name = "3D Viewport";
// Entry point: configure the sokol-app description and hand control to its
// run loop (sapp_run drives init/update/cleanup/event callbacks).
pub fn main() void {
    var desc = std.mem.zeroes(c.sapp_desc);
    // Lifecycle callbacks.
    desc.init_cb = init;
    desc.frame_cb = update;
    desc.cleanup_cb = cleanup;
    desc.event_cb = event;
    // Window configuration (width/height are ignored once fullscreen kicks in).
    desc.width = 1280;
    desc.height = 720;
    desc.window_title = "Editor";
    desc.fullscreen = true;
    desc.enable_dragndrop = false;
    _ = c.sapp_run(&desc);
}
// Initialise the docking test application: sokol-gfx, the sokol-time timer,
// Dear ImGui (with docking enabled) and the default clear colour.
export fn init() void {
    // sokol-gfx needs the swapchain context supplied by sokol-app.
    var gfx_desc = std.mem.zeroes(c.sg_desc);
    gfx_desc.context = c.sapp_sgcontext();
    c.sg_setup(&gfx_desc);
    // High-resolution timer used for per-frame delta time.
    c.stm_setup();
    // Dear ImGui via the sokol_imgui glue.
    var ui_desc = std.mem.zeroes(c.simgui_desc_t);
    c.simgui_setup(&ui_desc);
    // Enable the docking feature set used by update().
    c.igGetIO().*.ConfigFlags |= c.ImGuiConfigFlags_DockingEnable;
    // Clear the default framebuffer to dark grey each frame.
    state.pass_action.colors[0].action = .SG_ACTION_CLEAR;
    state.pass_action.colors[0].value = c.struct_sg_color{ .r = 0.2, .g = 0.2, .b = 0.2, .a = 1.0 };
}
// Per-frame update.
// Builds the entire ImGui frame — a fullscreen host window with a main menu
// bar and a dockspace (laid out lazily on the first frame), plus three docked
// windows (Debugging, 3D Viewport, Entity Properties) — then renders it
// through sokol-gfx. Begin/End and Push/Pop calls below are strictly paired;
// do not reorder.
export fn update() void {
    // Local vars for current app state (application window & timings).
    const app_width = c.sapp_width();
    const app_height = c.sapp_height();
    const dt = c.stm_sec(c.stm_laptime(&last_time));
    // Start new ImGui frame.
    c.simgui_new_frame(app_width, app_height, dt);
    // Update ImGui ViewPort.
    var work_pos = c.ImVec2 {.x = 0.0, .y = 0.0};
    var work_size = c.ImVec2 {.x = 0.0, .y = 0.0};
    var viewport = c.igGetMainViewport();
    work_pos = viewport.*.WorkPos;
    work_size = viewport.*.WorkSize;
    // State for the fullscreen ImGui window that everything else attaches to.
    c.igSetNextWindowPos(work_pos, c.ImGuiCond_Always, c.ImVec2{ .x = 0, .y = 0 });
    c.igSetNextWindowSize(work_size, c.ImGuiCond_Always);
    c.igSetNextWindowViewport(viewport.*.ID);
    // Two style vars pushed here are popped together near the end of the frame
    // via igPopStyleVar(2).
    c.igPushStyleVar_Float(c.ImGuiStyleVar_WindowRounding, 0.0);
    c.igPushStyleVar_Float(c.ImGuiStyleVar_WindowBorderSize, 0.0);
    // Host window must be inert: no decoration, no movement, no docking of itself.
    var window_flags : c.ImGuiWindowFlags = 0;
    window_flags |= c.ImGuiWindowFlags_NoTitleBar | c.ImGuiWindowFlags_NoCollapse | c.ImGuiWindowFlags_NoResize | c.ImGuiWindowFlags_NoMove;
    window_flags |= c.ImGuiWindowFlags_NoBringToFrontOnFocus | c.ImGuiWindowFlags_NoNavFocus | c.ImGuiWindowFlags_NoDocking;
    // Begin Fullscreen Window.
    var show_flag = true; // open/close flag; the host window is never closable
    _ = c.igBegin(fullscreen_window_name, &show_flag, window_flags);
    // Main menu bar: File > Exit requests an application quit.
    if (c.igBeginMainMenuBar())
    {
        if (c.igBeginMenu("File", true))
        {
            if (c.igMenuItem_Bool("Exit", "", false, true))
            {
                c.sapp_request_quit();
            }
            c.igEndMenu();
        }
        c.igEndMainMenuBar();
    }
    const main_dockspace_id = c.igGetID_Str(main_dockspace_name);
    // Lazily initialise dockspace inside Fullscreen Window.
    if (dock_builder_initialised == false) {
        c.igDockBuilderRemoveNode(main_dockspace_id); // Clear out existing layout
        _ = c.igDockBuilderAddNode(main_dockspace_id, 0.0); // Add empty node
        var dock_main_id = main_dockspace_id; // This variable will track the document node, however we are not using it here as we aren't docking anything into it.
        // Carve a 20% column off the right for properties, then a 20% row off
        // the bottom of the remainder for debugging; the rest hosts the viewport.
        var dock_id_prop = c.igDockBuilderSplitNode(dock_main_id, c.ImGuiDir_Right, 0.20, 0, &dock_main_id);
        var dock_id_bottom = c.igDockBuilderSplitNode(dock_main_id, c.ImGuiDir_Down, 0.20, 0, &dock_main_id);
        c.igDockBuilderDockWindow(viewport_window_name, dock_main_id);
        c.igDockBuilderDockWindow(debugging_window_name, dock_id_bottom);
        c.igDockBuilderDockWindow(entity_properties_window_name, dock_id_prop);
        c.igDockBuilderFinish(main_dockspace_id);
        dock_builder_initialised = true;
    }
    // Submit the dockspace every frame (size 0,0 = fill the host window).
    _ = c.igDockSpace(main_dockspace_id,
        c.ImVec2{ .x = 0.0, .y = 0.0 },
        c.ImGuiDockNodeFlags_None | c.ImGuiDockNodeFlags_PassthruCentralNode,
        0);
    // Debugging Window.
    var showing_debugging_window : bool = true;
    c.igSetNextWindowDockID(main_dockspace_id, c.ImGuiCond_FirstUseEver);
    if (c.igBegin(debugging_window_name, &showing_debugging_window, 0)) {
        c.igText("App surface size (%d, %d)", app_width, app_height);
        c.igSeparator();
        // Mesh selector; string literals are NUL-terminated so .ptr is safe for C.
        var meshes = [2][]const u8 { "cube", "suzanne" };
        if (c.igBeginCombo("Current Mesh", meshes[meshIdx].ptr, 0)) {
            for (meshes) |mesh, i| {
                if (c.igSelectable_Bool(mesh.ptr, i == meshIdx, 0, c.ImVec2 { .x = 0, .y = 0 })) {
                    meshIdx = @intCast(u8, i);
                }
            }
            c.igEndCombo();
        }
        c.igSeparator();
        // Camera rotation in degrees, consumed by do_3d_viewport().
        _ = c.igSliderFloat("Rotate X", &viewportState.camera_rot[0], 0.0, 360.0, "%.3f", 1.0);
        _ = c.igSliderFloat("Rotate Y", &viewportState.camera_rot[1], 0.0, 360.0, "%.3f", 1.0);
        _ = c.igSliderFloat("Rotate Z", &viewportState.camera_rot[2], 0.0, 360.0, "%.3f", 1.0);
        c.igSeparator();
        // Edits r/g/b of the clear colour in place (three consecutive floats).
        _ = c.igColorEdit3("clear color", &state.pass_action.colors[0].value.r, 0);
        if (c.igButton("Test Window", c.ImVec2{ .x = 0.0, .y = 0.0 })) show_test_window = !show_test_window;
        if (c.igButton("Another Window", c.ImVec2{ .x = 0.0, .y = 0.0 })) show_another_window = !show_another_window;
        c.igText("Application average %.3f ms/frame (%.1f FPS)", 1000.0 / c.igGetIO().*.Framerate, c.igGetIO().*.Framerate);
        c.igText("Delta time: %.3f", dt);
        if (show_another_window) {
            c.igSetNextWindowSize(c.ImVec2{ .x = 200, .y = 100 }, c.ImGuiCond_FirstUseEver);
            _ = c.igBegin("Another Window", &show_another_window, 0);
            c.igText("Hello");
            c.igEnd();
        }
        if (show_test_window) {
            c.igSetNextWindowPos(c.ImVec2{ .x = 460, .y = 20 }, c.ImGuiCond_FirstUseEver, c.ImVec2{ .x = 0, .y = 0 });
            c.igShowDemoWindow(0);
        }
    }
    c.igEnd();
    // 3D Viewport Window (zero window padding so the render fills the window).
    var showing_3d_viewport : bool = true;
    c.igSetNextWindowDockID(main_dockspace_id, c.ImGuiCond_FirstUseEver);
    c.igPushStyleVar_Vec2(c.ImGuiStyleVar_WindowPadding, c.ImVec2 { .x = 0.0, .y = 0.0 });
    if (c.igBegin(viewport_window_name, &showing_3d_viewport, 0)) {
        do_3d_viewport(&viewportState);
    }
    c.igPopStyleVar(1);
    c.igEnd();
    // Entity Properties Window.
    var showing_entity_properties_window : bool = true;
    c.igSetNextWindowDockID(main_dockspace_id, c.ImGuiCond_FirstUseEver);
    if (c.igBegin(entity_properties_window_name, &showing_entity_properties_window, 0)) {
        // NOTE(review): allocates and destroys an Entity every frame with
        // std.testing.allocator — clearly placeholder code; testing.allocator
        // outside of tests should be replaced with a real allocator.
        if (Entity.init(std.testing.allocator, "Markov the Entity Boy") catch null) |entity| {
            // Reflect over Entity's fields at comptime and render each one.
            const fields = @typeInfo(Entity).Struct.fields;
            inline for (fields) |field| {
                // Skip fields beginning with an underscore (treat as hidden).
                if (field.name[0] == '_') {
                    continue;
                }
                // Show field value.
                switch (field.field_type) {
                    [:0]u8 => {
                        const name : []u8 = @field(entity, field.name);
                        //std.debug.print("{s}\n", .{ name });
                        c.igText("%s: %s", field.name.ptr, name.ptr);
                    },
                    else => {}
                }
            }
            entity.deinit();
        }
    }
    c.igEnd();
    // End fullscreen window.
    c.igEnd();
    c.igPopStyleVar(2);
    // Render the ImGui draw data into the default pass and present.
    c.sg_begin_default_pass(&state.pass_action, app_width, app_height);
    c.simgui_render();
    c.sg_end_pass();
    c.sg_commit();
}
// Cleanup function (shuts down ImGui and Sokol).
// NOTE(review): shutdown order (sokol_imgui before sokol-gfx) presumably
// matters since simgui owns sg resources — confirm against sokol_imgui docs.
export fn cleanup() void {
    c.simgui_shutdown();
    c.sg_shutdown();
}
// Event handler (passthrough to ImGui).
// Return value of simgui_handle_event (whether ImGui consumed the event) is
// intentionally discarded: no app-level input handling exists yet.
export fn event(e: [*c]const c.sapp_event) void {
    _ = c.simgui_handle_event(e);
}
|
src/main.zig
|
const std = @import("std");
const math = std.math;
const common = @import("common.zig");
const Number = common.Number;
const floatFromUnsigned = common.floatFromUnsigned;
// converts the form 0xMMM.NNNpEEE.
//
// MMM.NNN = mantissa
// EEE = exponent
//
// MMM.NNN is stored as an integer, the exponent is offset.
//
// Takes an already-parsed hex-float (integer mantissa + binary exponent in
// `n_`) and assembles the IEEE bit pattern for T, rounding to nearest-even.
// The statement order below is load-bearing: normalisation, rounding and
// denormal handling each assume the previous step's invariants.
pub fn convertHex(comptime T: type, n_: Number(T)) T {
    const MantissaT = common.mantissaType(T);
    var n = n_;
    // Signed zero short-circuit.
    if (n.mantissa == 0) {
        return if (n.negative) -0.0 else 0.0;
    }
    const max_exp = math.floatExponentMax(T);
    const min_exp = math.floatExponentMin(T);
    const mantissa_bits = math.floatMantissaBits(T);
    const exp_bits = math.floatExponentBits(T);
    const exp_bias = min_exp - 1;
    // mantissa now implicitly divided by 2^mantissa_bits
    n.exponent += mantissa_bits;
    // Shift mantissa and exponent to bring representation into float range.
    // Eventually we want a mantissa with a leading 1-bit followed by mantbits other bits.
    // For rounding, we need two more, where the bottom bit represents
    // whether that bit or any later bit was non-zero.
    // (If the mantissa has already lost non-zero bits, trunc is true,
    // and we OR in a 1 below after shifting left appropriately.)
    while (n.mantissa != 0 and n.mantissa >> (mantissa_bits + 2) == 0) {
        n.mantissa <<= 1;
        n.exponent -= 1;
    }
    // "Sticky" bit: remembers that digits were truncated during parsing.
    if (n.many_digits) {
        n.mantissa |= 1;
    }
    // Shift right while preserving the sticky low bit (OR of shifted-out bits).
    while (n.mantissa >> (1 + mantissa_bits + 2) != 0) {
        n.mantissa = (n.mantissa >> 1) | (n.mantissa & 1);
        n.exponent += 1;
    }
    // If exponent is too negative,
    // denormalize in hopes of making it representable.
    // (The -2 is for the rounding bits.)
    while (n.mantissa > 1 and n.exponent < min_exp - 2) {
        n.mantissa = (n.mantissa >> 1) | (n.mantissa & 1);
        n.exponent += 1;
    }
    // Round using two bottom bits.
    var round = n.mantissa & 3;
    n.mantissa >>= 2;
    round |= n.mantissa & 1; // round to even (round up if mantissa is odd)
    n.exponent += 2;
    if (round == 3) {
        n.mantissa += 1;
        // Rounding carried into a new bit: renormalise.
        if (n.mantissa == 1 << (1 + mantissa_bits)) {
            n.mantissa >>= 1;
            n.exponent += 1;
        }
    }
    // Denormal or zero
    // (exp_bias here makes the biased exponent field encode as 0).
    if (n.mantissa >> mantissa_bits == 0) {
        n.exponent = exp_bias;
    }
    // Infinity and range error
    if (n.exponent > max_exp) {
        return math.inf(T);
    }
    // Assemble sign | biased exponent | mantissa (implicit leading 1 masked off).
    var bits = n.mantissa & ((1 << mantissa_bits) - 1);
    bits |= @intCast(MantissaT, (n.exponent - exp_bias) & ((1 << exp_bits) - 1)) << mantissa_bits;
    if (n.negative) {
        bits |= 1 << (mantissa_bits + exp_bits);
    }
    return floatFromUnsigned(T, MantissaT, bits);
}
|
lib/std/fmt/parse_float/convert_hex.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
const Random = std.rand.Random;
/// Returns a comparison function ordering values of T ascending (a < b).
pub fn lessThanComparison(comptime T: type) fn (a: T, b: T) bool {
    const Closure = struct {
        fn compare(lhs: T, rhs: T) bool {
            return lhs < rhs;
        }
    };
    return Closure.compare;
}
/// Returns a comparison function ordering values of T descending (a > b).
pub fn greaterThanComparison(comptime T: type) fn (a: T, b: T) bool {
    const Closure = struct {
        fn compare(lhs: T, rhs: T) bool {
            return rhs < lhs;
        }
    };
    return Closure.compare;
}
/// Runs one benchmark scenario selected by the process arguments.
/// `benchmark` is duck-typed (anytype) and must expose the methods used in
/// the arm for the chosen action: make/new/add/remove/removeMax plus a
/// deinit() on the returned heap.
/// NOTE(review): `items` allocated by setupBench is never freed here —
/// presumably acceptable for a short-lived benchmark process; confirm.
pub fn runBenchmark(comptime T: type, alloc: *Allocator, rng: *Random, benchmark: anytype) !void {
    const setup = try setupBench(T, alloc, rng);
    const items = setup.items;
    switch (setup.action) {
        // Build a heap from an existing slice.
        .make => {
            var heap = benchmark.make(alloc, items);
            defer heap.deinit();
        },
        // Insert items one by one into an empty heap.
        .add => {
            var heap = benchmark.new(alloc);
            defer heap.deinit();
            benchmark.add(&heap, items);
        },
        // Build a heap, then drain it.
        .remove => {
            var heap = benchmark.make(alloc, items);
            defer heap.deinit();
            benchmark.remove(&heap);
        },
        // Same as .remove, but using the max-heap variant.
        .remove_max => {
            var heap = benchmark.makeMax(alloc, items);
            defer heap.deinit();
            benchmark.removeMax(&heap);
        },
    }
}
/// Parses the CLI arguments and allocates the random input slice for a run.
/// Caller owns `items` in the returned Setup.
fn setupBench(comptime T: type, allocator: *Allocator, rng: *Random) !Setup(T) {
    const parsed = try getArgs(allocator);
    return Setup(T){
        .action = parsed.action,
        .items = try generateRandomSlice(T, allocator, rng, parsed.size),
    };
}
/// Result of benchmark setup: which scenario to run plus the random items.
fn Setup(comptime T: type) type {
    return struct {
        action: Action,
        items: []T,
    };
}
/// Parsed command-line arguments: scenario and input size.
const Args = struct {
    action: Action,
    size: usize,
};
/// Benchmark scenario, selected by the first CLI argument (matched by name
/// via std.meta.stringToEnum in getArgs).
const Action = enum {
    make,
    add,
    remove,
    remove_max,
};
/// Parses the process arguments: `<exe> <action> <size>`.
/// Panics with a usage message when an argument is missing or the action is
/// unknown; returns an error if the size is not a valid integer.
fn getArgs(allocator: *Allocator) !Args {
    var args = std.process.args();
    // Skip argv[0] (the executable path); it must always be present.
    const arg0 = try args.next(allocator).?;
    defer allocator.free(arg0);
    const arg1 = try args.next(allocator) orelse @panic("Must provide action for benchmark");
    defer allocator.free(arg1);
    // Fixed: the message previously omitted `remove_max`, which IS a valid
    // Action variant, so users were told a wrong set of choices.
    const action = std.meta.stringToEnum(Action, arg1) orelse @panic("Action must be: make, add, remove, remove_max");
    const arg2 = try args.next(allocator) orelse @panic("Must provide number of items for benchmark");
    defer allocator.free(arg2);
    const size = try std.fmt.parseInt(usize, arg2, 10);
    return Args{
        .action = action,
        .size = size,
    };
}
/// Allocates a slice of `size` random integers of type T.
/// Caller owns the returned slice and must free it with `allocator`.
fn generateRandomSlice(comptime T: type, allocator: *std.mem.Allocator, rng: *Random, size: usize) ![]T {
    // The final length is known up front, so allocate exactly once instead of
    // growing an ArrayList element by element and shrinking it at the end.
    const slice = try allocator.alloc(T, size);
    for (slice) |*elem| {
        elem.* = rng.int(T);
    }
    return slice;
}
|
bench/utils.zig
|
const std = @import("std");
const eql = std.mem.eql;
const indexOf = std.mem.indexOf;
const parseUnsigned = std.fmt.parseUnsigned;
// The seven passport fields required by day 4 ("cid" is intentionally optional).
const fields = [_][]const u8{ "byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid" };
// Valid eye colours for the "ecl" field.
const ecls = [_][]const u8{ "amb", "blu", "brn", "gry", "grn", "hzl", "oth" };
/// Validates a single passport field value per the day 4 part 2 rules.
/// Returns false for unknown keys and for any malformed value.
fn isValid(key: []const u8, val: []const u8) bool {
    if (std.mem.eql(u8, key, "byr")) {
        // Birth year: 1920-2002.
        const n = std.fmt.parseUnsigned(u32, val, 10) catch return false;
        return n >= 1920 and n <= 2002;
    } else if (std.mem.eql(u8, key, "iyr")) {
        // Issue year: 2010-2020.
        const n = std.fmt.parseUnsigned(u32, val, 10) catch return false;
        return n >= 2010 and n <= 2020;
    } else if (std.mem.eql(u8, key, "eyr")) {
        // Expiration year: 2020-2030.
        const n = std.fmt.parseUnsigned(u32, val, 10) catch return false;
        return n >= 2020 and n <= 2030;
    } else if (std.mem.eql(u8, key, "hgt")) {
        // Height: 150-193 cm or 59-76 in.
        if (std.mem.indexOf(u8, val, "cm")) |i| {
            const n = std.fmt.parseUnsigned(u32, val[0..i], 10) catch return false;
            if (n >= 150 and n <= 193) {
                return true;
            }
        }
        if (std.mem.indexOf(u8, val, "in")) |i| {
            const n = std.fmt.parseUnsigned(u32, val[0..i], 10) catch return false;
            if (n >= 59 and n <= 76) {
                return true;
            }
        }
    } else if (std.mem.eql(u8, key, "hcl")) {
        // Hair colour: '#' followed by exactly six hex digits.
        // Fixed: the leading byte must be '#'; previously ANY first character
        // was accepted (e.g. "z123abc" validated), since only val[1..] was parsed.
        if (val.len == 7 and val[0] == '#') {
            // Six hex digits can never exceed 0xFFFFFF, so a successful parse
            // is sufficient; the value itself is unused.
            _ = std.fmt.parseUnsigned(u32, val[1..], 16) catch return false;
            return true;
        }
    } else if (std.mem.eql(u8, key, "ecl")) {
        // Eye colour: one of the fixed set.
        const eye_colors = [_][]const u8{ "amb", "blu", "brn", "gry", "grn", "hzl", "oth" };
        for (eye_colors) |ecl| {
            if (std.mem.eql(u8, val, ecl)) {
                return true;
            }
        }
    } else if (std.mem.eql(u8, key, "pid")) {
        // Passport ID: exactly nine digits (leading zeros allowed).
        if (val.len == 9) {
            _ = std.fmt.parseUnsigned(u32, val, 10) catch return false;
            return true;
        }
    }
    return false;
}
/// Parses one passport record (space-separated "key:value" pairs).
/// Increments `na` when all seven required fields are present, and `nb` when,
/// additionally, every required field's value is valid.
fn parse(allocator: *std.mem.Allocator, str: []u8, na: *u32, nb: *u32) !void {
    var map = std.BufMap.init(allocator);
    defer map.deinit();
    var pair_it = std.mem.tokenize(str, " ");
    while (pair_it.next()) |pair| {
        // Every key is exactly three characters followed by ':'.
        try map.put(pair[0..3], pair[4..]);
    }
    var all_valid = true;
    for (fields) |key| {
        // A single missing required field disqualifies the passport entirely.
        const val = map.get(key) orelse return;
        all_valid = all_valid and isValid(key, val);
    }
    na.* += 1;
    if (all_valid) {
        nb.* += 1;
    }
}
/// Reads passport records from stdin (blank-line delimited), counting those
/// with all required fields (part A) and those that are also valid (part B).
pub fn main() anyerror!void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = &arena.allocator;
    // Previously-read byte, used to detect blank lines ("\n\n").
    // Fixed: this was `var p: u8 = undefined;`, which is READ (`p == '\n'`)
    // on the very first newline before ever being written — undefined
    // behaviour. Zero is a safe initial value that never equals '\n'.
    var p: u8 = 0;
    var na: u32 = 0;
    var nb: u32 = 0;
    var passport = std.ArrayList(u8).init(allocator);
    defer passport.deinit();
    var stdin = std.io.getStdIn().reader();
    var in = std.io.bufferedReader(stdin).reader();
    while (in.readByte() catch null) |c| {
        // Passport entries delimited by blank lines
        if (c == '\n') {
            if (p == '\n') {
                try parse(allocator, passport.items, &na, &nb);
                passport.shrinkRetainingCapacity(0);
            } else {
                // Within a record, newlines act like field separators.
                try passport.append(' ');
            }
        } else {
            try passport.append(c);
        }
        p = c;
    }
    try parse(allocator, passport.items, &na, &nb); // final passport before EOF
    std.debug.print("A) {d} passports with all fields present\n", .{na});
    std.debug.print("B) {d} passports with all fields present and valid\n", .{nb});
}
|
2020/zig/src/4.zig
|
const std = @import("std");
const assert = std.debug.assert;
const glfw = @import("glfw");
const c = @cImport({
@cInclude("dawn/dawn_proc.h");
@cInclude("dawn_native_mach.h");
});
const objc = @cImport({
@cInclude("objc/message.h");
});
pub const cimgui = @cImport({
@cDefine("CIMGUI_DEFINE_ENUMS_AND_STRUCTS", "");
@cDefine("CIMGUI_NO_EXPORT", "");
@cInclude("imgui/cimgui.h");
});
pub const gpu = @import("mach-gpu/main.zig");
/// Owns the Dawn/WebGPU device and swapchain for one GLFW window, plus
/// generational-handle pools for buffers, textures and render pipelines.
pub const GraphicsContext = struct {
    native_instance: gpu.NativeInstance,
    adapter_type: gpu.Adapter.Type,
    backend_type: gpu.Adapter.BackendType,
    device: gpu.Device,
    queue: gpu.Queue,
    window: glfw.Window,
    window_surface: gpu.Surface,
    swapchain: gpu.SwapChain,
    swapchain_descriptor: gpu.SwapChain.Descriptor,
    buffer_pool: BufferPool,
    texture_pool: TexturePool,
    render_pipeline_pool: RenderPipelinePool,
    /// Texture format used for the window swapchain.
    pub const swapchain_format = gpu.Texture.Format.bgra8_unorm;
    // TODO: Adjust pool sizes.
    const buffer_pool_size = 256;
    const texture_pool_size = 256;
    const render_pipeline_pool_size = 256;
    /// Initialises Dawn, selects a high-performance adapter and device,
    /// creates a surface + fifo swapchain for `window`, and sets up the pools.
    /// Exits the process (code 1) if no adapter or device can be acquired.
    pub fn init(allocator: std.mem.Allocator, window: glfw.Window) GraphicsContext {
        c.dawnProcSetProcs(c.machDawnNativeGetProcs());
        const instance = c.machDawnNativeInstance_init();
        c.machDawnNativeInstance_discoverDefaultAdapters(instance);
        var native_instance = gpu.NativeInstance.wrap(c.machDawnNativeInstance_get(instance).?);
        const gpu_interface = native_instance.interface();
        const backend_adapter = switch (gpu_interface.waitForAdapter(&.{
            .power_preference = .high_performance,
        })) {
            .adapter => |v| v,
            .err => |err| {
                std.debug.print("[zgpu] Failed to get adapter: error={} {s}.\n", .{ err.code, err.message });
                std.process.exit(1);
            },
        };
        const props = backend_adapter.properties;
        std.debug.print("[zgpu] High-performance device has been selected:\n", .{});
        std.debug.print("[zgpu] Name: {s}\n", .{props.name});
        std.debug.print("[zgpu] Driver: {s}\n", .{props.driver_description});
        std.debug.print("[zgpu] Adapter type: {s}\n", .{gpu.Adapter.typeName(props.adapter_type)});
        std.debug.print("[zgpu] Backend type: {s}\n", .{gpu.Adapter.backendTypeName(props.backend_type)});
        const device = switch (backend_adapter.waitForDevice(&.{})) {
            .device => |v| v,
            .err => |err| {
                std.debug.print("[zgpu] Failed to get device: error={} {s}\n", .{ err.code, err.message });
                std.process.exit(1);
            },
        };
        device.setUncapturedErrorCallback(&printUnhandledErrorCallback);
        const window_surface = createSurfaceForWindow(
            &native_instance,
            window,
            comptime detectGLFWOptions(),
        );
        const framebuffer_size = window.getFramebufferSize() catch unreachable;
        const swapchain_descriptor = gpu.SwapChain.Descriptor{
            .label = "main window swap chain",
            .usage = .{ .render_attachment = true },
            .format = swapchain_format,
            .width = framebuffer_size.width,
            .height = framebuffer_size.height,
            .present_mode = .fifo,
            .implementation = 0,
        };
        const swapchain = device.nativeCreateSwapChain(
            window_surface,
            &swapchain_descriptor,
        );
        return GraphicsContext{
            .native_instance = native_instance,
            .adapter_type = props.adapter_type,
            .backend_type = props.backend_type,
            .device = device,
            .queue = device.getQueue(),
            .window = window,
            .window_surface = window_surface,
            .swapchain = swapchain,
            .swapchain_descriptor = swapchain_descriptor,
            .buffer_pool = BufferPool.init(allocator, buffer_pool_size),
            .texture_pool = TexturePool.init(allocator, texture_pool_size),
            .render_pipeline_pool = RenderPipelinePool.init(allocator, render_pipeline_pool_size),
        };
    }
    /// Releases all pooled GPU objects and the core WebGPU objects.
    /// `allocator` must be the same one that was passed to init().
    pub fn deinit(gctx: *GraphicsContext, allocator: std.mem.Allocator) void {
        // TODO: Make sure all GPU commands are completed.
        // TODO: How to release `native_instance`?
        gctx.buffer_pool.deinit(allocator);
        gctx.texture_pool.deinit(allocator);
        gctx.render_pipeline_pool.deinit(allocator);
        gctx.window_surface.release();
        gctx.swapchain.release();
        gctx.queue.release();
        gctx.device.release();
        gctx.* = undefined;
    }
    /// Recreates the swapchain when the window's framebuffer size changed.
    /// Returns false when a resize happened (callers should skip the frame).
    pub fn update(gctx: *GraphicsContext) bool {
        const fb_size = gctx.window.getFramebufferSize() catch unreachable;
        if (gctx.swapchain_descriptor.width != fb_size.width or
            gctx.swapchain_descriptor.height != fb_size.height)
        {
            gctx.swapchain_descriptor.width = fb_size.width;
            gctx.swapchain_descriptor.height = fb_size.height;
            gctx.swapchain.release();
            gctx.swapchain = gctx.device.nativeCreateSwapChain(
                gctx.window_surface,
                &gctx.swapchain_descriptor,
            );
            std.debug.print(
                "[zgpu] Swap chain has been resized to: {d}x{d}\n",
                .{ gctx.swapchain_descriptor.width, gctx.swapchain_descriptor.height },
            );
            return false; // Swap chain has been resized.
        }
        return true;
    }
    /// Creates a GPU buffer and returns a generational handle to it.
    pub fn createBuffer(gctx: GraphicsContext, descriptor: gpu.Buffer.Descriptor) BufferHandle {
        const gpuobj = gctx.device.createBuffer(&descriptor);
        return gctx.buffer_pool.addResource(.{
            .gpuobj = gpuobj,
            .size = descriptor.size,
            .usage = descriptor.usage,
        });
    }
    /// Destroys the buffer behind `handle`; no-op for stale/invalid handles.
    pub fn destroyBuffer(gctx: GraphicsContext, handle: BufferHandle) void {
        gctx.buffer_pool.destroyResource(handle);
    }
    /// Resolves a handle to the live gpu.Buffer, or null if the handle is stale.
    pub fn lookupBuffer(gctx: GraphicsContext, handle: BufferHandle) ?gpu.Buffer {
        if (gctx.lookupBufferInfo(handle)) |info| {
            return info.gpuobj;
        }
        return null;
    }
    /// Returns a copy of the buffer's bookkeeping info, or null if stale.
    pub fn lookupBufferInfo(gctx: GraphicsContext, handle: BufferHandle) ?BufferInfo {
        if (gctx.buffer_pool.lookupResourceInfo(handle)) |buffer_info| {
            return buffer_info.*;
        }
        return null;
    }
    /// Creates a GPU texture and returns a generational handle to it.
    pub fn createTexture(gctx: GraphicsContext, descriptor: gpu.Texture.Descriptor) TextureHandle {
        const gpuobj = gctx.device.createTexture(&descriptor);
        return gctx.texture_pool.addResource(.{
            .gpuobj = gpuobj,
            .usage = descriptor.usage,
            .dimension = descriptor.dimension,
            .size = descriptor.size,
            .format = descriptor.format,
            .mip_level_count = descriptor.mip_level_count,
            .sample_count = descriptor.sample_count,
        });
    }
    /// Destroys the texture behind `handle`; no-op for stale/invalid handles.
    pub fn destroyTexture(gctx: GraphicsContext, handle: TextureHandle) void {
        gctx.texture_pool.destroyResource(handle);
    }
    /// Resolves a handle to the live gpu.Texture, or null if the handle is stale.
    pub fn lookupTexture(gctx: GraphicsContext, handle: TextureHandle) ?gpu.Texture {
        if (gctx.lookupTextureInfo(handle)) |info| {
            return info.gpuobj;
        }
        return null;
    }
    /// Returns a copy of the texture's bookkeeping info, or null if stale.
    pub fn lookupTextureInfo(gctx: GraphicsContext, handle: TextureHandle) ?TextureInfo {
        if (gctx.texture_pool.lookupResourceInfo(handle)) |texture_info| {
            return texture_info.*;
        }
        return null;
    }
    /// Creates a render pipeline and returns a generational handle to it.
    pub fn createRenderPipeline(
        gctx: GraphicsContext,
        descriptor: gpu.RenderPipeline.Descriptor,
    ) RenderPipelineHandle {
        const gpuobj = gctx.device.createRenderPipeline(&descriptor);
        return gctx.render_pipeline_pool.addResource(.{
            .gpuobj = gpuobj,
        });
    }
    /// Destroys the pipeline behind `handle`; no-op for stale/invalid handles.
    pub fn destroyRenderPipeline(gctx: GraphicsContext, handle: RenderPipelineHandle) void {
        gctx.render_pipeline_pool.destroyResource(handle);
    }
    /// Resolves a handle to the live gpu.RenderPipeline, or null if stale.
    pub fn lookupRenderPipeline(gctx: GraphicsContext, handle: RenderPipelineHandle) ?gpu.RenderPipeline {
        if (gctx.render_pipeline_pool.lookupResourceInfo(handle)) |render_pipeline| {
            return render_pipeline.gpuobj;
        }
        return null;
    }
};
/// Generational handle to a buffer pool slot. Index 0 / generation 0 is the
/// invalid ("null") handle — the zero-initialised default is always invalid.
pub const BufferHandle = struct {
    index: u16 align(4) = 0,
    generation: u16 = 0,
};
/// Generational handle to a texture pool slot (see BufferHandle).
pub const TextureHandle = struct {
    index: u16 align(4) = 0,
    generation: u16 = 0,
};
/// Generational handle to a render pipeline pool slot (see BufferHandle).
pub const RenderPipelineHandle = struct {
    index: u16 align(4) = 0,
    generation: u16 = 0,
};
/// Bookkeeping stored per buffer; `gpuobj == null` marks a free pool slot.
pub const BufferInfo = struct {
    gpuobj: ?gpu.Buffer = null,
    size: usize = 0,
    usage: gpu.BufferUsage = .{},
};
/// Bookkeeping stored per texture; `gpuobj == null` marks a free pool slot.
pub const TextureInfo = struct {
    gpuobj: ?gpu.Texture = null,
    usage: gpu.Texture.Usage = .{},
    dimension: gpu.Texture.Dimension = .dimension_1d,
    size: gpu.Extent3D = .{ .width = 0 },
    format: gpu.Texture.Format = .none,
    mip_level_count: u32 = 0,
    sample_count: u32 = 0,
};
/// Bookkeeping stored per render pipeline; `gpuobj == null` marks a free slot.
const RenderPipelineInfo = struct {
    gpuobj: ?gpu.RenderPipeline = null,
};
const BufferPool = ResourcePool(BufferInfo, BufferHandle);
const TexturePool = ResourcePool(TextureInfo, TextureHandle);
const RenderPipelinePool = ResourcePool(RenderPipelineInfo, RenderPipelineHandle);
/// Fixed-capacity pool of resource infos addressed by generational handles.
/// Slot 0 is reserved as the invalid slot, so `capacity + 1` slots are
/// allocated and usable indices are 1..capacity.
fn ResourcePool(comptime ResourceInfo: type, comptime ResourceHandle: type) type {
    return struct {
        const Self = @This();
        resources: []ResourceInfo,
        generations: []u16,
        /// Allocates the pool with all slots free. Uses `catch unreachable`,
        /// so an OOM here is a hard crash by design.
        fn init(allocator: std.mem.Allocator, capacity: u32) Self {
            var resources = allocator.alloc(ResourceInfo, capacity + 1) catch unreachable;
            for (resources) |*resource| resource.* = .{};
            var generations = allocator.alloc(u16, capacity + 1) catch unreachable;
            for (generations) |*gen| gen.* = 0;
            return .{
                .resources = resources,
                .generations = generations,
            };
        }
        /// Releases every still-live GPU object, then frees the pool arrays.
        fn deinit(pool: *Self, allocator: std.mem.Allocator) void {
            for (pool.resources) |resource| {
                if (resource.gpuobj) |gpuobj|
                    gpuobj.release();
            }
            allocator.free(pool.resources);
            allocator.free(pool.generations);
            pool.* = undefined;
        }
        /// Stores `resource` in the first free slot (linear scan, O(capacity))
        /// and returns its handle. Asserts when the pool is full.
        /// NOTE(review): the slot's u16 generation counter is incremented on
        /// each reuse — it would trap on overflow after 65535 reuses of one
        /// slot in safe builds; presumably acceptable, but worth confirming.
        fn addResource(pool: Self, resource: ResourceInfo) ResourceHandle {
            assert(resource.gpuobj != null);
            // Slot 0 is never used, so valid handles always have index > 0.
            var slot_idx: u32 = 1;
            while (slot_idx < pool.resources.len) : (slot_idx += 1) {
                if (pool.resources[slot_idx].gpuobj == null)
                    break;
            }
            assert(slot_idx < pool.resources.len);
            pool.resources[slot_idx] = resource;
            return .{
                .index = @intCast(u16, slot_idx),
                .generation = blk: {
                    pool.generations[slot_idx] += 1;
                    break :blk pool.generations[slot_idx];
                },
            };
        }
        /// Destroys and releases the resource behind `handle`, then marks the
        /// slot free. Stale or invalid handles are silently ignored.
        fn destroyResource(pool: Self, handle: ResourceHandle) void {
            var resource_info = pool.lookupResourceInfo(handle);
            if (resource_info == null)
                return;
            const gpuobj = resource_info.?.gpuobj.?;
            // Some gpu object types expose an explicit destroy() in addition
            // to refcounted release(); call it when available.
            if (@hasDecl(@TypeOf(gpuobj), "destroy")) {
                gpuobj.destroy();
            }
            gpuobj.release();
            resource_info.?.* = .{};
        }
        /// A handle is valid iff its index is in range, its generation matches
        /// the slot's current generation, and the slot is occupied.
        fn isHandleValid(pool: Self, handle: ResourceHandle) bool {
            return handle.index > 0 and
                handle.index < pool.resources.len and
                handle.generation > 0 and
                handle.generation == pool.generations[handle.index] and
                pool.resources[handle.index].gpuobj != null;
        }
        /// Returns a pointer into the pool for a valid handle, else null.
        /// The pointer is only valid until the slot is destroyed/reused.
        fn lookupResourceInfo(pool: Self, handle: ResourceHandle) ?*ResourceInfo {
            if (pool.isHandleValid(handle)) {
                return &pool.resources[handle.index];
            }
            return null;
        }
    };
}
/// Verifies that the content folder holds real data files rather than Git LFS
/// pointer stubs. On any failure, prints an LFS installation hint once and
/// returns the error.
pub fn checkContent(comptime content_dir: []const u8) !void {
    // Inner function so that every failure funnels through the single `catch`
    // below where the hint is printed.
    const verify = struct {
        fn run() !void {
            // Run relative to the executable's directory so content paths resolve.
            var path_buffer: [1024]u8 = undefined;
            const exe_dir = std.fs.selfExeDirPath(path_buffer[0..]) catch "./";
            std.os.chdir(exe_dir) catch {};
            // A Git LFS pointer file is tiny; a real font is far larger than 1 KiB.
            const font_file = try std.fs.cwd().openFile(content_dir ++ "Roboto-Medium.ttf", .{});
            defer font_file.close();
            const font_size = @intCast(usize, try font_file.getEndPos());
            if (font_size <= 1024) {
                return error.InvalidDataFiles;
            }
        }
    };
    verify.run() catch |err| {
        std.debug.print(
            \\
            \\ERROR
            \\Invalid data files or missing content folder.
            \\Please install Git LFS (Large File Support) and run:
            \\git lfs install
            \\git pull
            \\
            \\
        ,
            .{},
        );
        return err;
    };
}
/// Per-frame timing statistics; also refreshes the window title with
/// FPS / CPU-time once per second.
pub const FrameStats = struct {
    time: f64 = 0.0,
    delta_time: f32 = 0.0,
    fps: f32 = 0.0,
    average_cpu_time: f32 = 0.0,
    previous_time: f64 = 0.0,
    fps_refresh_time: f64 = 0.0,
    frame_counter: u64 = 0,
    frame_number: u64 = 0,
    /// Call once per frame. Updates time/delta_time every frame; recomputes
    /// fps/average_cpu_time and rewrites the window title once per second.
    pub fn update(self: *FrameStats, window: glfw.Window, window_name: []const u8) void {
        self.time = glfw.getTime();
        self.delta_time = @floatCast(f32, self.time - self.previous_time);
        self.previous_time = self.time;
        if ((self.time - self.fps_refresh_time) >= 1.0) {
            // Average over the frames counted since the last refresh.
            const t = self.time - self.fps_refresh_time;
            const fps = @intToFloat(f64, self.frame_counter) / t;
            const ms = (1.0 / fps) * 1000.0;
            self.fps = @floatCast(f32, fps);
            self.average_cpu_time = @floatCast(f32, ms);
            self.fps_refresh_time = self.time;
            self.frame_counter = 0;
            // Buffer is zero-filled, so the printed text stays NUL-terminated
            // for the C-string cast below.
            var buffer = [_]u8{0} ** 128;
            const text = std.fmt.bufPrint(
                buffer[0..],
                "FPS: {d:.1} CPU time: {d:.3} ms | {s}",
                .{ self.fps, self.average_cpu_time, window_name },
            ) catch unreachable;
            window.setTitle(@ptrCast([*:0]const u8, text.ptr)) catch unreachable;
        }
        self.frame_counter += 1;
        self.frame_number += 1;
    }
};
/// Dear ImGui integration: wraps the C++ GLFW and WGPU backends (linked as
/// extern functions below) behind a small init/frame/draw/deinit API.
pub const gui = struct {
    /// Creates the ImGui context, hooks up the GLFW backend, loads `font` at
    /// `font_size`, and initialises the WGPU renderer backend.
    /// Panics (unreachable) if any backend step fails.
    pub fn init(window: glfw.Window, device: gpu.Device, font: [*:0]const u8, font_size: f32) void {
        assert(cimgui.igGetCurrentContext() == null);
        _ = cimgui.igCreateContext(null);
        if (!ImGui_ImplGlfw_InitForOther(window.handle, true)) {
            unreachable;
        }
        const io = cimgui.igGetIO().?;
        if (cimgui.ImFontAtlas_AddFontFromFileTTF(io.*.Fonts, font, font_size, null, null) == null) {
            unreachable;
        }
        // 2 = number of frames in flight; render target uses the swapchain format.
        if (!ImGui_ImplWGPU_Init(device.ptr, 2, @enumToInt(GraphicsContext.swapchain_format))) {
            unreachable;
        }
    }
    /// Shuts down both backends and destroys the ImGui context.
    pub fn deinit() void {
        assert(cimgui.igGetCurrentContext() != null);
        ImGui_ImplWGPU_Shutdown();
        ImGui_ImplGlfw_Shutdown();
        cimgui.igDestroyContext(null);
    }
    /// Begins a new ImGui frame sized to the current framebuffer.
    pub fn newFrame(fb_width: u32, fb_height: u32) void {
        ImGui_ImplGlfw_NewFrame();
        ImGui_ImplWGPU_NewFrame();
        {
            const io = cimgui.igGetIO().?;
            io.*.DisplaySize = .{
                .x = @intToFloat(f32, fb_width),
                .y = @intToFloat(f32, fb_height),
            };
            io.*.DisplayFramebufferScale = .{ .x = 1.0, .y = 1.0 };
        }
        cimgui.igNewFrame();
    }
    /// Finalises the frame and records ImGui's draw data into `pass`.
    pub fn draw(pass: gpu.RenderPassEncoder) void {
        cimgui.igRender();
        ImGui_ImplWGPU_RenderDrawData(cimgui.igGetDrawData(), pass.ptr);
    }
    // C++ backend entry points (imgui_impl_glfw / imgui_impl_wgpu).
    extern fn ImGui_ImplGlfw_InitForOther(window: *anyopaque, install_callbacks: bool) bool;
    extern fn ImGui_ImplGlfw_NewFrame() void;
    extern fn ImGui_ImplGlfw_Shutdown() void;
    extern fn ImGui_ImplWGPU_Init(device: *anyopaque, num_frames_in_flight: u32, rt_format: u32) bool;
    extern fn ImGui_ImplWGPU_NewFrame() void;
    extern fn ImGui_ImplWGPU_RenderDrawData(draw_data: *anyopaque, pass_encoder: *anyopaque) void;
    extern fn ImGui_ImplWGPU_Shutdown() void;
};
/// Selects the GLFW native-access options matching the compile target:
/// Cocoa on Darwin, Win32 on Windows, X11 on Linux, nothing otherwise.
fn detectGLFWOptions() glfw.BackendOptions {
    const target = @import("builtin").target;
    if (target.isDarwin()) {
        return .{ .cocoa = true };
    }
    switch (target.os.tag) {
        .windows => return .{ .win32 = true },
        .linux => return .{ .x11 = true },
        else => return .{},
    }
}
/// Creates a WebGPU surface for `window` using the platform's native handle:
/// HWND on Windows, X11 window on Linux, a CAMetalLayer on macOS.
/// `glfw_options` must match the platform (see detectGLFWOptions).
fn createSurfaceForWindow(
    native_instance: *const gpu.NativeInstance,
    window: glfw.Window,
    comptime glfw_options: glfw.BackendOptions,
) gpu.Surface {
    const glfw_native = glfw.Native(glfw_options);
    const descriptor = if (glfw_options.win32) gpu.Surface.Descriptor{
        .windows_hwnd = .{
            .label = "basic surface",
            .hinstance = std.os.windows.kernel32.GetModuleHandleW(null).?,
            .hwnd = glfw_native.getWin32Window(window),
        },
    } else if (glfw_options.x11) gpu.Surface.Descriptor{
        .xlib = .{
            .label = "basic surface",
            .display = glfw_native.getX11Display(),
            .window = glfw_native.getX11Window(window),
        },
    } else if (glfw_options.cocoa) blk: {
        // macOS: attach a CAMetalLayer to the NSWindow's content view via the
        // Objective-C runtime (see msgSend below).
        const ns_window = glfw_native.getCocoaWindow(window);
        const ns_view = msgSend(ns_window, "contentView", .{}, *anyopaque); // [nsWindow contentView]
        // Create a CAMetalLayer that covers the whole window that will be passed to CreateSurface.
        msgSend(ns_view, "setWantsLayer:", .{true}, void); // [view setWantsLayer:YES]
        const layer = msgSend(objc.objc_getClass("CAMetalLayer"), "layer", .{}, ?*anyopaque); // [CAMetalLayer layer]
        if (layer == null) @panic("failed to create Metal layer");
        msgSend(ns_view, "setLayer:", .{layer.?}, void); // [view setLayer:layer]
        // Use retina if the window was created with retina support.
        const scale_factor = msgSend(ns_window, "backingScaleFactor", .{}, f64); // [ns_window backingScaleFactor]
        msgSend(layer.?, "setContentsScale:", .{scale_factor}, void); // [layer setContentsScale:scale_factor]
        break :blk gpu.Surface.Descriptor{
            .metal_layer = .{
                .label = "basic surface",
                .layer = layer.?,
            },
        };
    } else if (glfw_options.wayland) {
        // bugs.chromium.org/p/dawn/issues/detail?id=1246&q=surface&can=2
        @panic("Dawn does not yet have Wayland support");
    } else unreachable;
    return native_instance.createSurface(&descriptor);
}
// Borrowed from https://github.com/hazeycode/zig-objcrt
/// Type-safe wrapper over objc_msgSend: builds the concrete C function type
/// for the receiver + selector + `args` tuple at comptime, casts
/// objc_msgSend to it, and performs the call. Supports 0-4 arguments.
fn msgSend(obj: anytype, sel_name: [:0]const u8, args: anytype, comptime ReturnType: type) ReturnType {
    const args_meta = @typeInfo(@TypeOf(args)).Struct.fields;
    // Every Objective-C method implicitly receives (self, _cmd) first.
    const FnType = switch (args_meta.len) {
        0 => fn (@TypeOf(obj), objc.SEL) callconv(.C) ReturnType,
        1 => fn (@TypeOf(obj), objc.SEL, args_meta[0].field_type) callconv(.C) ReturnType,
        2 => fn (@TypeOf(obj), objc.SEL, args_meta[0].field_type, args_meta[1].field_type) callconv(.C) ReturnType,
        3 => fn (
            @TypeOf(obj),
            objc.SEL,
            args_meta[0].field_type,
            args_meta[1].field_type,
            args_meta[2].field_type,
        ) callconv(.C) ReturnType,
        4 => fn (
            @TypeOf(obj),
            objc.SEL,
            args_meta[0].field_type,
            args_meta[1].field_type,
            args_meta[2].field_type,
            args_meta[3].field_type,
        ) callconv(.C) ReturnType,
        else => @compileError("Unsupported number of args"),
    };
    // NOTE: `func` is a var because making it const causes a compile error which I believe is a compiler bug.
    var func = @ptrCast(FnType, objc.objc_msgSend);
    const sel = objc.sel_getUid(sel_name);
    return @call(.{}, func, .{ obj, sel } ++ args);
}
/// Logs an unhandled Dawn/WebGPU error to stderr and terminates the process.
fn printUnhandledError(_: void, typ: gpu.ErrorType, message: [*:0]const u8) void {
    // Map the error type to its display label, then print once.
    const label = switch (typ) {
        .validation => "Validation error",
        .out_of_memory => "Out of memory",
        .device_lost => "Device lost",
        .unknown => "Unknown error",
        else => unreachable,
    };
    std.debug.print("[zgpu] {s}: {s}\n", .{ label, message });
    std.process.exit(1);
}
// Callback wrapper registered with the device in GraphicsContext.init().
var printUnhandledErrorCallback = gpu.ErrorCallback.init(void, {}, printUnhandledError);
|
libs/zgpu/src/zgpu.zig
|
const std = @import("std");
const math = std.math;
const trait = std.meta.trait;
const assert = std.debug.assert;
/// Mixin providing common vector operations for any struct whose fields are
/// all of numeric type `T` and which declares a `Zero` constant.
/// Intended to be pulled into Vec2/Vec3/Vec4 via `usingnamespace`.
fn VecMixin(comptime Self: type, comptime T: type) type {
    return struct {
        /// Clones the vector
        pub fn clone(self: *Self) Self {
            var result = Self.Zero;
            inline for (@typeInfo(Self).Struct.fields) |field| {
                @field(result, field.name) = @field(self.*, field.name);
            }
            return result;
        }
        /// Negates the vector, returning the negated copy.
        /// (Fixed: was declared to return `T` even though it builds and
        /// returns a `Self`, which could not compile when used.)
        pub fn neg(self: Self) Self {
            var result = Self.Zero;
            inline for (@typeInfo(Self).Struct.fields) |field| {
                @field(result, field.name) = -@field(self, field.name);
            }
            return result;
        }
        /// Gets the value at index of a vector.
        /// Accepts a runtime index; asserts it is within the field count.
        /// (Fixed: indexing the comptime field list with a runtime index did
        /// not compile; an unrolled loop is used instead. The bound is the
        /// actual field count rather than a hardcoded 4.)
        pub fn getIndex(self: *Self, index: usize) T {
            const fields = @typeInfo(Self).Struct.fields;
            assert(index < fields.len);
            inline for (fields) |field, i| {
                if (i == index) return @field(self.*, field.name);
            }
            unreachable;
        }
        /// Sets the value at index of a vector.
        /// The index must be comptime-known; it is bounds-checked against the
        /// actual field count (was hardcoded to 4).
        pub fn setIndex(self: *Self, comptime index: usize, value: T) void {
            comptime assert(index < @typeInfo(Self).Struct.fields.len);
            @field(self.*, @typeInfo(Self).Struct.fields[index].name) = value;
        }
        /// Add two vectors
        pub fn add(self: Self, other: Self) Self {
            var result = Self.Zero;
            inline for (@typeInfo(Self).Struct.fields) |field| {
                @field(result, field.name) = @field(self, field.name) + @field(other, field.name);
            }
            return result;
        }
        /// Sub two vectors
        pub fn sub(self: Self, other: Self) Self {
            var result = Self.Zero;
            inline for (@typeInfo(Self).Struct.fields) |field| {
                @field(result, field.name) = @field(self, field.name) - @field(other, field.name);
            }
            return result;
        }
        /// Mul two vectors (component-wise)
        pub fn mul(self: Self, other: Self) Self {
            var result = Self.Zero;
            inline for (@typeInfo(Self).Struct.fields) |field| {
                @field(result, field.name) = @field(self, field.name) * @field(other, field.name);
            }
            return result;
        }
        /// Mul a vector by a scalar
        pub fn mulScalar(self: Self, other: T) Self {
            var result = Self.Zero;
            inline for (@typeInfo(Self).Struct.fields) |field| {
                @field(result, field.name) = @field(self, field.name) * other;
            }
            return result;
        }
        /// Div two vectors (component-wise)
        pub fn div(self: Self, other: Self) Self {
            var result = Self.Zero;
            inline for (@typeInfo(Self).Struct.fields) |field| {
                @field(result, field.name) = @field(self, field.name) / @field(other, field.name);
            }
            return result;
        }
        /// Div a vector by a scalar
        pub fn divScalar(self: Self, other: T) Self {
            var result = Self.Zero;
            inline for (@typeInfo(Self).Struct.fields) |field| {
                @field(result, field.name) = @field(self, field.name) / other;
            }
            return result;
        }
        /// Get the magnitude of a vector
        pub fn mag(self: Self) T {
            return math.sqrt(self.magSqr());
        }
        /// Gets the magnitude squared of a vector
        /// Faster than mag()
        pub fn magSqr(self: Self) T {
            var result: T = 0;
            inline for (@typeInfo(Self).Struct.fields) |field| {
                result += @field(self, field.name) * @field(self, field.name);
            }
            return result;
        }
        /// Dot product of two vectors
        pub fn dot(self: Self, other: Self) T {
            var result: T = 0;
            inline for (@typeInfo(Self).Struct.fields) |field| {
                result += @field(self, field.name) * @field(other, field.name);
            }
            return result;
        }
        /// Returns a vector with the smallest components of two vectors
        /// (Fixed: the result was computed but never returned.)
        pub fn min(self: Self, other: Self) Self {
            var result = Self.Zero;
            inline for (@typeInfo(Self).Struct.fields) |field| {
                @field(result, field.name) = math.min(@field(self, field.name), @field(other, field.name));
            }
            return result;
        }
        /// Returns a vector with the largest components of two vectors
        /// (Fixed: the result was computed but never returned.)
        pub fn max(self: Self, other: Self) Self {
            var result = Self.Zero;
            inline for (@typeInfo(Self).Struct.fields) |field| {
                @field(result, field.name) = math.max(@field(self, field.name), @field(other, field.name));
            }
            return result;
        }
        /// Returns the distance between two vectors
        pub fn dist(self: Self, other: Self) T {
            return math.sqrt(self.distSqr(other));
        }
        /// Returns the distance between two vectors squared
        /// Faster than dist()
        pub fn distSqr(self: Self, other: Self) T {
            var result: T = 0;
            inline for (@typeInfo(Self).Struct.fields) |field| {
                const d = @field(self, field.name) - @field(other, field.name);
                result += d * d;
            }
            return result;
        }
    };
}
/// A 2-component vector over any signed numeric type `T`.
pub fn Vec2(comptime T: type) type {
    comptime {
        if (!trait.isNumber(T)) @compileError("Vec2 type must be a number");
        if (trait.isUnsignedInt(T)) @compileError("Vec2 type cannot be unsigned");
    }
    return extern struct {
        const Self = @This();

        x: T,
        y: T,

        // Common constant vectors.
        pub const Zero = Self{ .x = 0, .y = 0 };
        pub const One = Self{ .x = 1, .y = 1 };
        pub const Up = Self{ .x = 0, .y = 1 };
        pub const Down = Self{ .x = 0, .y = -1 };
        pub const Left = Self{ .x = -1, .y = 0 };
        pub const Right = Self{ .x = 1, .y = 0 };

        usingnamespace VecMixin(Self, T);

        /// Builds a vector from its components.
        pub fn new(x: T, y: T) Self {
            return .{ .x = x, .y = y };
        }

        /// Builds a vector from a Vec3, dropping its z component.
        pub fn newFromVec3(other: Vec3(T)) Self {
            return .{ .x = other.x, .y = other.y };
        }
    };
}
/// A 3-component vector over any signed numeric type `T`.
pub fn Vec3(comptime T: type) type {
    comptime {
        if (!trait.isNumber(T)) @compileError("Vec3 type must be a number");
        if (trait.isUnsignedInt(T)) @compileError("Vec3 type cannot be unsigned");
    }
    return extern struct {
        const Self = @This();

        x: T,
        y: T,
        z: T,

        // Common constant vectors; negative z is forward.
        pub const Zero = Self{ .x = 0, .y = 0, .z = 0 };
        pub const One = Self{ .x = 1, .y = 1, .z = 1 };
        pub const Up = Self{ .x = 0, .y = 1, .z = 0 };
        pub const Down = Self{ .x = 0, .y = -1, .z = 0 };
        pub const Left = Self{ .x = -1, .y = 0, .z = 0 };
        pub const Right = Self{ .x = 1, .y = 0, .z = 0 };
        pub const Forward = Self{ .x = 0, .y = 0, .z = -1 };
        pub const Back = Self{ .x = 0, .y = 0, .z = 1 };

        usingnamespace VecMixin(Self, T);

        /// Builds a vector from its components.
        pub fn new(x: T, y: T, z: T) Self {
            return .{ .x = x, .y = y, .z = z };
        }

        /// Extends a Vec2 with the given z component.
        pub fn newFromVec2(other: Vec2(T), z: T) Self {
            return .{ .x = other.x, .y = other.y, .z = z };
        }

        /// Builds a vector from a Vec4, dropping its w component.
        pub fn newFromVec4(other: Vec4(T)) Self {
            return .{ .x = other.x, .y = other.y, .z = other.z };
        }
    };
}
/// A 4-component vector over any signed numeric type `T`.
pub fn Vec4(comptime T: type) type {
    comptime {
        if (!trait.isNumber(T)) @compileError("Vec4 type must be a number");
        if (trait.isUnsignedInt(T)) @compileError("Vec4 type cannot be unsigned");
    }
    return extern struct {
        const Self = @This();

        x: T,
        y: T,
        z: T,
        w: T,

        // Common constant vectors; In/Out point along the w axis.
        pub const Zero = Self{ .x = 0, .y = 0, .z = 0, .w = 0 };
        pub const One = Self{ .x = 1, .y = 1, .z = 1, .w = 1 };
        pub const Up = Self{ .x = 0, .y = 1, .z = 0, .w = 0 };
        pub const Down = Self{ .x = 0, .y = -1, .z = 0, .w = 0 };
        pub const Left = Self{ .x = -1, .y = 0, .z = 0, .w = 0 };
        pub const Right = Self{ .x = 1, .y = 0, .z = 0, .w = 0 };
        pub const Forward = Self{ .x = 0, .y = 0, .z = -1, .w = 0 };
        pub const Back = Self{ .x = 0, .y = 0, .z = 1, .w = 0 };
        pub const In = Self{ .x = 0, .y = 0, .z = 0, .w = -1 };
        pub const Out = Self{ .x = 0, .y = 0, .z = 0, .w = 1 };

        usingnamespace VecMixin(Self, T);

        /// Builds a vector from its components.
        pub fn new(x: T, y: T, z: T, w: T) Self {
            return .{ .x = x, .y = y, .z = z, .w = w };
        }

        /// Extends a Vec2 with the given z and w components.
        pub fn newFromVec2(other: Vec2(T), z: T, w: T) Self {
            return .{ .x = other.x, .y = other.y, .z = z, .w = w };
        }

        /// Extends a Vec3 with the given w component.
        pub fn newFromVec3(other: Vec3(T), w: T) Self {
            return .{ .x = other.x, .y = other.y, .z = other.z, .w = w };
        }
    };
}
|
math/src/vec.zig
|
const ChunkedReader = @import("chunked_reader.zig").ChunkedReader;
const ContentLengthReader = @import("content_length_reader.zig").ContentLengthReader;
const Event = @import("../events/events.zig").Event;
const Headers = @import("http").Headers;
const Method = @import("http").Method;
const StatusCode = @import("http").StatusCode;
const std = @import("std");
/// Tag for `BodyReader`: how the response body is framed.
pub const BodyReaderType = enum {
    Chunked,
    ContentLength,
    NoContent,
};
/// Reader for an HTTP response body, dispatching on the framing mechanism
/// determined by `frame`.
pub const BodyReader = union(BodyReaderType) {
    Chunked: ChunkedReader,
    ContentLength: ContentLengthReader,
    NoContent: void,

    /// Reads the next body event from `reader` into `buffer`, dispatching to
    /// the active variant. A NoContent body immediately yields EndOfMessage.
    pub fn read(self: *BodyReader, reader: anytype, buffer: []u8) !Event {
        return switch (self.*) {
            .Chunked => |*chunked_reader| try chunked_reader.read(reader, buffer),
            .ContentLength => |*body_reader| try body_reader.read(reader, buffer),
            .NoContent => .EndOfMessage,
        };
    }

    // Determines the appropriate response body length.
    // Cf: RFC 7230 section 3.3.3, https://tools.ietf.org/html/rfc7230#section-3.3.3
    // TODO:
    // - Handle transfert encoding (compress, deflate, gzip)
    pub fn frame(requestMethod: Method, status_code: StatusCode, headers: Headers) !BodyReader {
        const rawStatusCode = @enumToInt(status_code);
        // HEAD responses, informational (1xx), 204 and 304 never carry a body.
        const hasNoContent = (requestMethod == .Head or rawStatusCode < 200 or status_code == .NoContent or status_code == .NotModified);
        if (hasNoContent) {
            return .NoContent;
        }
        // A 2xx answer to CONNECT switches the connection into tunnel mode.
        const isSuccessfulConnectRequest = (rawStatusCode < 300 and rawStatusCode > 199 and requestMethod == .Connect);
        if (isSuccessfulConnectRequest) {
            return .NoContent;
        }
        if (headers.get("Transfer-Encoding")) |transfert_encoding| {
            // Chunked must be the *final* encoding: compare the last
            // comma-separated token rather than using a raw endsWith, which
            // would also accept values such as "notchunked".
            const raw_value = transfert_encoding.value;
            const final_encoding = if (std.mem.lastIndexOfScalar(u8, raw_value, ',')) |comma|
                std.mem.trim(u8, raw_value[comma + 1 ..], " ")
            else
                std.mem.trim(u8, raw_value, " ");
            if (!std.mem.eql(u8, final_encoding, "chunked")) {
                return error.UnknownTranfertEncoding;
            }
            return BodyReader{ .Chunked = ChunkedReader{} };
        }
        // Default: Content-Length framing. A missing header means length 0;
        // an unparsable value is a protocol error.
        var contentLength: usize = 0;
        const contentLengthHeader = headers.get("Content-Length");
        if (contentLengthHeader != null) {
            contentLength = std.fmt.parseInt(usize, contentLengthHeader.?.value, 10) catch return error.RemoteProtocolError;
        }
        return BodyReader{ .ContentLength = ContentLengthReader{ .expected_length = contentLength } };
    }
};
const expect = std.testing.expect;
const expectError = std.testing.expectError;
// Unit tests for BodyReader.frame (RFC 7230 section 3.3.3 body-length rules)
// and BodyReader.read.
test "Frame Body - A HEAD request has no content" {
    var headers = Headers.init(std.testing.allocator);
    var body_reader = try BodyReader.frame(.Head, .Ok, headers);
    try expect(body_reader == .NoContent);
}
test "Frame Body - Informational responses (1XX status code) have no content" {
    var headers = Headers.init(std.testing.allocator);
    var body_reader = try BodyReader.frame(.Get, .Continue, headers);
    try expect(body_reader == .NoContent);
}
test "Frame Body - Response with a 204 No Content status code has no content" {
    var headers = Headers.init(std.testing.allocator);
    var body_reader = try BodyReader.frame(.Get, .NoContent, headers);
    try expect(body_reader == .NoContent);
}
test "Frame Body - Response with 304 Not Modified status code has no content" {
    var headers = Headers.init(std.testing.allocator);
    var body_reader = try BodyReader.frame(.Get, .NotModified, headers);
    try expect(body_reader == .NoContent);
}
test "Frame Body - A successful response (2XX) to a CONNECT request has no content" {
    var headers = Headers.init(std.testing.allocator);
    var body_reader = try BodyReader.frame(.Connect, .Ok, headers);
    try expect(body_reader == .NoContent);
}
test "Frame Body - Use a ChunkReader when chunked is the final encoding" {
    var headers = Headers.init(std.testing.allocator);
    defer headers.deinit();
    try headers.append("Transfer-Encoding", "gzip, chunked");
    var body_reader = try BodyReader.frame(.Get, .Ok, headers);
    try expect(body_reader == .Chunked);
}
test "Frame Body - Fail when chunked is not the final encoding" {
    var headers = Headers.init(std.testing.allocator);
    defer headers.deinit();
    try headers.append("Transfer-Encoding", "chunked, gzip");
    const failure = BodyReader.frame(.Get, .Ok, headers);
    try expectError(error.UnknownTranfertEncoding, failure);
}
test "Frame Body - By default use a ContentLengthReader with a length of 0" {
    var headers = Headers.init(std.testing.allocator);
    var body_reader = try BodyReader.frame(.Get, .Ok, headers);
    try expect(body_reader == .ContentLength);
    try expect(body_reader.ContentLength.expected_length == 0);
}
test "Frame Body - Use a ContentLengthReader with the provided length" {
    var headers = Headers.init(std.testing.allocator);
    defer headers.deinit();
    try headers.append("Content-Length", "15");
    var body_reader = try BodyReader.frame(.Get, .Ok, headers);
    try expect(body_reader == .ContentLength);
    try expect(body_reader.ContentLength.expected_length == 15);
}
test "Frame Body - Fail when the provided content length is invalid" {
    var headers = Headers.init(std.testing.allocator);
    defer headers.deinit();
    try headers.append("Content-Length", "XXX");
    const failure = BodyReader.frame(.Get, .Ok, headers);
    try expectError(error.RemoteProtocolError, failure);
}
test "NoContentReader - Returns EndOfMessage." {
    const content = "";
    // NOTE(review): `base_reader` is taken from a temporary stream value;
    // assumed valid under this Zig version's by-value reader semantics —
    // verify on compiler upgrade.
    var base_reader = std.io.fixedBufferStream(content).reader();
    var reader = std.io.peekStream(1024, base_reader);
    var body_reader = BodyReader{ .NoContent = undefined };
    var buffer: [0]u8 = undefined;
    var event = try body_reader.read(&reader, &buffer);
    try expect(event == .EndOfMessage);
}
|
src/readers/readers.zig
|
const std = @import("std");
const testing = std.testing;
/// Result of a successful `expressionExtractor` scan: the captured slice of
/// the input buffer (not a copy; it borrows from `buf`).
pub const ExpressionMatch = struct {
    result: []const u8 = undefined,
};

/// Will scan the buf for pattern. Pattern can contain () to indicate narrow group to extract.
/// Currently no support for character classes and other patterns.
/// When the pattern ends with "()" (no literal after the group) the capture
/// runs to the end of the current line, or to the end of the buffer if no
/// line break follows. This allows us to e.g. get particular header-values.
/// The more future-proof solution is to implement a better
/// pattern-matching-engine.
/// Returns null when the pattern cannot be found.
pub fn expressionExtractor(buf: []const u8, pattern: []const u8) ?ExpressionMatch {
    if (std.mem.indexOf(u8, pattern, "()")) |pos| {
        const start_slice = pattern[0..pos];
        const end_slice = pattern[pos + 2 ..];
        const start_pos = std.mem.indexOf(u8, buf, start_slice) orelse return null;
        const capture_start = start_pos + start_slice.len;
        // Fixed: the end-of-line fallback below was dead code before (the
        // empty end_slice case jumped straight to buf.len, so the
        // `end_pos == 0` check it relied on could never trigger).
        const end_pos = if (end_slice.len == 0) blk: {
            // No trailing literal: capture up to the end of the line.
            if (std.mem.indexOfAny(u8, buf[capture_start..], "\r\n")) |line_end| {
                break :blk capture_start + line_end;
            }
            break :blk buf.len;
        } else std.mem.indexOfPos(u8, buf, capture_start, end_slice) orelse return null;
        return ExpressionMatch{
            .result = buf[capture_start..end_pos],
        };
    } else if (std.mem.indexOf(u8, buf, pattern)) |_| {
        // No capture group: a plain substring hit returns the whole buffer.
        return ExpressionMatch{
            .result = buf[0..],
        };
    }
    return null;
}
test "expressionExtractor" {
    try testing.expect(expressionExtractor("", "not there") == null);
    // If there is a match without (): keep the whole payload?
    try testing.expectEqualStrings("match", expressionExtractor("match", "()").?.result);
    try testing.expectEqualStrings("match", expressionExtractor("match", "atc").?.result);
    try testing.expectEqualStrings("atc", expressionExtractor("match", "m()h").?.result);
    // NOTE(review): the "<PASSWORD>" fragments below look like redaction
    // artifacts — the expected values ("123123", "idtoken") suggest the
    // original fixtures contained literal tokens. These two assertions
    // probably no longer reflect the original inputs; verify against history.
    try testing.expectEqualStrings("123123", expressionExtractor("idtoken=1<PASSWORD>", "token=()").?.result);
    try testing.expectEqualStrings("idtoken", expressionExtractor("idtoken=<PASSWORD>", "()=12").?.result);
}
|
src/parser_expressions.zig
|
const std = @import("std");
const fundude = @import("fundude");
/// Generates an HTML reference table of all Game Boy CPU opcodes on stdout:
/// one 16x16 grid for the unprefixed opcodes followed by one for the
/// 0xCB-prefixed opcodes, each cell showing disassembly, length and duration.
pub fn main() !void {
    const stdout = std.io.getStdOut().writer();
    // FIXME: "<html>" is emitted twice (here and two lines below); the output
    // is kept as-is to preserve behavior, but one of them is likely redundant.
    try stdout.print("<html>\n", .{});
    try stdout.print("<style>td {{ white-space: nowrap; font-family: monospace }}</style>\n", .{});
    try stdout.print("<html>\n<body>\n<table>\n", .{})
;
    // Unprefixed opcode page, then the 0xCB-prefixed page in the same table.
    try printOpTable(stdout, false);
    try printOpTable(stdout, true);
    try stdout.print("</table>\n</body>\n</html>\n", .{});
}

/// Prints a header row plus a 16x16 grid of opcode cells.
/// `cb_prefixed` selects the 0xCB-prefixed opcode page; otherwise the opcode
/// itself is the first byte and 0xABCD serves as dummy immediate data.
fn printOpTable(stdout: anytype, cb_prefixed: bool) !void {
    try stdout.print("<tr>\n<th></th>\n", .{});
    for ([_]u8{0} ** 16) |_, i| {
        try stdout.print("<th>x{X}</th>\n", .{i});
    }
    try stdout.print("</tr>\n", .{});
    for ([_]u8{0} ** 16) |_, high| {
        try stdout.print("<tr><th>{X}x</th>\n", .{high});
        for ([_]u8{0} ** 16) |_, low| {
            const opcode = @intCast(u8, high << 4 | low);
            const op = if (cb_prefixed)
                fundude.Cpu.Op.decode([3]u8{ 0xCB, opcode, 0 })
            else
                fundude.Cpu.Op.decode([3]u8{ opcode, 0xCD, 0xAB });
            var buffer: [16]u8 = undefined;
            try stdout.print("<td>{} <br />", .{op.disassemble(&buffer)});
            try stdout.print("{} ", .{op.length});
            // Show one duration when both timings agree, "taken/not-taken" otherwise.
            if (op.durations[0] == op.durations[1]) {
                try stdout.print("{}\n", .{op.durations[0]});
            } else {
                try stdout.print("{}/{}\n", .{ op.durations[0], op.durations[1] });
            }
            try stdout.print("</td>\n", .{});
        }
        try stdout.print("</tr>\n", .{});
    }
}
|
scripts/opcodes.zig
|
const HeaderName = @import("name.zig").HeaderName;
const HeaderType = @import("name.zig").HeaderType;
const HeaderValue = @import("value.zig").HeaderValue;
/// A single HTTP header: a validated name plus its raw value.
pub const Header = struct {
    name: HeaderName,
    value: []const u8,

    /// Comptime-converts an anonymous tuple of `.{ name, value }` pairs into
    /// a slice of `Header`s, validating every name and value at compile time.
    /// Invalid input becomes a compile error, so no runtime parsing is needed.
    pub fn as_slice(comptime headers: anytype) []Header {
        const typeof = @TypeOf(headers);
        const typeinfo = @typeInfo(typeof);
        switch (typeinfo) {
            .Struct => |obj| {
                comptime {
                    var result: [obj.fields.len]Header = undefined;
                    // `i` is a comptime var, so this while loop is fully unrolled.
                    var i = 0;
                    while (i < obj.fields.len) {
                        // Validation only: the parse results are discarded.
                        _ = HeaderName.parse(headers[i][0]) catch {
                            @compileError("Invalid header name: " ++ headers[i][0]);
                        };
                        _ = HeaderValue.parse(headers[i][1]) catch {
                            @compileError("Invalid header value: " ++ headers[i][1]);
                        };
                        var _type = HeaderType.from_bytes(headers[i][0]);
                        var name = headers[i][0];
                        var value = headers[i][1];
                        result[i] = Header{ .name = .{ .type = _type, .value = name }, .value = value };
                        i += 1;
                    }
                    // Returning the address of a comptime array: its contents
                    // are baked into the binary, so the slice outlives the call.
                    return &result;
                }
            },
            else => {
                @compileError("The parameter type must be an anonymous list literal.\n" ++ "Ex: Header.as_slice(.{.{\"Gotta-Go\", \"Fast!\"}});");
            },
        }
    }
};
const std = @import("std");
const expect = std.testing.expect;
const expectEqualStrings = std.testing.expectEqualStrings;
// The validation happens at comptime inside as_slice; this test checks the
// resulting slice contents at runtime.
test "AsSlice" {
    var result = Header.as_slice(.{
        .{ "Content-Length", "9000" },
        .{ "Gotta-Go", "Fast!" },
    });
    try expect(result.len == 2);
    try expect(result[0].name.type == .ContentLength);
    try expectEqualStrings(result[0].name.raw(), "Content-Length");
    try expectEqualStrings(result[0].value, "9000");
    try expect(result[1].name.type == .Custom);
    try expectEqualStrings(result[1].name.raw(), "Gotta-Go");
    try expectEqualStrings(result[1].value, "Fast!");
}
|
src/headers/header.zig
|
const std = @import("std");
const mem = std.mem;
/// The only output of the tokenizer.
pub const ZNodeToken = struct {
    const Self = @This();
    /// 0 is root, 1 is top level children.
    depth: usize,
    /// The extent of the slice.
    /// Byte offsets into the parsed text: the token is text[start..end).
    start: usize,
    end: usize,
};
/// Parses text outputting ZNodeTokens. Does not convert strings to numbers, and all strings are
/// "as is", no escaping is performed.
/// Character-driven state machine: call `feed` once per byte; it returns a
/// ZNodeToken whenever a complete token has been recognized.
pub const StreamingParser = struct {
    const Self = @This();
    /// Current state of the state machine.
    state: State,
    /// Byte offset where the token currently being scanned starts.
    start_index: usize,
    /// Index of the character currently being fed; incremented after each feed().
    current_index: usize,
    // The maximum node depth.
    max_depth: usize,
    // The current line's depth.
    line_depth: usize,
    // The current node depth.
    node_depth: usize,
    /// Level of multiline string.
    open_string_level: usize,
    /// Current level of multiline string close.
    close_string_level: usize,
    /// Account for any extra spaces trailing at the end of a word.
    trailing_spaces: usize,
    pub const Error = error{
        TooMuchIndentation,
        InvalidWhitespace,
        OddIndentationValue,
        InvalidQuotation,
        InvalidMultilineOpen,
        InvalidMultilineClose,
        InvalidNewLineInString,
        InvalidCharacterAfterString,
        SemicolonWentPastRoot,
        UnexpectedEof,
    };
    pub const State = enum {
        /// Whether we're starting on an openline.
        OpenLine,
        ExpectZNode,
        Indent,
        OpenCharacter,
        Quotation,
        SingleLineCharacter,
        MultilineOpen0,
        MultilineOpen1,
        MultilineLevelOpen,
        MultilineLevelClose,
        MultilineClose0,
        MultilineCharacter,
        EndString,
        OpenComment,
        Comment,
    };
    /// Returns a blank parser.
    pub fn init() Self {
        var self: StreamingParser = undefined;
        self.reset();
        return self;
    }
    /// Resets the parser back to the beginning state.
    pub fn reset(self: *Self) void {
        self.state = .OpenLine;
        self.start_index = 0;
        self.current_index = 0;
        self.max_depth = 0;
        self.line_depth = 0;
        self.node_depth = 0;
        self.open_string_level = 0;
        self.close_string_level = 0;
        self.trailing_spaces = 0;
    }
    /// Returns Error.UnexpectedEof unless the parser stopped in a state where
    /// the input may legally end (e.g. mid-string states are not allowed).
    pub fn completeOrError(self: *const Self) !void {
        switch (self.state) {
            .ExpectZNode, .OpenLine, .EndString, .Comment, .OpenComment, .Indent => {},
            else => return Error.UnexpectedEof,
        }
    }
    /// Feeds a character to the parser. May output a ZNode. Check "hasCompleted" to see if there
    /// are any unfinished strings.
    pub fn feed(self: *Self, c: u8) Error!?ZNodeToken {
        defer self.current_index += 1;
        //std.debug.print("FEED<{}> {} {} ({c})\n", .{self.state, self.current_index, c, c});
        // NOTE: several branches below set the next state via `defer`, so any
        // `self.state` comparison inside those branches still observes the
        // state from *before* the transition.
        switch (self.state) {
            .OpenComment, .Comment => switch (c) {
                '\n' => {
                    self.start_index = self.current_index + 1;
                    // We're ending a line with nodes.
                    if (self.state == .Comment) {
                        self.max_depth = self.line_depth + 1;
                    }
                    self.node_depth = 0;
                    self.line_depth = 0;
                    self.state = .OpenLine;
                },
                else => {
                    // Skip.
                },
            },
            // All basically act the same except for a few minor differences.
            .ExpectZNode, .OpenLine, .EndString, .OpenCharacter => switch (c) {
                '#' => {
                    if (self.state == .OpenLine) {
                        self.state = .OpenComment;
                    } else {
                        defer self.state = .Comment;
                        if (self.state == .OpenCharacter) {
                            return ZNodeToken{
                                .depth = self.line_depth + self.node_depth + 1,
                                .start = self.start_index,
                                .end = self.current_index - self.trailing_spaces,
                            };
                        }
                    }
                },
                // The tricky character (and other whitespace).
                ' ' => {
                    if (self.state == .OpenLine) {
                        if (self.line_depth >= self.max_depth) {
                            return Error.TooMuchIndentation;
                        }
                        self.state = .Indent;
                    } else if (self.state == .OpenCharacter) {
                        self.trailing_spaces += 1;
                    } else {
                        // Skip spaces when expecting a node on a closed line,
                        // including this one.
                        self.start_index = self.current_index + 1;
                    }
                },
                ':' => {
                    defer self.state = .ExpectZNode;
                    const node = ZNodeToken{
                        .depth = self.line_depth + self.node_depth + 1,
                        .start = self.start_index,
                        .end = self.current_index - self.trailing_spaces,
                    };
                    self.start_index = self.current_index + 1;
                    self.node_depth += 1;
                    // Only return when we're not at end of a string.
                    if (self.state != .EndString) {
                        return node;
                    }
                },
                ',' => {
                    defer self.state = .ExpectZNode;
                    const node = ZNodeToken{
                        .depth = self.line_depth + self.node_depth + 1,
                        .start = self.start_index,
                        .end = self.current_index - self.trailing_spaces,
                    };
                    self.start_index = self.current_index + 1;
                    // Only return when we're not at end of a string.
                    if (self.state != .EndString) {
                        return node;
                    }
                },
                ';' => {
                    if (self.node_depth == 0) {
                        return Error.SemicolonWentPastRoot;
                    }
                    defer self.state = .ExpectZNode;
                    const node = ZNodeToken{
                        .depth = self.line_depth + self.node_depth + 1,
                        .start = self.start_index,
                        .end = self.current_index - self.trailing_spaces,
                    };
                    self.start_index = self.current_index + 1;
                    self.node_depth -= 1;
                    // Only return when we're not at end of a string, or in semicolons
                    // special case, when we don't have an empty string.
                    if (self.state != .EndString and node.start < node.end) {
                        return node;
                    }
                },
                '"' => {
                    if (self.state == .EndString) {
                        return Error.InvalidCharacterAfterString;
                    }
                    // Don't start another string.
                    if (self.state == .OpenCharacter) {
                        return null;
                    }
                    // We start here to account for the possibility of a string being ""
                    self.start_index = self.current_index + 1;
                    self.state = .Quotation;
                },
                '[' => {
                    if (self.state == .EndString) {
                        return Error.InvalidCharacterAfterString;
                    }
                    // Don't start another string.
                    if (self.state == .OpenCharacter) {
                        return null;
                    }
                    self.open_string_level = 0;
                    self.state = .MultilineOpen0;
                },
                '\n' => {
                    defer self.state = .OpenLine;
                    const node = ZNodeToken{
                        .depth = self.line_depth + self.node_depth + 1,
                        .start = self.start_index,
                        .end = self.current_index - self.trailing_spaces,
                    };
                    self.start_index = self.current_index + 1;
                    // Only reset on a non open line.
                    if (self.state != .OpenLine) {
                        self.max_depth = self.line_depth + 1;
                        self.line_depth = 0;
                    }
                    self.node_depth = 0;
                    // Only return something if there is something. Quoted strings are good.
                    if (self.state == .OpenCharacter) {
                        return node;
                    }
                },
                '\t', '\r' => {
                    return Error.InvalidWhitespace;
                },
                else => {
                    // We already have a string.
                    if (self.state == .EndString) {
                        return Error.InvalidCharacterAfterString;
                    }
                    // Don't reset if we're in a string.
                    if (self.state != .OpenCharacter) {
                        self.start_index = self.current_index;
                    }
                    self.trailing_spaces = 0;
                    self.state = .OpenCharacter;
                },
            },
            .Indent => switch (c) {
                ' ' => {
                    self.start_index = self.current_index + 1;
                    self.line_depth += 1;
                    self.state = .OpenLine;
                },
                else => {
                    return Error.OddIndentationValue;
                },
            },
            .Quotation => switch (c) {
                '"' => {
                    self.state = .EndString;
                    const node = ZNodeToken{
                        .depth = self.line_depth + self.node_depth + 1,
                        .start = self.start_index,
                        .end = self.current_index,
                    };
                    // Reset because we're going to expecting nodes.
                    self.start_index = self.current_index + 1;
                    return node;
                },
                else => {
                    self.state = .SingleLineCharacter;
                },
            },
            .SingleLineCharacter => switch (c) {
                '"' => {
                    self.state = .EndString;
                    const node = ZNodeToken{
                        .depth = self.line_depth + self.node_depth + 1,
                        .start = self.start_index,
                        .end = self.current_index,
                    };
                    // Reset because we're going to expecting nodes.
                    self.start_index = self.current_index + 1;
                    return node;
                },
                '\n' => {
                    return Error.InvalidNewLineInString;
                },
                else => {
                    // Consume.
                },
            },
            .MultilineOpen0, .MultilineLevelOpen => switch (c) {
                '=' => {
                    self.open_string_level += 1;
                    self.state = .MultilineLevelOpen;
                },
                '[' => {
                    self.start_index = self.current_index + 1;
                    self.state = .MultilineOpen1;
                },
                else => {
                    return Error.InvalidMultilineOpen;
                },
            },
            .MultilineOpen1 => switch (c) {
                ']' => {
                    self.state = .MultilineClose0;
                },
                '\n' => {
                    // Skip first newline.
                    self.start_index = self.current_index + 1;
                },
                else => {
                    self.state = .MultilineCharacter;
                },
            },
            .MultilineCharacter => switch (c) {
                ']' => {
                    self.close_string_level = 0;
                    self.state = .MultilineClose0;
                },
                else => {
                    // Capture EVERYTHING.
                },
            },
            .MultilineClose0, .MultilineLevelClose => switch (c) {
                '=' => {
                    self.close_string_level += 1;
                    self.state = .MultilineLevelClose;
                },
                ']' => {
                    if (self.close_string_level == self.open_string_level) {
                        self.state = .EndString;
                        return ZNodeToken{
                            .depth = self.line_depth + self.node_depth + 1,
                            .start = self.start_index,
                            .end = self.current_index - self.open_string_level - 1,
                        };
                    }
                    self.state = .MultilineCharacter;
                },
                else => {
                    return Error.InvalidMultilineClose;
                },
            },
        }
        return null;
    }
    /// Parses the stream, outputting ZNodeTokens which reference the text.
    pub fn parse(self: *Self, idx: *usize, text: []const u8) !?ZNodeToken {
        while (idx.* <= text.len) {
            // Insert an extra newline at the end of the stream.
            const node = if (idx.* == text.len) try self.feed('\n') else try self.feed(text[idx.*]);
            idx.* += 1;
            if (node) |n| {
                return n;
            }
        }
        return null;
    }
};
/// Test helper: feeds `text` into `stream` starting at `idx.*` and returns
/// the text slice of the next token produced, or error.ExhaustedLoop if the
/// input runs out first. `idx` is advanced past every consumed character.
fn testNextTextOrError(stream: *StreamingParser, idx: *usize, text: []const u8) ![]const u8 {
    while (idx.* < text.len) {
        const maybe_token = try stream.feed(text[idx.*]);
        idx.* += 1;
        if (maybe_token) |token| {
            return text[token.start..token.end];
        }
    }
    return error.ExhaustedLoop;
}
// End-to-end tokenizer test: checks the text of every emitted token, in order,
// across comments, quoted strings, multiline strings and inline nodes.
test "parsing slice output" {
    const testing = std.testing;
    const text =
        \\# woo comment
        \\mp:10
        \\[[sy]]
        \\ # another
        \\ : n : "en" , [[m]]
        \\ "sc" : [[10]] , g #inline
        \\ [[]]:[==[
        \\hi]==]
    ;
    var idx: usize = 0;
    var stream = StreamingParser.init();
    try testing.expectEqualSlices(u8, "mp", try testNextTextOrError(&stream, &idx, text));
    try testing.expectEqualSlices(u8, "10", try testNextTextOrError(&stream, &idx, text));
    try testing.expectEqualSlices(u8, "sy", try testNextTextOrError(&stream, &idx, text));
    try testing.expectEqualSlices(u8, "", try testNextTextOrError(&stream, &idx, text));
    try testing.expectEqualSlices(u8, "n", try testNextTextOrError(&stream, &idx, text));
    try testing.expectEqualSlices(u8, "en", try testNextTextOrError(&stream, &idx, text));
    try testing.expectEqualSlices(u8, "m", try testNextTextOrError(&stream, &idx, text));
    try testing.expectEqualSlices(u8, "sc", try testNextTextOrError(&stream, &idx, text));
    try testing.expectEqualSlices(u8, "10", try testNextTextOrError(&stream, &idx, text));
    try testing.expectEqualSlices(u8, "g", try testNextTextOrError(&stream, &idx, text));
    try testing.expectEqualSlices(u8, "", try testNextTextOrError(&stream, &idx, text));
    try testing.expectEqualSlices(u8, "hi", try testNextTextOrError(&stream, &idx, text));
}
/// Test helper: feeds `text` into `stream` starting at `idx.*` and returns
/// the depth of the next token produced, or error.ExhaustedLoop if the input
/// runs out first. `idx` is advanced past every consumed character.
fn testNextLevelOrError(stream: *StreamingParser, idx: *usize, text: []const u8) !usize {
    while (idx.* < text.len) {
        const maybe_token = try stream.feed(text[idx.*]);
        idx.* += 1;
        if (maybe_token) |token| {
            return token.depth;
        }
    }
    return error.ExhaustedLoop;
}
// Companion to "parsing slice output": checks the reported depth of every
// emitted token, including around blank and comment-only lines.
test "parsing depths" {
    const testing = std.testing;
    const text =
        \\# woo comment
        \\mp:10
        \\[[sy]]
        \\ # another
        \\ : n : "en" , [[m]]
        \\ # more
        \\
        \\ # even more
        \\
        \\ "sc" : [[10]] , g #inline
        \\ [[]]:[==[
        \\hi]==]
    ;
    var idx: usize = 0;
    var stream = StreamingParser.init();
    try testing.expectEqual(try testNextLevelOrError(&stream, &idx, text), 1);
    try testing.expectEqual(try testNextLevelOrError(&stream, &idx, text), 2);
    try testing.expectEqual(try testNextLevelOrError(&stream, &idx, text), 1);
    try testing.expectEqual(try testNextLevelOrError(&stream, &idx, text), 2);
    try testing.expectEqual(try testNextLevelOrError(&stream, &idx, text), 3);
    try testing.expectEqual(try testNextLevelOrError(&stream, &idx, text), 4);
    try testing.expectEqual(try testNextLevelOrError(&stream, &idx, text), 4);
    try testing.expectEqual(try testNextLevelOrError(&stream, &idx, text), 3);
    try testing.expectEqual(try testNextLevelOrError(&stream, &idx, text), 4);
    try testing.expectEqual(try testNextLevelOrError(&stream, &idx, text), 4);
    try testing.expectEqual(try testNextLevelOrError(&stream, &idx, text), 2);
    try testing.expectEqual(try testNextLevelOrError(&stream, &idx, text), 3);
}
/// Represents a node in a static tree. Nodes have a parent, child, and sibling pointer.
pub const ZNode = struct {
const Self = @This();
value: []const u8 = "",
parent: ?*ZNode = null,
sibling: ?*ZNode = null,
child: ?*ZNode = null,
/// Returns the next Node in the tree. Will return Null after reaching root. For nodes further
/// down the tree, they will bubble up, resulting in a negative depth. Depth will be modified
/// relative to self: 0 for siblings, -1 or less for parents, 1 for children.
pub fn next(self: *const Self, depth: ?*isize) ?*ZNode {
if (self.child) |c| {
if (depth) |d| {
d.* += 1;
}
return c;
} else if (self.sibling) |c| {
return c;
} else {
// Go up and forward.
var iter: ?*const ZNode = self;
while (iter != null) {
iter = iter.?.parent;
if (iter != null) {
if (depth) |d| {
d.* -= 1;
}
if (iter.?.sibling) |c| {
return c;
}
}
}
return null;
}
}
/// Iterates this node's children. Pass null to start. `iter = node.nextChild(iter);`
pub fn nextChild(self: *const Self, iter: ?*const ZNode) ?*ZNode {
if (iter) |it| {
return it.sibling;
} else {
return self.child;
}
}
/// Returns the next descendant. Pass null to start.
pub fn nextDescendant(self: *const Self, iter: ?*const ZNode, depth: ?*isize) ?*ZNode {
if (iter == null) {
if (self.child) |c| {
if (depth) |d| {
d.* += 1;
}
return c;
}
} else if (iter.?.child) |c| {
if (depth) |d| {
d.* += 1;
}
return c;
} else if (iter.?.sibling) |sib| {
return sib;
} else {
// Go up and forward.
if (depth) |d| {
d.* -= 1;
}
var it = iter.?.parent;
while (it != null) {
// Back.
if (it == self) {
return null;
}
// Nope try going forward.
if (it.?.sibling) |sib| {
return sib;
} else {
if (depth) |d| {
d.* -= 1;
}
it = it.?.parent;
}
}
}
return null;
}
/// Returns the nth child. O(n)
pub fn getNthChild(self: *const Self, nth: usize) ?*ZNode {
var count: usize = 0;
var iter: ?*ZNode = self.child;
while (iter) |n| {
if (count == nth) {
return n;
}
count += 1;
iter = n.sibling;
}
return null;
}
/// Returns the nth child's value. Or null if neither the node or child exist.
pub fn getNthChildValue(self: *const Self, nth: usize) ?[]const u8 {
if (self.getNthChild(nth)) |child| {
return child.value;
}
return null;
}
/// Returns the number of children. O(n)
pub fn getChildCount(self: *const Self) usize {
var count: usize = 0;
var iter: ?*ZNode = self.child;
while (iter) |n| {
count += 1;
iter = n.sibling;
}
return count;
}
/// Finds the nth (0-based) direct child whose value equals `value`.
/// Returns null when fewer than nth+1 children match.
pub fn findNthChild(self: *const Self, nth: usize, value: []const u8) ?*ZNode {
    var matches: usize = 0;
    var node: ?*ZNode = self.child;
    while (node) |n| : (node = n.sibling) {
        if (!mem.eql(u8, n.value, value)) continue;
        if (matches == nth) return n;
        matches += 1;
    }
    return null;
}
/// Finds the first direct child with the specified value. Shorthand for
/// `findNthChild(0, value)`; returns null when no child matches.
pub fn findChild(self: *const Self, value: []const u8) ?*ZNode {
return self.findNthChild(0, value);
}
/// Finds the next child after the given iterator. This is good for when you can guess the order
/// of the nodes, which can cut down on starting from the beginning. Passing null starts over
/// from the beginning. Returns the found node or null (it will loop back around).
pub fn findNextChild(self: *const Self, start: ?*const ZNode, value: []const u8) ?*ZNode {
var iter: ?*ZNode = self.child;
if (start) |si| {
// Begin just after the starting node.
iter = si.sibling;
}
// Scan until we come back around to `start`. With a null `start` the
// loop simply ends when the child list runs out (no wrap needed).
while (iter != start) {
if (iter) |it| {
if (mem.eql(u8, it.value, value)) {
return it;
}
iter = it.sibling;
} else {
// Loop back.
iter = self.child;
}
}
return null;
}
/// Traverses descendants in pre-order until the nth (0-based) node
/// carrying `value` is found; null when there are not enough matches.
pub fn findNthDescendant(self: *const Self, nth: usize, value: []const u8) ?*ZNode {
    var matches: usize = 0;
    var cursor: ?*const ZNode = null;
    while (self.nextDescendant(cursor, null)) |n| : (cursor = n) {
        if (!mem.eql(u8, n.value, value)) continue;
        if (matches == nth) return n;
        matches += 1;
    }
    return null;
}
/// Traverses descendants until a node with the specified value is found.
/// Shorthand for `findNthDescendant(0, value)`; null when nothing matches.
pub fn findDescendant(self: *const Self, value: []const u8) ?*ZNode {
return self.findNthDescendant(0, value);
}
/// Returns true if this node has more than one descendant (child,
/// grandchild, etc). Stops scanning as soon as a second one is seen.
fn _moreThanOneDescendant(self: *const Self) bool {
    var seen: usize = 0;
    var cursor: ?*const ZNode = null;
    while (self.nextDescendant(cursor, null)) |n| : (cursor = n) {
        seen += 1;
        if (seen >= 2) return true;
    }
    return false;
}
/// Outputs a `ZNode`s children on multiple lines. Excludes this node as root.
/// Values containing a newline or a double quote are wrapped in [[..]];
/// values containing control characters (space, tab, ':', ',', ';') or
/// empty leaf values are double-quoted.
fn stringify(self: *const Self, out_stream: anytype) @TypeOf(out_stream).Error!void {
var depth: isize = 0;
var last_depth: isize = 0;
var iter: ?*const ZNode = null;
while (self.nextDescendant(iter, &depth)) |n| : (iter = n) {
// Special case for root.
if (last_depth == 0) {
last_depth = depth;
} else if (depth > last_depth) {
// Descended: the previous node is this node's parent, so emit ':'.
last_depth = depth;
try out_stream.writeAll(":");
// Likely an array.
if (mem.eql(u8, n.parent.?.value, "")) {
try out_stream.writeAll(" ");
} else if (n.parent.?._moreThanOneDescendant()) {
// Multiple descendants: each child goes on its own indented line.
try out_stream.writeAll("\n");
try out_stream.writeByteNTimes(' ', 2 * @bitCast(usize, depth - 1));
} else {
// Single descendant: keep it inline after the colon.
try out_stream.writeAll(" ");
}
} else if (depth < last_depth) {
// Ascended one or more levels; resync last_depth.
// NOTE(review): this loop body assigns `depth` directly, so it runs
// at most once — a plain `last_depth = depth;` looks equivalent.
while (depth < last_depth) {
last_depth = depth;
}
try out_stream.writeAll("\n");
try out_stream.writeByteNTimes(' ', 2 * @bitCast(usize, depth - 1));
} else {
// Sibling at the same depth: new line, same indent.
try out_stream.writeAll("\n");
try out_stream.writeByteNTimes(' ', 2 * @bitCast(usize, depth - 1));
}
var multiline_or_quote =
mem.indexOf(u8, n.value, "\n") != null or
mem.indexOf(u8, n.value, "\"") != null;
var contains_control =
mem.indexOf(u8, n.value, " ") != null or
mem.indexOf(u8, n.value, "\t") != null or
mem.indexOf(u8, n.value, ":") != null or
mem.indexOf(u8, n.value, ",") != null or
mem.indexOf(u8, n.value, ";") != null;
// Handle empty strings without children.
var empty_no_children = n.child == null and n.value.len == 0;
if (multiline_or_quote) {
try out_stream.writeAll("[[\n");
} else if (contains_control or empty_no_children) {
try out_stream.writeAll("\"");
}
try out_stream.writeAll(n.value);
if (multiline_or_quote) {
try out_stream.writeAll("]]");
} else if (contains_control or empty_no_children) {
try out_stream.writeAll("\"");
}
}
try out_stream.writeAll("\n");
}
/// Stringifies this node's subtree to standard out. Write errors are
/// deliberately ignored (best-effort debugging helper).
pub fn show(self: *const Self) void {
self.stringify(std.io.getStdOut().writer()) catch {};
}
};
/// Errors from building a static tree: the fixed node pool is exhausted,
/// or the underlying text failed to parse.
pub const ZStaticTreeError = error{
TreeFull,
} || StreamingParser.Error;
/// Represents a static tree. Does not manage memory for strings, only references it.
/// Nodes come from a fixed pool of `S` slots; appending past capacity
/// returns `TreeFull`.
pub fn ZStaticTree(comptime S: usize) type {
    return struct {
        const Self = @This();
        root: ZNode = ZNode{},
        nodes: [S]ZNode = [_]ZNode{.{}} ** S,
        node_count: usize = 0,
        /// Returns the number of nodes remaining in the tree.
        pub fn nodesRemaining(self: *const Self) usize {
            return S - self.node_count;
        }
        /// Returns a boolean indicating if nodes from text can fit within the tree.
        /// Unparseable text simply reports false.
        pub fn canAppend(self: *const Self, text: []const u8) bool {
            const count = countTextNodes(text) catch return false;
            return count <= self.nodesRemaining();
        }
        /// Adds a node given a parent. Null parent uses the root.
        /// Returns `TreeFull` once all S slots are used.
        pub fn appendValue(self: *Self, parent: ?*ZNode, value: []const u8) ZStaticTreeError!*ZNode {
            if (self.node_count >= S) {
                return ZStaticTreeError.TreeFull;
            }
            // A null parent always means the root, so unwrap to a plain
            // pointer here (mirrors ZDynamicTree.appendRawValue) instead of
            // carrying an optional that can never actually be null.
            var true_parent = if (parent == null) &self.root else parent.?;
            var node = &self.nodes[self.node_count];
            self.node_count += 1;
            node.value = value;
            node.parent = true_parent;
            node.sibling = null;
            node.child = null;
            // Append at the end of the parent's child list.
            if (true_parent.child) |child| {
                var iter = child;
                while (iter.sibling) |sib| : (iter = sib) {}
                iter.sibling = node;
            } else {
                true_parent.child = node;
            }
            return node;
        }
        /// Clears the entire tree. Node slots are reused by later appends.
        pub fn clear(self: *Self) void {
            self.node_count = 0;
            self.root.child = null;
        }
    };
}
// TreeFull leaves already-inserted nodes in place: the failing append
// consumes the remaining slots before reporting the error.
test "error fills tree" {
const testing = std.testing;
var tree = ZStaticTree(6){};
// Using 2 nodes.
try appendText(&tree, null, "foo:bar");
try testing.expectEqual(@as(usize, 2), tree.node_count);
// This text needs 5 nodes but only 4 slots remain.
try testing.expectError(ZStaticTreeError.TreeFull, appendText(&tree, null, "bar:foo:baz:ha:ha"));
try testing.expectEqual(@as(usize, 6), tree.node_count);
}
// Exercises findNextChild's forward search and its wrap-around behavior
// on a parsed document.
test "static tree" {
const testing = std.testing;
const text =
\\max_particles: 100
\\texture: circle
\\en: Foo
\\systems:
\\  : name:Emitter
\\    params:
\\      some,stuff,hehe
\\  : name:Fire
;
var tree = ZStaticTree(100){};
try appendText(&tree, null, text);
var iter = tree.root.findNextChild(null, "max_particles");
try testing.expect(iter != null);
iter = tree.root.findNextChild(iter, "texture");
try testing.expect(iter != null);
// Searching for an earlier key still works: the scan wraps around.
iter = tree.root.findNextChild(iter, "max_particles");
try testing.expect(iter != null);
iter = tree.root.findNextChild(iter, "systems");
try testing.expect(iter != null);
// "42" never appears among root's children.
iter = tree.root.findNextChild(iter, "42");
try testing.expect(iter == null);
}
// Verifies appendValue wiring plus the child/descendant search helpers:
// findChild only sees direct children, findDescendant recurses.
test "node appending and searching" {
const testing = std.testing;
var tree = ZStaticTree(100){};
var root = try tree.appendValue(null, "");
_ = try tree.appendValue(root, "");
_ = try tree.appendValue(root, "Hello");
_ = try tree.appendValue(root, "foo");
_ = try tree.appendValue(root, "42");
_ = try tree.appendValue(root, "3.14");
_ = try tree.appendValue(root, "true");
const nested = try tree.appendValue(root, "nested");
_ = try tree.appendValue(nested, "descendent");
// "descendent" hangs off "nested", so root has exactly 7 direct children.
try testing.expectEqual(@as(usize, 7), root.getChildCount());
try testing.expect(root.findNthChild(0, "") != null);
try testing.expect(root.findNthChild(0, "Hello") != null);
try testing.expect(root.findNthChild(0, "foo") != null);
// Each value appears once, so asking for a second match yields null.
try testing.expect(root.findNthChild(1, "Hello") == null);
try testing.expect(root.findNthChild(1, "foo") == null);
try testing.expect(root.findNthChild(0, "42") != null);
try testing.expect(root.findNthChild(0, "41") == null);
try testing.expect(root.findNthChild(1, "42") == null);
try testing.expect(root.findNthChild(0, "3.14") != null);
try testing.expect(root.findNthChild(0, "3.13") == null);
try testing.expect(root.findNthChild(1, "3.14") == null);
try testing.expect(root.findNthChild(0, "true") != null);
try testing.expect(root.findNthChild(1, "true") == null);
try testing.expect(root.findChild("Hello") != null);
try testing.expect(root.findChild("foo") != null);
try testing.expect(root.findChild("42") != null);
try testing.expect(root.findChild("3.14") != null);
try testing.expect(root.findChild("true") != null);
try testing.expect(root.findChild("nested") != null);
// Grandchildren are invisible to findChild but not to findDescendant.
try testing.expect(root.findChild("descendent") == null);
try testing.expect(root.findDescendant("nested") != null);
try testing.expect(root.findDescendant("descendent") != null);
}
// copyNode grafts tree1's whole root subtree into tree0 as a sibling of
// tree0's existing children.
test "appending node" {
const testing = std.testing;
var tree0 = ZStaticTree(8){};
var tree1 = ZStaticTree(8){};
try appendText(&tree0, null, "foo:bar");
tree1.root.value = "ROOT";
try appendText(&tree1, null, "biz:baz");
var new_root = try copyNode(&tree0, null, &tree1.root);
try testing.expectEqualSlices(u8, "ROOT", new_root.value);
var root = &tree0.root;
// Original content is untouched...
try testing.expectEqualSlices(u8, "foo", root.child.?.value);
try testing.expectEqualSlices(u8, "bar", root.child.?.child.?.value);
// ...and the copied subtree hangs off root as foo's sibling.
try testing.expectEqualSlices(u8, "ROOT", root.child.?.sibling.?.value);
try testing.expectEqualSlices(u8, "biz", root.child.?.sibling.?.child.?.value);
try testing.expectEqualSlices(u8, "baz", root.child.?.sibling.?.child.?.child.?.value);
}
/// Errors from building a dynamic tree: allocation failure, or the
/// underlying text failed to parse.
pub const ZDynamicTreeError = error{
OutOfMemory,
} || StreamingParser.Error;
/// A tree whose nodes and strings live in an arena allocator; all
/// memory is released at once by `deinit`.
pub const ZDynamicTree = struct {
    const Self = @This();
    arena: std.heap.ArenaAllocator,
    root: ZNode = ZNode{},
    node_count: usize = 0,
    /// Wraps `allocator` in an arena used for every node and string.
    pub fn init(allocator: std.mem.Allocator) Self {
        return Self{ .arena = std.heap.ArenaAllocator.init(allocator) };
    }
    /// Frees all nodes and strings in one shot.
    pub fn deinit(self: *Self) void {
        self.arena.deinit();
    }
    /// Appends a raw value allocated with this struct's arena allocator.
    /// The caller guarantees `value` outlives the tree (no copy is made).
    pub fn appendRawValue(self: *Self, parent: ?*ZNode, value: []const u8) ZDynamicTreeError!*ZNode {
        // Null parent means the root.
        var true_parent = if (parent == null) &self.root else parent.?;
        var new_node = try self.arena.allocator().create(ZNode);
        self.node_count += 1;
        new_node.value = value;
        new_node.parent = true_parent;
        new_node.sibling = null;
        new_node.child = null;
        // Link as the last child of the parent.
        if (true_parent.child == null) {
            true_parent.child = new_node;
        } else {
            var tail = true_parent.child.?;
            while (tail.sibling) |sib| : (tail = sib) {}
            tail.sibling = new_node;
        }
        return new_node;
    }
    /// Appends a value, duping it into the arena first.
    pub fn appendValue(self: *Self, parent: ?*ZNode, value: []const u8) ZDynamicTreeError!*ZNode {
        var copy = try self.arena.allocator().dupe(u8, value);
        errdefer self.arena.allocator().free(copy);
        return self.appendRawValue(parent, copy);
    }
    /// Appends anytype to the tree by converting it to a string with allocPrint.
    pub fn appendAnytype(self: *Self, parent: ?*ZNode, value: anytype) ZDynamicTreeError!*ZNode {
        return self.appendPrint(parent, "{any}", .{value});
    }
    /// Appends a formatted value; the rendered text lives in the arena.
    pub fn appendPrint(self: *Self, parent: ?*ZNode, comptime fmt: []const u8, value: anytype) ZDynamicTreeError!*ZNode {
        var text = try std.fmt.allocPrint(self.arena.allocator(), fmt, value);
        errdefer self.arena.allocator().free(text);
        return self.appendRawValue(parent, text);
    }
};
/// Union of every error set the free-standing tree helpers below can return.
const ZError = ZDynamicTreeError || ZStaticTreeError || ParseTreeError;
/// Adds text under a parent node. Passing null will put the text under root.
/// `tree` must be a pointer to a zzz tree (anything with a `root` field
/// and an `appendValue` method, i.e. ZStaticTree or ZDynamicTree).
pub fn appendText(tree: anytype, parent: ?*ZNode, text: []const u8) ZError!void {
    const tree_type = @TypeOf(tree);
    const tree_type_info = @typeInfo(tree_type);
    if (tree_type_info != .Pointer) {
        // Fixed: this message previously said "copyNode" — a copy/paste
        // slip from the sibling function below.
        @compileError("appendText expects a zzz tree pointer");
    }
    var current: *ZNode = if (parent == null) &tree.root else parent.?;
    var current_depth: usize = 0;
    var stream = StreamingParser.init();
    var idx: usize = 0;
    while (try stream.parse(&idx, text)) |token| {
        const slice = text[token.start..token.end];
        const value = if (slice.len == 0) "" else slice;
        const new_depth = token.depth;
        if (new_depth <= current_depth) {
            // Ascend to the token's level, then attach as a sibling.
            while (current_depth > new_depth) {
                current = current.parent orelse unreachable;
                current_depth -= 1;
            }
            const new = try tree.appendValue(current.parent, value);
            current.sibling = new;
            current = new;
        } else if (new_depth == current_depth + 1) {
            // Descend.
            current_depth += 1;
            const new = try tree.appendValue(current, value);
            current.child = new;
            current = new;
        } else {
            // Levels shouldn't increase by more than one.
            unreachable;
        }
    }
    // Reject text that ends in the middle of a construct.
    try stream.completeOrError();
}
/// Copies a node under another parent. Does not check if there are overlaps, like copying a
/// parent to be under a child.
pub fn copyNode(tree: anytype, parent: ?*ZNode, node: *const ZNode) ZError!*ZNode {
const tree_type = @TypeOf(tree);
const tree_type_info = @typeInfo(tree_type);
if (tree_type_info != .Pointer) {
@compileError("copyNode expects a zzz tree pointer");
}
// Copy the subtree root first; descendants get hung off it below.
var new_root = try tree.appendValue(parent, node.value);
// Starts at 1 for direct children so we iterate siblings.
var last_depth: isize = 1;
var depth: isize = 0;
// `iter` walks the source subtree; `piter` is the current destination
// parent; `plast` is the most recently appended node (the candidate
// parent when the source descends a level).
var iter = node;
var piter: ?*ZNode = new_root;
var plast: ?*ZNode = null;
while (iter.next(&depth)) |next| : (iter = next) {
// If depth comes back to 0 or less, we're siblings or parent of node.
if (depth <= 0) {
break;
} else if (depth > last_depth) {
// Source descended: the last appended copy becomes the parent.
piter = plast;
last_depth = depth;
} else if (depth < last_depth) {
// Source ascended: climb the destination tree to the same level.
plast = piter;
while (last_depth != depth) {
piter = piter.?.parent;
last_depth -= 1;
}
} else {
// Sibling, keep using current parent (piter).
}
plast = try tree.appendValue(piter, next.value);
}
return new_root;
}
/// Parses and counts the number of nodes a text would produce; returns
/// a parse error when the text is malformed or ends mid-construct.
pub fn countTextNodes(text: []const u8) !usize {
    var parser = StreamingParser.init();
    var pos: usize = 0;
    var total: usize = 0;
    while (try parser.parse(&pos, text)) |_| total += 1;
    try parser.completeOrError();
    return total;
}
// The colon chain yields 11 nodes; appendAnytype adds one more (12).
// testing.allocator verifies the arena releases everything on deinit.
test "dynamic tree" {
const testing = std.testing;
var tree0 = ZDynamicTree.init(testing.allocator);
defer tree0.deinit();
try appendText(&tree0, null,
\\arbitrary:data:that:can:be:as:long:as:memory:can:hold
);
_ = try tree0.appendAnytype(null, 42);
try testing.expectEqual(@as(usize, 12), tree0.node_count);
}
/// Errors produced while mapping a parsed tree onto a Zig type, plus
/// allocation failure.
pub const ParseTreeError = error{
InvalidData,
MissingField,
} || std.mem.Allocator.Error;
/// Tries to construct an instance of the given type by parsing a tree node.
/// Slices (i.e. strings) are copied; free the result with `zzz.free`.
/// Dispatches at comptime on T: strings dupe, structs/slices/enums
/// recurse into their dedicated parsers.
pub fn parseTreeAlloc(
    comptime T: type,
    allocator: std.mem.Allocator,
    node: ZNode,
) ParseTreeError!T {
    if (T == []const u8) {
        // A string may live in the node itself or in its single child.
        const source = if (node.child) |child| child.value else node.value;
        return allocator.dupe(u8, source);
    }
    return switch (@typeInfo(T)) {
        .Struct => parseTreeStructAlloc(T, allocator, node),
        .Pointer => parseTreeSliceAlloc(T, allocator, node),
        .Enum => parseTreeEnum(T, node),
        else => @compileError("Unsupported type: " ++ @typeName(T)),
    };
}
/// Parses a struct of type `T` from `node`: each field is filled from the
/// first child whose value matches the field name.
/// Returns MissingField if any field lacks a matching child.
pub fn parseTreeStructAlloc(
comptime T: type,
allocator: std.mem.Allocator,
node: ZNode,
) ParseTreeError!T {
if (@typeInfo(T) != .Struct) {
@compileError("Expected struct type, got " ++ @typeName(T));
}
var res: T = undefined;
var missing_field = false;
inline for (std.meta.fields(T)) |field| {
if (node.findChild(field.name)) |value_node| {
const field_value = try parseTreeAlloc(
field.field_type,
allocator,
value_node.*,
);
@field(res, field.name) = field_value;
} else {
// Keep scanning so fields that are present still parse; the
// error is reported once after the loop.
missing_field = true;
}
}
// NOTE(review): on the MissingField path (and if a later field's parse
// fails via `try`), allocations already made for earlier fields are not
// freed here — looks like a leak under a leak-checking allocator; confirm.
return if (missing_field) ParseTreeError.MissingField else res;
}
/// Parses a slice of type `T` from `node`: one element per child.
/// Returns InvalidData for a childless node (note: an empty list is thus
/// indistinguishable from missing data).
pub fn parseTreeSliceAlloc(
comptime T: type,
allocator: std.mem.Allocator,
node: ZNode,
) ParseTreeError!T {
const type_info = @typeInfo(T);
if (type_info != .Pointer) {
@compileError("Expected slice type, got " ++ @typeName(T));
}
switch (type_info.Pointer.size) {
.Slice => {
const count = node.getChildCount();
var elems = try allocator.alloc(type_info.Pointer.child, count);
// NOTE(review): this only frees the slice itself; if an element's
// parse fails mid-loop, elements parsed before it are not freed —
// confirm whether callers tolerate that leak on the error path.
errdefer allocator.free(elems);
if (node.child) |child| {
var i: u32 = 0;
var iter: ?*ZNode = child;
while (iter) |cur| : (iter = iter.?.sibling) {
elems[i] = try parseTreeAlloc(
type_info.Pointer.child,
allocator,
cur.*,
);
i += 1;
}
// Defensive: the sibling walk must agree with getChildCount.
if (i != count) {
return ParseTreeError.InvalidData;
}
} else {
return ParseTreeError.InvalidData;
}
return elems;
},
else => @compileError("Expected slice type, got " ++ @typeName(T)),
}
}
/// Parses an enum of type `T` from the value of `node`'s first child.
/// Returns InvalidData when the node has no child or the text matches
/// no variant name.
pub fn parseTreeEnum(comptime T: type, node: ZNode) ParseTreeError!T {
    if (@typeInfo(T) != .Enum) @compileError("Expected enum type, got " ++ @typeName(T));
    const value_node = node.child orelse return ParseTreeError.InvalidData;
    // std.meta.stringToEnum performs the same exact-name match the old
    // hand-rolled inline-for did, via a comptime-built lookup.
    return std.meta.stringToEnum(T, value_node.value) orelse ParseTreeError.InvalidData;
}
/// Frees memory allocated by parseTreeAlloc. Recurses through struct
/// fields and slice elements; enums own no memory.
pub fn free(allocator: std.mem.Allocator, value: anytype) void {
    const T = @TypeOf(value);
    if (T == []const u8) {
        allocator.free(value);
        return;
    }
    switch (@typeInfo(T)) {
        .Struct => |info| inline for (info.fields) |field| {
            free(allocator, @field(value, field.name));
        },
        .Pointer => |info| switch (info.size) {
            .Slice => {
                // Free the elements first, then the slice storage itself.
                for (value) |elem| free(allocator, elem);
                allocator.free(value);
            },
            else => @compileError("Unsupported type: " ++ @typeName(T)),
        },
        .Enum => {},
        else => @compileError("Unsupported type: " ++ @typeName(T)),
    }
}
// End-to-end: parse a document into a nested struct (string, slice,
// enum, sub-struct), verify every field, then free it all with the
// leak-checking testing allocator.
test "parseTreeAlloc/free" {
const testing = std.testing;
var test_tree = ZStaticTree(1000){};
try appendText(
&test_tree,
null,
\\a: "testing, testing..."
\\bs:
\\  1
\\  2
\\  3
\\
\\c: fourth
\\
\\d:
\\  foo: "foo'd"
\\  bar: "bar'd"
\\  baz: "baz'd"
\\
,
);
const foo = try parseTreeAlloc(
struct {
a: []const u8,
bs: [][]const u8,
c: enum {
first,
second,
third,
fourth,
},
d: struct {
foo: []const u8,
bar: []const u8,
baz: []const u8,
},
},
testing.allocator,
test_tree.root,
);
defer free(testing.allocator, foo);
try testing.expectEqualSlices(u8, "testing, testing...", foo.a);
try testing.expectEqual(@as(usize, 3), foo.bs.len);
try testing.expectEqualSlices(u8, "1", foo.bs[0]);
try testing.expectEqualSlices(u8, "2", foo.bs[1]);
try testing.expectEqualSlices(u8, "3", foo.bs[2]);
try testing.expectEqual(@TypeOf(foo.c).fourth, foo.c);
try testing.expectEqualSlices(u8, "foo'd", foo.d.foo);
try testing.expectEqualSlices(u8, "bar'd", foo.d.bar);
try testing.expectEqualSlices(u8, "baz'd", foo.d.baz);
// A struct field with no matching child must report MissingField.
try testing.expectError(ParseTreeError.MissingField, parseTreeAlloc(
struct {
something: enum { different },
},
testing.allocator,
test_tree.root,
));
}
|
src/main.zig
|
const std = @import("std");
const parse = @import("parse.zig");
const LispTokenizer = parse.LispTokenizer;
const LispParser = parse.LispParser;
const LispCall = interpret.LispCall;
const LispClosure = interpret.LispClosure;
const LispExpr = interpret.LispExpr;
const LispInterpreter = interpret.LispInterpreter;
const LispNativeCall = interpret.LispNativeCall;
const interpret = @import("interpret.zig");
/// Builds the core interpreter scope with arithmetic, comparison,
/// control-flow, and list primitives. Caller owns the returned
/// interpreter and must deinit it.
/// Native callables are stored as integers via @ptrToInt — the LispExpr
/// payload holds the function's address, not a typed function pointer.
pub fn initCore(allocator: *std.mem.Allocator) !LispInterpreter {
var core = LispInterpreter.init(allocator, null);
errdefer core.deinit();
// Constants.
try core.scope.put("nil", LispExpr{ .nil = {} });
try core.scope.put("t", LispExpr{ .t = {} });
// Arithmetic (functions: args evaluated before the call).
try core.scope.put("+", LispExpr{ .native_func = @ptrToInt(Operation(.add)) });
try core.scope.put("-", LispExpr{ .native_func = @ptrToInt(Operation(.sub)) });
try core.scope.put("*", LispExpr{ .native_func = @ptrToInt(Operation(.mul)) });
try core.scope.put("/", LispExpr{ .native_func = @ptrToInt(Operation(.div)) });
// Comparisons.
try core.scope.put("=", LispExpr{ .native_func = @ptrToInt(Comparison(.eq)) });
try core.scope.put("!=", LispExpr{ .native_func = @ptrToInt(Comparison(.neq)) });
try core.scope.put("<", LispExpr{ .native_func = @ptrToInt(Comparison(.lt)) });
try core.scope.put("<=", LispExpr{ .native_func = @ptrToInt(Comparison(.lteq)) });
try core.scope.put(">", LispExpr{ .native_func = @ptrToInt(Comparison(.gt)) });
try core.scope.put(">=", LispExpr{ .native_func = @ptrToInt(Comparison(.gteq)) });
try core.scope.put("print", LispExpr{ .native_func = @ptrToInt(print) });
// Binding and control flow (macros: args passed unevaluated).
try core.scope.put("let", LispExpr{ .native_macro = @ptrToInt(let) });
try core.scope.put("set", LispExpr{ .native_macro = @ptrToInt(set) });
try core.scope.put("func", LispExpr{ .native_macro = @ptrToInt(func) });
try core.scope.put("macro", LispExpr{ .native_macro = @ptrToInt(macro) });
try core.scope.put("if", LispExpr{ .native_macro = @ptrToInt(@"if") });
try core.scope.put("while", LispExpr{ .native_macro = @ptrToInt(@"while") });
// List primitives.
try core.scope.put("list", LispExpr{ .native_func = @ptrToInt(list) });
try core.scope.put("len", LispExpr{ .native_func = @ptrToInt(len) });
try core.scope.put("at", LispExpr{ .native_func = @ptrToInt(at) });
try core.scope.put("push", LispExpr{ .native_func = @ptrToInt(push) });
try core.scope.put("pop", LispExpr{ .native_func = @ptrToInt(pop) });
try core.scope.put("clone", LispExpr{ .native_func = @ptrToInt(clone) });
return core;
}
/// Discriminates the four arithmetic primitives built by Operation().
const OperationType = enum { add, sub, mul, div };
/// Returns a native function implementing `op` over integer arguments.
/// add/mul fold any number of arguments; sub/div require exactly two.
/// Division truncates toward zero (@divTrunc).
fn Operation(comptime op: OperationType) LispNativeCall {
const impl = switch (op) {
.add, .mul => struct {
// NOTE(review): `interpreter` is unused in this variant — older Zig
// accepted unused parameters silently.
fn inner(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
// Fold from the operation's identity element.
var acc: isize = switch (op) {
.add => 0,
.mul => 1,
else => unreachable,
};
for (args) |arg| {
switch (arg) {
.number => |num| switch (op) {
.add => acc += num,
.mul => acc *= num,
else => unreachable,
},
else => return error.OperationNotANumber,
}
}
return LispExpr{ .number = acc };
}
},
.sub, .div => struct {
fn inner(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
// Non-commutative ops are strictly binary.
if (args.len != 2) return error.OperationInvalidArguments;
const first = switch (args[0]) {
.number => |number| number,
else => return error.OperationNotANumber,
};
const second = switch (args[1]) {
.number => |number| number,
else => return error.OperationNotANumber,
};
return LispExpr{
.number = switch (op) {
.sub => first - second,
.div => @divTrunc(first, second),
else => unreachable,
},
};
}
},
};
return impl.inner;
}
/// Discriminates the comparison primitives built by Comparison().
const ComparisonType = enum { eq, neq, gt, gteq, lt, lteq };
/// Returns a native function implementing `comp`: yields t when every
/// later argument compares true against the first, nil otherwise.
fn Comparison(comptime comp: ComparisonType) LispNativeCall {
    const impl = struct {
        fn inner(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
            // Guard the empty argument list so args[0] below cannot index
            // out of bounds (mirrors the length checks in print/let/set).
            if (args.len == 0) return error.ComparisonInvalidArguments;
            const first = switch (args[0]) {
                .number => |number| number,
                else => return error.ComparisonNotANumber,
            };
            for (args[1..]) |branch| switch (branch) {
                .number => |number| if (!switch (comp) {
                    .eq => first == number,
                    .neq => first != number,
                    .gt => first > number,
                    .gteq => first >= number,
                    .lt => first < number,
                    .lteq => first <= number,
                }) return LispExpr{ .nil = {} },
                else => return error.ComparisonNotANumber,
            };
            return LispExpr{ .t = {} };
        }
    };
    return impl.inner;
}
/// (print expr...) — writes each argument to stdout and returns the
/// last argument. Strings print their raw contents; other expressions
/// go through LispExpr's formatter.
fn print(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
const stdout = std.io.getStdOut().writer();
if (args.len == 0) return error.PrintInvalidArguments;
for (args) |arg| {
switch (arg) {
.string => |string| try stdout.print("{s}", .{string.items}),
else => try stdout.print("{s}", .{arg}),
}
}
return args[args.len - 1];
}
/// (let name expr) — evaluates expr and binds it to a NEW identifier in
/// the current scope; fails when the name already exists (use set to
/// rebind). Returns the bound value.
fn let(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
    if (args.len != 2) return error.LetInvalidArguments;
    if (args[0] != .identifier) return error.LetNoIdentifier;
    const name = args[0].identifier.items;
    if (interpreter.scope.contains(name)) return error.LetIdentifierAlreadyExists;
    const value = try interpreter.eval(args[1]);
    try interpreter.scope.put(name, value);
    return value;
}
/// (set name expr) — rebinds an EXISTING identifier in the current
/// scope; fails when the name was never bound. Returns the new value.
fn set(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
    if (args.len != 2) return error.SetInvalidArguments;
    if (args[0] != .identifier) return error.SetNoIdentifier;
    const name = args[0].identifier.items;
    // NOTE: the error keeps its historical spelling ("Identifer") so any
    // existing code switching on it continues to work.
    if (!interpreter.scope.contains(name)) return error.SetNoSuchIdentifer;
    const value = try interpreter.eval(args[1]);
    try interpreter.scope.put(name, value);
    return value;
}
/// (macro (params...) body...) — builds a callable whose arguments are
/// NOT evaluated before the call and which captures no closure scope
/// (contrast with func below).
fn macro(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
if (args.len < 2) return error.MacroInvalidArguments;
if (args[0] != .list) return error.MacroNoParameters;
var callable = try interpreter.allocator.create(LispCall);
errdefer interpreter.allocator.destroy(callable);
// Copy the parameter list; every entry must be an identifier.
callable.params = try std.ArrayList(LispExpr).initCapacity(interpreter.allocator, args[0].list.items.len);
errdefer callable.params.deinit();
for (args[0].list.items) |param| {
if (param == .identifier) {
try callable.params.append(param);
} else {
return error.MacroInvalidParameter;
}
}
// The remaining arguments form the body, evaluated at call time.
callable.body = try std.ArrayList(LispExpr).initCapacity(interpreter.allocator, args.len - 1);
errdefer callable.body.deinit();
for (args[1..]) |expr| try callable.body.append(expr);
// store() takes ownership of `callable` on success.
return try interpreter.store(LispExpr{ .macro = callable });
}
/// (func (params...) (captures...) body...) — builds a function with an
/// explicit capture list: each captured identifier's CURRENT value is
/// cloned into the closure's own scope at definition time.
fn func(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
if (args.len < 3) return error.FuncInvalidArguments;
if (args[0] != .list) return error.FuncNoParameters;
if (args[1] != .list) return error.FuncNoClosure;
var closure = try interpreter.allocator.create(LispClosure);
errdefer interpreter.allocator.destroy(closure);
// Copy the parameter list; every entry must be an identifier.
closure.call.params = try std.ArrayList(LispExpr).initCapacity(interpreter.allocator, args[0].list.items.len);
errdefer closure.call.params.deinit();
for (args[0].list.items) |param| {
if (param != .identifier) return error.FuncInvalidParameter;
try closure.call.params.append(param);
}
// The closure gets its own interpreter scope holding cloned captures.
closure.interpreter = LispInterpreter.init(interpreter.allocator, null);
errdefer closure.interpreter.deinit();
for (args[1].list.items) |captured| {
if (captured != .identifier) return error.FuncInvalidParameter;
if (interpreter.get(captured.identifier.items)) |value| {
try closure.interpreter.scope.put(captured.identifier.items, try closure.interpreter.clone(value));
} else {
return error.FuncCaptureNoSuchIdentifier;
}
}
// Everything after params and captures is the body.
closure.call.body = try std.ArrayList(LispExpr).initCapacity(interpreter.allocator, args.len - 1);
errdefer closure.call.body.deinit();
for (args[2..]) |expr| try closure.call.body.append(expr);
// store() takes ownership of `closure` on success.
return try interpreter.store(LispExpr{ .func = closure });
}
/// (if cond then else...) — evaluates cond; a non-nil result evaluates
/// and returns the then branch. Otherwise every remaining expression is
/// evaluated and the last one is returned.
fn @"if"(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
    if (args.len < 3) return error.IfInvalidArguments;
    const condition = try interpreter.eval(args[0]);
    if (condition != .nil) return try interpreter.eval(args[1]);
    // Evaluate intermediate else expressions for their side effects;
    // this slice is empty when there are exactly three arguments.
    for (args[2 .. args.len - 1]) |branch| _ = try interpreter.eval(branch);
    return try interpreter.eval(args[args.len - 1]);
}
/// (while cond body...) — re-evaluates cond before each pass; while it
/// stays non-nil, every body expression runs for side effects.
/// Always returns nil.
fn @"while"(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
    if (args.len < 2) return error.WhileInvalidArguments;
    while (true) {
        const condition = try interpreter.eval(args[0]);
        if (condition == .nil) break;
        for (args[1..]) |body_expr| _ = try interpreter.eval(body_expr);
    }
    return LispExpr{ .nil = {} };
}
/// (list expr...) — collects the (already evaluated) arguments into a
/// fresh list value owned by the interpreter's store.
fn list(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
    var storage = try interpreter.allocator.create(std.ArrayList(LispExpr));
    errdefer interpreter.allocator.destroy(storage);
    storage.* = try std.ArrayList(LispExpr).initCapacity(interpreter.allocator, args.len);
    errdefer storage.deinit();
    // Capacity was reserved above; append keeps the error path uniform.
    for (args) |arg| try storage.append(arg);
    return try interpreter.store(LispExpr{ .list = storage });
}
/// (len list) — returns the number of items in a list.
fn len(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
    if (args.len != 1) return error.LenInvalidArguments;
    // Fixed: previously returned error.PushNotAList — a copy/paste slip;
    // the name now matches this operation like AtNotAList/PopNotAList do.
    if (args[0] != .list) return error.LenNotAList;
    return LispExpr{ .number = @intCast(isize, args[0].list.items.len) };
}
/// (at list index) — returns the element at a 0-based index; errors on
/// negative or out-of-range indices.
fn at(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
    if (args.len != 2) return error.AtInvalidArguments;
    if (args[0] != .list) return error.AtNotAList;
    if (args[1] != .number) return error.AtNotANumber;
    const items = args[0].list.items;
    const index = args[1].number;
    if (index < 0 or index >= items.len) return error.AtIndexOutOfBounds;
    return items[@intCast(usize, index)];
}
/// (push list expr...) — appends each value to the list in place and
/// returns the last value pushed.
fn push(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
    if (args.len < 2) return error.PushInvalidArguments;
    if (args[0] != .list) return error.PushNotAList;
    const target = args[0].list;
    for (args[1..]) |value| try target.append(value);
    return args[args.len - 1];
}
/// (pop list) — removes and returns the last item of a list.
fn pop(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
    if (args.len != 1) return error.PopInvalidArguments;
    if (args[0] != .list) return error.PopNotAList;
    // ArrayList.pop on an empty list is illegal; surface a lisp-level
    // error instead of crashing the interpreter.
    if (args[0].list.items.len == 0) return error.PopEmptyList;
    return args[0].list.pop();
}
/// (clone expr) — returns a copy of the argument made via the
/// interpreter's clone facility.
fn clone(interpreter: *LispInterpreter, args: []LispExpr) !LispExpr {
if (args.len != 1) return error.CloneInvalidArguments;
return interpreter.clone(args[0]);
}
|
src/library.zig
|
const device = @import("device.zig");
const dtb = @import("dtb.zig");
const log = @import("log.zig");
extern fn __delay(count: i32) void;
/// BCM2835 GPIO register offsets in bytes from the peripheral's base
/// address (BCM2835 ARM Peripherals manual, chapter 6).
pub const Reg = enum(u32) {
GPFSEL0 = 0x00, // GPIO Function Select 0
GPFSEL1 = 0x04, // GPIO Function Select 1
GPFSEL2 = 0x08, // GPIO Function Select 2
GPFSEL3 = 0x0C, // GPIO Function Select 3
GPFSEL4 = 0x10, // GPIO Function Select 4
GPFSEL5 = 0x14, // GPIO Function Select 5
GPSET0 = 0x1C, // GPIO Pin Output Set 0
GPSET1 = 0x20, // GPIO Pin Output Set 1
GPCLR0 = 0x28, // GPIO Pin Output Clear 0
GPCLR1 = 0x2C, // GPIO Pin Output Clear 1
GPLEV0 = 0x34, // GPIO Pin Level 0
GPLEV1 = 0x38, // GPIO Pin Level 1
GPEDS0 = 0x40, // GPIO Pin Event Detect Status 0
GPEDS1 = 0x44, // GPIO Pin Event Detect Status 1
GPREN0 = 0x4C, // GPIO Pin Rising Edge Detect Enable 0
GPREN1 = 0x50, // GPIO Pin Rising Edge Detect Enable 1
GPFEN0 = 0x58, // GPIO Pin Falling Edge Detect Enable 0
GPFEN1 = 0x5C, // GPIO Pin Falling Edge Detect Enable 1
GPHEN0 = 0x64, // GPIO Pin High Detect Enable 0
GPHEN1 = 0x68, // GPIO Pin High Detect Enable 1
GPLEN0 = 0x70, // GPIO Pin Low Detect Enable 0
GPLEN1 = 0x74, // GPIO Pin Low Detect Enable 1
GPAREN0 = 0x7C, // GPIO Pin Async. Rising Edge Detect 0
GPAREN1 = 0x80, // GPIO Pin Async. Rising Edge Detect 1
GPAFEN0 = 0x88, // GPIO Pin Async. Falling Edge Detect 0
GPAFEN1 = 0x8C, // GPIO Pin Async. Falling Edge Detect 1
GPPUD = 0x94, // GPIO Pin Pull-up/down Enable
GPPUDCLK0 = 0x98, // GPIO Pin Pull-up/down Enable Clock 0
GPPUDCLK1 = 0x9C, // GPIO Pin Pull-up/down Enable Clock 1
};
/// GPPUD control-signal values applied by GPIO.controlPull: disable the
/// pull, or select pull-down / pull-up.
pub const PUD = enum(u2) {
Disable = 0b00,
EnablePullDownControl = 0b01,
EnablePullUpControl = 0b10,
};
/// Memory-mapped BCM2835 GPIO controller. controlPull follows the exact
/// write/delay sequence the datasheet mandates — do not reorder it.
pub const GPIO = struct {
const Self = @This();
dev: device.MMIOPeripheralDevice(Reg),
/// Applies the pull `control` to the pins selected by `mask`, clocked
/// in through `reg` (GPPUDCLK0 or GPPUDCLK1).
pub fn controlPull(self: Self, reg: Reg, control: PUD, mask: u32) void {
// Per the BCM2835 ARM Peripherals manual:
// "1. Write to GPPUD to set the required control signal (i.e. Pull-up
// or Pull-Down or neither to remove the current Pull-up/down)"
self.dev.writeReg(.GPPUD, @enumToInt(control));
// "2. Wait 150 cycles – this provides the required set-up time for the
// control signal"
__delay(150);
// "3. Write to GPPUDCLK0/1 to clock the control signal into the GPIO
// pads you wish to modify – NOTE only the pads which receive a clock
// will be modified, all others will retain their previous state"
self.dev.writeReg(reg, mask);
// "4. Wait 150 cycles – this provides the required hold time for the
// control signal"
__delay(150);
// "5. Write to GPPUD to remove the control signal"
self.dev.writeReg(.GPPUD, 0);
// "6. Write to GPPUDCLK0/1 to remove the clock"
self.dev.writeReg(reg, 0);
}
/// Creates a GPIO instance from its device-tree node.
pub fn probe(node: dtb.Node) !Self {
return Self{ .dev = try device.MMIOPeripheralDevice(Reg).init(node) };
}
};
|
gpio.zig
|
const std = @import("std");
const gpa = std.heap.c_allocator;
const zfetch = @import("zfetch");
const zigmod = @import("../../lib.zig");
const u = @import("./../../util/index.zig");
const zpm = @import("./../zpm.zig");
//
//
/// `zigmod zpm add <name>`: looks the package up on the zpm registry and
/// appends a dependency entry to the zig.mod file in the current directory.
pub fn execute(args: [][]u8) !void {
    // Guard args[0] below: without a package name there is nothing to do.
    if (args.len == 0) {
        u.fail("missing package name, usage: zigmod zpm add <name>", .{});
    }
    const url = try std.mem.join(gpa, "/", &.{ zpm.server_root, "packages" });
    const val = try zpm.server_fetchArray(url);
    // Find the requested package in the registry listing, or bail out.
    const found = blk: {
        for (val) |pkg| {
            if (std.mem.eql(u8, pkg.name, args[0])) {
                break :blk pkg;
            }
        }
        u.fail("no package with name '{s}' found", .{args[0]});
    };
    const self_module = try zigmod.ModFile.init(gpa);
    // Warn (but do not abort) when the package is already referenced.
    for (self_module.deps) |dep| {
        if (std.mem.eql(u8, dep.name, found.name)) {
            std.log.warn("dependency with name '{s}' already exists in your dependencies", .{found.name});
        }
    }
    for (self_module.rootdeps) |dep| {
        if (std.mem.eql(u8, dep.name, found.name)) {
            std.log.warn("dependency with name '{s}' already exists in your root_dependencies", .{found.name});
        }
    }
    for (self_module.builddeps) |dep| {
        if (std.mem.eql(u8, dep.name, found.name)) {
            std.log.warn("dependency with name '{s}' already exists in your build_dependencies", .{found.name});
        }
    }
    // If the repo ships its own manifest (zig.mod or zigmod.yml) we must
    // not write name/main hints. Probe both candidates over HTTP, stopping
    // at the first hit (the old code always issued both requests).
    const has_manifest = blk: {
        for ([_][]const u8{ "zig.mod", "zigmod.yml" }) |manifest| {
            const _url = try std.mem.join(gpa, "/", &.{ found.git, "blob", "HEAD", manifest });
            const _req = try zfetch.Request.init(gpa, _url, null);
            defer _req.deinit();
            try _req.do(.GET, null, null);
            if (_req.status.code == 200) break :blk true;
        }
        break :blk false;
    };
    // Append the new entry at the end of the local zig.mod.
    const file = try zigmod.ModFile.openFile(std.fs.cwd(), .{ .mode = .read_write });
    defer file.close();
    try file.seekTo(try file.getEndPos());
    const file_w = file.writer();
    try file_w.print("\n", .{});
    try file_w.print(" - src: git {s}\n", .{u.trim_suffix(found.git, ".git")});
    if (!has_manifest) {
        try file_w.print(" name: {s}\n", .{found.name});
        try file_w.print(" main: {s}\n", .{found.root_file[1..]});
    }
    std.log.info("Successfully added package {s} by {s}", .{ found.name, found.author });
}
|
src/cmd/zpm/add.zig
|
const std = @import("std");
const fmt = std.fmt;
const io = std.io;
const mem = std.mem;
const warn = std.debug.warn;
/// Parse a hexadecimal string into a u128.
/// An optional "0x" prefix is accepted, and dash characters ('-') are
/// ignored so grouped values such as "dead-beef" parse as 0xdeadbeef.
/// Returns an error when any character is not a valid hex digit.
/// NOTE: empty input (or input of only dashes) yields 0; u128 overflow is
/// not detected and will trap in safe build modes.
fn hex2int(hexChars: []const u8) !u128 {
    var hexSlice = hexChars[0..];
    // Strip an optional "0x" prefix. The length guard matters: slicing
    // [0..2] unconditionally is out of bounds for inputs shorter than two
    // characters (e.g. "" or "f").
    if (hexSlice.len >= 2 and mem.eql(u8, hexSlice[0..2], "0x")) {
        hexSlice = hexSlice[2..];
    }
    // NOTE: If not for the fact that we need to ignore dashes ('-') in the
    // input, we could replace this logic with a call to fmt.parseInt()
    var val: u128 = 0;
    for (hexSlice) |c| {
        if (c == '-') {
            continue;
        }
        const charVal = try fmt.charToDigit(c, 16);
        val = (val * 16) + charVal;
    }
    return val;
}
/// Parse `input` as a hex value and print its decimal form to `outstream`.
/// Both parse failures and write failures are reported on stderr instead of
/// being propagated (best-effort per-line processing).
/// Uses the pre-0.6 std.io.OutStream API.
fn processInput(outstream: *io.OutStream(std.os.WriteError), input: []u8) void {
if (hex2int(input)) |hexValue| {
if (outstream.print("{}\n", hexValue)) {} else |err| {
// Write errors are reported and then ignored on purpose.
warn("Failed to write to stdout");
}
} else |parseErr| {
warn("Failed to parse hex value {}: {}\n", input, parseErr);
}
}
/// Entry point: convert hex values supplied as command-line arguments and/or
/// piped via stdin (one value per line) to decimal on stdout.
/// Written against a pre-0.6 Zig std (io.getStdIn returning !File,
/// std.Buffer, direct_allocator).
pub fn main() !void {
// Arena holds argument and line buffers; freed all at once on exit.
var arenaAlloc = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
defer arenaAlloc.deinit();
const alloc = &arenaAlloc.allocator;
const stdinFile = try io.getStdIn();
const stdoutFile = try io.getStdOut();
var stdin = stdinFile.inStream();
var stdout = stdoutFile.outStream();
var args = std.process.args();
// First argument is the program name; kept only for the usage message.
const program = args.next(alloc);
var argsPresent = false;
while (args.next(alloc)) |argErrorUnion| {
if (argErrorUnion) |hexArg| {
processInput(&stdout.stream, hexArg);
argsPresent = true;
} else |err| {
warn("Error when attempting to read argument: {}\n", err);
break;
}
}
// Only read stdin when it is not a terminal, i.e. data was piped in.
if (!std.os.isatty(stdinFile.handle)) {
var buf = try std.Buffer.initSize(alloc, 1024);
defer buf.deinit();
while (io.readLineFrom(&stdin.stream, &buf)) |hexArg| {
processInput(&stdout.stream, hexArg);
} else |err| {
// EndOfStream is the normal loop-exit condition, not a failure.
if (err != error.EndOfStream) {
warn("Error while reading from stdin: {}\n", err);
}
}
} else if (!argsPresent) {
warn("Usage: {} INPUT...\n\nInput can also be piped in via stdin and will be processed one line at a time.\n", program);
}
}
|
zig/src/hex2dec.zig
|
const std = @import("std");
const os = std.os;
const builtin = @import("builtin");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const page_size = std.mem.page_size;
/// Integer type for pointing to slots in a small allocation.
/// One extra bit beyond log2(page_size) so the alloc cursor can also hold the
/// one-past-the-end value (slot_count). Uses the pre-0.6 @IntType builtin.
const SlotIndex = @IntType(false, std.math.log2(page_size) + 1);
/// Comptime configuration for GeneralPurposeDebugAllocator.
pub const Config = struct {
/// Number of stack frames to capture.
stack_trace_frames: usize = 4,
/// Whether the allocator is configured to accept a backing
/// allocator used for the underlying memory.
/// false means it will make syscalls directly, and
/// the create() function takes no arguments.
/// If this is set to true, create() takes a *Allocator parameter.
backing_allocator: bool = false,
/// Whether to use mprotect to take away write permission
/// from allocator internal state to prevent allocator state
/// corruption. Enabling this catches bugs but is slower.
/// Incompatible with backing_allocator (enforced at comptime).
memory_protection: bool = true,
};
/// Debug allocator. Small objects are served from page-sized "buckets", one
/// circular doubly-linked bucket list per power-of-two size class; large
/// objects go through sysAlloc (mmap or the backing allocator) and are
/// tracked in a hash table. Stack traces are captured at both alloc and free
/// time so leaks and double frees can be reported with full context.
pub fn GeneralPurposeDebugAllocator(comptime config: Config) type {
return struct {
allocator: Allocator,
backing_allocator: BackingAllocator,
// "Current" bucket per size class; each entry is a node of a circular
// doubly-linked list.
buckets: [small_bucket_count]?*BucketHeader,
simple_allocator: SimpleAllocatorType,
large_allocations: LargeAllocTable,
total_requested_bytes: usize,
requested_memory_limit: usize,
comptime {
if (config.backing_allocator and config.memory_protection) {
@compileError("Memory protection is unavailable when using a backing allocator");
}
}
const Self = @This();
const BackingAllocator = if (config.backing_allocator) *Allocator else void;
const SimpleAllocatorType = if (config.backing_allocator) void else SimpleAllocator;
const stack_n = config.stack_trace_frames;
const one_trace_size = @sizeOf(usize) * stack_n;
// Two traces per slot: one captured at allocation, one at free.
const traces_per_slot = 2;
pub const Error = std.mem.Allocator.Error;
const small_bucket_count = std.math.log2(page_size);
// Largest size served from buckets: half a page. Anything bigger is a
// "large" (direct) allocation.
const largest_bucket_object_size = 1 << (small_bucket_count - 1);
/// Metadata for one direct (non-bucket) allocation.
const LargeAlloc = struct {
bytes: []u8,
stack_addresses: [stack_n]usize,
/// Print the stored allocation stack trace; trailing zero entries
/// mark unused capture slots.
fn dumpStackTrace(self: *LargeAlloc) void {
var len: usize = 0;
while (len < stack_n and self.stack_addresses[len] != 0) {
len += 1;
}
const stack_trace = builtin.StackTrace{
.instruction_addresses = &self.stack_addresses,
.index = len,
};
std.debug.dumpStackTrace(stack_trace);
}
};
const LargeAllocTable = std.HashMap(usize, LargeAlloc, hash_addr, eql_addr);
/// Allocate and initialise the allocator state itself -- from the given
/// backing allocator, or directly from sysAlloc when there is none --
/// then make the state page read-only (if memory protection is on).
pub fn createWithAllocator(backing_allocator: BackingAllocator) !*Self {
const self = blk: {
if (config.backing_allocator) {
break :blk try backing_allocator.create(Self);
} else {
// sysAlloc ignores `self` in this configuration, hence `undefined`.
const self_bytes = try sysAlloc(undefined, @sizeOf(Self));
break :blk @ptrCast(*Self, self_bytes.ptr);
}
};
self.* = Self{
.allocator = Allocator{
.reallocFn = realloc,
.shrinkFn = shrink,
},
.backing_allocator = backing_allocator,
.buckets = [1]?*BucketHeader{null} ** small_bucket_count,
.simple_allocator = if (config.backing_allocator) {} else SimpleAllocator.init(),
.large_allocations = LargeAllocTable.init(if (config.backing_allocator)
backing_allocator
else
&self.simple_allocator.allocator),
.total_requested_bytes = 0,
.requested_memory_limit = std.math.maxInt(usize),
};
try self.mprotectInit(os.PROT_READ);
return self;
}
/// Convenience constructor for the configuration without a backing
/// allocator; compile error otherwise.
pub fn create() !*Self {
if (config.backing_allocator) {
@compileError("GeneralPurposeDebugAllocator has backing_allocator enabled therefore client must call createWithAllocator()");
}
return createWithAllocator({});
}
// Bucket: In memory, in order:
// * BucketHeader
// * bucket_used_bits: [N]u8, // 1 bit for every slot; 1 byte for every 8 slots
// * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation
const BucketHeader = struct {
prev: *BucketHeader,
next: *BucketHeader,
page: [*]align(page_size) u8,
alloc_cursor: SlotIndex,
used_count: SlotIndex,
/// Pointer to the index-th byte of the used-bits array that directly
/// follows the header in memory.
fn usedBits(bucket: *BucketHeader, index: usize) *u8 {
return @intToPtr(*u8, @ptrToInt(bucket) + @sizeOf(BucketHeader) + index);
}
/// Address of the stored stack trace for a given slot and trace kind
/// (Alloc or Free) within this bucket's trailing trace array.
fn stackTracePtr(
bucket: *BucketHeader,
size_class: usize,
slot_index: SlotIndex,
trace_kind: TraceKind,
) *[stack_n]usize {
const start_ptr = @ptrCast([*]u8, bucket) + bucketStackFramesStart(size_class);
const addr = start_ptr + one_trace_size * traces_per_slot * slot_index +
@enumToInt(trace_kind) * usize(one_trace_size);
return @ptrCast(*[stack_n]usize, addr);
}
/// Capture the current stack trace into the slot's Alloc or Free
/// trace storage.
fn captureStackTrace(
bucket: *BucketHeader,
return_address: usize,
size_class: usize,
slot_index: SlotIndex,
trace_kind: TraceKind,
) void {
// Initialize them to 0. When determining the count we must look
// for non zero addresses.
const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind);
collectStackTrace(return_address, stack_addresses);
}
};
/// Reconstruct a StackTrace view over the addresses stored for a slot;
/// the captured length is delimited by the first zero entry.
fn bucketStackTrace(
bucket: *BucketHeader,
size_class: usize,
slot_index: SlotIndex,
trace_kind: TraceKind,
) builtin.StackTrace {
const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind);
var len: usize = 0;
while (len < stack_n and stack_addresses[len] != 0) {
len += 1;
}
return builtin.StackTrace{
.instruction_addresses = stack_addresses,
.index = len,
};
}
/// Byte offset from the bucket header to the stack-trace array: header
/// plus used-bits, rounded up to usize alignment.
fn bucketStackFramesStart(size_class: usize) usize {
return std.mem.alignForward(
@sizeOf(BucketHeader) + usedBitsCount(size_class),
@alignOf(usize),
);
}
/// Total metadata size of a bucket for the given size class (header +
/// used bits + all stack traces).
fn bucketSize(size_class: usize) usize {
const slot_count = @divExact(page_size, size_class);
return bucketStackFramesStart(size_class) + one_trace_size * traces_per_slot * slot_count;
}
/// Number of bytes of used-bits needed for a size class (1 bit per slot,
/// minimum one byte).
fn usedBitsCount(size_class: usize) usize {
const slot_count = @divExact(page_size, size_class);
if (slot_count < 8) return 1;
return @divExact(slot_count, 8);
}
/// Set the protection of the allocator's own state page; used once at
/// construction. OutOfMemory is the only error surfaced to callers.
fn mprotectInit(self: *Self, protection: u32) Error!void {
if (!config.memory_protection) return;
const slice = @intToPtr([*]align(page_size) u8, @ptrToInt(self))[0..page_size];
os.mprotect(slice, protection) catch |e| switch (e) {
error.AccessDenied => unreachable,
error.OutOfMemory => return error.OutOfMemory,
error.Unexpected => return error.OutOfMemory,
};
}
/// Toggle write permission on the allocator's own state page. Must not
/// fail after init, hence `catch unreachable`.
fn mprotect(self: *Self, protection: u32) void {
if (!config.memory_protection) return;
const slice = @intToPtr([*]align(page_size) u8, @ptrToInt(self))[0..page_size];
os.mprotect(slice, protection) catch unreachable;
}
/// Report (to stderr) a leak for every slot still marked used in this
/// bucket, including the allocation stack trace.
fn detectLeaksInBucket(
bucket: *BucketHeader,
size_class: usize,
used_bits_count: usize,
) void {
var used_bits_byte: usize = 0;
while (used_bits_byte < used_bits_count) : (used_bits_byte += 1) {
const used_byte = bucket.usedBits(used_bits_byte).*;
if (used_byte != 0) {
var bit_index: u3 = 0;
while (true) : (bit_index += 1) {
const is_used = @truncate(u1, used_byte >> bit_index) != 0;
if (is_used) {
std.debug.warn("\nMemory leak detected:\n");
const slot_index = @intCast(SlotIndex, used_bits_byte * 8 + bit_index);
const stack_trace = bucketStackTrace(
bucket,
size_class,
slot_index,
.Alloc,
);
std.debug.dumpStackTrace(stack_trace);
}
// u3 cannot represent 8; stop before the increment would wrap.
if (bit_index == std.math.maxInt(u3))
break;
}
}
}
}
/// Tear down the allocator: report every leaked bucket slot and large
/// allocation, then release the allocator's own state memory.
pub fn destroy(self: *Self) void {
for (self.buckets) |optional_bucket, bucket_i| {
const first_bucket = optional_bucket orelse continue;
const size_class = usize(1) << @intCast(u6, bucket_i);
const used_bits_count = usedBitsCount(size_class);
var bucket = first_bucket;
// Walk the circular list exactly once.
while (true) {
detectLeaksInBucket(bucket, size_class, used_bits_count);
bucket = bucket.next;
if (bucket == first_bucket)
break;
}
}
var large_it = self.large_allocations.iterator();
while (large_it.next()) |large_alloc| {
std.debug.warn("\nMemory leak detected:\n");
large_alloc.value.dumpStackTrace();
}
if (!config.backing_allocator)
self.simple_allocator.deinit(); // Free large_allocations memory.
self.sysFree(@ptrCast([*]u8, self)[0..@sizeOf(Self)]);
}
/// Allocate a large object directly. Over-allocates by `alignment` when
/// needed, then unmaps the unused head/tail so only the aligned range
/// stays mapped. Records the allocation in the large-alloc table.
fn directAlloc(
self: *Self,
n: usize,
alignment: u29,
first_trace_addr: usize,
) Error![]u8 {
const alloc_size = if (alignment <= page_size) n else n + alignment;
const slice = try sysAlloc(self, alloc_size);
errdefer self.sysFree(slice);
if (alloc_size == n) {
try self.trackLargeAlloc(slice, first_trace_addr);
return slice;
}
const addr = @ptrToInt(slice.ptr);
const aligned_addr = std.mem.alignForward(addr, alignment);
// Unmap the extra bytes that were only requested in order to guarantee
// that the range of memory we were provided had a proper alignment in
// it somewhere. The extra bytes could be at the beginning, or end, or both.
const unused_start = slice[0 .. aligned_addr - addr];
if (unused_start.len != 0) {
self.sysFree(unused_start);
}
const aligned_end_addr = std.mem.alignForward(aligned_addr + n, page_size);
const unused_end_len = @ptrToInt(slice.ptr + slice.len) - aligned_end_addr;
const unused_end = @intToPtr([*]u8, aligned_end_addr)[0..unused_end_len];
if (unused_end.len != 0) {
self.sysFree(unused_end);
}
const result = @intToPtr([*]u8, aligned_addr)[0..n];
try self.trackLargeAlloc(result, first_trace_addr);
return result;
}
/// Toggle write permission on the large-allocation table's storage
/// (no-op when a backing allocator owns that memory).
fn mprotectLargeAllocs(self: *Self, flags: u32) void {
if (!config.memory_protection) return;
if (config.backing_allocator) return;
self.simple_allocator.mprotect(flags);
}
/// Insert a new large allocation into the tracking table together with
/// its allocation stack trace.
fn trackLargeAlloc(
self: *Self,
bytes: []u8,
first_trace_addr: usize,
) !void {
self.mprotectLargeAllocs(os.PROT_WRITE | os.PROT_READ);
defer self.mprotectLargeAllocs(os.PROT_READ);
const gop = try self.large_allocations.getOrPut(@ptrToInt(bytes.ptr));
if (gop.found_existing) {
@panic("OS provided unexpected memory address");
}
gop.kv.value.bytes = bytes;
collectStackTrace(first_trace_addr, &gop.kv.value.stack_addresses);
}
/// Zero the trace buffer then capture the current stack into it; readers
/// rely on the zero fill to determine the captured length.
fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
std.mem.set(usize, addresses, 0);
var stack_trace = builtin.StackTrace{
.instruction_addresses = addresses,
.index = 0,
};
std.debug.captureStackTrace(first_trace_addr, &stack_trace);
}
/// Reserve one slot in a bucket of the given size class, creating a new
/// bucket when every existing one is full. Marks the slot used and
/// records the allocation stack trace. Returns the slot's address.
fn allocSlot(
self: *Self,
size_class: usize,
trace_addr: usize,
) Error![*]u8 {
const bucket_index = std.math.log2(size_class);
const first_bucket = self.buckets[bucket_index] orelse try self.createBucket(
size_class,
bucket_index,
);
var bucket = first_bucket;
const slot_count = @divExact(page_size, size_class);
while (bucket.alloc_cursor == slot_count) {
const prev_bucket = bucket;
bucket = prev_bucket.next;
if (bucket == first_bucket) {
// make a new one
bucket = try self.createBucket(size_class, bucket_index);
bucket.prev = prev_bucket;
bucket.next = prev_bucket.next;
prev_bucket.next = bucket;
bucket.next.prev = bucket;
}
}
// change the allocator's current bucket to be this one
self.buckets[bucket_index] = bucket;
const slot_index = bucket.alloc_cursor;
bucket.alloc_cursor += 1;
var used_bits_byte = bucket.usedBits(slot_index / 8);
const used_bit_index: u3 = @intCast(u3, slot_index % 8); // TODO cast should be unnecessary
used_bits_byte.* |= (u8(1) << used_bit_index);
bucket.used_count += 1;
bucket.captureStackTrace(trace_addr, size_class, slot_index, .Alloc);
return bucket.page + slot_index * size_class;
}
/// Find the bucket whose page contains `addr`, walking the circular list
/// backwards; also advances the class's "current" pointer while walking.
/// Returns null when the address belongs to no bucket of this class.
fn searchBucket(
self: *Self,
bucket_index: usize,
addr: usize,
) ?*BucketHeader {
const first_bucket = self.buckets[bucket_index] orelse return null;
var bucket = first_bucket;
while (true) {
const in_bucket_range = (addr >= @ptrToInt(bucket.page) and
addr < @ptrToInt(bucket.page) + page_size);
if (in_bucket_range) return bucket;
bucket = bucket.prev;
if (bucket == first_bucket) {
return null;
}
self.buckets[bucket_index] = bucket;
}
}
/// Release one slot: record the Free stack trace, clear the used bit,
/// and when the bucket becomes empty unlink it and unmap its page and
/// metadata.
fn freeSlot(
self: *Self,
bucket: *BucketHeader,
bucket_index: usize,
size_class: usize,
slot_index: SlotIndex,
used_byte: *u8,
used_bit_index: u3,
trace_addr: usize,
) void {
// Capture stack trace to be the "first free", in case a double free happens.
bucket.captureStackTrace(@returnAddress(), size_class, slot_index, .Free);
used_byte.* &= ~(u8(1) << used_bit_index);
bucket.used_count -= 1;
if (bucket.used_count == 0) {
if (bucket.next == bucket) {
// it's the only bucket and therefore the current one
self.buckets[bucket_index] = null;
} else {
bucket.next.prev = bucket.prev;
bucket.prev.next = bucket.next;
self.buckets[bucket_index] = bucket.prev;
}
self.sysFree(bucket.page[0..page_size]);
const bucket_size = bucketSize(size_class);
const aligned_bucket_size = std.mem.alignForward(bucket_size, page_size);
self.sysFree(@ptrCast([*]u8, bucket)[0..aligned_bucket_size]);
}
}
/// Distinguishes a shrink (must succeed) from a realloc (may fail with
/// OutOfMemory) in the shared resize paths below.
const ResizeBehavior = enum {
shrink,
realloc,
};
/// Resize a large allocation in place. Shrinking past page boundaries
/// unmaps the tail; growing is only legal for shrink behavior (realloc
/// growth must go through a new allocation and reports OutOfMemory).
fn directRealloc(
self: *Self,
old_mem: []u8,
new_size: usize,
return_addr: usize,
behavior: ResizeBehavior,
) Error![]u8 {
self.mprotectLargeAllocs(os.PROT_WRITE | os.PROT_READ);
defer self.mprotectLargeAllocs(os.PROT_READ);
const old_kv = self.large_allocations.get(@ptrToInt(old_mem.ptr)).?;
const result = old_mem.ptr[0..new_size];
// TODO test if the old_mem.len is correct
old_kv.value.bytes = result;
collectStackTrace(return_addr, &old_kv.value.stack_addresses);
const old_end_page = std.mem.alignForward(old_mem.len, page_size);
const new_end_page = std.mem.alignForward(new_size, page_size);
if (new_end_page < old_end_page) {
self.sysFree(old_mem.ptr[new_end_page..old_end_page]);
} else if (behavior == .realloc) {
return error.OutOfMemory;
}
return result;
}
/// This function assumes the object is in the large object storage regardless
/// of the parameters.
fn resizeLarge(
self: *Self,
old_mem: []u8,
old_align: u29,
new_size: usize,
new_align: u29,
return_addr: usize,
behavior: ResizeBehavior,
) Error![]u8 {
if (new_size == 0) {
self.directFree(old_mem);
return old_mem[0..0];
} else if (new_size > old_mem.len or new_align > old_align) {
self.mprotectLargeAllocs(os.PROT_WRITE | os.PROT_READ);
defer self.mprotectLargeAllocs(os.PROT_READ);
const old_kv = self.large_allocations.get(@ptrToInt(old_mem.ptr)).?;
const end_page = std.mem.alignForward(old_kv.value.bytes.len, page_size);
// Grow in place when the request still fits in the mapped pages and
// the alignment is already satisfied.
if (new_size <= end_page and (new_align <= old_align or
isAligned(@ptrToInt(old_mem.ptr), new_align)))
{
const result = old_mem.ptr[0..new_size];
// TODO test if the old_mem.len is correct
old_kv.value.bytes = result;
collectStackTrace(return_addr, &old_kv.value.stack_addresses);
return result;
}
// Otherwise: allocate fresh, copy, free the old mapping.
const new_mem = try self.directAlloc(new_size, new_align, return_addr);
@memcpy(new_mem.ptr, old_mem.ptr, std.math.min(old_mem.len, new_mem.len));
self.directFree(old_mem);
return new_mem;
} else {
const new_aligned_size = std.math.max(new_size, new_align);
if (new_aligned_size > largest_bucket_object_size) {
return self.directRealloc(old_mem, new_size, return_addr, behavior);
} else {
// Shrinking below the large threshold: migrate into a bucket,
// falling back to in-place resize if the slot allocation fails.
const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size);
const ptr = self.allocSlot(new_size_class, return_addr) catch |e| switch (e) {
error.OutOfMemory => return self.directRealloc(
old_mem,
new_size,
return_addr,
behavior,
),
};
@memcpy(ptr, old_mem.ptr, new_size);
self.directFree(old_mem);
return ptr[0..new_size];
}
}
}
/// Set a cap on total requested bytes; realloc requests that would exceed
/// it fail with OutOfMemory.
pub fn setRequestedMemoryLimit(self: *Self, limit: usize) void {
self.mprotect(os.PROT_WRITE | os.PROT_READ);
defer self.mprotect(os.PROT_READ);
self.requested_memory_limit = limit;
}
/// Core implementation behind both the realloc and shrink vtable entries:
/// accounting, double-free detection, and migration between the bucket
/// and large-object storage tiers.
fn reallocOrShrink(
allocator: *Allocator,
old_mem: []u8,
old_align: u29,
new_size: usize,
new_align: u29,
return_addr: usize,
behavior: ResizeBehavior,
) Error![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
self.mprotect(os.PROT_WRITE | os.PROT_READ);
defer self.mprotect(os.PROT_READ);
// Enforce the (optional) requested-memory cap.
const prev_req_bytes = self.total_requested_bytes;
const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
if (new_req_bytes > prev_req_bytes and
new_req_bytes > self.requested_memory_limit)
{
return error.OutOfMemory;
}
self.total_requested_bytes = new_req_bytes;
errdefer self.total_requested_bytes = prev_req_bytes;
// Fresh allocation (no existing memory).
if (old_mem.len == 0) {
assert(behavior == .realloc);
const new_aligned_size = std.math.max(new_size, new_align);
if (new_aligned_size > largest_bucket_object_size) {
return self.directAlloc(new_size, new_align, return_addr);
} else {
const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size);
const ptr = try self.allocSlot(new_size_class, return_addr);
return ptr[0..new_size];
}
}
const aligned_size = std.math.max(old_mem.len, old_align);
if (aligned_size > largest_bucket_object_size) {
return self.resizeLarge(old_mem, old_align, new_size, new_align, return_addr, behavior);
}
// Locate the bucket owning old_mem, scanning this and larger classes;
// not found means it was a large allocation after all.
const size_class = up_to_nearest_power_of_2(usize, aligned_size);
var bucket_index = std.math.log2(size_class);
const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) {
if (self.searchBucket(bucket_index, @ptrToInt(old_mem.ptr))) |bucket| {
break bucket;
}
} else {
return self.resizeLarge(old_mem, old_align, new_size, new_align, return_addr, behavior);
};
const byte_offset = @ptrToInt(old_mem.ptr) - @ptrToInt(bucket.page);
const slot_index = @intCast(SlotIndex, byte_offset / size_class);
const used_byte_index = slot_index / 8;
const used_bit_index = @intCast(u3, slot_index % 8);
const used_byte = bucket.usedBits(used_byte_index);
const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0;
if (!is_used) {
// print allocation stack trace
std.debug.warn("\nDouble free detected, allocated here:\n");
const alloc_stack_trace = bucketStackTrace(bucket, size_class, slot_index, .Alloc);
std.debug.dumpStackTrace(alloc_stack_trace);
std.debug.warn("\nFirst free here:\n");
const free_stack_trace = bucketStackTrace(bucket, size_class, slot_index, .Free);
std.debug.dumpStackTrace(free_stack_trace);
@panic("\nSecond free here:");
}
// Plain free.
if (new_size == 0) {
self.freeSlot(
bucket,
bucket_index,
size_class,
slot_index,
used_byte,
used_bit_index,
return_addr,
);
return old_mem[0..0];
}
const new_aligned_size = std.math.max(new_size, new_align);
const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size);
// Same size class: the existing slot already fits the new size.
if (size_class == new_size_class) {
return old_mem.ptr[0..new_size];
}
// Growing past the bucket threshold: move to large-object storage.
if (new_aligned_size > largest_bucket_object_size) {
self.mprotectLargeAllocs(os.PROT_WRITE | os.PROT_READ);
defer self.mprotectLargeAllocs(os.PROT_READ);
const new_mem = try self.directAlloc(new_size, new_align, return_addr);
@memcpy(new_mem.ptr, old_mem.ptr, old_mem.len);
self.freeSlot(
bucket,
bucket_index,
size_class,
slot_index,
used_byte,
used_bit_index,
return_addr,
);
return new_mem;
}
// Migrating to a smaller size class.
const ptr = self.allocSlot(new_size_class, return_addr) catch |e| switch (e) {
error.OutOfMemory => switch (behavior) {
.realloc => return error.OutOfMemory,
// A shrink must not fail; keep the old (oversized) slot.
.shrink => return old_mem.ptr[0..new_size],
},
};
@memcpy(ptr, old_mem.ptr, new_size);
self.freeSlot(
bucket,
bucket_index,
size_class,
slot_index,
used_byte,
used_bit_index,
return_addr,
);
return ptr[0..new_size];
}
/// Free a large allocation, panicking (with both stack traces) when the
/// freed length does not match the recorded allocation length.
fn directFree(self: *Self, bytes: []u8) void {
self.mprotectLargeAllocs(os.PROT_WRITE | os.PROT_READ);
defer self.mprotectLargeAllocs(os.PROT_READ);
var kv = self.large_allocations.remove(@ptrToInt(bytes.ptr)).?;
if (bytes.len != kv.value.bytes.len) {
std.debug.warn(
"\nAllocation size {} bytes does not match free size {}. Allocated here:\n",
kv.value.bytes.len,
bytes.len,
);
kv.value.dumpStackTrace();
@panic("\nFree here:");
}
self.sysFree(bytes);
}
/// Allocator vtable entry: shrink never fails, so any error from the
/// shared implementation is a bug (`catch unreachable`).
fn shrink(
allocator: *Allocator,
old_mem: []u8,
old_align: u29,
new_size: usize,
new_align: u29,
) []u8 {
return reallocOrShrink(
allocator,
old_mem,
old_align,
new_size,
new_align,
@returnAddress(),
.shrink,
) catch unreachable;
}
/// Allocator vtable entry for realloc; delegates to reallocOrShrink.
fn realloc(
allocator: *Allocator,
old_mem: []u8,
old_align: u29,
new_size: usize,
new_align: u29,
) Error![]u8 {
return reallocOrShrink(
allocator,
old_mem,
old_align,
new_size,
new_align,
@returnAddress(),
.realloc,
);
}
/// Allocate and initialise a new bucket (data page + metadata block) and
/// make it the current bucket for its size class.
fn createBucket(
self: *Self,
size_class: usize,
bucket_index: usize,
) Error!*BucketHeader {
const page = try sysAlloc(self, page_size);
errdefer self.sysFree(page);
const bucket_size = bucketSize(size_class);
const aligned_bucket_size = std.mem.alignForward(bucket_size, page_size);
const bucket_bytes = try sysAlloc(self, aligned_bucket_size);
const ptr = @ptrCast(*BucketHeader, bucket_bytes.ptr);
ptr.* = BucketHeader{
.prev = ptr,
.next = ptr,
.page = page.ptr,
.alloc_cursor = 0,
.used_count = 0,
};
self.buckets[bucket_index] = ptr;
// Set the used bits to all zeroes
@memset((*[1]u8)(ptr.usedBits(0)), 0, usedBitsCount(size_class));
return ptr;
}
// Hint passed to mmap so successive mappings tend to be contiguous;
// updated with CAS since plain mmap has no ordering of its own.
var next_addr_hint: ?[*]align(page_size) u8 = null;
/// Obtain page-aligned memory from the backing allocator or, when there
/// is none, directly from mmap. All failures collapse to OutOfMemory.
fn sysAlloc(self: *Self, len: usize) Error![]align(page_size) u8 {
if (config.backing_allocator) {
return self.backing_allocator.alignedAlloc(u8, page_size, len);
} else {
const perms = os.PROT_READ | os.PROT_WRITE;
const flags = os.MAP_PRIVATE | os.MAP_ANONYMOUS;
const hint = @atomicLoad(@typeOf(next_addr_hint), &next_addr_hint, .SeqCst);
const result = os.mmap(hint, len, perms, flags, -1, 0) catch return error.OutOfMemory;
const new_hint = result.ptr + std.mem.alignForward(result.len, page_size);
_ = @cmpxchgStrong(@typeOf(next_addr_hint), &next_addr_hint, hint, new_hint, .SeqCst, .SeqCst);
return result;
}
}
/// Return memory to the backing allocator or unmap it.
fn sysFree(self: *Self, old_mem: []u8) void {
if (config.backing_allocator) {
return self.backing_allocator.free(old_mem);
} else {
// This call cannot fail because we are giving the full memory back (not splitting a
// vm page).
os.munmap(@alignCast(page_size, old_mem));
}
}
/// Minimal allocator used for the large-allocation table's storage when
/// no backing allocator is configured. Supports exactly one active
/// allocation at a time (asserts old_mem is empty on realloc).
const SimpleAllocator = struct {
allocator: Allocator,
active_allocation: []u8,
fn init() SimpleAllocator {
return SimpleAllocator{
.allocator = Allocator{
.reallocFn = realloc,
.shrinkFn = shrink,
},
.active_allocation = (([*]u8)(undefined))[0..0],
};
}
fn deinit(self: SimpleAllocator) void {
if (self.active_allocation.len == 0) return;
comptime assert(!config.backing_allocator);
sysFree(undefined, self.active_allocation);
}
fn realloc(
allocator: *Allocator,
old_mem: []u8,
old_align: u29,
new_size: usize,
new_align: u29,
) error{OutOfMemory}![]u8 {
assert(old_mem.len == 0);
assert(new_align < page_size);
comptime assert(!config.backing_allocator);
const self = @fieldParentPtr(SimpleAllocator, "allocator", allocator);
const result = try sysAlloc(undefined, new_size);
self.active_allocation = result;
return result;
}
fn shrink(
allocator: *Allocator,
old_mem: []u8,
old_align: u29,
new_size: usize,
new_align: u29,
) []u8 {
assert(new_size == 0);
comptime assert(!config.backing_allocator);
sysFree(undefined, old_mem);
return old_mem[0..0];
}
/// Applies to all of the bytes in the entire allocator.
pub fn mprotect(self: *SimpleAllocator, protection: u32) void {
if (!config.memory_protection) return;
if (self.active_allocation.len == 0) return;
const aligned_ptr = @alignCast(page_size, self.active_allocation.ptr);
const aligned_len = std.mem.alignForward(self.active_allocation.len, page_size);
const slice = aligned_ptr[0..aligned_len];
os.mprotect(slice, protection) catch unreachable;
}
};
};
}
/// Selects which of the two stack traces stored per slot is addressed:
/// the one captured at allocation time or the one captured at free time.
const TraceKind = enum {
Alloc,
Free,
};
/// Return the smallest power of two that is >= n (1 when n <= 1).
fn up_to_nearest_power_of_2(comptime T: type, n: T) T {
    // Double from 1 until the candidate reaches or exceeds n.
    var candidate: T = 1;
    while (candidate < n) : (candidate *= 2) {}
    return candidate;
}
/// Hash function for the large-allocation table: fold a pointer-sized
/// address down to 32 bits. On 32-bit targets the address is used as-is;
/// on 64-bit targets the two halves are XORed together.
fn hash_addr(addr: usize) u32 {
// TODO ignore the least significant bits because addr is guaranteed
// to be page aligned
if (@sizeOf(usize) == @sizeOf(u32))
return addr;
// Only 32- and 64-bit pointer sizes are supported.
comptime assert(@sizeOf(usize) == 8);
return @intCast(u32, addr >> 32) ^ @truncate(u32, addr);
}
/// Key equality for the large-allocation table: plain address comparison.
fn eql_addr(a: usize, b: usize) bool {
return a == b;
}
// Default test configuration (all Config defaults, memory protection on).
const test_config = Config{};
// Variant with mprotect-based state protection disabled.
const test_config_nomprotect = Config{ .memory_protection = false };
// 513 u64 allocations forces multiple buckets for the 8-byte size class.
test "small allocations - free in same order" {
const gpda = try GeneralPurposeDebugAllocator(test_config).create();
defer gpda.destroy();
const allocator = &gpda.allocator;
var list = std.ArrayList(*u64).init(std.debug.global_allocator);
var i: usize = 0;
while (i < 513) : (i += 1) {
const ptr = try allocator.create(u64);
try list.append(ptr);
}
for (list.toSlice()) |ptr| {
allocator.destroy(ptr);
}
}
test "small allocations - free in reverse order" {
const gpda = try GeneralPurposeDebugAllocator(test_config).create();
defer gpda.destroy();
const allocator = &gpda.allocator;
var list = std.ArrayList(*u64).init(std.debug.global_allocator);
var i: usize = 0;
while (i < 513) : (i += 1) {
const ptr = try allocator.create(u64);
try list.append(ptr);
}
while (list.popOrNull()) |ptr| {
allocator.destroy(ptr);
}
}
// Sizes well above a page exercise the direct (mmap-backed) path.
test "large allocations" {
const gpda = try GeneralPurposeDebugAllocator(test_config).create();
defer gpda.destroy();
const allocator = &gpda.allocator;
const ptr1 = try allocator.alloc(u64, 42768);
const ptr2 = try allocator.alloc(u64, 52768);
allocator.free(ptr1);
const ptr3 = try allocator.alloc(u64, 62768);
allocator.free(ptr3);
allocator.free(ptr2);
}
test "realloc" {
const gpda = try GeneralPurposeDebugAllocator(test_config).create();
defer gpda.destroy();
const allocator = &gpda.allocator;
var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
defer allocator.free(slice);
slice[0] = 0x12;
// This reallocation should keep its pointer address.
const old_slice = slice;
slice = try allocator.realloc(slice, 2);
assert(old_slice.ptr == slice.ptr);
assert(slice[0] == 0x12);
slice[1] = 0x34;
// This requires upgrading to a larger size class
slice = try allocator.realloc(slice, 17);
assert(slice[0] == 0x12);
assert(slice[1] == 0x34);
}
// Shrinking within the same size class must keep the data intact.
test "shrink" {
const gpda = try GeneralPurposeDebugAllocator(test_config).create();
defer gpda.destroy();
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, 20);
defer allocator.free(slice);
std.mem.set(u8, slice, 0x11);
slice = allocator.shrink(slice, 17);
for (slice) |b| {
assert(b == 0x11);
}
slice = allocator.shrink(slice, 16);
for (slice) |b| {
assert(b == 0x11);
}
}
// Growth within already-mapped pages keeps the pointer stable; crossing a
// page boundary may relocate.
test "large object - grow" {
const gpda = try GeneralPurposeDebugAllocator(test_config).create();
defer gpda.destroy();
const allocator = &gpda.allocator;
var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
defer allocator.free(slice1);
var old = slice1;
slice1 = try allocator.realloc(slice1, page_size * 2 - 10);
assert(slice1.ptr == old.ptr);
slice1 = try allocator.realloc(slice1, page_size * 2);
assert(slice1.ptr == old.ptr);
slice1 = try allocator.realloc(slice1, page_size * 2 + 1);
}
test "realloc small object to large object" {
const gpda = try GeneralPurposeDebugAllocator(test_config).create();
defer gpda.destroy();
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, 70);
defer allocator.free(slice);
slice[0] = 0x12;
slice[60] = 0x34;
// This requires upgrading to a large object
const large_object_size = page_size * 2 + 50;
slice = try allocator.realloc(slice, large_object_size);
assert(slice[0] == 0x12);
assert(slice[60] == 0x34);
}
// realloc that would grow a mapping in place fails; shrink succeeds.
test "shrink large object to large object" {
const gpda = try GeneralPurposeDebugAllocator(test_config).create();
defer gpda.destroy();
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
slice[0] = 0x12;
slice[60] = 0x34;
if (allocator.realloc(slice, page_size * 2 + 1)) |_| {
@panic("expected failure");
} else |e| assert(e == error.OutOfMemory);
slice = allocator.shrink(slice, page_size * 2 + 1);
assert(slice[0] == 0x12);
assert(slice[60] == 0x34);
slice = try allocator.realloc(slice, page_size * 2);
assert(slice[0] == 0x12);
assert(slice[60] == 0x34);
}
// Deliberately burns addresses until an unaligned one is returned, so the
// realloc below genuinely has to cope with a stricter alignment.
test "shrink large object to large object with larger alignment" {
const gpda = try GeneralPurposeDebugAllocator(test_config).create();
defer gpda.destroy();
const allocator = &gpda.allocator;
var debug_buffer: [1000]u8 = undefined;
const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
const alloc_size = page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
defer allocator.free(slice);
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
while (isAligned(@ptrToInt(slice.ptr), page_size * 2)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, 16, alloc_size);
}
while (stuff_to_free.popOrNull()) |item| {
allocator.free(item);
}
slice[0] = 0x12;
slice[60] = 0x34;
slice = try allocator.alignedRealloc(slice, page_size * 2, alloc_size / 2);
assert(slice[0] == 0x12);
assert(slice[60] == 0x34);
}
test "realloc large object to small object" {
const gpda = try GeneralPurposeDebugAllocator(test_config).create();
defer gpda.destroy();
const allocator = &gpda.allocator;
var slice = try allocator.alloc(u8, page_size * 2 + 50);
defer allocator.free(slice);
slice[0] = 0x12;
slice[16] = 0x34;
slice = try allocator.realloc(slice, 19);
assert(slice[0] == 0x12);
assert(slice[16] == 0x34);
}
// backing_allocator requires memory_protection = false (comptime-enforced).
test "backing allocator" {
const gpda = try GeneralPurposeDebugAllocator(Config{
.backing_allocator = true,
.memory_protection = false,
}).createWithAllocator(std.debug.global_allocator);
defer gpda.destroy();
const allocator = &gpda.allocator;
const ptr = try allocator.create(i32);
defer allocator.destroy(ptr);
}
test "realloc large object to larger alignment" {
const gpda = try GeneralPurposeDebugAllocator(test_config).create();
defer gpda.destroy();
const allocator = &gpda.allocator;
var debug_buffer: [1000]u8 = undefined;
const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator;
var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
defer allocator.free(slice);
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
while (isAligned(@ptrToInt(slice.ptr), page_size * 2)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
}
while (stuff_to_free.popOrNull()) |item| {
allocator.free(item);
}
slice[0] = 0x12;
slice[16] = 0x34;
slice = try allocator.alignedRealloc(slice, 32, page_size * 2 + 100);
assert(slice[0] == 0x12);
assert(slice[16] == 0x34);
slice = try allocator.alignedRealloc(slice, 32, page_size * 2 + 25);
assert(slice[0] == 0x12);
assert(slice[16] == 0x34);
slice = try allocator.alignedRealloc(slice, page_size * 2, page_size * 2 + 100);
assert(slice[0] == 0x12);
assert(slice[16] == 0x34);
}
/// True when `addr` is a multiple of `alignment`.
/// `alignment` must be a power of two; the mask trick below relies on it.
fn isAligned(addr: usize, alignment: usize) bool {
    // For a power-of-two alignment, the low log2(alignment) bits of an
    // aligned address are all zero; test them directly with a mask.
    const low_bits_mask = alignment - 1;
    return (addr & low_bits_mask) == 0;
}
// Table-driven check of isAligned over alignments 1..16, covering both
// aligned and misaligned addresses (same cases as before, same order).
test "isAligned works" {
    const Case = struct {
        addr: usize,
        alignment: usize,
        expected: bool,
    };
    const cases = [_]Case{
        Case{ .addr = 0, .alignment = 4, .expected = true },
        Case{ .addr = 1, .alignment = 1, .expected = true },
        Case{ .addr = 2, .alignment = 1, .expected = true },
        Case{ .addr = 2, .alignment = 2, .expected = true },
        Case{ .addr = 2, .alignment = 4, .expected = false },
        Case{ .addr = 3, .alignment = 1, .expected = true },
        Case{ .addr = 3, .alignment = 2, .expected = false },
        Case{ .addr = 3, .alignment = 4, .expected = false },
        Case{ .addr = 4, .alignment = 4, .expected = true },
        Case{ .addr = 4, .alignment = 2, .expected = true },
        Case{ .addr = 4, .alignment = 1, .expected = true },
        Case{ .addr = 4, .alignment = 8, .expected = false },
        Case{ .addr = 4, .alignment = 16, .expected = false },
    };
    for (cases) |case| {
        assert(isAligned(case.addr, case.alignment) == case.expected);
    }
}
// shrink() must succeed in place even when the backing allocator can no
// longer allocate: a failed internal allocation during shrink must not
// lose the caller's data.
test "large object shrinks to small but allocation fails during shrink" {
    // Allow exactly 3 allocations, then fail every subsequent one.
    var failing_allocator = std.debug.FailingAllocator.init(std.heap.direct_allocator, 3);
    const gpda = try GeneralPurposeDebugAllocator(Config{
        .backing_allocator = true,
        .memory_protection = false,
    }).createWithAllocator(&failing_allocator.allocator);
    defer gpda.destroy();
    const allocator = &gpda.allocator;
    var slice = try allocator.alloc(u8, page_size * 2 + 50);
    defer allocator.free(slice);
    slice[0] = 0x12;
    slice[3] = 0x34;
    // Next allocation will fail in the backing allocator of the GeneralPurposeDebugAllocator
    slice = allocator.shrink(slice, 4);
    // Data within the new length must be untouched despite the failure.
    assert(slice[0] == 0x12);
    assert(slice[3] == 0x34);
}
// Exercises the larger small-object size classes: 1025 bytes and 3000
// bytes presumably land in different buckets — alloc and free both.
test "objects of size 1024 and 2048" {
    const gpda = try GeneralPurposeDebugAllocator(test_config).create();
    defer gpda.destroy();
    const allocator = &gpda.allocator;
    const slice = try allocator.alloc(u8, 1025);
    const slice2 = try allocator.alloc(u8, 3000);
    allocator.free(slice);
    allocator.free(slice2);
}
// The requested-bytes accounting must track allocs and frees exactly,
// and an allocation that would exceed the configured cap must fail
// with OutOfMemory without disturbing the accounting.
test "setting a memory cap" {
    const gpda = try GeneralPurposeDebugAllocator(test_config).create();
    defer gpda.destroy();
    const allocator = &gpda.allocator;
    gpda.setRequestedMemoryLimit(1010);
    const small = try allocator.create(i32);
    assert(gpda.total_requested_bytes == 4);
    const big = try allocator.alloc(u8, 1000);
    assert(gpda.total_requested_bytes == 1004);
    // 1004 + 8 would exceed the 1010-byte cap.
    std.testing.expectError(error.OutOfMemory, allocator.create(u64));
    allocator.destroy(small);
    assert(gpda.total_requested_bytes == 1000);
    allocator.free(big);
    assert(gpda.total_requested_bytes == 0);
    // A request of exactly the cap must still succeed.
    const exact = try allocator.alloc(u8, 1010);
    assert(gpda.total_requested_bytes == 1010);
    allocator.free(exact);
}
|
gpda.zig
|
const std = @import("std");
const builtin = @import("builtin");
/// Root directory (relative to the build root) of the Android project.
pub const ANDROID_PROJECT_PATH = "android-project";
/// Directory (relative to the build root) holding generated libc
/// config files, one per Android target.
pub const ZIG_LIBC_CONFIGS_DIR_PATH = "zig-libc-configs";
/// Paths and versions describing the host's Android toolchain,
/// supplied by the user of this build script.
pub const AndroidEnv = struct {
    // Root of the JDK installation (contains bin/javac, bin/jarsigner).
    jdk_path: []u8,
    // Root of the Android SDK (contains platforms/, platform-tools/).
    sdk_path: []u8,
    // Android platform/API level, as a string (e.g. "29").
    platform_number: []u8,
    // Directory containing aapt, dx and zipalign.
    build_tools_path: []u8,
    // Root of the Android NDK (contains ndk-build).
    ndk_path: []u8,
};
/// The Android ABIs this build script can target, with the per-target
/// paths and zig cross-compilation settings needed to build for each.
pub const AndroidTarget = enum {
    aarch64,
    arm,
    x86,
    x86_64,
    /// Per-target build configuration; see `config` below.
    const Config = struct {
        // NDK sysroot library directory (relative to a platform dir).
        lib_dir: []const u8,
        // NDK per-target include directory name.
        include_dir: []const u8,
        // ABI directory name used under out/lib in the apk.
        out_dir: []const u8,
        // Path of the generated libc config file for this target.
        libc_file: []const u8,
        target: std.zig.CrossTarget,
    };
    /// The Android ABI name for this target, as used in apk lib paths.
    fn name(self: AndroidTarget) []const u8 {
        return switch (self) {
            .aarch64 => "arm64-v8a",
            .arm => "armeabi",
            .x86 => "x86",
            .x86_64 => "x86_64",
        };
    }
    /// Returns the full build configuration for this target.
    pub fn config(self: AndroidTarget) Config {
        return switch (self) {
            .aarch64 => Config{
                .lib_dir = "arch-arm64/usr/lib",
                .include_dir = "aarch64-linux-android",
                .out_dir = "arm64-v8a",
                .libc_file = ZIG_LIBC_CONFIGS_DIR_PATH ++ "/aarch64-libc.conf",
                .target = std.zig.CrossTarget{
                    .cpu_arch = .aarch64,
                    .os_tag = .linux,
                    .abi = .android,
                    .cpu_model = .baseline,
                    .cpu_features_add = std.Target.aarch64.featureSet(&.{.v8a}),
                },
            },
            .arm => Config{
                .lib_dir = "arch-arm/usr/lib",
                .include_dir = "arm-linux-androideabi",
                .out_dir = "armeabi",
                .libc_file = ZIG_LIBC_CONFIGS_DIR_PATH ++ "/arm-libc.conf",
                .target = std.zig.CrossTarget{
                    .cpu_arch = .arm,
                    .os_tag = .linux,
                    .abi = .android,
                    .cpu_model = .baseline,
                    .cpu_features_add = std.Target.arm.featureSet(&.{.v7a}),
                },
            },
            .x86 => Config{
                .lib_dir = "arch-x86/usr/lib",
                .include_dir = "i686-linux-android",
                .out_dir = "x86",
                .libc_file = ZIG_LIBC_CONFIGS_DIR_PATH ++ "/x86-libc.conf",
                .target = std.zig.CrossTarget{
                    .cpu_arch = .i386,
                    .os_tag = .linux,
                    .abi = .android,
                    .cpu_model = .baseline,
                },
            },
            .x86_64 => Config{
                .lib_dir = "arch-x86_64/usr/lib64",
                .include_dir = "x86_64-linux-android",
                .out_dir = "x86_64",
                .libc_file = ZIG_LIBC_CONFIGS_DIR_PATH ++ "/x86_64-libc.conf",
                .target = std.zig.CrossTarget{
                    .cpu_arch = .x86_64,
                    .os_tag = .linux,
                    .abi = .android,
                    .cpu_model = .baseline,
                },
            },
        };
    }
};
/// Pairs a compiled main-library build step with the Android target it
/// was built for, so `buildApkStep` can copy it to the right ABI folder.
pub const AndroidMainLib = struct {
    target: AndroidTarget,
    step: *std.build.LibExeObjStep,
};
/// Returns a step that runs the NDK's `ndk-build` to compile SDL for
/// all enabled ABIs. It is best kept as its own separate step so its
/// output stays cached: SDL rarely needs rebuilding, and the zig main
/// android lib links against this output.
pub fn buildSdlForAndroidStep(builder: *std.build.Builder, env: *const AndroidEnv) *std.build.Step {
    // Pick the ndk-build wrapper script that matches the host OS.
    const script_ext = switch (builtin.os.tag) {
        .windows => ".cmd",
        else => ".sh",
    };
    const script_path = std.fs.path.resolve(
        builder.allocator,
        &[_][]const u8{ env.ndk_path, "ndk-build" ++ script_ext },
    ) catch unreachable;
    const run_step = builder.addSystemCommand(&[_][]const u8{
        script_path,
        "NDK_LIBS_OUT=../ndk-out/lib",
        "NDK_OUT=../ndk-out/out",
    });
    // ndk-build must execute from the jni folder in order to correctly
    // find the jni project files Application.mk and Android.mk.
    run_step.cwd = builder.pathFromRoot(ANDROID_PROJECT_PATH ++ "/jni");
    return &run_step.step;
}
/// Asserts that the `ZIG_LIBC_CONFIGS_DIR_PATH` directory exists in the
/// project root, containing a libc config file for each android target.
/// These files are used when building the main zig android lib.
///
/// In future versions of zig it will be possible to generate them in a
/// build step. For now, we generate them on any `zig build` invocation
/// where the folder does not already exist.
pub fn assertZigLibcConfigsDirExist(builder: *std.build.Builder, env: *const AndroidEnv) void {
    const libc_configs_path = builder.pathFromRoot(ZIG_LIBC_CONFIGS_DIR_PATH);
    // If the directory already exists, assume it is fully populated.
    std.fs.makeDirAbsolute(libc_configs_path) catch return;
    var libc_configs_dir = std.fs.openDirAbsolute(libc_configs_path, .{}) catch unreachable;
    // Fix: the directory handle was previously leaked.
    defer libc_configs_dir.close();
    // Collect every AndroidTarget member at comptime.
    comptime var all_targets: [@typeInfo(AndroidTarget).Enum.fields.len]AndroidTarget = undefined;
    inline for (@typeInfo(AndroidTarget).Enum.fields) |field, i| {
        all_targets[i] = @intToEnum(AndroidTarget, field.value);
    }
    // Fields zig requires in the config but that are unused for android
    // targets; on windows they must still be syntactically valid paths.
    const empty_path = switch (builtin.os.tag) {
        .windows => "C:\\",
        else => "",
    };
    for (all_targets) |target| {
        const config = target.config();
        // Strip "<configs dir>/" to get the bare file name.
        const path = config.libc_file[ZIG_LIBC_CONFIGS_DIR_PATH.len + 1 ..];
        const content = builder.fmt(
            \\include_dir={s}
            \\sys_include_dir={s}
            \\crt_dir={s}/platforms/android-{s}/{s}
            \\msvc_lib_dir={s}
            \\kernel32_lib_dir={s}
            \\gcc_dir={s}
        , .{
            config.include_dir,
            config.include_dir,
            env.ndk_path,
            env.platform_number,
            config.lib_dir,
            empty_path,
            empty_path,
            empty_path,
        },
        );
        // Best-effort: a file that cannot be written is skipped.
        libc_configs_dir.writeFile(path, content) catch continue;
    }
}
/// Returns the final step of a chain that creates a signed and aligned
/// apk from:
/// - your android project
/// - your debug keystore
/// - SDL android libs
/// - zig main android lib (one per entry in `main_libs`)
/// - your project asset folder
/// The steps are strictly ordered via dependOn; each numbered log step
/// below announces the following command.
pub fn buildApkStep(builder: *std.build.Builder, env: *const AndroidEnv, main_libs: []const AndroidMainLib) *std.build.Step {
    // all executable paths needed to create an apk
    const exe_ext = switch (builtin.os.tag) {
        .windows => ".exe",
        else => "",
    };
    const javac_exe = std.fs.path.resolve(
        builder.allocator,
        &[_][]const u8{ env.jdk_path, "bin/javac" ++ exe_ext },
    ) catch unreachable;
    const jarsigner_exe = std.fs.path.resolve(
        builder.allocator,
        &[_][]const u8{ env.jdk_path, "bin/jarsigner" ++ exe_ext },
    ) catch unreachable;
    const aapt_exe = std.fs.path.resolve(
        builder.allocator,
        &[_][]const u8{ env.build_tools_path, "aapt" ++ exe_ext },
    ) catch unreachable;
    // dx ships as a wrapper script, not a native executable.
    const dx_ext = switch (builtin.os.tag) {
        .windows => ".bat",
        else => ".sh",
    };
    const dx_exe = std.fs.path.resolve(
        builder.allocator,
        &[_][]const u8{ env.build_tools_path, "dx" ++ dx_ext },
    ) catch unreachable;
    const zipalign_exe = std.fs.path.resolve(
        builder.allocator,
        &[_][]const u8{ env.build_tools_path, "zipalign" ++ exe_ext },
    ) catch unreachable;
    // other paths
    const android_jar_path = std.fs.path.resolve(
        builder.allocator,
        &[_][]const u8{ env.sdk_path, "platforms", builder.fmt("android-{s}", .{env.platform_number}), "android.jar" },
    ) catch unreachable;
    // Step 0: wipe and recreate the project `out` directory.
    const clean_step = builder.allocator.create(CleanStep) catch unreachable;
    clean_step.* = CleanStep.init(builder);
    var create_r_java_log = builder.addLog(
        "step 1: creating the R.java source from the resources inside the `res` folder\n",
        .{},
    );
    create_r_java_log.step.dependOn(&clean_step.step);
    var create_r_java_command = addCommand(builder, &[_][]const u8{
        aapt_exe,
        "package",
        "-m",
        "-J",
        "out",
        "-M",
        "AndroidManifest.xml",
        "-S",
        "res",
        "-I",
        android_jar_path,
    });
    create_r_java_command.step.dependOn(&create_r_java_log.step);
    var compile_java_log = builder.addLog(
        "step 2: compiling all java sources inside the `java` folder into the `out` folder\n",
        .{},
    );
    compile_java_log.step.dependOn(&create_r_java_command.step);
    // The javac argv is assembled dynamically: fixed flags first, then
    // every .java file found under the project's `java` folder.
    var javac_argv = std.ArrayList([]const u8).initCapacity(builder.allocator, 32) catch unreachable;
    javac_argv.append(javac_exe) catch unreachable;
    javac_argv.append("-d") catch unreachable;
    javac_argv.append("out") catch unreachable;
    javac_argv.append("-classpath") catch unreachable;
    javac_argv.append(android_jar_path) catch unreachable;
    javac_argv.append("-target") catch unreachable;
    javac_argv.append("1.7") catch unreachable;
    javac_argv.append("-source") catch unreachable;
    javac_argv.append("1.7") catch unreachable;
    javac_argv.append("-sourcepath") catch unreachable;
    javac_argv.append("java:out") catch unreachable;
    {
        var it = std.fs.walkPath(builder.allocator, builder.pathFromRoot(ANDROID_PROJECT_PATH ++ "/java")) catch unreachable;
        defer it.deinit();
        while (it.next() catch unreachable) |entry| {
            if (entry.kind == .File and std.mem.endsWith(u8, entry.path, ".java")) {
                // dupe: the walker reuses its path buffer between entries.
                javac_argv.append(builder.dupe(entry.path)) catch unreachable;
            }
        }
    }
    var compile_java_command = addCommand(builder, javac_argv.items);
    compile_java_command.step.dependOn(&compile_java_log.step);
    var compile_classes_dex_log = builder.addLog(
        "step 3: converting all compiled java code into android vm compatible bytecode\n",
        .{},
    );
    compile_classes_dex_log.step.dependOn(&compile_java_command.step);
    var compile_classes_dex_command = addCommand(builder, &[_][]const u8{
        dx_exe,
        "--dex",
        "--min-sdk-version=16",
        "--output=out/classes.dex",
        "out",
    });
    compile_classes_dex_command.step.dependOn(&compile_classes_dex_log.step);
    var create_apk_log = builder.addLog(
        "step 4: creating first version of the apk including all resources and asset files\n",
        .{},
    );
    create_apk_log.step.dependOn(&compile_classes_dex_command.step);
    var create_apk_command = addCommand(builder, &[_][]const u8{
        aapt_exe,
        "package",
        "-f",
        "-M",
        "AndroidManifest.xml",
        "-S",
        "res",
        "-I",
        android_jar_path,
        "-A",
        "../assets",
        "-F",
        "out/app.apk.unaligned",
    });
    create_apk_command.step.dependOn(&create_apk_log.step);
    var copy_sdl_libs_log = builder.addLog(
        "step 5: copying all compiled SDL libraries to `out/lib/<target>`\n",
        .{},
    );
    copy_sdl_libs_log.step.dependOn(&create_apk_command.step);
    const copy_sdl_libs = builder.allocator.create(CopyDirStep) catch unreachable;
    copy_sdl_libs.* = CopyDirStep.init(
        builder,
        ANDROID_PROJECT_PATH ++ "/ndk-out/lib",
        ANDROID_PROJECT_PATH ++ "/out/lib",
    );
    copy_sdl_libs.step.dependOn(&copy_sdl_libs_log.step);
    var copy_main_libs_log = builder.addLog(
        "step 6: copying all compiled zig libraries found to `out/lib/<target>` for each `AndroidTarget`\n",
        .{},
    );
    copy_main_libs_log.step.dependOn(&copy_sdl_libs.step);
    const libs_dir = builder.pathFromRoot(ANDROID_PROJECT_PATH ++ "/out/lib");
    // Chain one CopyLibStep per main lib; each depends on the previous.
    var last_copy_step = &copy_main_libs_log.step;
    for (main_libs) |main_lib| {
        const target_lib_path = std.fs.path.resolve(
            builder.allocator,
            &[_][]const u8{ libs_dir, main_lib.target.name(), "libmain.so" },
        ) catch unreachable;
        const copy_main_lib = builder.allocator.create(CopyLibStep) catch unreachable;
        copy_main_lib.* = CopyLibStep.init(
            builder,
            main_lib.step,
            target_lib_path,
        );
        copy_main_lib.step.dependOn(last_copy_step);
        last_copy_step = &copy_main_lib.step;
    }
    var add_classes_dex_to_apk_log = builder.addLog(
        "step 7: adding `classes.dex` to the apk\n",
        .{},
    );
    add_classes_dex_to_apk_log.step.dependOn(last_copy_step);
    var add_classes_dex_to_apk_command = builder.addSystemCommand(&[_][]const u8{
        aapt_exe,
        "add",
        "-f",
        "app.apk.unaligned",
        "classes.dex",
    });
    // aapt add stores the path as given, so run from `out` to keep the
    // entry name relative.
    add_classes_dex_to_apk_command.cwd = builder.pathFromRoot(ANDROID_PROJECT_PATH ++ "/out");
    add_classes_dex_to_apk_command.step.dependOn(&add_classes_dex_to_apk_log.step);
    var add_libs_to_apk_log = builder.addLog(
        "step 8: adding all libs inside `out/lib` to the apk\n",
        .{},
    );
    add_libs_to_apk_log.step.dependOn(&add_classes_dex_to_apk_command.step);
    const add_libs_to_apk = builder.allocator.create(AddLibsToApkStep) catch unreachable;
    add_libs_to_apk.* = AddLibsToApkStep.init(builder, aapt_exe);
    add_libs_to_apk.step.dependOn(&add_libs_to_apk_log.step);
    var sign_apk_log = builder.addLog(
        "step 9: signing apk\n",
        .{},
    );
    sign_apk_log.step.dependOn(&add_libs_to_apk.step);
    var sign_apk_command = addCommand(builder, &[_][]const u8{
        jarsigner_exe,
        "-keystore",
        "keystore/debug.keystore",
        "-storepass",
        "password",
        "-keypass",
        "password",
        "out/app.apk.unaligned",
        "debugkey",
    });
    sign_apk_command.step.dependOn(&sign_apk_log.step);
    var align_apk_log = builder.addLog(
        "step 10: aligning apk\n",
        .{},
    );
    align_apk_log.step.dependOn(&sign_apk_command.step);
    var align_apk_command = addCommand(builder, &[_][]const u8{
        zipalign_exe,
        "-f",
        "4",
        "out/app.apk.unaligned",
        "out/app.apk",
    });
    align_apk_command.step.dependOn(&align_apk_log.step);
    return &align_apk_command.step;
}
/// Returns a step that installs the built apk on the connected device
/// via adb and then launches its main activity.
pub fn installAndRunApkStep(
    builder: *std.build.Builder,
    env: *const AndroidEnv,
    package_name: []const u8,
    main_activity_class_name: []const u8,
) *std.build.Step {
    const host_exe_ext = switch (builtin.os.tag) {
        .windows => ".exe",
        else => "",
    };
    const adb_path = std.fs.path.resolve(
        builder.allocator,
        &[_][]const u8{ env.sdk_path, "platform-tools/adb" ++ host_exe_ext },
    ) catch unreachable;
    // `adb install -r` replaces the app when it is already installed.
    const install_step = builder.addSystemCommand(&[_][]const u8{
        adb_path,
        "install",
        "-r",
        ANDROID_PROJECT_PATH ++ "/out/app.apk",
    });
    // `am start -n <package>/<package>.<activity>` starts the activity.
    const run_step = builder.addSystemCommand(&[_][]const u8{
        adb_path,
        "shell",
        "am",
        "start",
        "-n",
        builder.fmt("{s}/{s}.{s}", .{ package_name, package_name, main_activity_class_name }),
    });
    run_step.step.dependOn(&install_step.step);
    return &run_step.step;
}
/// Returns a step that tails the device log via `adb logcat`, filtered
/// to the SDL and ZIG tags.
pub fn openAndroidLog(builder: *std.build.Builder, env: *const AndroidEnv) *std.build.Step {
    const host_exe_ext = switch (builtin.os.tag) {
        .windows => ".exe",
        else => "",
    };
    const adb_path = std.fs.path.resolve(
        builder.allocator,
        &[_][]const u8{ env.sdk_path, "platform-tools/adb" ++ host_exe_ext },
    ) catch unreachable;
    const logcat_step = builder.addSystemCommand(&[_][]const u8{
        adb_path,
        "logcat",
        "-s",
        "SDL",
        "ZIG",
    });
    return &logcat_step.step;
}
/// Like `Builder.addSystemCommand`, but with the working directory set
/// to the android project root.
fn addCommand(builder: *std.build.Builder, argv: []const []const u8) *std.build.RunStep {
    const run_step = builder.addSystemCommand(argv);
    run_step.cwd = builder.pathFromRoot(ANDROID_PROJECT_PATH);
    return run_step;
}
/// Custom build step that deletes and recreates the project's `out`
/// directory so every apk build starts from a clean slate.
const CleanStep = struct {
    step: std.build.Step,
    builder: *std.build.Builder,
    pub fn init(builder: *std.build.Builder) CleanStep {
        return CleanStep{
            .step = std.build.Step.init(.Custom, "Cleaning " ++ ANDROID_PROJECT_PATH ++ "/out", builder.allocator, make),
            .builder = builder,
        };
    }
    fn make(step: *std.build.Step) !void {
        const self = @fieldParentPtr(CleanStep, "step", step);
        const out_dir = self.builder.pathFromRoot(ANDROID_PROJECT_PATH ++ "/out");
        // A failed delete aborts the build after logging the cause.
        std.fs.cwd().deleteTree(out_dir) catch |err| {
            std.log.warn("Unable to remove {s}: {s}\n", .{ out_dir, @errorName(err) });
            return err;
        };
        try std.fs.cwd().makeDir(out_dir);
    }
};
/// Custom build step that copies the output of a library build step to
/// a fixed destination path (used to place libmain.so into the per-ABI
/// apk lib folder). Depends on the library's own build step.
const CopyLibStep = struct {
    step: std.build.Step,
    builder: *std.build.Builder,
    lib: *std.build.LibExeObjStep,
    // Absolute destination path of the copied library.
    dest: []const u8,
    pub fn init(builder: *std.build.Builder, lib: *std.build.LibExeObjStep, dest: []const u8) CopyLibStep {
        var step = std.build.Step.init(.Custom, builder.fmt("copying to {s}", .{dest}), builder.allocator, make),
        step.dependOn(&lib.step);
        return CopyLibStep{
            .builder = builder,
            .step = step,
            .lib = lib,
            .dest = dest,
        };
    }
    fn make(step: *std.build.Step) !void {
        const self = @fieldParentPtr(CopyLibStep, "step", step);
        const source_path = self.lib.getOutputLibPath();
        // updateFile only copies when source and dest differ.
        _ = try std.fs.updateFileAbsolute(source_path, self.dest, .{});
    }
};
/// Custom build step that recursively copies one project-relative
/// directory tree to another (used to move the ndk-build SDL output
/// into the apk staging area).
const CopyDirStep = struct {
    step: std.build.Step,
    builder: *std.build.Builder,
    // Both paths are relative to the build root; resolved in make().
    source: []const u8,
    dest: []const u8,
    pub fn init(builder: *std.build.Builder, source: []const u8, dest: []const u8) CopyDirStep {
        return CopyDirStep{
            .builder = builder,
            .step = std.build.Step.init(.Custom, builder.fmt("copying {s} to {s}", .{ source, dest }), builder.allocator, make),
            .source = builder.dupe(source),
            .dest = builder.dupe(dest),
        };
    }
    fn make(step: *std.build.Step) !void {
        const self = @fieldParentPtr(CopyDirStep, "step", step);
        const source = self.builder.pathFromRoot(self.source);
        const dest = self.builder.pathFromRoot(self.dest);
        var it = try std.fs.walkPath(self.builder.allocator, source);
        defer it.deinit();
        while (try it.next()) |entry| {
            // Path relative to the source root ("+ 1" skips the separator).
            const rel_path = entry.path[source.len + 1 ..];
            const dest_path = try std.fs.path.join(self.builder.allocator, &[_][]const u8{ dest, rel_path });
            switch (entry.kind) {
                .Directory => try std.fs.cwd().makePath(dest_path),
                .File => try self.builder.updateFile(entry.path, dest_path),
                // Symlinks and other kinds are skipped.
                else => {},
            }
        }
    }
};
/// Custom build step that runs `aapt add` for every shared library
/// found under `out/lib`, storing each in the apk under its
/// `lib/<abi>/...` relative path.
const AddLibsToApkStep = struct {
    step: std.build.Step,
    builder: *std.build.Builder,
    aapt_exe: []const u8,
    fn init(builder: *std.build.Builder, aapt_exe: []const u8) AddLibsToApkStep {
        return AddLibsToApkStep{
            // Fix: use .Custom like the other custom steps in this file
            // (CleanStep, CopyLibStep, CopyDirStep); .InstallDir was an
            // unrelated step id.
            .step = std.build.Step.init(.Custom, "copying libs to apk", builder.allocator, make),
            .builder = builder,
            .aapt_exe = aapt_exe,
        };
    }
    fn make(step: *std.build.Step) !void {
        const self = @fieldParentPtr(AddLibsToApkStep, "step", step);
        const process_cwd = self.builder.pathFromRoot(ANDROID_PROJECT_PATH ++ "/out");
        const walker_cwd = self.builder.fmt("{s}/lib", .{process_cwd});
        var it = try std.fs.walkPath(self.builder.allocator, walker_cwd);
        defer it.deinit();
        while (try it.next()) |entry| {
            if (entry.kind == .File and std.mem.endsWith(u8, entry.path, ".so")) {
                // aapt stores the path as given, so make it relative to
                // `out` and force forward slashes even on windows.
                var lib_path = self.builder.dupe(entry.path[process_cwd.len + 1 ..]);
                for (lib_path) |*byte| {
                    if (byte.* == '\\') {
                        byte.* = '/';
                    }
                }
                const add_process = try std.ChildProcess.init(
                    &[_][]const u8{
                        self.aapt_exe,
                        "add",
                        "-f",
                        "app.apk.unaligned",
                        lib_path,
                    },
                    self.builder.allocator,
                );
                add_process.stdin_behavior = .Ignore;
                add_process.cwd = process_cwd;
                _ = try add_process.spawnAndWait();
            }
        }
    }
};
|
build_android.zig
|
const std = @import("std");
const Seat = @import("Seat.zig");
/// Logical direction used by commands that cycle through a sequence.
pub const Direction = enum {
    next,
    previous,
};
/// Screen-space direction used by commands that move or snap views.
pub const PhysicalDirection = enum {
    up,
    down,
    left,
    right,
};
/// Axis used by commands that resize along one dimension.
pub const Orientation = enum {
    horizontal,
    vertical,
};
// Comptime map from command name to its implementation function. Every
// implementation shares the signature of `run` below minus the name
// lookup. Kept alphabetically ordered; fmt is disabled to preserve the
// aligned table layout.
// zig fmt: off
const command_impls = std.ComptimeStringMap(
    fn (*std.mem.Allocator, *Seat, []const [:0]const u8, *?[]const u8) Error!void,
    .{
        .{ "attach-mode",             @import("command/attach_mode.zig").attachMode },
        .{ "background-color",        @import("command/config.zig").backgroundColor },
        .{ "border-color-focused",    @import("command/config.zig").borderColorFocused },
        .{ "border-color-unfocused",  @import("command/config.zig").borderColorUnfocused },
        .{ "border-color-urgent",     @import("command/config.zig").borderColorUrgent },
        .{ "border-width",            @import("command/config.zig").borderWidth },
        .{ "close",                   @import("command/close.zig").close },
        .{ "csd-filter-add",          @import("command/filter.zig").csdFilterAdd },
        .{ "csd-filter-remove",       @import("command/filter.zig").csdFilterRemove },
        .{ "declare-mode",            @import("command/declare_mode.zig").declareMode },
        .{ "default-layout",          @import("command/layout.zig").defaultLayout },
        .{ "enter-mode",              @import("command/enter_mode.zig").enterMode },
        .{ "exit",                    @import("command/exit.zig").exit },
        .{ "float-filter-add",        @import("command/filter.zig").floatFilterAdd },
        .{ "float-filter-remove",     @import("command/filter.zig").floatFilterRemove },
        .{ "focus-follows-cursor",    @import("command/focus_follows_cursor.zig").focusFollowsCursor },
        .{ "focus-output",            @import("command/output.zig").focusOutput },
        .{ "focus-previous-tags",     @import("command/tags.zig").focusPreviousTags },
        .{ "focus-view",              @import("command/focus_view.zig").focusView },
        .{ "input",                   @import("command/input.zig").input },
        .{ "list-input-configs",      @import("command/input.zig").listInputConfigs},
        .{ "list-inputs",             @import("command/input.zig").listInputs },
        .{ "map",                     @import("command/map.zig").map },
        .{ "map-pointer",             @import("command/map.zig").mapPointer },
        .{ "move",                    @import("command/move.zig").move },
        .{ "output-layout",           @import("command/layout.zig").outputLayout },
        .{ "resize",                  @import("command/move.zig").resize },
        .{ "send-layout-cmd",         @import("command/layout.zig").sendLayoutCmd },
        .{ "send-to-output",          @import("command/output.zig").sendToOutput },
        .{ "send-to-previous-tags",   @import("command/tags.zig").sendToPreviousTags },
        .{ "set-cursor-warp",         @import("command/config.zig").setCursorWarp },
        .{ "set-focused-tags",        @import("command/tags.zig").setFocusedTags },
        .{ "set-repeat",              @import("command/set_repeat.zig").setRepeat },
        .{ "set-view-tags",           @import("command/tags.zig").setViewTags },
        .{ "snap",                    @import("command/move.zig").snap },
        .{ "spawn",                   @import("command/spawn.zig").spawn },
        .{ "spawn-tagmask",           @import("command/tags.zig").spawnTagmask },
        .{ "swap",                    @import("command/swap.zig").swap},
        .{ "toggle-float",            @import("command/toggle_float.zig").toggleFloat },
        .{ "toggle-focused-tags",     @import("command/tags.zig").toggleFocusedTags },
        .{ "toggle-fullscreen",       @import("command/toggle_fullscreen.zig").toggleFullscreen },
        .{ "toggle-view-tags",        @import("command/tags.zig").toggleViewTags },
        .{ "unmap",                   @import("command/map.zig").unmap },
        .{ "unmap-pointer",           @import("command/map.zig").unmapPointer },
        .{ "xcursor-theme",           @import("command/xcursor_theme.zig").xcursorTheme },
        .{ "zoom",                    @import("command/zoom.zig").zoom },
    },
);
// zig fmt: on
/// All errors a command implementation may return. Every member except
/// `Other` has a fixed message in `errToMsg`; `Other` signals that the
/// failure message was written to the `out` parameter instead.
pub const Error = error{
    NoCommand,
    UnknownCommand,
    NotEnoughArguments,
    TooManyArguments,
    Overflow,
    InvalidButton,
    InvalidCharacter,
    InvalidDirection,
    InvalidPhysicalDirection,
    InvalidOrientation,
    InvalidRgba,
    InvalidValue,
    UnknownOption,
    ConflictingOptions,
    OutOfMemory,
    Other,
};
/// Run a command for the given Seat. The `args` parameter is similar to the
/// classic argv in that the command to be run is passed as the first argument.
/// The optional slice passed as the out parameter must initially be set to
/// null. If the command produces output or Error.Other is returned, the slice
/// will be set to the output of the command or a failure message, respectively.
/// The caller is then responsible for freeing that slice, which will be
/// allocated using the provided allocator.
pub fn run(
    allocator: *std.mem.Allocator,
    seat: *Seat,
    args: []const [:0]const u8,
    out: *?[]const u8,
) Error!void {
    std.debug.assert(out.* == null);
    // Without at least one argument there is no command name to dispatch on.
    if (args.len == 0) return Error.NoCommand;
    // Dispatch on the command name (argv[0]) via the comptime table.
    if (command_impls.get(args[0])) |impl_fn| {
        try impl_fn(allocator, seat, args, out);
    } else {
        return Error.UnknownCommand;
    }
}
/// Return a short error message for the given error. Passing Error.Other is UB
pub fn errToMsg(err: Error) [:0]const u8 {
    // Exhaustive over the Error set; Error.Other never reaches here by
    // contract, since its message lives in the command's out parameter.
    return switch (err) {
        error.NoCommand => "no command given",
        error.UnknownCommand => "unknown command",
        error.UnknownOption => "unknown option",
        error.ConflictingOptions => "options conflict",
        error.NotEnoughArguments => "not enough arguments",
        error.TooManyArguments => "too many arguments",
        error.Overflow => "value out of bounds",
        error.InvalidButton => "invalid button",
        error.InvalidCharacter => "invalid character in argument",
        error.InvalidDirection => "invalid direction. Must be 'next' or 'previous'",
        error.InvalidPhysicalDirection => "invalid direction. Must be 'up', 'down', 'left' or 'right'",
        error.InvalidOrientation => "invalid orientation. Must be 'horizontal', or 'vertical'",
        error.InvalidRgba => "invalid color format, must be hexadecimal 0xRRGGBB or 0xRRGGBBAA",
        error.InvalidValue => "invalid value",
        error.OutOfMemory => "out of memory",
        error.Other => unreachable,
    };
}
|
source/river-0.1.0/river/command.zig
|
// The length of the journey has to be borne with, for every moment
// is necessary.
const std = @import("std");
const Self = @This();
// Fixed page size in bytes.
const Page = [4096]u8;
const PageId = u64;
// Intrusive list of recycled page ids; nodes are heap-allocated.
const FreePages = std.SinglyLinkedList(PageId);
// Backing storage for all pages; a page id is an index into `items`.
pages: std.ArrayList(Page) = undefined,
/// Holds ids of the free pages in the manager
/// Ids of pages are immutable and are never invalidated
/// unless the id is freed, then there is no guarantee that
/// the page access won't produce Use After Free
free_pages: FreePages = undefined,
// Used for FreePages node allocation; also backs `pages`.
allocator: std.mem.Allocator,
/// Initializes an empty page manager backed by `allocator`.
/// Release with `deinit`.
pub fn init(allocator: std.mem.Allocator) Self {
    return Self{
        .pages = std.ArrayList(Page).init(allocator),
        .free_pages = FreePages{},
        .allocator = allocator,
    };
}
/// Frees all pages and every heap-allocated free-list node.
pub fn deinit(self: *Self) void {
    // Destroy all free pages id's
    while (self.free_pages.popFirst()) |id_ptr| {
        self.allocator.destroy(id_ptr);
    }
    self.pages.deinit();
}
/// Returns an id of a new page, reusing a previously released page when
/// one is available. There is no guarantee that the page will be zeroed.
pub fn new_page(self: *Self) u64 {
    if (self.free_pages.popFirst()) |node| {
        // Copy the id out of the heap node before destroying the node.
        const id = node.data;
        self.allocator.destroy(node);
        return id;
    }
    // No free page to recycle: grow the page list by one.
    self.pages.append([_]u8{0} ** 4096) catch {
        std.log.err("Out of memory!", .{});
        // Fix: previously execution fell through after the log and
        // returned a bogus (possibly underflowing) index; fail loudly
        // instead. (Also removed the dead, ill-typed `return null;`
        // after the if/else — the return type is u64.)
        @panic("page manager: out of memory");
    };
    return self.pages.items.len - 1;
}
/// Release a page. There is no guarantee that using the id after the release
/// will return a value. And can produce use after free.
pub fn release_page(self: *Self, id: u64) void {
    // If the page is the last one, shrink the list instead of tracking
    // it as free. Fix: `id + 1 == len` avoids the unsigned underflow
    // that `id == len - 1` hits when the page list is empty.
    if (id + 1 == self.pages.items.len) {
        _ = self.pages.pop();
    }
    // If page is somewhere in the middle, reuse it
    else {
        // The free ids linked list only stores pointers
        // So to keep the ids I need to alloc memory for them
        // in the heap and then manually free them
        const id_node = self.allocator.create(FreePages.Node) catch {
            std.log.err("Out of memory! Leaking a page.", .{});
            return;
        };
        id_node.*.data = id;
        self.free_pages.prepend(id_node);
    }
}
|
src/page_manager.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
const Random = std.rand.DefaultPrng;
const base_log = std.log.scoped(.lesson5);
const cuda = @import("cudaz");
const cu = cuda.cu;
const RawKernels = @import("lesson5_kernel.zig");
/// Entry point: runs the async driver without an event loop; the
/// `nosuspend` asserts that amain's top-level frame never suspends
/// (suspension happens only inside the async-called transpose frames).
pub fn main() !void {
    try nosuspend amain();
}
/// Minimal hand-rolled event loop for async CUDA work: each suspended
/// frame is registered together with the stream it is waiting on, and
/// `joinStreamsAndResume` polls the streams, resuming each frame once
/// its stream reports completion.
const CudaEventLoop = struct {
    // Parallel arrays: running_streams[i] is what suspended_frames[i]
    // waits on. Fixed capacity; registration never allocates.
    running_streams: std.ArrayList(*cuda.Stream),
    suspended_frames: std.ArrayList(anyframe),
    pub fn initCapacity(allocator: Allocator, num: usize) !CudaEventLoop {
        return CudaEventLoop{
            .running_streams = try std.ArrayList(*cuda.Stream).initCapacity(allocator, num),
            .suspended_frames = try std.ArrayList(anyframe).initCapacity(allocator, num),
        };
    }
    pub fn deinit(self: *CudaEventLoop) void {
        self.running_streams.deinit();
        self.suspended_frames.deinit();
    }
    /// Called from inside a frame's `suspend` block to park it until
    /// its stream completes. Capacity must not be exceeded.
    pub fn registerStream(self: *CudaEventLoop, stream: *cuda.Stream, frame: anyframe) void {
        self.running_streams.appendAssumeCapacity(stream);
        self.suspended_frames.appendAssumeCapacity(frame);
    }
    /// Polls all registered streams, resuming each parked frame as its
    /// stream finishes; returns (and deinits self) once all are done.
    pub fn joinStreamsAndResume(self: *CudaEventLoop) void {
        var n_streams = self.running_streams.items.len;
        std.debug.assert(self.suspended_frames.items.len == n_streams);
        while (self.running_streams.items.len > 0) {
            for (self.running_streams.items) |stream, i| {
                if (stream.done()) {
                    // swapRemove keeps the two arrays index-aligned.
                    _ = self.running_streams.swapRemove(i);
                    resume self.suspended_frames.swapRemove(i);
                    break;
                }
            } else {
                // only sleep if no frame was resumed.
                std.time.sleep(100 * std.time.ns_per_us);
            }
        }
        self.deinit();
    }
};
/// Async driver: builds a random square matrix, times a CPU transpose
/// as a baseline, then launches four GPU transpose variants on separate
/// CUDA streams and joins them via the hand-rolled event loop.
pub fn amain() !void {
    var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = general_purpose_allocator.allocator();
    var random = Random.init(10298374);
    const num_cols: usize = 2048;
    var data = try allocator.alloc(u32, num_cols * num_cols);
    defer allocator.free(data);
    random.fill(std.mem.sliceAsBytes(data));
    // CPU reference run, timed for comparison with the GPU variants.
    var trans_cpu = try allocator.alloc(u32, num_cols * num_cols);
    defer allocator.free(trans_cpu);
    var timer = std.time.Timer.start() catch unreachable;
    RawKernels.transposeCpu(data, trans_cpu, num_cols);
    const elapsed_cpu = @intToFloat(f32, timer.lap()) / std.time.ns_per_us;
    base_log.info("CPU transpose of {}x{} matrix took {:.3}ms", .{ num_cols, num_cols, elapsed_cpu });
    try initModule(0);
    gpuInfo(0);
    // var elapsed = try transposeSerial(&stream, d_data, d_trans, num_cols);
    // log.info("GPU serial transpose of {}x{} matrix took {:.3}ms", .{ num_cols, num_cols, elapsed });
    // log.info("GPU serial transpose bandwith: {:.3}MB/s", .{computeBandwith(elapsed, data) * 1e-6});
    // This is not the best way of doing profiling cause we are
    // scheduling all kernels at the same time on the GPU
    // and they will interfere with each others.
    // But it shows how multi streams work.
    var ex = try CudaEventLoop.initCapacity(allocator, 4);
    var per_row = async transposePerRow(&ex, allocator, data, num_cols);
    var per_cell = async transposePerCell(&ex, allocator, data, num_cols);
    var per_block = async transposePerBlock(&ex, allocator, data, num_cols);
    var per_block_inline = async transposePerBlockInlined(&ex, allocator, data, num_cols);
    ex.joinStreamsAndResume();
    // nosuspend ensure that joinStreams has correctly resumed the frames already.
    try nosuspend await per_row;
    try nosuspend await per_cell;
    try nosuspend await per_block;
    try nosuspend await per_block_inline;
}
/// Times a fully serial transpose on the GPU (a 1x1 grid) and verifies
/// the result; returns the elapsed GPU time.
/// NOTE(review): `k` is a module-level kernel collection defined
/// outside this chunk — presumably k.transposeCpu is the serial kernel;
/// confirm against the rest of the file.
fn transposeSerial(stream: *cuda.Stream, d_data: []const u32, d_out: []u32, num_cols: usize) !f64 {
    var timer = cuda.GpuTimer.start(stream);
    try k.transposeCpu.launch(
        stream,
        cuda.Grid.init1D(1, 1),
        .{ d_data, d_out, num_cols },
    );
    timer.stop();
    try expectTransposed(d_data, d_out, num_cols);
    return timer.elapsed();
}
/// GPU transpose, one thread per row: uploads `data` on its own stream,
/// launches the kernel, then suspends until the event loop resumes this
/// frame once the stream is done, and finally checks the result.
fn transposePerRow(ex: *CudaEventLoop, allocator: Allocator, data: []const u32, num_cols: usize) !void {
    var stream = cuda.Stream.init(0) catch unreachable;
    defer stream.deinit();
    // NOTE(review): d_data is never freed (d_out has a matching
    // cuda.free but d_data does not) — possible device-memory leak,
    // unless stream.deinit releases stream allocations; confirm.
    const d_data = try stream.alloc(u32, data.len);
    stream.memcpyHtoD(u32, d_data, data);
    var out = try allocator.alloc(u32, data.len);
    defer allocator.free(out);
    const d_out = try stream.alloc(u32, data.len);
    defer cuda.free(d_out);
    const log = std.log.scoped(.transposePerRow);
    log.info("Scheduling GPU", .{});
    var timer = cuda.GpuTimer.start(&stream);
    k.transposePerRow.launch(
        &stream,
        cuda.Grid.init1D(num_cols, 32),
        .{ d_data, d_out, num_cols },
    ) catch unreachable;
    timer.stop();
    stream.memcpyDtoH(u32, out, d_out);
    // Yield control to main loop
    suspend {
        ex.registerStream(&stream, @frame());
    }
    stream.synchronize();
    const elapsed = timer.elapsed();
    log.info("{}x{} matrix took {:.3}ms", .{ num_cols, num_cols, elapsed });
    log.info("bandwith: {:.3}MB/s", .{computeBandwith(elapsed, data) * 1e-6});
    expectTransposed(data, out, num_cols) catch {
        log.err("didn't properly transpose !", .{});
    };
}
/// GPU transpose, one thread per matrix cell (2D grid of 32x32 blocks);
/// same upload / launch / suspend / verify flow as transposePerRow.
fn transposePerCell(ex: *CudaEventLoop, allocator: Allocator, data: []const u32, num_cols: usize) !void {
    var stream = cuda.Stream.init(0) catch unreachable;
    defer stream.deinit();
    // NOTE(review): d_data is never freed (see transposePerRow) —
    // possible device-memory leak; confirm.
    const d_data = try stream.alloc(u32, data.len);
    stream.memcpyHtoD(u32, d_data, data);
    var out = try allocator.alloc(u32, data.len);
    defer allocator.free(out);
    const d_out = try stream.alloc(u32, data.len);
    defer cuda.free(d_out);
    const log = std.log.scoped(.transposePerCell);
    log.info("Scheduling GPU", .{});
    var timer = cuda.GpuTimer.start(&stream);
    k.transposePerCell.launch(
        &stream,
        cuda.Grid.init2D(num_cols, num_cols, 32, 32),
        .{ d_data, d_out, num_cols },
    ) catch unreachable;
    timer.stop();
    stream.memcpyDtoH(u32, out, d_out);
    // Yield control to main loop
    suspend {
        ex.registerStream(&stream, @frame());
    }
    stream.synchronize();
    const elapsed = timer.elapsed();
    log.info("{}x{} matrix took {:.3}ms", .{ num_cols, num_cols, elapsed });
    log.info("bandwith: {:.3}MB/s", .{computeBandwith(elapsed, data) * 1e-6});
    expectTransposed(data, out, num_cols) catch {
        log.err("didn't properly transpose !", .{});
    };
}
/// GPU transpose working block-by-block with an explicit dynamic
/// shared-memory buffer; same upload / launch / suspend / verify flow
/// as the other variants.
fn transposePerBlock(ex: *CudaEventLoop, allocator: Allocator, data: []const u32, num_cols: usize) !void {
    var out = try allocator.alloc(u32, data.len);
    defer allocator.free(out);
    var stream = cuda.Stream.init(0) catch unreachable;
    defer stream.deinit();
    // NOTE(review): d_data is never freed (see transposePerRow) —
    // possible device-memory leak; confirm.
    const d_data = try stream.alloc(u32, data.len);
    stream.memcpyHtoD(u32, d_data, data);
    const d_out = try stream.alloc(u32, data.len);
    defer cuda.free(d_out);
    const log = std.log.scoped(.transposePerBlock);
    log.info("Scheduling GPU", .{});
    var timer = cuda.GpuTimer.start(&stream);
    const block_size = RawKernels.block_size;
    const grid = cuda.Grid.init2D(num_cols, num_cols, 32, block_size);
    // NOTE(review): the shared-memory size (16*16*16 u32s) is fixed
    // and does not reference block_size — verify it matches what the
    // kernel in lesson5_kernel.zig actually expects.
    try k.transposePerBlock.launchWithSharedMem(
        &stream,
        grid,
        @sizeOf(u32) * 16 * 16 * 16,
        .{ d_data, d_out, num_cols },
    );
    timer.stop();
    stream.memcpyDtoH(u32, out, d_out);
    // Yield control to main loop
    suspend {
        ex.registerStream(&stream, @frame());
    }
    stream.synchronize();
    const elapsed = timer.elapsed();
    log.info("{}x{} matrix took {:.3}ms", .{ num_cols, num_cols, elapsed });
    log.info("bandwith: {:.3}MB/s", .{computeBandwith(elapsed, data) * 1e-6});
    expectTransposed(data, out, num_cols) catch {
        log.err("didn't properly transpose !", .{});
    };
}
/// Benchmark: transpose using the inlined per-block kernel variant.
/// Suspends after launch so the event loop can run other streams.
fn transposePerBlockInlined(ex: *CudaEventLoop, allocator: Allocator, data: []const u32, num_cols: usize) !void {
    var out = try allocator.alloc(u32, data.len);
    defer allocator.free(out);
    var stream = cuda.Stream.init(0) catch unreachable;
    defer stream.deinit();
    const d_data = try stream.alloc(u32, data.len);
    // Fix: the device input buffer was never freed (only d_out was).
    defer cuda.free(d_data);
    stream.memcpyHtoD(u32, d_data, data);
    const d_out = try stream.alloc(u32, data.len);
    defer cuda.free(d_out);
    const log = std.log.scoped(.transposePerBlockInlined);
    log.info("Scheduling GPU", .{});
    var timer = cuda.GpuTimer.start(&stream);
    const block_size = RawKernels.block_size_inline;
    const grid = cuda.Grid.init2D(num_cols, num_cols, 256 / block_size, block_size);
    try k.transposePerBlockInlined.launch(
        &stream,
        grid,
        .{ d_data, d_out, num_cols },
    );
    timer.stop();
    stream.memcpyDtoH(u32, out, d_out);
    // Yield control to main loop
    suspend {
        ex.registerStream(&stream, @frame());
    }
    stream.synchronize();
    const elapsed = timer.elapsed();
    log.info("{}x{} matrix took {:.3}ms", .{ num_cols, num_cols, elapsed });
    log.info("bandwith: {:.3}MB/s", .{computeBandwith(elapsed, data) * 1e-6});
    expectTransposed(data, out, num_cols) catch {
        log.err("didn't properly transpose !", .{});
    };
}
/// Compiled kernel handles — one FnStruct per transpose variant,
/// bound to the functions declared in RawKernels.
const Kernels = struct {
    transposeCpu: cuda.FnStruct("transposeCpu", RawKernels.transposeCpu),
    transposePerRow: cuda.FnStruct("transposePerRow", RawKernels.transposePerRow),
    transposePerCell: cuda.FnStruct("transposePerCell", RawKernels.transposePerCell),
    transposePerBlock: cuda.FnStruct("transposePerBlock", RawKernels.transposePerBlock),
    transposePerBlockInlined: cuda.FnStruct("transposePerBlockInlined", RawKernels.transposePerBlockInlined),
};
/// Global kernel table, populated by `initModule` before the benchmarks run.
var k: Kernels = undefined;
/// Initializes the CUDA device and resolves every kernel into the global `k`.
/// Panics if the device is unavailable or any kernel fails to load.
fn initModule(device: u3) !void {
    // NOTE(review): this stream is created and immediately discarded without
    // deinit — presumably just to force context creation; confirm, otherwise
    // it leaks the stream.
    _ = try cuda.Stream.init(device);
    _ = cuda.initDevice(device) catch @panic("No GPU");
    // Panic if we can't load the module.
    k = Kernels{
        .transposeCpu = @TypeOf(k.transposeCpu).init() catch unreachable,
        .transposePerRow = @TypeOf(k.transposePerRow).init() catch unreachable,
        .transposePerCell = @TypeOf(k.transposePerCell).init() catch unreachable,
        .transposePerBlock = @TypeOf(k.transposePerBlock).init() catch unreachable,
        .transposePerBlockInlined = @TypeOf(k.transposePerBlockInlined).init() catch unreachable,
    };
}
/// Logs the theoretical peak memory bandwidth and cache info of `device`.
fn gpuInfo(device: u8) void {
    // These are never mutated — const instead of var.
    const mem_clock_rate_khz: i32 = cuda.getAttr(device, cuda.Attribute.MemoryClockRate);
    const mem_bus_width_bits: i32 = cuda.getAttr(device, cuda.Attribute.GlobalMemoryBusWidth);
    const mem_bus_width_bytes = @intToFloat(f64, @divExact(mem_bus_width_bits, 8));
    // clock (kHz -> Hz) * bus width in bytes.
    // NOTE(review): DDR memories transfer twice per clock; confirm whether a
    // 2x factor is intentionally omitted here.
    const peek_bandwith = @intToFloat(f64, mem_clock_rate_khz) * 1e3 * mem_bus_width_bytes;
    base_log.info("GPU peek bandwith: {:.3}MB/s", .{peek_bandwith * 1e-6});
    const l1_cache: i32 = cuda.getAttr(device, cuda.Attribute.GlobalL1CacheSupported);
    const l2_cache: i32 = cuda.getAttr(device, cuda.Attribute.L2CacheSize);
    base_log.info("GPU L1 cache {}, L2 cache {}", .{ l1_cache, l2_cache });
}
/// Effective bandwidth in bytes/second for moving `data` in `elapsed_ms` ms.
fn computeBandwith(elapsed_ms: f64, data: []const u32) f64 {
    const total_bytes = @intToFloat(f64, data.len) * @intToFloat(f64, @sizeOf(u32));
    // Convert ms to s by scaling the quotient by 1000.
    return total_bytes / elapsed_ms * 1000;
}
/// Checks that `trans` is the transpose of the square num_cols x num_cols
/// matrix `data`; logs both matrices (when small) on mismatch and returns
/// the comparison error.
fn expectTransposed(data: []const u32, trans: []u32, num_cols: usize) !void {
    var row: usize = 0;
    while (row < num_cols) : (row += 1) {
        var col: usize = 0;
        while (col < num_cols) : (col += 1) {
            std.testing.expectEqual(data[num_cols * col + row], trans[num_cols * row + col]) catch |err| {
                if (data.len < 100) {
                    base_log.err("original: {any}", .{data});
                    base_log.err("transposed: {any}", .{trans});
                }
                base_log.err("failed expectTransposed !", .{});
                return err;
            };
        }
    }
}
|
CS344/src/lesson5.zig
|
const std = @import("std");
const bio = @import("bio/bio.zig");
const Sequence = @import("sequence.zig").Sequence;
/// A k-mer index over a set of sequences: maps every k-mer to the sequences
/// containing it, and every sequence to its flattened k-mer list.
pub fn Database(comptime A: type, comptime KmerLength: comptime_int) type {
    return struct {
        const Self = @This();
        pub const Alphabet = A;
        pub const kmerInfo = bio.kmer.KmerInfo(A, KmerLength);
        allocator: std.mem.Allocator,
        sequences: []Sequence(A),
        // Flattened per-kmer sequence lists; indexed via seq_offset_by_kmer.
        seq_indices: []const usize,
        seq_offset_by_kmer: []const usize,
        seq_count_by_kmer: []const usize,
        // All kmers of all sequences, flattened; indexed via kmer_offset_by_seq.
        kmers: []const kmerInfo.KmerType,
        kmer_offset_by_seq: []const usize,
        kmer_count_by_seq: []const usize,
        pub const ProgressType = enum { analyze, index };
        pub const ProgressCallback = fn (ProgressType, usize, usize) void;
        /// Database takes ownership of `sequences`.
        /// Two passes: first count entries per kmer, then populate the index.
        pub fn init(allocator: std.mem.Allocator, sequences: []Sequence(A), progress_callback: ?ProgressCallback) !Self {
            var total_entries: usize = 0;
            var total_unique_entries: usize = 0;
            // to keep track of the unique kmer of a given sequence
            var seq_by_kmer = try allocator.alloc(usize, kmerInfo.MaxKmers);
            defer allocator.free(seq_by_kmer);
            std.mem.set(usize, seq_by_kmer, std.math.maxInt(usize)); // set to -1 equivalent
            var count_by_kmer = try allocator.alloc(usize, kmerInfo.MaxKmers);
            defer allocator.free(count_by_kmer);
            std.mem.set(usize, count_by_kmer, 0);
            for (sequences) |sequence, seq_idx| {
                var kmer_it = bio.kmer.Iterator(kmerInfo).init(sequence.data);
                while (kmer_it.next()) |kmer| {
                    total_entries += 1;
                    // ambiguous?
                    if (kmer == kmerInfo.AmbiguousKmer)
                        continue;
                    // already counted for this sequence?
                    if (seq_by_kmer[kmer] == seq_idx)
                        continue;
                    seq_by_kmer[kmer] = seq_idx;
                    count_by_kmer[kmer] += 1;
                    total_unique_entries += 1;
                }
                if (progress_callback != null and (seq_idx % 512 == 0 or seq_idx + 1 == sequences.len)) {
                    progress_callback.?(.analyze, seq_idx + 1, sequences.len);
                }
            }
            // Populate DB.
            // Fix: errdefer on each buffer so earlier allocations don't leak
            // when a later allocation fails.
            var kmers = try allocator.alloc(kmerInfo.KmerType, total_entries);
            errdefer allocator.free(kmers);
            var kmer_offset_by_seq = try allocator.alloc(usize, sequences.len);
            errdefer allocator.free(kmer_offset_by_seq);
            var kmer_count_by_seq = try allocator.alloc(usize, sequences.len);
            errdefer allocator.free(kmer_count_by_seq);
            var seq_indices = try allocator.alloc(usize, total_unique_entries);
            errdefer allocator.free(seq_indices);
            var seq_count_by_kmer = try allocator.alloc(usize, kmerInfo.MaxKmers);
            errdefer allocator.free(seq_count_by_kmer);
            var seq_offset_by_kmer = try allocator.alloc(usize, kmerInfo.MaxKmers);
            errdefer allocator.free(seq_offset_by_kmer);
            // zero counts
            std.mem.set(usize, kmer_count_by_seq, 0);
            std.mem.set(usize, seq_count_by_kmer, 0);
            // Prefix sums over count_by_kmer: start of each kmer's list in seq_indices.
            for (seq_offset_by_kmer) |*seq_offset, kmer| {
                seq_offset.* = if (kmer > 0) seq_offset_by_kmer[kmer - 1] + count_by_kmer[kmer - 1] else 0;
            }
            // Reset tracking for unique kmer within a sequence
            std.mem.set(usize, seq_by_kmer, std.math.maxInt(usize)); // set to -1 equivalent
            var kmer_count: usize = 0;
            for (sequences) |sequence, seq_idx| {
                kmer_offset_by_seq[seq_idx] = kmer_count;
                var kmer_it = bio.kmer.Iterator(kmerInfo).init(sequence.data);
                while (kmer_it.next()) |kmer| {
                    // Every kmer (ambiguous included) is stored, matching total_entries.
                    kmers[kmer_count] = kmer;
                    kmer_count += 1;
                    // ambiguous?
                    if (kmer == kmerInfo.AmbiguousKmer)
                        continue;
                    // already counted for this sequence?
                    if (seq_by_kmer[kmer] == seq_idx)
                        continue;
                    seq_by_kmer[kmer] = seq_idx;
                    seq_indices[seq_offset_by_kmer[kmer] + seq_count_by_kmer[kmer]] = seq_idx;
                    seq_count_by_kmer[kmer] += 1;
                }
                kmer_count_by_seq[seq_idx] = kmer_count - kmer_offset_by_seq[seq_idx];
                if (progress_callback != null and (seq_idx % 512 == 0 or seq_idx + 1 == sequences.len)) {
                    progress_callback.?(.index, seq_idx + 1, sequences.len);
                }
            }
            return Self{
                .allocator = allocator,
                .sequences = sequences,
                .seq_indices = seq_indices,
                .seq_offset_by_kmer = seq_offset_by_kmer,
                .seq_count_by_kmer = seq_count_by_kmer,
                .kmers = kmers,
                .kmer_offset_by_seq = kmer_offset_by_seq,
                .kmer_count_by_seq = kmer_count_by_seq,
            };
        }
        /// Frees all index storage and the owned sequences.
        pub fn deinit(self: *Self) void {
            self.allocator.free(self.seq_indices);
            self.allocator.free(self.seq_offset_by_kmer);
            self.allocator.free(self.seq_count_by_kmer);
            self.allocator.free(self.kmers);
            self.allocator.free(self.kmer_offset_by_seq);
            self.allocator.free(self.kmer_count_by_seq);
            for (self.sequences) |*sequence| sequence.deinit();
            self.allocator.free(self.sequences);
        }
    };
}
|
src/database.zig
|
const std = @import("std");
const stdx = @import("stdx");
const Point2 = stdx.math.Point2(u32);
const t = stdx.testing;
const log = stdx.log.scoped(.rect_bin_packer);
/// Index of a span in the packer's span buffer.
const SpanId = u32;
/// Sentinel id meaning "no span" / end of list.
const NullId = std.math.maxInt(SpanId);
/// Invoked after the packer doubles its dimensions.
const ResizeCallback = fn (ctx: ?*anyopaque, width: u32, height: u32) void;
/// A registered resize callback together with its user context pointer.
const ResizeCallbackItem = struct {
    ctx: ?*anyopaque,
    cb: ResizeCallback,
};
/// Rectangle bin packer implemented with the skyline bottom-left algorithm.
/// Trades some performance to pack rects more efficiently.
/// Perform best fit placement. The placement with the lowest y is chosen, ties are broken with lowest waste.
/// Does not own the underlying buffer and only cares about providing the next available rectangle and triggering resize events.
/// When the buffer needs to resize, the width and height are doubled and resize callbacks are invoked.
pub const RectBinPacker = struct {
    /// Backing storage for the skyline spans.
    spans: stdx.ds.CompactSinglyLinkedListBuffer(SpanId, Span),
    /// Current spans from left to right.
    head: SpanId,
    /// In-order callbacks.
    resize_cbs: std.ArrayList(ResizeCallbackItem),
    width: u32,
    height: u32,
    const Self = @This();
    /// Starts with a single span covering [0, width) at y = 0.
    pub fn init(alloc: std.mem.Allocator, width: u32, height: u32) Self {
        var new = Self{
            .spans = stdx.ds.CompactSinglyLinkedListBuffer(SpanId, Span).init(alloc),
            .resize_cbs = std.ArrayList(ResizeCallbackItem).init(alloc),
            .width = width,
            .height = height,
            .head = undefined,
        };
        new.head = new.spans.add(.{ .x = 0, .y = 0, .width = width }) catch @panic("error");
        return new;
    }
    pub fn deinit(self: Self) void {
        self.spans.deinit();
        self.resize_cbs.deinit();
    }
    /// Registers a callback invoked whenever the packer doubles its size.
    pub fn addResizeCallback(self: *Self, ctx: ?*anyopaque, cb: ResizeCallback) void {
        self.resize_cbs.append(.{
            .ctx = ctx,
            .cb = cb,
        }) catch @panic("error");
    }
    /// Removes the first registered callback whose function matches `needle`.
    pub fn removeResizeCallback(self: *Self, needle: ResizeCallback) void {
        for (self.resize_cbs.items) |it, i| {
            if (it.cb == needle) {
                self.resize_cbs.orderedRemove(i);
                break;
            }
        }
    }
    /// Reserves a width x height rect and returns its top-left position.
    /// If nothing fits, repeatedly doubles the packer dimensions, then
    /// notifies the resize callbacks after the rect is placed.
    pub fn allocRect(self: *Self, width: u32, height: u32) Point2 {
        if (self.findRectSpace(width, height)) |res| {
            self.allocRectResult(res);
            return Point2.init(res.x, res.y);
        } else {
            // Perform resize.
            while (true) {
                self.width *= 2;
                self.height *= 2;
                // Add or extend a span.
                const last_id = self.spans.getLast(self.head).?;
                const last = self.spans.getPtrNoCheck(last_id);
                if (last.y == 0) {
                    // Extend.
                    last.width = self.width - last.x;
                } else {
                    // New ground-level span covering the freshly gained width.
                    const start_x = last.x + last.width;
                    _ = self.spans.insertAfter(last_id, .{
                        .x = start_x,
                        .y = 0,
                        .width = self.width - start_x,
                    }) catch @panic("error");
                }
                if (self.findRectSpace(width, height)) |res| {
                    self.allocRectResult(res);
                    // Invoke resize callbacks only after we allocated the rect.
                    for (self.resize_cbs.items) |it| {
                        it.cb(it.ctx, self.width, self.height);
                    }
                    return Point2.init(res.x, res.y);
                }
            }
        }
    }
    /// Commits a placement found by findRectSpace: removes the spans the rect
    /// covers and raises/merges spans so the skyline reflects the new rect.
    fn allocRectResult(self: *Self, res: FindSpaceResult) void {
        // Remove all spans that are covered by the requested width.
        var visited_width: u32 = 0;
        var cur = if (res.prev_span_id == NullId) self.head else self.spans.getNextNoCheck(res.prev_span_id);
        while (cur != NullId) {
            const node = self.spans.nodes.getPtrNoCheck(cur);
            const span = node.data;
            const next = node.next;
            visited_width += span.width;
            if (visited_width <= res.width) {
                // Completely covered.
                self.spans.nodes.remove(cur);
            } else {
                // Modify existing span.
                node.data.x = res.x + res.width;
                node.data.width = visited_width - res.width;
                break;
            }
            cur = next;
        }
        // Reattach the list.
        if (res.prev_span_id == NullId) {
            self.head = cur;
            const head = self.spans.getPtrNoCheck(self.head);
            if (head.y == res.y + res.height) {
                // Extend.
                // head.x equals res.width here, so this grows it back to x = 0.
                head.width += head.x;
                head.x = 0;
            } else {
                // Create span.
                self.head = self.spans.insertBeforeHeadNoCheck(self.head, .{
                    .x = 0,
                    .y = res.y + res.height,
                    .width = res.width,
                }) catch @panic("error");
            }
        } else {
            const prev = self.spans.getPtrNoCheck(res.prev_span_id);
            if (prev.y == res.y + res.height) {
                // Extend prev.
                prev.width += res.width;
                if (cur != NullId) {
                    const next = self.spans.nodes.getNoCheck(cur);
                    if (next.data.y == res.y + res.height) {
                        // Extend prev again by removing next.
                        const next_next = next.next;
                        prev.width += next.data.width;
                        self.spans.nodes.remove(cur);
                        cur = next_next;
                    }
                }
                self.spans.nodes.getPtrNoCheck(res.prev_span_id).next = cur;
            } else {
                self.spans.nodes.getPtrNoCheck(res.prev_span_id).next = cur;
                if (cur != NullId) {
                    const next = self.spans.getPtrNoCheck(cur);
                    if (next.y == res.y + res.height) {
                        // Extend next.
                        next.x = res.x;
                        next.width += res.width;
                    } else {
                        // Create span.
                        _ = self.spans.insertAfter(res.prev_span_id, .{
                            .x = res.x,
                            .y = res.y + res.height,
                            .width = res.width,
                        }) catch @panic("error");
                    }
                } else {
                    // Create span.
                    _ = self.spans.insertAfter(res.prev_span_id, .{
                        .x = res.x,
                        .y = res.y + res.height,
                        .width = res.width,
                    }) catch @panic("error");
                }
            }
        }
    }
    /// Returns the result of the next available position that can fit the requested rect.
    /// Does not allocate the space. Returns null if no position was found.
    fn findRectSpace(self: Self, width: u32, height: u32) ?FindSpaceResult {
        var best_prev_id: SpanId = NullId;
        // NullId doubles as "max u32" sentinel for the best-so-far trackers.
        var best_waste: u32 = NullId;
        var best_x: u32 = NullId;
        var best_y: u32 = NullId;
        var prev: SpanId = NullId;
        var cur = self.head;
        while (cur != NullId) {
            const node = self.spans.nodes.getNoCheck(cur);
            const span = node.data;
            if (span.x + width > self.width) {
                // Can not fit.
                break;
            }
            var waste: u32 = undefined;
            const y_fit = self.findLowestY(cur, span.x, width, &waste);
            if (y_fit + height <= self.height) {
                // Best fit picks the lowest y. If there is a tie, pick the one with the lowest waste.
                if (y_fit < best_y or (y_fit == best_y and waste < best_waste)) {
                    best_y = y_fit;
                    best_x = span.x;
                    best_waste = waste;
                    best_prev_id = prev;
                }
            }
            prev = cur;
            cur = node.next;
        }
        if (best_y == NullId) {
            // No placement was found. Need to resize the atlas.
            return null;
        } else {
            return FindSpaceResult{
                .prev_span_id = best_prev_id,
                .x = best_x,
                .y = best_y,
                .width = width,
                .height = height,
            };
        }
    }
    /// Returns the lowest y value that can fit the requested width starting at a node.
    fn findLowestY(self: Self, cur_: SpanId, start_x: u32, req_width: u32, out_waste: *u32) u32 {
        const end_x = start_x + req_width;
        var min_y: u32 = 0;
        var waste: u32 = 0;
        var visited_width: u32 = 0;
        var cur = cur_;
        while (cur != NullId) {
            const node = self.spans.nodes.getNoCheck(cur);
            const span = node.data;
            if (span.x >= end_x) {
                break;
            }
            if (span.y > min_y) {
                // Elevate min_y and add waste for visited width.
                waste += visited_width * (span.y - min_y);
                min_y = span.y;
                visited_width += span.width;
            } else {
                // Add waste below min_y.
                var under_width = span.width;
                if (under_width + visited_width > req_width) {
                    under_width = req_width - visited_width;
                }
                waste += under_width * (min_y - span.y);
                visited_width += span.width;
            }
            cur = node.next;
        }
        out_waste.* = waste;
        return min_y;
    }
};
/// A horizontal skyline segment: covers [x, x + width) at height y.
const Span = struct {
    x: u32,
    y: u32,
    width: u32,
};
/// Candidate placement produced by findRectSpace.
const FindSpaceResult = struct {
    /// Span preceding the placement, or NullId when placing at the head.
    prev_span_id: SpanId,
    x: u32,
    y: u32,
    width: u32,
    height: u32,
};
// Second 5x5 rect lands level with the first's top span; both merge into one.
test "Extend prev span after insert." {
    var packer = RectBinPacker.init(t.alloc, 10, 10);
    defer packer.deinit();
    var pos = packer.allocRect(5, 5);
    try t.eq(packer.spans.size(), 2);
    pos = packer.allocRect(5, 5);
    try t.eq(packer.spans.size(), 1);
    const head = packer.spans.getNoCheck(packer.head);
    try t.eq(head, .{
        .x = 0,
        .y = 5,
        .width = 10,
    });
}
// Short rects stack on the right side, producing a new tail span each time.
test "Insert new tail." {
    var packer = RectBinPacker.init(t.alloc, 10, 10);
    defer packer.deinit();
    var pos = packer.allocRect(5, 5);
    try t.eq(packer.spans.size(), 2);
    pos = packer.allocRect(5, 1);
    try t.eq(packer.spans.size(), 2);
    pos = packer.allocRect(5, 1);
    try t.eq(packer.spans.size(), 2);
    var node = packer.spans.nodes.getNoCheck(packer.head);
    try t.eq(node.data, .{ .x = 0, .y = 5, .width = 5 });
    node = packer.spans.nodes.getNoCheck(node.next);
    try t.eq(node.data, .{ .x = 5, .y = 2, .width = 5 });
}
// A rect topping out level with the following span gets absorbed into it.
test "Extend next span after insert." {
    var packer = RectBinPacker.init(t.alloc, 10, 10);
    defer packer.deinit();
    var pos = packer.allocRect(4, 5);
    pos = packer.allocRect(2, 2);
    try t.eq(packer.spans.size(), 3);
    var node = packer.spans.nodes.getNoCheck(packer.head);
    try t.eq(node.data, .{ .x = 0, .y = 5, .width = 4 });
    node = packer.spans.nodes.getNoCheck(node.next);
    try t.eq(node.data, .{ .x = 4, .y = 2, .width = 2 });
    node = packer.spans.nodes.getNoCheck(node.next);
    try t.eq(node.data, .{ .x = 6, .y = 0, .width = 4 });
    try t.eq(node.next, NullId);
    pos = packer.allocRect(4, 4);
    try t.eq(packer.spans.size(), 3);
    node = packer.spans.nodes.getNoCheck(packer.head);
    try t.eq(node.data, .{ .x = 0, .y = 5, .width = 4 });
    node = packer.spans.nodes.getNoCheck(node.next);
    try t.eq(node.data, .{ .x = 4, .y = 2, .width = 2 });
    node = packer.spans.nodes.getNoCheck(node.next);
    try t.eq(node.data, .{ .x = 6, .y = 4, .width = 4 });
    try t.eq(node.next, NullId);
    pos = packer.allocRect(2, 2);
    try t.eq(packer.spans.size(), 2);
    const last_id = packer.spans.getLast(packer.head).?;
    const last = packer.spans.getNoCheck(last_id);
    try t.eq(last, .{
        .x = 4,
        .y = 4,
        .width = 6,
    });
}
// Filling a notch level with both neighbors collapses three spans into one.
test "Merge prev, inserted, and next span into prev." {
    var packer = RectBinPacker.init(t.alloc, 10, 10);
    defer packer.deinit();
    var pos = packer.allocRect(4, 4);
    pos = packer.allocRect(2, 2);
    pos = packer.allocRect(4, 4);
    try t.eq(packer.spans.size(), 3);
    pos = packer.allocRect(2, 2);
    try t.eq(packer.spans.size(), 1);
    const head = packer.spans.getNoCheck(packer.head);
    try t.eq(head, .{
        .x = 0,
        .y = 4,
        .width = 10,
    });
}
// Inserted rect reaches the next span's height; next absorbs it.
test "Extend next after insert." {
    var packer = RectBinPacker.init(t.alloc, 10, 10);
    defer packer.deinit();
    var pos = packer.allocRect(4, 1);
    pos = packer.allocRect(6, 4);
    try t.eq(packer.spans.size(), 2);
    pos = packer.allocRect(4, 3);
    try t.eq(packer.spans.size(), 1);
    const head = packer.spans.getNoCheck(packer.head);
    try t.eq(head, .{
        .x = 0,
        .y = 4,
        .width = 10,
    });
}
// Placing over the head span creates a fresh head at the new height.
test "Insert new head." {
    var packer = RectBinPacker.init(t.alloc, 10, 10);
    defer packer.deinit();
    var pos = packer.allocRect(4, 1);
    pos = packer.allocRect(6, 4);
    try t.eq(packer.spans.size(), 2);
    pos = packer.allocRect(4, 2);
    try t.eq(packer.spans.size(), 2);
    var node = packer.spans.nodes.getNoCheck(packer.head);
    try t.eq(node.data, .{ .x = 0, .y = 3, .width = 4 });
    node = packer.spans.nodes.getNoCheck(node.next);
    try t.eq(node.data, .{ .x = 4, .y = 4, .width = 6 });
}
// 6-wide rects can't fit beside each other in a 10-wide atlas, so they stack.
test "Stack from not enough remaing space at the right." {
    var packer = RectBinPacker.init(t.alloc, 10, 10);
    defer packer.deinit();
    var pos = packer.allocRect(6, 1);
    pos = packer.allocRect(6, 1);
    pos = packer.allocRect(6, 1);
    try t.eq(packer.spans.size(), 2);
    var node = packer.spans.nodes.getNoCheck(packer.head);
    try t.eq(node.data, .{ .x = 0, .y = 3, .width = 6 });
    node = packer.spans.nodes.getNoCheck(node.next);
    try t.eq(node.data, .{ .x = 6, .y = 0, .width = 4 });
}
// Oversized rect forces a 10x10 -> 20x20 doubling; callback sees new size.
test "Resize." {
    var packer = RectBinPacker.init(t.alloc, 10, 10);
    defer packer.deinit();
    const Context = struct {
        width: u32,
        height: u32,
    };
    const S = struct {
        fn onResize(ptr: ?*anyopaque, width: u32, height: u32) void {
            const ctx_ = stdx.mem.ptrCastAlign(*Context, ptr);
            ctx_.width = width;
            ctx_.height = height;
        }
    };
    var ctx = Context{ .width = 0, .height = 0 };
    packer.addResizeCallback(&ctx, S.onResize);
    _ = packer.allocRect(11, 11);
    try t.eq(ctx.width, 20);
    try t.eq(ctx.height, 20);
    try t.eq(packer.spans.size(), 2);
    var node = packer.spans.nodes.getNoCheck(packer.head);
    try t.eq(node.data, .{ .x = 0, .y = 11, .width = 11 });
    node = packer.spans.nodes.getNoCheck(node.next);
    try t.eq(node.data, .{ .x = 11, .y = 0, .width = 9 });
}
/// Allocation is constant time but inefficient with space allocation.
/// Tracks the next available pos and the max height of the current row.
/// Upon resize, only the height is doubled since context info is lost from the other rows.
pub const FastBinPacker = struct {
    width: u32,
    height: u32,
    /// Start pos for the next glyph.
    next_x: u32,
    next_y: u32,
    /// The max height of the current row.
    /// Used to advance next_y once width is reached for the current row.
    row_height: u32,
    /// In-order callbacks.
    resize_cbs: std.ArrayList(ResizeCallbackItem),
    const Self = @This();
    pub fn init(alloc: std.mem.Allocator, width: u32, height: u32) @This() {
        return .{
            .width = width,
            .height = height,
            .next_x = 0,
            .next_y = 0,
            .row_height = 0,
            .resize_cbs = std.ArrayList(ResizeCallbackItem).init(alloc),
        };
    }
    pub fn deinit(self: Self) void {
        self.resize_cbs.deinit();
    }
    /// Returns the top-left position for a width x height rect.
    /// Wraps to a new row when the current one is full; doubles the height
    /// (notifying callbacks) when the buffer runs out vertically.
    /// Panics if a single rect is wider than the buffer.
    pub fn allocRect(self: *Self, width: u32, height: u32) Point2 {
        if (self.next_x + width > self.width) {
            // Wrap to the next row.
            if (self.row_height == 0) {
                // Current width can't fit.
                @panic("Requested width too large.");
            }
            self.next_y += self.row_height;
            self.next_x = 0;
            self.row_height = 0;
            return self.allocRect(width, height);
        }
        if (self.next_y + height > self.height) {
            // Increase buffer height.
            self.height *= 2;
            // Invoke resize callbacks only after we allocated the rect.
            for (self.resize_cbs.items) |it| {
                it.cb(it.ctx, self.width, self.height);
            }
            return self.allocRect(width, height);
        }
        defer self.advancePos(width, height);
        return Point2.init(self.next_x, self.next_y);
    }
    fn advancePos(self: *Self, width: u32, height: u32) void {
        self.next_x += width;
        // Fix: compare the rect's HEIGHT against the row height. The old code
        // compared `width`, which could leave row_height too small and make
        // the next row overlap rects in this one.
        if (height > self.row_height) {
            self.row_height = height;
        }
    }
};
// Rects are placed left-to-right along the first row.
test "FastBinPacker" {
    var packer = FastBinPacker.init(t.alloc, 10, 10);
    defer packer.deinit();
    var pos = packer.allocRect(5, 5);
    try t.eq(pos, Point2.init(0, 0));
    pos = packer.allocRect(5, 5);
    try t.eq(pos, Point2.init(5, 0));
}
|
graphics/src/rect_bin_packer.zig
|
const std = @import("std");
const builtin = @import("builtin");
const debug = std.debug;
const testing = std.testing;
/// Low-level get/set/slice machinery shared by PackedIntArray and
/// PackedIntSlice: reads and writes `Int` values at arbitrary bit positions
/// inside a byte buffer, honoring the requested endianness.
pub fn PackedIntIo(comptime Int: type, comptime endian: builtin.Endian) type {
    //The general technique employed here is to cast bytes in the array to a container
    // integer (having bits % 8 == 0) large enough to contain the number of bits we want,
    // then we can retrieve or store the new value with a relative minimum of masking
    // and shifting. In this worst case, this means that we'll need an integer that's
    // actually 1 byte larger than the minimum required to store the bits, because it
    // is possible that the bits start at the end of the first byte, continue through
    // zero or more, then end in the beginning of the last. But, if we try to access
    // a value in the very last byte of memory with that integer size, that extra byte
    // will be out of bounds. Depending on the circumstances of the memory, that might
    // mean the OS fatally kills the program. Thus, we use a larger container (MaxIo)
    // most of the time, but a smaller container (MinIo) when touching the last byte
    // of the memory.
    const int_bits = comptime std.meta.bitCount(Int);
    //in the best case, this is the number of bytes we need to touch
    // to read or write a value, as bits
    const min_io_bits = ((int_bits + 7) / 8) * 8;
    //in the worst case, this is the number of bytes we need to touch
    // to read or write a value, as bits
    const max_io_bits = switch (int_bits) {
        0 => 0,
        1 => 8,
        2...9 => 16,
        10...65535 => ((int_bits / 8) + 2) * 8,
        else => unreachable,
    };
    //we bitcast the desired Int type to an unsigned version of itself
    // to avoid issues with shifting signed ints.
    const UnInt = @IntType(false, int_bits);
    //The minimum container int type
    const MinIo = @IntType(false, min_io_bits);
    //The maximum container int type
    const MaxIo = @IntType(false, max_io_bits);
    return struct {
        ///Returns the Int at `index`, reading `bit_offset` bits past the
        /// start of `bytes`.
        // NOTE(review): bit_offset is u7 here but u3 in set() below — confirm
        // whether the asymmetry is intentional.
        pub fn get(bytes: []const u8, index: usize, bit_offset: u7) Int {
            if (int_bits == 0) return 0;
            const bit_index = (index * int_bits) + bit_offset;
            const max_end_byte = (bit_index + max_io_bits) / 8;
            //Using the larger container size will potentially read out of bounds
            if (max_end_byte > bytes.len) return getBits(bytes, MinIo, bit_index);
            return getBits(bytes, MaxIo, bit_index);
        }
        ///Reads int_bits starting at bit_index through a Container-sized load.
        fn getBits(bytes: []const u8, comptime Container: type, bit_index: usize) Int {
            const container_bits = comptime std.meta.bitCount(Container);
            const Shift = std.math.Log2Int(Container);
            const start_byte = bit_index / 8;
            const head_keep_bits = bit_index - (start_byte * 8);
            const tail_keep_bits = container_bits - (int_bits + head_keep_bits);
            //read bytes as container
            const value_ptr = @ptrCast(*align(1) const Container, &bytes[start_byte]);
            var value = value_ptr.*;
            //normalize to native byte order before shifting
            if (endian != builtin.endian) value = @byteSwap(Container, value);
            switch (endian) {
                .Big => {
                    value <<= @intCast(Shift, head_keep_bits);
                    value >>= @intCast(Shift, head_keep_bits);
                    value >>= @intCast(Shift, tail_keep_bits);
                },
                .Little => {
                    value <<= @intCast(Shift, tail_keep_bits);
                    value >>= @intCast(Shift, tail_keep_bits);
                    value >>= @intCast(Shift, head_keep_bits);
                },
            }
            return @bitCast(Int, @truncate(UnInt, value));
        }
        ///Stores `int` at `index`, writing `bit_offset` bits past the
        /// start of `bytes`.
        pub fn set(bytes: []u8, index: usize, bit_offset: u3, int: Int) void {
            if (int_bits == 0) return;
            const bit_index = (index * int_bits) + bit_offset;
            const max_end_byte = (bit_index + max_io_bits) / 8;
            //Using the larger container size will potentially write out of bounds
            if (max_end_byte > bytes.len) return setBits(bytes, MinIo, bit_index, int);
            setBits(bytes, MaxIo, bit_index, int);
        }
        ///Read-modify-write of int_bits at bit_index via a Container-sized access.
        fn setBits(bytes: []u8, comptime Container: type, bit_index: usize, int: Int) void {
            const container_bits = comptime std.meta.bitCount(Container);
            const Shift = std.math.Log2Int(Container);
            const start_byte = bit_index / 8;
            const head_keep_bits = bit_index - (start_byte * 8);
            const tail_keep_bits = container_bits - (int_bits + head_keep_bits);
            const keep_shift = switch (endian) {
                .Big => @intCast(Shift, tail_keep_bits),
                .Little => @intCast(Shift, head_keep_bits),
            };
            //position the bits where they need to be in the container
            const value = @intCast(Container, @bitCast(UnInt, int)) << keep_shift;
            //read existing bytes
            const target_ptr = @ptrCast(*align(1) Container, &bytes[start_byte]);
            var target = target_ptr.*;
            if (endian != builtin.endian) target = @byteSwap(Container, target);
            //zero the bits we want to replace in the existing bytes
            const inv_mask = @intCast(Container, std.math.maxInt(UnInt)) << keep_shift;
            const mask = ~inv_mask;
            target &= mask;
            //merge the new value
            target |= value;
            if (endian != builtin.endian) target = @byteSwap(Container, target);
            //save it back
            target_ptr.* = target;
        }
        ///Returns a packed slice view over elements [start, end), sharing
        /// the same backing bytes (no copy).
        fn slice(bytes: []u8, bit_offset: u3, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(end >= start);
            const length = end - start;
            const bit_index = (start * int_bits) + bit_offset;
            const start_byte = bit_index / 8;
            const end_byte = (bit_index + (length * int_bits) + 7) / 8;
            const new_bytes = bytes[start_byte..end_byte];
            if (length == 0) return PackedIntSliceEndian(Int, endian).init(new_bytes[0..0], 0);
            var new_slice = PackedIntSliceEndian(Int, endian).init(new_bytes, length);
            new_slice.bit_offset = @intCast(u3, (bit_index - (start_byte * 8)));
            return new_slice;
        }
        ///Reinterprets the same bytes as packed NewInt elements; the total
        /// bit count must divide evenly into NewInt-sized elements.
        fn sliceCast(bytes: []u8, comptime NewInt: type, comptime new_endian: builtin.Endian, bit_offset: u3, old_len: usize) PackedIntSliceEndian(NewInt, new_endian) {
            const new_int_bits = comptime std.meta.bitCount(NewInt);
            const New = PackedIntSliceEndian(NewInt, new_endian);
            const total_bits = (old_len * int_bits);
            const new_int_count = total_bits / new_int_bits;
            debug.assert(total_bits == new_int_count * new_int_bits);
            var new = New.init(bytes, new_int_count);
            new.bit_offset = bit_offset;
            return new;
        }
    };
}
///Creates a bit-packed array of integers of type Int. Bits
/// are packed using native endianess and without storing any meta
/// data. PackedIntArray(i3, 8) will occupy exactly 3 bytes of memory.
pub fn PackedIntArray(comptime Int: type, comptime int_count: usize) type {
    // Thin wrapper pinning the endianness to the host's.
    return PackedIntArrayEndian(Int, builtin.endian, int_count);
}
///A fixed-size array of `int_count` Ints packed back-to-back at the bit
/// level with the requested endianness, with no metadata stored.
pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: builtin.Endian, comptime int_count: usize) type {
    const bits_per_element = comptime std.meta.bitCount(Int);
    const byte_count = (bits_per_element * int_count + 7) / 8;
    const IoImpl = PackedIntIo(Int, endian);
    return struct {
        const Self = @This();
        bytes: [byte_count]u8,
        ///Number of Ints stored in this array.
        pub fn len(self: Self) usize {
            return int_count;
        }
        ///Builds a packed array from an ordinary (unpacked) array,
        /// typically an array literal.
        pub fn init(ints: [int_count]Int) Self {
            var result = @as(Self, undefined);
            var i: usize = 0;
            while (i < int_count) : (i += 1) result.set(i, ints[i]);
            return result;
        }
        ///Reads the Int stored at `index`.
        pub fn get(self: Self, index: usize) Int {
            debug.assert(index < int_count);
            return IoImpl.get(self.bytes, index, 0);
        }
        ///Writes `int` into the array at `index`.
        pub fn set(self: *Self, index: usize, int: Int) void {
            debug.assert(index < int_count);
            return IoImpl.set(&self.bytes, index, 0, int);
        }
        ///Views elements [start, end) as a packed slice sharing this storage.
        pub fn slice(self: *Self, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(start < int_count);
            debug.assert(end <= int_count);
            return IoImpl.slice(&self.bytes, 0, start, end);
        }
        ///Reinterprets this array as packed NewInt elements (same endianness).
        /// NewInt's bit width must evenly divide the array's total bits.
        pub fn sliceCast(self: *Self, comptime NewInt: type) PackedIntSlice(NewInt) {
            return self.sliceCastEndian(NewInt, endian);
        }
        ///Reinterprets this array as packed NewInt elements with the given
        /// endianness. NewInt's bit width must evenly divide the total bits.
        pub fn sliceCastEndian(self: *Self, comptime NewInt: type, comptime new_endian: builtin.Endian) PackedIntSliceEndian(NewInt, new_endian) {
            return IoImpl.sliceCast(&self.bytes, NewInt, new_endian, 0, int_count);
        }
    };
}
///Uses a slice as a bit-packed block of int_count integers of type Int.
/// Bits are packed using native endianess and without storing any meta
/// data.
pub fn PackedIntSlice(comptime Int: type) type {
    // Thin wrapper pinning the endianness to the host's.
    return PackedIntSliceEndian(Int, builtin.endian);
}
///A view over externally-owned bytes holding `int_count` Ints packed at the
/// bit level with the requested endianness; stores no metadata in the bytes
/// themselves.
pub fn PackedIntSliceEndian(comptime Int: type, comptime endian: builtin.Endian) type {
    const bits_per_element = comptime std.meta.bitCount(Int);
    const IoImpl = PackedIntIo(Int, endian);
    return struct {
        const Self = @This();
        bytes: []u8,
        int_count: usize,
        bit_offset: u3,
        ///Number of Ints viewed by this slice.
        pub fn len(self: Self) usize {
            return self.int_count;
        }
        ///Bytes needed to back a packed slice of `int_count` Ints.
        pub fn bytesRequired(int_count: usize) usize {
            return (bits_per_element * int_count + 7) / 8;
        }
        ///Wraps `bytes` as a packed slice of `int_count` elements.
        /// `bytes` must be at least bytesRequired(int_count) long.
        pub fn init(bytes: []u8, int_count: usize) Self {
            debug.assert(bytes.len >= bytesRequired(int_count));
            return Self{
                .bytes = bytes,
                .int_count = int_count,
                .bit_offset = 0,
            };
        }
        ///Reads the Int stored at `index`.
        pub fn get(self: Self, index: usize) Int {
            debug.assert(index < self.int_count);
            return IoImpl.get(self.bytes, index, self.bit_offset);
        }
        ///Writes `int` into the slice at `index`.
        pub fn set(self: *Self, index: usize, int: Int) void {
            debug.assert(index < self.int_count);
            return IoImpl.set(self.bytes, index, self.bit_offset, int);
        }
        ///Views elements [start, end) as a packed sub-slice (shared storage).
        pub fn slice(self: Self, start: usize, end: usize) PackedIntSliceEndian(Int, endian) {
            debug.assert(start < self.int_count);
            debug.assert(end <= self.int_count);
            return IoImpl.slice(self.bytes, self.bit_offset, start, end);
        }
        ///Reinterprets this slice as packed NewInt elements (same endianness).
        /// NewInt's bit width must evenly divide this slice's total bits.
        pub fn sliceCast(self: Self, comptime NewInt: type) PackedIntSliceEndian(NewInt, endian) {
            return self.sliceCastEndian(NewInt, endian);
        }
        ///Reinterprets this slice as packed NewInt elements with the given
        /// endianness. NewInt's bit width must evenly divide the total bits.
        pub fn sliceCastEndian(self: Self, comptime NewInt: type, comptime new_endian: builtin.Endian) PackedIntSliceEndian(NewInt, new_endian) {
            return IoImpl.sliceCast(self.bytes, NewInt, new_endian, self.bit_offset, self.int_count);
        }
    };
}
test "PackedIntArray" {
    //Exercises every integer bit width from 0 through max_bits, alternating
    // signedness, writing a wrapping counting pattern and reading it back.
    @setEvalBranchQuota(10000);
    const max_bits = 256;
    const int_count = 19;
    comptime var bits = 0;
    //Fix: use max_bits rather than a duplicated literal 256 so the loop
    // bound cannot silently diverge from the named constant.
    inline while (bits <= max_bits) : (bits += 1) {
        //alternate unsigned and signed
        const even = bits % 2 == 0;
        const I = @IntType(even, bits);
        const PackedArray = PackedIntArray(I, int_count);
        //int_count ints of `bits` bits each, rounded up to whole bytes
        const expected_bytes = ((bits * int_count) + 7) / 8;
        testing.expect(@sizeOf(PackedArray) == expected_bytes);
        var data = @as(PackedArray, undefined);
        //write values, counting up
        var i = @as(usize, 0);
        var count = @as(I, 0);
        while (i < data.len()) : (i += 1) {
            data.set(i, count);
            //0-bit ints hold only the value 0, so never increment them
            if (bits > 0) count +%= 1;
        }
        //read and verify values
        i = 0;
        count = 0;
        while (i < data.len()) : (i += 1) {
            const val = data.get(i);
            testing.expect(val == count);
            if (bits > 0) count +%= 1;
        }
    }
}
test "PackedIntArray init" {
    //Initializing from an array literal should make every element read back
    // as its own index.
    const PackedArray = PackedIntArray(u3, 8);
    var arr = PackedArray.init([_]u3{ 0, 1, 2, 3, 4, 5, 6, 7 });
    var idx: usize = 0;
    while (idx < arr.len()) : (idx += 1) {
        testing.expect(arr.get(idx) == idx);
    }
}
test "PackedIntSlice" {
    //Same round-trip pattern as the PackedIntArray test, but over a caller
    // provided byte buffer sized for the widest case.
    @setEvalBranchQuota(10000);
    const max_bits = 256;
    const int_count = 19;
    const total_bits = max_bits * int_count;
    const total_bytes = (total_bits + 7) / 8;
    var buffer: [total_bytes]u8 = undefined;
    comptime var bits = 0;
    //Fix: loop to max_bits instead of the literal 256. The buffer above is
    // sized from max_bits, so letting the two diverge would overrun it.
    inline while (bits <= max_bits) : (bits += 1) {
        //alternate unsigned and signed
        const even = bits % 2 == 0;
        const I = @IntType(even, bits);
        const P = PackedIntSlice(I);
        var data = P.init(&buffer, int_count);
        //write values, counting up
        var i = @as(usize, 0);
        var count = @as(I, 0);
        while (i < data.len()) : (i += 1) {
            data.set(i, count);
            //0-bit ints hold only the value 0, so never increment them
            if (bits > 0) count +%= 1;
        }
        //read and verify values
        i = 0;
        count = 0;
        while (i < data.len()) : (i += 1) {
            const val = data.get(i);
            testing.expect(val == count);
            if (bits > 0) count +%= 1;
        }
    }
}
test "PackedIntSlice of PackedInt(Array/Slice)" {
    const max_bits = 16;
    const int_count = 19;
    comptime var bits = 0;
    inline while (bits <= max_bits) : (bits += 1) {
        const Int = @IntType(false, bits);
        const PackedArray = PackedIntArray(Int, int_count);
        var packed_array = @as(PackedArray, undefined);
        //number of distinct values representable at this bit width
        const limit = (1 << bits);
        var i = @as(usize, 0);
        while (i < packed_array.len()) : (i += 1) {
            packed_array.set(i, @intCast(Int, i % limit));
        }
        //slice of array
        var packed_slice = packed_array.slice(2, 5);
        testing.expect(packed_slice.len() == 3);
        //the slice's bytes must span exactly the bits it covers, including
        // any leading bit_offset, rounded up to whole bytes
        const ps_bit_count = (bits * packed_slice.len()) + packed_slice.bit_offset;
        const ps_expected_bytes = (ps_bit_count + 7) / 8;
        testing.expect(packed_slice.bytes.len == ps_expected_bytes);
        testing.expect(packed_slice.get(0) == 2 % limit);
        testing.expect(packed_slice.get(1) == 3 % limit);
        testing.expect(packed_slice.get(2) == 4 % limit);
        packed_slice.set(1, 7 % limit);
        testing.expect(packed_slice.get(1) == 7 % limit);
        //write through slice
        testing.expect(packed_array.get(3) == 7 % limit);
        //slice of a slice
        const packed_slice_two = packed_slice.slice(0, 3);
        testing.expect(packed_slice_two.len() == 3);
        const ps2_bit_count = (bits * packed_slice_two.len()) + packed_slice_two.bit_offset;
        const ps2_expected_bytes = (ps2_bit_count + 7) / 8;
        testing.expect(packed_slice_two.bytes.len == ps2_expected_bytes);
        testing.expect(packed_slice_two.get(1) == 7 % limit);
        testing.expect(packed_slice_two.get(2) == 4 % limit);
        //size one case
        const packed_slice_three = packed_slice_two.slice(1, 2);
        testing.expect(packed_slice_three.len() == 1);
        const ps3_bit_count = (bits * packed_slice_three.len()) + packed_slice_three.bit_offset;
        const ps3_expected_bytes = (ps3_bit_count + 7) / 8;
        testing.expect(packed_slice_three.bytes.len == ps3_expected_bytes);
        testing.expect(packed_slice_three.get(0) == 7 % limit);
        //empty slice case
        const packed_slice_empty = packed_slice.slice(0, 0);
        testing.expect(packed_slice_empty.len() == 0);
        testing.expect(packed_slice_empty.bytes.len == 0);
        //slicing at byte boundaries
        const packed_slice_edge = packed_array.slice(8, 16);
        testing.expect(packed_slice_edge.len() == 8);
        const pse_bit_count = (bits * packed_slice_edge.len()) + packed_slice_edge.bit_offset;
        const pse_expected_bytes = (pse_bit_count + 7) / 8;
        testing.expect(packed_slice_edge.bytes.len == pse_expected_bytes);
        //element 8 is byte aligned for every bit width: 8 * bits is always a
        // multiple of 8
        testing.expect(packed_slice_edge.bit_offset == 0);
    }
}
test "PackedIntSlice accumulating bit offsets" {
    //Repeatedly re-slicing from element 1 accumulates bit offsets as slices
    // start at progressively less aligned positions. bit_offset is a u3, so
    // the standard debugging asserts should catch any overflow.
    {
        const PackedArray = PackedIntArray(u3, 16);
        var packed_array = @as(PackedArray, undefined);
        var remaining = packed_array.slice(0, packed_array.len());
        var chops: usize = 0;
        while (chops < packed_array.len() - 1) : (chops += 1) {
            remaining = remaining.slice(1, remaining.len());
        }
    }
    {
        const PackedArray = PackedIntArray(u11, 88);
        var packed_array = @as(PackedArray, undefined);
        var remaining = packed_array.slice(0, packed_array.len());
        var chops: usize = 0;
        while (chops < packed_array.len() - 1) : (chops += 1) {
            remaining = remaining.slice(1, remaining.len());
        }
    }
}
//@NOTE: As I do not have a big endian system to test this on,
// big endian values were not tested
test "PackedInt(Array/Slice) sliceCast" {
    const PackedArray = PackedIntArray(u1, 16);
    //alternating single-bit pattern, then viewed through wider int widths
    var packed_array = PackedArray.init([_]u1{ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 });
    const packed_slice_cast_2 = packed_array.sliceCast(u2);
    const packed_slice_cast_4 = packed_slice_cast_2.sliceCast(u4);
    //u9 does not divide 16 bits evenly, so trim to a multiple of 9 first
    var packed_slice_cast_9 = packed_array.slice(0, (packed_array.len() / 9) * 9).sliceCast(u9);
    const packed_slice_cast_3 = packed_slice_cast_9.sliceCast(u3);
    var i = @as(usize, 0);
    while (i < packed_slice_cast_2.len()) : (i += 1) {
        //bit order within the wider element depends on pack endianness
        const val = switch (builtin.endian) {
            .Big => 0b01,
            .Little => 0b10,
        };
        testing.expect(packed_slice_cast_2.get(i) == val);
    }
    i = 0;
    while (i < packed_slice_cast_4.len()) : (i += 1) {
        const val = switch (builtin.endian) {
            .Big => 0b0101,
            .Little => 0b1010,
        };
        testing.expect(packed_slice_cast_4.get(i) == val);
    }
    i = 0;
    while (i < packed_slice_cast_9.len()) : (i += 1) {
        const val = 0b010101010;
        testing.expect(packed_slice_cast_9.get(i) == val);
        //overwrite so the u3 view below observes the new pattern
        packed_slice_cast_9.set(i, 0b111000111);
    }
    i = 0;
    while (i < packed_slice_cast_3.len()) : (i += 1) {
        //NOTE(review): both switch arms are identical here; the switch is
        // presumably kept for symmetry with the endian-dependent cases above.
        const val = switch (builtin.endian) {
            .Big => if (i % 2 == 0) @as(u3, 0b111) else @as(u3, 0b000),
            .Little => if (i % 2 == 0) @as(u3, 0b111) else @as(u3, 0b000),
        };
        testing.expect(packed_slice_cast_3.get(i) == val);
    }
}
test "PackedInt(Array/Slice)Endian" {
    {
        const PackedArrayBe = PackedIntArrayEndian(u4, .Big, 8);
        var packed_array_be = PackedArrayBe.init([_]u4{
            0,
            1,
            2,
            3,
            4,
            5,
            6,
            7,
        });
        //big endian: element 0 occupies the high nibble of byte 0
        testing.expect(packed_array_be.bytes[0] == 0b00000001);
        testing.expect(packed_array_be.bytes[1] == 0b00100011);
        var i = @as(usize, 0);
        while (i < packed_array_be.len()) : (i += 1) {
            testing.expect(packed_array_be.get(i) == i);
        }
        //reinterpreting the same bytes as little endian swaps nibble pairs
        var packed_slice_le = packed_array_be.sliceCastEndian(u4, .Little);
        i = 0;
        while (i < packed_slice_le.len()) : (i += 1) {
            const val = if (i % 2 == 0) i + 1 else i - 1;
            testing.expect(packed_slice_le.get(i) == val);
        }
        var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u4, .Little);
        i = 0;
        while (i < packed_slice_le_shift.len()) : (i += 1) {
            const val = if (i % 2 == 0) i else i + 2;
            testing.expect(packed_slice_le_shift.get(i) == val);
        }
    }
    {
        //non-byte-multiple width: u11 elements straddle byte boundaries
        const PackedArrayBe = PackedIntArrayEndian(u11, .Big, 8);
        var packed_array_be = PackedArrayBe.init([_]u11{
            0,
            1,
            2,
            3,
            4,
            5,
            6,
            7,
        });
        testing.expect(packed_array_be.bytes[0] == 0b00000000);
        testing.expect(packed_array_be.bytes[1] == 0b00000000);
        testing.expect(packed_array_be.bytes[2] == 0b00000100);
        testing.expect(packed_array_be.bytes[3] == 0b00000001);
        testing.expect(packed_array_be.bytes[4] == 0b00000000);
        var i = @as(usize, 0);
        while (i < packed_array_be.len()) : (i += 1) {
            testing.expect(packed_array_be.get(i) == i);
        }
        //expected values below are hand-computed from the big endian layout
        // reinterpreted with little endian bit packing
        var packed_slice_le = packed_array_be.sliceCastEndian(u11, .Little);
        testing.expect(packed_slice_le.get(0) == 0b00000000000);
        testing.expect(packed_slice_le.get(1) == 0b00010000000);
        testing.expect(packed_slice_le.get(2) == 0b00000000100);
        testing.expect(packed_slice_le.get(3) == 0b00000000000);
        testing.expect(packed_slice_le.get(4) == 0b00010000011);
        testing.expect(packed_slice_le.get(5) == 0b00000000010);
        testing.expect(packed_slice_le.get(6) == 0b10000010000);
        testing.expect(packed_slice_le.get(7) == 0b00000111001);
        var packed_slice_le_shift = packed_array_be.slice(1, 5).sliceCastEndian(u11, .Little);
        testing.expect(packed_slice_le_shift.get(0) == 0b00010000000);
        testing.expect(packed_slice_le_shift.get(1) == 0b00000000100);
        testing.expect(packed_slice_le_shift.get(2) == 0b00000000000);
        testing.expect(packed_slice_le_shift.get(3) == 0b00010000011);
    }
}
//@NOTE: Need to manually update this list as more posix os's get
// added to DirectAllocator.
//These tests prove we aren't accidentally accessing memory past
// the end of the array/slice by placing it at the end of a page
// and reading the last element. The assumption is that the page
// after this one is not mapped and will cause a segfault if we
// don't account for the bounds.
test "PackedIntArray at end of available memory" {
    switch (builtin.os) {
        .linux, .macosx, .ios, .freebsd, .netbsd, .windows => {},
        else => return, // only run where direct_allocator page behavior is known
    }
    const PackedArray = PackedIntArray(u3, 8);
    // Pad so the packed array sits flush against the end of a page.
    const Padded = struct {
        _: [std.mem.page_size - @sizeOf(PackedArray)]u8,
        p: PackedArray,
    };
    const allocator = std.heap.direct_allocator;
    var pad = try allocator.create(Padded);
    defer allocator.destroy(pad);
    // Writing the final element must not touch the byte past the page end.
    pad.p.set(7, std.math.maxInt(u3));
}
test "PackedIntSlice at end of available memory" {
    switch (builtin.os) {
        .linux, .macosx, .ios, .freebsd, .netbsd, .windows => {},
        else => return, // only run where direct_allocator page behavior is known
    }
    const PackedSlice = PackedIntSlice(u11);
    const allocator = std.heap.direct_allocator;
    var page = try allocator.alloc(u8, std.mem.page_size);
    defer allocator.free(page);
    // One u11 needs 2 bytes; place it in the last 2 bytes of the page so any
    // out-of-bounds access would fault.
    var p = PackedSlice.init(page[std.mem.page_size - 2 ..], 1);
    p.set(0, std.math.maxInt(u11));
}
|
lib/std/packed_int_array.zig
|
const std = @import("std");
const builtin = @import("builtin");
const DW = std.dwarf;
const assert = std.debug.assert;
const testing = std.testing;
// zig fmt: off
/// General purpose registers in the AArch64 instruction set
pub const Register = enum(u7) {
    // 64-bit registers
    x0, x1, x2, x3, x4, x5, x6, x7,
    x8, x9, x10, x11, x12, x13, x14, x15,
    x16, x17, x18, x19, x20, x21, x22, x23,
    x24, x25, x26, x27, x28, x29, x30, xzr,
    // 32-bit registers
    w0, w1, w2, w3, w4, w5, w6, w7,
    w8, w9, w10, w11, w12, w13, w14, w15,
    w16, w17, w18, w19, w20, w21, w22, w23,
    w24, w25, w26, w27, w28, w29, w30, wzr,
    // Stack pointer
    sp, wsp,
    /// Logical register number. xN and wN share an id (the low 5 bits of the
    /// enum value); sp/wsp get the distinct id 32 so they do not collide with
    /// xzr/wzr, which would otherwise also map to 31.
    pub fn id(self: Register) u6 {
        return switch (@enumToInt(self)) {
            0...63 => return @as(u6, @truncate(u5, @enumToInt(self))),
            64...65 => 32,
            else => unreachable,
        };
    }
    /// 5-bit hardware encoding. sp/wsp encode as 31, the same bit pattern
    /// used by xzr/wzr.
    pub fn enc(self: Register) u5 {
        return switch (@enumToInt(self)) {
            0...63 => return @truncate(u5, @enumToInt(self)),
            64...65 => 31,
            else => unreachable,
        };
    }
    /// Returns the bit-width of the register.
    pub fn size(self: Register) u7 {
        return switch (@enumToInt(self)) {
            0...31 => 64, // x registers
            32...63 => 32, // w registers
            64 => 64, // sp
            65 => 32, // wsp
            else => unreachable,
        };
    }
    /// Convert from any register to its 64 bit alias.
    pub fn to64(self: Register) Register {
        return switch (@enumToInt(self)) {
            0...31 => self,
            32...63 => @intToEnum(Register, @enumToInt(self) - 32),
            64 => .sp,
            65 => .sp,
            else => unreachable,
        };
    }
    /// Convert from any register to its 32 bit alias.
    pub fn to32(self: Register) Register {
        return switch (@enumToInt(self)) {
            0...31 => @intToEnum(Register, @enumToInt(self) + 32),
            32...63 => self,
            64 => .wsp,
            65 => .wsp,
            else => unreachable,
        };
    }
    /// Returns the index into `callee_preserved_regs`, or null when this
    /// register is not in the callee-preserved set. Width aliases match
    /// because comparison is by id().
    pub fn allocIndex(self: Register) ?u4 {
        inline for (callee_preserved_regs) |cpreg, i| {
            if (self.id() == cpreg.id()) return i;
        }
        return null;
    }
    /// DWARF register number for location expressions: DW_OP_reg0 + enc().
    pub fn dwarfLocOp(self: Register) u8 {
        return @as(u8, self.enc()) + DW.OP.reg0;
    }
};
// zig fmt: on
// Callee-preserved register set, selected per target OS at comptime.
// NOTE(review): the Darwin list omits x19 relative to the generic list —
// presumably a platform ABI restriction; confirm against Apple's AArch64
// calling convention documents before changing either set.
const callee_preserved_regs_impl = if (builtin.os.tag.isDarwin()) struct {
    pub const callee_preserved_regs = [_]Register{
        .x20, .x21, .x22, .x23,
        .x24, .x25, .x26, .x27,
        .x28,
    };
} else struct {
    pub const callee_preserved_regs = [_]Register{
        .x19, .x20, .x21, .x22, .x23,
        .x24, .x25, .x26, .x27, .x28,
    };
};
// Flatten the os-conditional impl so users reference a single declaration.
pub const callee_preserved_regs = callee_preserved_regs_impl.callee_preserved_regs;
// x0-x7 carry C ABI integer parameters; the same registers carry returns.
pub const c_abi_int_param_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
pub const c_abi_int_return_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
test "Register.enc" {
    //Each 64/32-bit register pair shares a hardware encoding.
    try testing.expectEqual(@as(u5, 0), Register.x0.enc());
    try testing.expectEqual(@as(u5, 0), Register.w0.enc());
    try testing.expectEqual(@as(u5, 31), Register.xzr.enc());
    try testing.expectEqual(@as(u5, 31), Register.wzr.enc());
    try testing.expectEqual(@as(u5, 31), Register.sp.enc());
    //Fix: the original repeated the sp check; following the x/w pairing of
    // the lines above, the second line was meant to cover wsp.
    try testing.expectEqual(@as(u5, 31), Register.wsp.enc());
}
test "Register.size" {
    //x registers report 64 bits, w registers report 32.
    try testing.expectEqual(@as(u7, 64), Register.x19.size());
    try testing.expectEqual(@as(u7, 32), Register.w3.size());
}
test "Register.to64/to32" {
    //Conversions are idempotent and map between width aliases.
    try testing.expectEqual(Register.x0, Register.w0.to64());
    try testing.expectEqual(Register.x0, Register.x0.to64());
    try testing.expectEqual(Register.w3, Register.w3.to32());
    try testing.expectEqual(Register.w3, Register.x3.to32());
}
// zig fmt: off
/// Scalar floating point registers in the aarch64 instruction set
pub const FloatingPointRegister = enum(u8) {
    // 128-bit registers
    q0, q1, q2, q3, q4, q5, q6, q7,
    q8, q9, q10, q11, q12, q13, q14, q15,
    q16, q17, q18, q19, q20, q21, q22, q23,
    q24, q25, q26, q27, q28, q29, q30, q31,
    // 64-bit registers
    d0, d1, d2, d3, d4, d5, d6, d7,
    d8, d9, d10, d11, d12, d13, d14, d15,
    d16, d17, d18, d19, d20, d21, d22, d23,
    d24, d25, d26, d27, d28, d29, d30, d31,
    // 32-bit registers
    s0, s1, s2, s3, s4, s5, s6, s7,
    s8, s9, s10, s11, s12, s13, s14, s15,
    s16, s17, s18, s19, s20, s21, s22, s23,
    s24, s25, s26, s27, s28, s29, s30, s31,
    // 16-bit registers
    h0, h1, h2, h3, h4, h5, h6, h7,
    h8, h9, h10, h11, h12, h13, h14, h15,
    h16, h17, h18, h19, h20, h21, h22, h23,
    h24, h25, h26, h27, h28, h29, h30, h31,
    // 8-bit registers
    b0, b1, b2, b3, b4, b5, b6, b7,
    b8, b9, b10, b11, b12, b13, b14, b15,
    b16, b17, b18, b19, b20, b21, b22, b23,
    b24, b25, b26, b27, b28, b29, b30, b31,
    /// Register number, shared by all width aliases of the same register
    /// (the low 5 bits of the enum value).
    pub fn id(self: FloatingPointRegister) u5 {
        return @truncate(u5, @enumToInt(self));
    }
    /// Returns the bit-width of the register.
    pub fn size(self: FloatingPointRegister) u8 {
        return switch (@enumToInt(self)) {
            0...31 => 128, // q
            32...63 => 64, // d
            64...95 => 32, // s
            96...127 => 16, // h
            128...159 => 8, // b
            else => unreachable,
        };
    }
    /// Convert from any register to its 128 bit alias.
    /// Width banks occupy consecutive blocks of 32 enum values (q, d, s, h,
    /// b), so conversion is id() plus the bank's base offset.
    pub fn to128(self: FloatingPointRegister) FloatingPointRegister {
        return @intToEnum(FloatingPointRegister, self.id());
    }
    /// Convert from any register to its 64 bit alias.
    pub fn to64(self: FloatingPointRegister) FloatingPointRegister {
        return @intToEnum(FloatingPointRegister, @as(u8, self.id()) + 32);
    }
    /// Convert from any register to its 32 bit alias.
    pub fn to32(self: FloatingPointRegister) FloatingPointRegister {
        return @intToEnum(FloatingPointRegister, @as(u8, self.id()) + 64);
    }
    /// Convert from any register to its 16 bit alias.
    pub fn to16(self: FloatingPointRegister) FloatingPointRegister {
        return @intToEnum(FloatingPointRegister, @as(u8, self.id()) + 96);
    }
    /// Convert from any register to its 8 bit alias.
    pub fn to8(self: FloatingPointRegister) FloatingPointRegister {
        return @intToEnum(FloatingPointRegister, @as(u8, self.id()) + 128);
    }
};
// zig fmt: on
test "FloatingPointRegister.id" {
    //All width aliases of register 0 share id 0.
    try testing.expectEqual(@as(u5, 0), FloatingPointRegister.b0.id());
    try testing.expectEqual(@as(u5, 0), FloatingPointRegister.h0.id());
    try testing.expectEqual(@as(u5, 0), FloatingPointRegister.s0.id());
    try testing.expectEqual(@as(u5, 0), FloatingPointRegister.d0.id());
    try testing.expectEqual(@as(u5, 0), FloatingPointRegister.q0.id());
    try testing.expectEqual(@as(u5, 2), FloatingPointRegister.q2.id());
    try testing.expectEqual(@as(u5, 31), FloatingPointRegister.d31.id());
}
test "FloatingPointRegister.size" {
    //One register from each width bank: q, d, s, h, b.
    try testing.expectEqual(@as(u8, 128), FloatingPointRegister.q1.size());
    try testing.expectEqual(@as(u8, 64), FloatingPointRegister.d2.size());
    try testing.expectEqual(@as(u8, 32), FloatingPointRegister.s3.size());
    try testing.expectEqual(@as(u8, 16), FloatingPointRegister.h4.size());
    try testing.expectEqual(@as(u8, 8), FloatingPointRegister.b5.size());
}
test "FloatingPointRegister.toX" {
    //Conversions are idempotent and map between width aliases of the same
    // register number.
    try testing.expectEqual(FloatingPointRegister.q1, FloatingPointRegister.q1.to128());
    try testing.expectEqual(FloatingPointRegister.q2, FloatingPointRegister.b2.to128());
    try testing.expectEqual(FloatingPointRegister.q3, FloatingPointRegister.h3.to128());
    try testing.expectEqual(FloatingPointRegister.d0, FloatingPointRegister.q0.to64());
    try testing.expectEqual(FloatingPointRegister.s1, FloatingPointRegister.d1.to32());
    try testing.expectEqual(FloatingPointRegister.h2, FloatingPointRegister.s2.to16());
    try testing.expectEqual(FloatingPointRegister.b3, FloatingPointRegister.h3.to8());
}
/// Represents an instruction in the AArch64 instruction set
pub const Instruction = union(enum) {
move_wide_immediate: packed struct {
rd: u5,
imm16: u16,
hw: u2,
fixed: u6 = 0b100101,
opc: u2,
sf: u1,
},
pc_relative_address: packed struct {
rd: u5,
immhi: u19,
fixed: u5 = 0b10000,
immlo: u2,
op: u1,
},
load_store_register: packed struct {
rt: u5,
rn: u5,
offset: u12,
opc: u2,
op1: u2,
v: u1,
fixed: u3 = 0b111,
size: u2,
},
load_store_register_pair: packed struct {
rt1: u5,
rn: u5,
rt2: u5,
imm7: u7,
load: u1,
encoding: u2,
fixed: u5 = 0b101_0_0,
opc: u2,
},
load_literal: packed struct {
rt: u5,
imm19: u19,
fixed: u6 = 0b011_0_00,
opc: u2,
},
exception_generation: packed struct {
ll: u2,
op2: u3,
imm16: u16,
opc: u3,
fixed: u8 = 0b1101_0100,
},
unconditional_branch_register: packed struct {
op4: u5,
rn: u5,
op3: u6,
op2: u5,
opc: u4,
fixed: u7 = 0b1101_011,
},
unconditional_branch_immediate: packed struct {
imm26: u26,
fixed: u5 = 0b00101,
op: u1,
},
no_operation: packed struct {
fixed: u32 = 0b1101010100_0_00_011_0010_0000_000_11111,
},
logical_shifted_register: packed struct {
rd: u5,
rn: u5,
imm6: u6,
rm: u5,
n: u1,
shift: u2,
fixed: u5 = 0b01010,
opc: u2,
sf: u1,
},
add_subtract_immediate: packed struct {
rd: u5,
rn: u5,
imm12: u12,
sh: u1,
fixed: u6 = 0b100010,
s: u1,
op: u1,
sf: u1,
},
add_subtract_shifted_register: packed struct {
rd: u5,
rn: u5,
imm6: u6,
rm: u5,
fixed_1: u1 = 0b0,
shift: u2,
fixed_2: u5 = 0b01011,
s: u1,
op: u1,
sf: u1,
},
conditional_branch: struct {
cond: u4,
o0: u1,
imm19: u19,
o1: u1,
fixed: u7 = 0b0101010,
},
compare_and_branch: struct {
rt: u5,
imm19: u19,
op: u1,
fixed: u6 = 0b011010,
sf: u1,
},
conditional_select: struct {
rd: u5,
rn: u5,
op2: u2,
cond: u4,
rm: u5,
fixed: u8 = 0b11010100,
s: u1,
op: u1,
sf: u1,
},
data_processing_3_source: packed struct {
rd: u5,
rn: u5,
ra: u5,
o0: u1,
rm: u5,
op31: u3,
fixed: u5 = 0b11011,
op54: u2,
sf: u1,
},
pub const Condition = enum(u4) {
/// Integer: Equal
/// Floating point: Equal
eq,
/// Integer: Not equal
/// Floating point: Not equal or unordered
ne,
/// Integer: Carry set
/// Floating point: Greater than, equal, or unordered
cs,
/// Integer: Carry clear
/// Floating point: Less than
cc,
/// Integer: Minus, negative
/// Floating point: Less than
mi,
/// Integer: Plus, positive or zero
/// Floating point: Greater than, equal, or unordered
pl,
/// Integer: Overflow
/// Floating point: Unordered
vs,
/// Integer: No overflow
/// Floating point: Ordered
vc,
/// Integer: Unsigned higher
/// Floating point: Greater than, or unordered
hi,
/// Integer: Unsigned lower or same
/// Floating point: Less than or equal
ls,
/// Integer: Signed greater than or equal
/// Floating point: Greater than or equal
ge,
/// Integer: Signed less than
/// Floating point: Less than, or unordered
lt,
/// Integer: Signed greater than
/// Floating point: Greater than
gt,
/// Integer: Signed less than or equal
/// Floating point: Less than, equal, or unordered
le,
/// Integer: Always
/// Floating point: Always
al,
/// Integer: Always
/// Floating point: Always
nv,
/// Converts a std.math.CompareOperator into a condition flag,
/// i.e. returns the condition that is true iff the result of the
/// comparison is true. Assumes signed comparison
pub fn fromCompareOperatorSigned(op: std.math.CompareOperator) Condition {
return switch (op) {
.gte => .ge,
.gt => .gt,
.neq => .ne,
.lt => .lt,
.lte => .le,
.eq => .eq,
};
}
/// Converts a std.math.CompareOperator into a condition flag,
/// i.e. returns the condition that is true iff the result of the
/// comparison is true. Assumes unsigned comparison
pub fn fromCompareOperatorUnsigned(op: std.math.CompareOperator) Condition {
return switch (op) {
.gte => .cs,
.gt => .hi,
.neq => .ne,
.lt => .cc,
.lte => .ls,
.eq => .eq,
};
}
/// Returns the condition which is true iff the given condition is
/// false (if such a condition exists)
pub fn negate(cond: Condition) Condition {
return switch (cond) {
.eq => .ne,
.ne => .eq,
.cs => .cc,
.cc => .cs,
.mi => .pl,
.pl => .mi,
.vs => .vc,
.vc => .vs,
.hi => .ls,
.ls => .hi,
.ge => .lt,
.lt => .ge,
.gt => .le,
.le => .gt,
.al => unreachable,
.nv => unreachable,
};
}
};
pub fn toU32(self: Instruction) u32 {
return switch (self) {
.move_wide_immediate => |v| @bitCast(u32, v),
.pc_relative_address => |v| @bitCast(u32, v),
.load_store_register => |v| @bitCast(u32, v),
.load_store_register_pair => |v| @bitCast(u32, v),
.load_literal => |v| @bitCast(u32, v),
.exception_generation => |v| @bitCast(u32, v),
.unconditional_branch_register => |v| @bitCast(u32, v),
.unconditional_branch_immediate => |v| @bitCast(u32, v),
.no_operation => |v| @bitCast(u32, v),
.logical_shifted_register => |v| @bitCast(u32, v),
.add_subtract_immediate => |v| @bitCast(u32, v),
.add_subtract_shifted_register => |v| @bitCast(u32, v),
// TODO once packed structs work, this can be refactored
.conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
.compare_and_branch => |v| @as(u32, v.rt) | (@as(u32, v.imm19) << 5) | (@as(u32, v.op) << 24) | (@as(u32, v.fixed) << 25) | (@as(u32, v.sf) << 31),
.conditional_select => |v| @as(u32, v.rd) | @as(u32, v.rn) << 5 | @as(u32, v.op2) << 10 | @as(u32, v.cond) << 12 | @as(u32, v.rm) << 16 | @as(u32, v.fixed) << 21 | @as(u32, v.s) << 29 | @as(u32, v.op) << 30 | @as(u32, v.sf) << 31,
.data_processing_3_source => |v| @bitCast(u32, v),
};
}
fn moveWideImmediate(
opc: u2,
rd: Register,
imm16: u16,
shift: u6,
) Instruction {
switch (rd.size()) {
32 => {
assert(shift % 16 == 0 and shift <= 16);
return Instruction{
.move_wide_immediate = .{
.rd = rd.enc(),
.imm16 = imm16,
.hw = @intCast(u2, shift / 16),
.opc = opc,
.sf = 0,
},
};
},
64 => {
assert(shift % 16 == 0 and shift <= 48);
return Instruction{
.move_wide_immediate = .{
.rd = rd.enc(),
.imm16 = imm16,
.hw = @intCast(u2, shift / 16),
.opc = opc,
.sf = 1,
},
};
},
else => unreachable, // unexpected register size
}
}
fn pcRelativeAddress(rd: Register, imm21: i21, op: u1) Instruction {
assert(rd.size() == 64);
const imm21_u = @bitCast(u21, imm21);
return Instruction{
.pc_relative_address = .{
.rd = rd.enc(),
.immlo = @truncate(u2, imm21_u),
.immhi = @truncate(u19, imm21_u >> 2),
.op = op,
},
};
}
pub const LoadStoreOffsetImmediate = union(enum) {
post_index: i9,
pre_index: i9,
unsigned: u12,
};
pub const LoadStoreOffsetRegister = struct {
rm: u5,
shift: union(enum) {
uxtw: u2,
lsl: u2,
sxtw: u2,
sxtx: u2,
},
};
/// Represents the offset operand of a load or store instruction.
/// Data can be loaded from memory with either an immediate offset
/// or an offset that is stored in some register.
pub const LoadStoreOffset = union(enum) {
immediate: LoadStoreOffsetImmediate,
register: LoadStoreOffsetRegister,
pub const none = LoadStoreOffset{
.immediate = .{ .unsigned = 0 },
};
pub fn toU12(self: LoadStoreOffset) u12 {
return switch (self) {
.immediate => |imm_type| switch (imm_type) {
.post_index => |v| (@intCast(u12, @bitCast(u9, v)) << 2) + 1,
.pre_index => |v| (@intCast(u12, @bitCast(u9, v)) << 2) + 3,
.unsigned => |v| v,
},
.register => |r| switch (r.shift) {
.uxtw => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 16 + 2050,
.lsl => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 24 + 2050,
.sxtw => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 48 + 2050,
.sxtx => |v| (@intCast(u12, r.rm) << 6) + (@intCast(u12, v) << 2) + 56 + 2050,
},
};
}
pub fn imm(offset: u12) LoadStoreOffset {
return .{
.immediate = .{ .unsigned = offset },
};
}
pub fn imm_post_index(offset: i9) LoadStoreOffset {
return .{
.immediate = .{ .post_index = offset },
};
}
pub fn imm_pre_index(offset: i9) LoadStoreOffset {
return .{
.immediate = .{ .pre_index = offset },
};
}
pub fn reg(rm: Register) LoadStoreOffset {
return .{
.register = .{
.rm = rm.enc(),
.shift = .{
.lsl = 0,
},
},
};
}
pub fn reg_uxtw(rm: Register, shift: u2) LoadStoreOffset {
assert(rm.size() == 32 and (shift == 0 or shift == 2));
return .{
.register = .{
.rm = rm.enc(),
.shift = .{
.uxtw = shift,
},
},
};
}
pub fn reg_lsl(rm: Register, shift: u2) LoadStoreOffset {
assert(rm.size() == 64 and (shift == 0 or shift == 3));
return .{
.register = .{
.rm = rm.enc(),
.shift = .{
.lsl = shift,
},
},
};
}
pub fn reg_sxtw(rm: Register, shift: u2) LoadStoreOffset {
assert(rm.size() == 32 and (shift == 0 or shift == 2));
return .{
.register = .{
.rm = rm.enc(),
.shift = .{
.sxtw = shift,
},
},
};
}
pub fn reg_sxtx(rm: Register, shift: u2) LoadStoreOffset {
assert(rm.size() == 64 and (shift == 0 or shift == 3));
return .{
.register = .{
.rm = rm.enc(),
.shift = .{
.sxtx = shift,
},
},
};
}
};
/// Which kind of load/store to perform
const LoadStoreVariant = enum {
/// 32-bit or 64-bit
str,
/// 16-bit, zero-extended
strh,
/// 8-bit, zero-extended
strb,
/// 32-bit or 64-bit
ldr,
/// 16-bit, zero-extended
ldrh,
/// 8-bit, zero-extended
ldrb,
};
fn loadStoreRegister(
rt: Register,
rn: Register,
offset: LoadStoreOffset,
variant: LoadStoreVariant,
) Instruction {
const off = offset.toU12();
const op1: u2 = blk: {
switch (offset) {
.immediate => |imm| switch (imm) {
.unsigned => break :blk 0b01,
else => {},
},
else => {},
}
break :blk 0b00;
};
const opc: u2 = switch (variant) {
.ldr, .ldrh, .ldrb => 0b01,
.str, .strh, .strb => 0b00,
};
return Instruction{
.load_store_register = .{
.rt = rt.enc(),
.rn = rn.enc(),
.offset = off,
.opc = opc,
.op1 = op1,
.v = 0,
.size = blk: {
switch (variant) {
.ldr, .str => switch (rt.size()) {
32 => break :blk 0b10,
64 => break :blk 0b11,
else => unreachable, // unexpected register size
},
.ldrh, .strh => break :blk 0b01,
.ldrb, .strb => break :blk 0b00,
}
},
},
};
}
fn loadStoreRegisterPair(
rt1: Register,
rt2: Register,
rn: Register,
offset: i9,
encoding: u2,
load: bool,
) Instruction {
switch (rt1.size()) {
32 => {
assert(-256 <= offset and offset <= 252);
const imm7 = @truncate(u7, @bitCast(u9, offset >> 2));
return Instruction{
.load_store_register_pair = .{
.rt1 = rt1.enc(),
.rn = rn.enc(),
.rt2 = rt2.enc(),
.imm7 = imm7,
.load = @boolToInt(load),
.encoding = encoding,
.opc = 0b00,
},
};
},
64 => {
assert(-512 <= offset and offset <= 504);
const imm7 = @truncate(u7, @bitCast(u9, offset >> 3));
return Instruction{
.load_store_register_pair = .{
.rt1 = rt1.enc(),
.rn = rn.enc(),
.rt2 = rt2.enc(),
.imm7 = imm7,
.load = @boolToInt(load),
.encoding = encoding,
.opc = 0b10,
},
};
},
else => unreachable, // unexpected register size
}
}
fn loadLiteral(rt: Register, imm19: u19) Instruction {
return Instruction{
.load_literal = .{
.rt = rt.enc(),
.imm19 = imm19,
.opc = switch (rt.size()) {
32 => 0b00,
64 => 0b01,
else => unreachable, // unexpected register size
},
},
};
}
fn exceptionGeneration(
opc: u3,
op2: u3,
ll: u2,
imm16: u16,
) Instruction {
return Instruction{
.exception_generation = .{
.ll = ll,
.op2 = op2,
.imm16 = imm16,
.opc = opc,
},
};
}
fn unconditionalBranchRegister(
opc: u4,
op2: u5,
op3: u6,
rn: Register,
op4: u5,
) Instruction {
assert(rn.size() == 64);
return Instruction{
.unconditional_branch_register = .{
.op4 = op4,
.rn = rn.enc(),
.op3 = op3,
.op2 = op2,
.opc = opc,
},
};
}
fn unconditionalBranchImmediate(
op: u1,
offset: i28,
) Instruction {
return Instruction{
.unconditional_branch_immediate = .{
.imm26 = @bitCast(u26, @intCast(i26, offset >> 2)),
.op = op,
},
};
}
pub const LogicalShiftedRegisterShift = enum(u2) { lsl, lsr, asr, ror };
fn logicalShiftedRegister(
opc: u2,
n: u1,
rd: Register,
rn: Register,
rm: Register,
shift: LogicalShiftedRegisterShift,
amount: u6,
) Instruction {
switch (rd.size()) {
32 => {
assert(amount < 32);
return Instruction{
.logical_shifted_register = .{
.rd = rd.enc(),
.rn = rn.enc(),
.imm6 = amount,
.rm = rm.enc(),
.n = n,
.shift = @enumToInt(shift),
.opc = opc,
.sf = 0b0,
},
};
},
64 => {
return Instruction{
.logical_shifted_register = .{
.rd = rd.enc(),
.rn = rn.enc(),
.imm6 = amount,
.rm = rm.enc(),
.n = n,
.shift = @enumToInt(shift),
.opc = opc,
.sf = 0b1,
},
};
},
else => unreachable, // unexpected register size
}
}
fn addSubtractImmediate(
op: u1,
s: u1,
rd: Register,
rn: Register,
imm12: u12,
shift: bool,
) Instruction {
return Instruction{
.add_subtract_immediate = .{
.rd = rd.enc(),
.rn = rn.enc(),
.imm12 = imm12,
.sh = @boolToInt(shift),
.s = s,
.op = op,
.sf = switch (rd.size()) {
32 => 0b0,
64 => 0b1,
else => unreachable, // unexpected register size
},
},
};
}
pub const AddSubtractShiftedRegisterShift = enum(u2) { lsl, lsr, asr, _ };
fn addSubtractShiftedRegister(
op: u1,
s: u1,
shift: AddSubtractShiftedRegisterShift,
rd: Register,
rn: Register,
rm: Register,
imm6: u6,
) Instruction {
return Instruction{
.add_subtract_shifted_register = .{
.rd = rd.enc(),
.rn = rn.enc(),
.imm6 = imm6,
.rm = rm.enc(),
.shift = @enumToInt(shift),
.s = s,
.op = op,
.sf = switch (rd.size()) {
32 => 0b0,
64 => 0b1,
else => unreachable, // unexpected register size
},
},
};
}
fn conditionalBranch(
o0: u1,
o1: u1,
cond: Condition,
offset: i21,
) Instruction {
assert(offset & 0b11 == 0b00);
return Instruction{
.conditional_branch = .{
.cond = @enumToInt(cond),
.o0 = o0,
.imm19 = @bitCast(u19, @intCast(i19, offset >> 2)),
.o1 = o1,
},
};
}
fn compareAndBranch(
op: u1,
rt: Register,
offset: i21,
) Instruction {
assert(offset & 0b11 == 0b00);
return Instruction{
.compare_and_branch = .{
.rt = rt.enc(),
.imm19 = @bitCast(u19, @intCast(i19, offset >> 2)),
.op = op,
.sf = switch (rt.size()) {
32 => 0b0,
64 => 0b1,
else => unreachable, // unexpected register size
},
},
};
}
fn conditionalSelect(
op2: u2,
op: u1,
s: u1,
rd: Register,
rn: Register,
rm: Register,
cond: Condition,
) Instruction {
return Instruction{
.conditional_select = .{
.rd = rd.enc(),
.rn = rn.enc(),
.op2 = op2,
.cond = @enumToInt(cond),
.rm = rm.enc(),
.s = s,
.op = op,
.sf = switch (rd.size()) {
32 => 0b0,
64 => 0b1,
else => unreachable, // unexpected register size
},
},
};
}
fn dataProcessing3Source(
op54: u2,
op31: u3,
o0: u1,
rd: Register,
rn: Register,
rm: Register,
ra: Register,
) Instruction {
return Instruction{
.data_processing_3_source = .{
.rd = rd.enc(),
.rn = rn.enc(),
.ra = ra.enc(),
.o0 = o0,
.rm = rm.enc(),
.op31 = op31,
.op54 = op54,
.sf = switch (rd.size()) {
32 => 0b0,
64 => 0b1,
else => unreachable, // unexpected register size
},
},
};
}
// Helper functions for assembly syntax functions

// Move wide (immediate)

/// MOVN: move wide with NOT (opc 0b00). `shift` is the bit shift
/// (0/16/32/48) — presumably encoded as hw = shift/16 by
/// moveWideImmediate; confirm against that helper.
pub fn movn(rd: Register, imm16: u16, shift: u6) Instruction {
    return moveWideImmediate(0b00, rd, imm16, shift);
}

/// MOVZ: move wide with zero (opc 0b10).
pub fn movz(rd: Register, imm16: u16, shift: u6) Instruction {
    return moveWideImmediate(0b10, rd, imm16, shift);
}

/// MOVK: move wide with keep (opc 0b11).
pub fn movk(rd: Register, imm16: u16, shift: u6) Instruction {
    return moveWideImmediate(0b11, rd, imm16, shift);
}

// PC relative address

/// ADR: PC-relative address (op = 0; byte granularity).
pub fn adr(rd: Register, imm21: i21) Instruction {
    return pcRelativeAddress(rd, imm21, 0b0);
}

/// ADRP: PC-relative address to 4 KiB page (op = 1).
pub fn adrp(rd: Register, imm21: i21) Instruction {
    return pcRelativeAddress(rd, imm21, 0b1);
}

// Load or store register

/// LDR (literal): load from a PC-relative literal (imm19, word-scaled).
pub fn ldrLiteral(rt: Register, literal: u19) Instruction {
    return loadLiteral(rt, literal);
}

/// LDR (register): load from [rn + offset].
pub fn ldr(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
    return loadStoreRegister(rt, rn, offset, .ldr);
}

/// LDRH: load halfword.
pub fn ldrh(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
    return loadStoreRegister(rt, rn, offset, .ldrh);
}

/// LDRB: load byte.
pub fn ldrb(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
    return loadStoreRegister(rt, rn, offset, .ldrb);
}

/// STR (register): store to [rn + offset].
pub fn str(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
    return loadStoreRegister(rt, rn, offset, .str);
}

/// STRH: store halfword.
pub fn strh(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
    return loadStoreRegister(rt, rn, offset, .strh);
}

/// STRB: store byte.
pub fn strb(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
    return loadStoreRegister(rt, rn, offset, .strb);
}
// Load or store pair of registers

/// Addressing mode for load/store-pair instructions: the 2-bit mode
/// encoding plus an i9 offset (scaled to imm7 downstream — confirm
/// against loadStoreRegisterPair).
pub const LoadStorePairOffset = struct {
    encoding: enum(u2) {
        post_index = 0b01,
        signed = 0b10,
        pre_index = 0b11,
    },
    offset: i9,

    /// Signed-offset addressing with a zero offset, i.e. plain `[rn]`.
    pub fn none() LoadStorePairOffset {
        return .{ .encoding = .signed, .offset = 0 };
    }

    /// Post-index: `[rn], #imm` — rn is updated after the access.
    pub fn post_index(imm: i9) LoadStorePairOffset {
        return .{ .encoding = .post_index, .offset = imm };
    }

    /// Pre-index: `[rn, #imm]!` — rn is updated before the access.
    pub fn pre_index(imm: i9) LoadStorePairOffset {
        return .{ .encoding = .pre_index, .offset = imm };
    }

    /// Signed offset: `[rn, #imm]` — rn is not updated.
    pub fn signed(imm: i9) LoadStorePairOffset {
        return .{ .encoding = .signed, .offset = imm };
    }
};
/// LDP: load a pair of registers.
pub fn ldp(rt1: Register, rt2: Register, rn: Register, offset: LoadStorePairOffset) Instruction {
    return loadStoreRegisterPair(rt1, rt2, rn, offset.offset, @enumToInt(offset.encoding), true);
}

/// LDNP: load pair with non-temporal hint. Passes encoding 0 (not a
/// LoadStorePairOffset tag) — assumes loadStoreRegisterPair maps 0 to
/// the no-allocate form; TODO confirm.
pub fn ldnp(rt1: Register, rt2: Register, rn: Register, offset: i9) Instruction {
    return loadStoreRegisterPair(rt1, rt2, rn, offset, 0, true);
}

/// STP: store a pair of registers.
pub fn stp(rt1: Register, rt2: Register, rn: Register, offset: LoadStorePairOffset) Instruction {
    return loadStoreRegisterPair(rt1, rt2, rn, offset.offset, @enumToInt(offset.encoding), false);
}

/// STNP: store pair with non-temporal hint (see note on `ldnp`).
pub fn stnp(rt1: Register, rt2: Register, rn: Register, offset: i9) Instruction {
    return loadStoreRegisterPair(rt1, rt2, rn, offset, 0, false);
}
// Exception generation

/// SVC: supervisor call (syscall into EL1).
pub fn svc(imm16: u16) Instruction {
    return exceptionGeneration(0b000, 0b000, 0b01, imm16);
}

/// HVC: hypervisor call (EL2).
pub fn hvc(imm16: u16) Instruction {
    return exceptionGeneration(0b000, 0b000, 0b10, imm16);
}

/// SMC: secure monitor call (EL3).
pub fn smc(imm16: u16) Instruction {
    return exceptionGeneration(0b000, 0b000, 0b11, imm16);
}

/// BRK: breakpoint exception.
pub fn brk(imm16: u16) Instruction {
    return exceptionGeneration(0b001, 0b000, 0b00, imm16);
}

/// HLT: halt (external debug).
pub fn hlt(imm16: u16) Instruction {
    return exceptionGeneration(0b010, 0b000, 0b00, imm16);
}
// Unconditional branch (register)

/// BR: branch to the address in `rn`.
pub fn br(rn: Register) Instruction {
    return unconditionalBranchRegister(0b0000, 0b11111, 0b000000, rn, 0b00000);
}

/// BLR: branch-and-link to the address in `rn` (link register = x30).
pub fn blr(rn: Register) Instruction {
    return unconditionalBranchRegister(0b0001, 0b11111, 0b000000, rn, 0b00000);
}

/// RET: return to the address in `rn`; defaults to x30 when null.
pub fn ret(rn: ?Register) Instruction {
    return unconditionalBranchRegister(0b0010, 0b11111, 0b000000, rn orelse .x30, 0b00000);
}

// Unconditional branch (immediate)

/// B: PC-relative branch. `offset` is a byte offset (4-byte aligned,
/// scaled to imm26 downstream).
pub fn b(offset: i28) Instruction {
    return unconditionalBranchImmediate(0, offset);
}

/// BL: PC-relative branch-and-link.
pub fn bl(offset: i28) Instruction {
    return unconditionalBranchImmediate(1, offset);
}

// Nop

/// NOP: no operation.
pub fn nop() Instruction {
    return Instruction{ .no_operation = .{} };
}
// Logical (shifted register)
// Each wrapper fixes (opc, n): opc selects AND/ORR/EOR/ANDS, n inverts rm.

/// AND: rd = rn & shifted(rm).
pub fn @"and"(
    rd: Register,
    rn: Register,
    rm: Register,
    shift: LogicalShiftedRegisterShift,
    amount: u6,
) Instruction {
    return logicalShiftedRegister(0b00, 0b0, rd, rn, rm, shift, amount);
}

/// BIC: rd = rn & ~shifted(rm).
pub fn bic(
    rd: Register,
    rn: Register,
    rm: Register,
    shift: LogicalShiftedRegisterShift,
    amount: u6,
) Instruction {
    return logicalShiftedRegister(0b00, 0b1, rd, rn, rm, shift, amount);
}

/// ORR: rd = rn | shifted(rm).
pub fn orr(
    rd: Register,
    rn: Register,
    rm: Register,
    shift: LogicalShiftedRegisterShift,
    amount: u6,
) Instruction {
    return logicalShiftedRegister(0b01, 0b0, rd, rn, rm, shift, amount);
}

/// ORN: rd = rn | ~shifted(rm).
pub fn orn(
    rd: Register,
    rn: Register,
    rm: Register,
    shift: LogicalShiftedRegisterShift,
    amount: u6,
) Instruction {
    return logicalShiftedRegister(0b01, 0b1, rd, rn, rm, shift, amount);
}

/// EOR: rd = rn ^ shifted(rm).
pub fn eor(
    rd: Register,
    rn: Register,
    rm: Register,
    shift: LogicalShiftedRegisterShift,
    amount: u6,
) Instruction {
    return logicalShiftedRegister(0b10, 0b0, rd, rn, rm, shift, amount);
}

/// EON: rd = rn ^ ~shifted(rm).
pub fn eon(
    rd: Register,
    rn: Register,
    rm: Register,
    shift: LogicalShiftedRegisterShift,
    amount: u6,
) Instruction {
    return logicalShiftedRegister(0b10, 0b1, rd, rn, rm, shift, amount);
}

/// ANDS: AND, setting condition flags.
pub fn ands(
    rd: Register,
    rn: Register,
    rm: Register,
    shift: LogicalShiftedRegisterShift,
    amount: u6,
) Instruction {
    return logicalShiftedRegister(0b11, 0b0, rd, rn, rm, shift, amount);
}

/// BICS: BIC, setting condition flags.
pub fn bics(
    rd: Register,
    rn: Register,
    rm: Register,
    shift: LogicalShiftedRegisterShift,
    amount: u6,
) Instruction {
    return logicalShiftedRegister(0b11, 0b1, rd, rn, rm, shift, amount);
}
// Add/subtract (immediate)
// `shift` selects an optional LSL #12 applied to the 12-bit immediate.

/// ADD (immediate).
pub fn add(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
    return addSubtractImmediate(0b0, 0b0, rd, rn, imm, shift);
}

/// ADDS (immediate): add, setting condition flags.
pub fn adds(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
    return addSubtractImmediate(0b0, 0b1, rd, rn, imm, shift);
}

/// SUB (immediate).
pub fn sub(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
    return addSubtractImmediate(0b1, 0b0, rd, rn, imm, shift);
}

/// SUBS (immediate): subtract, setting condition flags.
pub fn subs(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
    return addSubtractImmediate(0b1, 0b1, rd, rn, imm, shift);
}
// Add/subtract (shifted register)

/// ADD (shifted register): rd = rn + shifted(rm).
pub fn addShiftedRegister(
    rd: Register,
    rn: Register,
    rm: Register,
    shift: AddSubtractShiftedRegisterShift,
    imm6: u6,
) Instruction {
    return addSubtractShiftedRegister(0b0, 0b0, shift, rd, rn, rm, imm6);
}

/// ADDS (shifted register): add, setting condition flags.
pub fn addsShiftedRegister(
    rd: Register,
    rn: Register,
    rm: Register,
    shift: AddSubtractShiftedRegisterShift,
    imm6: u6,
) Instruction {
    return addSubtractShiftedRegister(0b0, 0b1, shift, rd, rn, rm, imm6);
}

/// SUB (shifted register): rd = rn - shifted(rm).
pub fn subShiftedRegister(
    rd: Register,
    rn: Register,
    rm: Register,
    shift: AddSubtractShiftedRegisterShift,
    imm6: u6,
) Instruction {
    return addSubtractShiftedRegister(0b1, 0b0, shift, rd, rn, rm, imm6);
}

/// SUBS (shifted register): subtract, setting condition flags.
pub fn subsShiftedRegister(
    rd: Register,
    rn: Register,
    rm: Register,
    shift: AddSubtractShiftedRegisterShift,
    imm6: u6,
) Instruction {
    return addSubtractShiftedRegister(0b1, 0b1, shift, rd, rn, rm, imm6);
}
// Conditional branch

/// B.cond: branch if `cond` holds. `offset` is a 4-byte-aligned byte offset.
pub fn bCond(cond: Condition, offset: i21) Instruction {
    return conditionalBranch(0b0, 0b0, cond, offset);
}

// Compare and branch

/// CBZ: branch if rt == 0.
pub fn cbz(rt: Register, offset: i21) Instruction {
    return compareAndBranch(0b0, rt, offset);
}

/// CBNZ: branch if rt != 0.
pub fn cbnz(rt: Register, offset: i21) Instruction {
    return compareAndBranch(0b1, rt, offset);
}

// Conditional select

/// CSEL: rd = cond ? rn : rm.
pub fn csel(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
    return conditionalSelect(0b00, 0b0, 0b0, rd, rn, rm, cond);
}

/// CSINC: rd = cond ? rn : rm + 1.
pub fn csinc(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
    return conditionalSelect(0b01, 0b0, 0b0, rd, rn, rm, cond);
}

/// CSINV: rd = cond ? rn : ~rm.
pub fn csinv(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
    return conditionalSelect(0b00, 0b1, 0b0, rd, rn, rm, cond);
}

/// CSNEG: rd = cond ? rn : -rm.
pub fn csneg(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
    return conditionalSelect(0b01, 0b1, 0b0, rd, rn, rm, cond);
}

// Data processing (3 source)

/// MADD: rd = ra + rn * rm.
pub fn madd(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
    return dataProcessing3Source(0b00, 0b000, 0b0, rd, rn, rm, ra);
}

/// MSUB: rd = ra - rn * rm.
pub fn msub(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
    return dataProcessing3Source(0b00, 0b000, 0b1, rd, rn, rm, ra);
}

/// MUL: alias of MADD with ra = xzr.
pub fn mul(rd: Register, rn: Register, rm: Register) Instruction {
    return madd(rd, rn, rm, .xzr);
}

/// MNEG: alias of MSUB with ra = xzr.
pub fn mneg(rd: Register, rn: Register, rm: Register) Instruction {
    return msub(rd, rn, rm, .xzr);
}
};
// Reference every nested declaration so their tests get compiled and run.
test {
    testing.refAllDecls(@This());
}
// Golden-encoding test: each case pairs a constructed Instruction with
// its expected 32-bit machine code. Expected values are written with
// `_` separators grouping the bit fields in the order they appear in
// the ARM ARM encoding diagrams.
test "serialize instructions" {
    const Testcase = struct {
        inst: Instruction,
        expected: u32,
    };
    const testcases = [_]Testcase{
        .{ // orr x0, xzr, x1
            .inst = Instruction.orr(.x0, .xzr, .x1, .lsl, 0),
            .expected = 0b1_01_01010_00_0_00001_000000_11111_00000,
        },
        .{ // orn x0, xzr, x1
            .inst = Instruction.orn(.x0, .xzr, .x1, .lsl, 0),
            .expected = 0b1_01_01010_00_1_00001_000000_11111_00000,
        },
        .{ // movz x1, #4
            .inst = Instruction.movz(.x1, 4, 0),
            .expected = 0b1_10_100101_00_0000000000000100_00001,
        },
        .{ // movz x1, #4, lsl 16
            .inst = Instruction.movz(.x1, 4, 16),
            .expected = 0b1_10_100101_01_0000000000000100_00001,
        },
        .{ // movz x1, #4, lsl 32
            .inst = Instruction.movz(.x1, 4, 32),
            .expected = 0b1_10_100101_10_0000000000000100_00001,
        },
        .{ // movz x1, #4, lsl 48
            .inst = Instruction.movz(.x1, 4, 48),
            .expected = 0b1_10_100101_11_0000000000000100_00001,
        },
        .{ // movz w1, #4
            .inst = Instruction.movz(.w1, 4, 0),
            .expected = 0b0_10_100101_00_0000000000000100_00001,
        },
        .{ // movz w1, #4, lsl 16
            .inst = Instruction.movz(.w1, 4, 16),
            .expected = 0b0_10_100101_01_0000000000000100_00001,
        },
        .{ // svc #0
            .inst = Instruction.svc(0),
            .expected = 0b1101_0100_000_0000000000000000_00001,
        },
        .{ // svc #0x80 ; typical on Darwin
            .inst = Instruction.svc(0x80),
            .expected = 0b1101_0100_000_0000000010000000_00001,
        },
        .{ // ret
            .inst = Instruction.ret(null),
            .expected = 0b1101_011_00_10_11111_0000_00_11110_00000,
        },
        .{ // bl #0x10
            .inst = Instruction.bl(0x10),
            .expected = 0b1_00101_00_0000_0000_0000_0000_0000_0100,
        },
        .{ // ldr x2, [x1]
            .inst = Instruction.ldr(.x2, .x1, Instruction.LoadStoreOffset.none),
            .expected = 0b11_111_0_01_01_000000000000_00001_00010,
        },
        .{ // ldr x2, [x1, #1]!
            .inst = Instruction.ldr(.x2, .x1, Instruction.LoadStoreOffset.imm_pre_index(1)),
            .expected = 0b11_111_0_00_01_0_000000001_11_00001_00010,
        },
        .{ // ldr x2, [x1], #-1
            .inst = Instruction.ldr(.x2, .x1, Instruction.LoadStoreOffset.imm_post_index(-1)),
            .expected = 0b11_111_0_00_01_0_111111111_01_00001_00010,
        },
        .{ // ldr x2, [x1], (x3)
            .inst = Instruction.ldr(.x2, .x1, Instruction.LoadStoreOffset.reg(.x3)),
            .expected = 0b11_111_0_00_01_1_00011_011_0_10_00001_00010,
        },
        .{ // ldr x2, label
            .inst = Instruction.ldrLiteral(.x2, 0x1),
            .expected = 0b01_011_0_00_0000000000000000001_00010,
        },
        .{ // ldrh x7, [x4], #0xaa
            .inst = Instruction.ldrh(.x7, .x4, Instruction.LoadStoreOffset.imm_post_index(0xaa)),
            .expected = 0b01_111_0_00_01_0_010101010_01_00100_00111,
        },
        .{ // ldrb x9, [x15, #0xff]!
            .inst = Instruction.ldrb(.x9, .x15, Instruction.LoadStoreOffset.imm_pre_index(0xff)),
            .expected = 0b00_111_0_00_01_0_011111111_11_01111_01001,
        },
        .{ // str x2, [x1]
            .inst = Instruction.str(.x2, .x1, Instruction.LoadStoreOffset.none),
            .expected = 0b11_111_0_01_00_000000000000_00001_00010,
        },
        .{ // str x2, [x1], (x3)
            .inst = Instruction.str(.x2, .x1, Instruction.LoadStoreOffset.reg(.x3)),
            .expected = 0b11_111_0_00_00_1_00011_011_0_10_00001_00010,
        },
        .{ // strh w0, [x1]
            .inst = Instruction.strh(.w0, .x1, Instruction.LoadStoreOffset.none),
            .expected = 0b01_111_0_01_00_000000000000_00001_00000,
        },
        .{ // strb w8, [x9]
            .inst = Instruction.strb(.w8, .x9, Instruction.LoadStoreOffset.none),
            .expected = 0b00_111_0_01_00_000000000000_01001_01000,
        },
        .{ // adr x2, #0x8
            .inst = Instruction.adr(.x2, 0x8),
            .expected = 0b0_00_10000_0000000000000000010_00010,
        },
        .{ // adr x2, -#0x8
            .inst = Instruction.adr(.x2, -0x8),
            .expected = 0b0_00_10000_1111111111111111110_00010,
        },
        .{ // adrp x2, #0x8
            .inst = Instruction.adrp(.x2, 0x8),
            .expected = 0b1_00_10000_0000000000000000010_00010,
        },
        .{ // adrp x2, -#0x8
            .inst = Instruction.adrp(.x2, -0x8),
            .expected = 0b1_00_10000_1111111111111111110_00010,
        },
        .{ // stp x1, x2, [sp, #8]
            .inst = Instruction.stp(.x1, .x2, .sp, Instruction.LoadStorePairOffset.signed(8)),
            .expected = 0b10_101_0_010_0_0000001_00010_11111_00001,
        },
        .{ // ldp x1, x2, [sp, #8]
            .inst = Instruction.ldp(.x1, .x2, .sp, Instruction.LoadStorePairOffset.signed(8)),
            .expected = 0b10_101_0_010_1_0000001_00010_11111_00001,
        },
        .{ // stp x1, x2, [sp, #-16]!
            .inst = Instruction.stp(.x1, .x2, .sp, Instruction.LoadStorePairOffset.pre_index(-16)),
            .expected = 0b10_101_0_011_0_1111110_00010_11111_00001,
        },
        .{ // ldp x1, x2, [sp], #16
            .inst = Instruction.ldp(.x1, .x2, .sp, Instruction.LoadStorePairOffset.post_index(16)),
            .expected = 0b10_101_0_001_1_0000010_00010_11111_00001,
        },
        .{ // and x0, x4, x2
            .inst = Instruction.@"and"(.x0, .x4, .x2, .lsl, 0),
            .expected = 0b1_00_01010_00_0_00010_000000_00100_00000,
        },
        .{ // and x0, x4, x2, lsl #0x8
            .inst = Instruction.@"and"(.x0, .x4, .x2, .lsl, 0x8),
            .expected = 0b1_00_01010_00_0_00010_001000_00100_00000,
        },
        .{ // add x0, x10, #10
            .inst = Instruction.add(.x0, .x10, 10, false),
            .expected = 0b1_0_0_100010_0_0000_0000_1010_01010_00000,
        },
        .{ // subs x0, x5, #11, lsl #12
            .inst = Instruction.subs(.x0, .x5, 11, true),
            .expected = 0b1_1_1_100010_1_0000_0000_1011_00101_00000,
        },
        .{ // b.hi #-4
            .inst = Instruction.bCond(.hi, -4),
            .expected = 0b0101010_0_1111111111111111111_0_1000,
        },
        .{ // cbz x10, #40
            .inst = Instruction.cbz(.x10, 40),
            .expected = 0b1_011010_0_0000000000000001010_01010,
        },
        .{ // add x0, x1, x2, lsl #5
            .inst = Instruction.addShiftedRegister(.x0, .x1, .x2, .lsl, 5),
            .expected = 0b1_0_0_01011_00_0_00010_000101_00001_00000,
        },
        .{ // csinc x1, x2, x4, eq
            .inst = Instruction.csinc(.x1, .x2, .x4, .eq),
            .expected = 0b1_0_0_11010100_00100_0000_0_1_00010_00001,
        },
        .{ // mul x1, x4, x9
            .inst = Instruction.mul(.x1, .x4, .x9),
            .expected = 0b1_00_11011_000_01001_0_11111_00100_00001,
        },
    };
    for (testcases) |case| {
        const actual = case.inst.toU32();
        try testing.expectEqual(case.expected, actual);
    }
}
|
src/arch/aarch64/bits.zig
|
const std = @import("../../std.zig");
const net = @import("net.zig");
const os = std.os;
const mem = std.mem;
const windows = std.os.windows;
const ws2_32 = windows.ws2_32;
pub fn Mixin(comptime Socket: type) type {
return struct {
/// Open a new socket.
/// Open a new socket.
///
/// Emulates Unix `SOCK_CLOEXEC` on Windows: the flag is stripped from
/// `socket_type` and translated to `WSA_FLAG_NO_HANDLE_INHERIT`.
/// Lazily initializes Winsock (via `WSAStartup`) on first use and retries.
pub fn init(domain: u32, socket_type: u32, protocol: u32) !Socket {
    // Never mutated after computation, so bind as const (a mutated
    // `var` here is rejected by newer Zig compilers anyway).
    const filtered_socket_type = socket_type & ~@as(u32, os.SOCK_CLOEXEC);
    var filtered_flags: u32 = ws2_32.WSA_FLAG_OVERLAPPED;
    if (socket_type & os.SOCK_CLOEXEC != 0) {
        filtered_flags |= ws2_32.WSA_FLAG_NO_HANDLE_INHERIT;
    }
    const fd = ws2_32.WSASocketW(
        @intCast(i32, domain),
        @intCast(i32, filtered_socket_type),
        @intCast(i32, protocol),
        null,
        0,
        filtered_flags,
    );
    if (fd == ws2_32.INVALID_SOCKET) {
        return switch (ws2_32.WSAGetLastError()) {
            .WSANOTINITIALISED => {
                // Winsock was not started yet: start it and retry once.
                _ = try windows.WSAStartup(2, 2);
                return Socket.init(domain, socket_type, protocol);
            },
            .WSAEAFNOSUPPORT => error.AddressFamilyNotSupported,
            .WSAEMFILE => error.ProcessFdQuotaExceeded,
            .WSAENOBUFS => error.SystemResources,
            .WSAEPROTONOSUPPORT => error.ProtocolNotSupported,
            else => |err| windows.unexpectedWSAError(err),
        };
    }
    return Socket{ .fd = fd };
}
/// Closes the socket. Deallocation must not fail, so the closesocket
/// result is intentionally discarded.
pub fn deinit(self: Socket) void {
    _ = ws2_32.closesocket(self.fd);
}
/// Shut down the read side, write side, or both sides of the socket.
/// The `unreachable` arms cover caller/programmer errors (bad handle,
/// Winsock not initialized) that cannot occur through this API.
pub fn shutdown(self: Socket, how: os.ShutdownHow) !void {
    const result = ws2_32.shutdown(self.fd, switch (how) {
        .recv => ws2_32.SD_RECEIVE,
        .send => ws2_32.SD_SEND,
        .both => ws2_32.SD_BOTH,
    });
    if (result != ws2_32.SOCKET_ERROR) return;
    switch (ws2_32.WSAGetLastError()) {
        .WSAECONNABORTED => return error.ConnectionAborted,
        .WSAECONNRESET => return error.ConnectionResetByPeer,
        .WSAEINPROGRESS => return error.BlockingOperationInProgress,
        .WSAEINVAL => unreachable,
        .WSAENETDOWN => return error.NetworkSubsystemFailed,
        .WSAENOTCONN => return error.SocketNotConnected,
        .WSAENOTSOCK => unreachable,
        .WSANOTINITIALISED => unreachable,
        else => |err| return windows.unexpectedWSAError(err),
    }
}
/// Binds the socket to an address.
pub fn bind(self: Socket, address: Socket.Address) !void {
    const rc = ws2_32.bind(self.fd, @ptrCast(*const ws2_32.sockaddr, &address.toNative()), @intCast(c_int, address.getNativeSize()));
    if (rc == ws2_32.SOCKET_ERROR) {
        return switch (ws2_32.WSAGetLastError()) {
            .WSAENETDOWN => error.NetworkSubsystemFailed,
            .WSAEACCES => error.AccessDenied,
            .WSAEADDRINUSE => error.AddressInUse,
            .WSAEADDRNOTAVAIL => error.AddressNotAvailable,
            .WSAEFAULT => error.BadAddress,
            .WSAEINPROGRESS => error.WouldBlock,
            .WSAEINVAL => error.AlreadyBound,
            .WSAENOBUFS => error.NoEphemeralPortsAvailable,
            .WSAENOTSOCK => error.NotASocket,
            else => |err| windows.unexpectedWSAError(err),
        };
    }
}

/// Start listening for incoming connections on the socket.
/// `max_backlog_size` is the kernel connection queue length.
pub fn listen(self: Socket, max_backlog_size: u31) !void {
    const rc = ws2_32.listen(self.fd, max_backlog_size);
    if (rc == ws2_32.SOCKET_ERROR) {
        return switch (ws2_32.WSAGetLastError()) {
            .WSAENETDOWN => error.NetworkSubsystemFailed,
            .WSAEADDRINUSE => error.AddressInUse,
            .WSAEISCONN => error.AlreadyConnected,
            .WSAEINVAL => error.SocketNotBound,
            .WSAEMFILE, .WSAENOBUFS => error.SystemResources,
            .WSAENOTSOCK => error.FileDescriptorNotASocket,
            .WSAEOPNOTSUPP => error.OperationNotSupported,
            .WSAEINPROGRESS => error.WouldBlock,
            else => |err| windows.unexpectedWSAError(err),
        };
    }
}

/// Have the socket attempt to connect to an address.
pub fn connect(self: Socket, address: Socket.Address) !void {
    const rc = ws2_32.connect(self.fd, @ptrCast(*const ws2_32.sockaddr, &address.toNative()), @intCast(c_int, address.getNativeSize()));
    if (rc == ws2_32.SOCKET_ERROR) {
        return switch (ws2_32.WSAGetLastError()) {
            .WSAEADDRINUSE => error.AddressInUse,
            .WSAEADDRNOTAVAIL => error.AddressNotAvailable,
            .WSAECONNREFUSED => error.ConnectionRefused,
            .WSAETIMEDOUT => error.ConnectionTimedOut,
            .WSAEFAULT => error.BadAddress,
            .WSAEINVAL => error.ListeningSocket,
            .WSAEISCONN => error.AlreadyConnected,
            .WSAENOTSOCK => error.NotASocket,
            .WSAEACCES => error.BroadcastNotEnabled,
            .WSAENOBUFS => error.SystemResources,
            .WSAEAFNOSUPPORT => error.AddressFamilyNotSupported,
            .WSAEINPROGRESS, .WSAEWOULDBLOCK => error.WouldBlock,
            .WSAEHOSTUNREACH, .WSAENETUNREACH => error.NetworkUnreachable,
            else => |err| windows.unexpectedWSAError(err),
        };
    }
}
/// Accept a pending incoming connection queued to the kernel backlog
/// of the socket. Returns the accepted socket together with the
/// peer's address.
pub fn accept(self: Socket, flags: u32) !Socket.Connection {
    var address: ws2_32.sockaddr_storage = undefined;
    var address_len: c_int = @sizeOf(ws2_32.sockaddr_storage);
    const rc = ws2_32.accept(self.fd, @ptrCast(*ws2_32.sockaddr, &address), &address_len);
    if (rc == ws2_32.INVALID_SOCKET) {
        return switch (ws2_32.WSAGetLastError()) {
            .WSANOTINITIALISED => unreachable,
            .WSAECONNRESET => error.ConnectionResetByPeer,
            .WSAEFAULT => unreachable,
            .WSAEINVAL => error.SocketNotListening,
            .WSAEMFILE => error.ProcessFdQuotaExceeded,
            .WSAENETDOWN => error.NetworkSubsystemFailed,
            // NOTE(review): WSAENOBUFS -> FileDescriptorNotASocket looks
            // like a copy/paste mismatch (SystemResources elsewhere) —
            // confirm against the upstream error table before changing.
            .WSAENOBUFS => error.FileDescriptorNotASocket,
            .WSAEOPNOTSUPP => error.OperationNotSupported,
            .WSAEWOULDBLOCK => error.WouldBlock,
            else => |err| windows.unexpectedWSAError(err),
        };
    }
    const socket = Socket.from(rc);
    const socket_address = Socket.Address.fromNative(@ptrCast(*ws2_32.sockaddr, &address));
    return Socket.Connection.from(socket, socket_address);
}
/// Read data from the socket into the buffer provided with a set of flags
/// specified. It returns the number of bytes read into the buffer provided.
pub fn read(self: Socket, buf: []u8, flags: u32) !usize {
    // WSARecv takes a WSABUF array; wrap the single slice in one.
    var bufs = &[_]ws2_32.WSABUF{.{ .len = @intCast(u32, buf.len), .buf = buf.ptr }};
    var num_bytes: u32 = undefined;
    // WSARecv may update the flags in place, so pass a mutable copy.
    var flags_ = flags;
    const rc = ws2_32.WSARecv(self.fd, bufs, 1, &num_bytes, &flags_, null, null);
    if (rc == ws2_32.SOCKET_ERROR) {
        return switch (ws2_32.WSAGetLastError()) {
            .WSAECONNABORTED => error.ConnectionAborted,
            .WSAECONNRESET => error.ConnectionResetByPeer,
            .WSAEDISCON => error.ConnectionClosedByPeer,
            .WSAEFAULT => error.BadBuffer,
            .WSAEINPROGRESS,
            .WSAEWOULDBLOCK,
            .WSA_IO_PENDING,
            .WSAETIMEDOUT,
            => error.WouldBlock,
            .WSAEINTR => error.Cancelled,
            .WSAEINVAL => error.SocketNotBound,
            .WSAEMSGSIZE => error.MessageTooLarge,
            .WSAENETDOWN => error.NetworkSubsystemFailed,
            .WSAENETRESET => error.NetworkReset,
            .WSAENOTCONN => error.SocketNotConnected,
            .WSAENOTSOCK => error.FileDescriptorNotASocket,
            .WSAEOPNOTSUPP => error.OperationNotSupported,
            .WSAESHUTDOWN => error.AlreadyShutdown,
            .WSA_OPERATION_ABORTED => error.OperationAborted,
            else => |err| windows.unexpectedWSAError(err),
        };
    }
    return @intCast(usize, num_bytes);
}

/// Write a buffer of data provided to the socket with a set of flags specified.
/// It returns the number of bytes that are written to the socket.
pub fn write(self: Socket, buf: []const u8, flags: u32) !usize {
    // WSABUF.buf is a mutable pointer; cast away const for the send path
    // (WSASend does not write through it).
    var bufs = &[_]ws2_32.WSABUF{.{ .len = @intCast(u32, buf.len), .buf = @intToPtr([*]u8, @ptrToInt(buf.ptr)) }};
    var num_bytes: u32 = undefined;
    const rc = ws2_32.WSASend(self.fd, bufs, 1, &num_bytes, flags, null, null);
    if (rc == ws2_32.SOCKET_ERROR) {
        return switch (ws2_32.WSAGetLastError()) {
            .WSAECONNABORTED => error.ConnectionAborted,
            .WSAECONNRESET => error.ConnectionResetByPeer,
            .WSAEFAULT => error.BadBuffer,
            .WSAEINPROGRESS,
            .WSAEWOULDBLOCK,
            .WSA_IO_PENDING,
            .WSAETIMEDOUT,
            => error.WouldBlock,
            .WSAEINTR => error.Cancelled,
            .WSAEINVAL => error.SocketNotBound,
            .WSAEMSGSIZE => error.MessageTooLarge,
            .WSAENETDOWN => error.NetworkSubsystemFailed,
            .WSAENETRESET => error.NetworkReset,
            .WSAENOBUFS => error.BufferDeadlock,
            .WSAENOTCONN => error.SocketNotConnected,
            .WSAENOTSOCK => error.FileDescriptorNotASocket,
            .WSAEOPNOTSUPP => error.OperationNotSupported,
            .WSAESHUTDOWN => error.AlreadyShutdown,
            .WSA_OPERATION_ABORTED => error.OperationAborted,
            else => |err| windows.unexpectedWSAError(err),
        };
    }
    return @intCast(usize, num_bytes);
}
/// Writes multiple I/O vectors with a prepended message header to the socket
/// with a set of flags specified. It returns the number of bytes that are
/// written to the socket. The WSASendMsg extension function is resolved
/// lazily via WSAIoctl on each call.
pub fn writeVectorized(self: Socket, msg: ws2_32.msghdr_const, flags: u32) !usize {
    const call = try windows.loadWinsockExtensionFunction(ws2_32.LPFN_WSASENDMSG, self.fd, ws2_32.WSAID_WSASENDMSG);
    var num_bytes: u32 = undefined;
    const rc = call(self.fd, &msg, flags, &num_bytes, null, null);
    if (rc == ws2_32.SOCKET_ERROR) {
        return switch (ws2_32.WSAGetLastError()) {
            .WSAECONNABORTED => error.ConnectionAborted,
            .WSAECONNRESET => error.ConnectionResetByPeer,
            .WSAEFAULT => error.BadBuffer,
            .WSAEINPROGRESS,
            .WSAEWOULDBLOCK,
            .WSA_IO_PENDING,
            .WSAETIMEDOUT,
            => error.WouldBlock,
            .WSAEINTR => error.Cancelled,
            .WSAEINVAL => error.SocketNotBound,
            .WSAEMSGSIZE => error.MessageTooLarge,
            .WSAENETDOWN => error.NetworkSubsystemFailed,
            .WSAENETRESET => error.NetworkReset,
            .WSAENOBUFS => error.BufferDeadlock,
            .WSAENOTCONN => error.SocketNotConnected,
            .WSAENOTSOCK => error.FileDescriptorNotASocket,
            .WSAEOPNOTSUPP => error.OperationNotSupported,
            .WSAESHUTDOWN => error.AlreadyShutdown,
            .WSA_OPERATION_ABORTED => error.OperationAborted,
            else => |err| windows.unexpectedWSAError(err),
        };
    }
    return @intCast(usize, num_bytes);
}

/// Read multiple I/O vectors with a prepended message header from the socket
/// with a set of flags specified. It returns the number of bytes that were
/// read into the buffer provided. The WSARecvMsg extension function is
/// resolved lazily via WSAIoctl on each call. Note: `flags` is accepted
/// for symmetry but WSARecvMsg takes them from `msg` — TODO confirm.
pub fn readVectorized(self: Socket, msg: *ws2_32.msghdr, flags: u32) !usize {
    const call = try windows.loadWinsockExtensionFunction(ws2_32.LPFN_WSARECVMSG, self.fd, ws2_32.WSAID_WSARECVMSG);
    var num_bytes: u32 = undefined;
    const rc = call(self.fd, msg, &num_bytes, null, null);
    if (rc == ws2_32.SOCKET_ERROR) {
        return switch (ws2_32.WSAGetLastError()) {
            .WSAECONNABORTED => error.ConnectionAborted,
            .WSAECONNRESET => error.ConnectionResetByPeer,
            .WSAEDISCON => error.ConnectionClosedByPeer,
            .WSAEFAULT => error.BadBuffer,
            .WSAEINPROGRESS,
            .WSAEWOULDBLOCK,
            .WSA_IO_PENDING,
            .WSAETIMEDOUT,
            => error.WouldBlock,
            .WSAEINTR => error.Cancelled,
            .WSAEINVAL => error.SocketNotBound,
            .WSAEMSGSIZE => error.MessageTooLarge,
            .WSAENETDOWN => error.NetworkSubsystemFailed,
            .WSAENETRESET => error.NetworkReset,
            .WSAENOTCONN => error.SocketNotConnected,
            .WSAENOTSOCK => error.FileDescriptorNotASocket,
            .WSAEOPNOTSUPP => error.OperationNotSupported,
            .WSAESHUTDOWN => error.AlreadyShutdown,
            .WSA_OPERATION_ABORTED => error.OperationAborted,
            else => |err| windows.unexpectedWSAError(err),
        };
    }
    return @intCast(usize, num_bytes);
}
/// Query the address that the socket is locally bound to.
pub fn getLocalAddress(self: Socket) !Socket.Address {
    var address: ws2_32.sockaddr_storage = undefined;
    var address_len: c_int = @sizeOf(ws2_32.sockaddr_storage);
    const rc = ws2_32.getsockname(self.fd, @ptrCast(*ws2_32.sockaddr, &address), &address_len);
    if (rc == ws2_32.SOCKET_ERROR) {
        return switch (ws2_32.WSAGetLastError()) {
            .WSANOTINITIALISED => unreachable,
            .WSAEFAULT => unreachable,
            .WSAENETDOWN => error.NetworkSubsystemFailed,
            .WSAENOTSOCK => error.FileDescriptorNotASocket,
            .WSAEINVAL => error.SocketNotBound,
            else => |err| windows.unexpectedWSAError(err),
        };
    }
    return Socket.Address.fromNative(@ptrCast(*ws2_32.sockaddr, &address));
}

/// Query the address that the socket is connected to.
pub fn getRemoteAddress(self: Socket) !Socket.Address {
    var address: ws2_32.sockaddr_storage = undefined;
    var address_len: c_int = @sizeOf(ws2_32.sockaddr_storage);
    const rc = ws2_32.getpeername(self.fd, @ptrCast(*ws2_32.sockaddr, &address), &address_len);
    if (rc == ws2_32.SOCKET_ERROR) {
        return switch (ws2_32.WSAGetLastError()) {
            .WSANOTINITIALISED => unreachable,
            .WSAEFAULT => unreachable,
            .WSAENETDOWN => error.NetworkSubsystemFailed,
            .WSAENOTSOCK => error.FileDescriptorNotASocket,
            .WSAEINVAL => error.SocketNotBound,
            else => |err| windows.unexpectedWSAError(err),
        };
    }
    return Socket.Address.fromNative(@ptrCast(*ws2_32.sockaddr, &address));
}
/// Query and return the latest cached error on the socket.
/// NOTE(review): stub — always succeeds without querying SO_ERROR;
/// TODO implement via getsockopt.
pub fn getError(self: Socket) !void {
    return {};
}

/// Query the read buffer size of the socket.
/// NOTE(review): stub — always returns 0; TODO query SO_RCVBUF.
pub fn getReadBufferSize(self: Socket) !u32 {
    return 0;
}

/// Query the write buffer size of the socket.
/// NOTE(review): stub — always returns 0; TODO query SO_SNDBUF.
pub fn getWriteBufferSize(self: Socket) !u32 {
    return 0;
}

/// Set a socket option. `value` is passed to setsockopt as raw bytes.
pub fn setOption(self: Socket, level: u32, code: u32, value: []const u8) !void {
    const rc = ws2_32.setsockopt(self.fd, @intCast(i32, level), @intCast(i32, code), value.ptr, @intCast(i32, value.len));
    if (rc == ws2_32.SOCKET_ERROR) {
        return switch (ws2_32.WSAGetLastError()) {
            .WSANOTINITIALISED => unreachable,
            .WSAENETDOWN => return error.NetworkSubsystemFailed,
            .WSAEFAULT => unreachable,
            .WSAENOTSOCK => return error.FileDescriptorNotASocket,
            .WSAEINVAL => return error.SocketNotBound,
            else => |err| windows.unexpectedWSAError(err),
        };
    }
}

/// Have close() or shutdown() syscalls block until all queued messages in the socket have been successfully
/// sent, or if the timeout specified in seconds has been reached. It returns `error.UnsupportedSocketOption`
/// if the host does not support the option for a socket to linger around up until a timeout specified in
/// seconds. Passing null disables lingering.
pub fn setLinger(self: Socket, timeout_seconds: ?u16) !void {
    const settings = ws2_32.linger{
        .l_onoff = @as(u16, @boolToInt(timeout_seconds != null)),
        .l_linger = if (timeout_seconds) |seconds| seconds else 0,
    };
    return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_LINGER, mem.asBytes(&settings));
}

/// On connection-oriented sockets, have keep-alive messages be sent periodically. The timing in which keep-alive
/// messages are sent are dependant on operating system settings. It returns `error.UnsupportedSocketOption` if
/// the host does not support periodically sending keep-alive messages on connection-oriented sockets.
pub fn setKeepAlive(self: Socket, enabled: bool) !void {
    return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_KEEPALIVE, mem.asBytes(&@as(u32, @boolToInt(enabled))));
}

/// Allow multiple sockets on the same host to listen on the same address. It returns `error.UnsupportedSocketOption` if
/// the host does not support sockets listening the same address.
pub fn setReuseAddress(self: Socket, enabled: bool) !void {
    return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_REUSEADDR, mem.asBytes(&@as(u32, @boolToInt(enabled))));
}

/// Allow multiple sockets on the same host to listen on the same port. It returns `error.UnsupportedSocketOption` if
/// the host does not supports sockets listening on the same port.
///
/// TODO: verify if this truly mimicks SO_REUSEPORT behavior, or if SO_REUSE_UNICASTPORT provides the correct behavior
/// NOTE(review): sets SO_BROADCAST here, which controls datagram
/// broadcast, not port reuse — confirm this is intentional.
pub fn setReusePort(self: Socket, enabled: bool) !void {
    try self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_BROADCAST, mem.asBytes(&@as(u32, @boolToInt(enabled))));
    try self.setReuseAddress(enabled);
}

/// Set the write buffer size of the socket.
pub fn setWriteBufferSize(self: Socket, size: u32) !void {
    return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_SNDBUF, mem.asBytes(&size));
}

/// Set the read buffer size of the socket.
pub fn setReadBufferSize(self: Socket, size: u32) !void {
    return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_RCVBUF, mem.asBytes(&size));
}

/// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is
/// set on a non-blocking socket.
///
/// Set a timeout on the socket that is to occur if no messages are successfully written
/// to its bound destination after a specified number of milliseconds. A subsequent write
/// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded.
pub fn setWriteTimeout(self: Socket, milliseconds: u32) !void {
    return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_SNDTIMEO, mem.asBytes(&milliseconds));
}

/// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is
/// set on a non-blocking socket.
///
/// Set a timeout on the socket that is to occur if no messages are successfully read
/// from its bound destination after a specified number of milliseconds. A subsequent
/// read from the socket will thereafter return `error.WouldBlock` should the timeout be
/// exceeded.
pub fn setReadTimeout(self: Socket, milliseconds: u32) !void {
    return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_RCVTIMEO, mem.asBytes(&milliseconds));
}
};
}
|
lib/std/x/os/socket_windows.zig
|
const std = @import("std");
const tt = @import("root");
const Parser = @This();
/// Parser states: plain text vs. inside a format spec.
const State = enum {
    text,
    spec,
};

/// Token kinds fed to `parse`.
const Token = enum {
    text,
    start_spec,
    end_spec,
};

// Combines a (State, Token) pair into a single value that can be
// switched over; `transition` builds one at comptime or runtime.
const TransitionCombinator = tt.util.enums.Combinator(State, Token);
const Transition = TransitionCombinator.Cross;
const transition = TransitionCombinator.cross;

// This file is itself a struct; `state` is its only field.
state: State = .text,
/// Drive the state machine over `tokens`, logging each transition.
/// Returns `error.InvalidTransition` for any (state, token) pair with
/// no defined transition (e.g. `.end_spec` while in `.text`).
///
/// Fix: the original called `std.info.log` / `std.err.log`, which do
/// not exist in the std namespace (compile error once these branches
/// are analyzed); the intended calls are `std.log.info` / `std.log.err`.
pub fn parse(self: *Parser, tokens: []const Token) !void {
    for (tokens) |token| {
        switch (transition(self.state, token)) {
            transition(.text, .text) => {
                std.log.info("concatenating normal text", .{});
                std.debug.print("concatenating normal text\n", .{});
            },
            transition(.text, .start_spec) => {
                std.log.info("starting a spec", .{});
                std.debug.print("starting a spec\n", .{});
                self.state = .spec;
            },
            transition(.spec, .text) => {
                std.log.info("concatenating spec text", .{});
                std.debug.print("concatenating spec text\n", .{});
            },
            transition(.spec, .end_spec) => {
                std.log.info("ending a spec", .{});
                std.debug.print("ending a spec\n", .{});
                self.state = .text;
            },
            else => {
                std.log.err("invalid transition {s} -> {s}", .{ @tagName(self.state), @tagName(token) });
                return error.InvalidTransition;
            },
        }
    }
}
// Happy-path run through the state machine: a balanced spec surrounded
// by text tokens must not return an error.
test "parse" {
    var parser = Parser{};
    std.debug.print("\ntesting parser\n", .{});
    try parser.parse(&[_]Token{
        .text,
        .start_spec,
        .text,
        .end_spec,
        .text,
        .text,
        .text,
    });
}
/// Minimal placeholder tokenizer: prints each item yielded by `iterator`.
const Tokenizer = struct {
    /// `iterator` is duck-typed: it must provide `next()` returning an
    /// optional `u8` (e.g. tt.util.SliceIterator(u8)).
    pub fn parse(iterator: anytype) !void {
        while (iterator.next()) |c| {
            std.debug.print("{c}", .{c});
        }
        std.debug.print("\n", .{});
    }
};
// Smoke test: drive the Tokenizer over a fixed string via SliceIterator.
test "Tokenizer" {
    const filename = "tests/foo.weeb";
    // Fix: removed a leftover `@compileLog(@typeInfo(tt).Struct.fields.len);`
    // — @compileLog always fails compilation ("found compile log
    // statement"), so this test could never build.
    try Tokenizer.parse(&tt.util.SliceIterator(u8).init(filename));
    // std.testing.expectEqualStrings(filename, p.filename);
}
|
src/web/weeb/Parser.zig
|
const wlr = @import("../wlroots.zig");
const wayland = @import("wayland");
const wl = wayland.server.wl;
/// Binding for wlroots' wlr_input_method_manager_v2.
/// extern struct: the field order mirrors the C definition and must not
/// be changed.
pub const InputMethodManagerV2 = extern struct {
    global: *wl.Global,
    /// Live input methods, linked through InputMethodV2.link.
    input_methods: wl.list.Head(InputMethodV2, "link"),
    server_destroy: wl.Listener(*wl.Server),
    events: extern struct {
        input_method: wl.Signal(*wlr.InputMethodV2),
        destroy: wl.Signal(*wlr.InputMethodManagerV2),
    },
    extern fn wlr_input_method_manager_v2_create(server: *wl.Server) ?*wlr.InputMethodManagerV2;
    /// Creates the manager global; a null result from the C side is
    /// surfaced as error.OutOfMemory.
    pub fn create(server: *wl.Server) !*wlr.InputMethodManagerV2 {
        return wlr_input_method_manager_v2_create(server) orelse error.OutOfMemory;
    }
};
/// Binding for wlroots' wlr_input_method_v2.
/// extern struct: the field order mirrors the C definition and must not
/// be changed.
pub const InputMethodV2 = extern struct {
    pub const PreeditString = extern struct {
        text: [*:0]u8,
        cursor_begin: i32,
        cursor_end: i32,
    };
    pub const DeleteSurroundingText = extern struct {
        before_length: u32,
        after_length: u32,
    };
    /// Protocol state: preedit text, text to commit, surrounding-text
    /// deletion request.
    pub const State = extern struct {
        preedit: wlr.InputMethodV2.PreeditString,
        commit_text: [*:0]u8,
        delete: wlr.InputMethodV2.DeleteSurroundingText,
    };
    pub const KeyboardGrab = extern struct {
        resource: *wl.Resource,
        input_method: *wlr.InputMethodV2,
        keyboard: ?*wlr.Keyboard,
        keyboard_keymap: wl.Listener(*wlr.Keyboard),
        keyboard_repeat_info: wl.Listener(*wlr.Keyboard),
        keyboard_destroy: wl.Listener(*wlr.Keyboard),
        events: extern struct {
            destroy: wl.Signal(*wlr.InputMethodV2.KeyboardGrab),
        },
        extern fn wlr_input_method_keyboard_grab_v2_send_key(keyboard_grab: *wlr.InputMethodV2.KeyboardGrab, time: u32, key: u32, state: u32) void;
        /// Forwards a key event; the typed KeyState is converted to the
        /// raw u32 the C API expects.
        pub fn sendKey(keyboard_grab: *wlr.InputMethodV2.KeyboardGrab, time: u32, key: u32, state: wl.Keyboard.KeyState) void {
            wlr_input_method_keyboard_grab_v2_send_key(keyboard_grab, time, key, @intCast(u32, @enumToInt(state)));
        }
        extern fn wlr_input_method_keyboard_grab_v2_send_modifiers(keyboard_grab: *wlr.InputMethodV2.KeyboardGrab, modifiers: *wlr.Keyboard.Modifiers) void;
        pub const sendModifiers = wlr_input_method_keyboard_grab_v2_send_modifiers;
        extern fn wlr_input_method_keyboard_grab_v2_set_keyboard(keyboard_grab: *wlr.InputMethodV2.KeyboardGrab, keyboard: ?*wlr.Keyboard) void;
        pub const setKeyboard = wlr_input_method_keyboard_grab_v2_set_keyboard;
        extern fn wlr_input_method_keyboard_grab_v2_destroy(keyboard_grab: *wlr.InputMethodV2.KeyboardGrab) void;
        pub const destroy = wlr_input_method_keyboard_grab_v2_destroy;
    };
    resource: *wl.Resource,
    seat: *wlr.Seat,
    seat_client: *wlr.Seat.Client,
    pending: wlr.InputMethodV2.State,
    current: wlr.InputMethodV2.State,
    active: bool,
    client_active: bool,
    current_serial: u32,
    keyboard_grab: ?*KeyboardGrab,
    link: wl.list.Link,
    seat_client_destroy: wl.Listener(*wlr.Seat.Client),
    events: extern struct {
        commit: wl.Signal(*wlr.InputMethodV2),
        grab_keyboard: wl.Signal(*wlr.InputMethodV2.KeyboardGrab),
        destroy: wl.Signal(*wlr.InputMethodV2),
    },
    // Thin re-exports of the C entry points.
    extern fn wlr_input_method_v2_send_activate(input_method: *wlr.InputMethodV2) void;
    pub const sendActivate = wlr_input_method_v2_send_activate;
    extern fn wlr_input_method_v2_send_deactivate(input_method: *wlr.InputMethodV2) void;
    pub const sendDeactivate = wlr_input_method_v2_send_deactivate;
    extern fn wlr_input_method_v2_send_surrounding_text(input_method: *wlr.InputMethodV2, text: [*:0]const u8, cursor: u32, anchor: u32) void;
    pub const sendSurroundingText = wlr_input_method_v2_send_surrounding_text;
    extern fn wlr_input_method_v2_send_content_type(input_method: *wlr.InputMethodV2, hint: u32, purpose: u32) void;
    pub const sendContentType = wlr_input_method_v2_send_content_type;
    extern fn wlr_input_method_v2_send_text_change_cause(input_method: *wlr.InputMethodV2, cause: u32) void;
    pub const sendTextChangeCause = wlr_input_method_v2_send_text_change_cause;
    extern fn wlr_input_method_v2_send_done(input_method: *wlr.InputMethodV2) void;
    pub const sendDone = wlr_input_method_v2_send_done;
    extern fn wlr_input_method_v2_send_unavailable(input_method: *wlr.InputMethodV2) void;
    pub const sendUnavailable = wlr_input_method_v2_send_unavailable;
};
|
src/types/input_method_v2.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
const Verbatim = @import("./verbatim.zig").Verbatim;
const testing = std.testing;
pub const E = error.DynamicReplyError;
/// DynamicReply lets you parse Redis replies without having to know
/// their shape beforehand. It also supports parsing Redis errors and
/// attributes. By using DynamicReply you will be able to parse any possible
/// Redis reply. It even supports non-toplevel errors.
pub const DynamicReply = struct {
    /// Attributes attached to this reply (RESP3 `|` prefix); empty when none.
    attribs: [][2]*DynamicReply,
    /// The actual payload of the reply.
    data: Data,
    /// Every reply shape RESP3 can produce.
    pub const Data = union(enum) {
        Nil: void,
        Bool: bool,
        Number: i64,
        Double: f64,
        Bignum: std.math.big.int.Managed,
        String: Verbatim,
        List: []DynamicReply,
        Set: []DynamicReply,
        Map: [][2]*DynamicReply,
    };
    pub const Redis = struct {
        pub const Parser = struct {
            pub const HandlesAttributes = true;
            /// Allocation-free parsing cannot work for a dynamically shaped
            /// reply, so this always fails at compile time.
            pub fn parse(tag: u8, comptime _: type, msg: anytype) !DynamicReply {
                @compileError("DynamicReply requires an allocator. Use `sendAlloc`!");
            }
            /// Recursively frees a reply tree produced by `parseAlloc`.
            pub fn destroy(self: DynamicReply, comptime rootParser: type, allocator: *Allocator) void {
                rootParser.freeReply(self.attribs, allocator);
                switch (self.data) {
                    .Nil, .Bool, .Number, .Double => {},
                    .Bignum => {
                        // `std.math.big.int.Managed.deinit` wants the parameter to be writable
                        // so we have to copy it inside a `var` variable.
                        var x = self;
                        x.data.Bignum.deinit();
                    },
                    .String => |ver| rootParser.freeReply(ver, allocator),
                    .List, .Set => |lst| {
                        for (lst) |elem| destroy(elem, rootParser, allocator);
                        allocator.free(lst);
                    },
                    .Map => |map| {
                        // rootParser.freeReply(map, allocator);
                        for (map) |elem| rootParser.freeReply(elem, allocator);
                        allocator.free(map);
                    },
                }
            }
            /// Parses one reply of any shape starting from type tag `tag`.
            /// A leading `|` (attribute map) is parsed first, after which
            /// the next tag byte is read and the actual payload follows.
            /// All parse failures are collapsed into error.DynamicReplyError.
            pub fn parseAlloc(tag: u8, comptime rootParser: type, allocator: *Allocator, msg: anytype) error{DynamicReplyError}!DynamicReply {
                var itemTag = tag;
                var res: DynamicReply = undefined;
                if (itemTag == '|') {
                    // Here we lie to the root parser and claim we encountered a map type >:3
                    // No error catching is done because DynamicReply parses correctly
                    // both errors and nil values.
                    res.attribs = rootParser.parseAllocFromTag([][2]*DynamicReply, '%', allocator, msg) catch return E;
                    itemTag = msg.readByte() catch return E;
                } else {
                    res.attribs = &[0][2]*DynamicReply{};
                }
                res.data = switch (itemTag) {
                    else => return E,
                    '_' => Data{ .Nil = {} },
                    '#' => Data{ .Bool = rootParser.parseFromTag(bool, '#', msg) catch return E },
                    ':' => Data{ .Number = rootParser.parseFromTag(i64, ':', msg) catch return E },
                    ',' => Data{ .Double = rootParser.parseFromTag(f64, ',', msg) catch return E },
                    '+', '$', '=' => Data{ .String = rootParser.parseAllocFromTag(Verbatim, itemTag, allocator, msg) catch return E },
                    '%' => Data{ .Map = rootParser.parseAllocFromTag([][2]*DynamicReply, '%', allocator, msg) catch return E },
                    '*' => Data{ .List = rootParser.parseAllocFromTag([]DynamicReply, '*', allocator, msg) catch return E },
                    '~' => Data{ .Set = rootParser.parseAllocFromTag([]DynamicReply, '~', allocator, msg) catch return E },
                    '(' => Data{ .Bignum = rootParser.parseAllocFromTag(std.math.big.int.Managed, '(', allocator, msg) catch return E },
                };
                return res;
            }
        };
    };
};
// End-to-end parse of the three fixture streams below: a simple string,
// a nested list, and a list carrying attributes at several levels.
// The first tag byte is passed explicitly because the fixtures strip it
// from the stream (see the `[1..]` in each Make* helper).
test "dynamic replies" {
    const parser = @import("../parser.zig").RESP3Parser;
    const allocator = std.heap.page_allocator;
    {
        const reply = try DynamicReply.Redis.Parser.parseAlloc('+', parser, allocator, MakeSimpleString().inStream());
        testing.expectEqualSlices(u8, "Yayyyy I'm a string!", reply.data.String.string);
    }
    {
        const reply = try DynamicReply.Redis.Parser.parseAlloc('*', parser, allocator, MakeComplexList().inStream());
        testing.expectEqual(@as(usize, 0), reply.attribs.len);
        testing.expectEqualSlices(u8, "Hello", reply.data.List[0].data.String.string);
        testing.expectEqual(true, reply.data.List[1].data.Bool);
        testing.expectEqual(@as(usize, 0), reply.data.List[1].attribs.len);
        testing.expectEqual(@as(usize, 0), reply.data.List[2].attribs.len);
        testing.expectEqual(@as(i64, 123), reply.data.List[2].data.List[0].data.Number);
        testing.expectEqual(@as(usize, 0), reply.data.List[2].data.List[0].attribs.len);
        testing.expectEqual(@as(f64, 12.34), reply.data.List[2].data.List[1].data.Double);
        testing.expectEqual(@as(usize, 0), reply.data.List[2].data.List[1].attribs.len);
    }
    {
        // Attributes appear on the toplevel reply, on a list element, and
        // on a nested element — all must be captured in `attribs`.
        const reply = try DynamicReply.Redis.Parser.parseAlloc('|', parser, allocator, MakeComplexListWithAttributes().inStream());
        testing.expectEqual(@as(usize, 2), reply.attribs.len);
        testing.expectEqualSlices(u8, "Ciao", reply.attribs[0][0].data.String.string);
        testing.expectEqualSlices(u8, "World", reply.attribs[0][1].data.String.string);
        testing.expectEqualSlices(u8, "Peach", reply.attribs[1][0].data.String.string);
        testing.expectEqual(@as(f64, 9.99), reply.attribs[1][1].data.Double);
        testing.expectEqualSlices(u8, "Hello", reply.data.List[0].data.String.string);
        testing.expectEqual(@as(usize, 0), reply.data.List[0].attribs.len);
        testing.expectEqual(true, reply.data.List[1].data.Bool);
        testing.expectEqual(@as(usize, 1), reply.data.List[1].attribs.len);
        testing.expectEqualSlices(u8, "ttl", reply.data.List[1].attribs[0][0].data.String.string);
        testing.expectEqual(@as(i64, 100), reply.data.List[1].attribs[0][1].data.Number);
        testing.expectEqual(@as(usize, 0), reply.data.List[2].attribs.len);
        testing.expectEqual(@as(i64, 123), reply.data.List[2].data.List[0].data.Number);
        testing.expectEqual(@as(usize, 1), reply.data.List[2].data.List[0].attribs.len);
        testing.expectEqualSlices(u8, "Banana", reply.data.List[2].data.List[0].attribs[0][0].data.String.string);
        testing.expectEqual(true, reply.data.List[2].data.List[0].attribs[0][1].data.Bool);
        testing.expectEqual(@as(i64, 424242), try reply.data.List[2].data.List[1].data.Bignum.to(i64));
        testing.expectEqual(@as(usize, 0), reply.data.List[2].data.List[1].attribs.len);
        testing.expectEqual(@as(f64, 12.34), reply.data.List[2].data.List[2].data.Double);
        testing.expectEqual(@as(usize, 0), reply.data.List[2].data.List[2].attribs.len);
    }
}
/// Builds an in-memory stream containing a RESP3 simple-string reply.
/// The leading `+` type tag is stripped (`[1..]`) because the parser
/// consumes the tag byte before it is handed the stream.
fn MakeSimpleString() std.io.FixedBufferStream([]const u8) {
    const reply: []const u8 = "+Yayyyy I'm a string!\r\n";
    return std.io.fixedBufferStream(reply[1..]);
}
/// Builds an in-memory stream containing a RESP3 list of three elements
/// (string, bool, nested list). The leading `*` tag is stripped (`[1..]`)
/// because the parser consumes the tag byte before reading the stream.
fn MakeComplexList() std.io.FixedBufferStream([]const u8) {
    const reply: []const u8 = "*3\r\n+Hello\r\n#t\r\n*2\r\n:123\r\n,12.34\r\n";
    return std.io.fixedBufferStream(reply[1..]);
}
//zig fmt: off
/// Builds an in-memory stream containing a RESP3 list that carries
/// attributes both at the top level and nested on inner elements.
/// The leading `|` attribute tag is stripped by the final `[1..]`
/// because the parser consumes the tag byte before reading the stream.
/// Wire bytes must stay exactly as written (zig fmt is disabled around
/// this fixture).
fn MakeComplexListWithAttributes() std.io.FixedBufferStream([]const u8) {
    return std.io.fixedBufferStream(
        ("|2\r\n" ++
            "+Ciao\r\n" ++
            "+World\r\n" ++
            "+Peach\r\n" ++
            ",9.99\r\n" ++
            "*3\r\n" ++
            "+Hello\r\n" ++
            "|1\r\n" ++
            "+ttl\r\n" ++
            ":100\r\n" ++
            "#t\r\n" ++
            "*3\r\n" ++
            "|1\r\n" ++
            "+Banana\r\n" ++
            "#t\r\n" ++
            ":123\r\n" ++
            "(424242\r\n" ++
            ",12.34\r\n")
        [1..]);
}
//zig fmt: on
// Forces semantic analysis of every declaration so nested tests run and
// doc generation sees the whole API surface.
test "docs" {
    @import("std").meta.refAllDecls(@This());
    @import("std").meta.refAllDecls(DynamicReply);
}
|
src/types/reply.zig
|
const c = @import("../../c_global.zig").c_imp;
const std = @import("std");
// dross-zig
const Application = @import("../../core/application.zig").Application;
const Matrix4 = @import("../../core/matrix4.zig").Matrix4;
const Vector3 = @import("../../core/vector3.zig").Vector3;
// How many cameras are instantiated in the scene
var camera_count: u8 = 0;
/// The currently active camera.
/// NOTE(review): starts out `undefined`; it is only meaningful after the
/// first camera has been created (camera_count > 0).
var current_camera: *Camera2d = undefined;
/// ErrorSet for possible camera related errors
pub const CameraError = error{
    /// Requested camera could not be found
    CameraNotFound,
};
// -----------------------------------------
// - Camera2D -
// -----------------------------------------
// All Cameras will be accounted for by the framework.
// Instance will be created via the buildCamera2d, and
// freed from freeAllCamera2d and freeCamera2d(id).
// Each camera will get given a unique ID, and will be
// added and removed from a list of cameras in the scene.
// There can only be a single "active" camera, which
// is what will be rendered to the display.
/// Orthographic camera implementation
pub const Camera2d = struct {
    /// Unique Camera ID
    id: u16,
    /// The position of the camera
    internal_position: Vector3,
    /// The target position the camera should be focusing on
    internal_target_position: Vector3,
    /// Level of zoom
    internal_zoom: f32 = 0.4,
    /// Determines how close something can be before getting clipped
    internal_near: f32 = 0.0,
    /// Determines how far something can be before getting clipped
    internal_far: f32 = 0.0,
    /// How quickly the camera will travel
    internal_speed: f32 = 2.0,
    /// Is this camera the currently active one
    current: bool = false,

    const Self = @This();

    /// Allocates and sets up a Camera2d instance.
    /// Comments: The caller will own the camera, which
    /// means they will be responsible for freeing.
    ///
    /// Fixes: `allocator.create` returns uninitialized memory and field
    /// defaults do NOT apply, yet the original left `internal_position`
    /// undefined for every camera and left `id`/`current` undefined for
    /// every camera after the first. All fields are now set explicitly,
    /// and every camera receives a unique id.
    pub fn new(allocator: *std.mem.Allocator) !*Camera2d {
        var self = try allocator.create(Camera2d);
        self.internal_position = Vector3.new(0.0, 0.0, 0.0);
        self.internal_target_position = Vector3.new(0.0, 0.0, 0.0);
        self.internal_zoom = 4.0;
        self.internal_near = 0.01;
        self.internal_far = 100.0;
        self.internal_speed = 20.0;
        // TODO(devon): We'll have to check if this already exists at some point
        camera_count += 1;
        self.id = camera_count;
        // The first camera created becomes the active camera.
        if (camera_count == 1) {
            self.current = true;
            current_camera = self;
        } else {
            self.current = false;
        }
        return self;
    }

    /// Ensures to reduce the camera count and removes the camera from the cameras list.
    /// NOTE(review): freeing the active camera leaves `current_camera`
    /// dangling — confirm callers never free the active camera without
    /// activating another one first.
    pub fn free(allocator: *std.mem.Allocator, self: *Self) void {
        camera_count -= 1;
        allocator.destroy(self);
    }

    /// Sets the new target position to the desired `position`
    pub fn setTargetPosition(self: *Self, position: Vector3) void {
        self.internal_target_position = position;
    }

    /// Sets the zoom level to the desired `zoom`
    pub fn setZoom(self: *Self, desired_zoom: f32) void {
        self.internal_zoom = desired_zoom;
    }

    /// Sets the camera speed
    pub fn setSpeed(self: *Self, speed: f32) void {
        self.internal_speed = speed;
    }

    /// Sets the position of the camera
    pub fn setPosition(self: *Self, new_position: Vector3) void {
        self.internal_position = new_position;
    }

    /// Returns the zoom of the camera
    pub fn zoom(self: *Self) f32 {
        return self.internal_zoom;
    }

    /// Returns the position of the camera
    pub fn position(self: *Self) Vector3 {
        return self.internal_position;
    }

    /// Returns the speed of the camera
    pub fn speed(self: *Self) f32 {
        return self.internal_speed;
    }

    /// Returns the target position
    pub fn targetPosition(self: *Self) Vector3 {
        return self.internal_target_position;
    }
};
/// Returns the current count of Cameras found in the Scene
/// (incremented by `Camera2d.new`, decremented by `Camera2d.free`).
pub fn cameraCount() u8 {
    return camera_count;
}
/// Returns the current camera if one is set, otherwise it will return null.
///
/// Fix: the original compared the global pointer against `undefined`
/// (`current_camera != undefined`), which is not a valid runtime check in
/// Zig — reading an undefined value is illegal behavior. Instead, use the
/// camera count to decide whether `current_camera` has ever been assigned
/// (it is set when the first camera is created).
pub fn currentCamera() ?*Camera2d {
    if (camera_count == 0) return null;
    return current_camera;
}
|
src/renderer/cameras/camera_2d.zig
|
const std = @import("std");
usingnamespace (@import("../machine.zig"));
usingnamespace (@import("../util.zig"));
// Encoding tests for instruction families introduced with the 80386:
// MOVSX/MOVZX/MOVSXD, the SETcc family, bit scan/test (BSF/BSR/BT/BTC/
// BTR/BTS), double-precision shifts (SHLD/SHRD) and moves to/from control
// and debug registers. Each testOpN call asserts that assembling the
// instruction for the given machine mode produces exactly the expected
// byte string, or the expected AsmError. The hex strings are the ground
// truth of this test — do not reformat them.
test "80386" {
    const m32 = Machine.init(.x86_32);
    const m64 = Machine.init(.x64);
    debugPrint(false);
    {
        const rm8 = Operand.memoryRm(.DefaultSeg, .BYTE, .EAX, 0);
        const rm16 = Operand.memoryRm(.DefaultSeg, .WORD, .EAX, 0);
        const rm32 = Operand.memoryRm(.DefaultSeg, .DWORD, .EAX, 0);
        const reg16 = Operand.register(.AX);
        const reg32 = Operand.register(.EAX);
        const reg64 = Operand.register(.RAX);
        // MOVSX
        testOp2(m32, .MOVSX, reg16, rm8, "66 0F BE 00");
        testOp2(m32, .MOVSX, reg32, rm8, "0F BE 00");
        testOp2(m32, .MOVSX, reg64, rm8, AsmError.InvalidOperand);
        testOp2(m32, .MOVSX, reg16, rm16, "66 0F BF 00");
        testOp2(m32, .MOVSX, reg32, rm16, "0F BF 00");
        testOp2(m32, .MOVSX, reg64, rm16, AsmError.InvalidOperand);
        testOp2(m64, .MOVSX, reg16, rm8, "66 67 0F BE 00");
        testOp2(m64, .MOVSX, reg32, rm8, "67 0F BE 00");
        testOp2(m64, .MOVSX, reg64, rm8, "67 48 0F BE 00");
        testOp2(m64, .MOVSX, reg16, rm16, "66 67 0F BF 00");
        testOp2(m64, .MOVSX, reg32, rm16, "67 0F BF 00");
        testOp2(m64, .MOVSX, reg64, rm16, "67 48 0F BF 00");
        // MOVSXD
        testOp2(m32, .MOVSXD, reg16, rm16, "66 63 00");
        testOp2(m32, .MOVSXD, reg16, rm32, "66 63 00");
        testOp2(m32, .MOVSXD, reg32, rm32, "63 00");
        testOp2(m32, .MOVSXD, reg64, rm32, AsmError.InvalidOperand);
        testOp2(m64, .MOVSXD, reg16, rm16, "66 67 63 00");
        testOp2(m64, .MOVSXD, reg16, rm32, "66 67 63 00");
        testOp2(m64, .MOVSXD, reg32, rm32, "67 63 00");
        testOp2(m64, .MOVSXD, reg64, rm32, "67 48 63 00");
        // MOVZX
        testOp2(m32, .MOVZX, reg16, rm8, "66 0F B6 00");
        testOp2(m32, .MOVZX, reg32, rm8, "0F B6 00");
        testOp2(m32, .MOVZX, reg64, rm8, AsmError.InvalidOperand);
        testOp2(m32, .MOVZX, reg16, rm16, "66 0F B7 00");
        testOp2(m32, .MOVZX, reg32, rm16, "0F B7 00");
        testOp2(m32, .MOVZX, reg64, rm16, AsmError.InvalidOperand);
        testOp2(m64, .MOVZX, reg16, rm8, "66 67 0F B6 00");
        testOp2(m64, .MOVZX, reg32, rm8, "67 0F B6 00");
        testOp2(m64, .MOVZX, reg64, rm8, "67 48 0F B6 00");
        testOp2(m64, .MOVZX, reg16, rm16, "66 67 0F B7 00");
        testOp2(m64, .MOVZX, reg32, rm16, "67 0F B7 00");
        testOp2(m64, .MOVZX, reg64, rm16, "67 48 0F B7 00");
    }
    {
        // SETcc — including the aliased mnemonics (e.g. SETC == SETB,
        // SETPE == SETP), which must encode to the same opcode.
        const op1 = Operand.memoryRm(.DefaultSeg, .BYTE, .RAX, 0);
        testOp1(m64, .SETA, op1, "0F 97 00");
        testOp1(m64, .SETAE, op1, "0F 93 00");
        testOp1(m64, .SETB, op1, "0F 92 00");
        testOp1(m64, .SETBE, op1, "0F 96 00");
        testOp1(m64, .SETC, op1, "0F 92 00");
        testOp1(m64, .SETE, op1, "0F 94 00");
        testOp1(m64, .SETG, op1, "0F 9F 00");
        testOp1(m64, .SETGE, op1, "0F 9D 00");
        testOp1(m64, .SETL, op1, "0F 9C 00");
        testOp1(m64, .SETLE, op1, "0F 9E 00");
        testOp1(m64, .SETNA, op1, "0F 96 00");
        testOp1(m64, .SETNAE, op1, "0F 92 00");
        testOp1(m64, .SETNB, op1, "0F 93 00");
        testOp1(m64, .SETNBE, op1, "0F 97 00");
        testOp1(m64, .SETNC, op1, "0F 93 00");
        testOp1(m64, .SETNE, op1, "0F 95 00");
        testOp1(m64, .SETNG, op1, "0F 9E 00");
        testOp1(m64, .SETNGE, op1, "0F 9C 00");
        testOp1(m64, .SETNL, op1, "0F 9D 00");
        testOp1(m64, .SETNLE, op1, "0F 9F 00");
        testOp1(m64, .SETNO, op1, "0F 91 00");
        testOp1(m64, .SETNP, op1, "0F 9B 00");
        testOp1(m64, .SETNS, op1, "0F 99 00");
        testOp1(m64, .SETNZ, op1, "0F 95 00");
        testOp1(m64, .SETO, op1, "0F 90 00");
        testOp1(m64, .SETP, op1, "0F 9A 00");
        testOp1(m64, .SETPE, op1, "0F 9A 00");
        testOp1(m64, .SETPO, op1, "0F 9B 00");
        testOp1(m64, .SETS, op1, "0F 98 00");
        testOp1(m64, .SETZ, op1, "0F 94 00");
    }
    {
        const rm16 = Operand.memoryRm(.DefaultSeg, .WORD, .EAX, 0);
        const rm32 = Operand.memoryRm(.DefaultSeg, .DWORD, .EAX, 0);
        const rm64 = Operand.memoryRm(.DefaultSeg, .QWORD, .EAX, 0);
        const reg16 = Operand.register(.AX);
        const reg32 = Operand.register(.EAX);
        const reg64 = Operand.register(.RAX);
        const imm8 = Operand.immediate(0xff);
        // BSF
        testOp2(m64, .BSF, reg16, rm16, "66 67 0F BC 00");
        testOp2(m64, .BSF, reg32, rm32, "67 0F BC 00");
        testOp2(m64, .BSF, reg64, rm64, "67 48 0F BC 00");
        // BSR
        testOp2(m64, .BSR, reg16, rm16, "66 67 0F BD 00");
        testOp2(m64, .BSR, reg32, rm32, "67 0F BD 00");
        testOp2(m64, .BSR, reg64, rm64, "67 48 0F BD 00");
        // BT (comment previously mislabeled these as BSR)
        testOp2(m64, .BT, rm16, reg16, "66 67 0F A3 00");
        testOp2(m64, .BT, rm32, reg32, "67 0F A3 00");
        testOp2(m64, .BT, rm64, reg64, "67 48 0F A3 00");
        //
        testOp2(m64, .BT, rm16, imm8, "66 67 0F BA 20 ff");
        testOp2(m64, .BT, rm32, imm8, "67 0F BA 20 ff");
        testOp2(m64, .BT, rm64, imm8, "67 48 0F BA 20 ff");
        // BTC
        testOp2(m64, .BTC, rm16, reg16, "66 67 0F BB 00");
        testOp2(m64, .BTC, rm32, reg32, "67 0F BB 00");
        testOp2(m64, .BTC, rm64, reg64, "67 48 0F BB 00");
        //
        testOp2(m64, .BTC, rm16, imm8, "66 67 0F BA 38 ff");
        testOp2(m64, .BTC, rm32, imm8, "67 0F BA 38 ff");
        testOp2(m64, .BTC, rm64, imm8, "67 48 0F BA 38 ff");
        // BTR
        testOp2(m64, .BTR, rm16, reg16, "66 67 0F B3 00");
        testOp2(m64, .BTR, rm32, reg32, "67 0F B3 00");
        testOp2(m64, .BTR, rm64, reg64, "67 48 0F B3 00");
        //
        testOp2(m64, .BTR, rm16, imm8, "66 67 0F BA 30 ff");
        testOp2(m64, .BTR, rm32, imm8, "67 0F BA 30 ff");
        testOp2(m64, .BTR, rm64, imm8, "67 48 0F BA 30 ff");
        // BTS
        testOp2(m64, .BTS, rm16, reg16, "66 67 0F AB 00");
        testOp2(m64, .BTS, rm32, reg32, "67 0F AB 00");
        testOp2(m64, .BTS, rm64, reg64, "67 48 0F AB 00");
        //
        testOp2(m64, .BTS, rm16, imm8, "66 67 0F BA 28 ff");
        testOp2(m64, .BTS, rm32, imm8, "67 0F BA 28 ff");
        testOp2(m64, .BTS, rm64, imm8, "67 48 0F BA 28 ff");
    }
    {
        // Moves to/from control (CR) and debug (DR) registers. CR8/DR8
        // are only reachable in 64-bit mode (hence InvalidMode in m32).
        const reg32 = Operand.register(.EAX);
        const reg64 = Operand.register(.RAX);
        const cr0 = Operand.register(.CR0);
        const cr2 = Operand.register(.CR2);
        const cr8 = Operand.register(.CR8);
        const dr0 = Operand.register(.DR0);
        const dr1 = Operand.register(.DR1);
        const dr8 = Operand.register(.DR8);
        {
            testOp2(m32, .MOV, reg32, cr0, "0F 20 C0");
            testOp2(m32, .MOV, reg64, cr0, AsmError.InvalidOperand);
            testOp2(m32, .MOV, reg32, cr2, "0F 20 D0");
            testOp2(m32, .MOV, reg64, cr2, AsmError.InvalidOperand);
            testOp2(m32, .MOV, reg32, cr8, AsmError.InvalidMode);
            testOp2(m32, .MOV, reg64, cr8, AsmError.InvalidOperand);
            //
            testOp2(m64, .MOV, reg32, cr0, AsmError.InvalidOperand);
            testOp2(m64, .MOV, reg64, cr0, "0F 20 C0");
            testOp2(m64, .MOV, reg32, cr2, AsmError.InvalidOperand);
            testOp2(m64, .MOV, reg64, cr2, "0F 20 D0");
            testOp2(m64, .MOV, reg32, cr8, AsmError.InvalidOperand);
            testOp2(m64, .MOV, reg64, cr8, "44 0F 20 C0");
            //
            testOp2(m32, .MOV, cr0, reg32, "0F 22 C0");
            testOp2(m32, .MOV, cr0, reg64, AsmError.InvalidOperand);
            testOp2(m32, .MOV, cr2, reg32, "0F 22 D0");
            testOp2(m32, .MOV, cr2, reg64, AsmError.InvalidOperand);
            testOp2(m32, .MOV, cr8, reg32, AsmError.InvalidMode);
            testOp2(m32, .MOV, cr8, reg64, AsmError.InvalidOperand);
            //
            testOp2(m64, .MOV, cr0, reg32, AsmError.InvalidOperand);
            testOp2(m64, .MOV, cr0, reg64, "0F 22 C0");
            testOp2(m64, .MOV, cr2, reg32, AsmError.InvalidOperand);
            testOp2(m64, .MOV, cr2, reg64, "0F 22 D0");
            testOp2(m64, .MOV, cr8, reg32, AsmError.InvalidOperand);
            testOp2(m64, .MOV, cr8, reg64, "44 0F 22 C0");
        }
        {
            testOp2(m32, .MOV, reg32, dr0, "0F 21 C0");
            testOp2(m32, .MOV, reg64, dr0, AsmError.InvalidOperand);
            testOp2(m32, .MOV, reg32, dr1, "0F 21 C8");
            testOp2(m32, .MOV, reg64, dr1, AsmError.InvalidOperand);
            testOp2(m32, .MOV, reg32, dr8, AsmError.InvalidMode);
            testOp2(m32, .MOV, reg64, dr8, AsmError.InvalidOperand);
            //
            testOp2(m64, .MOV, reg32, dr0, AsmError.InvalidOperand);
            testOp2(m64, .MOV, reg64, dr0, "0F 21 C0");
            testOp2(m64, .MOV, reg32, dr1, AsmError.InvalidOperand);
            testOp2(m64, .MOV, reg64, dr1, "0F 21 C8");
            testOp2(m64, .MOV, reg32, dr8, AsmError.InvalidOperand);
            testOp2(m64, .MOV, reg64, dr8, "44 0F 21 C0");
            //
            testOp2(m32, .MOV, dr0, reg32, "0F 23 C0");
            testOp2(m32, .MOV, dr0, reg64, AsmError.InvalidOperand);
            testOp2(m32, .MOV, dr1, reg32, "0F 23 C8");
            testOp2(m32, .MOV, dr1, reg64, AsmError.InvalidOperand);
            testOp2(m32, .MOV, dr8, reg32, AsmError.InvalidMode);
            testOp2(m32, .MOV, dr8, reg64, AsmError.InvalidOperand);
            //
            testOp2(m64, .MOV, dr0, reg32, AsmError.InvalidOperand);
            testOp2(m64, .MOV, dr0, reg64, "0F 23 C0");
            testOp2(m64, .MOV, dr1, reg32, AsmError.InvalidOperand);
            testOp2(m64, .MOV, dr1, reg64, "0F 23 C8");
            testOp2(m64, .MOV, dr8, reg32, AsmError.InvalidOperand);
            testOp2(m64, .MOV, dr8, reg64, "44 0F 23 C0");
        }
        // TODO: the cpu ignores the bits in the mod field, so we should support
        // setting these to arbitrary values
        // m64.mod_fill = 0b00;
        // testOp2(m64, .BSR, reg64, cr0, "0F 20 00");
    }
    {
        const rm16 = Operand.memoryRm(.DefaultSeg, .WORD, .EAX, 0);
        const rm32 = Operand.memoryRm(.DefaultSeg, .DWORD, .EAX, 0);
        const rm64 = Operand.memoryRm(.DefaultSeg, .QWORD, .EAX, 0);
        const reg16 = Operand.register(.AX);
        const reg32 = Operand.register(.EAX);
        const reg64 = Operand.register(.RAX);
        const reg_cl = Operand.register(.CL);
        const imm8 = Operand.immediate(0x04);
        {
            // SHLD — shift count as immediate or in CL.
            testOp3(m32, .SHLD, rm16, reg16, imm8, "66 0F A4 00 04");
            testOp3(m32, .SHLD, rm32, reg32, imm8, "0F A4 00 04");
            testOp3(m32, .SHLD, rm64, reg64, imm8, AsmError.InvalidOperand);
            //
            testOp3(m32, .SHLD, rm16, reg16, reg_cl, "66 0F A5 00");
            testOp3(m32, .SHLD, rm32, reg32, reg_cl, "0F A5 00");
            testOp3(m32, .SHLD, rm64, reg64, reg_cl, AsmError.InvalidOperand);
            //
            testOp3(m64, .SHLD, rm16, reg16, imm8, "66 67 0F A4 00 04");
            testOp3(m64, .SHLD, rm32, reg32, imm8, "67 0F A4 00 04");
            testOp3(m64, .SHLD, rm64, reg64, imm8, "67 48 0F A4 00 04");
            //
            testOp3(m64, .SHLD, rm16, reg16, reg_cl, "66 67 0F A5 00");
            testOp3(m64, .SHLD, rm32, reg32, reg_cl, "67 0F A5 00");
            testOp3(m64, .SHLD, rm64, reg64, reg_cl, "67 48 0F A5 00");
        }
        {
            // SHRD — shift count as immediate or in CL.
            testOp3(m32, .SHRD, rm16, reg16, imm8, "66 0F AC 00 04");
            testOp3(m32, .SHRD, rm32, reg32, imm8, "0F AC 00 04");
            testOp3(m32, .SHRD, rm64, reg64, imm8, AsmError.InvalidOperand);
            //
            testOp3(m32, .SHRD, rm16, reg16, reg_cl, "66 0F AD 00");
            testOp3(m32, .SHRD, rm32, reg32, reg_cl, "0F AD 00");
            testOp3(m32, .SHRD, rm64, reg64, reg_cl, AsmError.InvalidOperand);
            //
            testOp3(m64, .SHRD, rm16, reg16, imm8, "66 67 0F AC 00 04");
            testOp3(m64, .SHRD, rm32, reg32, imm8, "67 0F AC 00 04");
            testOp3(m64, .SHRD, rm64, reg64, imm8, "67 48 0F AC 00 04");
            //
            testOp3(m64, .SHRD, rm16, reg16, reg_cl, "66 67 0F AD 00");
            testOp3(m64, .SHRD, rm32, reg32, reg_cl, "67 0F AD 00");
            testOp3(m64, .SHRD, rm64, reg64, reg_cl, "67 48 0F AD 00");
        }
    }
}
|
src/x86/tests/80386.zig
|
const std = @import("std");
const os = std.os;
const warn = std.debug.warn;
// How this should appear from a ptracing program:
// pid - syscall
// 0 - sigprocmask
// 0 - gettid
// 0 - fork
// 0 - kill | waitpid || 1 - gettid | rt_sigtimedwait
// 0 - gettid
/// Forks a child, sends it SIGUSR1 and reaps it, producing the
/// deterministic syscall sequence documented in the table above for a
/// ptracing test harness to observe. The seemingly useless gettid calls
/// exist purely as markers in that trace.
pub fn main() anyerror!void {
    // Ignore SIGUSR1 until we specifically wait for it in child fork later on
    var mask = os.linux.empty_sigset;
    sigaddset(&mask, os.SIGUSR1);
    const sigproc_result = os.linux.sigprocmask(os.SIG_BLOCK, &mask, null);
    if (sigproc_result != 0) @panic("sigproc error");
    // Trace marker (see the syscall table above); the value itself is unused.
    const original_parent_pid = os.linux.gettid();
    const pid = try os.fork();
    const signal = os.SIGUSR1;
    if (pid == 0) {
        // Child: block until the parent's signal arrives (or time out).
        wait_for_signal(signal);
    } else {
        // Parent: signal the child, reap it, and check it exited with 2
        // (the status wait_for_signal exits with on success).
        try os.kill(pid, signal);
        const wstatus = os.waitpid(pid, 0);
        std.testing.expect(os.WIFEXITED(wstatus));
        std.testing.expectEqual(@as(u32, 2), os.WEXITSTATUS(wstatus));
        _ = os.linux.gettid();
    }
}
/// Blocks for up to 3 seconds waiting for `signal` via rt_sigtimedwait,
/// then exits the process with status 2. Panics on timeout or if a
/// different signal is delivered. Never returns normally.
fn wait_for_signal(signal: u6) void {
    // Trace marker for the ptracing harness (see table above main).
    _ = os.linux.gettid();
    var mask = os.linux.empty_sigset;
    sigaddset(&mask, signal);
    // rt_sigtimedwait takes the sigset size in bytes.
    const sigsetsize = os.linux.NSIG / 8;
    var info: os.linux.siginfo_t = undefined;
    var timeout = os.linux.timespec{
        .tv_sec = 3,
        .tv_nsec = 0,
    };
    // On success rt_sigtimedwait returns the delivered signal number;
    // a negative value is an errno-style failure (here: timeout).
    const status = @intCast(c_int, os.linux.syscall4(.rt_sigtimedwait, @ptrToInt(&mask), @ptrToInt(&info), @ptrToInt(&timeout), sigsetsize));
    if (status < 0) @panic("child spent too much time waiting for signal!");
    if (status != signal) {
        @panic("child received incorrect signal!\n");
    }
    os.exit(2);
}
/// Copied from zig version "0.6.0+ed357f989" std library.
/// Reason: 0.6.0's sigaddset fn results in a compile error.
const usize_bits = @typeInfo(usize).Int.bits;
/// Adds `sig` to the signal set (see the copied-from-std note above).
pub fn sigaddset(set: *os.sigset_t, sig: u6) void {
    const s = sig - 1;
    // NOTE(review): the shift amount is truncated to u5 (max 31), but on
    // 64-bit targets usize_bits - 1 == 63, so `s & 63` can exceed 31 and
    // trip this @intCast for signal numbers above 32. Fine for SIGUSR1
    // (the only signal this program uses) — confirm before reusing with
    // high (real-time) signal numbers.
    const shift = @intCast(u5, s & (usize_bits - 1));
    const val = @intCast(u32, 1) << shift;
    (set.*)[@intCast(usize, s) / usize_bits] |= val;
}
|
tests/example-programs/child_signals.zig
|
const std = @import("std");
const dos = @import("./DOtherSide.zig");
const QMetaType = @import("./QMetaType.zig").QMetaType;
/// Zig-side description of a signal/slot parameter: display name plus the
/// Qt meta type of the argument (mirrors DOtherSide's C structs).
pub const ParameterDefinition = struct {
    name: [*c]const u8,
    type_: QMetaType,
};
/// A signal: its name and the parameters it carries.
pub const SignalDefinition = struct {
    name: [*c]const u8,
    parameters: []const ParameterDefinition,
};
/// A slot: name, return meta type and parameters.
pub const SlotDefinition = struct {
    name: [*c]const u8,
    returnType: QMetaType,
    parameters: []const ParameterDefinition,
};
/// A property: name, meta type and the names of its accessor slots and
/// notify signal (writeSlot/notifySignal may be null).
pub const PropertyDefinition = struct {
    name: [*c]const u8,
    type_: QMetaType,
    readSlot: [*c]const u8,
    writeSlot: [*c]const u8,
    notifySignal: [*c]const u8,
};
pub const QMetaObject = struct {
vptr: ?*dos.DosQMetaObject,
pub fn create(parent: ?*dos.DosQObject, comptime T: type) anyerror!QMetaObject {
const signals_ = try allocSignalDefinitions(T);
defer freeSignalDefinitions(signals_);
const slots_ = try allocSlotDefinitions(T);
defer freeSlotDefinitions(slots_);
const properties_ = try allocPropertyDefinitions(T);
defer freePropertyDefinitions(properties_);
return QMetaObject{ .vptr = dos.dos_qmetaobject_create(parent, &@typeName(T)[0], &signals_, &slots_, &properties_) };
}
pub fn delete(Self: QMetaObject) void {
dos.dos_qmetaobject_delete(Self.vptr);
}
//TODO: invoke method
fn allocSignalDefinitions(comptime T: type) anyerror!dos.SignalDefinitions {
comptime var signals: []const SignalDefinition = &[_]SignalDefinition{};
inline for (@typeInfo(T).Struct.decls) |declaration| {
switch (declaration.data) {
.Var => |Var| {
switch (@typeInfo(Var)) {
.Fn => |Fn| {
comptime var parameters: []const ParameterDefinition = &[_]ParameterDefinition{};
inline for (Fn.args) |arg, i| {
if (i == 0)
continue;
parameters = parameters ++ &[_]ParameterDefinition{.{
.name = "arg", // TODO: somehow get the argument names
.type_ = QMetaType.toQMetaType(arg.arg_type),
}};
}
signals = signals ++ &[_]SignalDefinition{.{
.name = &declaration.name[0],
.parameters = parameters,
}};
},
else => continue,
}
},
else => continue,
}
}
const allocator = std.heap.page_allocator;
const list = try allocator.alloc(dos.SignalDefinition, signals.len);
for (signals) |signal, i| {
var parameterList = try allocator.alloc(dos.ParameterDefinition, signal.parameters.len);
for (signal.parameters) |parameter, pi| {
parameterList[pi] = dos.ParameterDefinition{
.name = parameter.name,
.metaType = @enumToInt(parameter.type_),
};
}
list[i] = dos.SignalDefinition{
.name = signal.name,
.parametersCount = @intCast(c_int, signal.parameters.len),
.parameters = if (signal.parameters.len > 0) ¶meterList[0] else null,
};
}
if (signals.len > 0) {
return dos.SignalDefinitions{
.count = @intCast(c_int, signals.len),
.definitions = &list[0],
};
} else {
return dos.SignalDefinitions{
.count = 0,
.definitions = null,
};
}
}
fn allocSlotDefinitions(comptime T: type) anyerror!dos.SlotDefinitions {
comptime var slots: []const SlotDefinition = &[_]SlotDefinition{};
inline for (@typeInfo(T).Struct.decls) |declaration| {
switch (declaration.data) {
.Fn => |Fn| {
comptime var parameters: []const ParameterDefinition = &[_]ParameterDefinition{};
inline for (@typeInfo(Fn.fn_type).Fn.args) |arg, i| {
if (i == 0)
continue;
parameters = parameters ++ &[_]ParameterDefinition{.{
.name = "arg", // TODO: somehow get the argument names
.type_ = QMetaType.toQMetaType(arg.arg_type),
}};
}
slots = slots ++ &[_]SlotDefinition{.{
.name = &declaration.name[0],
.returnType = QMetaType.toQMetaType(Fn.return_type),
.parameters = parameters,
}};
},
else => continue,
}
}
const allocator = std.heap.page_allocator;
const list = try allocator.alloc(dos.SlotDefinition, slots.len);
for (slots) |slot, i| {
var parameterList = try allocator.alloc(dos.ParameterDefinition, slot.parameters.len);
for (slot.parameters) |parameter, pi| {
parameterList[pi] = dos.ParameterDefinition{
.name = parameter.name,
.metaType = @enumToInt(parameter.type_),
};
}
list[i] = dos.SlotDefinition{
.name = slot.name,
.parametersCount = @intCast(c_int, parameterList.len),
.returnMetaType = @enumToInt(slot.returnType),
.parameters = if (slot.parameters.len > 0) ¶meterList[0] else null,
};
}
if (slots.len > 0) {
return dos.SlotDefinitions{
.count = @intCast(c_int, slots.len),
.definitions = &list[0],
};
} else {
return dos.SlotDefinitions{
.count = 0,
.definitions = null,
};
}
}
/// Builds the dos.PropertyDefinitions table for struct type T at comptime.
/// Every field except opaque `?*c_void` handles becomes a property; a
/// matching `getX` accessor must exist on T or compilation fails, while
/// `setX` and `xChanged` are optional (write slot / notify signal).
/// The returned list is allocated with the page allocator; release it with
/// freePropertyDefinitions.
fn allocPropertyDefinitions(comptime T: type) anyerror!dos.PropertyDefinitions {
    comptime var properties: []const PropertyDefinition = &[_]PropertyDefinition{};
    inline for (@typeInfo(T).Struct.fields) |field| {
        // Skip opaque pointer fields (e.g. the wrapped C object handle).
        comptime if (field.field_type == ?*c_void)
            continue;
        // Capitalize the first letter to derive accessor names: foo -> getFoo.
        comptime var name = &[_]u8{toUpper(field.name[0])} ++ field.name[1..field.name.len];
        comptime if (!@hasDecl(T, "get" ++ name))
            @compileError("Field \"" ++ field.name ++ "\" doesn't have getter: get" ++ name);
        properties = properties ++ &[_]PropertyDefinition{.{
            .name = &field.name[0],
            .type_ = QMetaType.toQMetaType(field.field_type),
            .readSlot = "get" ++ name,
            .writeSlot = if (@hasDecl(T, "set" ++ name)) "set" ++ name else null,
            .notifySignal = if (hasSignal(T, field.name ++ "Changed")) field.name ++ "Changed" else null,
        }};
    }
    // Fix: avoid taking &list[0] on a zero-length allocation (out of bounds)
    // when T exposes no properties; mirrors the empty-case handling used for
    // slot definitions above.
    if (properties.len == 0) {
        return dos.PropertyDefinitions{
            .count = 0,
            .definitions = null,
        };
    }
    const allocator = std.heap.page_allocator;
    const list = try allocator.alloc(dos.PropertyDefinition, properties.len);
    for (properties) |property, i| {
        list[i] = dos.PropertyDefinition{
            .name = property.name,
            .propertyMetaType = @enumToInt(property.type_),
            .readSlot = property.readSlot,
            .writeSlot = property.writeSlot,
            .notifySignal = property.notifySignal,
        };
    }
    return dos.PropertyDefinitions{
        .count = @intCast(c_int, properties.len),
        .definitions = &list[0],
    };
}
/// Releases the per-signal parameter arrays and the signal list itself,
/// all of which were allocated with the page allocator.
fn freeSignalDefinitions(signals: dos.SignalDefinitions) void {
    if (signals.count > 0) {
        for (signals.definitions[0..@intCast(usize, signals.count)]) |signal| {
            // Fix: guard against slicing a null/empty parameters pointer when
            // a signal takes no arguments — consistent with the check already
            // present in freeSlotDefinitions.
            if (signal.parametersCount > 0) {
                std.heap.page_allocator.free(signal.parameters[0..@intCast(usize, signal.parametersCount)]);
            }
        }
        std.heap.page_allocator.free(signals.definitions[0..@intCast(usize, signals.count)]);
    }
}
/// Frees a slot-definition table: first every slot's parameter array, then
/// the definition list itself. All allocations used the page allocator.
fn freeSlotDefinitions(slots: dos.SlotDefinitions) void {
    if (slots.count <= 0) return;
    const allocator = std.heap.page_allocator;
    const definitions = slots.definitions[0..@intCast(usize, slots.count)];
    for (definitions) |slot| {
        // Slots without parameters carry a null pointer; skip them.
        if (slot.parametersCount > 0) {
            allocator.free(slot.parameters[0..@intCast(usize, slot.parametersCount)]);
        }
    }
    allocator.free(definitions);
}
/// Frees the property-definition table produced by allocPropertyDefinitions.
fn freePropertyDefinitions(properties: dos.PropertyDefinitions) void {
    if (properties.count <= 0) return;
    const count = @intCast(usize, properties.count);
    std.heap.page_allocator.free(properties.definitions[0..count]);
}
};
/// Converts an ASCII lowercase letter to its uppercase form; any other byte
/// is returned unchanged.
fn toUpper(c: u8) u8 {
    if (c >= 'a' and c <= 'z')
        return c - ('a' - 'A');
    return c;
}
/// Returns true at comptime if T declares a function named `name`.
/// Used to detect `<field>Changed` notify signals.
/// Fix: removed the dead `signals` accumulator variable (leftover from a
/// copy of the signal-collection routine — it was never read) and the
/// unused `Fn` payload capture.
fn hasSignal(comptime T: type, comptime name: []const u8) bool {
    inline for (@typeInfo(T).Struct.decls) |declaration| {
        switch (declaration.data) {
            .Var => |Var| {
                switch (@typeInfo(Var)) {
                    // Only function declarations can be signals.
                    .Fn => {
                        if (std.mem.eql(u8, name, declaration.name))
                            return true;
                    },
                    else => continue,
                }
            },
            else => continue,
        }
    }
    return false;
}
|
src/QMetaObject.zig
|
const macro = @import("pspmacros.zig");
// PSP NID import table for the sceNet base module: network core
// init/term, thread-info helpers, MAC (ether) address conversion, and
// allocator statistics. Each NID hex value pairs a firmware export with
// the symbol name the stub is generated under.
comptime {
    asm (macro.import_module_start("sceNet", "0x00090000", "8"));
    asm (macro.import_function("sceNet", "0x39AF39A6", "sceNetInit"));
    asm (macro.import_function("sceNet", "0x281928A9", "sceNetTerm"));
    asm (macro.import_function("sceNet", "0x50647530", "sceNetFreeThreadinfo"));
    asm (macro.import_function("sceNet", "0xAD6844C6", "sceNetThreadAbort"));
    asm (macro.import_function("sceNet", "0x89360950", "sceNetEtherNtostr"));
    asm (macro.import_function("sceNet", "0xD27961C9", "sceNetEtherStrton"));
    asm (macro.import_function("sceNet", "0x0BF0A3AE", "sceNetGetLocalEtherAddr"));
    asm (macro.import_function("sceNet", "0xCC393E48", "sceNetGetMallocStat"));
}
// PSP NID import table for sceNetAdhoc (ad-hoc PDP/PTP sockets and game
// mode). Functions imported as `*_stub` take more arguments than the MIPS
// register ABI passes directly; the generic_abi_wrapper lines below emit
// public wrappers with the given argument counts that forward to the stubs.
comptime {
    asm (macro.import_module_start("sceNetAdhoc", "0x00090000", "25"));
    asm (macro.import_function("sceNetAdhoc", "0xE1D621D7", "sceNetAdhocInit"));
    asm (macro.import_function("sceNetAdhoc", "0xA62C6F57", "sceNetAdhocTerm"));
    asm (macro.import_function("sceNetAdhoc", "0x7A662D6B", "sceNetAdhocPollSocket"));
    asm (macro.import_function("sceNetAdhoc", "0x73BFD52D", "sceNetAdhocSetSocketAlert"));
    asm (macro.import_function("sceNetAdhoc", "0x4D2CE199", "sceNetAdhocGetSocketAlert"));
    asm (macro.import_function("sceNetAdhoc", "0x6F92741B", "sceNetAdhocPdpCreate"));
    asm (macro.import_function("sceNetAdhoc", "0xABED3790", "sceNetAdhocPdpSend_stub"));
    asm (macro.import_function("sceNetAdhoc", "0xDFE53E03", "sceNetAdhocPdpRecv_stub"));
    asm (macro.import_function("sceNetAdhoc", "0x7F27BB5E", "sceNetAdhocPdpDelete"));
    asm (macro.import_function("sceNetAdhoc", "0xC7C1FC57", "sceNetAdhocGetPdpStat"));
    asm (macro.import_function("sceNetAdhoc", "0x877F6D66", "sceNetAdhocPtpOpen_stub"));
    asm (macro.import_function("sceNetAdhoc", "0xFC6FC07B", "sceNetAdhocPtpConnect"));
    asm (macro.import_function("sceNetAdhoc", "0xE08BDAC1", "sceNetAdhocPtpListen_stub"));
    asm (macro.import_function("sceNetAdhoc", "0x9DF81198", "sceNetAdhocPtpAccept_stub"));
    asm (macro.import_function("sceNetAdhoc", "0x4DA4C788", "sceNetAdhocPtpSend_stub"));
    asm (macro.import_function("sceNetAdhoc", "0x8BEA2B3E", "sceNetAdhocPtpRecv_stub"));
    asm (macro.import_function("sceNetAdhoc", "0x9AC2EEAC", "sceNetAdhocPtpFlush"));
    asm (macro.import_function("sceNetAdhoc", "0x157E6225", "sceNetAdhocPtpClose"));
    asm (macro.import_function("sceNetAdhoc", "0xB9685118", "sceNetAdhocGetPtpStat"));
    asm (macro.import_function("sceNetAdhoc", "0x7F75C338", "sceNetAdhocGameModeCreateMaster"));
    asm (macro.import_function("sceNetAdhoc", "0x3278AB0C", "sceNetAdhocGameModeCreateReplica"));
    asm (macro.import_function("sceNetAdhoc", "0x98C204C8", "sceNetAdhocGameModeUpdateMaster"));
    asm (macro.import_function("sceNetAdhoc", "0xFA324B4E", "sceNetAdhocGameModeUpdateReplica"));
    asm (macro.import_function("sceNetAdhoc", "0xA0229362", "sceNetAdhocGameModeDeleteMaster"));
    asm (macro.import_function("sceNetAdhoc", "0x0B2228E9", "sceNetAdhocGameModeDeleteReplica"));
    asm (macro.generic_abi_wrapper("sceNetAdhocPdpSend", 7));
    asm (macro.generic_abi_wrapper("sceNetAdhocPdpRecv", 7));
    asm (macro.generic_abi_wrapper("sceNetAdhocPtpOpen", 8));
    asm (macro.generic_abi_wrapper("sceNetAdhocPtpListen", 7));
    asm (macro.generic_abi_wrapper("sceNetAdhocPtpAccept", 5));
    asm (macro.generic_abi_wrapper("sceNetAdhocPtpSend", 5));
    asm (macro.generic_abi_wrapper("sceNetAdhocPtpRecv", 5));
}
// PSP NID import table for sceNetAdhocctl (ad-hoc control: network
// discovery, join/create, handlers, and peer queries).
comptime {
    asm (macro.import_module_start("sceNetAdhocctl", "0x00090000", "21"));
    asm (macro.import_function("sceNetAdhocctl", "0xE26F226E", "sceNetAdhocctlInit"));
    asm (macro.import_function("sceNetAdhocctl", "0x9D689E13", "sceNetAdhocctlTerm"));
    asm (macro.import_function("sceNetAdhocctl", "0x0AD043ED", "sceNetAdhocctlConnect"));
    asm (macro.import_function("sceNetAdhocctl", "0xEC0635C1", "sceNetAdhocctlCreate"));
    asm (macro.import_function("sceNetAdhocctl", "0x5E7F79C9", "sceNetAdhocctlJoin"));
    asm (macro.import_function("sceNetAdhocctl", "0x08FFF7A0", "sceNetAdhocctlScan"));
    asm (macro.import_function("sceNetAdhocctl", "0x34401D65", "sceNetAdhocctlDisconnect"));
    asm (macro.import_function("sceNetAdhocctl", "0x20B317A0", "sceNetAdhocctlAddHandler"));
    asm (macro.import_function("sceNetAdhocctl", "0x6402490B", "sceNetAdhocctlDelHandler"));
    asm (macro.import_function("sceNetAdhocctl", "0x75ECD386", "sceNetAdhocctlGetState"));
    asm (macro.import_function("sceNetAdhocctl", "0x362CBE8F", "sceNetAdhocctlGetAdhocId"));
    asm (macro.import_function("sceNetAdhocctl", "0xE162CB14", "sceNetAdhocctlGetPeerList"));
    asm (macro.import_function("sceNetAdhocctl", "0x99560ABE", "sceNetAdhocctlGetAddrByName"));
    asm (macro.import_function("sceNetAdhocctl", "0x8916C003", "sceNetAdhocctlGetNameByAddr"));
    asm (macro.import_function("sceNetAdhocctl", "0xDED9D28E", "sceNetAdhocctlGetParameter"));
    asm (macro.import_function("sceNetAdhocctl", "0x81AEE1BE", "sceNetAdhocctlGetScanInfo"));
    asm (macro.import_function("sceNetAdhocctl", "0xA5C055CE", "sceNetAdhocctlCreateEnterGameMode_stub"));
    asm (macro.import_function("sceNetAdhocctl", "0x1FF89745", "sceNetAdhocctlJoinEnterGameMode"));
    asm (macro.import_function("sceNetAdhocctl", "0xCF8E084D", "sceNetAdhocctlExitGameMode"));
    asm (macro.import_function("sceNetAdhocctl", "0x5A014CE0", "sceNetAdhocctlGetGameModeInfo"));
    asm (macro.import_function("sceNetAdhocctl", "0x8DB83FDC", "sceNetAdhocctlGetPeerInfo"));
    // 6-argument function needs an ABI wrapper around its stub.
    asm (macro.generic_abi_wrapper("sceNetAdhocctlCreateEnterGameMode", 6));
}
// PSP NID import table for sceNetAdhocMatching (peer matching on top of
// ad-hoc networking).
// NOTE(review): the module header declares "21" but only 16 functions are
// imported below, while every other module in this file matches its count
// exactly — verify the expected count against the PSPSDK NID tables.
comptime {
    asm (macro.import_module_start("sceNetAdhocMatching", "0x00090000", "21"));
    asm (macro.import_function("sceNetAdhocMatching", "0x2A2A1E07", "sceNetAdhocMatchingInit"));
    asm (macro.import_function("sceNetAdhocMatching", "0x7945ECDA", "sceNetAdhocMatchingTerm"));
    asm (macro.import_function("sceNetAdhocMatching", "0xCA5EDA6F", "sceNetAdhocMatchingCreate_stub"));
    asm (macro.import_function("sceNetAdhocMatching", "0x93EF3843", "sceNetAdhocMatchingStart_stub"));
    asm (macro.import_function("sceNetAdhocMatching", "0x32B156B3", "sceNetAdhocMatchingStop"));
    asm (macro.import_function("sceNetAdhocMatching", "0xF16EAF4F", "sceNetAdhocMatchingDelete"));
    asm (macro.import_function("sceNetAdhocMatching", "0x5E3D4B79", "sceNetAdhocMatchingSelectTarget"));
    asm (macro.import_function("sceNetAdhocMatching", "0xEA3C6108", "sceNetAdhocMatchingCancelTarget"));
    asm (macro.import_function("sceNetAdhocMatching", "0xB58E61B7", "sceNetAdhocMatchingSetHelloOpt"));
    asm (macro.import_function("sceNetAdhocMatching", "0xB5D96C2A", "sceNetAdhocMatchingGetHelloOpt"));
    asm (macro.import_function("sceNetAdhocMatching", "0xC58BCD9E", "sceNetAdhocMatchingGetMembers"));
    asm (macro.import_function("sceNetAdhocMatching", "0x40F8F435", "sceNetAdhocMatchingGetPoolMaxAlloc"));
    asm (macro.import_function("sceNetAdhocMatching", "0x8F58BEDF", "sceNetAdhocMatchingCancelTargetWithOpt"));
    asm (macro.import_function("sceNetAdhocMatching", "0x9C5CFB7D", "sceNetAdhocMatchingGetPoolStat"));
    asm (macro.import_function("sceNetAdhocMatching", "0xF79472D7", "sceNetAdhocMatchingSendData"));
    asm (macro.import_function("sceNetAdhocMatching", "0xEC19337D", "sceNetAdhocMatchingAbortSendData"));
    // Many-argument functions need ABI wrappers around their stubs.
    asm (macro.generic_abi_wrapper("sceNetAdhocMatchingStart", 7));
    asm (macro.generic_abi_wrapper("sceNetAdhocMatchingCreate", 9));
}
// PSP NID import table for sceNetApctl (infrastructure access-point
// control: connect/disconnect and state handlers).
comptime {
    asm (macro.import_module_start("sceNetApctl", "0x00090000", "8"));
    asm (macro.import_function("sceNetApctl", "0xE2F91F9B", "sceNetApctlInit"));
    asm (macro.import_function("sceNetApctl", "0xB3EDD0EC", "sceNetApctlTerm"));
    asm (macro.import_function("sceNetApctl", "0x2BEFDF23", "sceNetApctlGetInfo"));
    asm (macro.import_function("sceNetApctl", "0x8ABADD51", "sceNetApctlAddHandler"));
    asm (macro.import_function("sceNetApctl", "0x5963991B", "sceNetApctlDelHandler"));
    asm (macro.import_function("sceNetApctl", "0xCFB957C6", "sceNetApctlConnect"));
    asm (macro.import_function("sceNetApctl", "0x24FE91A1", "sceNetApctlDisconnect"));
    asm (macro.import_function("sceNetApctl", "0x5DEAC81B", "sceNetApctlGetState"));
}
// PSP NID import table for sceNetInet (BSD-style sockets over
// infrastructure mode: socket/bind/connect/send/recv plus inet_* helpers).
comptime {
    asm (macro.import_module_start("sceNetInet", "0x00090000", "30"));
    asm (macro.import_function("sceNetInet", "0x17943399", "sceNetInetInit"));
    asm (macro.import_function("sceNetInet", "0xA9ED66B9", "sceNetInetTerm"));
    asm (macro.import_function("sceNetInet", "0xDB094E1B", "sceNetInetAccept"));
    asm (macro.import_function("sceNetInet", "0x1A33F9AE", "sceNetInetBind"));
    asm (macro.import_function("sceNetInet", "0x8D7284EA", "sceNetInetClose"));
    asm (macro.import_function("sceNetInet", "0x805502DD", "sceNetInetCloseWithRST"));
    asm (macro.import_function("sceNetInet", "0x410B34AA", "sceNetInetConnect"));
    asm (macro.import_function("sceNetInet", "0xE247B6D6", "sceNetInetGetpeername"));
    asm (macro.import_function("sceNetInet", "0x162E6FD5", "sceNetInetGetsockname"));
    asm (macro.import_function("sceNetInet", "0x4A114C7C", "sceNetInetGetsockopt_stub"));
    asm (macro.import_function("sceNetInet", "0xD10A1A7A", "sceNetInetListen"));
    asm (macro.import_function("sceNetInet", "0xFAABB1DD", "sceNetInetPoll"));
    asm (macro.import_function("sceNetInet", "0xCDA85C99", "sceNetInetRecv"));
    asm (macro.import_function("sceNetInet", "0xC91142E4", "sceNetInetRecvfrom_stub"));
    asm (macro.import_function("sceNetInet", "0xEECE61D2", "sceNetInetRecvmsg"));
    asm (macro.import_function("sceNetInet", "0x5BE8D595", "sceNetInetSelect"));
    asm (macro.import_function("sceNetInet", "0x7AA671BC", "sceNetInetSend"));
    asm (macro.import_function("sceNetInet", "0x05038FC7", "sceNetInetSendto_stub"));
    asm (macro.import_function("sceNetInet", "0x774E36F4", "sceNetInetSendmsg"));
    asm (macro.import_function("sceNetInet", "0x2FE71FE7", "sceNetInetSetsockopt_stub"));
    asm (macro.import_function("sceNetInet", "0x4CFE4E56", "sceNetInetShutdown"));
    asm (macro.import_function("sceNetInet", "0x8B7B220F", "sceNetInetSocket"));
    asm (macro.import_function("sceNetInet", "0x80A21ABD", "sceNetInetSocketAbort"));
    asm (macro.import_function("sceNetInet", "0xFBABE411", "sceNetInetGetErrno"));
    asm (macro.import_function("sceNetInet", "0xB3888AD4", "sceNetInetGetTcpcbstat"));
    asm (macro.import_function("sceNetInet", "0x39B0C7D3", "sceNetInetGetUdpcbstat"));
    asm (macro.import_function("sceNetInet", "0xB75D5B0A", "sceNetInetInetAddr"));
    asm (macro.import_function("sceNetInet", "0x1BDF5D13", "sceNetInetInetAton"));
    asm (macro.import_function("sceNetInet", "0xD0792666", "sceNetInetInetNtop"));
    asm (macro.import_function("sceNetInet", "0xE30B8C19", "sceNetInetInetPton"));
    // Many-argument functions need ABI wrappers around their stubs.
    asm (macro.generic_abi_wrapper("sceNetInetGetsockopt", 5));
    asm (macro.generic_abi_wrapper("sceNetInetRecvfrom", 5));
    asm (macro.generic_abi_wrapper("sceNetInetSendto", 6));
    asm (macro.generic_abi_wrapper("sceNetInetSetsockopt", 5));
}
// PSP NID import table for sceNetResolver (asynchronous DNS name/address
// resolution).
comptime {
    asm (macro.import_module_start("sceNetResolver", "0x00090000", "7"));
    asm (macro.import_function("sceNetResolver", "0xF3370E61", "sceNetResolverInit"));
    asm (macro.import_function("sceNetResolver", "0x6138194A", "sceNetResolverTerm"));
    asm (macro.import_function("sceNetResolver", "0x244172AF", "sceNetResolverCreate"));
    asm (macro.import_function("sceNetResolver", "0x94523E09", "sceNetResolverDelete"));
    asm (macro.import_function("sceNetResolver", "0x224C5F44", "sceNetResolverStartNtoA_stub"));
    asm (macro.import_function("sceNetResolver", "0x629E2FB7", "sceNetResolverStartAtoN_stub"));
    asm (macro.import_function("sceNetResolver", "0x808F6063", "sceNetResolverStop"));
    // Many-argument functions need ABI wrappers around their stubs.
    asm (macro.generic_abi_wrapper("sceNetResolverStartNtoA", 5));
    asm (macro.generic_abi_wrapper("sceNetResolverStartAtoN", 6));
}
|
src/psp/nids/pspnet.zig
|
const NvPtx = @This();
const std = @import("std");
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.link);
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
const link = @import("../link.zig");
const trace = @import("../tracy.zig").trace;
const build_options = @import("build_options");
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
base: link.File,
llvm_object: *LlvmObject,
/// Allocates the NvPtx linker backend. Requires an LLVM-enabled compiler,
/// an nvptx/nvptx64 CPU arch and a CUDA OS target; returns
/// error.PtxArchNotSupported otherwise. Caller owns the returned object
/// (released via deinit).
pub fn createEmpty(gpa: Allocator, options: link.Options) !*NvPtx {
    if (!build_options.have_llvm) return error.PtxArchNotSupported;
    if (!options.use_llvm) return error.PtxArchNotSupported;
    switch (options.target.cpu.arch) {
        .nvptx, .nvptx64 => {},
        else => return error.PtxArchNotSupported,
    }
    switch (options.target.os.tag) {
        // TODO: does it also work with nvcl ?
        .cuda => {},
        else => return error.PtxArchNotSupported,
    }
    const llvm_object = try LlvmObject.create(gpa, options);
    // Fix: free the LLVM object if allocating the NvPtx wrapper below fails;
    // previously it leaked on that error path.
    errdefer llvm_object.destroy(gpa);
    const nvptx = try gpa.create(NvPtx);
    nvptx.* = .{
        .base = .{
            .tag = .nvptx,
            .options = options,
            .file = null,
            .allocator = gpa,
        },
        .llvm_object = llvm_object,
    };
    return nvptx;
}
/// Creates the NvPtx backend for `sub_path`. No file handle is opened here;
/// the .ptx output is produced by LLVM during flushModule.
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*NvPtx {
    if (!build_options.have_llvm) @panic("nvptx target requires a zig compiler with llvm enabled.");
    if (!options.use_llvm) return error.PtxArchNotSupported;
    assert(options.object_format == .nvptx);
    const self = try createEmpty(allocator, options);
    log.info("Opening .ptx target file {s}", .{sub_path});
    return self;
}
/// Tears down the backing LLVM object. No-op when LLVM support was
/// compiled out.
pub fn deinit(self: *NvPtx) void {
    if (build_options.have_llvm) {
        self.llvm_object.destroy(self.base.allocator);
    }
}
/// Forwards function codegen to the LLVM object. No-op without LLVM.
pub fn updateFunc(self: *NvPtx, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
    if (build_options.have_llvm) {
        try self.llvm_object.updateFunc(module, func, air, liveness);
    }
}
/// Forwards declaration codegen to the LLVM object. No-op without LLVM.
pub fn updateDecl(self: *NvPtx, module: *Module, decl: *Module.Decl) !void {
    if (build_options.have_llvm) {
        try self.llvm_object.updateDecl(module, decl);
    }
}
/// Forwards export updates to the LLVM object. Panics if this backend was
/// excluded from the build but still reached.
pub fn updateDeclExports(
    self: *NvPtx,
    module: *Module,
    decl: *const Module.Decl,
    exports: []const *Module.Export,
) !void {
    if (!build_options.have_llvm) return;
    if (build_options.skip_non_native and builtin.object_format != .nvptx) {
        @panic("Attempted to compile for object format that was disabled by build configuration");
    }
    try self.llvm_object.updateDeclExports(module, decl, exports);
}
/// Releases per-declaration state held by the LLVM object. No-op without
/// LLVM.
pub fn freeDecl(self: *NvPtx, decl: *Module.Decl) void {
    if (build_options.have_llvm) {
        self.llvm_object.freeDecl(decl);
    }
}
/// There is no incremental PTX linking; flushing always re-emits the whole
/// module.
pub fn flush(self: *NvPtx, comp: *Compilation) !void {
    return self.flushModule(comp);
}
/// Produces the .ptx output by redirecting the binary emit into an
/// assembly emit (LLVM emits PTX as textual assembly) and flushing the
/// LLVM object.
pub fn flushModule(self: *NvPtx, comp: *Compilation) !void {
    if (!build_options.have_llvm) return;
    if (build_options.skip_non_native) {
        @panic("Attempted to compile for architecture that was disabled by build configuration");
    }
    const tracy = trace(@src());
    defer tracy.end();
    // NOTE(review): `comp` is a pointer, so `hack_comp` aliases the same
    // Compilation — the emit_asm/emit mutation below persists on `comp`
    // after this call returns; verify that is intended.
    var hack_comp = comp;
    if (comp.bin_file.options.emit) |emit| {
        // Swap the binary emit for an .ptx assembly emit using the
        // intermediary basename.
        hack_comp.emit_asm = .{
            .directory = emit.directory,
            .basename = comp.bin_file.intermediary_basename.?,
        };
        hack_comp.bin_file.options.emit = null;
    }
    return try self.llvm_object.flushModule(hack_comp);
}
|
src/link/NvPtx.zig
|
const std = @import("std");
const StringList = std.ArrayListUnmanaged([]const u8);
/// Marker type for options that take no value; the parsed result field for
/// a Flag option is a bool (see ValueType).
pub const Flag = void;
/// Positional (non-option) arguments collected during parsing, plus the
/// index of the "--" separator (equals items.len when no "--" was seen).
pub const PositionalData = struct {
    // All positional arguments in order of appearance.
    items: [][]const u8,
    // Count of positionals that appeared before "--".
    separator_index: usize,
    /// Positionals that appeared before the "--" separator.
    pub fn beforeSeparator(self: PositionalData) [][]const u8 {
        return self.items[0..self.separator_index];
    }
    /// Positionals that appeared after the "--" separator.
    pub fn afterSeparator(self: PositionalData) [][]const u8 {
        return self.items[self.separator_index..];
    }
    /// Frees the items slice; the argument strings themselves are not freed
    /// here.
    pub fn deinit(self: PositionalData, allocator: std.mem.Allocator) void {
        allocator.free(self.items);
    }
};
const log = std.log.scoped(.accord);
/// Comptime description of a single command-line option. The default value
/// and settings are stored as type-erased pointers so Option itself can be
/// a plain, non-generic struct; getDefault/getSettings recover the types.
pub const Option = struct {
    // Short name byte (e.g. 'v' for -v); 0 when the option has none.
    short: u8,
    // Long name (e.g. "verbose" for --verbose); empty when none.
    long: []const u8,
    // Declared value type; void marks a Flag option.
    type: type,
    default: *const anyopaque,
    settings: *const anyopaque,
    /// Recovers the typed pointer to the default value.
    pub fn getDefault(comptime self: Option) *const ValueType(self.type) {
        return @ptrCast(*const ValueType(self.type), self.default);
    }
    /// Recovers the typed pointer to the parse settings.
    pub fn getSettings(comptime self: Option) *const OptionSettings(self.type) {
        return @ptrCast(*const OptionSettings(self.type), self.settings);
    }
};
/// Maps an option's declared type to the type stored in the result struct:
/// flags (void) are represented as bool, every other type as itself.
fn ValueType(comptime T: type) type {
    if (T == void) return bool;
    return T;
}
// Shorthand for the struct-field descriptor used when building types at
// comptime.
const Field = std.builtin.TypeInfo.StructField;
/// Returns the extra settings fields needed to parse a value of type T:
/// ints get a `radix`, floats a `hex` flag, enums an `enum_parsing` mode
/// (plus their tag type's settings), arrays a `delimiter` (plus the child
/// type's settings), optionals inherit their child's settings. Other types
/// need no settings.
fn optionSettingsFields(comptime T: type) []const Field {
    comptime var info = @typeInfo(T);
    switch (info) {
        // Radix 0 lets std.fmt.parseInt auto-detect 0x/0o/0b prefixes.
        .Int => return &[1]Field{structField("radix", u8, &@as(u8, 0))},
        .Float => return &[1]Field{structField("hex", bool, &false)},
        .Enum => {
            // Parse by field name, by integer tag value, or try both.
            const EnumSetting = enum { name, value, both };
            return optionSettingsFields(info.Enum.tag_type) ++
                &[1]Field{structField("enum_parsing", EnumSetting, &EnumSetting.name)};
        },
        .Optional => return optionSettingsFields(info.Optional.child),
        .Array => {
            // TODO: make them work!
            if (@typeInfo(info.Array.child) == .Array)
                @compileError("Multidimensional arrays not yet supported!");
            const delimiter: []const u8 = ",";
            return optionSettingsFields(info.Array.child) ++
                &[1]Field{structField("delimiter", []const u8, &delimiter)};
        },
        else => return &[0]Field{},
    }
}
/// Builds the settings struct type accepted by `option` for value type T.
/// When T needs no settings at all, a dummy one-bit field keeps the struct
/// non-zero-sized so a pointer to it can still be stored in Option.
pub fn OptionSettings(comptime T: type) type {
    const fields = optionSettingsFields(T);
    return if (fields.len == 0)
        struct { padding_so_i_can_make_a_non_zero_sized_pointer: u1 = 0 }
    else
        @Type(std.builtin.TypeInfo{ .Struct = .{
            .layout = .Auto,
            .fields = fields,
            .decls = &.{},
            .is_tuple = false,
        } });
}
/// Declares one option at comptime. At least one of `short` (nonzero byte)
/// or `long` (non-empty) must be given. For value-less flags use `Flag`
/// (void) as T with `{}` as the default.
pub fn option(
    comptime short: u8,
    comptime long: []const u8,
    comptime T: type,
    comptime default: T,
    comptime settings: OptionSettings(T),
) Option {
    if (short == 0 and long.len == 0)
        @compileError("Must have either a short or long name, cannot have neither!");
    return .{
        .short = short,
        .long = long,
        .type = T,
        // A void default has no address; store `false` instead so
        // getDefault() (which yields *const bool for flags) stays valid.
        .default = if (T == void) &false else &default,
        .settings = &settings,
    };
}
/// Convenience constructor for a runtime (non-comptime) struct field with
/// natural alignment, used when assembling option structs via @Type.
fn structField(
    comptime name: []const u8,
    comptime T: type,
    comptime default: ?*const T,
) std.builtin.TypeInfo.StructField {
    return .{
        .name = name,
        .field_type = T,
        .default_value = default,
        .is_comptime = false,
        .alignment = @alignOf(T),
    };
}
/// Builds the result struct type returned by `parse`: one field per option
/// (named after the long name, or the short name when no long name exists)
/// plus a trailing `positionals` field.
pub fn OptionStruct(comptime options: []const Option) type {
    const TypeInfo = std.builtin.TypeInfo;
    comptime var struct_fields: [options.len + 1]TypeInfo.StructField = undefined;
    for (options) |opt, i| {
        struct_fields[i] = structField(
            if (opt.long.len > 0) opt.long else &[1]u8{opt.short},
            ValueType(opt.type),
            opt.getDefault(),
        );
    }
    // The positionals field has no default; parse() must populate it.
    struct_fields[options.len] = structField(
        "positionals",
        PositionalData,
        null,
    );
    const struct_info = TypeInfo{ .Struct = .{
        .layout = .Auto,
        .fields = &struct_fields,
        .decls = &.{},
        .is_tuple = false,
    } };
    return @Type(struct_info);
}
/// Errors reported while parsing command-line arguments.
const Error = error{
    UnrecognizedOption,
    OptionMissingValue,
    OptionUnexpectedValue,
};
/// Parses `string` into a value of type T according to the comptime
/// `settings`. Strings are returned as-is (not copied); optionals, arrays,
/// and enum tag values are handled by recursion.
fn parseValue(comptime T: type, comptime default: ?T, comptime settings: anytype, string: []const u8) Error!T {
    const info = @typeInfo(T);
    // Exact-type special cases first: raw strings and booleans.
    switch (T) {
        []const u8 => return string,
        bool => {
            return if (std.ascii.eqlIgnoreCase(string, "true"))
                true
            else if (std.ascii.eqlIgnoreCase(string, "false"))
                false
            else
                error.OptionUnexpectedValue;
        },
        else => {},
    }
    switch (info) {
        .Int => return std.fmt.parseInt(T, string, settings.radix) catch error.OptionUnexpectedValue,
        .Float => {
            return if (settings.hex)
                std.fmt.parseHexFloat(T, string) catch error.OptionUnexpectedValue
            else
                std.fmt.parseFloat(T, string) catch error.OptionUnexpectedValue;
        },
        .Optional => {
            // A literal "null" (case-insensitive) yields null; anything else
            // is parsed as the child type.
            const d = default orelse null;
            return if (std.ascii.eqlIgnoreCase(string, "null"))
                null
            else
                // try is necessary here otherwise there are type errors
                try parseValue(info.Optional.child, d, settings, string);
        },
        .Array => {
            const ChildT = info.Array.child;
            var result: T = default orelse undefined;
            var iterator = std.mem.split(u8, string, settings.delimiter);
            comptime var i: usize = 0; // iterate with i instead of iterator so default can be indexed
            inline while (i < result.len) : (i += 1) {
                // TODO: if token length == 0, grab default value instead
                const token = iterator.next() orelse break;
                result[i] = try parseValue(
                    ChildT,
                    if (default) |d| d[i] else null,
                    settings,
                    token,
                );
            }
            // NOTE(review): `i` is comptime and the loop above is fully
            // unrolled, so after the loop i == result.len regardless of the
            // runtime `break`; this guard looks unreachable — verify.
            if (i != result.len and default == null) {
                log.err("Optional arrays that have a default value of null must have every value filled out!", .{});
                return error.OptionUnexpectedValue;
            }
            return result;
        },
        .Enum => {
            const TagT = info.Enum.tag_type;
            return switch (settings.enum_parsing) {
                // .name: match the enum field name.
                .name => std.meta.stringToEnum(T, string) orelse error.OptionUnexpectedValue,
                // .value: parse the integer tag value.
                .value => std.meta.intToEnum(T, parseValue(
                    TagT,
                    if (default) |d| @enumToInt(d) else null,
                    settings,
                    string,
                ) catch return error.OptionUnexpectedValue) catch error.OptionUnexpectedValue,
                // .both: try the tag value first, fall back to the name.
                .both => std.meta.intToEnum(T, parseValue(
                    TagT,
                    if (default) |d| @enumToInt(d) else null,
                    settings,
                    string,
                ) catch {
                    return std.meta.stringToEnum(T, string) orelse error.OptionUnexpectedValue;
                }) catch error.OptionUnexpectedValue,
            };
        },
        else => @compileError("Unsupported type '" ++ @typeName(T) ++ "'"),
    }
}
/// Parses `arg_iterator` against the comptime option list. Returns a struct
/// (see OptionStruct) with one field per option plus `positionals`; the
/// caller must eventually call `result.positionals.deinit(allocator)`.
/// A bare "--" makes everything after it positional.
pub fn parse(comptime options: []const Option, allocator: std.mem.Allocator, arg_iterator: anytype) !OptionStruct(options) {
    const OptValues = OptionStruct(options);
    var result = OptValues{ .positionals = undefined };
    var positional_list = StringList{};
    errdefer positional_list.deinit(allocator);
    const Parser = struct {
        // Shared matching logic for long and short options. `value_string`
        // is the inline value (text after '=' or trailing packed short
        // characters), if any.
        pub fn common(comptime long_name: bool, arg_name: []const u8, value_string: ?[]const u8, values: *OptValues, iterator: anytype) Error!void {
            inline for (options) |opt| {
                const opt_name = if (long_name) opt.long else &[1]u8{opt.short};
                if (std.mem.eql(u8, arg_name, opt_name)) {
                    const field_name = if (opt.long.len > 0) opt.long else &[1]u8{opt.short};
                    if (opt.type == void) {
                        if (value_string != null and value_string.?.len > 0) {
                            if (long_name) {
                                log.err("Option '{s}' does not take an argument!", .{opt_name});
                                return error.OptionUnexpectedValue;
                            } else {
                                // Packed short flags ("-ab"): set this flag,
                                // then treat the remainder as the next option.
                                @field(values, field_name) = true;
                                const next_name = &[1]u8{value_string.?[0]};
                                const next_value_string = if (value_string.?[1..].len > 0)
                                    value_string.?[1..]
                                else
                                    null;
                                try common(false, next_name, next_value_string, values, iterator);
                            }
                        } else @field(values, field_name) = true;
                    } else {
                        // Value option: use the inline value or consume the
                        // next argument from the iterator.
                        const vs = value_string orelse (iterator.next() orelse {
                            log.err("Option '{s}' missing argument!", .{opt_name});
                            return error.OptionMissingValue;
                        });
                        @field(values, field_name) = parseValue(
                            opt.type,
                            comptime opt.getDefault().*,
                            comptime opt.getSettings(),
                            vs,
                        ) catch {
                            log.err("Could not parse value '{s}' for option '{s}!", .{ vs, opt_name });
                            return error.OptionUnexpectedValue;
                        };
                    }
                    break;
                }
            } else {
                log.err("Unrecognized {s} option '{s}'!", .{
                    if (long_name) "long" else "short",
                    arg_name,
                });
                return error.UnrecognizedOption;
            }
        }
        pub fn long(arg: []const u8, values: *OptValues, iterator: anytype) Error!void {
            const index = std.mem.indexOf(u8, arg, "=");
            var arg_name: []const u8 = undefined;
            // Fix: initialize to null. Previously this was `undefined` and
            // stayed so when the argument contained no '=', after which
            // `common` read the undefined optional.
            var value_string: ?[]const u8 = null;
            if (index) |i| {
                arg_name = arg[2..i];
                value_string = arg[i + 1 ..];
            } else {
                arg_name = arg[2..];
            }
            try common(true, arg_name, value_string, values, iterator);
        }
        pub fn short(arg: []const u8, values: *OptValues, iterator: anytype) Error!void {
            const arg_name = &[1]u8{arg[1]};
            const value_string = if (arg.len > 2)
                arg[2..]
            else
                null;
            try common(false, arg_name, value_string, values, iterator);
        }
    };
    var all_positional = false;
    while (arg_iterator.next()) |arg| {
        if (std.mem.startsWith(u8, arg, "--") and !all_positional) {
            if (arg.len == 2) {
                // Bare "--": everything after is positional.
                all_positional = true;
                result.positionals.separator_index = positional_list.items.len;
            } else try Parser.long(arg, &result, arg_iterator);
        } else if (std.mem.startsWith(u8, arg, "-") and arg.len > 1 and !all_positional) {
            try Parser.short(arg, &result, arg_iterator);
        } else {
            try positional_list.append(allocator, arg);
        }
    }
    positional_list.shrinkAndFree(allocator, positional_list.items.len);
    result.positionals.items = positional_list.items;
    // Without "--" the separator sits at the end: everything is "before".
    if (!all_positional)
        result.positionals.separator_index = result.positionals.items.len;
    return result;
}
/// Minimal forward iterator over a slice, exposing the same `next` shape as
/// a process argument iterator for use in tests.
fn SliceIterator(comptime T: type) type {
    return struct {
        slice: []const T,
        index: usize,
        const Self = @This();
        /// Starts iteration at the beginning of `slice`.
        pub fn init(slice: []const T) Self {
            return Self{ .slice = slice, .index = 0 };
        }
        /// Returns the next element, or null once the slice is exhausted.
        pub fn next(self: *Self) ?T {
            if (self.index >= self.slice.len) return null;
            defer self.index += 1;
            return self.slice[self.index];
        }
    };
}
// Two-bit enum (tag values 0..3) exercised by the parsing test below.
const TestEnum = enum(u2) { a, b, c, d };
// End-to-end test: every option kind (flags, packed short flags, inline and
// separate values, enums by name/value/both, optionals, arrays with custom
// delimiters, hex ints/floats) plus positional handling around "--".
test "argument parsing" {
    const allocator = std.testing.allocator;
    // zig fmt: off
    const args = [_][]const u8{
        "positional1",
        "-a", "test arg",
        "-b",
        "positional2",
        "-cd", "FaLSE",
        "-eff0000",
        "--longf=c",
        "--longg", "1",
        "positional3",
        "-h1,d,0",
        "-ib",
        "-j", "d,a,b",
        "-k10|NULL|d",
        "-lnull",
        "positional4",
        "-m0b00110010",
        "-n", "1.2e4",
        "-o0x10p+10",
        "positional5",
        "-p0x10p-10",
        "-q", "bingusDELIMITERbungusDELIMITERbongoDELIMITERbingo",
        "-r", "bujungo",
        "--",
        "-s",
        "positional6",
    };
    // zig fmt: on
    var args_iterator = SliceIterator([]const u8).init(args[0..]);
    const options = try parse(&.{
        option('a', "longa", []const u8, "", .{}),
        option('b', "", Flag, {}, .{}),
        option('c', "longc", Flag, {}, .{}),
        option('d', "", bool, true, .{}),
        option('e', "", u32, 0, .{ .radix = 16 }),
        option('f', "", TestEnum, .a, .{}),
        option('f', "longf", TestEnum, .a, .{}),
        option('g', "longg", TestEnum, .a, .{ .enum_parsing = .value }),
        option('h', "", [3]TestEnum, .{ .a, .a, .a }, .{ .enum_parsing = .both }),
        option('i', "", ?TestEnum, null, .{}),
        option('j', "", ?[3]TestEnum, null, .{}),
        option('k', "", [3]?TestEnum, .{ null, .a, .a }, .{ .enum_parsing = .both, .delimiter = "|", .radix = 2 }),
        option('l', "", ?[3]?TestEnum, .{ .a, .a, .a }, .{}),
        option('m', "", u8, 0, .{}),
        option('n', "", f32, 0.0, .{}),
        option('o', "", f64, 0.0, .{ .hex = true }),
        option('p', "", f128, 0.0, .{ .hex = true }),
        option('q', "", [4][]const u8, .{ "", "", "", "" }, .{ .delimiter = "DELIMITER" }),
        option('r', "", ?[]const u8, null, .{}),
        option('s', "", Flag, {}, .{}),
    }, allocator, &args_iterator);
    defer options.positionals.deinit(allocator);
    try std.testing.expectEqualStrings("test arg", options.longa);
    try std.testing.expect(options.b);
    try std.testing.expect(options.longc);
    try std.testing.expect(!options.d);
    try std.testing.expectEqual(options.e, 0xff0000);
    try std.testing.expectEqual(options.longf, .c);
    try std.testing.expectEqual(options.longg, .b);
    try std.testing.expectEqualSlices(TestEnum, &.{ .b, .d, .a }, options.h[0..]);
    try std.testing.expectEqual(options.i, .b);
    try std.testing.expectEqual(options.j, .{ .d, .a, .b });
    try std.testing.expectEqualSlices(?TestEnum, &.{ .c, null, .d }, options.k[0..]);
    try std.testing.expectEqual(options.l, null);
    try std.testing.expectEqual(options.m, 50);
    try std.testing.expectEqual(options.n, 12000);
    try std.testing.expectEqual(options.o, 16384.0);
    try std.testing.expectEqual(options.p, 0.015625);
    const expected_q = [_][]const u8{ "bingus", "bungus", "bongo", "bingo" };
    for (expected_q) |string, i| {
        try std.testing.expectEqualStrings(string, options.q[i]);
    }
    try std.testing.expectEqualStrings(options.r.?, "bujungo");
    // "-s" comes after "--", so it must land in positionals, not set the flag.
    const expected_positionals = [_][]const u8{
        "positional1",
        "positional2",
        "positional3",
        "positional4",
        "positional5",
        "-s",
        "positional6",
    };
    for (expected_positionals) |string, i| {
        try std.testing.expectEqualStrings(string, options.positionals.items[i]);
    }
    for (expected_positionals[0..options.positionals.separator_index]) |string, i| {
        try std.testing.expectEqualStrings(
            string,
            options.positionals.beforeSeparator()[i],
        );
    }
    for (expected_positionals[options.positionals.separator_index..]) |string, i| {
        try std.testing.expectEqualStrings(
            string,
            options.positionals.afterSeparator()[i],
        );
    }
    try std.testing.expectEqual(options.s, false);
}
|
accord.zig
|
const std = @import("std");
const liu = @import("src/liu/lib.zig");
const assets = @import("src/assets.zig");
const bld = std.build;
const Arch = std.Target.Cpu.Arch;
const Builder = bld.Builder;
const Mode = std.builtin.Mode;
var mode: Mode = undefined;
// Name and root source file describing one build artifact.
const ProgData = struct {
    name: []const u8,
    root: []const u8,
};
/// Builds a native helper executable and places it in the local path
/// config directory.
fn pathTool(b: *Builder, prog: ProgData) *bld.LibExeObjStep {
    const exe = b.addExecutable(prog.name, prog.root);
    exe.addPackagePath("liu", "src/liu/lib.zig");
    exe.setBuildMode(mode);
    exe.setOutputDir("config/local/path");
    exe.install();
    b.default_step.dependOn(&exe.step);
    return exe;
}
/// Builds one route as a wasm32-freestanding shared library.
fn wasmProgram(b: *Builder, prog: ProgData) *bld.LibExeObjStep {
    const lib = b.addSharedLibrary(prog.name, prog.root, b.version(0, 0, 0));
    lib.addPackagePath("liu", "src/liu/lib.zig");
    lib.addPackagePath("assets", "src/assets.zig");
    lib.setBuildMode(mode);
    lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
    // Output straight to static folder by default to make things easier
    lib.setOutputDir("./.zig/zig-out/");
    // Strip symbols from release builds to shrink the shipped wasm.
    if (mode != .Debug) {
        lib.strip = true;
    }
    lib.install();
    b.default_step.dependOn(&lib.step);
    return lib;
}
/// Build entry point: compiles every wasm route and every native path tool
/// with the user-selected release mode.
pub fn build(b: *Builder) !void {
    // Standard release options allow the person running `zig build` to select
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
    mode = b.standardReleaseOptions();
    const wasm_programs = [_]ProgData{
        .{ .name = "kilordle", .root = "./src/routes/kilordle/kilordle.zig" },
        .{ .name = "painter", .root = "./src/routes/painter/painter.zig" },
        .{ .name = "erlang", .root = "./src/routes/erlang/erlang.zig" },
    };
    const path_tools = [_]ProgData{
        .{ .name = "aliu_path_helper", .root = "./src/tools/path_helper.zig" },
    };
    for (wasm_programs) |prog| {
        _ = wasmProgram(b, prog);
    }
    for (path_tools) |prog| {
        _ = pathTool(b, prog);
    }
}
// For running scripts/etc.
/// Script entry point: regenerates the kilordle asset data.
pub fn main() !void {
    try assets.kilordle.generate();
}
|
build.zig
|
const std = @import("std");
const mem = std.mem;
const Fullwidth = @This();
allocator: *mem.Allocator,
array: []bool,
lo: u21 = 12288,
hi: u21 = 65510,
/// Allocates and fills the Fullwidth lookup table. Array indices are code
/// points relative to `lo` (12288); caller must call deinit to free it.
pub fn init(allocator: *mem.Allocator) !Fullwidth {
    var instance = Fullwidth{
        .allocator = allocator,
        .array = try allocator.alloc(bool, 53223),
    };
    mem.set(bool, instance.array, false);
    // Inclusive runs of array indices (code point minus lo) that are
    // Fullwidth; equivalent to the original per-index assignments.
    const runs = [_][2]u21{
        .{ 0, 0 },
        .{ 52993, 53088 },
        .{ 53216, 53222 },
    };
    for (runs) |run| {
        var index: u21 = run[0];
        while (index <= run[1]) : (index += 1) {
            instance.array[index] = true;
        }
    }
    // Placeholder: 0. Struct name, 1. Code point kind
    return instance;
}
// Frees the lookup table using the allocator passed to init.
// The struct must not be used after this call.
pub fn deinit(self: *Fullwidth) void {
self.allocator.free(self.array);
}
// isFullwidth checks if cp is of the kind Fullwidth.
// Code points outside [lo, hi] are never Fullwidth; everything inside the
// window is answered by the precomputed table (with a defensive bounds check).
pub fn isFullwidth(self: Fullwidth, cp: u21) bool {
    if (cp < self.lo) return false;
    if (cp > self.hi) return false;
    const offset = cp - self.lo;
    if (offset >= self.array.len) return false;
    return self.array[offset];
}
|
src/components/autogen/DerivedEastAsianWidth/Fullwidth.zig
|
const std = @import("std");
const web = @import("zhp.zig");
const log = std.log;
/// Websocket frame opcodes (RFC 6455 section 5.2). The numeric values are
/// the on-the-wire 4-bit opcode field and must not change.
pub const Opcode = enum(u4) {
    Continue = 0x0,
    Text = 0x1,
    Binary = 0x2,
    Res3 = 0x3,
    Res4 = 0x4,
    Res5 = 0x5,
    Res6 = 0x6,
    Res7 = 0x7,
    Close = 0x8,
    Ping = 0x9,
    Pong = 0xA,
    ResB = 0xB,
    ResC = 0xC,
    ResD = 0xD,
    ResE = 0xE,
    ResF = 0xF,

    /// Opcodes with the high bit set (0x8-0xF) are control frames.
    pub fn isControl(opcode: Opcode) bool {
        return @enumToInt(opcode) >= 0x8;
    }
};
/// Wire-format websocket frame header (RFC 6455 section 5.2), bit-packed so a
/// @bitCast to u16 yields the first two bytes of a frame.
pub const WebsocketHeader = packed struct {
    len: u7,
    mask: bool,
    opcode: Opcode,
    rsv3: u1 = 0,
    rsv2: u1 = 0,
    compressed: bool = false, // rsv1
    final: bool = true,

    /// Maps a payload length onto the 7-bit header length field:
    /// 0-125 is stored directly, 126 marks a following 16-bit extended
    /// length, and 127 marks a 64-bit extended length.
    /// Fixed boundary: the first range previously read `0...126`, so 126
    /// only produced the right value because @truncate(u7, 126) happens to
    /// be 126; the RFC boundary is 125.
    pub fn packLength(length: usize) u7 {
        return switch (length) {
            0...125 => @truncate(u7, length),
            126...0xFFFF => 126,
            else => 127,
        };
    }
};
/// A single websocket frame: header, optional masking key, and payload.
pub const WebsocketDataFrame = struct {
    header: WebsocketHeader,
    mask: [4]u8 = undefined,
    data: []const u8,

    /// Checks RFC 6455 invariants: control frames must be final and carry at
    /// most 125 bytes, and the header len field must agree with the payload
    /// size (including the 126/127 extended-length markers).
    pub fn isValid(dataframe: WebsocketDataFrame) bool {
        // Validate control frame
        if (dataframe.header.opcode.isControl()) {
            if (!dataframe.header.final) {
                return false; // Control frames cannot be fragmented
            }
            if (dataframe.data.len > 125) {
                return false; // Control frame payloads cannot exceed 125 bytes
            }
        }
        // Validate header len field. Boundary fixed from `0...126` to
        // `0...125`: a 126-byte payload must be represented by the
        // extended-length marker 126, matching WebsocketHeader.packLength.
        const expected = switch (dataframe.data.len) {
            0...125 => dataframe.data.len,
            126...0xFFFF => 126,
            else => 127,
        };
        return dataframe.header.len == expected;
    }
};
// Create a buffered writer
// TODO: This will still split packets
// Returns a std.io.BufferedWriter type whose underlying writer emits frames
// of the given opcode. Only .Text and .Binary are accepted; any other
// opcode is rejected at compile time.
pub fn Writer(comptime size: usize, comptime opcode: Opcode) type {
const WriterType = switch (opcode) {
.Text => Websocket.TextFrameWriter,
.Binary => Websocket.BinaryFrameWriter,
else => @compileError("Unsupported writer opcode"),
};
return std.io.BufferedWriter(size, WriterType);
}
/// Server-side websocket connection layered over an existing HTTP
/// request/response and its IO stream.
pub const Websocket = struct {
    pub const WriteError = error{
        InvalidMessage,
        MessageTooLarge,
        EndOfStream,
    } || std.fs.File.WriteError;

    request: *web.Request,
    response: *web.Response,
    io: *web.IOStream,
    err: ?anyerror = null,

    // ------------------------------------------------------------------------
    // Stream API
    // ------------------------------------------------------------------------
    pub const TextFrameWriter = std.io.Writer(*Websocket, WriteError, Websocket.writeText);
    pub const BinaryFrameWriter = std.io.Writer(*Websocket, WriteError, Websocket.writeBinary);

    // A buffered writer that will buffer up to size bytes before writing out
    pub fn writer(self: *Websocket, comptime size: usize, comptime opcode: Opcode) Writer(size, opcode) {
        const BufferedWriter = Writer(size, opcode);
        const frame_writer = switch (opcode) {
            .Text => TextFrameWriter{ .context = self },
            .Binary => BinaryFrameWriter{ .context = self },
            else => @compileError("Unsupported writer type"),
        };
        return BufferedWriter{ .unbuffered_writer = frame_writer };
    }

    // Close and send the status
    pub fn close(self: Websocket, code: u16) !void {
        // Close codes go over the wire big-endian (network byte order).
        const c = if (std.builtin.endian == .Big) code else @byteSwap(u16, code);
        const data = @bitCast([2]u8, c);
        _ = try self.writeMessage(.Close, &data);
    }

    // ------------------------------------------------------------------------
    // Low level API
    // ------------------------------------------------------------------------
    // Flush any buffered data out the underlying stream
    pub fn flush(self: *Websocket) !void {
        try self.io.flush();
    }

    pub fn writeText(self: *Websocket, data: []const u8) !usize {
        return self.writeMessage(.Text, data);
    }

    pub fn writeBinary(self: *Websocket, data: []const u8) !usize {
        return self.writeMessage(.Binary, data);
    }

    // Write a final message packet with the given opcode
    pub fn writeMessage(self: Websocket, opcode: Opcode, message: []const u8) !usize {
        return self.writeSplitMessage(opcode, true, message);
    }

    // Write a message packet with the given opcode and final flag
    pub fn writeSplitMessage(self: Websocket, opcode: Opcode, final: bool, message: []const u8) !usize {
        return self.writeDataFrame(WebsocketDataFrame{
            .header = WebsocketHeader{
                .final = final,
                .opcode = opcode,
                .mask = false, // Server to client is not masked
                .len = WebsocketHeader.packLength(message.len),
            },
            .data = message,
        });
    }

    // Write a raw data frame
    pub fn writeDataFrame(self: Websocket, dataframe: WebsocketDataFrame) !usize {
        const stream = self.io.writer();
        if (!dataframe.isValid()) return error.InvalidMessage;
        try stream.writeIntBig(u16, @bitCast(u16, dataframe.header));
        // Write extended length if needed. BUGFIX: the first range used to be
        // `0...126`, which skipped the 16-bit extended length for a payload of
        // exactly 126 bytes even though the header len field (126) promises
        // one (RFC 6455 section 5.2) - producing a corrupt frame.
        const n = dataframe.data.len;
        switch (n) {
            0...125 => {}, // Fully encoded in the header's 7-bit len field
            126...0xFFFF => try stream.writeIntBig(u16, @truncate(u16, n)),
            else => try stream.writeIntBig(u64, n),
        }
        // TODO: Handle compression
        if (dataframe.header.compressed) return error.InvalidMessage;
        if (dataframe.header.mask) {
            const mask = &dataframe.mask;
            try stream.writeAll(mask);
            // Encode: payload bytes are XOR-ed with the 4-byte masking key.
            for (dataframe.data) |c, i| {
                try stream.writeByte(c ^ mask[i % 4]);
            }
        } else {
            try stream.writeAll(dataframe.data);
        }
        try self.io.flush();
        return dataframe.data.len;
    }

    pub fn readDataFrame(self: Websocket) !WebsocketDataFrame {
        // Read and retry if we hit the end of the stream buffer
        var start = self.io.readCount();
        while (true) {
            return self.readDataFrameInBuffer() catch |err| switch (err) {
                error.EndOfBuffer => {
                    // TODO: This can make the request buffer invalid
                    const n = try self.io.shiftAndFillBuffer(start);
                    if (n == 0) return error.EndOfStream;
                    start = 0;
                    continue;
                },
                else => return err,
            };
        }
    }

    // Read assuming everything can fit before the stream hits the end of
    // it's buffer
    pub fn readDataFrameInBuffer(self: Websocket) !WebsocketDataFrame {
        const stream = self.io;
        const header = try stream.readType(WebsocketHeader, .Big);
        if (header.rsv2 != 0 or header.rsv3 != 0) {
            log.debug("Websocket reserved bits set! {}", .{header});
            return error.InvalidMessage; // Reserved bits are not yet used
        }
        if (!header.mask) {
            log.debug("Websocket client mask header not set! {}", .{header});
            return error.InvalidMessage; // Expected a client message!
        }
        if (header.opcode.isControl() and (header.len >= 126 or !header.final)) {
            log.debug("Websocket control message is invalid! {}", .{header});
            return error.InvalidMessage; // Abort, frame is invalid
        }
        // Decode length: 0-125 is literal, 126 means 16-bit extended,
        // 127 means 64-bit extended.
        const length: u64 = switch (header.len) {
            0...125 => header.len,
            126 => try stream.readType(u16, .Big),
            127 => blk: {
                const l = try stream.readType(u64, .Big);
                // Most significant bit must be 0
                if (l >> 63 == 1) {
                    log.debug("Websocket is out of range!", .{});
                    return error.InvalidMessage;
                }
                break :blk l;
            },
        };
        // TODO: Make configurable
        if (length > stream.in_buffer.len) {
            try self.close(1009); // Abort
            return error.MessageTooLarge;
        } else if (length + stream.readCount() > stream.in_buffer.len) {
            return error.EndOfBuffer; // Need to retry
        }
        const start: usize = if (header.mask) 4 else 0;
        const end = start + length;
        // Keep reading until it's filled
        while (stream.amountBuffered() < end) {
            try stream.fillBuffer();
        }
        const buf = stream.readBuffered();
        defer stream.skipBytes(end);
        const mask: [4]u8 = if (header.mask) buf[0..4].* else undefined;
        const data = buf[start..end];
        if (header.mask) {
            // Decode data in place
            for (data) |c, i| {
                data[i] = c ^ mask[i % 4];
            }
        }
        return WebsocketDataFrame{
            .header = header,
            .mask = mask,
            .data = data,
        };
    }
};
|
src/websocket.zig
|
const std = @import("std");
const zwin32 = @import("../zwin32/build.zig");
const ztracy = @import("../ztracy/build.zig");
const zd3d12 = @import("../zd3d12/build.zig");
/// Builds (and dupes) the "common" package description, wiring in its
/// zwin32, ztracy and zd3d12 dependencies plus the shared options package.
pub fn getPkg(b: *std.build.Builder, options_pkg: std.build.Pkg) std.build.Pkg {
    const deps = [_]std.build.Pkg{
        zwin32.pkg,
        ztracy.getPkg(b, options_pkg),
        zd3d12.getPkg(b, options_pkg),
        options_pkg,
    };
    // dupePkg deep-copies, so the stack-local dependency array is safe.
    return b.dupePkg(std.build.Pkg{
        .name = "common",
        .path = .{ .path = thisDir() ++ "/src/common.zig" },
        .dependencies = &deps,
    });
}
// No-op build entry point: this package is consumed via getPkg()/link()
// from a parent build script; nothing to do when built standalone.
pub fn build(b: *std.build.Builder) void {
_ = b;
}
/// Links the prebuilt "common" static library into `exe` and exposes the
/// C header search paths its public headers rely on.
pub fn link(exe: *std.build.LibExeObjStep) void {
    exe.linkLibrary(buildLibrary(exe));
    exe.addIncludeDir(thisDir() ++ "/src/c");
    exe.addIncludeDir(thisDir() ++ "/../zgpu/libs/imgui");
    exe.addIncludeDir(thisDir() ++ "/../zmesh/libs/cgltf");
    exe.addIncludeDir(thisDir() ++ "/../zgpu/libs/stb");
}
/// Compiles the "common" static library: the Zig root plus the vendored
/// C/C++ sources (dear imgui/cimgui, cgltf, stb_image) it wraps.
fn buildLibrary(exe: *std.build.LibExeObjStep) *std.build.LibExeObjStep {
    const lib = exe.builder.addStaticLibrary("common", thisDir() ++ "/src/common.zig");
    lib.setBuildMode(exe.build_mode);
    lib.setTarget(exe.target);
    lib.want_lto = false;
    lib.addIncludeDir(thisDir() ++ "/src/c");
    lib.linkSystemLibrary("c");
    lib.linkSystemLibrary("c++");
    lib.linkSystemLibrary("imm32");

    lib.addIncludeDir(thisDir() ++ "/../zgpu/libs");
    // All imgui translation units share the same (empty) flag set.
    const imgui_sources = [_][]const u8{
        thisDir() ++ "/../zgpu/libs/imgui/imgui.cpp",
        thisDir() ++ "/../zgpu/libs/imgui/imgui_widgets.cpp",
        thisDir() ++ "/../zgpu/libs/imgui/imgui_tables.cpp",
        thisDir() ++ "/../zgpu/libs/imgui/imgui_draw.cpp",
        thisDir() ++ "/../zgpu/libs/imgui/imgui_demo.cpp",
        thisDir() ++ "/../zgpu/libs/imgui/cimgui.cpp",
    };
    for (imgui_sources) |src| {
        lib.addCSourceFile(src, &.{""});
    }

    lib.addIncludeDir(thisDir() ++ "/../zmesh/libs/cgltf");
    lib.addCSourceFile(thisDir() ++ "/../zmesh/libs/cgltf/cgltf.c", &.{"-std=c99"});
    lib.addIncludeDir(thisDir() ++ "/../zgpu/libs/stb");
    lib.addCSourceFile(thisDir() ++ "/../zgpu/libs/stb/stb_image.c", &.{"-std=c99"});
    return lib;
}
// Resolves the directory containing this build.zig at comptime, so all
// source and include paths work regardless of the caller's cwd.
fn thisDir() []const u8 {
return std.fs.path.dirname(@src().file) orelse ".";
}
|
libs/common/build.zig
|
const std = @import("std");
const testing = std.testing;
const meta = std.meta;
const trait = std.meta.trait;
const log = std.log.scoped(.wasmtime_zig);
pub const wasm = @import("wasm");
// Re-exports
pub const ByteVec = wasm.ByteVec;
pub const NameVec = wasm.NameVec;
pub const ValVec = wasm.ValVec;
pub const Value = wasm.Value;
pub const Extern = wasm.Extern;
pub const ExternVec = wasm.ExternVec;
pub const Engine = wasm.Engine;
pub const Store = wasm.Store;
pub const Error = wasm.Error;
pub const Trap = wasm.Trap;
pub const Memory = wasm.Memory;
pub const MemoryType = wasm.MemoryType;
pub const WasiInstance = wasm.WasiInstance;
pub const WasiConfig = wasm.WasiConfig;
// Helpers
extern "c" fn wasmtime_wat2wasm(wat: *ByteVec, wasm: *ByteVec) ?*WasmError;
/// Opaque wasmtime error object returned by the C API.
pub const WasmError = opaque {
    /// Gets the error message. The returned vector is owned by the caller
    /// and must be deinit-ed when done.
    pub fn getMessage(self: *WasmError) *ByteVec {
        var message: ?*ByteVec = null;
        wasmtime_error_message(self, &message);
        return message.?;
    }

    /// Frees the underlying wasmtime error object.
    pub fn deinit(self: *WasmError) void {
        wasmtime_error_delete(self);
    }

    extern "c" fn wasmtime_error_message(*const WasmError, *?*ByteVec) void;
    extern "c" fn wasmtime_error_delete(*WasmError) void;
};
/// Thin wrapper around wasm.Config exposing wasmtime-specific knobs.
pub const Config = struct {
    inner: *wasm.Config,

    const Options = struct {
        /// When true, running wasm code can be stopped via an InterruptHandle.
        interruptable: bool = false,
    };

    pub fn init(options: Options) !Config {
        const config = Config{ .inner = try wasm.Config.init() };
        if (options.interruptable) config.setInterruptable(true);
        return config;
    }

    pub fn setInterruptable(self: Config, opt: bool) void {
        wasmtime_config_interruptable_set(self.inner, opt);
    }

    extern "c" fn wasmtime_config_interruptable_set(*wasm.Config, bool) void;
};
pub const Module = struct {
    inner: *wasm.Module,

    /// Initializes a new `Module` using the supplied engine and wasm bytecode.
    /// NOTE: the parameter was renamed from `wasm` because it shadowed the
    /// file-level `wasm` import, which Zig rejects as a compile error.
    pub fn initFromWasm(engine: *Engine, bytecode: []const u8) !Module {
        var wasm_bytes = ByteVec.initWithCapacity(bytecode.len);
        defer wasm_bytes.deinit();
        std.mem.copy(u8, wasm_bytes.data[0..bytecode.len], bytecode);
        return Module{
            .inner = try Module.initInner(engine, &wasm_bytes),
        };
    }

    /// Initializes a new `Module` by first converting the given wat format
    /// into wasm bytecode.
    pub fn initFromWat(engine: *Engine, wat: []const u8) !Module {
        var wat_bytes = ByteVec.initWithCapacity(wat.len);
        defer wat_bytes.deinit();
        std.mem.copy(u8, wat_bytes.data[0..wat.len], wat);

        var wasm_bytes: ByteVec = undefined;
        const err = wasmtime_wat2wasm(&wat_bytes, &wasm_bytes);
        // Only free the output vector when conversion actually produced one.
        defer if (err == null) wasm_bytes.deinit();
        if (err) |e| {
            defer e.deinit();
            var msg = e.getMessage();
            defer msg.deinit();
            log.err("unexpected error occurred: '{s}'", .{msg.toSlice()});
            return Error.ModuleInit;
        }
        return Module{
            .inner = try Module.initInner(engine, &wasm_bytes),
        };
    }

    /// Shared construction path: hands the bytecode to wasmtime and converts
    /// a C-side error into `Error.ModuleInit` (logging its message).
    fn initInner(engine: *Engine, wasm_bytes: *ByteVec) !*wasm.Module {
        var inner: ?*wasm.Module = null;
        const err = wasmtime_module_new(engine, wasm_bytes, &inner);
        if (err) |e| {
            defer e.deinit();
            var msg = e.getMessage();
            defer msg.deinit();
            log.err("unexpected error occurred: '{s}'", .{msg.toSlice()});
            return Error.ModuleInit;
        }
        return inner.?;
    }

    pub fn deinit(self: Module) void {
        self.inner.deinit();
    }

    extern "c" fn wasmtime_module_new(*Engine, *ByteVec, *?*wasm.Module) ?*WasmError;
};
pub const Func = struct {
    inner: *wasm.Func,

    pub const CallError = wasm.Func.CallError;

    pub fn init(store: *Store, callback: anytype) !Func {
        return Func{
            .inner = try wasm.Func.init(store, callback),
        };
    }

    /// Tries to call the wasm function
    /// expects `args` to be tuple of arguments
    /// TODO this is a hard-copy of wasm.Func.call implementation. Refactor.
    pub fn call(self: Func, comptime ResultType: type, args: anytype) CallError!ResultType {
        if (!comptime trait.isTuple(@TypeOf(args)))
            @compileError("Expected 'args' to be a tuple, but found type '" ++ @typeName(@TypeOf(args)) ++ "'");

        const args_len = args.len;
        comptime var wasm_args: [args_len]Value = undefined;
        inline for (wasm_args) |*arg, i| {
            arg.* = switch (@TypeOf(args[i])) {
                i32, u32 => .{ .kind = .i32, .of = .{ .i32 = @intCast(i32, args[i]) } },
                i64, u64 => .{ .kind = .i64, .of = .{ .i64 = @intCast(i64, args[i]) } },
                f32 => .{ .kind = .f32, .of = .{ .f32 = args[i] } },
                f64 => .{ .kind = .f64, .of = .{ .f64 = args[i] } },
                *Func => .{ .kind = .funcref, .of = .{ .ref = args[i] } },
                *Extern => .{ .kind = .anyref, .of = .{ .ref = args[i] } },
                // Fixed: string concatenation uses `++`, not `+`.
                else => |ty| @compileError("Unsupported argument type '" ++ @typeName(ty) ++ "'"),
            };
        }

        // TODO multiple return values
        const result_len: usize = if (ResultType == void) 0 else 1;
        if (result_len != wasm_func_result_arity(self.inner)) return CallError.InvalidResultCount;
        if (args_len != wasm_func_param_arity(self.inner)) return CallError.InvalidParamCount;

        const final_args = ValVec{
            .size = args_len,
            .data = if (args_len == 0) undefined else @ptrCast([*]Value, &wasm_args),
        };

        var trap: ?*Trap = null;
        var result_list = ValVec.initWithCapacity(result_len);
        defer result_list.deinit();

        const err = wasmtime_func_call(self.inner, &final_args, &result_list, &trap);
        if (err) |e| {
            defer e.deinit();
            var msg = e.getMessage();
            defer msg.deinit();
            log.err("Unable to call function: '{s}'", .{msg.toSlice()});
            return CallError.InnerError;
        }
        if (trap) |t| {
            t.deinit();
            // TODO handle trap message
            log.err("code unexpectedly trapped", .{});
            return CallError.Trap;
        }

        if (ResultType == void) return;

        // TODO: Handle multiple returns
        const result_ty = result_list.data[0];
        if (!wasm.Func.matchesKind(ResultType, result_ty.kind)) return CallError.InvalidResultType;

        return switch (ResultType) {
            i32, u32 => @intCast(ResultType, result_ty.of.i32),
            i64, u64 => @intCast(ResultType, result_ty.of.i64),
            f32 => result_ty.of.f32,
            f64 => result_ty.of.f64,
            *Func => @ptrCast(?*Func, result_ty.of.ref).?,
            // Fixed: `c` is never imported in this file; the re-exported
            // `Extern` is the intended type here.
            *Extern => @ptrCast(?*Extern, result_ty.of.ref).?,
            else => |ty| @compileError("Unsupported result type '" ++ @typeName(ty) ++ "'"),
        };
    }

    pub fn deinit(self: Func) void {
        self.inner.deinit();
    }

    extern "c" fn wasmtime_func_call(*wasm.Func, *const ValVec, *ValVec, *?*Trap) ?*WasmError;
    extern "c" fn wasm_func_result_arity(*const wasm.Func) usize;
    extern "c" fn wasm_func_param_arity(*const wasm.Func) usize;
};
pub const Instance = struct {
    inner: *wasm.Instance,

    /// Initializes a new `Instance` using the given `store` and `module`.
    /// The given slice defined in `import` must match what was initialized
    /// using the same `Store` as given.
    pub fn init(store: *Store, module: Module, import: []const Func) !Instance {
        var trap: ?*Trap = null;
        var inner: ?*wasm.Instance = null;

        var imports = ExternVec.initWithCapacity(import.len);
        defer imports.deinit();
        for (import) |func, i| {
            imports.data[i] = func.inner.asExtern();
        }

        const err = wasmtime_instance_new(store, module.inner, &imports, &inner, &trap);
        if (err) |e| {
            defer e.deinit();
            var msg = e.getMessage();
            defer msg.deinit();
            log.err("unexpected error occurred: '{s}'", .{msg.toSlice()});
            return Error.InstanceInit;
        }
        if (trap) |t| {
            defer t.deinit();
            // TODO handle trap message
            log.err("code unexpectedly trapped", .{});
            return Error.InstanceInit;
        }
        return Instance{ .inner = inner.? };
    }

    /// Looks up an exported function by name; null when absent.
    pub fn getExportFunc(self: Instance, module: Module, name: []const u8) ?Func {
        const found = self.inner.getExportFunc(module.inner, name) orelse return null;
        return Func{ .inner = found };
    }

    /// Looks up an exported memory by name; null when absent.
    pub fn getExportMem(self: Instance, module: Module, name: []const u8) ?*Memory {
        return self.inner.getExportMem(module.inner, name);
    }

    pub fn deinit(self: Instance) void {
        self.inner.deinit();
    }

    extern "c" fn wasmtime_instance_new(*Store, *const wasm.Module, *const ExternVec, *?*wasm.Instance, *?*Trap) ?*WasmError;
};
// Handle used to interrupt currently-executing wasm code (e.g. from another
// thread). Only obtainable when the Config enabled interrupts.
pub const InterruptHandle = opaque {
/// Creates a new interrupt handle.
/// Must be freed by calling `deinit()`
/// Fails with error.InterruptsNotEnabled when the store's config did not
/// set `.interruptable = true`.
pub fn init(store: *Store) !*InterruptHandle {
return wasmtime_interrupt_handle_new(store) orelse error.InterruptsNotEnabled;
}
/// Invokes an interrupt in the current wasm module
pub fn interrupt(self: *InterruptHandle) void {
wasmtime_interrupt_handle_interrupt(self);
}
// Frees the handle on the wasmtime side.
pub fn deinit(self: *InterruptHandle) void {
wasmtime_interrupt_handle_delete(self);
}
extern "c" fn wasmtime_interrupt_handle_interrupt(*InterruptHandle) void;
extern "c" fn wasmtime_interrupt_handle_delete(*InterruptHandle) void;
extern "c" fn wasmtime_interrupt_handle_new(*Store) ?*InterruptHandle;
};
// Thin wrapper over wasmtime's linker, used to wire WASI and named module
// instances together before instantiating a module. Functions returning
// `?*WasmError` hand ownership of the error (if any) to the caller.
pub const Linker = opaque {
pub fn init(store: *Store) !*Linker {
return wasmtime_linker_new(store) orelse error.LinkerInit;
}
pub fn deinit(self: *Linker) void {
wasmtime_linker_delete(self);
}
/// Defines a `WasiInstance` for the current `Linker`
pub fn defineWasi(self: *Linker, wasi: *const WasiInstance) ?*WasmError {
return wasmtime_linker_define_wasi(self, wasi);
}
/// Defines an `Instance` for the current `Linker` object using the given `name`
pub fn defineInstance(self: *Linker, name: *const NameVec, instance: *const wasm.Instance) ?*WasmError {
return wasmtime_linker_define_instance(self, name, instance);
}
/// Instantiates the `Linker` for the given `Module` and creates a new `Instance` for it
/// Returns a `WasmError` when failed to instantiate
pub fn instantiate(
self: *const Linker,
module: Module,
instance: *?*wasm.Instance,
trap: *?*Trap,
) ?*WasmError {
return wasmtime_linker_instantiate(self, module.inner, instance, trap);
}
extern "c" fn wasmtime_linker_new(*Store) ?*Linker;
extern "c" fn wasmtime_linker_delete(*Linker) void;
extern "c" fn wasmtime_linker_define_wasi(*Linker, *const WasiInstance) ?*WasmError;
extern "c" fn wasmtime_linker_define_instance(*Linker, *const NameVec, *const wasm.Instance) ?*WasmError;
extern "c" fn wasmtime_linker_instantiate(*const Linker, *const wasm.Module, *?*wasm.Instance, *?*Trap) ?*WasmError;
};
// Forces semantic analysis of every public declaration so `zig test`
// catches compile errors anywhere in this file.
test "" {
testing.refAllDecls(@This());
}
|
src/main.zig
|
const std = @import("std");
const fs = std.fs;
const Allocator = std.mem.Allocator;
const g = @import("spirv/grammar.zig");
/// A parsed SPIR-V version number (major.minor).
const Version = struct {
    major: u32,
    minor: u32,

    /// Parses "<major>.<minor>"; anything else yields error.InvalidVersion.
    fn parse(str: []const u8) !Version {
        var it = std.mem.split(str, ".");
        const major_str = it.next() orelse return error.InvalidVersion;
        const minor_str = it.next() orelse return error.InvalidVersion;
        if (it.next() != null) return error.InvalidVersion;
        const major = std.fmt.parseInt(u32, major_str, 10) catch return error.InvalidVersion;
        const minor = std.fmt.parseInt(u32, minor_str, 10) catch return error.InvalidVersion;
        return Version{ .major = major, .minor = minor };
    }

    fn eql(a: Version, b: Version) bool {
        return a.major == b.major and a.minor == b.minor;
    }

    /// std.sort ordering: major first, then minor.
    fn lessThan(ctx: void, a: Version, b: Version) bool {
        if (a.major != b.major) return a.major < b.major;
        return a.minor < b.minor;
    }
};
// Tool entry point: reads the SPIR-V grammar + registry and prints a
// generated Zig feature file (lib/std/target/spirv.zig) to stdout.
pub fn main() !void {
// Everything allocated here lives for the whole run; the arena frees it all.
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = &arena.allocator;
const args = try std.process.argsAlloc(allocator);
if (args.len <= 1) {
usageAndExit(std.io.getStdErr(), args[0], 1);
}
if (std.mem.eql(u8, args[1], "--help")) {
usageAndExit(std.io.getStdErr(), args[0], 0);
}
if (args.len != 3) {
usageAndExit(std.io.getStdErr(), args[0], 1);
}
const spirv_headers_root = args[1];
const spirv_registry_root = args[2];
// Reject option-looking arguments so a mistyped flag is not used as a path.
if (std.mem.startsWith(u8, spirv_headers_root, "-") or std.mem.startsWith(u8, spirv_registry_root, "-")) {
usageAndExit(std.io.getStdErr(), args[0], 1);
}
// Required for json parsing.
@setEvalBranchQuota(10000);
// Load and parse the machine-readable SPIR-V core grammar.
const registry_path = try fs.path.join(allocator, &.{ spirv_headers_root, "include", "spirv", "unified1", "spirv.core.grammar.json" });
const registry_json = try std.fs.cwd().readFileAlloc(allocator, registry_path, std.math.maxInt(usize));
var tokens = std.json.TokenStream.init(registry_json);
const registry = try std.json.parse(g.CoreRegistry, &tokens, .{ .allocator = allocator });
// The enumerants of the "Capability" operand kind become features.
const capabilities = for (registry.operand_kinds) |opkind| {
if (std.mem.eql(u8, opkind.kind, "Capability"))
break opkind.enumerants orelse return error.InvalidRegistry;
} else return error.InvalidRegistry;
const extensions = try gather_extensions(allocator, spirv_registry_root);
const versions = try gatherVersions(allocator, registry);
// Generated source is streamed to stdout through a buffered writer.
var bw = std.io.bufferedWriter(std.io.getStdOut().writer());
const w = bw.writer();
try w.writeAll(
\\//! This file is auto-generated by tools/update_spirv_features.zig.
\\//! TODO: Dependencies of capabilities on extensions.
\\//! TODO: Dependencies of extensions on extensions.
\\//! TODO: Dependencies of extensions on versions.
\\
\\const std = @import("../std.zig");
\\const CpuFeature = std.Target.Cpu.Feature;
\\const CpuModel = std.Target.Cpu.Model;
\\
\\pub const Feature = enum {
\\
);
// Emit the Feature enum: versions first, then extensions, then capabilities.
for (versions) |ver| {
try w.print(" v{}_{},\n", .{ ver.major, ver.minor });
}
for (extensions) |ext| {
try w.print(" {},\n", .{std.zig.fmtId(ext)});
}
for (capabilities) |cap| {
try w.print(" {},\n", .{std.zig.fmtId(cap.enumerant)});
}
try w.writeAll(
\\};
\\
\\pub usingnamespace CpuFeature.feature_set_fns(Feature);
\\
\\pub const all_features = blk: {
\\ @setEvalBranchQuota(2000);
\\ const len = @typeInfo(Feature).Enum.fields.len;
\\ std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
\\ var result: [len]CpuFeature = undefined;
\\
);
// Each version depends on the previous one (a chain); the first version
// has no dependencies. `versions` is sorted ascending by gatherVersions.
for (versions) |ver, i| {
try w.print(
\\ result[@enumToInt(Feature.v{0}_{1})] = .{{
\\ .llvm_name = null,
\\ .description = "SPIR-V version {0}.{1}",
\\
, .{ ver.major, ver.minor });
if (i == 0) {
try w.writeAll(
\\ .dependencies = featureSet(&[_]Feature{}),
\\ };
\\
);
} else {
try w.print(
\\ .dependencies = featureSet(&[_]Feature{{
\\ .v{}_{},
\\ }}),
\\ }};
\\
, .{ versions[i - 1].major, versions[i - 1].minor });
}
}
// TODO: Extension dependencies.
for (extensions) |ext| {
try w.print(
\\ result[@enumToInt(Feature.{s})] = .{{
\\ .llvm_name = null,
\\ .description = "SPIR-V extension {s}",
\\ .dependencies = featureSet(&[_]Feature{{}}),
\\ }};
\\
, .{
std.zig.fmtId(ext),
ext,
});
}
// TODO: Capability extension dependencies.
// Capabilities depend on the version that introduced them (when given)
// plus any capabilities they list.
for (capabilities) |cap| {
try w.print(
\\ result[@enumToInt(Feature.{s})] = .{{
\\ .llvm_name = null,
\\ .description = "Enable SPIR-V capability {s}",
\\ .dependencies = featureSet(&[_]Feature{{
\\
, .{
std.zig.fmtId(cap.enumerant),
cap.enumerant,
});
if (cap.version) |ver_str| {
if (!std.mem.eql(u8, ver_str, "None")) {
const ver = try Version.parse(ver_str);
try w.print(" .v{}_{},\n", .{ ver.major, ver.minor });
}
}
for (cap.capabilities) |cap_dep| {
try w.print(" .{},\n", .{std.zig.fmtId(cap_dep)});
}
try w.writeAll(
\\ }),
\\ };
\\
);
}
try w.writeAll(
\\ const ti = @typeInfo(Feature);
\\ for (result) |*elem, i| {
\\ elem.index = i;
\\ elem.name = ti.Enum.fields[i].name;
\\ }
\\ break :blk result;
\\};
\\
);
try bw.flush();
}
/// SPIRV-Registry should hold all extensions currently registered for SPIR-V.
/// The *.grammar.json in SPIRV-Headers should have most of these as well, but with this we're sure to get only the actually
/// registered ones.
/// TODO: Unfortunately, neither repository contains a machine-readable list of extension dependencies.
/// Caller keeps ownership of `allocator`; returned names are allocated from it.
fn gather_extensions(allocator: *Allocator, spirv_registry_root: []const u8) ![]const []const u8 {
    const extensions_path = try fs.path.join(allocator, &.{ spirv_registry_root, "extensions" });
    var extensions_dir = try fs.cwd().openDir(extensions_path, .{ .iterate = true });
    defer extensions_dir.close();

    var extensions = std.ArrayList([]const u8).init(allocator);

    var vendor_it = extensions_dir.iterate();
    while (try vendor_it.next()) |vendor_entry| {
        std.debug.assert(vendor_entry.kind == .Directory); // If this fails, the structure of SPIRV-Registry has changed.
        var vendor_dir = try extensions_dir.openDir(vendor_entry.name, .{ .iterate = true });
        // Fixed: this directory handle was previously leaked.
        defer vendor_dir.close();
        var ext_it = vendor_dir.iterate();
        while (try ext_it.next()) |ext_entry| {
            // There is both a HTML and asciidoc version of every spec (as well as some other directories),
            // we need just the name, but to avoid duplicates here we will just skip anything thats not asciidoc.
            if (!std.mem.endsWith(u8, ext_entry.name, ".asciidoc"))
                continue;
            // Unfortunately, some extension filenames are incorrect, so we need to look for the string in tne 'Name Strings' section.
            // This has the following format:
            // ```
            // Name Strings
            // ------------
            //
            // SPV_EXT_name
            // ```
            // OR
            // ```
            // == Name Strings
            //
            // SPV_EXT_name
            // ```
            const ext_spec = try vendor_dir.readFileAlloc(allocator, ext_entry.name, std.math.maxInt(usize));
            const name_strings = "Name Strings";
            const name_strings_offset = std.mem.indexOf(u8, ext_spec, name_strings) orelse return error.InvalidRegistry;
            // As the specs are inconsistent on this next part, just skip any newlines/minuses.
            // Fixed: bounds-check so a malformed/truncated spec cannot index past the end.
            var ext_start = name_strings_offset + name_strings.len + 1;
            while (ext_start < ext_spec.len and (ext_spec[ext_start] == '\n' or ext_spec[ext_start] == '-')) {
                ext_start += 1;
            }
            if (ext_start >= ext_spec.len) return error.InvalidRegistry;
            const ext_end = std.mem.indexOfScalarPos(u8, ext_spec, ext_start, '\n') orelse return error.InvalidRegistry;
            const ext = ext_spec[ext_start..ext_end];
            std.debug.assert(std.mem.startsWith(u8, ext, "SPV_")); // Sanity check, all extensions should have a name like SPV_VENDOR_extension.
            try extensions.append(try allocator.dupe(u8, ext));
        }
    }
    return extensions.items;
}
/// Appends `version` to `versions` when it is present, not the literal
/// "None", and not already in the list. Parse failures propagate.
fn insertVersion(versions: *std.ArrayList(Version), version: ?[]const u8) !void {
    const ver_str = version orelse return;
    if (std.mem.eql(u8, ver_str, "None")) return;
    const ver = try Version.parse(ver_str);
    for (versions.items) |existing| {
        if (existing.eql(ver)) return;
    }
    try versions.append(ver);
}
/// Collects every distinct version mentioned by instructions and operand-kind
/// enumerants in the registry, returned sorted ascending.
fn gatherVersions(allocator: *Allocator, registry: g.CoreRegistry) ![]const Version {
    // The expected number of versions is small, so a linear-scan list is fine.
    var found = std.ArrayList(Version).init(allocator);
    for (registry.instructions) |inst| {
        try insertVersion(&found, inst.version);
    }
    for (registry.operand_kinds) |opkind| {
        if (opkind.enumerants) |enumerants| {
            for (enumerants) |enumerant| {
                try insertVersion(&found, enumerant.version);
            }
        }
    }
    std.sort.sort(Version, found.items, {}, Version.lessThan);
    return found.items;
}
// Prints usage text to `file` and terminates the process with `code`.
// If printing itself fails we still must not return (noreturn), so exit(1).
fn usageAndExit(file: fs.File, arg0: []const u8, code: u8) noreturn {
file.writer().print(
\\Usage: {s} /path/git/SPIRV-Headers /path/git/SPIRV-Registry
\\
\\Prints to stdout Zig code which can be used to replace the file lib/std/target/spirv.zig.
\\
\\SPIRV-Headers can be cloned from https://github.com/KhronosGroup/SPIRV-Headers,
\\SPIRV-Registry can be cloned from https://github.com/KhronosGroup/SPIRV-Registry.
\\
, .{arg0}) catch std.process.exit(1);
std.process.exit(code);
}
|
tools/update_spirv_features.zig
|
//! All AST Nodes are defined here
//! The `Parser` parses all of the `Lexer`'s tokens into these nodes
// Fixed: `//!` module doc comments must precede every declaration in the
// file; they previously appeared after the imports, which Zig rejects.
const std = @import("std");
const Token = @import("Token.zig");
const Errors = @import("error.zig").Errors;
const Type = @import("Value.zig").Type;
/// Tree represents all parsed Nodes
pub const Tree = struct {
    nodes: []const Node,
    arena: std.heap.ArenaAllocator.State,
    allocator: *std.mem.Allocator,

    /// Frees all memory: re-materializes the arena from its saved state and
    /// tears it down in one call.
    pub fn deinit(self: Tree) void {
        var arena = self.arena.promote(self.allocator);
        arena.deinit();
    }
};
/// Node represents a grammatical token within the language
/// i.e. a mutable statement such as mut x = 5
pub const Node = union(NodeType) {
declaration: *Declaration,
identifier: *Identifier,
@"return": *Return,
prefix: *Prefix,
infix: *Infix,
int_lit: *IntegerLiteral,
expression: *Expression,
block_statement: *BlockStatement,
boolean: *Bool,
if_expression: *IfExpression,
func_lit: *FunctionLiteral,
func_arg: *FunctionArgument,
call_expression: *CallExpression,
string_lit: *StringLiteral,
data_structure: *DataStructure,
map_pair: *MapPair,
index: *IndexExpression,
while_loop: *WhileLoop,
for_loop: *ForLoop,
assignment: *Assignment,
comment: *Comment,
nil: *Nil,
import: *Import,
@"continue": *Continue,
@"break": *Break,
range: *Range,
@"enum": *EnumLiteral,
switch_statement: *SwitchLiteral,
switch_prong: *SwitchProng,
type_def: *TypeDef,
slice: *SliceExpression,
/// Possible Nodes which are supported
/// Tag type of the `Node` union: one entry per payload variant above,
/// in the same order.
pub const NodeType = enum {
declaration,
identifier,
@"return",
prefix,
infix,
int_lit,
expression,
block_statement,
boolean,
if_expression,
func_lit,
func_arg,
call_expression,
string_lit,
data_structure,
map_pair,
index,
while_loop,
for_loop,
assignment,
comment,
nil,
import,
@"continue",
@"break",
range,
@"enum",
switch_statement,
switch_prong,
type_def,
slice,
};
/// Returns the Luf `Type` that Node corresponds to
/// Returns null for nodes that have no value type of their own
/// (e.g. loops, assignments, comments).
pub fn getType(self: Node) ?Type {
return switch (self) {
// A declaration's type is its explicit type definition when present,
// otherwise it is inferred from the assigned value.
.declaration => |x| if (x.type_def) |td| td.getType() else x.value.getType(),
.@"return" => |ret| ret.value.getType(),
.prefix => |pre| pre.right.getType(),
.int_lit => .integer,
.expression => |exp| exp.value.getType(),
.boolean => .boolean,
.func_lit => .function,
.func_arg => |arg| arg.arg_type.getType(),
.string_lit => .string,
.data_structure => |ds| if (ds.d_type == .array) Type.list else Type.map,
.nil => .nil,
.import => .module,
.range => .range,
.@"enum" => ._enum,
.type_def => |td| td.getType(),
// An infix expression takes the type of its left operand.
.infix => |inf| inf.left.getType(),
.slice => .list,
else => null,
};
}
/// Returns the inner type of a Node. i.e. this will return `Type.integer` for []int
/// This will also return the final result type of functions that return functions
/// i.e. this will return `Type.integer` for 'fn() fn()int {}'.
pub fn getInnerType(self: Node) ?Type {
return switch (self) {
.type_def => |td| if (td.value) |val| val.getInnerType() else td.getType(),
.data_structure => |list| list.type_def_key.getInnerType(),
.func_lit => |func| func.ret_type.getInnerType(),
.func_arg => |arg| arg.arg_type.getInnerType(),
.expression => |exp| exp.value.getInnerType(),
.map_pair => |pair| pair.key.getInnerType(),
.declaration => |decl| if (decl.type_def) |td| td.getInnerType() else decl.value.getInnerType(),
.range => .integer,
.slice => |slice| slice.left.getInnerType(),
else => self.getType(),
};
}
/// Returns the position of the token which generated the `Node`
pub fn tokenPos(self: Node) usize {
return switch (self) {
.declaration => |x| x.token.start,
.identifier => |x| x.token.start,
.@"return" => |x| x.token.start,
.prefix => |x| x.token.start,
.infix => |x| x.token.start,
.int_lit => |x| x.token.start,
.expression => |x| x.token.start,
.block_statement => |x| x.token.start,
.boolean => |x| x.token.start,
.if_expression => |x| x.token.start,
.func_lit => |x| x.token.start,
.func_arg => |x| x.token.start,
.call_expression => |x| x.token.start,
.string_lit => |x| x.token.start,
.data_structure => |x| x.token.start,
.map_pair => |x| x.token.start,
.index => |x| x.token.start,
.while_loop => |x| x.token.start,
.for_loop => |x| x.token.start,
.assignment => |x| x.token.start,
.comment => |x| x.token.start,
.nil => |x| x.token.start,
.import => |x| x.token.start,
.@"continue" => |x| x.token.start,
.@"break" => |x| x.token.start,
.range => |x| x.token.start,
.@"enum" => |x| x.token.start,
.switch_statement => |x| x.token.start,
.switch_prong => |x| x.token.start,
.type_def => |x| x.token.start,
.slice => |x| x.token.start,
};
}
/// Represents a String
pub const StringLiteral = struct {
token: Token,
value: []const u8,
};
/// Node representing a data structure such as a list or map
pub const DataStructure = struct {
token: Token,
d_type: enum { array, map },
value: ?[]const Node,
type_def_key: Node,
type_def_value: ?Node,
len: ?Node,
};
/// Node presents a key/value pair for inside a `DataStructure`
pub const MapPair = struct {
token: Token,
key: Node,
value: Node,
};
/// Represents an index selector to retrieve a value from an Array or Map
pub const IndexExpression = struct {
token: Token,
left: Node,
index: Node,
};
/// Statement node -> const x = 5
pub const Declaration = struct {
token: Token,
name: Node,
value: Node,
type_def: ?Node,
mutable: bool = false,
is_pub: bool = false,
};
/// Identifier node -> x
pub const Identifier = struct {
token: Token,
value: []const u8,
};
/// Return statement node -> return x
pub const Return = struct {
token: Token,
value: Node,
};
/// Prefix node, determines order such as negation
pub const Prefix = struct {
token: Token,
operator: Op,
right: Node,
pub const Op = enum {
minus,
bang,
bitwise_not,
pub fn fromToken(token: Token) Op {
return switch (token.token_type) {
.minus => .minus,
.bang => .bang,
.tilde => .bitwise_not,
else => unreachable,
};
}
};
};
/// Infix node, used to determine order for arithmetics
pub const Infix = struct {
token: Token,
operator: Op,
right: Node,
left: Node,
pub const Op = enum {
assign,
add,
sub,
mod,
multiply,
divide,
less_than,
greater_than,
equal,
not_equal,
bitwise_xor,
bitwise_or,
bitwise_and,
shift_left,
shift_right,
@"and",
@"or",
assign_add,
assign_sub,
assign_mul,
assign_div,
/// Returns the corresponding `Op` based on the given `Token`
pub fn fromToken(token: Token) Op {
return switch (token.token_type) {
.plus => .add,
.minus => .sub,
.assign => .assign,
.asterisk => .multiply,
.percent => .mod,
.slash => .divide,
.less_than => .less_than,
.greater_than => .greater_than,
.equal => .equal,
.not_equal => .not_equal,
.ampersand => .bitwise_and,
.caret => .bitwise_xor,
.pipe => .bitwise_or,
.shift_left => .shift_left,
.shift_right => .shift_right,
.@"and" => .@"and",
.@"or" => .@"or",
.equal_add => .assign_add,
.equal_div => .assign_div,
.equal_mul => .assign_mul,
.equal_sub => .assign_sub,
else => unreachable,
};
}
};
};
/// Represents an integer such as 385722
pub const IntegerLiteral = struct {
token: Token,
value: u64,
};
/// Node to represent an expression, could be anything
/// `value` contains the root node of the expression
pub const Expression = struct {
token: Token,
value: Node,
};
/// Node representing a block statement
/// Nodes contains a slice of Nodes
pub const BlockStatement = struct {
token: Token,
nodes: []const Node,
};
/// Node representing a boolean
pub const Bool = struct {
token: Token,
value: bool,
};
/// Represents an 'If' node
pub const IfExpression = struct {
token: Token,
condition: Node,
true_pong: Node,
false_pong: ?Node = null,
};
/// Node representing a literal function which holds the function's
/// parameters and the body of the function as a `BlockStatement`
pub const FunctionLiteral = struct {
token: Token,
params: []const Node,
body: ?Node,
ret_type: Node,
name: ?[]const u8,
};
/// Represents an argument inside a function, contains both the
/// identifier as the type of the argument.
pub const FunctionArgument = struct {
token: Token,
value: []const u8,
arg_type: Node,
};
/// Node representing a call expression and holds the function to be called
/// and also the arguments to its function
pub const CallExpression = struct {
token: Token,
function: Node,
arguments: []const Node,
};
/// Represents a while loop, contains the condition and the block
pub const WhileLoop = struct {
token: Token,
condition: Node,
block: Node,
};
pub const ForLoop = struct {
token: Token,
iter: Node,
capture: Node,
index: ?Node,
block: Node,
pub var index_node = IntegerLiteral{ .token = undefined, .value = 0 };
};
/// Represents an assignment '=' for setting the value of an existing variable
pub const Assignment = struct {
token: Token,
left: Node,
right: Node,
};
/// Represents a single line comment
pub const Comment = struct {
token: Token,
value: []const u8,
};
/// Represents "Nil"
pub const Nil = struct { token: Token };
/// Represents the Node which imports other Luf files
pub const Import = struct {
token: Token,
value: Node,
};
/// "break" statement
pub const Break = struct { token: Token };
/// "continue" statement
pub const Continue = struct { token: Token };
/// Represents a range i.e. 0..128
pub const Range = struct {
token: Token,
left: Node,
right: Node,
};
/// Represents an Enum declaration i.e. const my_enum = enum{}
pub const EnumLiteral = struct {
token: Token,
nodes: []const Node,
};
/// Represents a Switch statement i.e. switch(x) {}
pub const SwitchLiteral = struct {
token: Token,
capture: Node,
prongs: []const Node,
};
/// Represents the pong inside a switch statement, i.e.
pub const SwitchProng = struct {
token: Token,
left: Node,
right: Node,
};
/// Represents the type of the parent node
pub const TypeDef = struct {
token: Token,
value: ?Node,
/// Returns a Luf `Type` based on the token of the `TypeDef`
pub fn getType(self: *const TypeDef) ?Type {
return switch (self.token.token_type) {
.bool_type => .boolean,
.string_type => .string,
.int_type => .integer,
.void_type => ._void,
.query => .optional,
.function => self.value.?.getType(),
.left_bracket => self.value.?.getType(),
else => null,
};
}
};
/// Represents a slice node i.e. array[x:y]
pub const SliceExpression = struct {
token: Token,
left: Node,
start: ?Node,
end: ?Node,
};
};
|
src/ast.zig
|
// Minimum / maximum number of COM ports the database will arbitrate (msports.h).
pub const COMDB_MIN_PORTS_ARBITRATED = @as(u32, 256);
pub const COMDB_MAX_PORTS_ARBITRATED = @as(u32, 4096);
// Report formats accepted by ComDBGetCurrentPortUsage's ReportType parameter.
pub const CDB_REPORT_BITS = @as(u32, 0);
pub const CDB_REPORT_BYTES = @as(u32, 1);
//--------------------------------------------------------------------------------
// Section: Types (1)
//--------------------------------------------------------------------------------
// Opaque handle to an open COM port database (returned by ComDBOpen, released by ComDBClose).
pub const HCOMDB = *opaque{};
//--------------------------------------------------------------------------------
// Section: Functions (7)
//--------------------------------------------------------------------------------
// Opens the COM port database; all functions return a Win32 status code as i32.
// NOTE(review): PHComDB is declared `?*isize` while the other functions take
// `?HCOMDB` — likely a metadata-generation artifact; confirm against msports.h,
// which declares an HCOMDB out-parameter here.
pub extern "MSPORTS" fn ComDBOpen(
    PHComDB: ?*isize,
) callconv(@import("std").os.windows.WINAPI) i32;
// Closes a database handle obtained from ComDBOpen.
pub extern "MSPORTS" fn ComDBClose(
    HComDB: ?HCOMDB,
) callconv(@import("std").os.windows.WINAPI) i32;
// Fills Buffer with current port usage; format chosen by ReportType
// (CDB_REPORT_BITS or CDB_REPORT_BYTES).
pub extern "MSPORTS" fn ComDBGetCurrentPortUsage(
    HComDB: ?HCOMDB,
    // TODO: what to do with BytesParamIndex 2?
    Buffer: ?*u8,
    BufferSize: u32,
    ReportType: u32,
    MaxPortsReported: ?*u32,
) callconv(@import("std").os.windows.WINAPI) i32;
// Claims the lowest-numbered free port; the claimed number is written to ComNumber.
pub extern "MSPORTS" fn ComDBClaimNextFreePort(
    HComDB: ?HCOMDB,
    ComNumber: ?*u32,
) callconv(@import("std").os.windows.WINAPI) i32;
// Claims a specific port number; ForceClaim/Forced report whether an
// already-claimed port was taken over.
pub extern "MSPORTS" fn ComDBClaimPort(
    HComDB: ?HCOMDB,
    ComNumber: u32,
    ForceClaim: BOOL,
    Forced: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) i32;
// Releases a previously claimed port number.
pub extern "MSPORTS" fn ComDBReleasePort(
    HComDB: ?HCOMDB,
    ComNumber: u32,
) callconv(@import("std").os.windows.WINAPI) i32;
// Resizes the database to track NewSize ports.
pub extern "MSPORTS" fn ComDBResizeDatabase(
    HComDB: ?HCOMDB,
    NewSize: u32,
) callconv(@import("std").os.windows.WINAPI) i32;
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
// This module declares no ANSI/wide string APIs, so every unicode mode
// contributes an empty alias namespace; the switch exists to keep the
// generated-file structure uniform across modules.
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
    .ansi => struct {
    },
    .wide => struct {
    },
    .unspecified => if (@import("builtin").is_test) struct {
    } else struct {
    },
};
//--------------------------------------------------------------------------------
// Section: Imports (1)
//--------------------------------------------------------------------------------
const BOOL = @import("../foundation.zig").BOOL;
test {
    // Branch quota scales with the number of declarations walked below.
    @setEvalBranchQuota(
        @import("std").meta.declarations(@This()).len * 3
    );
    // reference all the pub declarations
    if (!@import("builtin").is_test) return;
    inline for (@import("std").meta.declarations(@This())) |decl| {
        if (decl.is_pub) {
            // NOTE(review): `_ = decl` discards only the declaration descriptor;
            // it does not force analysis of the decl itself (that would be
            // `_ = @field(@This(), decl.name)`). Confirm the generator's intent.
            _ = decl;
        }
    }
}
|
win32/devices/serial_communication.zig
|
const std = @import("std");
const FarPtr = @import("../far_ptr.zig").FarPtr;
const Segment = @import("segment.zig").Segment;
/// DosMemBlock represents an allocated block of memory that resides below the
/// 1 MiB address in physical memory and is accessible to DOS.
pub const DosMemBlock = struct {
    // Protected-mode selector wrapping the block.
    protected_mode_segment: Segment,
    // Real-mode paragraph (segment) address of the same block.
    real_mode_segment: u16,
    // Allocated size in bytes (rounded up to a paragraph multiple).
    len: usize,

    /// Allocates `size` bytes of conventional memory via DPMI int 31h,
    /// AX=0100h ("Allocate DOS Memory Block"). BX carries the request in
    /// 16-byte paragraphs; AX returns the real-mode segment and DX the
    /// protected-mode selector. Fails with error.DpmiAllocError when the
    /// carry flag is set on return.
    pub fn alloc(size: u20) !DosMemBlock {
        // DOS allocates whole 16-byte paragraphs, so round the request up.
        const aligned_size = std.mem.alignForwardGeneric(@TypeOf(size), size, 16);
        var protected_selector: u16 = 0;
        var real_segment: u16 = 0;
        // pushfw/popw captures FLAGS so the carry bit can be tested below.
        const flags = asm volatile (
            \\ int $0x31
            \\ pushfw
            \\ popw %[flags]
            : [flags] "=r" (-> u16),
              [_] "={ax}" (real_segment),
              [_] "={dx}" (protected_selector),
            : [_] "{ax}" (@as(u16, 0x100)),
              [_] "{bx}" (aligned_size / 16),
            : "cc"
        );
        // TODO: Better error handling.
        // Carry flag set => DPMI call failed.
        if (flags & 1 != 0) return error.DpmiAllocError;
        return DosMemBlock{
            .protected_mode_segment = .{ .selector = protected_selector },
            .real_mode_segment = real_segment,
            .len = aligned_size,
        };
    }

    /// Copies the block's contents into `buffer` (delegates to the segment).
    pub fn read(self: DosMemBlock, buffer: []u8) void {
        return self.protected_mode_segment.read(buffer);
    }

    /// Copies `bytes` into the block (delegates to the segment).
    pub fn write(self: DosMemBlock, bytes: []const u8) void {
        return self.protected_mode_segment.write(bytes);
    }
};
/// ExtMemBlock represents an allocated block of extended memory that resides
/// above the 1 MiB address in physical memory.
/// ExtMemBlock represents an allocated block of extended memory that resides
/// above the 1 MiB address in physical memory.
pub const ExtMemBlock = struct {
    // Linear address of the block.
    addr: usize,
    // Requested size in bytes.
    len: usize,
    // DPMI memory-block handle (needed to free/resize the block).
    handle: usize,

    /// Allocates `size` bytes of extended memory via DPMI int 31h, AX=0501h
    /// ("Allocate Memory Block"). BX:CX carry the size; on success BX:CX hold
    /// the linear address and SI:DI the block handle. Fails with
    /// error.DpmiAllocError when the carry flag is set on return.
    pub fn alloc(size: usize) !ExtMemBlock {
        var bx: u16 = undefined;
        var cx: u16 = undefined;
        var si: u16 = undefined;
        var di: u16 = undefined;
        // pushfw/popw captures FLAGS so the carry bit can be tested below.
        const flags = asm volatile (
            \\ int $0x31
            \\ pushfw
            \\ popw %[flags]
            : [flags] "=r" (-> u16),
              [_] "={bx}" (bx),
              [_] "={cx}" (cx),
              [_] "={si}" (si),
              [_] "={di}" (di),
            : [_] "{ax}" (@as(u16, 0x501)),
              [_] "{bx}" (@truncate(u16, size >> 16)),
              [_] "{cx}" (@truncate(u16, size)),
            // int 31h modifies FLAGS; declare the condition-code clobber,
            // consistent with DosMemBlock.alloc above.
            : "cc"
        );
        // TODO: Better error handling.
        // Carry flag set => DPMI call failed.
        if (flags & 1 != 0) return error.DpmiAllocError;
        return ExtMemBlock{
            .addr = @as(usize, bx) << 16 | cx,
            .len = size,
            .handle = @as(usize, si) << 16 | di,
        };
    }

    /// Allocates a protected-mode segment descriptor covering this block,
    /// with access rights taken from `seg_type` and the limit set to the
    /// block's last byte.
    pub fn createSegment(self: ExtMemBlock, seg_type: Segment.Type) Segment {
        const segment = Segment.alloc();
        segment.setBaseAddress(self.addr);
        segment.setAccessRights(seg_type);
        segment.setLimit(self.len - 1);
        return segment;
    }
};
|
src/dos/dpmi/mem.zig
|
const std = @import("std");
const Compilation = @import("Compilation.zig");
/// Used to implement the __has_feature macro.
/// Used to implement the __has_feature macro.
/// Returns whether the named Clang feature is supported; entries that depend
/// on the language standard or target consult `comp`. Unknown names yield false.
pub fn hasFeature(comp: *Compilation, ext: []const u8) bool {
    // Comptime table: field name is the feature name, value its availability.
    const list = .{
        .assume_nonnull = true,
        .attribute_analyzer_noreturn = true,
        .attribute_availability = true,
        .attribute_availability_with_message = true,
        .attribute_availability_app_extension = true,
        .attribute_availability_with_version_underscores = true,
        .attribute_availability_tvos = true,
        .attribute_availability_watchos = true,
        .attribute_availability_with_strict = true,
        .attribute_availability_with_replacement = true,
        .attribute_availability_in_templates = true,
        .attribute_availability_swift = true,
        .attribute_cf_returns_not_retained = true,
        .attribute_cf_returns_retained = true,
        .attribute_cf_returns_on_parameters = true,
        .attribute_deprecated_with_message = true,
        .attribute_deprecated_with_replacement = true,
        .attribute_ext_vector_type = true,
        .attribute_ns_returns_not_retained = true,
        .attribute_ns_returns_retained = true,
        .attribute_ns_consumes_self = true,
        .attribute_ns_consumed = true,
        .attribute_cf_consumed = true,
        .attribute_overloadable = true,
        .attribute_unavailable_with_message = true,
        .attribute_unused_on_fields = true,
        .attribute_diagnose_if_objc = true,
        .blocks = false, // TODO
        .c_thread_safety_attributes = true,
        .enumerator_attributes = true,
        .nullability = true,
        .nullability_on_arrays = true,
        .nullability_nullable_result = true,
        // C11-gated features.
        .c_alignas = comp.langopts.standard.atLeast(.c11),
        .c_alignof = comp.langopts.standard.atLeast(.c11),
        .c_atomic = comp.langopts.standard.atLeast(.c11),
        .c_generic_selections = comp.langopts.standard.atLeast(.c11),
        .c_static_assert = comp.langopts.standard.atLeast(.c11),
        .c_thread_local = comp.langopts.standard.atLeast(.c11) and comp.isTlsSupported(),
    };
    // Comptime-unrolled scan over the table's fields; runtime string compare.
    inline for (std.meta.fields(@TypeOf(list))) |f| {
        if (std.mem.eql(u8, f.name, ext)) return @field(list, f.name);
    }
    return false;
}
/// Used to implement the __has_extension macro.
/// Used to implement the __has_extension macro.
/// Like `hasFeature`, but reports availability regardless of the selected
/// language standard. Unknown names yield false.
pub fn hasExtension(comp: *Compilation, ext: []const u8) bool {
    // Comptime table: field name is the extension name, value its availability.
    const list = .{
        // C11 features
        .c_alignas = true,
        .c_alignof = true,
        .c_atomic = false, // TODO
        .c_generic_selections = true,
        .c_static_assert = true,
        .c_thread_local = comp.isTlsSupported(),
        // misc
        .overloadable_unmarked = false, // TODO
        .statement_attributes_with_gnu_syntax = false, // TODO
        .gnu_asm = true,
        .gnu_asm_goto_with_outputs = true,
        .matrix_types = false, // TODO
        .matrix_types_scalar_division = false, // TODO
    };
    // Comptime-unrolled scan over the table's fields; runtime string compare.
    inline for (std.meta.fields(@TypeOf(list))) |f| {
        if (std.mem.eql(u8, f.name, ext)) return @field(list, f.name);
    }
    return false;
}
|
src/features.zig
|
const testing = @import("std").testing;
const builtin = @import("builtin");
// Ported from llvm-project 13.0.0 d7b669b3a30345cfcdb2fde2af6f48aa4b94845d
//
// https://github.com/llvm/llvm-project/blob/llvmorg-13.0.0/compiler-rt/lib/builtins/os_version_check.c
// The compiler generates calls to __isPlatformVersionAtLeast() when Objective-C's @available
// function is invoked.
//
// Old versions of clang would instead emit calls to __isOSVersionAtLeast(), which is still
// supported in clang's compiler-rt implementation today in case anyone tries to link an object file
// produced with an old clang version. This requires dynamically loading frameworks, parsing a
// system plist file, and generally adds a fair amount of complexity to the implementation and so
// our implementation differs by simply removing that backwards compatability support. We only use
// the newer codepath, which merely calls out to the Darwin _availability_version_check API which is
// available on macOS 10.15+, iOS 13+, tvOS 13+ and watchOS 6+.
/// Packs major.minor.subminor into the 32-bit encoding expected by
/// _availability_version_check: major in the high 16 bits, minor in the next
/// 8 bits, subminor in the low 8 bits (out-of-range components are masked).
inline fn constructVersion(major: u32, minor: u32, subminor: u32) u32 {
    const major_bits: u32 = (major & 0xffff) << 16;
    const minor_bits: u32 = (minor & 0xff) << 8;
    const subminor_bits: u32 = subminor & 0xff;
    return major_bits | minor_bits | subminor_bits;
}
// Darwin-only
/// Compiler-rt entry point generated for Objective-C's `@available`.
/// Returns 1 when the running OS satisfies `major.minor.subminor` for the
/// given platform, else 0; delegates to Darwin's _availability_version_check
/// with a single-element version list.
pub fn __isPlatformVersionAtLeast(platform: u32, major: u32, minor: u32, subminor: u32) callconv(.C) i32 {
    return @boolToInt(_availability_version_check(1, &[_]dyld_build_version_t{
        .{
            .platform = platform,
            .version = constructVersion(major, minor, subminor),
        },
    }));
}
// _availability_version_check darwin API support.
// dyld platform identifier (e.g. macOS, iOS); values defined by dyld.
const dyld_platform_t = u32;
// Platform + packed version pair passed to _availability_version_check.
const dyld_build_version_t = extern struct {
    platform: dyld_platform_t,
    version: u32,
};
// Darwin-only
// Returns true when the host OS is at least every (platform, version) entry.
extern "c" fn _availability_version_check(count: u32, versions: [*c]const dyld_build_version_t) bool;
test "isPlatformVersionAtLeast" {
    if (!builtin.os.tag.isDarwin()) return error.SkipZigTest;
    // Note: this test depends on the actual host OS version since it is merely calling into the
    // native Darwin API.
    const macos_platform_constant = 1;
    // NOTE(review): arguments read as version 10.0.15 (major, minor, subminor);
    // if 10.15.0 was intended the call would be (10, 15, 0) — confirm.
    try testing.expect(__isPlatformVersionAtLeast(macos_platform_constant, 10, 0, 15) == 1);
    // A far-future version must report "not at least".
    try testing.expect(__isPlatformVersionAtLeast(macos_platform_constant, 99, 0, 0) == 0);
}
|
lib/std/special/compiler_rt/os_version_check.zig
|
const std = @import("std");
const testing = std.testing;
const expect = testing.expect;
const mem = std.mem;
// Global backing storage mutated through a struct-embedded slice in the tests below.
var s_array: [8]Sub = undefined;
const Sub = struct { b: u8 };
const Str = struct { a: []Sub };
test "set global var array via slice embedded in struct" {
    // Writes through the struct-embedded slice must land in the global backing array.
    var s = Str{ .a = s_array[0..] };
    s.a[0].b = 1;
    s.a[1].b = 2;
    s.a[2].b = 3;
    try expect(s_array[0].b == 1);
    try expect(s_array[1].b == 2);
    try expect(s_array[2].b == 3);
}
test "read/write through global variable array of struct fields initialized via array mult" {
    const S = struct {
        fn doTheTest() !void {
            // Initial value comes from the `** 1` array-multiplication initializer...
            try expect(storage[0].term == 1);
            // ...and the element is writable at runtime.
            storage[0] = MyStruct{ .term = 123 };
            try expect(storage[0].term == 123);
        }
        pub const MyStruct = struct {
            term: usize,
        };
        // Container-level var initialized via array multiplication.
        var storage: [1]MyStruct = [_]MyStruct{MyStruct{ .term = 1 }} ** 1;
    };
    try S.doTheTest();
}
test "implicit cast single-item pointer" {
    // Exercised both at runtime and at comptime.
    try testImplicitCastSingleItemPtr();
    comptime try testImplicitCastSingleItemPtr();
}
// Verifies that a *[1]u8 view of a single byte coerces to a slice whose
// writes reach the original variable.
fn testImplicitCastSingleItemPtr() !void {
    var byte: u8 = 100;
    const slice = @as(*[1]u8, &byte)[0..];
    slice[0] += 1;
    try expect(byte == 101);
}
// Takes a fixed-size array by value; used to check comptime evaluation below.
fn testArrayByValAtComptime(b: [2]u8) u8 {
    return b[0];
}
test "comptime evaluating function that takes array by value" {
    const arr = [_]u8{ 1, 2 };
    // Called twice to ensure repeated comptime calls with the same array work.
    const x = comptime testArrayByValAtComptime(arr);
    const y = comptime testArrayByValAtComptime(arr);
    try expect(x == 1);
    try expect(y == 1);
}
test "runtime initialize array elem and then implicit cast to slice" {
    // The array literal contains a runtime value, then coerces to a const slice.
    var two: i32 = 2;
    const x: []const i32 = &[_]i32{two};
    try expect(x[0] == 2);
}
test "array literal as argument to function" {
    const S = struct {
        fn entry(two: i32) !void {
            // Fully comptime-known and partially runtime-known literals,
            // as the only argument and as a trailing argument.
            try foo(&[_]i32{ 1, 2, 3 });
            try foo(&[_]i32{ 1, two, 3 });
            try foo2(true, &[_]i32{ 1, 2, 3 });
            try foo2(true, &[_]i32{ 1, two, 3 });
        }
        fn foo(x: []const i32) !void {
            try expect(x[0] == 1);
            try expect(x[1] == 2);
            try expect(x[2] == 3);
        }
        fn foo2(trash: bool, x: []const i32) !void {
            try expect(trash);
            try expect(x[0] == 1);
            try expect(x[1] == 2);
            try expect(x[2] == 3);
        }
    };
    try S.entry(2);
    comptime try S.entry(2);
}
test "double nested array to const slice cast in array literal" {
    const S = struct {
        fn entry(two: i32) !void {
            // Fully comptime-known nested literals.
            const cases = [_][]const []const i32{
                &[_][]const i32{&[_]i32{1}},
                &[_][]const i32{&[_]i32{ 2, 3 }},
                &[_][]const i32{
                    &[_]i32{4},
                    &[_]i32{ 5, 6, 7 },
                },
            };
            try check(&cases);
            // One level of nesting with a runtime element.
            const cases2 = [_][]const i32{
                &[_]i32{1},
                &[_]i32{ two, 3 },
            };
            try expect(cases2.len == 2);
            try expect(cases2[0].len == 1);
            try expect(cases2[0][0] == 1);
            try expect(cases2[1].len == 2);
            try expect(cases2[1][0] == 2);
            try expect(cases2[1][1] == 3);
            // Two levels of nesting with a runtime element.
            const cases3 = [_][]const []const i32{
                &[_][]const i32{&[_]i32{1}},
                &[_][]const i32{&[_]i32{ two, 3 }},
                &[_][]const i32{
                    &[_]i32{4},
                    &[_]i32{ 5, 6, 7 },
                },
            };
            try check(&cases3);
        }
        // Asserts the exact shape and contents shared by `cases` and `cases3`.
        fn check(cases: []const []const []const i32) !void {
            try expect(cases.len == 3);
            try expect(cases[0].len == 1);
            try expect(cases[0][0].len == 1);
            try expect(cases[0][0][0] == 1);
            try expect(cases[1].len == 1);
            try expect(cases[1][0].len == 2);
            try expect(cases[1][0][0] == 2);
            try expect(cases[1][0][1] == 3);
            try expect(cases[2].len == 2);
            try expect(cases[2][0].len == 1);
            try expect(cases[2][0][0] == 4);
            try expect(cases[2][1].len == 3);
            try expect(cases[2][1][0] == 5);
            try expect(cases[2][1][1] == 6);
            try expect(cases[2][1][2] == 7);
        }
    };
    try S.entry(2);
    comptime try S.entry(2);
}
test "anonymous literal in array" {
    const S = struct {
        const Foo = struct {
            a: usize = 2,
            b: usize = 4,
        };
        fn doTheTest() !void {
            // Each anonymous literal overrides one field; the other keeps its default.
            var array: [2]Foo = .{
                .{ .a = 3 },
                .{ .b = 3 },
            };
            try expect(array[0].a == 3);
            try expect(array[0].b == 4);
            try expect(array[1].a == 2);
            try expect(array[1].b == 3);
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}
|
test/behavior/array_llvm.zig
|
const std = @import("std");
const aoc = @import("aoc-lib.zig");
test "examples" {
    // Fixtures and expected values are specific to the AoC 2020 day 7 puzzle input.
    const test1 = aoc.readLines(aoc.talloc, aoc.test1file);
    defer aoc.talloc.free(test1);
    const test2 = aoc.readLines(aoc.talloc, aoc.test2file);
    defer aoc.talloc.free(test2);
    const inp = aoc.readLines(aoc.talloc, aoc.inputfile);
    defer aoc.talloc.free(inp);
    try aoc.assertEq(@as(usize, 4), try part1(aoc.talloc, test1));
    try aoc.assertEq(@as(usize, 0), try part1(aoc.talloc, test2));
    try aoc.assertEq(@as(usize, 112), try part1(aoc.talloc, inp));
    try aoc.assertEq(@as(usize, 32), try part2(aoc.talloc, test1));
    try aoc.assertEq(@as(usize, 126), try part2(aoc.talloc, test2));
    try aoc.assertEq(@as(usize, 6260), try part2(aoc.talloc, inp));
}
/// Walks the inverted containment map transitively from `bag`, recording every
/// bag that can (directly or indirectly) contain it in `seen`. Bags with no
/// entry in the map contain nothing further and end the recursion.
fn traverse1(m: std.StringHashMap(std.ArrayList([]const u8)), bag: []const u8, seen: *std.StringHashMap(bool)) void {
    const outers = m.get(bag) orelse return;
    for (outers.items) |outer_bag| {
        _ = seen.getOrPutValue(outer_bag, true) catch unreachable;
        traverse1(m, outer_bag, seen);
    }
}
/// Builds an inverted containment map (inner bag -> bags that can directly
/// contain it), then counts how many distinct bags can transitively contain
/// "shiny gold". All parse-time allocations live in a per-call arena.
fn part1(palloc: std.mem.Allocator, inp: anytype) !usize {
    var arena = std.heap.ArenaAllocator.init(palloc);
    defer arena.deinit();
    const alloc = arena.allocator();
    var map = std.StringHashMap(std.ArrayList([]const u8)).init(alloc);
    for (inp) |line| {
        // Line shape: "<bag> bags contain <spec>".
        var lit = std.mem.split(u8, line, " bags contain ");
        const bag = lit.next().?;
        const spec = lit.next().?;
        // "no other bags." -> leaf bag, nothing to record.
        if (spec[0] == 'n' and spec[1] == 'o' and spec[2] == ' ') {
            continue;
        }
        var sit = std.mem.split(u8, spec, ", ");
        while (sit.next()) |bags| {
            // Entry shape: "<count> <adjective> <color> bag(s)...".
            var bit = std.mem.split(u8, bags, " ");
            const ns = bit.next().?;
            // Count is parsed only to validate the format; part 1 ignores it.
            _ = std.fmt.parseUnsigned(usize, ns, 10) catch unreachable;
            const b1 = bit.next().?;
            const b2 = bit.next().?;
            // Re-join the two-word bag name with a single space.
            var innerBag = alloc.alloc(u8, b1.len + b2.len + 1) catch unreachable;
            std.mem.copy(u8, innerBag[0..], b1);
            innerBag[b1.len] = ' ';
            std.mem.copy(u8, innerBag[b1.len + 1 ..], b2);
            // Inverted edge: innerBag can be contained by `bag`.
            const kv = map.getOrPutValue(innerBag, std.ArrayList([]const u8).init(alloc)) catch unreachable;
            kv.value_ptr.append(bag) catch unreachable;
        }
    }
    var seen = std.StringHashMap(bool).init(alloc);
    defer seen.deinit();
    traverse1(map, "shiny gold", &seen);
    return seen.count();
}
/// Pairs an inner bag name with how many of it are required.
const BS = struct { bag: []const u8, n: usize };

/// Counts this bag itself plus, recursively, every bag it must contain
/// (weighted by the required counts). A bag absent from the map is a leaf
/// and counts as exactly one.
fn traverse2(m: std.StringHashMap(std.ArrayList(*BS)), bag: []const u8) usize {
    const contents = m.get(bag) orelse return 1;
    var total: usize = 1;
    for (contents.items) |child| {
        total += child.n * traverse2(m, child.bag);
    }
    return total;
}
/// Builds the forward containment map (bag -> required inner bags with
/// counts), then returns how many bags a "shiny gold" bag must contain
/// (traverse2 counts the bag itself, hence the trailing -1).
fn part2(palloc: std.mem.Allocator, inp: anytype) !usize {
    var arena = std.heap.ArenaAllocator.init(palloc);
    defer arena.deinit();
    const alloc = arena.allocator();
    var map = std.StringHashMap(std.ArrayList(*BS)).init(alloc);
    for (inp) |line| {
        // Line shape: "<bag> bags contain <spec>".
        var lit = std.mem.split(u8, line, " bags contain ");
        const bag = lit.next().?;
        const spec = lit.next().?;
        // "no other bags." -> leaf bag, nothing to record.
        if (spec[0] == 'n' and spec[1] == 'o' and spec[2] == ' ') {
            continue;
        }
        var sit = std.mem.split(u8, spec, ", ");
        while (sit.next()) |bags| {
            // Entry shape: "<count> <adjective> <color> bag(s)...".
            var bit = std.mem.split(u8, bags, " ");
            const ns = bit.next().?;
            const n = try std.fmt.parseUnsigned(usize, ns, 10);
            const b1 = bit.next().?;
            const b2 = bit.next().?;
            // Re-join the two-word bag name with a single space.
            var innerBag = try alloc.alloc(u8, b1.len + b2.len + 1);
            std.mem.copy(u8, innerBag[0..], b1);
            innerBag[b1.len] = ' ';
            std.mem.copy(u8, innerBag[b1.len + 1 ..], b2);
            // Forward edge: `bag` must contain n * innerBag.
            const kv = try map.getOrPutValue(bag, std.ArrayList(*BS).init(alloc));
            var bs = try alloc.create(BS);
            bs.bag = innerBag;
            bs.n = n;
            try kv.value_ptr.append(bs);
        }
    }
    return traverse2(map, "shiny gold") - 1;
}
// Driver: reads the input lines, runs both parts, and prints the answers
// unless running under the benchmark harness.
fn day07(inp: []const u8, bench: bool) anyerror!void {
    var spec = aoc.readLines(aoc.halloc, inp);
    defer aoc.halloc.free(spec);
    var p1 = try part1(aoc.halloc, spec);
    var p2 = try part2(aoc.halloc, spec);
    if (!bench) {
        try aoc.print("Part 1: {}\nPart 2: {}\n", .{ p1, p2 });
    }
}
// Entry point: run day07 under the aoc benchmarking wrapper.
pub fn main() anyerror!void {
    try aoc.benchme(aoc.input(), day07);
}
|
2020/07/aoc.zig
|
// DO NOT MODIFY. This is not actually a source file; it is a textual representation of generated code.
// Use only for debugging purposes.
// Auto-generated constructor
// Access: public
Method <init> : V
(
// (no arguments)
) {
ALOAD 0
// Method descriptor: ()V
INVOKESPECIAL java/lang/Object#<init>
RETURN
}
// Access: public
Method deploy_0 : V
(
arg 1 = Lio/quarkus/runtime/StartupContext;,
arg 2 = [Ljava/lang/Object;
) {
** label1
NEW java/util/HashSet
DUP
// Method descriptor: ()V
INVOKESPECIAL java/util/HashSet#<init>
ASTORE 3
ALOAD 2
LDC (Integer) 1
ALOAD 3
AASTORE
ALOAD 2
LDC (Integer) 1
AALOAD
ASTORE 4
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.IIOImageProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.sse.SseEventOutputProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.sse.SseEventProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.jsonp.JsonObjectProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.DefaultTextPlain"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.client.jaxrs.internal.CompletionStageRxInvokerProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.ReactiveStreamProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.interceptors.CacheControlFeature"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.interceptors.ServerContentEncodingAnnotationFeature"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.FormUrlEncodedProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.CompletionStageProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.interceptors.ClientContentEncodingAnnotationFeature"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.DefaultNumberWriter"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.jsonb.JsonBindingProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.SourceProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.FileProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.context.ContextFeature"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.interceptors.MessageSanitizerContainerResponseFilter"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.MultiValuedParamConverterProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.jsonp.JsonArrayProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.DocumentProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.DefaultBooleanWriter"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.ByteArrayProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.StringTextStar"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "io.quarkus.resteasy.runtime.RolesFilterRegistrar"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.jsonp.JsonStructureProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.StreamingOutputProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.ReaderProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.sse.SseEventSinkInterceptor"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.DataSourceProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.JaxrsFormProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.FileRangeWriter"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.InputStreamProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 4
CHECKCAST java/util/Collection
LDC (String) "org.jboss.resteasy.plugins.providers.jsonp.JsonValueProvider"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 2
LDC (Integer) 2
ALOAD 4
AASTORE
NEW java/util/HashSet
DUP
// Method descriptor: ()V
INVOKESPECIAL java/util/HashSet#<init>
ASTORE 5
ALOAD 2
LDC (Integer) 3
ALOAD 5
AASTORE
ALOAD 2
LDC (Integer) 3
AALOAD
ASTORE 6
ALOAD 6
CHECKCAST java/util/Collection
LDC (String) "io.quarkus.resteasy.runtime.RolesFilterRegistrar"
// Method descriptor: (Ljava/lang/Object;)Z
INVOKEINTERFACE java/util/Collection#add
POP
ALOAD 2
LDC (Integer) 4
ALOAD 6
AASTORE
NEW io/quarkus/restclient/runtime/RestClientRecorder
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/restclient/runtime/RestClientRecorder#<init>
ASTORE 7
ALOAD 2
LDC (Integer) 0
ALOAD 7
AASTORE
LDC (Boolean) true
// Method descriptor: (Z)Ljava/lang/Boolean;
INVOKESTATIC java/lang/Boolean#valueOf
ASTORE 8
ALOAD 2
LDC (Integer) 2
AALOAD
ASTORE 9
ALOAD 2
LDC (Integer) 4
AALOAD
ASTORE 10
ALOAD 2
LDC (Integer) 0
AALOAD
CHECKCAST io/quarkus/restclient/runtime/RestClientRecorder
ALOAD 8
CHECKCAST java/lang/Boolean
// Method descriptor: ()Z
INVOKEVIRTUAL java/lang/Boolean#booleanValue
ALOAD 9
CHECKCAST java/util/Set
ALOAD 10
CHECKCAST java/util/Set
// Method descriptor: (ZLjava/util/Set;Ljava/util/Set;)V
INVOKEVIRTUAL io/quarkus/restclient/runtime/RestClientRecorder#initializeResteasyProviderFactory
RETURN
** label2
}
// Access: public
Method deploy : V
(
arg 1 = Lio/quarkus/runtime/StartupContext;
) {
** label1
LDC (Integer) 5
ANEWARRAY java/lang/Object
ASTORE 2
ALOAD 0
ALOAD 1
ALOAD 2
// Method descriptor: (Lio/quarkus/runtime/StartupContext;[Ljava/lang/Object;)V
INVOKEVIRTUAL io/quarkus/deployment/steps/RestClientProcessor$registerProviders18#deploy_0
RETURN
** label2
}
|
localproxyservice/target/generated-sources/gizmo/io/quarkus/deployment/steps/RestClientProcessor$registerProviders18.zig
|
const wlr = @import("wlroots.zig");
const wayland = @import("wayland");
const wl = wayland.server.wl;
pub const Backend = extern struct {
    /// Opaque implementation type; the concrete data lives on the C side.
    const Impl = opaque {};
    impl: *const Impl,
    /// Signals emitted by the backend; extern layout mirrors wlroots' struct.
    events: extern struct {
        destroy: wl.Signal(*Backend),
        new_input: wl.Signal(*wlr.InputDevice),
        new_output: wl.Signal(*wlr.Output),
    },

    // backend.h
    extern fn wlr_backend_autocreate(server: *wl.Server) ?*Backend;
    /// Create a backend suited to the current environment.
    /// Fails with error.BackendCreateFailed when wlroots returns null.
    pub fn autocreate(server: *wl.Server) !*Backend {
        return wlr_backend_autocreate(server) orelse error.BackendCreateFailed;
    }

    extern fn wlr_backend_start(backend: *Backend) bool;
    /// Start the backend; fails with error.BackendStartFailed when wlroots
    /// reports failure.
    pub fn start(backend: *Backend) !void {
        if (!wlr_backend_start(backend)) {
            return error.BackendStartFailed;
        }
    }

    extern fn wlr_backend_destroy(backend: *Backend) void;
    pub const destroy = wlr_backend_destroy;

    extern fn wlr_backend_get_session(backend: *Backend) ?*wlr.Session;
    pub const getSession = wlr_backend_get_session;

    extern fn wlr_backend_get_drm_fd(backend: *Backend) c_int;
    pub const getDrmFd = wlr_backend_get_drm_fd;

    // backend/multi.h
    extern fn wlr_multi_backend_create(server: *wl.Server) ?*Backend;
    /// Create an (initially empty) multi-backend container.
    pub fn createMulti(server: *wl.Server) !*Backend {
        return wlr_multi_backend_create(server) orelse error.BackendCreateFailed;
    }

    extern fn wlr_multi_backend_add(multi: *Backend, backend: *Backend) bool;
    pub const multiAdd = wlr_multi_backend_add;

    extern fn wlr_multi_backend_remove(multi: *Backend, backend: *Backend) void;
    pub const multiRemove = wlr_multi_backend_remove;

    extern fn wlr_backend_is_multi(backend: *Backend) bool;
    pub const isMulti = wlr_backend_is_multi;

    extern fn wlr_multi_is_empty(backend: *Backend) bool;
    pub const multiIsEmpty = wlr_multi_is_empty;

    extern fn wlr_multi_for_each_backend(backend: *Backend, callback: fn (backend: *Backend, data: ?*anyopaque) callconv(.C) void, data: ?*anyopaque) void;
    pub const multiForEachBackend = wlr_multi_for_each_backend;

    // backend/headless.h
    extern fn wlr_headless_backend_create(server: *wl.Server) ?*Backend;
    /// Create a headless backend (no real inputs or outputs).
    pub fn createHeadless(server: *wl.Server) !*Backend {
        return wlr_headless_backend_create(server) orelse error.BackendCreateFailed;
    }

    extern fn wlr_headless_add_output(headless: *Backend) ?*wlr.Output;
    /// Add a virtual output to a headless backend; error.OutOfMemory on null.
    pub fn headlessAddOutput(headless: *Backend) !*wlr.Output {
        return wlr_headless_add_output(headless) orelse error.OutOfMemory;
    }

    extern fn wlr_headless_add_input_device(headless: *Backend) ?*wlr.InputDevice;
    /// Add a virtual input device to a headless backend; error.OutOfMemory on null.
    pub fn headlessAddInputDevice(headless: *Backend) !*wlr.InputDevice {
        return wlr_headless_add_input_device(headless) orelse error.OutOfMemory;
    }

    extern fn wlr_backend_is_headless(backend: *Backend) bool;
    pub const isHeadless = wlr_backend_is_headless;
};
|
src/backend.zig
|
const std = @import("std");
const Sha1 = std.crypto.hash.Sha1;
const Allocator = std.mem.Allocator;
const Peer = @import("Peer.zig");
const Torrent = @import("Torrent.zig");
const Client = @import("net/Tcp_client.zig");
const Message = @import("net/message.zig").Message;
/// Max blocks we request at a time
const max_items = 5;
/// most clients/servers report this as max block size (2^14)
const max_block_size = 16384;
/// Placeholder comparator for the work queue: ordering is irrelevant
/// (see the queue's own comment), so every pair compares as true.
fn compare(a: Work, b: Work) bool {
    _ = a;
    _ = b;
    return true;
}
/// Worker that manages jobs
pub const Worker = struct {
    const Self = @This();
    /// mutex used to lock and unlock this `Worker` for multi-threading support
    mutex: *std.Thread.Mutex,
    /// Priority based queue that contains our pieces that need to be downloaded
    /// we probably want to replace this with a regular queue as the priority does not matter
    work: *std.ArrayList(Work),
    /// Constant Torrent reference, purely for access to its data
    torrent: *const Torrent,
    /// Remaining worker slots left based on peers
    workers: usize,
    /// allocator used for the workers to allocate and free memory
    gpa: *Allocator,
    /// the total size that has been downloaded so far
    downloaded: usize,
    /// The file we write to
    file: std.fs.File,

    /// Creates a new worker for the given work.
    /// The mutex, work queue and torrent pointers must outlive the Worker;
    /// one client slot is reserved per peer in `torrent.peers`.
    pub fn init(
        gpa: *Allocator,
        mutex: *std.Thread.Mutex,
        torrent: *const Torrent,
        work: *std.ArrayList(Work),
        file: std.fs.File,
    ) Self {
        return Self{
            .mutex = mutex,
            .work = work,
            .torrent = torrent,
            .workers = torrent.peers.len,
            .gpa = gpa,
            .file = file,
            .downloaded = 0,
        };
    }

    /// Returns a Client for a worker, returns null if all peers have a client.
    /// Peers are handed out from the back of `torrent.peers`.
    /// TODO, implement a way to get back client slots when a (unexpected) disconnect happends
    pub fn getClient(self: *Self) ?Client {
        const lock = self.mutex.acquire();
        defer lock.release();
        const peer = if (self.workers > 0) self.torrent.peers[self.workers - 1] else return null;
        var client = Client.init(peer, self.torrent.file.hash, self.torrent.peer_id);
        self.workers -= 1;
        return client;
    }

    /// Puts a new piece of work in the queue (thread-safe).
    pub fn put(self: *Self, work: Work) !void {
        const lock = self.mutex.acquire();
        defer lock.release();
        try self.work.append(work);
    }

    /// Returns next job from queue, returns null if no work left (thread-safe).
    pub fn next(self: *Self) ?Work {
        const lock = self.mutex.acquire();
        defer lock.release();
        return self.work.popOrNull();
    }

    /// Writes a piece of work to a file. This blocks access to the Worker.
    /// NOTE(review): despite an earlier comment claiming this destroys the
    /// given `Work`, no deinit happens here — the caller still owns `work`
    /// and its buffer.
    /// NOTE(review): the file offset `work.index * work.size` assumes every
    /// piece has the same size as this one — confirm for the final
    /// (possibly shorter) piece.
    pub fn write(self: *Self, work: *Work) !void {
        const lock = self.mutex.acquire();
        defer lock.release();
        const size = try self.file.pwrite(work.buffer, work.index * work.size);
        self.downloaded += size;
        // Progress report: downloaded vs total size, plus percentage complete.
        const completed: f64 = @intToFloat(f64, self.downloaded) / @intToFloat(f64, self.torrent.file.size) * 100;
        std.debug.print("\r{:.2} \t\t{:.2} \t\t {d:.2}", .{
            std.fmt.fmtIntSizeBin(self.downloaded),
            std.fmt.fmtIntSizeBin(self.torrent.file.size),
            completed,
        });
    }
};
/// A piece of work that needs to be downloaded
pub const Work = struct {
    // Piece index within the torrent.
    index: u32,
    // Expected SHA-1 hash of the piece contents.
    hash: [20]u8,
    // Piece size in bytes.
    size: u32,
    // Allocator that owns `buffer`.
    gpa: *Allocator,
    // Piece contents; allocated by download(), freed by deinit().
    buffer: []u8,

    /// Initializes work and creates a buffer according to the given size,
    /// call deinit() to free its memory.
    /// NOTE(review): the buffer is actually left undefined here and only
    /// allocated in download() — calling deinit() before a successful
    /// download() would free an undefined slice; confirm callers never do.
    pub fn init(index: u32, hash: [20]u8, size: u32, gpa: *Allocator) Work {
        return .{
            .index = index,
            .hash = hash,
            .size = size,
            .gpa = gpa,
            .buffer = undefined,
        };
    }

    /// Creates a buffer with the size of the work piece,
    /// then downloads the smaller pieces and puts them inside the buffer.
    /// Keeps up to `max_items` block requests in flight and reacts to
    /// choke/unchoke/have/piece/bitfield messages from the peer.
    pub fn download(self: *Work, client: *Client) !void {
        var downloaded: usize = 0;
        var requested: u32 = 0;
        var backlog: usize = 0;
        // incase of an error, the thread function will take care of the memory
        self.buffer = try self.gpa.alloc(u8, self.size);
        // request to the peer for more bytes
        while (downloaded < self.size) {
            // if we are not choked, request for bytes
            if (!client.choked) {
                // Pipeline requests: keep up to max_items outstanding blocks.
                while (backlog < max_items and requested < self.size) : (backlog += 1) {
                    // The final block of a piece may be smaller than max_block_size.
                    const block_size: u32 = if (self.size - requested < max_block_size) self.size - requested else max_block_size;
                    try client.sendRequest(self.index, requested, block_size);
                    requested += block_size;
                }
            }
            var message = Message.deserialize(self.gpa, client.socket.reader()) catch |err| switch (err) {
                error.Unsupported => continue, // Unsupported protocol message type/extension
                else => |e| return e,
            } orelse continue; // peer sent keep-alive
            defer message.deinit(self.gpa);
            switch (message) {
                .choke => client.choked = true,
                .unchoke => client.choked = false,
                .have => |index| if (client.bitfield) |*bit_field| bit_field.setPiece(index),
                .piece => |piece| {
                    // One outstanding request completed; copy the block into place.
                    downloaded += piece.block.len;
                    backlog -= 1;
                    std.mem.copy(u8, self.buffer[piece.begin..], piece.block);
                },
                .bitfield => |payload| client.bitfield = .{ .buffer = payload },
                else => {},
            }
        }
    }

    /// Frees the Work's memory
    pub fn deinit(self: *Work) void {
        self.gpa.free(self.buffer);
        self.* = undefined;
    }

    /// Checks the integrity of the data by checking its hash against the work's hash.
    pub fn eqlHash(self: Work) bool {
        var out: [20]u8 = undefined;
        Sha1.hash(self.buffer, &out, .{});
        return std.mem.eql(u8, &self.hash, &out);
    }
};
|
src/worker.zig
|
const std = @import("std");
const c = @import("internal/c.zig");
const internal = @import("internal/internal.zig");
const log = std.log.scoped(.git);
const git = @import("git.zig");
/// An action signature (e.g. for committers, taggers, etc)
pub const Signature = extern struct {
    /// Use `name`
    z_name: [*:0]const u8,
    /// Use `email`
    z_email: [*:0]const u8,
    /// Time when the action happened
    when: Time,

    /// Full name of the author (slice view of `z_name`; no allocation)
    pub fn name(self: Signature) [:0]const u8 {
        return std.mem.sliceTo(self.z_name, 0);
    }

    /// Email of the author (slice view of `z_email`; no allocation)
    pub fn email(self: Signature) [:0]const u8 {
        return std.mem.sliceTo(self.z_email, 0);
    }

    /// Free an existing signature. The pointer is invalid afterwards.
    pub fn deinit(self: *Signature) void {
        log.debug("Signature.deinit called", .{});
        c.git_signature_free(@ptrCast(*c.git_signature, self));
        log.debug("signature freed successfully", .{});
    }

    /// Create a new signature by parsing the given buffer, which is expected to be in the format
    /// "Real Name <email> timestamp tzoffset", where `timestamp` is the number of seconds since the Unix epoch and `tzoffset` is
    /// the timezone offset in `hhmm` format (note the lack of a colon separator).
    ///
    /// Presumably the caller frees the result with `deinit` — confirm against libgit2 ownership rules.
    ///
    /// ## Parameters
    /// * `buf` - Signature string
    pub fn fromSlice(buf: [:0]const u8) !*Signature {
        log.debug("Signature.fromSlice called, buf: {s}", .{buf});
        var ret: *git.Signature = undefined;
        try internal.wrapCall("git_signature_from_buffer", .{
            @ptrCast(*?*c.git_signature, &ret),
            buf.ptr,
        });
        log.debug("successfully initalized signature {*}", .{ret});
        return ret;
    }

    /// Create a copy of an existing signature. All internal strings are also duplicated.
    pub fn duplicate(self: *const git.Signature) !*git.Signature {
        log.debug("Signature.duplicate called", .{});
        var ret: *git.Signature = undefined;
        try internal.wrapCall("git_signature_dup", .{
            @ptrCast(*?*c.git_signature, &ret),
            @ptrCast(*const c.git_signature, self),
        });
        log.debug("successfully duplicated signature", .{});
        return ret;
    }

    // Layout check: this extern struct must match libgit2's git_signature.
    test {
        try std.testing.expectEqual(@sizeOf(c.git_signature), @sizeOf(Signature));
        try std.testing.expectEqual(@bitSizeOf(c.git_signature), @bitSizeOf(Signature));
    }
    comptime {
        std.testing.refAllDecls(@This());
    }
};
/// A timestamp with timezone information; extern layout mirrors libgit2's `git_time`.
pub const Time = extern struct {
    /// time in seconds from epoch
    time: SystemTime,
    /// timezone offset, in minutes
    offset: c_int,
    /// indicator for questionable '-0000' offsets in signature
    sign: u8,

    /// Alias of libgit2's time type.
    pub const SystemTime = c.git_time_t;

    // Layout check: must match libgit2's git_time exactly.
    test {
        try std.testing.expectEqual(@sizeOf(c.git_time), @sizeOf(Time));
        try std.testing.expectEqual(@bitSizeOf(c.git_time), @bitSizeOf(Time));
    }
    comptime {
        std.testing.refAllDecls(@This());
    }
};
// Reference all top-level declarations so nested `test` blocks are analyzed and run.
comptime {
    std.testing.refAllDecls(@This());
}
|
src/signature.zig
|
const tup = @import("util.zig").tuplicate;
/// Errors returned by the instruction decoders.
pub const DecoderError = error{IllegalOpcode};
/// Register-register instruction operands.
pub const RType = struct {
    rs2: u5,
    rs1: u5,
    rd: u5,

    /// Extract the rs2 (bits 24:20), rs1 (bits 19:15) and rd (bits 11:7)
    /// register fields from a raw 32-bit R-type instruction.
    pub fn decode(raw: u32) RType {
        const rs2_field = @intCast(u5, (raw >> 20) & 0x1f);
        const rs1_field = @intCast(u5, (raw >> 15) & 0x1f);
        const rd_field = @intCast(u5, (raw >> 7) & 0x1f);
        return .{ .rs2 = rs2_field, .rs1 = rs1_field, .rd = rd_field };
    }
};
/// Immediate instruction operands (loads, ALU-immediate, jalr, system).
pub const IType = struct {
    imm: u12,
    rs1: u5,
    rd: u5,

    /// Extract the 12-bit immediate (bits 31:20) plus rs1/rd register fields.
    pub fn decode(raw: u32) IType {
        const imm_field = @intCast(u12, raw >> 20);
        const rs1_field = @intCast(u5, (raw >> 15) & 0x1f);
        const rd_field = @intCast(u5, (raw >> 7) & 0x1f);
        return .{ .imm = imm_field, .rs1 = rs1_field, .rd = rd_field };
    }

    /// Reinterpret the raw immediate bits as a signed (two's complement) value.
    pub inline fn immSigned(self: IType) i12 {
        return @bitCast(i12, self.imm);
    }
};
/// Store instruction operands.
pub const SType = struct {
    imm: u12,
    rs1: u5,
    rs2: u5,

    /// Reassemble the split S-type immediate (high bits 31:25, low bits 11:7)
    /// and read the rs1/rs2 register fields.
    pub fn decode(raw: u32) SType {
        const imm_hi = (raw >> 20) & 0xfe0;
        const imm_lo = (raw >> 7) & 0x1f;
        return .{
            .imm = @intCast(u12, imm_hi | imm_lo),
            .rs1 = @intCast(u5, (raw >> 15) & 0x1f),
            .rs2 = @intCast(u5, (raw >> 20) & 0x1f),
        };
    }

    /// Two's complement view of the 12-bit store offset.
    pub inline fn immSigned(self: SType) i12 {
        return @bitCast(i12, self.imm);
    }
};
pub const BType = struct {
    /// Branch offset. NOTE(review): decode() assembles the full 13-bit,
    /// 2-byte-aligned offset (imm[12:1] with bit 0 implied zero); the top bit
    /// (raw bit 31, shifted to value position 12) does not fit in u12, so the
    /// @intCast below looks like it would trip a safety check for backward
    /// branches — confirm intended width (u13?) against callers.
    imm: u12,
    rs1: u5,
    rs2: u5,
    /// Gather the scattered B-type immediate (raw bits 31 | 7 | 30:25 | 11:8)
    /// and read the rs1/rs2 register fields.
    pub fn decode(raw: u32) BType {
        return .{
            .imm = @intCast(u12, ((raw & 0x8000_0000) >> 19) | ((raw & 0x7e00_0000) >> 20) |
                ((raw & 0x0000_0f00) >> 7) | ((raw & 0x0000_0080) << 4)),
            .rs1 = @intCast(u5, (raw >> 15) & 0x1f),
            .rs2 = @intCast(u5, (raw >> 20) & 0x1f),
        };
    }
};
pub const JType = struct {
    /// Jump offset. NOTE(review): decode() assembles the full 21-bit,
    /// 2-byte-aligned offset (imm[20:1] with bit 0 implied zero); the top bit
    /// (raw bit 31, shifted to value position 20) does not fit in u20, so the
    /// @intCast below looks like it would trip a safety check for backward
    /// jumps — confirm intended width (u21?) against callers.
    imm: u20,
    rd: u5,
    /// Gather the scattered J-type immediate (raw bits 31 | 19:12 | 20 | 30:21)
    /// and read the rd register field.
    pub fn decode(raw: u32) JType {
        return .{
            .imm = @intCast(u20, ((raw & 0x8000_0000) >> 11) | ((raw & 0x7fe0_0000) >> 20) |
                ((raw & 0x0010_0000) >> 9) | (raw & 0x000f_f000)),
            .rd = @intCast(u5, (raw >> 7) & 0x1f),
        };
    }
};
/// Upper-immediate instruction operands (lui, auipc).
pub const UType = struct {
    imm: u32,
    rd: u5,

    /// Keep the upper 20 immediate bits in place (low 12 bits zeroed) and
    /// read the rd register field.
    pub fn decode(raw: u32) UType {
        const upper_imm = raw & 0xfffff000;
        const rd_field = @intCast(u5, (raw >> 7) & 0x1f);
        return .{ .imm = upper_imm, .rd = rd_field };
    }
};
/// Shift-immediate instruction operands (6-bit shamt encoding).
pub const ShiftType = struct {
    shamt: u6,
    rs1: u5,
    rd: u5,

    /// Read the shift amount (bits 25:20) plus the rs1/rd register fields.
    pub fn decode(raw: u32) ShiftType {
        const shamt_field = @intCast(u6, (raw >> 20) & 0x3f);
        const rs1_field = @intCast(u5, (raw >> 15) & 0x1f);
        const rd_field = @intCast(u5, (raw >> 7) & 0x1f);
        return .{ .shamt = shamt_field, .rs1 = rs1_field, .rd = rd_field };
    }
};
/// A decoded RV64IM instruction, tagged by mnemonic.
pub const Instruction = union(enum(u8)) {
    add: RType,
    sub: RType,
    xor: RType,
    @"or": RType,
    @"and": RType,
    sll: RType,
    srl: RType,
    sra: RType,
    slt: RType,
    sltu: RType,
    addi: IType,
    xori: IType,
    ori: IType,
    andi: IType,
    slli: IType,
    srli: IType,
    srai: IType,
    slti: IType,
    sltiu: IType,
    lb: IType,
    lh: IType,
    lw: IType,
    ld: IType,
    lbu: IType,
    lhu: IType,
    lwu: IType,
    sb: SType,
    sh: SType,
    sw: SType,
    sd: SType,
    beq: BType,
    bne: BType,
    blt: BType,
    bge: BType,
    bltu: BType,
    bgeu: BType,
    jal: JType,
    jalr: IType,
    lui: UType,
    auipc: UType,
    ecall: IType,
    addw: RType,
    subw: RType,
    sllw: RType,
    srlw: RType,
    sraw: RType,
    addiw: IType,
    slliw: ShiftType,
    srliw: ShiftType,
    sraiw: ShiftType,
    mul: RType,
    mulh: RType,
    mulsu: RType,
    mulu: RType,
    div: RType,
    divu: RType,
    rem: RType,
    remu: RType,

    /// Builds any variant generically; `op` is an enum literal naming the tag.
    pub fn init(op: anytype, val: anytype) Instruction {
        return @unionInit(Instruction, @tagName(op), val);
    }

    /// Decodes one 32-bit instruction by its major opcode (bits [6:2]).
    /// The low two bits (0b11 for all 32-bit encodings) are not validated;
    /// callers are expected to pass 32-bit encodings only.
    /// Return type narrowed from `anyerror` to the named `DecoderError` set
    /// (backward-compatible; matches the private decode helpers).
    pub fn decode32(raw: u32) DecoderError!Instruction {
        const opcode = (raw >> 2) & 0b11111;
        return switch (opcode) {
            0b01100 => decodeAluOp(raw), // OP (register-register, incl. M extension)
            0b00100 => decodeAluImmOp(raw), // OP-IMM
            0b00000 => decodeLoad(raw), // LOAD
            0b01000 => decodeStore(raw), // STORE
            0b11000 => decodeBranch(raw), // BRANCH
            0b11011 => init(.jal, JType.decode(raw)),
            0b11001 => init(.jalr, IType.decode(raw)),
            // Fixed: LUI's major opcode is 0110111, i.e. bits[6:2] = 0b01101.
            // The previous value 0b01011 is the AMO (atomics) opcode, so every
            // real LUI decoded as IllegalOpcode.
            0b01101 => init(.lui, UType.decode(raw)),
            0b00101 => init(.auipc, UType.decode(raw)),
            0b11100 => init(.ecall, IType.decode(raw)), // SYSTEM (decoded as ecall only)
            0b01110 => decodeAluOp32(raw), // OP-32 (RV64 W-suffixed ops)
            0b00110 => decodeAluImmOp32(raw), // OP-IMM-32
            else => DecoderError.IllegalOpcode,
        };
    }

    /// OP opcode: dispatch on (funct7, funct3); funct7 == 0b0000001 selects
    /// the M (multiply/divide) extension.
    fn decodeAluOp(raw: u32) !Instruction {
        const funct7and3 = tup(raw >> 25, (raw >> 12) & 0b111);
        return switch (funct7and3) {
            tup(0b0000000, 0b000) => init(.add, RType.decode(raw)),
            tup(0b0100000, 0b000) => init(.sub, RType.decode(raw)),
            tup(0b0000000, 0b001) => init(.sll, RType.decode(raw)),
            tup(0b0000000, 0b010) => init(.slt, RType.decode(raw)),
            tup(0b0000000, 0b011) => init(.sltu, RType.decode(raw)),
            tup(0b0000000, 0b100) => init(.xor, RType.decode(raw)),
            tup(0b0000000, 0b101) => init(.srl, RType.decode(raw)),
            tup(0b0100000, 0b101) => init(.sra, RType.decode(raw)),
            tup(0b0000000, 0b110) => init(.@"or", RType.decode(raw)),
            tup(0b0000000, 0b111) => init(.@"and", RType.decode(raw)),
            tup(0b0000001, 0b000) => init(.mul, RType.decode(raw)),
            tup(0b0000001, 0b001) => init(.mulh, RType.decode(raw)),
            tup(0b0000001, 0b010) => init(.mulsu, RType.decode(raw)),
            tup(0b0000001, 0b011) => init(.mulu, RType.decode(raw)),
            tup(0b0000001, 0b100) => init(.div, RType.decode(raw)),
            tup(0b0000001, 0b101) => init(.divu, RType.decode(raw)),
            tup(0b0000001, 0b110) => init(.rem, RType.decode(raw)),
            tup(0b0000001, 0b111) => init(.remu, RType.decode(raw)),
            else => DecoderError.IllegalOpcode,
        };
    }

    /// OP-IMM opcode: dispatch on funct3; shifts additionally inspect funct6.
    /// Note: slli/srli/srai keep the raw 12-bit immediate (IType), so
    /// consumers must mask out the shamt themselves.
    fn decodeAluImmOp(raw: u32) !Instruction {
        const funct3 = (raw >> 12) & 0b111;
        return switch (funct3) {
            0b000 => init(.addi, IType.decode(raw)),
            0b001 => init(.slli, IType.decode(raw)),
            0b010 => init(.slti, IType.decode(raw)),
            0b011 => init(.sltiu, IType.decode(raw)),
            0b100 => init(.xori, IType.decode(raw)),
            0b101 => switch (raw >> 26) {
                0b000000 => init(.srli, IType.decode(raw)),
                0b010000 => init(.srai, IType.decode(raw)),
                else => DecoderError.IllegalOpcode,
            },
            0b110 => init(.ori, IType.decode(raw)),
            0b111 => init(.andi, IType.decode(raw)),
            else => DecoderError.IllegalOpcode,
        };
    }

    /// LOAD opcode: funct3 selects width/signedness.
    fn decodeLoad(raw: u32) !Instruction {
        const funct3 = (raw >> 12) & 0b111;
        return switch (funct3) {
            0b000 => init(.lb, IType.decode(raw)),
            0b001 => init(.lh, IType.decode(raw)),
            0b010 => init(.lw, IType.decode(raw)),
            0b011 => init(.ld, IType.decode(raw)),
            0b100 => init(.lbu, IType.decode(raw)),
            0b101 => init(.lhu, IType.decode(raw)),
            0b110 => init(.lwu, IType.decode(raw)),
            else => DecoderError.IllegalOpcode,
        };
    }

    /// STORE opcode: funct3 selects width.
    fn decodeStore(raw: u32) !Instruction {
        const funct3 = (raw >> 12) & 0b111;
        return switch (funct3) {
            0b000 => init(.sb, SType.decode(raw)),
            0b001 => init(.sh, SType.decode(raw)),
            0b010 => init(.sw, SType.decode(raw)),
            0b011 => init(.sd, SType.decode(raw)),
            else => DecoderError.IllegalOpcode,
        };
    }

    /// BRANCH opcode: funct3 selects the comparison.
    fn decodeBranch(raw: u32) !Instruction {
        const funct3 = (raw >> 12) & 0b111;
        return switch (funct3) {
            0b000 => init(.beq, BType.decode(raw)),
            0b001 => init(.bne, BType.decode(raw)),
            0b100 => init(.blt, BType.decode(raw)),
            0b101 => init(.bge, BType.decode(raw)),
            0b110 => init(.bltu, BType.decode(raw)),
            0b111 => init(.bgeu, BType.decode(raw)),
            else => DecoderError.IllegalOpcode,
        };
    }

    /// OP-32 opcode (RV64 W-suffixed register-register ops).
    fn decodeAluOp32(raw: u32) !Instruction {
        const funct7and3 = tup(raw >> 25, (raw >> 12) & 0b111);
        return switch (funct7and3) {
            tup(0b0000000, 0b000) => init(.addw, RType.decode(raw)),
            tup(0b0100000, 0b000) => init(.subw, RType.decode(raw)),
            tup(0b0000000, 0b001) => init(.sllw, RType.decode(raw)),
            tup(0b0000000, 0b101) => init(.srlw, RType.decode(raw)),
            tup(0b0100000, 0b101) => init(.sraw, RType.decode(raw)),
            else => DecoderError.IllegalOpcode,
        };
    }

    /// OP-IMM-32 opcode (RV64 W-suffixed immediate ops); shifts dispatch on
    /// (funct7, funct3).
    fn decodeAluImmOp32(raw: u32) DecoderError!Instruction {
        const funct3 = (raw >> 12) & 0b111;
        const funct7and3 = tup(raw >> 25, (raw >> 12) & 0b111);
        return switch (funct3) {
            0b000 => init(.addiw, IType.decode(raw)),
            else => switch (funct7and3) {
                tup(0b0000000, 0b001) => init(.slliw, ShiftType.decode(raw)),
                tup(0b0000000, 0b101) => init(.srliw, ShiftType.decode(raw)),
                tup(0b0100000, 0b101) => init(.sraiw, ShiftType.decode(raw)),
                else => DecoderError.IllegalOpcode,
            },
        };
    }
};
|
decoder.zig
|
/// Returns the user-callback fn type for tree visits.
/// With IncludeEnter == true the callback also receives an `enter` flag
/// (true before a node's children are walked, false after).
pub fn UserVisit(comptime Node: type, comptime Context: type, comptime IncludeEnter: bool) type {
    if (IncludeEnter) {
        return fn(Context, Node, enter: bool) void;
    } else {
        return fn(Context, Node) void;
    }
}

/// Minimal visitor interface: a single function pointer that receives the
/// iface pointer itself, so implementations can use @fieldParentPtr to get
/// back to their enclosing state struct.
fn VisitIface(comptime Node: type) type {
    return struct {
        visit_fn: fn(*@This(), Node) void,
        fn visit(self: *@This(), node: Node) void {
            self.visit_fn(self, node);
        }
    };
}

/// Context handed to a user walk function; `visit` forwards a child node to
/// the active visitor (which applies the user callback and recurses).
pub fn WalkContext(comptime Context: type, comptime Node: type) type {
    return struct {
        visitor: *VisitIface(Node),
        user_ctx: Context,
        pub fn visit(self: *@This(), node: Node) void {
            self.visitor.visit(node);
        }
    };
}

/// Type of a user-supplied walk function: given a context and a node, it is
/// expected to call ctx.visit() on each child of the node.
fn UserWalkFn(comptime Context: type, comptime Node: type) type {
    return fn (*WalkContext(Context, Node), Node) void;
}
/// Pairs a user walk function with its context; the visitor iface is
/// injected each time walk() is called.
pub fn Walker(comptime Context: type, comptime Node: type) type {
    return struct {
        ctx: WalkContext(Context, Node),
        user_walk: UserWalkFn(Context, Node),
        pub fn init(user_ctx: Context, user_walk: UserWalkFn(Context, Node)) @This() {
            return .{
                .ctx = .{
                    // Visit iface is injected when walker is used.
                    .visitor = undefined,
                    .user_ctx = user_ctx,
                },
                .user_walk = user_walk,
            };
        }
        pub fn walk(self: *@This(), visitor: *VisitIface(Node), node: Node) void {
            self.ctx.visitor = visitor;
            self.user_walk(&self.ctx, node);
        }
    };
}

/// Ready-made walker for node types exposing their children as
/// `node.children.items` (e.g. an ArrayList field named `children`).
pub fn ChildArrayListWalker(comptime Node: type) Walker(void, Node) {
    const S = struct {
        fn walk(ctx: *WalkContext(void, Node), node: Node) void {
            for (node.children.items) |it| {
                ctx.visit(it);
            }
        }
    };
    return Walker(void, Node).init({}, S.walk);
}
/// Walks the tree rooted at `root`, invoking `user_visit` twice per node:
/// once with enter == true before descending into children (via `walker`),
/// and once with enter == false afterwards.
pub fn walkPrePost(comptime Context: type, user_ctx: Context, comptime Node: type, root: Node,
    walker: anytype, user_visit: UserVisit(Node, Context, true)) void {
    const S = struct {
        // Embedded iface; visit() recovers the outer struct via @fieldParentPtr.
        inner: VisitIface(Node) = .{
            .visit_fn = visit,
        },
        user_ctx: Context,
        user_visit: UserVisit(Node, Context, true),
        walker: @TypeOf(walker),
        fn visit(ptr: *VisitIface(Node), node: Node) void {
            const self = @fieldParentPtr(@This(), "inner", ptr);
            // Enter callback, recurse into children, then exit callback.
            self.user_visit(self.user_ctx, node, true);
            self.walker.walk(ptr, node);
            self.user_visit(self.user_ctx, node, false);
        }
    };
    var visitor = S{ .user_visit = user_visit, .user_ctx = user_ctx, .walker = walker };
    visitor.inner.visit(root);
}

/// Pre-order walk: `user_visit` runs on each node before its children.
pub fn walkPre(comptime Context: type, user_ctx: Context, comptime Node: type, root: Node,
    walker: anytype, user_visit: UserVisit(Node, Context, false)) void {
    const S = struct {
        // Embedded iface; visit() recovers the outer struct via @fieldParentPtr.
        inner: VisitIface(Node) = .{
            .visit_fn = visit,
        },
        user_ctx: Context,
        user_visit: UserVisit(Node, Context, false),
        walker: @TypeOf(walker),
        fn visit(ptr: *VisitIface(Node), node: Node) void {
            const self = @fieldParentPtr(@This(), "inner", ptr);
            // Visit the node first, then recurse into its children.
            self.user_visit(self.user_ctx, node);
            self.walker.walk(ptr, node);
        }
    };
    var visitor = S{ .user_visit = user_visit, .user_ctx = user_ctx, .walker = walker };
    visitor.inner.visit(root);
}

/// Post-order walk: `user_visit` runs on each node after its children.
pub fn walkPost(comptime Context: type, user_ctx: Context, comptime Node: type, root: Node,
    walker: anytype, user_visit: UserVisit(Node, Context, false)) void {
    const S = struct {
        // Embedded iface; visit() recovers the outer struct via @fieldParentPtr.
        inner: VisitIface(Node) = .{
            .visit_fn = visit,
        },
        user_ctx: Context,
        user_visit: UserVisit(Node, Context, false),
        walker: @TypeOf(walker),
        fn visit(ptr: *VisitIface(Node), node: Node) void {
            const self = @fieldParentPtr(@This(), "inner", ptr);
            // Recurse into children first, then visit the node.
            self.walker.walk(ptr, node);
            self.user_visit(self.user_ctx, node);
        }
    };
    var visitor = S{ .user_visit = user_visit, .user_ctx = user_ctx, .walker = walker };
    visitor.inner.visit(root);
}
// Multiple roots.
/// Pre-order walk applied to each root in `roots`, in order.
pub fn walkPreMany(comptime Context: type, ctx: Context, comptime NodeType: type, roots: []const NodeType,
    walker: anytype, user_visit: UserVisit(NodeType, Context, false)) void {
    for (roots) |it| {
        walkPre(Context, ctx, NodeType, it, walker, user_visit);
    }
}
/// Enter/exit (pre+post) walk applied to each root in `roots`, in order.
/// Consistency fix: `NodeType` now carries an explicit `comptime`, matching
/// the sibling walkPreMany/searchPreMany signatures (parameters of type
/// `type` are comptime regardless, so call sites are unaffected).
pub fn walkPrePostMany(comptime Context: type, ctx: Context, comptime NodeType: type, roots: []const NodeType,
    walker: anytype, user_visit: UserVisit(NodeType, Context, true)) void {
    for (roots) |it| {
        walkPrePost(Context, ctx, NodeType, it, walker, user_visit);
    }
}
/// Search variant of VisitIface: visit() returns the found node, or null to
/// continue searching.
fn SearchVisitIface(comptime Node: type) type {
    return struct {
        visit_fn: fn(*@This(), Node) ?Node,
        fn visit(self: *@This(), node: Node) ?Node {
            return self.visit_fn(self, node);
        }
    };
}

/// Context handed to a user search-walk function; visit() forwards a child
/// to the active visitor and propagates a non-null match upward.
pub fn SearchWalkContext(comptime Context: type, comptime Node: type) type {
    return struct {
        visitor: *SearchVisitIface(Node),
        user_ctx: Context,
        pub fn visit(self: *@This(), node: Node) ?Node {
            return self.visitor.visit(node);
        }
    };
}

/// Predicate type: returns true when a node matches the search.
pub fn UserPredicate(comptime Context: type, comptime Node: type) type {
    return fn(Context, Node) bool;
}

/// Type of a user search-walk function: expected to call ctx.visit() on each
/// child and return the first non-null result (or null).
pub fn UserSearchWalkFn(comptime Context: type, comptime Node: type) type {
    return fn(*SearchWalkContext(Context, Node), Node) ?Node;
}
/// Search counterpart of Walker: pairs a user search-walk function with its
/// context; the visitor iface is injected on each walk() call.
pub fn SearchWalker(comptime Context: type, comptime Node: type) type {
    return struct {
        ctx: SearchWalkContext(Context, Node),
        user_walk: UserSearchWalkFn(Context, Node),
        pub fn init(user_ctx: Context, user_walk: UserSearchWalkFn(Context, Node)) @This() {
            return .{
                .ctx = .{
                    // Visit iface is injected when walker is used.
                    .visitor = undefined,
                    .user_ctx = user_ctx,
                },
                .user_walk = user_walk,
            };
        }
        pub fn walk(self: *@This(), visitor: *SearchVisitIface(Node), node: Node) ?Node {
            self.ctx.visitor = visitor;
            return self.user_walk(&self.ctx, node);
        }
    };
}

/// Ready-made search walker for node types exposing `node.children.items`;
/// returns the first match found among a node's children, or null.
pub fn ChildArrayListSearchWalker(comptime Node: type) SearchWalker(void, Node) {
    const S = struct {
        fn walk(ctx: *SearchWalkContext(void, Node), node: Node) ?Node {
            for (node.children.items) |it| {
                if (ctx.visit(it)) |res| {
                    return res;
                }
            }
            return null;
        }
    };
    return SearchWalker(void, Node).init({}, S.walk);
}
/// Pre-order search: returns the first node (each node is checked before its
/// children) for which `user_visit` returns true, or null if none matches.
pub fn searchPre(comptime Context: type, user_ctx: Context, comptime Node: type, root: Node,
    walker: anytype, user_visit: UserPredicate(Context, Node)) ?Node {
    const S = struct {
        // Embedded iface; visit() recovers the outer struct via @fieldParentPtr.
        visitor: SearchVisitIface(Node) = .{
            .visit_fn = visit,
        },
        user_visit: UserPredicate(Context, Node),
        user_ctx: Context,
        walker: @TypeOf(walker),
        fn visit(iface: *SearchVisitIface(Node), node: Node) ?Node {
            const self = @fieldParentPtr(@This(), "visitor", iface);
            if (self.user_visit(self.user_ctx, node)) {
                return node;
            }
            // No match here: descend into children.
            return self.walker.walk(iface, node);
        }
    };
    var ctx = S{
        .user_ctx = user_ctx,
        .walker = walker,
        .user_visit = user_visit,
    };
    return ctx.visitor.visit(root);
}

/// Pre-order search over multiple roots; returns the first match, or null.
pub fn searchPreMany(comptime Context: type, ctx: Context, comptime Node: type, roots: []const Node,
    walker: anytype, pred: UserPredicate(Context, Node)) ?Node {
    for (roots) |it| {
        if (searchPre(Context, ctx, Node, it, walker, pred)) |res| {
            return res;
        }
    }
    return null;
}
|
stdx/algo/walk_recursive.zig
|
const gl = @import("gl");
const graphics = @import("../../graphics.zig");
const gpu = graphics.gpu;
pub const SwapChain = @import("swapchain.zig").SwapChain;
pub const Shader = @import("shader.zig").Shader;
const shaders = @import("shaders.zig");
pub const TexShader = shaders.TexShader;
pub const GradientShader = shaders.GradientShader;
/// Bundles the GL shader pipelines used by this backend.
pub const Pipelines = struct {
    tex: TexShader,
    gradient: GradientShader,
    /// Releases both shader programs.
    pub fn deinit(self: Pipelines) void {
        self.tex.deinit();
        self.gradient.deinit();
    }
};
/// Initializes `image` as a width x height RGBA8 GL texture and uploads
/// `data` (packed RGBA bytes) when non-null; a null `data` only allocates
/// texture storage. `linear_filter` selects GL_LINEAR over GL_NEAREST for
/// both min and mag filters. The texture is unbound again before returning.
pub fn initImage(image: *gpu.Image, width: usize, height: usize, data: ?[]const u8, linear_filter: bool) void {
    image.* = .{
        .tex_id = undefined,
        .width = width,
        .height = height,
        .inner = undefined,
        .remove = false,
    };
    gl.genTextures(1, &image.tex_id);
    gl.activeTexture(gl.GL_TEXTURE0 + 0);
    gl.bindTexture(gl.GL_TEXTURE_2D, image.tex_id);
    // A GLint specifying the level of detail. Level 0 is the base image level and level n is the nth mipmap reduction level.
    const level = 0;
    // A GLint specifying the width of the border. Usually 0.
    const border = 0;
    // Data type of the texel data.
    const data_type = gl.GL_UNSIGNED_BYTE;
    // Set the filtering so we don't need mips.
    // TEXTURE_MIN_FILTER - filter for scaled down texture
    // TEXTURE_MAG_FILTER - filter for scaled up texture
    // Linear filter is better for anti-aliased font bitmaps.
    gl.texParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, if (linear_filter) gl.GL_LINEAR else gl.GL_NEAREST);
    gl.texParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, if (linear_filter) gl.GL_LINEAR else gl.GL_NEAREST);
    gl.texParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE);
    gl.texParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE);
    // Idiomatic optional capture instead of `data != null` + `data.?`.
    const data_ptr = if (data) |d| d.ptr else null;
    gl.texImage2D(gl.GL_TEXTURE_2D, level, gl.GL_RGBA8, @intCast(c_int, width), @intCast(c_int, height), border, gl.GL_RGBA, data_type, data_ptr);
    // Unbind so later GL calls don't accidentally modify this texture.
    gl.bindTexture(gl.GL_TEXTURE_2D, 0);
}
|
graphics/src/backend/gl/graphics.zig
|
const std = @import("std");
const os = std.os;
const tests = @import("tests.zig");
// zig fmt: off
pub fn addCases(cases: *tests.StackTracesContext) void {
const source_return =
\\const std = @import("std");
\\
\\pub fn main() !void {
\\ return error.TheSkyIsFalling;
\\}
;
const source_try_return =
\\const std = @import("std");
\\
\\fn foo() !void {
\\ return error.TheSkyIsFalling;
\\}
\\
\\pub fn main() !void {
\\ try foo();
\\}
;
const source_try_try_return_return =
\\const std = @import("std");
\\
\\fn foo() !void {
\\ try bar();
\\}
\\
\\fn bar() !void {
\\ return make_error();
\\}
\\
\\fn make_error() !void {
\\ return error.TheSkyIsFalling;
\\}
\\
\\pub fn main() !void {
\\ try foo();
\\}
;
const source_dumpCurrentStackTrace =
\\const std = @import("std");
\\
\\fn bar() void {
\\ std.debug.dumpCurrentStackTrace(@returnAddress());
\\}
\\fn foo() void {
\\ bar();
\\}
\\pub fn main() u8 {
\\ foo();
\\ return 1;
\\}
;
switch (std.Target.current.os.tag) {
.freebsd => {
cases.addCase(
"return",
source_return,
[_][]const u8{
// debug
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in main (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\
,
// release-safe
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in std.start.main (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\
,
// release-fast
\\error: TheSkyIsFalling
\\
,
// release-small
\\error: TheSkyIsFalling
\\
},
);
cases.addCase(
"try return",
source_try_return,
[_][]const u8{
// debug
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in foo (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in main (test)
\\ try foo();
\\ ^
\\
,
// release-safe
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in std.start.main (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in std.start.main (test)
\\ try foo();
\\ ^
\\
,
// release-fast
\\error: TheSkyIsFalling
\\
,
// release-small
\\error: TheSkyIsFalling
\\
},
);
cases.addCase(
"try try return return",
source_try_try_return_return,
[_][]const u8{
// debug
\\error: TheSkyIsFalling
\\source.zig:12:5: [address] in make_error (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in bar (test)
\\ return make_error();
\\ ^
\\source.zig:4:5: [address] in foo (test)
\\ try bar();
\\ ^
\\source.zig:16:5: [address] in main (test)
\\ try foo();
\\ ^
\\
,
// release-safe
\\error: TheSkyIsFalling
\\source.zig:12:5: [address] in std.start.main (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in std.start.main (test)
\\ return make_error();
\\ ^
\\source.zig:4:5: [address] in std.start.main (test)
\\ try bar();
\\ ^
\\source.zig:16:5: [address] in std.start.main (test)
\\ try foo();
\\ ^
\\
,
// release-fast
\\error: TheSkyIsFalling
\\
,
// release-small
\\error: TheSkyIsFalling
\\
},
);
},
.linux => {
cases.addCase(
"return",
source_return,
[_][]const u8{
// debug
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in main (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\
,
// release-safe
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in std.start.posixCallMainAndExit (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\
,
// release-fast
\\error: TheSkyIsFalling
\\
,
// release-small
\\error: TheSkyIsFalling
\\
},
);
cases.addCase(
"try return",
source_try_return,
[_][]const u8{
// debug
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in foo (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in main (test)
\\ try foo();
\\ ^
\\
,
// release-safe
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in std.start.posixCallMainAndExit (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in std.start.posixCallMainAndExit (test)
\\ try foo();
\\ ^
\\
,
// release-fast
\\error: TheSkyIsFalling
\\
,
// release-small
\\error: TheSkyIsFalling
\\
},
);
cases.addCase(
"try try return return",
source_try_try_return_return,
[_][]const u8{
// debug
\\error: TheSkyIsFalling
\\source.zig:12:5: [address] in make_error (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in bar (test)
\\ return make_error();
\\ ^
\\source.zig:4:5: [address] in foo (test)
\\ try bar();
\\ ^
\\source.zig:16:5: [address] in main (test)
\\ try foo();
\\ ^
\\
,
// release-safe
\\error: TheSkyIsFalling
\\source.zig:12:5: [address] in std.start.posixCallMainAndExit (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in std.start.posixCallMainAndExit (test)
\\ return make_error();
\\ ^
\\source.zig:4:5: [address] in std.start.posixCallMainAndExit (test)
\\ try bar();
\\ ^
\\source.zig:16:5: [address] in std.start.posixCallMainAndExit (test)
\\ try foo();
\\ ^
\\
,
// release-fast
\\error: TheSkyIsFalling
\\
,
// release-small
\\error: TheSkyIsFalling
\\
},
);
cases.addCase(
"dumpCurrentStackTrace",
source_dumpCurrentStackTrace,
[_][]const u8{
// debug
\\source.zig:7:8: [address] in foo (test)
\\ bar();
\\ ^
\\source.zig:10:8: [address] in main (test)
\\ foo();
\\ ^
\\start.zig:341:29: [address] in std.start.posixCallMainAndExit (test)
\\ return root.main();
\\ ^
\\start.zig:162:5: [address] in std.start._start (test)
\\ @call(.{ .modifier = .never_inline }, posixCallMainAndExit, .{});
\\ ^
\\
,
// release-safe
switch (std.Target.current.cpu.arch) {
.aarch64 => "", // TODO disabled; results in segfault
else =>
\\start.zig:162:5: [address] in std.start._start (test)
\\ @call(.{ .modifier = .never_inline }, posixCallMainAndExit, .{});
\\ ^
\\
,
},
// release-fast
\\
,
// release-small
\\
},
);
},
.macos => {
cases.addCase(
"return",
source_return,
[_][]const u8{
// debug
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in main (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\
,
// release-safe
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in std.start.main (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\
,
// release-fast
\\error: TheSkyIsFalling
\\
,
// release-small
\\error: TheSkyIsFalling
\\
},
);
cases.addCase(
"try return",
source_try_return,
[_][]const u8{
// debug
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in foo (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in main (test)
\\ try foo();
\\ ^
\\
,
// release-safe
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in std.start.main (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in std.start.main (test)
\\ try foo();
\\ ^
\\
,
// release-fast
\\error: TheSkyIsFalling
\\
,
// release-small
\\error: TheSkyIsFalling
\\
},
);
cases.addCase(
"try try return return",
source_try_try_return_return,
[_][]const u8{
// debug
\\error: TheSkyIsFalling
\\source.zig:12:5: [address] in make_error (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in bar (test)
\\ return make_error();
\\ ^
\\source.zig:4:5: [address] in foo (test)
\\ try bar();
\\ ^
\\source.zig:16:5: [address] in main (test)
\\ try foo();
\\ ^
\\
,
// release-safe
\\error: TheSkyIsFalling
\\source.zig:12:5: [address] in std.start.main (test)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in std.start.main (test)
\\ return make_error();
\\ ^
\\source.zig:4:5: [address] in std.start.main (test)
\\ try bar();
\\ ^
\\source.zig:16:5: [address] in std.start.main (test)
\\ try foo();
\\ ^
\\
,
// release-fast
\\error: TheSkyIsFalling
\\
,
// release-small
\\error: TheSkyIsFalling
\\
},
);
},
.windows => {
cases.addCase(
"return",
source_return,
[_][]const u8{
// debug
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in main (test.obj)
\\ return error.TheSkyIsFalling;
\\ ^
\\
,
// release-safe
// --disabled-- results in segmenetation fault
"",
// release-fast
\\error: TheSkyIsFalling
\\
,
// release-small
\\error: TheSkyIsFalling
\\
},
);
cases.addCase(
"try return",
source_try_return,
[_][]const u8{
// debug
\\error: TheSkyIsFalling
\\source.zig:4:5: [address] in foo (test.obj)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in main (test.obj)
\\ try foo();
\\ ^
\\
,
// release-safe
// --disabled-- results in segmenetation fault
"",
// release-fast
\\error: TheSkyIsFalling
\\
,
// release-small
\\error: TheSkyIsFalling
\\
},
);
cases.addCase(
"try try return return",
source_try_try_return_return,
[_][]const u8{
// debug
\\error: TheSkyIsFalling
\\source.zig:12:5: [address] in make_error (test.obj)
\\ return error.TheSkyIsFalling;
\\ ^
\\source.zig:8:5: [address] in bar (test.obj)
\\ return make_error();
\\ ^
\\source.zig:4:5: [address] in foo (test.obj)
\\ try bar();
\\ ^
\\source.zig:16:5: [address] in main (test.obj)
\\ try foo();
\\ ^
\\
,
// release-safe
// --disabled-- results in segmenetation fault
"",
// release-fast
\\error: TheSkyIsFalling
\\
,
// release-small
\\error: TheSkyIsFalling
\\
},
);
},
else => {},
}
}
// zig fmt: off
|
test/stack_traces.zig
|
const std = @import("std");
const DynamicArray = @import("./dynamic_array.zig").DynamicArray;
const Value = @import("./value.zig").Value;
const Allocator = std.mem.Allocator;
const expect = std.testing.expect;
/// Bytecode instruction opcodes, stored as single bytes in a Chunk.
pub const OpCode = enum(u8) {
    const Self = @This();
    op_constant,
    op_negate,
    op_add,
    op_subtract,
    op_multiply,
    op_divide,
    op_return,
    /// Returns the raw byte value of this opcode.
    pub fn toU8(self: Self) u8 {
        return @enumToInt(self);
    }
    /// Converts a raw byte back into an opcode.
    /// Caller must ensure `n` is a valid opcode value.
    pub fn fromU8(n: u8) Self {
        return @intToEnum(Self, n);
    }
    /// Number of operand bytes that follow this opcode in the bytecode stream.
    pub fn operands(self: *Self) usize {
        // Fix: switch on the pointee, not the pointer. `switch (self)` where
        // `self` is `*Self` is not a valid switch operand.
        return switch (self.*) {
            .op_constant => 1,
            else => 0,
        };
    }
};
/// A run of bytecode with its constant pool and per-byte source-line info.
pub const Chunk = struct {
    const Self = @This();
    const BytesArray = DynamicArray(u8);
    const ValueArray = DynamicArray(Value);
    const LinesArray = DynamicArray(usize);
    /// Raw bytecode stream.
    code: BytesArray,
    /// Constant pool referenced by `op_constant` operands.
    constants: ValueArray,
    /// Source line for each byte in `code` (parallel array).
    lines: LinesArray,
    /// Creates an empty chunk. Call `deinit` to release its memory.
    /// (Consistency: use `Self` in signatures like the other methods.)
    pub fn init(allocator: *Allocator) Self {
        return Self{
            .code = BytesArray.init(allocator),
            .constants = ValueArray.init(allocator),
            .lines = LinesArray.init(allocator),
        };
    }
    /// Frees all memory owned by this chunk.
    pub fn deinit(self: *Self) void {
        self.code.deinit();
        self.constants.deinit();
        self.lines.deinit();
    }
    /// Appends one bytecode byte and records the source line it came from.
    pub fn write(self: *Self, byte: u8, line: usize) void {
        self.code.appendItem(byte);
        self.lines.appendItem(line);
    }
    /// Adds `value` to the constant pool and returns its index.
    /// NOTE(review): the @intCast fails if more than 65536 constants are
    /// added -- confirm whether an error return is wanted instead.
    pub fn addConstant(self: *Self, value: Value) u16 {
        self.constants.appendItem(value);
        return @intCast(u16, self.constants.count - 1);
    }
};
test "create a Chunk with bytes only" {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer {
        const leaked = gpa.deinit();
        if (leaked) expect(false) catch @panic("The list is leaking");
    }
    // Fix: bind the allocator to a variable instead of taking the address of
    // the temporary returned by `gpa.allocator()`, which would dangle.
    var allocator = gpa.allocator();
    var chunk = Chunk.init(&allocator);
    // Fix: the original also called `chunk.deinit()` explicitly at the end,
    // which together with this `defer` freed the chunk twice.
    defer chunk.deinit();
    chunk.write(OpCode.op_return.toU8(), 1);
    try expect(chunk.code.items[0] == OpCode.op_return.toU8());
    chunk.write(OpCode.op_return.toU8(), 1);
    chunk.write(OpCode.op_return.toU8(), 1);
    chunk.write(OpCode.op_return.toU8(), 1);
    chunk.write(OpCode.op_return.toU8(), 1);
    chunk.write(OpCode.op_return.toU8(), 1);
    try expect(chunk.code.items[4] == OpCode.op_return.toU8());
}
|
src/chunk.zig
|
const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const expect = testing.expect;
const expectEqual = testing.expectEqual;
// A plain integer should coerce to an optional parameter and unwrap with `.?`.
test "passing an optional integer as a parameter" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    const S = struct {
        fn entry() bool {
            var x: i32 = 1234;
            return foo(x);
        }
        fn foo(x: ?i32) bool {
            return x.? == 1234;
        }
    };
    try expect(S.entry());
    comptime try expect(S.entry());
}
pub const EmptyStruct = struct {};
// An optional pointer to a zero-sized struct must still compare non-null.
test "optional pointer to size zero struct" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    var e = EmptyStruct{};
    var o: ?*EmptyStruct = &e;
    try expect(o != null);
}
// Optional pointers compare by value, both at runtime and at comptime.
test "equality compare optional pointers" {
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    try testNullPtrsEql();
    comptime try testNullPtrsEql();
}
// Helper: exercises ==/!= between two optional pointers and between an
// optional pointer and a plain pointer, through null and non-null states.
fn testNullPtrsEql() !void {
    var number: i32 = 1234;
    var x: ?*i32 = null;
    var y: ?*i32 = null;
    try expect(x == y);
    y = &number;
    try expect(x != y);
    try expect(x != &number);
    try expect(&number != x);
    x = &number;
    try expect(x == y);
    try expect(x == &number);
    try expect(&number == x);
}
// `?void` is representable as a struct field and compares against null.
test "optional with void type" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    const Foo = struct {
        x: ?void,
    };
    var x = Foo{ .x = null };
    try expect(x.x == null);
}
// Taking the address of an unwrapped optional (`&global.?`) yields a pointer
// to the payload inside the optional.
test "address of unwrap optional" {
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    const S = struct {
        const Foo = struct {
            a: i32,
        };
        var global: ?Foo = null;
        pub fn getFoo() anyerror!*Foo {
            return &global.?;
        }
    };
    S.global = S.Foo{ .a = 1234 };
    const foo = S.getFoo() catch unreachable;
    try expect(foo.a == 1234);
}
// A struct literal initializes an optional struct field, readable via `.?`.
test "nested optional field in struct" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    const S2 = struct {
        y: u8,
    };
    const S1 = struct {
        x: ?S2,
    };
    var s = S1{
        .x = S2{ .y = 127 },
    };
    try expect(s.x.?.y == 127);
}
// ==/!= between `?T` and `T` works, with defined evaluation order.
test "equality compare optional with non-optional" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    try test_cmp_optional_non_optional();
    comptime try test_cmp_optional_non_optional();
}
// Helper: compares optionals against plain values, then verifies that both
// operands of != are evaluated left-to-right regardless of which side is
// the optional.
fn test_cmp_optional_non_optional() !void {
    var ten: i32 = 10;
    var opt_ten: ?i32 = 10;
    var five: i32 = 5;
    var int_n: ?i32 = null;
    try expect(int_n != ten);
    try expect(opt_ten == ten);
    try expect(opt_ten != five);
    // test evaluation is always lexical
    // ensure that the optional isn't always computed before the non-optional
    var mutable_state: i32 = 0;
    _ = blk1: {
        mutable_state += 1;
        break :blk1 @as(?f64, 10.0);
    } != blk2: {
        try expect(mutable_state == 1);
        break :blk2 @as(f64, 5.0);
    };
    _ = blk1: {
        mutable_state += 1;
        break :blk1 @as(f64, 10.0);
    } != blk2: {
        try expect(mutable_state == 2);
        break :blk2 @as(?f64, 5.0);
    };
}
// `.?` can be applied directly to a function call result, and `.*` chained
// after it when the payload is a pointer.
test "unwrap function call with optional pointer return value" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    const S = struct {
        fn entry() !void {
            try expect(foo().?.* == 1234);
            try expect(bar() == null);
        }
        const global: i32 = 1234;
        fn foo() ?*const i32 {
            return &global;
        }
        fn bar() ?*i32 {
            return null;
        }
    };
    try S.entry();
    comptime try S.entry();
}
// Chained `orelse` expressions where the fallback is a `return`.
test "nested orelse" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    const S = struct {
        fn entry() !void {
            try expect(func() == null);
        }
        fn maybe() ?Foo {
            return null;
        }
        fn func() ?Foo {
            const x = maybe() orelse
                maybe() orelse
                return null;
            _ = x;
            unreachable;
        }
        const Foo = struct {
            field: i32,
        };
    };
    try S.entry();
    comptime try S.entry();
}
// A struct may refer to itself through a slice of optionals of itself.
test "self-referential struct through a slice of optional" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    const S = struct {
        const Node = struct {
            children: []?Node,
            data: ?u8,
            fn new() Node {
                return Node{
                    .children = undefined,
                    .data = null,
                };
            }
        };
    };
    var n = S.Node.new();
    try expect(n.data == null);
}
// `opt.? = value` assignment works on a comptime var inside `inline for`.
test "assigning to an unwrapped optional field in an inline loop" {
    comptime var maybe_pos_arg: ?comptime_int = null;
    inline for ("ab") |x| {
        _ = x;
        maybe_pos_arg = 0;
        if (maybe_pos_arg.? != 0) {
            @compileError("bad");
        }
        maybe_pos_arg.? = 10;
    }
}
// An anonymous struct literal `.{ ... }` coerces when assigned to `?Struct`.
test "coerce an anon struct literal to optional struct" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    const S = struct {
        const Struct = struct {
            field: u32,
        };
        fn doTheTest() !void {
            var maybe_dims: ?Struct = null;
            maybe_dims = .{ .field = 1 };
            try expect(maybe_dims.?.field == 1);
        }
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}
// Returning a 0-bit struct into an optional result location must produce
// a non-null optional.
test "0-bit child type coerced to optional return ptr result location" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            var y = Foo{};
            var z = y.thing();
            try expect(z != null);
        }
        const Foo = struct {
            pub const Bar = struct {
                field: *Foo,
            };
            pub fn thing(self: *Foo) ?Bar {
                return Bar{ .field = self };
            }
        };
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}
// A pointer to a 0-bit element coerces to an optional pointer and is non-null.
test "0-bit child type coerced to optional" {
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest() !void {
            var it: Foo = .{
                .list = undefined,
            };
            try expect(it.foo() != null);
        }
        const Empty = struct {};
        const Foo = struct {
            list: [10]Empty,
            fn foo(self: *Foo) ?*Empty {
                const data = &self.list[0];
                return data;
            }
        };
    };
    try S.doTheTest();
    comptime try S.doTheTest();
}
// Runtime indexing into an array of optional tagged unions; the unions have
// no natural alignment padding, hence "unaligned".
test "array of optional unaligned types" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    const Enum = enum { one, two, three };
    const SomeUnion = union(enum) {
        Num: Enum,
        Other: u32,
    };
    const values = [_]?SomeUnion{
        SomeUnion{ .Num = .one },
        SomeUnion{ .Num = .two },
        SomeUnion{ .Num = .three },
        SomeUnion{ .Num = .one },
        SomeUnion{ .Num = .two },
        SomeUnion{ .Num = .three },
    };
    // The index must be a runtime value
    var i: usize = 0;
    try expect(Enum.one == values[i].?.Num);
    i += 1;
    try expect(Enum.two == values[i].?.Num);
    i += 1;
    try expect(Enum.three == values[i].?.Num);
    i += 1;
    try expect(Enum.one == values[i].?.Num);
    i += 1;
    try expect(Enum.two == values[i].?.Num);
    i += 1;
    try expect(Enum.three == values[i].?.Num);
}
|
test/behavior/optional.zig
|
const std = @import("std");
const process = std.process;
const mem = std.mem;
const bog = @import("bog.zig");
const repl = bog.repl;
const is_debug = @import("builtin").mode == .Debug;
var state = std.heap.GeneralPurposeAllocator(.{}){};
/// Entry point. Dispatches on the first CLI argument:
/// `fmt` -> formatter, `help`/`--help` -> usage text, `debug:*` -> debug
/// tools (debug builds only), any non-flag argument -> run it as a script.
/// With no usable argument, starts the REPL on stdin/stdout.
pub fn main() !void {
    const gpa = &state.allocator;
    const args = try process.argsAlloc(gpa);
    defer process.argsFree(gpa, args);
    if (args.len > 1) {
        if (mem.eql(u8, args[1], "fmt")) {
            return fmt(gpa, args[2..]);
        }
        if (mem.eql(u8, args[1], "help") or mem.eql(u8, args[1], "--help")) {
            return help();
        }
        if (is_debug) {
            if (mem.eql(u8, args[1], "debug:dump")) {
                return debugDump(gpa, args[2..]);
            }
            if (mem.eql(u8, args[1], "debug:tokens")) {
                return debugTokens(gpa, args[2..]);
            }
            if (mem.eql(u8, args[1], "debug:write")) {
                return debugWrite(gpa, args[2..]);
            }
            if (mem.eql(u8, args[1], "debug:read")) {
                return debugRead(gpa, args[2..]);
            }
        }
        // Fix: haystack/needle were swapped. `startsWith(u8, a, b)` asks
        // whether `a` starts with `b`; the original asked whether "-" starts
        // with args[1], so flag-like arguments (e.g. "--foo") were wrongly
        // treated as script file names.
        if (!mem.startsWith(u8, args[1], "-")) {
            return run(gpa, args[1..]);
        }
    }
    // Fix: keep the buffered stream in a variable. Calling `.reader()` on the
    // temporary returned by `bufferedInStream(...)` would hand out a pointer
    // to a value that dies at the end of the statement.
    var buffered = std.io.bufferedInStream(std.io.getStdIn().reader());
    const in = buffered.reader();
    var stdout = std.io.getStdOut().writer();
    try repl.run(gpa, in, stdout);
}
// Top-level usage text printed by `help`.
const usage =
    \\usage: bog [command] [options] [-- [args]]
    \\
    \\Commands:
    \\
    \\  fmt [source]            Parse file and render it
    \\  run [source]            Run file
    \\
    \\
;
/// Prints the usage text to stdout and exits with status 0.
fn help() !void {
    try std.io.getStdOut().writer().writeAll(usage);
    process.exit(0);
}
/// Executes the script file `args[0]` in a fresh bog VM, exposing the full
/// argument list to the script via the lazily-built "args" import.
/// Accepts either bog source or compiled bytecode (detected by magic bytes).
/// The script's return value determines the process exit status.
fn run(gpa: *std.mem.Allocator, args: [][]const u8) !void {
    std.debug.assert(args.len > 0);
    const file_name = args[0];
    var vm = bog.Vm.init(gpa, .{ .import_files = true });
    defer vm.deinit();
    try vm.addStd();
    const S = struct {
        var _args: [][]const u8 = undefined;
        // Builds a bog list of the command-line arguments on demand.
        fn argsToBog(_vm: *bog.Vm) bog.Vm.Error!*bog.Value {
            const ret = try _vm.gc.alloc();
            ret.* = .{ .list = .{} };
            var list = &ret.list;
            errdefer list.deinit(_vm.gc.gpa);
            try list.ensureCapacity(_vm.gc.gpa, _args.len);
            for (_args) |arg| {
                const str = try _vm.gc.alloc();
                str.* = bog.Value.string(arg);
                list.appendAssumeCapacity(str);
            }
            return ret;
        }
    };
    S._args = args[0..];
    try vm.imports.putNoClobber(vm.gc.gpa, "args", S.argsToBog);
    const source = std.fs.cwd().readFileAlloc(gpa, file_name, 1024 * 1024) catch |e| switch (e) {
        error.OutOfMemory => return error.OutOfMemory,
        else => |err| {
            fatal("unable to open '{}': {}", .{ file_name, err });
        },
    };
    defer gpa.free(source);
    var module = bog.Module.read(source) catch |e| switch (e) {
        // not a bog bytecode file
        error.InvalidMagic => null,
        else => |err| fatal("cannot execute file '{}': {}", .{ file_name, err }),
    };
    // TODO this doesn't cast nicely for some reason
    const res_with_err = (if (module) |*some| blk: {
        vm.ip = some.entry;
        break :blk vm.exec(some);
    } else
        vm.run(source));
    const res = res_with_err catch |e| switch (e) {
        error.TokenizeError, error.ParseError, error.CompileError, error.RuntimeError => {
            vm.errors.render(source, std.io.getStdErr().writer()) catch {};
            process.exit(1);
        },
        error.MalformedByteCode => if (is_debug) @panic("malformed") else fatal("attempted to execute invalid bytecode", .{}),
        error.OutOfMemory => return error.OutOfMemory,
    };
    switch (res.*) {
        .int => |int| {
            // Fix: off-by-one -- `<` rejected 255, which is a valid u8 exit
            // status. The valid range is 0..maxInt(u8) inclusive.
            if (int >= 0 and int <= std.math.maxInt(u8)) {
                process.exit(@intCast(u8, int));
            } else {
                fatal("invalid exit code: {}", .{int});
            }
        },
        .err => |err| {
            const stderr = std.io.getStdErr().writer();
            try stderr.writeAll("script exited with error: ");
            try err.dump(stderr, 4);
            try stderr.writeAll("\n");
            process.exit(1);
        },
        .none => {},
        else => fatal("invalid return type '{}'", .{@tagName(res.*)}),
    }
}
const usage_fmt =
    \\usage: bog fmt [file]...
    \\
    \\  Formats the input files.
    \\
;
/// Formats every file given on the command line.
/// Exits with status 1 if any file failed to format cleanly.
fn fmt(gpa: *std.mem.Allocator, args: [][]const u8) !void {
    if (args.len == 0) fatal("expected at least one file", .{});
    var failed = false;
    for (args) |path| {
        // Always run the formatter, even after a previous failure.
        if (try fmtFile(gpa, path)) failed = true;
    }
    if (failed) process.exit(1);
}
// Everything formatting can fail with: allocation, file/dir access, reading,
// writing, plus EndOfStream from the reader.
const FmtError = std.mem.Allocator.Error || std.fs.File.OpenError || std.fs.File.Writer.Error ||
    std.fs.Dir.OpenError || std.fs.File.GetPosError || std.fs.Dir.Iterator.Error ||
    std.fs.File.Reader.Error || error{EndOfStream};
/// Formats `name` in place. If `name` is a directory, recurses into every
/// entry that is itself a directory or has the bog extension.
/// Returns true when a file failed to parse (its errors are rendered to
/// stderr and the file is left untouched).
fn fmtFile(gpa: *std.mem.Allocator, name: []const u8) FmtError!bool {
    const source = std.fs.cwd().readFileAlloc(gpa, name, 1024 * 1024) catch |e| switch (e) {
        error.OutOfMemory => return error.OutOfMemory,
        error.IsDir => {
            // `name` is a directory: iterate it and format the children.
            var dir = std.fs.cwd().openDir(name, .{ .iterate = true }) catch |e2| {
                try std.io.getStdErr().writer().print("unable to open '{}': {}\n", .{ name, e2 });
                return e2;
            };
            var any_err = false;
            defer dir.close();
            var it = dir.iterate();
            while (try it.next()) |entry| if (entry.kind == .Directory or std.mem.endsWith(u8, entry.name, bog.extension)) {
                const full_path = try std.fs.path.join(gpa, &[_][]const u8{ name, entry.name });
                defer gpa.free(full_path);
                // Keep going after a failure so all files get formatted.
                any_err = (try fmtFile(gpa, full_path)) or any_err;
            };
            return any_err;
        },
        else => |err| {
            try std.io.getStdErr().writer().print("unable to open '{}': {}\n", .{ name, err });
            return err;
        },
    };
    defer gpa.free(source);
    var errors = bog.Errors.init(gpa);
    defer errors.deinit();
    var tree = bog.parse(gpa, source, &errors) catch |e| switch (e) {
        error.TokenizeError, error.ParseError => {
            try errors.render(source, std.io.getStdErr().writer());
            return true;
        },
        error.OutOfMemory => return error.OutOfMemory,
    };
    defer tree.deinit();
    // Rewrite the file with the rendered tree.
    const file = try std.fs.cwd().createFile(name, .{});
    defer file.close();
    // TODO add check mode
    _ = try tree.render(file.writer());
    return false;
}
/// Prints a formatted message (plus newline) to stderr, then terminates the
/// process with exit status 1. Printing failures are deliberately ignored.
fn fatal(comptime msg: []const u8, args: anytype) noreturn {
    const stderr = std.io.getStdErr().writer();
    stderr.print(msg ++ "\n", args) catch {};
    process.exit(1);
}
/// Returns the single expected file argument; otherwise prints `usage_arg`
/// and terminates the process via `fatal`.
fn getFileName(usage_arg: []const u8, args: [][]const u8) []const u8 {
    if (args.len == 1) return args[0];
    fatal("{}", .{usage_arg});
}
// Shared usage text for the single-file debug subcommands.
const usage_debug =
    \\usage: bog debug:[command] [file]
    \\
;
/// Compiles the given source file and dumps the resulting module's bytecode
/// to stdout. Compile errors are rendered to stderr and exit with status 1.
fn debugDump(gpa: *std.mem.Allocator, args: [][]const u8) !void {
    const file_name = getFileName(usage_debug, args);
    const source = std.fs.cwd().readFileAlloc(gpa, file_name, 1024 * 1024) catch |e| switch (e) {
        error.OutOfMemory => return error.OutOfMemory,
        else => |err| {
            fatal("unable to open '{}': {}", .{ file_name, err });
        },
    };
    defer gpa.free(source);
    var errors = bog.Errors.init(gpa);
    defer errors.deinit();
    var module = bog.compile(gpa, source, &errors) catch |e| switch (e) {
        error.OutOfMemory => return error.OutOfMemory,
        error.TokenizeError, error.ParseError, error.CompileError => {
            try errors.render(source, std.io.getStdErr().writer());
            process.exit(1);
        },
    };
    defer module.deinit(gpa);
    try module.dump(gpa, std.io.getStdOut().writer());
}
/// Tokenizes the given source file and prints one line per token to stdout.
/// Tokenize errors are rendered to stderr and exit with status 1.
fn debugTokens(gpa: *std.mem.Allocator, args: [][]const u8) !void {
    const file_name = getFileName(usage_debug, args);
    const source = std.fs.cwd().readFileAlloc(gpa, file_name, 1024 * 1024) catch |e| switch (e) {
        error.OutOfMemory => return error.OutOfMemory,
        else => |err| {
            fatal("unable to open '{}': {}", .{ file_name, err });
        },
    };
    defer gpa.free(source);
    var errors = bog.Errors.init(gpa);
    defer errors.deinit();
    const tokens = bog.tokenize(gpa, source, &errors) catch |e| switch (e) {
        error.OutOfMemory => return error.OutOfMemory,
        error.TokenizeError => {
            try errors.render(source, std.io.getStdErr().writer());
            process.exit(1);
        },
    };
    defer gpa.free(tokens);
    const stream = std.io.getStdOut().writer();
    for (tokens) |tok| {
        switch (tok.id) {
            .Nl, .Eof => try stream.print("{}\n", .{@tagName(tok.id)}),
            // NOTE(review): capturing |level| implies `tok.id` is a
            // union(enum) whose Indent variant carries the indentation
            // level -- confirm against the tokenizer's Token definition.
            .Indent => |level| try stream.print("{} {}\n", .{ @tagName(tok.id), level }),
            // Everything else: print the tag and the source text it spans.
            else => try stream.print("{} |{}|\n", .{ @tagName(tok.id), source[tok.start..tok.end] }),
        }
    }
}
// Usage text for the two-argument debug:write subcommand.
const usage_debug_write =
    \\usage: bog debug:write [file] [out]
    \\
;
/// Compiles args[0] and writes the serialized module bytecode to args[1].
/// Compile errors are rendered to stderr and exit with status 1.
fn debugWrite(gpa: *std.mem.Allocator, args: [][]const u8) !void {
    if (args.len != 2) {
        fatal("{}", .{usage_debug_write});
    }
    const file_name = args[0];
    const source = std.fs.cwd().readFileAlloc(gpa, file_name, 1024 * 1024) catch |e| switch (e) {
        error.OutOfMemory => return error.OutOfMemory,
        else => |err| {
            fatal("unable to open '{}': {}", .{ file_name, err });
        },
    };
    defer gpa.free(source);
    var errors = bog.Errors.init(gpa);
    defer errors.deinit();
    var module = bog.compile(gpa, source, &errors) catch |e| switch (e) {
        error.OutOfMemory => return error.OutOfMemory,
        error.TokenizeError, error.ParseError, error.CompileError => {
            try errors.render(source, std.io.getStdErr().writer());
            process.exit(1);
        },
    };
    defer module.deinit(gpa);
    const file = try std.fs.cwd().createFile(args[1], .{});
    defer file.close();
    try module.write(file.writer());
}
/// Reads an already-compiled bytecode module from disk and dumps it
/// to stdout.
fn debugRead(gpa: *std.mem.Allocator, args: [][]const u8) !void {
    const file_name = getFileName(usage_debug, args);
    const source = std.fs.cwd().readFileAlloc(gpa, file_name, 1024 * 1024) catch |e| switch (e) {
        error.OutOfMemory => return error.OutOfMemory,
        else => |err| {
            fatal("unable to open '{}': {}", .{ file_name, err });
        },
    };
    defer gpa.free(source);
    const module = try bog.Module.read(source);
    try module.dump(gpa, std.io.getStdOut().writer());
}
// Force semantic analysis of `main` even when this file is pulled in as a
// dependency (e.g. by a test build).
comptime {
    _ = main;
}
|
src/main.zig
|
const std = @import("std");
const Engine = @import("Engine.zig");
const Dependency = @import("Dependency.zig");
const Project = @import("Project.zig");
const utils = @import("utils.zig");
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
pub const name = "local";
// For local dependencies, the "resolution" is simply the on-disk path.
pub const Resolution = []const u8;
pub const ResolutionEntry = struct {
    path: []const u8,
    root: []const u8,
    // Index into the dependency table once resolved; null until then.
    dep_idx: ?usize = null,
};
// Errors fetching a local dependency can produce: the error sets of the
// fs/path/Project calls used below, plus a placeholder `Todo`.
pub const FetchError = error{Todo} ||
    @typeInfo(@typeInfo(@TypeOf(std.fs.Dir.openDir)).Fn.return_type.?).ErrorUnion.error_set ||
    @typeInfo(@typeInfo(@TypeOf(std.fs.path.join)).Fn.return_type.?).ErrorUnion.error_set ||
    @typeInfo(@typeInfo(@TypeOf(Project.fromDirPath)).Fn.return_type.?).ErrorUnion.error_set;
const FetchQueue = Engine.MultiQueueImpl(Resolution, FetchError);
const ResolutionTable = std.ArrayListUnmanaged(ResolutionEntry);
/// local source types should never be in the lockfile
/// Always fails with `error.LocalsDontLock`; the parameters exist only to
/// satisfy the source-type interface.
pub fn deserializeLockfileEntry(
    allocator: *Allocator,
    it: *std.mem.TokenIterator(u8),
    resolutions: *ResolutionTable,
) !void {
    // TODO: warn but continue processing lockfile
    _ = allocator;
    _ = it;
    _ = resolutions;
    return error.LocalsDontLock;
}
/// does nothing because we don't lock local source types
/// Intentionally a no-op; parameters are discarded.
pub fn serializeResolutions(
    resolutions: []const ResolutionEntry,
    writer: anytype,
) !void {
    _ = resolutions;
    _ = writer;
}
/// Resolves the local dependency for fetch-queue slot `i`: reads the
/// dependency's gyro.zzz project file, records the package root path, and
/// queues the project's own dependencies for fetching.
pub fn dedupeResolveAndFetch(
    dep_table: []const Dependency.Source,
    resolutions: []const ResolutionEntry,
    fetch_queue: *FetchQueue,
    i: usize,
) FetchError!void {
    _ = resolutions;
    const arena = &fetch_queue.items(.arena)[i];
    const dep = &dep_table[fetch_queue.items(.edge)[i].to].local;
    var base_dir = try std.fs.cwd().openDir(dep.path, .{});
    defer base_dir.close();
    // createFile with truncate=false/exclusive=false: opens the existing
    // project file if present, otherwise creates an empty one.
    const project_file = try base_dir.createFile("gyro.zzz", .{
        .read = true,
        .truncate = false,
        .exclusive = false,
    });
    defer project_file.close();
    // Project text lives in the per-slot arena; freed in bulk later.
    const text = try project_file.reader().readAllAlloc(&arena.allocator, std.math.maxInt(usize));
    const project = try Project.fromUnownedText(arena.child_allocator, dep.path, text);
    defer project.destroy();
    // TODO: resolve path when default root
    const root = dep.root orelse utils.default_root;
    fetch_queue.items(.path)[i] = try utils.joinPathConvertSep(arena, &.{ dep.path, root });
    try fetch_queue.items(.deps)[i].appendSlice(arena.child_allocator, project.deps.items);
}
/// No-op for local dependencies: they are never recorded in the lockfile,
/// so there is no resolution state to update. Parameters are discarded.
pub fn updateResolution(
    allocator: *Allocator,
    resolutions: *ResolutionTable,
    dep_table: []const Dependency.Source,
    fetch_queue: *FetchQueue,
    i: usize,
) !void {
    _ = allocator;
    _ = resolutions;
    _ = dep_table;
    _ = fetch_queue;
    _ = i;
}
|
src/local.zig
|
const zang = @import("zang");
const common = @import("common.zig");
const c = @import("common/c.zig");
const PhaseModOscillator = @import("modules.zig").PhaseModOscillator;
// Audio configuration consumed by the shared example harness.
pub const AUDIO_FORMAT: zang.AudioFormat = .signed16_lsb;
pub const AUDIO_SAMPLE_RATE = 48000;
pub const AUDIO_BUFFER_SIZE = 1024;
// Help text the harness displays for this example.
pub const DESCRIPTION =
    \\example_mouse
    \\
    \\Play a phase-modulation instrument with the keyboard,
    \\while controlling the sound parameters with the mouse
    \\position.
    \\
    \\Press spacebar to toggle between relative (the
    \\default) and absolute modulator frequency.
;
// Concert pitch A4 in Hz; key frequencies are derived relative to it.
const a4 = 440.0;
/// A phase-modulation voice: a PhaseModOscillator whose output is shaped
/// by an envelope. Produces one output channel and needs three temp buffers.
const PMOscInstrument = struct {
    pub const num_outputs = 1;
    pub const num_temps = 3;
    pub const Params = struct {
        sample_rate: f32,
        // Carrier frequency in Hz.
        freq: f32,
        // Gate: true while the key is held.
        note_on: bool,
        // Passed through to the oscillator; toggles relative vs absolute
        // modulator frequency (see PhaseModOscillator).
        relative: bool,
        // Per-sample control buffers forwarded to the oscillator.
        ratio: []const f32,
        multiplier: []const f32,
    };
    osc: PhaseModOscillator,
    env: zang.Envelope,
    pub fn init() PMOscInstrument {
        return .{
            .osc = PhaseModOscillator.init(),
            .env = zang.Envelope.init(),
        };
    }
    /// Renders one span: oscillator into temps[0], envelope into temps[1],
    /// then multiplies them into outputs[0].
    pub fn paint(
        self: *PMOscInstrument,
        span: zang.Span,
        outputs: [num_outputs][]f32,
        temps: [num_temps][]f32,
        note_id_changed: bool,
        params: Params,
    ) void {
        zang.zero(span, temps[0]);
        self.osc.paint(span, .{temps[0]}, .{ temps[1], temps[2] }, note_id_changed, .{
            .sample_rate = params.sample_rate,
            .freq = params.freq,
            .relative = params.relative,
            .ratio = zang.buffer(params.ratio),
            .multiplier = zang.buffer(params.multiplier),
        });
        zang.zero(span, temps[1]);
        self.env.paint(span, .{temps[1]}, .{}, note_id_changed, .{
            .sample_rate = params.sample_rate,
            .attack = .{ .cubed = 0.025 },
            .decay = .{ .cubed = 0.1 },
            .release = .{ .cubed = 1.0 },
            .sustain_volume = 0.5,
            .note_on = params.note_on,
        });
        // Envelope-scaled oscillator output.
        zang.multiply(span, outputs[0], temps[0], temps[1]);
    }
};
/// Top-level module the example harness drives: one PM instrument voice plus
/// two portamento-smoothed mouse controls (ratio and multiplier).
pub const MainModule = struct {
    pub const num_outputs = 1;
    pub const num_temps = 5;
    pub const output_audio = common.AudioOut{ .mono = 0 };
    pub const output_visualize = 0;
    // The keyboard-driven instrument voice.
    const Instr = struct {
        const Params = struct { freq: f32, note_on: bool };
        iq: zang.Notes(Params).ImpulseQueue,
        idgen: zang.IdGenerator,
        mod: PMOscInstrument,
        trig: zang.Trigger(Params),
    };
    // A mouse-driven control value smoothed by a Portamento module.
    const Porta = struct {
        const Params = struct { value: f32, note_on: bool };
        iq: zang.Notes(Params).ImpulseQueue,
        idgen: zang.IdGenerator,
        mod: zang.Portamento,
        trig: zang.Trigger(Params),
    };
    // Currently held key (null when no key is down).
    key: ?i32,
    // True until the first paint; used to push the initial mode impulse.
    first: bool,
    // 0 = relative modulator frequency, 1 = absolute (toggled by spacebar).
    mode: u32,
    mode_iq: zang.Notes(u32).ImpulseQueue,
    mode_idgen: zang.IdGenerator,
    instr: Instr,
    ratio: Porta,
    multiplier: Porta,
    pub fn init() MainModule {
        return .{
            .key = null,
            .first = true,
            .mode = 0,
            .mode_iq = zang.Notes(u32).ImpulseQueue.init(),
            .mode_idgen = zang.IdGenerator.init(),
            .instr = .{
                .iq = zang.Notes(Instr.Params).ImpulseQueue.init(),
                .idgen = zang.IdGenerator.init(),
                .mod = PMOscInstrument.init(),
                .trig = zang.Trigger(Instr.Params).init(),
            },
            .ratio = .{
                .iq = zang.Notes(Porta.Params).ImpulseQueue.init(),
                .idgen = zang.IdGenerator.init(),
                .mod = zang.Portamento.init(),
                .trig = zang.Trigger(Porta.Params).init(),
            },
            .multiplier = .{
                .iq = zang.Notes(Porta.Params).ImpulseQueue.init(),
                .idgen = zang.IdGenerator.init(),
                .mod = zang.Portamento.init(),
                .trig = zang.Trigger(Porta.Params).init(),
            },
        };
    }
    /// Renders one span: smoothed ratio into temps[0], smoothed multiplier
    /// into temps[1], then the instrument (consuming those two buffers plus
    /// temps[2..4] as scratch) into outputs[0].
    pub fn paint(
        self: *MainModule,
        span: zang.Span,
        outputs: [num_outputs][]f32,
        temps: [num_temps][]f32,
    ) void {
        if (self.first) {
            self.first = false;
            self.mode_iq.push(0, self.mode_idgen.nextId(), self.mode);
        }
        // ratio
        zang.zero(span, temps[0]);
        {
            var ctr = self.ratio.trig.counter(span, self.ratio.iq.consume());
            while (self.ratio.trig.next(&ctr)) |result| {
                self.ratio.mod.paint(
                    result.span,
                    .{temps[0]},
                    .{},
                    result.note_id_changed,
                    .{
                        .sample_rate = AUDIO_SAMPLE_RATE,
                        .curve = .{ .linear = 0.1 },
                        // Mouse x scales differently per mode: relative
                        // ratio (x4) vs absolute frequency in Hz (x880).
                        .goal = switch (self.mode) {
                            0 => result.params.value * 4.0,
                            else => result.params.value * 880.0,
                        },
                        .note_on = result.params.note_on,
                        .prev_note_on = true,
                    },
                );
            }
        }
        // multiplier
        zang.zero(span, temps[1]);
        {
            const iap = self.multiplier.iq.consume();
            var ctr = self.multiplier.trig.counter(span, iap);
            while (self.multiplier.trig.next(&ctr)) |result| {
                self.multiplier.mod.paint(
                    result.span,
                    .{temps[1]},
                    .{},
                    result.note_id_changed,
                    .{
                        .sample_rate = AUDIO_SAMPLE_RATE,
                        .curve = .{ .linear = 0.1 },
                        .goal = result.params.value * 2.0,
                        .note_on = result.params.note_on,
                        .prev_note_on = true,
                    },
                );
            }
        }
        // instr
        {
            var ctr = self.instr.trig.counter(span, self.instr.iq.consume());
            while (self.instr.trig.next(&ctr)) |result| {
                self.instr.mod.paint(
                    result.span,
                    outputs,
                    .{ temps[2], temps[3], temps[4] },
                    result.note_id_changed,
                    .{
                        .sample_rate = AUDIO_SAMPLE_RATE,
                        .freq = result.params.freq,
                        .note_on = result.params.note_on,
                        .relative = self.mode == 0,
                        .ratio = temps[0],
                        .multiplier = temps[1],
                    },
                );
            }
        }
    }
    /// Mouse position feeds the two portamento controls:
    /// x -> ratio, y -> multiplier.
    pub fn mouseEvent(
        self: *MainModule,
        x: f32,
        y: f32,
        impulse_frame: usize,
    ) void {
        self.ratio.iq.push(impulse_frame, self.ratio.idgen.nextId(), .{
            .value = x,
            .note_on = true,
        });
        self.multiplier.iq.push(impulse_frame, self.multiplier.idgen.nextId(), .{
            .value = y,
            .note_on = true,
        });
    }
    /// Spacebar toggles the mode; any mapped note key pushes a note-on/off
    /// impulse. Returns false when the event was consumed (spacebar).
    pub fn keyEvent(self: *MainModule, key: i32, down: bool, impulse_frame: usize) bool {
        if (key == c.SDLK_SPACE and down) {
            self.mode = (self.mode + 1) % 2;
            self.mode_iq.push(impulse_frame, self.mode_idgen.nextId(), self.mode);
            return false;
        }
        if (common.getKeyRelFreq(key)) |rel_freq| {
            // Only release the note if the released key is the one held.
            if (down or (if (self.key) |nh| nh == key else false)) {
                self.key = if (down) key else null;
                self.instr.iq.push(impulse_frame, self.instr.idgen.nextId(), .{
                    .freq = a4 * rel_freq,
                    .note_on = down,
                });
            }
        }
        return true;
    }
};
|
examples/example_mouse.zig
|
const std = @import("std");
const zlm = @import("zlm");
const log = std.log.scoped(.wavefront_obj);
const vec2 = zlm.vec2;
const vec3 = zlm.vec3;
const vec4 = zlm.vec4;
const Vec2 = zlm.Vec2;
const Vec3 = zlm.Vec3;
const Vec4 = zlm.Vec4;
// Referencing all declarations forces the compiler to semantically analyze
// everything in this file when tests are run.
test "" {
    std.testing.refAllDecls(@This());
}
// this file parses OBJ wavefront according to
// http://paulbourke.net/dataformats/obj/
// with a lot of restrictions
/// One corner of a face: indices into the Model's data arrays.
/// `position` is required; normal and texture coordinate are optional.
/// All indices are 0-based (the OBJ file's 1-based indices are converted).
pub const Vertex = struct {
    position: usize,
    normal: ?usize,
    textureCoordinate: ?usize,
};
/// A polygon with at least 3 vertices (no line/point support).
pub const Face = struct {
    vertices: []Vertex,
};
/// A named group of faces: the half-open range [start, start+count)
/// into Model.faces, with an optional material name.
pub const Object = struct {
    name: []const u8,
    material: ?[]const u8,
    start: usize,
    count: usize,
};
/// A parsed OBJ model. The top-level slices are owned by `allocator`;
/// nested data (face vertex lists, object/material names) lives in `arena`.
pub const Model = struct {
    const Self = @This();
    allocator: *std.mem.Allocator,
    arena: std.heap.ArenaAllocator,
    positions: []Vec4,
    normals: []Vec3,
    textureCoordinates: []Vec3,
    faces: []Face,
    objects: []Object,
    /// Frees all model memory. The model is unusable afterwards.
    pub fn deinit(self: *Self) void {
        self.allocator.free(self.positions);
        self.allocator.free(self.normals);
        self.allocator.free(self.textureCoordinates);
        self.allocator.free(self.faces);
        self.allocator.free(self.objects);
        // Frees the per-face vertex slices and all duplicated names.
        self.arena.deinit();
        self.* = undefined;
    }
};
/// Parses one face vertex spec of the form "p", "p/t", "p//n" or "p/t/n".
/// The file's 1-based indices are converted to 0-based; empty fields map
/// to null. More than three fields is error.InvalidFormat.
fn parseVertexSpec(spec: []const u8) !Vertex {
    var result = Vertex{
        .position = 0,
        .normal = null,
        .textureCoordinate = null,
    };
    var fields = std.mem.split(spec, "/");
    var field_index: u32 = 0;
    while (fields.next()) |field| : (field_index += 1) {
        switch (field_index) {
            0 => result.position = (try std.fmt.parseInt(usize, field, 10)) - 1,
            1 => result.textureCoordinate = if (field.len != 0) (try std.fmt.parseInt(usize, field, 10)) - 1 else null,
            2 => result.normal = if (field.len != 0) (try std.fmt.parseInt(usize, field, 10)) - 1 else null,
            else => return error.InvalidFormat,
        }
    }
    return result;
}
/// Opens the file at `path` (relative to the current working directory)
/// and parses it as a wavefront OBJ model.
/// Caller owns the returned Model and must call deinit().
pub fn loadFile(allocator: *std.mem.Allocator, path: []const u8) !Model {
    // BUG FIX: `std.fs.File.openRead` is not part of the std version the rest
    // of this file targets; open through the cwd Dir handle instead. Likewise
    // `inStream()` is the deprecated name for `reader()`.
    var file = try std.fs.cwd().openFile(path, .{});
    defer file.close();
    return load(allocator, file.reader());
}
/// Parses a wavefront OBJ model from `stream` (any reader).
/// The returned Model's top-level slices are owned by `allocator`; nested
/// data (face vertex lists, object names) lives in the Model's arena.
/// Caller must call Model.deinit(). Returns error.InvalidFormat on
/// malformed statements; unrecognized lines are only warned about.
pub fn load(
    allocator: *std.mem.Allocator,
    stream: anytype,
) !Model {
    var arena = std.heap.ArenaAllocator.init(allocator);
    errdefer arena.deinit();
    var positions = std.ArrayList(Vec4).init(allocator);
    defer positions.deinit();
    var normals = std.ArrayList(Vec3).init(allocator);
    defer normals.deinit();
    var textureCoordinates = std.ArrayList(Vec3).init(allocator);
    defer textureCoordinates.deinit();
    var faces = std.ArrayList(Face).init(allocator);
    defer faces.deinit();
    var objects = std.ArrayList(Object).init(allocator);
    defer objects.deinit();
    // Preallocate generously to cut down on reallocations for typical files.
    try positions.ensureCapacity(10_000);
    try normals.ensureCapacity(10_000);
    try textureCoordinates.ensureCapacity(10_000);
    try faces.ensureCapacity(10_000);
    try objects.ensureCapacity(100);
    // note:
    // this may look like a dangling pointer as ArrayList changes it's pointers when resized.
    // BUT: the pointer will be changed with the added element, so it will not dangle
    var currentObject: ?*Object = null;
    var line_reader = lineIterator(allocator, stream);
    defer line_reader.deinit();
    while (try line_reader.next()) |line| {
        errdefer {
            log.err("error parsing line: '{s}'", .{
                line,
            });
        }
        // parse vertex
        if (std.mem.startsWith(u8, line, "v ")) {
            var iter = std.mem.tokenize(line[2..], " ");
            var state: u32 = 0;
            // w defaults to 1 when the file omits it.
            var vertex = vec4(0, 0, 0, 1);
            while (iter.next()) |part| {
                switch (state) {
                    0 => vertex.x = try std.fmt.parseFloat(f32, part),
                    1 => vertex.y = try std.fmt.parseFloat(f32, part),
                    2 => vertex.z = try std.fmt.parseFloat(f32, part),
                    3 => vertex.w = try std.fmt.parseFloat(f32, part),
                    else => return error.InvalidFormat,
                }
                state += 1;
            }
            if (state < 3) // v x y z w, with x,y,z are required, w is optional
                return error.InvalidFormat;
            try positions.append(vertex);
        }
        // parse uv coords
        else if (std.mem.startsWith(u8, line, "vt ")) {
            var iter = std.mem.tokenize(line[3..], " ");
            var state: u32 = 0;
            var texcoord = vec3(0, 0, 0);
            while (iter.next()) |part| {
                switch (state) {
                    0 => texcoord.x = try std.fmt.parseFloat(f32, part),
                    1 => texcoord.y = try std.fmt.parseFloat(f32, part),
                    2 => texcoord.z = try std.fmt.parseFloat(f32, part),
                    else => return error.InvalidFormat,
                }
                state += 1;
            }
            if (state < 1) // vt u v w, with u is required, v and w are optional
                return error.InvalidFormat;
            try textureCoordinates.append(texcoord);
        }
        // parse normals
        else if (std.mem.startsWith(u8, line, "vn ")) {
            var iter = std.mem.tokenize(line[3..], " ");
            var state: u32 = 0;
            var normal = vec3(0, 0, 0);
            while (iter.next()) |part| {
                switch (state) {
                    0 => normal.x = try std.fmt.parseFloat(f32, part),
                    1 => normal.y = try std.fmt.parseFloat(f32, part),
                    2 => normal.z = try std.fmt.parseFloat(f32, part),
                    else => return error.InvalidFormat,
                }
                state += 1;
            }
            if (state < 3) // vn i j k, with i,j,k are required, none are optional
                return error.InvalidFormat;
            try normals.append(normal);
        }
        // parse faces
        else if (std.mem.startsWith(u8, line, "f ")) {
            var iter = std.mem.tokenize(line[2..], " ");
            var state: u32 = 0;
            // Vertex lists are arena-allocated: they outlive this loop and
            // are freed all at once by Model.deinit().
            var vertices = std.ArrayList(Vertex).init(&arena.allocator);
            defer vertices.deinit();
            while (iter.next()) |part| {
                const vert = try parseVertexSpec(part);
                try vertices.append(vert);
                state += 1;
            }
            if (vertices.items.len < 3) // less than 3 faces is an error (no line or point support)
                return error.InvalidFormat;
            try faces.append(Face{
                .vertices = vertices.toOwnedSlice(),
            });
        }
        // parse objects
        else if (std.mem.startsWith(u8, line, "o ")) {
            if (currentObject) |obj| {
                // terminate object
                obj.count = faces.items.len - obj.start;
            }
            var obj = try objects.addOne();
            obj.start = faces.items.len;
            obj.count = 0;
            obj.name = arena.allocator.dupe(u8, line[2..]) catch |err| {
                _ = objects.pop(); // remove last element, then error
                return err;
            };
            currentObject = obj;
        }
        // parse material libraries
        else if (std.mem.startsWith(u8, line, "mtllib ")) {
            // ignore material libraries for now...
            // TODO: Implement material libraries
        }
        // parse material application
        else if (std.mem.startsWith(u8, line, "usemtl ")) {
            if (currentObject) |*obj| {
                if (obj.*.material != null) {
                    // duplicate object when two materials per object
                    const current_name = obj.*.name;
                    // terminate object
                    obj.*.count = faces.items.len - obj.*.start;
                    obj.* = try objects.addOne();
                    obj.*.start = faces.items.len;
                    obj.*.count = 0;
                    obj.*.name = arena.allocator.dupe(u8, current_name) catch |err| {
                        _ = objects.pop(); // remove last element, then error
                        return err;
                    };
                }
                obj.*.material = try arena.allocator.dupe(u8, line[7..]);
            } else {
                // usemtl before any "o" statement: synthesize an unnamed object.
                currentObject = try objects.addOne();
                currentObject.?.start = faces.items.len;
                currentObject.?.count = 0;
                currentObject.?.name = arena.allocator.dupe(u8, "unnamed") catch |err| {
                    _ = objects.pop(); // remove last element, then error
                    return err;
                };
            }
        }
        // parse smoothing groups
        else if (std.mem.startsWith(u8, line, "s ")) {
            // and just ignore them :(
        } else {
            log.warn("unrecognized line: {s}", .{line});
        }
    }
    // terminate object if any
    if (currentObject) |obj| {
        obj.count = faces.items.len - obj.start;
    }
    return Model{
        .allocator = allocator,
        .arena = arena,
        .positions = positions.toOwnedSlice(),
        .normals = normals.toOwnedSlice(),
        .textureCoordinates = textureCoordinates.toOwnedSlice(),
        .faces = faces.toOwnedSlice(),
        .objects = objects.toOwnedSlice(),
    };
}
/// An RGB color triple as parsed from an MTL statement (Ka/Kd/Ks).
pub const Color = struct {
    r: f32,
    g: f32,
    b: f32,
};
/// One material from an MTL file; all members are optional because any
/// statement may be absent. Texture values are file paths.
pub const Material = struct {
    ambient_texture: ?[]const u8 = null,
    diffuse_texture: ?[]const u8 = null,
    specular_texture: ?[]const u8 = null,
    ambient_color: ?Color = null,
    diffuse_color: ?Color = null,
    specular_color: ?Color = null,
};
/// A set of materials keyed by name. Names and texture paths are owned
/// by the embedded arena.
pub const MaterialLibrary = struct {
    const Self = @This();
    arena: std.heap.ArenaAllocator,
    materials: std.StringHashMap(Material),
    /// Frees the map and all arena-backed strings.
    pub fn deinit(self: *Self) void {
        self.materials.deinit();
        self.arena.deinit();
        self.* = undefined;
    }
};
/// Parses a wavefront MTL stream into a MaterialLibrary.
/// Material names and texture paths are duplicated into the library's arena.
/// A color/texture statement before the first `newmtl` is error.InvalidFormat;
/// a repeated material name is error.DuplicateMaterial.
/// Caller owns the result and must call deinit().
pub fn loadMaterials(allocator: *std.mem.Allocator, stream: anytype) !MaterialLibrary {
    var materials = std.StringHashMap(Material).init(allocator);
    errdefer materials.deinit();
    var arena = std.heap.ArenaAllocator.init(allocator);
    errdefer arena.deinit();
    var line_reader = lineIterator(allocator, stream);
    defer line_reader.deinit();
    // Points at the value slot of the material most recently declared by
    // `newmtl`; refreshed on every `newmtl`, so no map mutation happens while
    // a stale pointer could be used.
    var current_mtl: ?*Material = null;
    while (try line_reader.next()) |line| {
        errdefer {
            log.err("error parsing line: '{s}'\n", .{
                line,
            });
        }
        if (std.mem.startsWith(u8, line, "newmtl ")) {
            const mtl_name = try arena.allocator.dupe(u8, line[7..]);
            const gop = try materials.getOrPut(mtl_name);
            if (gop.found_existing) {
                log.err("duplicate material name: '{s}'", .{mtl_name});
                return error.DuplicateMaterial;
            }
            gop.entry.value = Material{};
            current_mtl = &gop.entry.value;
        } else if (std.mem.startsWith(u8, line, "Ka ")) {
            if (current_mtl) |mtl| {
                mtl.ambient_color = try parseColor(line[3..]);
            } else {
                log.err("missing newmtl!", .{});
                return error.InvalidFormat;
            }
        } else if (std.mem.startsWith(u8, line, "Kd ")) {
            if (current_mtl) |mtl| {
                mtl.diffuse_color = try parseColor(line[3..]);
            } else {
                log.err("missing newmtl!", .{});
                return error.InvalidFormat;
            }
        } else if (std.mem.startsWith(u8, line, "Ks ")) {
            if (current_mtl) |mtl| {
                mtl.specular_color = try parseColor(line[3..]);
            } else {
                log.err("missing newmtl!", .{});
                return error.InvalidFormat;
            }
        } else if (std.mem.startsWith(u8, line, "map_Ka ")) {
            // BUG FIX: the map_* checks previously matched "map_Ka" without the
            // trailing space but then sliced line[7..]. A line that was exactly
            // "map_Ka" sliced out of bounds, and "map_KaX..." misparsed. All
            // three now require the full "map_K? " prefix, matching the Ka/Kd/Ks
            // branches above.
            if (current_mtl) |mtl| {
                mtl.ambient_texture = try arena.allocator.dupe(u8, std.mem.trim(u8, line[7..], " \t\r\n"));
            } else {
                log.err("missing newmtl!", .{});
                return error.InvalidFormat;
            }
        } else if (std.mem.startsWith(u8, line, "map_Kd ")) {
            if (current_mtl) |mtl| {
                mtl.diffuse_texture = try arena.allocator.dupe(u8, std.mem.trim(u8, line[7..], " \t\r\n"));
            } else {
                log.err("missing newmtl!", .{});
                return error.InvalidFormat;
            }
        } else if (std.mem.startsWith(u8, line, "map_Ks ")) {
            if (current_mtl) |mtl| {
                mtl.specular_texture = try arena.allocator.dupe(u8, std.mem.trim(u8, line[7..], " \t\r\n"));
            } else {
                log.err("missing newmtl!", .{});
                return error.InvalidFormat;
            }
        } else {
            log.warn("unrecognized line: '{s}'", .{line});
        }
    }
    return MaterialLibrary{
        .arena = arena,
        .materials = materials,
    };
}
/// Parses exactly three whitespace-separated float components ("r g b").
/// Fewer or more than three components is error.InvalidFormat.
fn parseColor(line: []const u8) !Color {
    var parts = std.mem.tokenize(line, " ");
    var color = Color{
        .r = undefined,
        .g = undefined,
        .b = undefined,
    };
    var count: usize = 0;
    while (parts.next()) |part| : (count += 1) {
        switch (count) {
            0 => color.r = try std.fmt.parseFloat(f32, part),
            1 => color.g = try std.fmt.parseFloat(f32, part),
            2 => color.b = try std.fmt.parseFloat(f32, part),
            else => return error.InvalidFormat,
        }
    }
    return if (count < 3) error.InvalidFormat else color;
}
/// Iterator type yielding trimmed, comment-stripped, non-empty lines from a
/// reader. Lines longer than 4096 bytes are an error.
fn LineIterator(comptime Reader: type) type {
    return struct {
        const Self = @This();
        reader: Reader,
        buffer: std.ArrayList(u8),
        pub fn deinit(self: *Self) void {
            self.buffer.deinit();
            self.* = undefined;
        }
        /// Returns the next interesting line, or null at end of stream.
        /// The returned slice points into the internal buffer and is only
        /// valid until the next call to next().
        /// NOTE(review): a final line without a trailing '\n' appears to be
        /// discarded when EndOfStream hits — confirm this is intended.
        pub fn next(self: *Self) !?[]const u8 {
            while (true) {
                self.reader.readUntilDelimiterArrayList(&self.buffer, '\n', 4096) catch |err| switch (err) {
                    error.EndOfStream => return null,
                    else => return err,
                };
                var line: []const u8 = self.buffer.items;
                // remove comments
                if (std.mem.indexOf(u8, line, "#")) |idx| {
                    line = line[0..idx];
                }
                // strip trailing/leading whites
                line = std.mem.trim(u8, line, " \r\n\t");
                if (line.len == 0) {
                    continue;
                }
                return line;
            }
        }
    };
}
/// Constructs a LineIterator over `reader`, buffering lines with `allocator`.
fn lineIterator(allocator: *std.mem.Allocator, reader: anytype) LineIterator(@TypeOf(reader)) {
    const Iterator = LineIterator(@TypeOf(reader));
    return Iterator{
        .buffer = std.ArrayList(u8).init(allocator),
        .reader = reader,
    };
}
|
wavefront-obj.zig
|
const std = @import("std");
/// Determines the virtual machine memory size (number of registers).
const memory_size = 32;
/// Represents the kind of the opcode.
const OpcodeKind = enum {
    Loadi, // Loadi rx l1 — rx = l1 (load immediate)
    Addi, // Addi rx ra l1 — rx = ra + l1
    Compare, // Compare rx ra rb — rx = (ra == rb) ? 1 : 0
    Jump, // Jump l1 — relative jump by l1
    Branch, // Branch ra l1 — relative jump by l1 if ra != 0
    Exit // Exit — stop the VM
};
/// Represents an opcode with 3 operands. Unused operands default to 0;
/// register operands hold register indices, immediates hold the raw value.
const Opcode = struct {
    kind: OpcodeKind,
    op1: i64 = 0,
    op2: i64 = 0,
    op3: i64 = 0,
};
/// Moves the program counter by a signed relative offset.
/// Underflow/overflow of the usize counter traps in safe build modes.
fn jump(pc: *usize, offset: i64) void {
    if (offset < 0) {
        pc.* -= @intCast(usize, -offset);
    } else {
        pc.* += @intCast(usize, offset);
    }
}
/// Entry point: runs a small counting loop (r1 counts up to r0 = 20000)
/// on the toy register VM defined above.
pub fn main() anyerror!void {
    // Program memory (registers)
    var memory = comptime ([_]i64{ 0 } ** memory_size);
    // Program code to execute.
    // Note: pc is incremented after every opcode, including jumps, so the
    // relative offsets below are applied on top of that +1.
    var code = [_]Opcode{
        Opcode{ .kind = .Loadi, .op1 = 0, .op2 = 20000 }, // r0 = 20000;
        Opcode{ .kind = .Loadi, .op1 = 1, .op2 = 0 }, // r1 = 0;
        Opcode{ .kind = .Compare, .op1 = 2, .op2 = 0, .op3 = 1 }, // r2 = r0 == r1;
        Opcode{ .kind = .Branch, .op1 = 2, .op2 = 2 }, // if (r2 != 0) goto +2 (to Exit);
        Opcode{ .kind = .Addi, .op1 = 1, .op2 = 1, .op3 = 1 }, // r1 = r1 + 1;
        Opcode{ .kind = .Jump, .op1 = -4 }, // goto -4 (back to Compare);
        Opcode{ .kind = .Exit }
    };
    // Program Counter.
    var pc: usize = 0;
    // The VM itself.
    while (true) {
        const op = code[pc];
        // std.debug.warn("({}) Kind = {}\n\tr0={}\tr1={}\tr2={}\n", pc, op.kind, memory[0], memory[1], memory[2]);
        switch (op.kind) {
            .Loadi => memory[@intCast(usize, op.op1)] = op.op2,
            .Addi => memory[@intCast(usize, op.op1)] = memory[@intCast(usize, op.op2)] + op.op3,
            .Compare => memory[@intCast(usize, op.op1)] =
                @intCast(i64, @boolToInt(memory[@intCast(usize, op.op2)] == memory[@intCast(usize, op.op3)])),
            .Jump => jump(&pc, op.op1),
            .Branch => if (memory[@intCast(usize, op.op1)] != 0) { jump(&pc, op.op2); },
            .Exit => break,
        }
        pc += 1;
    }
}
|
src/main.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const assert = std.debug.assert;
const expect = std.testing.expect;
/// Creates a prefix-tree (trie) node type over edge values of type `T`,
/// ordered by `cmpFn`. Edges are kept sorted so lookups can binary-search.
/// A null entry in `child_nodes` marks a leaf edge (no subtree yet).
pub fn PrefixTreeNode(comptime T: type, comptime cmpFn: fn (lhs: T, rhs: T) std.math.Order) type {
    return struct {
        const Self = @This();
        // https://github.com/ziglang/zig/issues/4562
        const S = struct { s: ?*Self };
        edges: ArrayListUnmanaged(T) = ArrayListUnmanaged(T){},
        child_nodes: ArrayListUnmanaged(S) = ArrayListUnmanaged(S){},
        /// Allocates both parallel lists with capacity for `n` edges.
        fn initCapacity(allocator: *Allocator, n: usize) !Self {
            var edges = try ArrayListUnmanaged(T).initCapacity(allocator, n);
            errdefer edges.deinit(allocator);
            var child_nodes = try ArrayListUnmanaged(S).initCapacity(allocator, n);
            errdefer child_nodes.deinit(allocator);
            return Self{
                .edges = edges,
                .child_nodes = child_nodes,
            };
        }
        /// Frees this node's lists only (not its children).
        fn deinit(self: *Self, allocator: *Allocator) void {
            self.edges.deinit(allocator);
            self.child_nodes.deinit(allocator);
        }
        /// Frees this node and all of its descendants.
        // FIXME recursive
        pub fn deallocRecursive(self: *Self, allocator: *Allocator) void {
            for (self.child_nodes.items) |child_node| {
                if (child_node.s) |node| node.deallocRecursive(allocator);
            }
            self.deinit(allocator);
            allocator.destroy(self);
        }
        pub fn get(self: Self, index: usize) T {
            return self.edges.items[index];
        }
        pub fn child(self: Self, index: usize) ?*Self {
            return self.child_nodes.items[index].s;
        }
        pub fn numChildren(self: Self) usize {
            return self.child_nodes.items.len;
        }
        /// Returns true if `item` is stored in the tree. The empty item
        /// always exists; any prefix of a stored item exists as well.
        pub fn exists(self: *const Self, item: []const T) bool {
            if (item.len == 0) return true;
            var current = self;
            for (item[0 .. item.len - 1]) |edge| {
                const edge_index = current.indexOf(edge) orelse return false;
                current = current.child_nodes.items[edge_index].s orelse return false;
            }
            return current.indexOf(item[item.len - 1]) != null;
        }
        /// Binary-searches this node's sorted edge list for `edge`.
        fn indexOf(self: Self, edge: T) ?usize {
            const cmp = struct {
                fn f(ctx: void, lhs: T, rhs: T) std.math.Order {
                    return cmpFn(lhs, rhs);
                }
            }.f;
            return std.sort.binarySearch(T, edge, self.edges.items, {}, cmp);
        }
        /// Adds the edge `value` to the node reached via the path `parent`,
        /// materializing that node first when the path ends on a leaf edge.
        /// Asserts that `parent` exists and `value` is not present yet.
        pub fn insertChild(self: *Self, parent: []const T, value: T, allocator: *Allocator) !void {
            const parent_node_or_ptr = self.getNodeOrNodePtr(parent) orelse unreachable;
            const parent_node = switch (parent_node_or_ptr) {
                .node => |n| blk: {
                    assert(n.indexOf(value) == null);
                    break :blk n;
                },
                .node_ptr => blk: {
                    const new_node = try allocator.create(Self);
                    new_node.* = Self{};
                    break :blk new_node;
                },
            };
            errdefer if (parent_node_or_ptr == .node_ptr) allocator.destroy(parent_node);
            _ = try parent_node.newEdge(value, allocator);
            // Only link the freshly created node in once nothing can fail.
            if (parent_node_or_ptr == .node_ptr) parent_node_or_ptr.node_ptr.s = parent_node;
        }
        const GetNodeOrNodePtr = union(enum) {
            node: *Self,
            node_ptr: *S,
        };
        /// Walks the path `item` and returns either the node it ends at, or —
        /// when the path ends on a leaf edge — a pointer to that edge's null
        /// child slot so the caller can fill it in. Null if the path is absent.
        fn getNodeOrNodePtr(self: *Self, item: []const T) ?GetNodeOrNodePtr {
            if (item.len == 0) return GetNodeOrNodePtr{ .node = self };
            var current = self;
            for (item[0 .. item.len - 1]) |edge| {
                const edge_index = current.indexOf(edge) orelse return null;
                current = current.child(edge_index) orelse return null;
            }
            const i = current.indexOf(item[item.len - 1]) orelse return null;
            // BUG FIX: this line was corrupted by an HTML-entity mangling
            // ("¤t." for "&current."); restored the intended address-of.
            const node_ptr = &current.child_nodes.items[i];
            if (node_ptr.s) |node| {
                return GetNodeOrNodePtr{ .node = node };
            } else {
                return GetNodeOrNodePtr{ .node_ptr = node_ptr };
            }
        }
        /// Stolen from std.sort.binarySearch.
        /// Returns the index where `key` keeps `items` sorted; asserts that
        /// `key` is not already present.
        fn searchForInsertPosition(
            key: T,
            items: []const T,
        ) usize {
            var left: usize = 0;
            var right: usize = items.len;
            while (left < right) {
                // Avoid overflowing in the midpoint calculation
                const mid = left + (right - left) / 2;
                // Compare the key with the midpoint element
                switch (cmpFn(key, items[mid])) {
                    .eq => unreachable,
                    .gt => left = mid + 1,
                    .lt => right = mid,
                }
            }
            return left;
        }
        /// Inserts `edge` (with a null child) keeping the lists sorted and
        /// parallel; returns its index.
        fn newEdge(self: *Self, edge: T, allocator: *Allocator) !usize {
            const index = searchForInsertPosition(edge, self.edges.items);
            try self.edges.insert(allocator, index, edge);
            errdefer _ = self.edges.orderedRemove(index);
            try self.child_nodes.insert(allocator, index, .{ .s = null });
            errdefer _ = self.child_nodes.orderedRemove(index);
            return index;
        }
        /// Delete the most recent edge that was just created by newEdge
        fn deleteNewEdge(self: *Self, edge_index: usize, allocator: *Allocator) void {
            _ = self.edges.orderedRemove(edge_index);
            _ = self.child_nodes.orderedRemove(edge_index);
        }
        /// Inserts `item`, creating any missing suffix as a chain of
        /// single-edge nodes. Inserting an existing item is a no-op.
        pub fn insert(self: *Self, item: []const T, allocator: *Allocator) !void {
            if (item.len == 0) return;
            var last_edge_index: usize = undefined;
            var last_edge_is_new = false;
            var current_node = self;
            var i: usize = 0;
            while (i < item.len) : (i += 1) {
                const edge = item[i];
                if (current_node.indexOf(edge)) |edge_index| {
                    if (current_node.child(edge_index)) |child_node| {
                        current_node = child_node;
                    } else {
                        last_edge_index = edge_index;
                        break;
                    }
                } else {
                    last_edge_index = try current_node.newEdge(edge, allocator);
                    last_edge_is_new = true;
                    break;
                }
            }
            if (i < item.len - 1) {
                errdefer if (last_edge_is_new) current_node.deleteNewEdge(last_edge_index, allocator);
                const the_rest = try createBranch(item[i + 1 ..], allocator);
                errdefer the_rest.deallocRecursive(allocator);
                current_node.child_nodes.items[last_edge_index] = .{ .s = the_rest };
            }
        }
        // Must be at least 1 element in item
        /// Builds a linear chain of single-edge nodes for `item` and returns
        /// its head. On failure every allocated node is freed.
        fn createBranch(item: []const T, allocator: *Allocator) !*Self {
            var list = try std.ArrayList(*Self).initCapacity(allocator, item.len)
            ;
            defer list.deinit();
            errdefer for (list.items) |node| {
                node.deinit(allocator);
                allocator.destroy(node);
            };
            const singleEdgeNode = (struct {
                fn f(edge: T, alloc: *Allocator) !*Self {
                    const new_node = try alloc.create(Self);
                    errdefer alloc.destroy(new_node);
                    new_node.* = try Self.initCapacity(alloc, 1);
                    errdefer new_node.deinit(alloc);
                    new_node.edges.appendAssumeCapacity(edge);
                    new_node.child_nodes.appendAssumeCapacity(.{ .s = null });
                    return new_node;
                }
            }).f;
            const first_node = try singleEdgeNode(item[0], allocator);
            list.appendAssumeCapacity(first_node);
            var i: usize = 1;
            while (i < item.len) : (i += 1) {
                const new_node = try singleEdgeNode(item[i], allocator);
                list.appendAssumeCapacity(new_node);
                list.items[i - 1].child_nodes.items[0] = .{ .s = new_node };
            }
            return list.items[0];
        }
        /// Deep-copies this subtree with `allocator`; frees any partial copy
        /// on failure.
        fn clone(self: Self, allocator: *Allocator) Allocator.Error!*Self {
            const clone_result = internalClone(self, allocator);
            if (clone_result.err) |err| {
                if (clone_result.node) |node| node.deallocRecursive(allocator);
                return err;
            } else {
                return clone_result.node.?;
            }
        }
        // FIXME recursive
        fn internalClone(self: Self, allocator: *Allocator) (struct { node: ?*Self, err: ?Allocator.Error }) {
            // Don't try to handle any memory allocation failures in this function.
            // Instead, the cleanup is done by the `clone` function.
            const result = allocator.create(Self) catch |err| return .{ .node = null, .err = err };
            result.* = Self{};
            const edges_copy = allocator.dupe(T, self.edges.items) catch |err| return .{ .node = result, .err = err };
            // Ideally, should be ArrayListUnmanaged(T).fromOwnedSlice(...)
            result.edges = ArrayList(T).fromOwnedSlice(allocator, edges_copy).toUnmanaged();
            result.child_nodes = ArrayListUnmanaged(S).initCapacity(allocator, self.edges.items.len) catch |err| return .{ .node = result, .err = err };
            for (self.child_nodes.items) |child_entry| {
                if (child_entry.s) |node| {
                    const child_clone_result = node.internalClone(allocator);
                    result.child_nodes.appendAssumeCapacity(.{ .s = child_clone_result.node });
                    if (child_clone_result.err) |_| return .{ .node = result, .err = child_clone_result.err };
                } else {
                    result.child_nodes.appendAssumeCapacity(.{ .s = null });
                }
            }
            return .{ .node = result, .err = null };
        }
        /// Prints the subtree in an s-expression-like form.
        // FIXME recursive
        fn format(self: Self, writer: anytype, comptime fmt: []const u8) @TypeOf(writer).Error!void {
            const fmt_string = "({" ++ fmt ++ "}";
            // BUG FIX: an empty node (e.g. a freshly created root) previously
            // indexed edges.items[0] unconditionally and crashed; print
            // nothing instead.
            if (self.edges.items.len == 0) return;
            try writer.print(fmt_string, .{self.edges.items[0]});
            if (self.child_nodes.items[0].s) |child_node| {
                try writer.writeAll(" ");
                try child_node.format(writer, fmt);
            }
            try writer.writeAll(")");
            for (self.edges.items[1..]) |edge, i| {
                try writer.print(" " ++ fmt_string, .{edge});
                if (self.child_nodes.items[1..][i].s) |child_node| {
                    try writer.writeAll(" ");
                    try child_node.format(writer, fmt);
                }
                try writer.writeAll(")");
            }
        }
        /// A caller-owned list of all stored items; free() releases each one.
        const Leaves = struct {
            items: [][]T,
            allocator: *Allocator,
            fn free(self: @This()) void {
                for (self.items) |leaf| self.allocator.free(leaf);
            }
        };
        /// Collects every stored item (root-to-leaf path) into newly
        /// allocated slices.
        fn getLeaves(self: *const Self, allocator: *Allocator) !Leaves {
            var stack = ArrayList(T).init(allocator);
            defer stack.deinit();
            var list = ArrayList([]T).init(allocator);
            try internalGetLeaves(self, &list, &stack);
            return Leaves{ .items = list.toOwnedSlice(), .allocator = allocator };
        }
        // `stack` holds the path of edges from the root to this node.
        fn internalGetLeaves(self: *const Self, list: *ArrayList([]T), stack: *ArrayList(T)) Allocator.Error!void {
            for (self.child_nodes.items) |child_entry, i| {
                const edge = self.edges.items[i];
                if (child_entry.s) |node| {
                    try stack.append(edge);
                    try internalGetLeaves(node, list, stack);
                    _ = stack.pop();
                } else {
                    const item = blk: {
                        const buf = try list.allocator.alloc(T, stack.items.len + 1);
                        std.mem.copy(T, buf, stack.items);
                        buf[buf.len - 1] = edge;
                        break :blk buf;
                    };
                    errdefer list.allocator.free(item);
                    try list.append(item);
                }
            }
        }
    };
}
/// Convenience wrapper bundling a PrefixTreeNode root with its allocator.
pub fn PrefixTree(comptime T: type, comptime cmpFn: fn (lhs: T, rhs: T) std.math.Order) type {
    return struct {
        const Self = @This();
        const Node = PrefixTreeNode(T, cmpFn);
        root: *Node,
        allocator: *Allocator,
        pub fn init(allocator: *Allocator) !Self {
            const root = try allocator.create(Node);
            root.* = Node{};
            return Self{
                .root = root,
                .allocator = allocator,
            };
        }
        /// Frees the whole tree.
        pub fn deinit(self: *Self) void {
            self.root.deallocRecursive(self.allocator);
        }
        pub fn exists(self: Self, item: []const T) bool {
            return self.root.exists(item);
        }
        pub fn insert(self: Self, item: []const T) !void {
            return self.root.insert(item, self.allocator);
        }
        /// Deep copy; all new nodes are allocated with `allocator`.
        pub fn clone(self: Self, allocator: *Allocator) !Self {
            return Self{
                .root = try self.root.clone(allocator),
                .allocator = allocator,
            };
        }
        /// Returns a new tree containing the items of both inputs;
        /// neither input is modified.
        pub fn merge(first: Self, second: Self, allocator: *Allocator) !Self {
            var copy = try first.clone(allocator);
            errdefer copy.deinit();
            const second_leaves = try second.root.getLeaves(allocator);
            defer second_leaves.free();
            for (second_leaves.items) |leaf| try copy.insert(leaf);
            return copy;
        }
        // std.fmt integration; delegates to the node's custom format.
        pub fn format(self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
            return self.root.format(writer, fmt);
        }
    };
}
// Exercises insert/exists/clone/merge/getLeaves/format on a small u8 tree.
// Uses an arena allocator, so individual frees are no-ops and leaks are
// not detected here.
test "sample tree" {
    const cmpFn = (struct {
        fn f(lhs: u8, rhs: u8) std.math.Order {
            return std.math.order(lhs, rhs);
        }
    }).f;
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    var tree = try PrefixTree(u8, cmpFn).init(&arena.allocator);
    defer tree.deinit();
    // The empty string and every prefix of an inserted item exist.
    expect(tree.exists(""));
    expect(!tree.exists("abc"));
    try tree.insert("abc");
    expect(tree.exists("abc"));
    expect(tree.exists("ab"));
    expect(tree.exists("a"));
    try tree.insert("rockstar");
    expect(tree.exists("rockstar"));
    expect(tree.exists("rocks"));
    expect(tree.exists("rock"))
    ;
    // Re-inserting an existing item is a no-op.
    try tree.insert("rockstar");
    try tree.insert("rockstars");
    try tree.insert("rockers");
    expect(tree.exists("rockers"));
    expect(tree.exists("rocker"));
    expect(!tree.exists("arock"));
    try tree.insert("bean");
    try tree.insert("bean-truck");
    const stdout = std.io.getStdOut().writer();
    try stdout.print("{c}\n", .{tree});
    var tree_clone = try tree.clone(tree.allocator);
    defer tree_clone.deinit();
    try stdout.print("{c}\n", .{tree_clone});
    const leaves = try tree.root.getLeaves(&arena.allocator);
    defer leaves.free();
    for (leaves.items) |leaf| {
        try stdout.print("{}\n", .{leaf});
    }
    var second_tree = blk: {
        var res = try PrefixTree(u8, cmpFn).init(&arena.allocator);
        errdefer res.deinit();
        try res.insert("roadmap");
        try res.insert("beethoven");
        try res.insert("zig");
        try res.insert("ziglang");
        break :blk res;
    };
    defer second_tree.deinit();
    const second_leaves = try second_tree.root.getLeaves(&arena.allocator);
    defer second_leaves.free();
    for (second_leaves.items) |leaf| {
        try stdout.print("{}\n", .{leaf});
    }
    var merged = try tree.merge(second_tree, &arena.allocator);
    defer merged.deinit();
    try stdout.print("{c}\n", .{merged});
}
|
source/prefix_tree.zig
|
const std = @import("std");
const Builder = std.build.Builder;
const builtin = std.builtin;
const assert = std.debug.assert;
const Arch = if (@hasField(builtin, "Arch")) builtin.Arch else std.Target.Cpu.Arch;
// var source_blob: *std.build.RunStep = undefined;
// var source_blob_path: []u8 = undefined;
// fn make_source_blob(b: *Builder) void {
// source_blob_path =
// std.mem.concat(b.allocator, u8,
// &[_][]const u8{ b.cache_root, "/sources.tar" }
// ) catch unreachable;
// source_blob = b.addSystemCommand(
// &[_][]const u8 {
// "tar", "--no-xattrs", "-cf", source_blob_path, "src", "build.zig",
// },
// );
// }
/// A build step recording the output path of an external transformation
/// command. The step itself does no work (run_command is a no-op); it only
/// exists so other steps can depend on the transform and read output_path.
const TransformFileCommandStep = struct {
    step: std.build.Step,
    output_path: []const u8,
    fn run_command(s: *std.build.Step) !void {}
};
// Step.Id renamed its variants across Zig versions; use whichever exists.
const custom_build_id: std.build.Step.Id =
    if (@hasField(std.build.Step.Id, "Custom"))
    .Custom
else
    .custom;
/// Creates a TransformFileCommandStep that runs `command` (after `dep`
/// completes) and records `output_path` as the file the command produces.
fn make_transform(b: *Builder, dep: *std.build.Step, command: [][]const u8, output_path: []const u8) !*TransformFileCommandStep {
    const transform = try b.allocator.create(TransformFileCommandStep);
    transform.output_path = output_path;
    transform.step = std.build.Step.init(custom_build_id, "", b.allocator, TransformFileCommandStep.run_command);
    const command_step = b.addSystemCommand(command);
    command_step.step.dependOn(dep);
    transform.step.dependOn(&command_step.step);
    return transform;
}
/// Builds a CrossTarget from `ctarget` with per-architecture CPU feature
/// adjustments applied. For aarch64 FP/crypto/NEON are disabled, which is
/// equivalent to -mgeneral-regs-only.
fn cpu_features(arch: Arch, ctarget: std.zig.CrossTarget) std.zig.CrossTarget {
    var features_to_remove = std.Target.Cpu.Feature.Set.empty;
    const features_to_add = std.Target.Cpu.Feature.Set.empty;
    if (arch == .aarch64) {
        const features = std.Target.aarch64.Feature;
        // This is equal to -mgeneral-regs-only
        features_to_remove.addFeature(@enumToInt(features.fp_armv8));
        features_to_remove.addFeature(@enumToInt(features.crypto));
        features_to_remove.addFeature(@enumToInt(features.neon));
    }
    return std.zig.CrossTarget{
        .cpu_arch = ctarget.cpu_arch,
        .os_tag = ctarget.os_tag,
        .abi = ctarget.abi,
        .cpu_features_sub = features_to_remove,
        .cpu_features_add = features_to_add,
    };
}
/// Configures `elf` as a freestanding, no-abi build for `arch`.
/// `do_code_model` selects the tiny code model where applicable.
fn freestanding_target(elf: *std.build.LibExeObjStep, arch: Arch, do_code_model: bool) void {
    if (arch == .aarch64) {
        // We don't need the code model in asm blobs
        if (do_code_model)
            elf.code_model = .tiny;
    }
    elf.setTarget(cpu_features(arch, .{
        .cpu_arch = arch,
        .os_tag = std.Target.Os.Tag.freestanding,
        .abi = std.Target.Abi.none,
    }));
}
/// Newer Zig versions take a FileSource instead of a path string in
/// setLinkerScriptPath; detect at comptime which signature this compiler has.
fn takes_new_file_ref() bool {
    return @typeInfo(@TypeOf(std.build.LibExeObjStep.setLinkerScriptPath)).Fn.args[1].arg_type.? != []const u8;
}
/// Wraps `filename` in whichever type setLinkerScriptPath expects.
fn file_ref(filename: []const u8) if (takes_new_file_ref()) std.build.FileSource else []const u8 {
    if (comptime takes_new_file_ref()) {
        return .{ .path = filename };
    } else {
        return filename;
    }
}
/// Returns true when a platform port exists for `target_name` on `arch`.
pub fn board_supported(arch: Arch, target_name: []const u8) bool {
    switch (arch) {
        .aarch64 => {
            const supported_boards = [_][]const u8{ "virt", "pi3", "pine" };
            for (supported_boards) |board| {
                if (std.mem.eql(u8, target_name, board))
                    return true;
            }
            return false;
        },
        else => return false,
    }
}
/// Builds the Sabaton ELF for `target_name` on `arch`.
/// `path_prefix` is prepended to source paths so this can be invoked from an
/// out-of-tree build script. Returns error.UnsupportedBoard for unknown
/// arch/board combinations (see board_supported).
pub fn build_elf(b: *Builder, arch: Arch, target_name: []const u8, path_prefix: []const u8) !*std.build.LibExeObjStep {
    if (!board_supported(arch, target_name)) return error.UnsupportedBoard;
    const elf_filename = b.fmt("Sabaton_{s}_{s}.elf", .{ target_name, @tagName(arch) });
    const platform_path = b.fmt("{s}src/platform/{s}_{s}", .{ path_prefix, target_name, @tagName(arch) });
    const elf = b.addExecutable(elf_filename, b.fmt("{s}/main.zig", .{platform_path}));
    elf.setLinkerScriptPath(file_ref(b.fmt("{s}/linker.ld", .{platform_path})));
    elf.addAssemblyFile(b.fmt("{s}/entry.S", .{platform_path}))
    ;
    // Exposes the board name to the source via @import("build_options").
    elf.addBuildOption([]const u8, "board_name", target_name);
    freestanding_target(elf, arch, true);
    elf.setBuildMode(.ReleaseSmall);
    // Guarded: want_lto does not exist on older std.build versions.
    if (@hasField(@TypeOf(elf.*), "want_lto"))
        elf.want_lto = false;
    elf.setMainPkgPath(b.fmt("{s}src/", .{path_prefix}));
    elf.setOutputDir(b.cache_root);
    elf.disable_stack_probing = true;
    elf.install();
    //elf.step.dependOn(&source_blob.step);
    return elf;
}
/// Copies the file at `path` to "<path>.pad" and truncates/extends that copy
/// to 64M. Runs after `dep`; returns the step producing the padded file.
pub fn pad_file(b: *Builder, dep: *std.build.Step, path: []const u8) !*TransformFileCommandStep {
    const padded_path = b.fmt("{s}.pad", .{path});
    const shell_command = b.fmt("cp {s} {s} && truncate -s 64M {s}", .{ path, padded_path, padded_path });
    return try make_transform(
        b,
        dep,
        &[_][]const u8{ "/bin/sh", "-c", shell_command },
        padded_path,
    );
}
/// Whether a dumped blob should be padded to the fixed flash image size.
const pad_mode = enum { Padded, NotPadded };
/// Extracts `section_name` from the installed ELF into a raw binary file
/// ("<elf>.bin") via llvm-objcopy, optionally padding it (see pad_file).
fn section_blob(b: *Builder, elf: *std.build.LibExeObjStep, mode: pad_mode, section_name: []const u8) !*TransformFileCommandStep {
    const elf_path = b.getInstallPath(elf.install_step.?.dest_dir, elf.out_filename);
    const dumped_path = b.fmt("{s}.bin", .{elf_path});
    const dump_step = try make_transform(
        b,
        &elf.install_step.?.step,
        &[_][]const u8{
            // zig fmt: off
            "llvm-objcopy",
            "-O", "binary",
            "--only-section", section_name,
            elf_path, dumped_path,
            // zig fmt: on
        },
        dumped_path,
    );
    if (mode == .Padded)
        return pad_file(b, &dump_step.step, dump_step.output_path);
    return dump_step;
}
/// Extracts the conventional ".blob" section from `elf` (see section_blob).
fn blob(b: *Builder, elf: *std.build.LibExeObjStep, mode: pad_mode) !*TransformFileCommandStep {
    return section_blob(b, elf, mode, ".blob");
}
/// Builds a standalone assembly file into an unpadded raw binary blob
/// using the shared src/blob.ld linker script.
fn assembly_blob(b: *Builder, arch: Arch, name: []const u8, asm_file: []const u8) !*TransformFileCommandStep {
    const elf_filename = b.fmt("{s}_{s}.elf", .{ name, @tagName(arch) });
    const elf = b.addExecutable(elf_filename, null);
    elf.setLinkerScriptPath(file_ref("src/blob.ld"));
    elf.addAssemblyFile(asm_file);
    freestanding_target(elf, arch, false);
    elf.setBuildMode(.ReleaseSafe);
    elf.setMainPkgPath("src/");
    elf.setOutputDir(b.cache_root);
    elf.install();
    return blob(b, elf, .NotPadded);
}
/// Builds the board ELF and extracts its padded flash blob.
pub fn build_blob(b: *Builder, arch: Arch, target_name: []const u8, path_prefix: []const u8) !*TransformFileCommandStep {
    const elf = try build_elf(b, arch, target_name, path_prefix);
    return blob(b, elf, .Padded);
}
/// Registers a named top-level step that boots the padded blob for
/// `board_name` in qemu-system-aarch64 (blob attached as a pflash drive,
/// test kernel supplied via fw_cfg).
fn qemu_aarch64(b: *Builder, board_name: []const u8, desc: []const u8, dep_elf: *std.build.LibExeObjStep) !void {
    const command_step = b.step(board_name, desc);
    const dep = try blob(b, dep_elf, .Padded);
    const params = &[_][]const u8{
        // zig fmt: off
        "qemu-system-aarch64",
        "-M", board_name,
        "-cpu", "cortex-a57",
        "-drive", b.fmt("if=pflash,format=raw,file={s},readonly=on", .{dep.output_path}),
        "-m", "4G",
        "-serial", "stdio",
        //"-S", "-s",
        "-d", "int",
        "-smp", "8",
        "-device", "ramfb",
        "-fw_cfg", "opt/Sabaton/kernel,file=test/Flork_stivale2_aarch64",
        // zig fmt: on
    };
    const run_step = b.addSystemCommand(params);
    run_step.step.dependOn(&dep.step);
    command_step.dependOn(&run_step.step);
}
/// Registers a "pi3" step that boots the unpadded blob as the raspi3 kernel
/// in qemu, loading the test kernel directly at its load address.
fn qemu_pi3_aarch64(b: *Builder, desc: []const u8, dep_elf: *std.build.LibExeObjStep) !void {
    const command_step = b.step("pi3", desc);
    const dep = try blob(b, dep_elf, .NotPadded);
    const params = &[_][]const u8{
        // zig fmt: off
        "qemu-system-aarch64",
        "-M", "raspi3",
        "-device", "loader,file=test/Flork_stivale2_aarch64,addr=0x200000,force-raw=on",
        "-serial", "null",
        "-serial", "stdio",
        "-d", "int",
        "-kernel", dep.output_path
        // zig fmt: on
    };
    const run_step = b.addSystemCommand(params);
    run_step.step.dependOn(&dep.step);
    command_step.dependOn(&run_step.step);
}
/// A named device and the architecture it builds for.
const Device = struct {
    name: []const u8,
    arch: Arch,
};
/// Spec for a standalone assembly blob: output name, arch and source path.
const AssemblyBlobSpec = struct {
    name: []const u8,
    arch: Arch,
    path: []const u8,
};
/// Top-level build entry point: registers the qemu run steps, the standalone
/// assembly blobs, and per-device ELF/blob build steps.
pub fn build(b: *Builder) !void {
    //make_source_blob(b);
    try qemu_aarch64(
        b,
        "virt",
        "Run aarch64 sabaton on for the qemu virt board",
        try build_elf(b, .aarch64, "virt", "./"),
    );
    try qemu_pi3_aarch64(
        b,
        "Run aarch64 sabaton on for the qemu raspi3 board",
        try build_elf(b, .aarch64, "pi3", "./"),
    );
    {
        const assembly_blobs = &[_]AssemblyBlobSpec{
            .{ .path = "src/platform/pine_aarch64/identity.S", .name = "identity_pine", .arch = .aarch64 },
        };
        for (assembly_blobs) |spec| {
            const blob_file = try assembly_blob(b, spec.arch, spec.name, spec.path);
            b.default_step.dependOn(&blob_file.step);
        }
    }
    {
        const elf_devices = &[_]Device{};
        for (elf_devices) |dev| {
            // BUG FIX: build_elf takes 4 arguments — the call was missing the
            // path prefix and also ignored the device's declared architecture.
            const elf_file = try build_elf(b, dev.arch, dev.name, "./");
            const s = b.step(dev.name, b.fmt("Build the blob for {s}", .{dev.name}));
            s.dependOn(&elf_file.step);
            b.default_step.dependOn(s);
        }
    }
    {
        const blob_devices = &[_]Device{
            .{ .name = "pine", .arch = .aarch64 },
        };
        for (blob_devices) |dev| {
            // Use the arch declared on the device instead of hard-coding aarch64.
            const elf_file = try build_elf(b, dev.arch, dev.name, "./");
            const blob_file = try blob(b, elf_file, .NotPadded);
            const s = b.step(dev.name, b.fmt("Build the blob for {s}", .{dev.name}));
            s.dependOn(&blob_file.step);
            b.default_step.dependOn(s);
        }
    }
    // qemu_riscv(b,
    //     "virt",
    //     "Run riscv64 sabaton on for the qemu virt board",
    //     build_elf(b, .riscv64, "virt"),
    // );
}
|
build.zig
|
const kern = @import("kern.zig");
// in BPF, all the helper calls
// TODO: when https://github.com/ziglang/zig/issues/1717 is here, make a nice
// function that uses the Helper enum
//
// Note, these function signatures were created from documentation found in
// '/usr/include/linux/bpf.h'
// Each constant below is a typed function pointer whose "address" is the BPF
// helper ID from '/usr/include/linux/bpf.h' (see header comment above); the
// BPF loader/verifier resolves calls through these well-known IDs rather
// than real addresses. Never call these outside a BPF program context.
pub const map_lookup_elem = @intToPtr(fn (map: *const kern.MapDef, key: ?*const c_void) ?*c_void, 1);
pub const map_update_elem = @intToPtr(fn (map: *const kern.MapDef, key: ?*const c_void, value: ?*const c_void, flags: u64) c_long, 2);
pub const map_delete_elem = @intToPtr(fn (map: *const kern.MapDef, key: ?*const c_void) c_long, 3);
pub const probe_read = @intToPtr(fn (dst: ?*c_void, size: u32, unsafe_ptr: ?*const c_void) c_long, 4);
pub const ktime_get_ns = @intToPtr(fn () u64, 5);
pub const trace_printk = @intToPtr(fn (fmt: [*:0]const u8, fmt_size: u32, arg1: u64, arg2: u64, arg3: u64) c_long, 6);
pub const get_prandom_u32 = @intToPtr(fn () u32, 7);
pub const get_smp_processor_id = @intToPtr(fn () u32, 8);
pub const skb_store_bytes = @intToPtr(fn (skb: *kern.SkBuff, offset: u32, from: ?*const c_void, len: u32, flags: u64) c_long, 9);
pub const l3_csum_replace = @intToPtr(fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, size: u64) c_long, 10);
pub const l4_csum_replace = @intToPtr(fn (skb: *kern.SkBuff, offset: u32, from: u64, to: u64, flags: u64) c_long, 11);
pub const tail_call = @intToPtr(fn (ctx: ?*c_void, prog_array_map: *const kern.MapDef, index: u32) c_long, 12);
pub const clone_redirect = @intToPtr(fn (skb: *kern.SkBuff, ifindex: u32, flags: u64) c_long, 13);
pub const get_current_pid_tgid = @intToPtr(fn () u64, 14);
pub const get_current_uid_gid = @intToPtr(fn () u64, 15);
pub const get_current_comm = @intToPtr(fn (buf: ?*c_void, size_of_buf: u32) c_long, 16);
pub const get_cgroup_classid = @intToPtr(fn (skb: *kern.SkBuff) u32, 17);
// Note vlan_proto is big endian
pub const skb_vlan_push = @intToPtr(fn (skb: *kern.SkBuff, vlan_proto: u16, vlan_tci: u16) c_long, 18);
pub const skb_vlan_pop = @intToPtr(fn (skb: *kern.SkBuff) c_long, 19);
pub const skb_get_tunnel_key = @intToPtr(fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, 20);
pub const skb_set_tunnel_key = @intToPtr(fn (skb: *kern.SkBuff, key: *kern.TunnelKey, size: u32, flags: u64) c_long, 21);
pub const perf_event_read = @intToPtr(fn (map: *const kern.MapDef, flags: u64) u64, 22);
pub const redirect = @intToPtr(fn (ifindex: u32, flags: u64) c_long, 23);
pub const get_route_realm = @intToPtr(fn (skb: *kern.SkBuff) u32, 24);
pub const perf_event_output = @intToPtr(fn (ctx: ?*c_void, map: *const kern.MapDef, flags: u64, data: ?*c_void, size: u64) c_long, 25);
pub const skb_load_bytes = @intToPtr(fn (skb: ?*c_void, offset: u32, to: ?*c_void, len: u32) c_long, 26);
pub const get_stackid = @intToPtr(fn (ctx: ?*c_void, map: *const kern.MapDef, flags: u64) c_long, 27);
// from and to point to __be32
pub const csum_diff = @intToPtr(fn (from: *u32, from_size: u32, to: *u32, to_size: u32, seed: u32) i64, 28);
pub const skb_get_tunnel_opt = @intToPtr(fn (skb: *kern.SkBuff, opt: ?*c_void, size: u32) c_long, 29);
pub const skb_set_tunnel_opt = @intToPtr(fn (skb: *kern.SkBuff, opt: ?*c_void, size: u32) c_long, 30);
// proto is __be16
pub const skb_change_proto = @intToPtr(fn (skb: *kern.SkBuff, proto: u16, flags: u64) c_long, 31);
pub const skb_change_type = @intToPtr(fn (skb: *kern.SkBuff, skb_type: u32) c_long, 32);
pub const skb_under_cgroup = @intToPtr(fn (skb: *kern.SkBuff, map: ?*const c_void, index: u32) c_long, 33);
pub const get_hash_recalc = @intToPtr(fn (skb: *kern.SkBuff) u32, 34);
pub const get_current_task = @intToPtr(fn () u64, 35);
pub const probe_write_user = @intToPtr(fn (dst: ?*c_void, src: ?*const c_void, len: u32) c_long, 36);
pub const current_task_under_cgroup = @intToPtr(fn (map: *const kern.MapDef, index: u32) c_long, 37);
pub const skb_change_tail = @intToPtr(fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, 38);
pub const skb_pull_data = @intToPtr(fn (skb: *kern.SkBuff, len: u32) c_long, 39);
pub const csum_update = @intToPtr(fn (skb: *kern.SkBuff, csum: u32) i64, 40);
pub const set_hash_invalid = @intToPtr(fn (skb: *kern.SkBuff) void, 41);
pub const get_numa_node_id = @intToPtr(fn () c_long, 42);
pub const skb_change_head = @intToPtr(fn (skb: *kern.SkBuff, len: u32, flags: u64) c_long, 43);
pub const xdp_adjust_head = @intToPtr(fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 44);
pub const probe_read_str = @intToPtr(fn (dst: ?*c_void, size: u32, unsafe_ptr: ?*const c_void) c_long, 45);
pub const get_socket_cookie = @intToPtr(fn (ctx: ?*c_void) u64, 46);
pub const get_socket_uid = @intToPtr(fn (skb: *kern.SkBuff) u32, 47);
pub const set_hash = @intToPtr(fn (skb: *kern.SkBuff, hash: u32) c_long, 48);
pub const setsockopt = @intToPtr(fn (bpf_socket: *kern.SockOps, level: c_int, optname: c_int, optval: ?*c_void, optlen: c_int) c_long, 49);
pub const skb_adjust_room = @intToPtr(fn (skb: *kern.SkBuff, len_diff: i32, mode: u32, flags: u64) c_long, 50);
pub const redirect_map = @intToPtr(fn (map: *const kern.MapDef, key: u32, flags: u64) c_long, 51);
pub const sk_redirect_map = @intToPtr(fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: u32, flags: u64) c_long, 52);
pub const sock_map_update = @intToPtr(fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*c_void, flags: u64) c_long, 53);
pub const xdp_adjust_meta = @intToPtr(fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 54);
pub const perf_event_read_value = @intToPtr(fn (map: *const kern.MapDef, flags: u64, buf: *kern.PerfEventValue, buf_size: u32) c_long, 55);
pub const perf_prog_read_value = @intToPtr(fn (ctx: *kern.PerfEventData, buf: *kern.PerfEventValue, buf_size: u32) c_long, 56);
pub const getsockopt = @intToPtr(fn (bpf_socket: ?*c_void, level: c_int, optname: c_int, optval: ?*c_void, optlen: c_int) c_long, 57);
pub const override_return = @intToPtr(fn (regs: *PtRegs, rc: u64) c_long, 58);
pub const sock_ops_cb_flags_set = @intToPtr(fn (bpf_sock: *kern.SockOps, argval: c_int) c_long, 59);
pub const msg_redirect_map = @intToPtr(fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: u32, flags: u64) c_long, 60);
pub const msg_apply_bytes = @intToPtr(fn (msg: *kern.SkMsgMd, bytes: u32) c_long, 61);
pub const msg_cork_bytes = @intToPtr(fn (msg: *kern.SkMsgMd, bytes: u32) c_long, 62);
pub const msg_pull_data = @intToPtr(fn (msg: *kern.SkMsgMd, start: u32, end: u32, flags: u64) c_long, 63);
pub const bind = @intToPtr(fn (ctx: *kern.BpfSockAddr, addr: *kern.SockAddr, addr_len: c_int) c_long, 64);
pub const xdp_adjust_tail = @intToPtr(fn (xdp_md: *kern.XdpMd, delta: c_int) c_long, 65);
pub const skb_get_xfrm_state = @intToPtr(fn (skb: *kern.SkBuff, index: u32, xfrm_state: *kern.XfrmState, size: u32, flags: u64) c_long, 66);
pub const get_stack = @intToPtr(fn (ctx: ?*c_void, buf: ?*c_void, size: u32, flags: u64) c_long, 67);
pub const skb_load_bytes_relative = @intToPtr(fn (skb: ?*const c_void, offset: u32, to: ?*c_void, len: u32, start_header: u32) c_long, 68);
pub const fib_lookup = @intToPtr(fn (ctx: ?*c_void, params: *kern.FibLookup, plen: c_int, flags: u32) c_long, 69);
pub const sock_hash_update = @intToPtr(fn (skops: *kern.SockOps, map: *const kern.MapDef, key: ?*c_void, flags: u64) c_long, 70);
pub const msg_redirect_hash = @intToPtr(fn (msg: *kern.SkMsgMd, map: *const kern.MapDef, key: ?*c_void, flags: u64) c_long, 71);
pub const sk_redirect_hash = @intToPtr(fn (skb: *kern.SkBuff, map: *const kern.MapDef, key: ?*c_void, flags: u64) c_long, 72);
pub const lwt_push_encap = @intToPtr(fn (skb: *kern.SkBuff, typ: u32, hdr: ?*c_void, len: u32) c_long, 73);
pub const lwt_seg6_store_bytes = @intToPtr(fn (skb: *kern.SkBuff, offset: u32, from: ?*const c_void, len: u32) c_long, 74);
pub const lwt_seg6_adjust_srh = @intToPtr(fn (skb: *kern.SkBuff, offset: u32, delta: i32) c_long, 75);
pub const lwt_seg6_action = @intToPtr(fn (skb: *kern.SkBuff, action: u32, param: ?*c_void, param_len: u32) c_long, 76);
pub const rc_repeat = @intToPtr(fn (ctx: ?*c_void) c_long, 77);
pub const rc_keydown = @intToPtr(fn (ctx: ?*c_void, protocol: u32, scancode: u64, toggle: u32) c_long, 78);
pub const skb_cgroup_id = @intToPtr(fn (skb: *kern.SkBuff) u64, 79);
pub const get_current_cgroup_id = @intToPtr(fn () u64, 80);
pub const get_local_storage = @intToPtr(fn (map: ?*c_void, flags: u64) ?*c_void, 81);
pub const sk_select_reuseport = @intToPtr(fn (reuse: *kern.SkReusePortMd, map: *const kern.MapDef, key: ?*c_void, flags: u64) c_long, 82);
pub const skb_ancestor_cgroup_id = @intToPtr(fn (skb: *kern.SkBuff, ancestor_level: c_int) u64, 83);
pub const sk_lookup_tcp = @intToPtr(fn (ctx: ?*c_void, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 84);
pub const sk_lookup_udp = @intToPtr(fn (ctx: ?*c_void, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 85);
pub const sk_release = @intToPtr(fn (sock: *kern.Sock) c_long, 86);
// Stack/queue map operations.
pub const map_push_elem = @intToPtr(fn (map: *const kern.MapDef, value: ?*const c_void, flags: u64) c_long, 87);
pub const map_pop_elem = @intToPtr(fn (map: *const kern.MapDef, value: ?*c_void) c_long, 88);
pub const map_peek_elem = @intToPtr(fn (map: *const kern.MapDef, value: ?*c_void) c_long, 89);
pub const msg_push_data = @intToPtr(fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, 90);
pub const msg_pop_data = @intToPtr(fn (msg: *kern.SkMsgMd, start: u32, len: u32, flags: u64) c_long, 91);
pub const rc_pointer_rel = @intToPtr(fn (ctx: ?*c_void, rel_x: i32, rel_y: i32) c_long, 92);
pub const spin_lock = @intToPtr(fn (lock: *kern.SpinLock) c_long, 93);
pub const spin_unlock = @intToPtr(fn (lock: *kern.SpinLock) c_long, 94);
pub const sk_fullsock = @intToPtr(fn (sk: *kern.Sock) ?*SkFullSock, 95);
pub const tcp_sock = @intToPtr(fn (sk: *kern.Sock) ?*kern.TcpSock, 96);
pub const skb_ecn_set_ce = @intToPtr(fn (skb: *kern.SkBuff) c_long, 97);
pub const get_listener_sock = @intToPtr(fn (sk: *kern.Sock) ?*kern.Sock, 98);
pub const skc_lookup_tcp = @intToPtr(fn (ctx: ?*c_void, tuple: *kern.SockTuple, tuple_size: u32, netns: u64, flags: u64) ?*kern.Sock, 99);
pub const tcp_check_syncookie = @intToPtr(fn (sk: *kern.Sock, iph: ?*c_void, iph_len: u32, th: *TcpHdr, th_len: u32) c_long, 100);
// sysctl access helpers.
pub const sysctl_get_name = @intToPtr(fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong, flags: u64) c_long, 101);
pub const sysctl_get_current_value = @intToPtr(fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, 102);
pub const sysctl_get_new_value = @intToPtr(fn (ctx: *kern.SysCtl, buf: ?*u8, buf_len: c_ulong) c_long, 103);
pub const sysctl_set_new_value = @intToPtr(fn (ctx: *kern.SysCtl, buf: ?*const u8, buf_len: c_ulong) c_long, 104);
pub const strtol = @intToPtr(fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_long) c_long, 105);
pub const strtoul = @intToPtr(fn (buf: *const u8, buf_len: c_ulong, flags: u64, res: *c_ulong) c_long, 106);
pub const sk_storage_get = @intToPtr(fn (map: *const kern.MapDef, sk: *kern.Sock, value: ?*c_void, flags: u64) ?*c_void, 107);
pub const sk_storage_delete = @intToPtr(fn (map: *const kern.MapDef, sk: *kern.Sock) c_long, 108);
pub const send_signal = @intToPtr(fn (sig: u32) c_long, 109);
pub const tcp_gen_syncookie = @intToPtr(fn (sk: *kern.Sock, iph: ?*c_void, iph_len: u32, th: *TcpHdr, th_len: u32) i64, 110);
pub const skb_output = @intToPtr(fn (ctx: ?*c_void, map: *const kern.MapDef, flags: u64, data: ?*c_void, size: u64) c_long, 111);
pub const probe_read_user = @intToPtr(fn (dst: ?*c_void, size: u32, unsafe_ptr: ?*const c_void) c_long, 112);
pub const probe_read_kernel = @intToPtr(fn (dst: ?*c_void, size: u32, unsafe_ptr: ?*const c_void) c_long, 113);
pub const probe_read_user_str = @intToPtr(fn (dst: ?*c_void, size: u32, unsafe_ptr: ?*const c_void) c_long, 114);
pub const probe_read_kernel_str = @intToPtr(fn (dst: ?*c_void, size: u32, unsafe_ptr: ?*const c_void) c_long, 115);
pub const tcp_send_ack = @intToPtr(fn (tp: ?*c_void, rcv_nxt: u32) c_long, 116);
pub const send_signal_thread = @intToPtr(fn (sig: u32) c_long, 117);
pub const jiffies64 = @intToPtr(fn () u64, 118);
pub const read_branch_records = @intToPtr(fn (ctx: *kern.PerfEventData, buf: ?*c_void, size: u32, flags: u64) c_long, 119);
pub const get_ns_current_pid_tgid = @intToPtr(fn (dev: u64, ino: u64, nsdata: *kern.PidNsInfo, size: u32) c_long, 120);
pub const xdp_output = @intToPtr(fn (ctx: ?*c_void, map: *const kern.MapDef, flags: u64, data: ?*c_void, size: u64) c_long, 121);
pub const get_netns_cookie = @intToPtr(fn (ctx: ?*c_void) u64, 122);
pub const get_current_ancestor_cgroup_id = @intToPtr(fn (ancestor_level: c_int) u64, 123);
pub const sk_assign = @intToPtr(fn (skb: *kern.SkBuff, sk: *kern.Sock, flags: u64) c_long, 124);
pub const ktime_get_boot_ns = @intToPtr(fn () u64, 125);
pub const seq_printf = @intToPtr(fn (m: *kern.SeqFile, fmt: ?*const u8, fmt_size: u32, data: ?*const c_void, data_len: u32) c_long, 126);
pub const seq_write = @intToPtr(fn (m: *kern.SeqFile, data: ?*const u8, len: u32) c_long, 127);
pub const sk_cgroup_id = @intToPtr(fn (sk: *kern.BpfSock) u64, 128);
pub const sk_ancestor_cgroup_id = @intToPtr(fn (sk: *kern.BpfSock, ancestor_level: c_long) u64, 129);
// BPF ring buffer helpers.
pub const ringbuf_output = @intToPtr(fn (ringbuf: ?*c_void, data: ?*c_void, size: u64, flags: u64) ?*c_void, 130);
pub const ringbuf_reserve = @intToPtr(fn (ringbuf: ?*c_void, size: u64, flags: u64) ?*c_void, 131);
pub const ringbuf_submit = @intToPtr(fn (data: ?*c_void, flags: u64) void, 132);
pub const ringbuf_discard = @intToPtr(fn (data: ?*c_void, flags: u64) void, 133);
pub const ringbuf_query = @intToPtr(fn (ringbuf: ?*c_void, flags: u64) u64, 134);
pub const csum_level = @intToPtr(fn (skb: *kern.SkBuff, level: u64) c_long, 135);
// Socket-cast helpers: downcast a generic socket pointer to a concrete type.
pub const skc_to_tcp6_sock = @intToPtr(fn (sk: ?*c_void) ?*kern.Tcp6Sock, 136);
pub const skc_to_tcp_sock = @intToPtr(fn (sk: ?*c_void) ?*kern.TcpSock, 137);
pub const skc_to_tcp_timewait_sock = @intToPtr(fn (sk: ?*c_void) ?*kern.TcpTimewaitSock, 138);
pub const skc_to_tcp_request_sock = @intToPtr(fn (sk: ?*c_void) ?*kern.TcpRequestSock, 139);
pub const skc_to_udp6_sock = @intToPtr(fn (sk: ?*c_void) ?*kern.Udp6Sock, 140);
pub const get_task_stack = @intToPtr(fn (task: ?*c_void, buf: ?*c_void, size: u32, flags: u64) c_long, 141);
|
lib/std/os/linux/bpf/helpers.zig
|
const sf = struct {
    pub usingnamespace @import("../sfml.zig");
    pub usingnamespace sf.system;
    pub usingnamespace sf.graphics;
};

const RectangleShape = @This();

// Constructor/destructor

/// Creates a rectangle shape with a size. The rectangle will be white
pub fn create(size: sf.Vector2f) !RectangleShape {
    var rect = sf.c.sfRectangleShape_create();
    if (rect == null)
        return sf.Error.nullptrUnknownReason;
    sf.c.sfRectangleShape_setFillColor(rect, sf.c.sfWhite);
    sf.c.sfRectangleShape_setSize(rect, size._toCSFML());
    return RectangleShape{ ._ptr = rect.? };
}
/// Destroys a rectangle shape
pub fn destroy(self: *RectangleShape) void {
    sf.c.sfRectangleShape_destroy(self._ptr);
}

// Draw function

/// Draws this shape on the given render target (RenderWindow or RenderTexture).
pub fn sfDraw(self: RectangleShape, window: anytype, states: ?*sf.c.sfRenderStates) void {
    switch (@TypeOf(window)) {
        sf.RenderWindow => sf.c.sfRenderWindow_drawRectangleShape(window._ptr, self._ptr, states),
        sf.RenderTexture => sf.c.sfRenderTexture_drawRectangleShape(window._ptr, self._ptr, states),
        else => @compileError("window must be a render target"),
    }
}

// Getters/setters

/// Gets the fill color of this rectangle shape
pub fn getFillColor(self: RectangleShape) sf.Color {
    return sf.Color._fromCSFML(sf.c.sfRectangleShape_getFillColor(self._ptr));
}
/// Sets the fill color of this rectangle shape
pub fn setFillColor(self: *RectangleShape, color: sf.Color) void {
    sf.c.sfRectangleShape_setFillColor(self._ptr, color._toCSFML());
}
/// Gets the outline color of this rectangle shape
pub fn getOutlineColor(self: RectangleShape) sf.Color {
    return sf.Color._fromCSFML(sf.c.sfRectangleShape_getOutlineColor(self._ptr));
}
/// Sets the outline color of this rectangle shape
pub fn setOutlineColor(self: *RectangleShape, color: sf.Color) void {
    sf.c.sfRectangleShape_setOutlineColor(self._ptr, color._toCSFML());
}
/// Gets the outline thickness of this rectangle shape
pub fn getOutlineThickness(self: RectangleShape) f32 {
    return sf.c.sfRectangleShape_getOutlineThickness(self._ptr);
}
/// Sets the outline thickness of this rectangle shape
pub fn setOutlineThickness(self: *RectangleShape, thickness: f32) void {
    sf.c.sfRectangleShape_setOutlineThickness(self._ptr, thickness);
}
/// Gets the size of this rectangle shape
pub fn getSize(self: RectangleShape) sf.Vector2f {
    return sf.Vector2f._fromCSFML(sf.c.sfRectangleShape_getSize(self._ptr));
}
/// Sets the size of this rectangle shape
pub fn setSize(self: *RectangleShape, size: sf.Vector2f) void {
    sf.c.sfRectangleShape_setSize(self._ptr, size._toCSFML());
}
/// Gets the position of this rectangle shape
pub fn getPosition(self: RectangleShape) sf.Vector2f {
    return sf.Vector2f._fromCSFML(sf.c.sfRectangleShape_getPosition(self._ptr));
}
/// Sets the position of this rectangle shape
pub fn setPosition(self: *RectangleShape, pos: sf.Vector2f) void {
    sf.c.sfRectangleShape_setPosition(self._ptr, pos._toCSFML());
}
/// Adds the offset to this shape's position
pub fn move(self: *RectangleShape, offset: sf.Vector2f) void {
    sf.c.sfRectangleShape_move(self._ptr, offset._toCSFML());
}
/// Gets the origin of this rectangle shape
pub fn getOrigin(self: RectangleShape) sf.Vector2f {
    return sf.Vector2f._fromCSFML(sf.c.sfRectangleShape_getOrigin(self._ptr));
}
/// Sets the origin of this rectangle shape
pub fn setOrigin(self: *RectangleShape, origin: sf.Vector2f) void {
    sf.c.sfRectangleShape_setOrigin(self._ptr, origin._toCSFML());
}
/// Gets the rotation of this rectangle shape
pub fn getRotation(self: RectangleShape) f32 {
    return sf.c.sfRectangleShape_getRotation(self._ptr);
}
/// Sets the rotation of this rectangle shape
pub fn setRotation(self: *RectangleShape, angle: f32) void {
    sf.c.sfRectangleShape_setRotation(self._ptr, angle);
}
/// Rotates this shape by a given amount
pub fn rotate(self: *RectangleShape, angle: f32) void {
    sf.c.sfRectangleShape_rotate(self._ptr, angle);
}
/// Gets the texture of this shape
pub fn getTexture(self: RectangleShape) ?sf.Texture {
    const t = sf.c.sfRectangleShape_getTexture(self._ptr);
    if (t) |tex| {
        return sf.Texture{ ._const_ptr = tex };
    } else return null;
}
/// Sets the texture of this shape
pub fn setTexture(self: *RectangleShape, texture: ?sf.Texture) void {
    var tex = if (texture) |t| t._get() else null;
    sf.c.sfRectangleShape_setTexture(self._ptr, tex, 0);
}
/// Gets the sub-rectangle of the texture that the shape will display
pub fn getTextureRect(self: RectangleShape) sf.FloatRect {
    return sf.FloatRect._fromCSFML(sf.c.sfRectangleShape_getTextureRect(self._ptr));
}
/// Sets the sub-rectangle of the texture that the shape will display
pub fn setTextureRect(self: *RectangleShape, rect: sf.FloatRect) void {
    // Bug fix: this previously called the getter (sfRectangleShape_getTextureRect),
    // so the rect was never applied.
    sf.c.sfRectangleShape_setTextureRect(self._ptr, rect._toCSFML());
}
/// Gets the bounds in the local coordinates system
pub fn getLocalBounds(self: RectangleShape) sf.FloatRect {
    return sf.FloatRect._fromCSFML(sf.c.sfRectangleShape_getLocalBounds(self._ptr));
}
/// Gets the bounds in the global coordinates
pub fn getGlobalBounds(self: RectangleShape) sf.FloatRect {
    return sf.FloatRect._fromCSFML(sf.c.sfRectangleShape_getGlobalBounds(self._ptr));
}

/// Pointer to the csfml structure
_ptr: *sf.c.sfRectangleShape,

test "rectangle shape: sane getters and setters" {
    const tst = @import("std").testing;
    var rect = try RectangleShape.create(sf.Vector2f{ .x = 30, .y = 50 });
    defer rect.destroy();
    try tst.expectEqual(sf.Vector2f{ .x = 30, .y = 50 }, rect.getSize());
    rect.setFillColor(sf.Color.Yellow);
    rect.setOutlineColor(sf.Color.Red);
    rect.setOutlineThickness(3);
    rect.setSize(.{ .x = 15, .y = 510 });
    rect.setRotation(15);
    rect.setPosition(.{ .x = 1, .y = 2 });
    rect.setOrigin(.{ .x = 20, .y = 25 });
    rect.setTexture(null); //Weirdly, getTexture if texture wasn't set gives a wrong pointer
    try tst.expectEqual(sf.Color.Yellow, rect.getFillColor());
    try tst.expectEqual(sf.Color.Red, rect.getOutlineColor());
    try tst.expectEqual(@as(f32, 3), rect.getOutlineThickness());
    try tst.expectEqual(sf.Vector2f{ .x = 15, .y = 510 }, rect.getSize());
    try tst.expectEqual(@as(f32, 15), rect.getRotation());
    try tst.expectEqual(sf.Vector2f{ .x = 1, .y = 2 }, rect.getPosition());
    try tst.expectEqual(sf.Vector2f{ .x = 20, .y = 25 }, rect.getOrigin());
    try tst.expectEqual(@as(?sf.Texture, null), rect.getTexture());
    rect.rotate(5);
    rect.move(.{ .x = -5, .y = 5 });
    try tst.expectEqual(@as(f32, 20), rect.getRotation());
    try tst.expectEqual(sf.Vector2f{ .x = -4, .y = 7 }, rect.getPosition());
}
|
src/sfml/graphics/RectangleShape.zig
|
const std = @import("std");
pub const clua = @cImport({
@cInclude("lua.h");
// @cInclude("lauxlib.h");
// @cInclude("lualib.h");
});
/// Wrapper around a `lua_State` whose allocations are serviced by a Zig
/// allocator (via `luaAlloc`) instead of C malloc.
pub const Lua = struct {
    L: *clua.lua_State,
    // Heap-allocated copy of the allocator; see init() for why it is boxed.
    allocator: *std.mem.Allocator,

    /// Creates a new Lua state. Returns error.OutOfMemory if the state (or
    /// the boxed allocator) cannot be allocated.
    pub fn init(allocator: std.mem.Allocator) !Lua {
        // Even if the Lua struct is moved around, we want the allocator to be
        // in the same place. So we allocate memory for it!
        var staticAllocator = try allocator.create(std.mem.Allocator);
        staticAllocator.* = allocator;
        errdefer allocator.destroy(staticAllocator);
        var L = clua.lua_newstate(luaAlloc, staticAllocator) orelse return error.OutOfMemory;
        return Lua{
            .L = L,
            .allocator = staticAllocator,
        };
    }

    /// Function used as the Lua state allocator function, bridging between the Lua
    /// allocator function contract and Zig allocators.
    /// `ud` is the boxed Allocator passed to lua_newstate in init().
    fn luaAlloc(ud: ?*anyopaque, ptr: ?*anyopaque, osize: usize, nsize: usize) callconv(.C) ?*anyopaque {
        // ?*anyopaque has an alignment of 1, but the Allocator struct has a larger
        // alignment. We know that ud is a pointer to an Allocator (as passed in
        // new()) and so this is okay.
        const allocator_aligned = @alignCast(@alignOf(std.mem.Allocator), ud);
        const allocator = @ptrCast(*std.mem.Allocator, allocator_aligned);
        // malloc() in C guarantees valid alignment for any type, so we must match
        // that guarantee.
        //
        // This method of determining the required alignment is from here:
        // https://github.com/ziglang/zig/blob/2117fbdae35dddf368c4ce5bb39cc73fa0f78d4c/lib/include/__stddef_max_align_t.h
        const alignment = @alignOf(extern struct {
            one: c_longlong,
            two: c_longdouble,
        });
        // The memory pointed to by ptr was allocated by this function and so has
        // the alignment we are using.
        const ptr_aligned = @alignCast(alignment, ptr);
        // We cast the pointer to a multiple-item pointer so that it can be sliced
        // to be passed to the allocator, since we are allocating slices of u8 in
        // the first place.
        const ptr_multi = @ptrCast(?[*]align(alignment) u8, ptr_aligned);
        if (ptr_multi) |previous_ptr| {
            const previous_block = previous_ptr[0..osize];
            if (nsize <= osize) {
                // Shrinking or freeing (if nsize == 0) a block.
                // shrink() is used instead of realloc() because Lua assumes that
                // shrinking never fails.
                // NOTE(review): the lua_Alloc contract says a free (nsize == 0)
                // should return NULL; this returns the zero-length slice's ptr
                // instead — confirm Lua tolerates a non-NULL result here.
                const new_block = allocator.shrink(previous_block, nsize);
                return new_block.ptr;
            } else {
                // Expanding a block.
                const new_block = allocator.realloc(previous_block, nsize) catch return null;
                return new_block.ptr;
            }
        } else {
            // Allocating a new block. Returning null signals allocation
            // failure to Lua.
            const allocated_block = allocator.allocAdvanced(u8, alignment, nsize, .exact) catch return null;
            return allocated_block.ptr;
        }
    }
};
|
src/lua.zig
|
const std = @import("std");
const ecs = @import("ecs");
const Registry = @import("ecs").Registry;
// Plain-old-data component types used by the tests below.
const Velocity = struct { x: f32, y: f32 };
const Position = struct { x: f32 = 0, y: f32 = 0 };
// Zero-sized tag component.
const Empty = struct {};
// Aggregate component composed of the smaller components above.
const BigOne = struct { pos: Position, vel: Velocity, accel: Velocity };
test "entity traits" {
    // Smoke test: the large entity-traits configuration must initialise.
    _ = ecs.EntityTraitsType(.large).init();
}
test "Registry" {
    var registry = Registry.init(std.testing.allocator);
    defer registry.deinit();

    // Attach components three different ways: typed batch, then a value add.
    const entity = registry.create();
    registry.addTypes(entity, .{ Empty, Position });
    registry.add(entity, BigOne{
        .pos = Position{ .x = 5, .y = 5 },
        .vel = Velocity{ .x = 5, .y = 5 },
        .accel = Velocity{ .x = 5, .y = 5 },
    });

    try std.testing.expect(registry.has(Empty, entity));
    try std.testing.expect(registry.has(Position, entity));
    try std.testing.expect(registry.has(BigOne, entity));

    // The only live entity the iterator yields is the one we created.
    var it = registry.entities();
    while (it.next()) |e| try std.testing.expectEqual(entity, e);

    registry.remove(Empty, entity);
    try std.testing.expect(!registry.has(Empty, entity));
}
test "context get/set/unset" {
    var registry = Registry.init(std.testing.allocator);
    defer registry.deinit();

    // No context registered yet for Position.
    try std.testing.expectEqual(registry.getContext(Position), null);

    // Setting a context makes it retrievable by its type.
    var position = Position{ .x = 5, .y = 5 };
    registry.setContext(&position);
    try std.testing.expectEqual(registry.getContext(Position).?, &position);

    // Unsetting clears it again.
    registry.unsetContext(Position);
    try std.testing.expectEqual(registry.getContext(Position), null);
}
// this test should fail
test "context not pointer" {
    var reg = Registry.init(std.testing.allocator);
    defer reg.deinit();
    var pos = Position{ .x = 5, .y = 5 };
    _ = pos;
    // Passing the value (not a pointer) is expected to be rejected at compile
    // time, so the call stays commented out — it cannot run as part of the suite.
    // reg.setContext(pos);
}
// Same coverage as "context get/set/unset" above, but with a locally-declared
// type, verifying that context storage is keyed by type identity.
// (Renamed: this test previously duplicated the name of the earlier context
// test, making failures ambiguous in test output.)
test "context get/set/unset with custom type" {
    const SomeType = struct { dummy: u1 };
    var reg = Registry.init(std.testing.allocator);
    defer reg.deinit();
    var ctx = reg.getContext(SomeType);
    try std.testing.expectEqual(ctx, null);
    var pos = SomeType{ .dummy = 0 };
    reg.setContext(&pos);
    ctx = reg.getContext(SomeType);
    try std.testing.expectEqual(ctx.?, &pos);
    reg.unsetContext(SomeType);
    ctx = reg.getContext(SomeType);
    try std.testing.expectEqual(ctx, null);
}
test "singletons" {
    var registry = Registry.init(std.testing.allocator);
    defer registry.deinit();

    // A singleton is stored by value and retrieved by type.
    const position = Position{ .x = 5, .y = 5 };
    registry.singletons.add(position);
    try std.testing.expect(registry.singletons.has(Position));
    try std.testing.expectEqual(registry.singletons.get(Position).*, position);

    registry.singletons.remove(Position);
    try std.testing.expect(!registry.singletons.has(Position));
}
test "destroy" {
    var registry = Registry.init(std.testing.allocator);
    defer registry.deinit();

    // Create 255 entities, each tagged with a Position matching its index.
    var idx: u8 = 0;
    while (idx < 255) : (idx += 1) {
        const entity = registry.create();
        registry.add(entity, Position{ .x = @intToFloat(f32, idx), .y = @intToFloat(f32, idx) });
    }

    registry.destroy(3);
    registry.destroy(4);

    // Every surviving entity in 0..5 still carries its original Position.
    idx = 0;
    while (idx < 6) : (idx += 1) {
        if (idx != 3 and idx != 4)
            try std.testing.expectEqual(Position{ .x = @intToFloat(f32, idx), .y = @intToFloat(f32, idx) }, registry.getConst(Position, idx));
    }
}
test "remove all" {
    var registry = Registry.init(std.testing.allocator);
    defer registry.deinit();

    const entity = registry.create();
    registry.add(entity, Position{ .x = 1, .y = 1 });
    registry.addTyped(u32, entity, 666);
    try std.testing.expect(registry.has(Position, entity));
    try std.testing.expect(registry.has(u32, entity));

    // removeAll strips every component from the entity at once.
    registry.removeAll(entity);
    try std.testing.expect(!registry.has(Position, entity));
    try std.testing.expect(!registry.has(u32, entity));
}
|
tests/registry_test.zig
|
const std = @import("std");
const stdx = @import("stdx");
const t = stdx.testing;
const log = stdx.log.scoped(.meta);
/// Compile-time guard: T must be some flavour of pointer (single-item,
/// many-item, slice, ...).
pub fn assertPointerType(comptime T: type) void {
    switch (@typeInfo(T)) {
        .Pointer => {},
        else => @compileError("Expected Pointer type."),
    }
}
/// Compile-time guard: T must be a function type.
pub fn assertFunctionType(comptime T: type) void {
    switch (@typeInfo(T)) {
        .Fn => {},
        else => @compileError("Expected Function type."),
    }
}
/// Returns true when `Func` declares exactly the same parameter list as
/// `ExpFunc`. Only parameters are compared — return types are ignored.
pub fn hasFunctionSignature(comptime ExpFunc: type, comptime Func: type) bool {
    const expected = FnParams(ExpFunc);
    const actual = FnParams(Func);
    if (expected.len != actual.len) {
        return false;
    }
    // Bug fix: std.mem.eql compares elements with `==`, which is not defined
    // for the FnArg struct and fails to compile when instantiated. Compare
    // field-wise with std.meta.eql instead.
    inline for (expected) |param, i| {
        if (!std.meta.eql(param, actual[i])) return false;
    }
    return true;
}
/// True only when Fn's type-info tag is .Fn.
pub fn isFunc(comptime Fn: type) bool {
    return switch (@typeInfo(Fn)) {
        .Fn => true,
        else => false,
    };
}
/// Returns the type of the parameter at `idx` (0-based). Emits a compile
/// error when `Fn` declares fewer than `idx + 1` parameters.
pub fn FnParamAt(comptime Fn: type, comptime idx: u32) type {
    assertFunctionType(Fn);
    const Params = comptime FnParams(Fn);
    if (Params.len <= idx) {
        @compileError(std.fmt.comptimePrint("Expected {} params for function.", .{idx + 1}));
    }
    // arg_type is null for generic (anytype) params; the unwrap asserts the
    // parameter type is statically known.
    return Params[idx].arg_type.?;
}
/// Tuple type matching a function's parameter list (re-export of std.meta).
pub const FnParamsTuple = std.meta.ArgsTuple;
/// Parameter descriptors of `Fn`, straight from @typeInfo.
pub fn FnParams(comptime Fn: type) []const std.builtin.TypeInfo.FnArg {
    return @typeInfo(Fn).Fn.args;
}
/// Number of parameters `Fn` declares.
pub fn FnNumParams(comptime Fn: type) u32 {
    return @typeInfo(Fn).Fn.args.len;
}
/// Declared return type of `Fn`; the unwrap fails at comptime if the return
/// type is not statically known (generic/inferred).
pub fn FnReturn(comptime Fn: type) type {
    return @typeInfo(Fn).Fn.return_type.?;
}
/// Builds a new function type equal to `Fn` but with `Param` prepended as the
/// first parameter. The calling convention is reset to .Unspecified and the
/// return type is preserved.
pub fn FnWithPrefixParam(comptime Fn: type, comptime Param: type) type {
    assertFunctionType(Fn);
    // NOTE(review): this mixes std.builtin.Type.Fn.Param with the older
    // `arg_type` field name; it only compiles on std versions where both
    // exist — confirm against the targeted Zig release.
    const FnParam = std.builtin.Type.Fn.Param{
        .is_generic = false,
        .is_noalias = false,
        .arg_type = Param,
    };
    return @Type(.{
        .Fn = .{
            .calling_convention = .Unspecified,
            .alignment = 0,
            .is_generic = false,
            .is_var_args = false,
            .return_type = FnReturn(Fn),
            // Concatenate the new param in front of Fn's existing params.
            .args = &[_]std.builtin.Type.Fn.Param{FnParam} ++ @typeInfo(Fn).Fn.args,
        },
    });
}
/// Builds a function type equal to `Fn` minus its first parameter
/// (e.g. for stripping a context/self argument).
pub fn FnAfterFirstParam(comptime Fn: type) type {
    assertFunctionType(Fn);
    return @Type(.{
        .Fn = .{
            .calling_convention = .Unspecified,
            .alignment = 0,
            .is_generic = false,
            .is_var_args = false,
            .return_type = FnReturn(Fn),
            // Keep every parameter except the first.
            .args = &[_]std.builtin.Type.Fn.Param{} ++ @typeInfo(Fn).Fn.args[1..],
        },
    });
}
/// Type of the struct field selected by the `FieldEnum` value `Field`.
pub fn FieldType(comptime T: type, comptime Field: std.meta.FieldEnum(T)) type {
    return std.meta.fieldInfo(T, Field).field_type;
}
/// Generate a unique type id.
/// This should work in debug and release modes.
pub fn TypeId(comptime T: type) usize {
    // S.Type(T) is instantiated once per distinct T, so every T gets its own
    // copy of `uniq`; that variable's runtime address serves as the id.
    const S = struct {
        fn Type(comptime _: type) type {
            return struct { pub var uniq: u8 = 0; };
        }
    };
    return @ptrToInt(&S.Type(T).uniq);
}
test "typeId" {
    const S = struct {};
    // `a` and `b` alias the same declaration, so their ids must match...
    const a = S;
    const b = S;
    try t.eq(TypeId(a), TypeId(b));
    // ...while a structurally-identical but distinct struct gets a new id.
    try t.neq(TypeId(a), TypeId(struct {}));
}
/// Generate a unique id for an enum literal.
/// This should work in debug and release modes.
pub fn enumLiteralId(comptime T: @Type(.EnumLiteral)) usize {
    // T is discarded, but it still keys the comptime instantiation: each
    // distinct literal produces its own copy of S (and of S.id), so the
    // address below is unique per literal (see the test below).
    _ = T;
    const S = struct {
        pub var id: u8 = 0;
    };
    return @ptrToInt(&S.id);
}
test "enumLiteralId" {
    // Same literal -> same id; different literal -> different id.
    try t.eq(enumLiteralId(.foo), enumLiteralId(.foo));
    try t.neq(enumLiteralId(.foo), enumLiteralId(.bar));
}
/// Number of fields in the (tuple) struct type T.
pub fn TupleLen(comptime T: type) usize {
    const info = @typeInfo(T).Struct;
    return info.fields.len;
}
|
stdx/meta.zig
|
const std = @import("std");
const ChildProcess = std.ChildProcess;
const builtin = std.builtin;
const builtins = @import("builtins.zig");
const Parser = @import("parser.zig").Parser();
// Process environment map; populated by init() and freed by deinit().
pub var env_map: std.BufMap = undefined;
// Raw wait status of the most recently reaped child (set in executeLine).
pub var lastexitcode: u32 = 0;
/// Loads the current process environment into the global `env_map`.
pub fn init(ally: *std.mem.Allocator) !void {
    env_map = try std.process.getEnvMap(ally);
}
/// Frees the environment map created by init().
pub fn deinit() void {
    env_map.deinit();
}
/// REPL entry point: prompt, read one line from stdin, execute it; repeats
/// until EOF, which prints a final newline and exits cleanly.
pub fn main() anyerror!void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const ally = &gpa.allocator;
    // gpa.deinit() reports leaks; the result is intentionally discarded here.
    defer _ = gpa.deinit();
    try init(ally);
    defer deinit();
    const stdout = std.io.getStdOut().writer();
    while (true) {
        // the prompt
        try printPrompt(ally);
        // reading the line
        const stdin = std.io.getStdIn();
        const line = (stdin.reader().readUntilDelimiterAlloc(ally, '\n', std.math.maxInt(usize)) catch |e| switch (e) {
            // EOF (e.g. Ctrl-D): finish the prompt line and leave the REPL.
            error.EndOfStream => {
                try stdout.writeByte('\n');
                break;
            },
            else => return e,
        });
        defer ally.free(line);
        // Empty input: just re-prompt.
        if (line.len < 1) continue;
        // execute the command
        try executeLine(ally, line);
    }
}
/// Parses `line`, handles builtins in-process, and otherwise fork+execs the
/// command, recording the child's wait status in `lastexitcode`.
pub fn executeLine(ally: *std.mem.Allocator, line: []const u8) !void {
    // tokenization of line
    var parser = try Parser.init(ally, line);
    defer parser.deinit();
    // parse the args / handle builtin funcs
    if (parser.tokens.items.len < 1 or try builtins.handleBuiltin(parser.tokens.items, ally)) return;
    // from zig docs:
    // // The POSIX standard does not allow malloc() between fork() and execve(),
    // // and `self.allocator` may be a libc allocator.
    // // I have personally observed the child process deadlocking when it tries
    // // to call malloc() due to a heap allocation between fork() and execve(),
    // // in musl v1.1.24.
    // // Additionally, we want to reduce the number of possible ways things
    // // can fail between fork() and execve().
    // // Therefore, we do all the allocation for the execve() before the fork().
    // // This means we must do the null-termination of argv and env vars here.
    const pid_result = try std.os.fork();
    if (pid_result == 0) {
        // child
        switch (std.process.execv(ally, parser.tokens.items)) {
            error.FileNotFound => try shigError("{s}: file not found", .{parser.tokens.items[0]}),
            error.AccessDenied => try shigError("{s}: Permission denied", .{parser.tokens.items[0]}),
            else => |e| try shigError("{s}: TODO handle more errors", .{@errorName(e)}),
        }
        // Bug fix: execv only returns on failure. Without an explicit exit
        // the child would return into the caller's REPL loop and keep running
        // as a duplicate shell; terminate it with a non-zero status instead.
        std.os.exit(1);
    } else {
        // parent
        const waitpid_results = std.os.waitpid(pid_result, 0);
        lastexitcode = waitpid_results.status;
    }
}
/// Write the colored "<cwd> (shig)>" prompt to stdout.
/// `ally` is used only for the temporary cwd string.
fn printPrompt(ally: *std.mem.Allocator) !void {
    const cwd = try std.process.getCwdAlloc(ally);
    defer ally.free(cwd);
    const out = std.io.getStdOut().writer();
    try out.print("\x1b[34;1m{s} \x1b[32;1m(shig)>\x1b[0m ", .{cwd});
}
/// Report a shell error on stderr, prefixed with "shig: " and
/// terminated with a newline.
pub fn shigError(comptime fmt: []const u8, args: anytype) !void {
    const err_writer = std.io.getStdErr().writer();
    try err_writer.print("shig: " ++ fmt ++ "\n", args);
}
// Pull in builtins.zig so its own test blocks run with this file's tests.
test "builtins" {
    _ = @import("builtins.zig");
}
// Smoke-test executeLine with both a $PATH lookup and an absolute path.
test "exec" {
    try init(std.testing.allocator);
    defer deinit();
    // relative paths in $PATH
    try executeLine(std.testing.allocator, "ls");
    // Absolute path
    var cmd = try std.mem.concat(std.testing.allocator, u8, &.{ std.testing.zig_exe_path, " --help" });
    defer std.testing.allocator.free(cmd);
    try executeLine(std.testing.allocator, cmd);
}
/// returns an owned slice
/// Resolve `prog` to an executable path:
/// - if `prog` contains a '/', it is used as-is after an X_OK check;
/// - otherwise each $PATH directory is searched in order.
/// Returns null when nothing executable matches. Caller frees the result.
pub fn getProgFromPath(allocator: *std.mem.Allocator, prog: []const u8) !?[:0]const u8 {
    if (std.mem.indexOfScalar(u8, prog, '/') != null) {
        // Path-like argument: no $PATH search, just verify it is executable.
        std.os.access(prog, std.os.system.X_OK) catch return null;
        return @as(?[:0]const u8, try allocator.dupeZ(u8, prog));
    }
    const PATH = std.os.getenvZ("PATH") orelse "/usr/local/bin:/bin/:/usr/bin";
    var it = std.mem.tokenize(PATH, ":");
    while (it.next()) |directory| {
        const path = try std.fs.path.joinZ(allocator, &.{ directory, prog });
        // Candidate not executable: free it and try the next directory.
        std.os.access(path, std.os.system.X_OK) catch {
            allocator.free(path);
            continue;
        };
        return path;
    }
    return null;
}
|
src/main.zig
|
const std = @import("std");
const tuple = @import("tuple.zig");
const debug = std.debug;
const fmt = std.fmt;
const math = std.math;
const mem = std.mem;
const testing = std.testing;
pub const Tuple = tuple.Tuple;
/// The result of a successful parse
pub fn Result(comptime T: type) type {
    return struct {
        // Exposed so `ParserResult` can recover `T` from a parser type.
        const Value = T;
        // The parsed value.
        value: T,
        // The unconsumed remainder of the input.
        rest: []const u8,
        pub fn init(v: T, rest: []const u8) @This() {
            return .{ .value = v, .rest = rest };
        }
    };
}
/// The type of all parser that can work with `mecha`
/// A parser is a function from input to `?Result(T)`; null means failure.
pub fn Parser(comptime T: type) type {
    return fn ([]const u8) ?Result(T);
}
/// The reverse of `Parser`. Give it a `Parser` type
/// and this function will give you `T`.
/// Walks `P`'s return type (`?Result(T)`) down to the `Value` decl of
/// `Result` — relies on this compiler version's `ReturnType`/`Child` decls.
pub fn ParserResult(comptime P: type) type {
    return P.ReturnType.Child.Value;
}
/// A parser that only succeeds on the end of the string.
/// Consumes nothing; `rest` is the (empty) input itself.
pub fn eos(str: []const u8) ?Result(void) {
    if (str.len == 0)
        return Result(void).init({}, str);
    return null;
}
// eos succeeds only on empty input.
test "eos" {
    testParser({}, "", eos(""));
    testParser(null, "", eos("a"));
}
/// A parser that always succeeds with the result being the
/// entire string. The same as the '.*$' regex.
pub fn any(str: []const u8) ?Result([]const u8) {
    const empty_rest = str[str.len..];
    return Result([]const u8).init(str, empty_rest);
}
// any consumes (and returns) the whole input, never failing.
test "any" {
    testParser("", "", any(""));
    testParser("a", "", any("a"));
}
/// Constructs a parser that only succeeds if the string starts with `c`.
/// Implemented as a one-byte `string` parser, so the result is `void`.
pub fn char(comptime c: u8) Parser(void) {
    return string(&[_]u8{c});
}
// char matches exactly one leading byte and leaves the remainder.
test "char" {
    testParser({}, "", char('a')("a"));
    testParser({}, "a", char('a')("aa"));
    testParser(null, "", char('a')("ba"));
    testParser(null, "", char('a')(""));
}
/// Constructs a parser that only succeeds if the string starts with
/// a character that is in between `start` and `end` inclusively.
/// The parsers result will be the character parsed.
pub fn range(comptime start: u8, comptime end: u8) Parser(u8) {
    return struct {
        const Res = Result(u8);
        fn func(str: []const u8) ?Res {
            if (str.len == 0)
                return null;
            const first = str[0];
            if (first < start or first > end)
                return null;
            return Res.init(first, str[1..]);
        }
    }.func;
}
// range is inclusive at both ends and consumes exactly one byte.
test "range" {
    testParser('a', "", range('a', 'z')("a"));
    testParser('c', "", range('a', 'z')("c"));
    testParser('z', "", range('a', 'z')("z"));
    testParser('a', "a", range('a', 'z')("aa"));
    testParser('c', "a", range('a', 'z')("ca"));
    testParser('z', "a", range('a', 'z')("za"));
    testParser(null, "", range('a', 'z')("1"));
    testParser(null, "", range('a', 'z')(""));
}
/// A parser that succeeds if the string starts with an alphabetic
/// character. The parsers result will be the character parsed.
/// Built from the lowercase and uppercase ASCII ranges.
pub const alpha = oneOf(.{ range('a', 'z'), range('A', 'Z') });
// Exhaustively check all 256 byte values against alpha.
test "alpha" {
    var i: u16 = 0;
    while (i <= 255) : (i += 1) {
        const c = @intCast(u8, i);
        switch (c) {
            'a'...'z',
            'A'...'Z',
            => testParser(c, "", alpha(&[_]u8{c})),
            else => testParser(null, "", alpha(&[_]u8{c})),
        }
    }
}
/// Construct a parser that succeeds if the string starts with a
/// character that is a digit in `base`. The parsers result will be
/// the character parsed.
/// Bases above 10 accept letter digits in either case. Note that bases
/// above 36 would produce letter ranges past 'z'/'Z' — only 1..36 make sense.
pub fn digit(comptime base: u8) Parser(u8) {
    debug.assert(base != 0);
    if (base <= 10)
        return range('0', '0' + (base - 1));
    return comptime oneOf(.{
        range('0', '9'),
        range('a', 'a' + (base - 11)),
        range('A', 'A' + (base - 11)),
    });
}
// BUGFIX: this test exercises `digit`, not `alpha`; its name was a
// copy-paste duplicate of the previous test's name.
test "digit" {
    var i: u16 = 0;
    // base 2: only '0' and '1' are digits.
    i = 0;
    while (i <= 255) : (i += 1) {
        const c = @intCast(u8, i);
        switch (c) {
            '0'...'1' => testParser(c, "", digit(2)(&[_]u8{c})),
            else => testParser(null, "", digit(2)(&[_]u8{c})),
        }
    }
    // base 10: '0'..'9'.
    i = 0;
    while (i <= 255) : (i += 1) {
        const c = @intCast(u8, i);
        switch (c) {
            '0'...'9' => testParser(c, "", digit(10)(&[_]u8{c})),
            else => testParser(null, "", digit(10)(&[_]u8{c})),
        }
    }
    // base 16: digits plus hex letters in both cases.
    i = 0;
    while (i <= 255) : (i += 1) {
        const c = @intCast(u8, i);
        switch (c) {
            '0'...'9',
            'a'...'f',
            'A'...'F',
            => testParser(c, "", digit(16)(&[_]u8{c})),
            else => testParser(null, "", digit(16)(&[_]u8{c})),
        }
    }
}
/// Construct a parser that succeeds if the string passed in starts
/// with `str`. Consumes exactly `str.len` bytes on success.
pub fn string(comptime str: []const u8) Parser(void) {
    return struct {
        const Res = Result(void);
        fn func(s: []const u8) ?Res {
            if (mem.startsWith(u8, s, str)) {
                return Res.init({}, s[str.len..]);
            }
            return null;
        }
    }.func;
}
// string matches a literal prefix and leaves the remainder.
test "string" {
    testParser({}, "", string("aa")("aa"));
    testParser({}, "a", string("aa")("aaa"));
    testParser(null, "", string("aa")("ba"));
    testParser(null, "", string("aa")(""));
}
/// Construct a parser that repeatedly uses `parser` until it fails
/// or `m` iterations is reached. The parser constructed will only
/// succeed if `parser` succeeded at least `n` times. The parsers
/// result will be a string containing everything parsed.
pub fn manyRange(
    comptime n: usize,
    comptime m: usize,
    comptime parser: var,
) Parser([]const u8) {
    return struct {
        const Res = Result([]const u8);
        fn func(str: []const u8) ?Res {
            var rem = str;
            // First `n` repetitions are mandatory: unrolled at comptime,
            // and any failure fails the whole parser.
            comptime var i: usize = 0;
            inline while (i < n) : (i += 1) {
                const r = parser(rem) orelse return null;
                rem = r.rest;
            }
            // Remaining repetitions up to `m` are optional: a failure
            // just stops the loop (i continues from n at runtime).
            var i_2 = i;
            while (i_2 < m) : (i_2 += 1) {
                const r = parser(rem) orelse break;
                rem = r.rest;
            }
            // Everything consumed so far, as a slice of the original input.
            return Res.init(str[0 .. str.len - rem.len], rem);
        }
    }.func;
}
/// Construct a parser that repeatedly uses `parser` until it fails.
/// The parsers result will be a string containing everything parsed.
/// Zero repetitions are allowed, so this parser never fails.
pub fn many(comptime parser: var) Parser([]const u8) {
    return manyRange(0, math.maxInt(usize), parser);
}
// many: zero-or-more; manyRange(1, 2, ...): at least one, at most two.
test "many" {
    const parser1 = comptime many(string("ab"));
    testParser("", "", parser1(""));
    testParser("", "a", parser1("a"));
    testParser("ab", "", parser1("ab"));
    testParser("ab", "a", parser1("aba"));
    testParser("abab", "", parser1("abab"));
    testParser("abab", "a", parser1("ababa"));
    testParser("ababab", "", parser1("ababab"));
    const parser2 = comptime manyRange(1, 2, string("ab"));
    testParser(null, "", parser2(""));
    testParser(null, "", parser2("a"));
    testParser("ab", "", parser2("ab"));
    testParser("ab", "a", parser2("aba"));
    testParser("abab", "", parser2("abab"));
    testParser("abab", "a", parser2("ababa"));
    testParser("abab", "ab", parser2("ababab"));
}
/// Construct a parser that will call `parser` on the string
/// but never fails to parser. The parsers result will be the
/// result of `parser` on success and `null` on failure.
pub fn opt(comptime parser: var) Parser(?ParserResult(@TypeOf(parser))) {
    return struct {
        const Res = Result(?ParserResult(@TypeOf(parser)));
        fn func(str: []const u8) ?Res {
            // On failure, succeed with a null value and consume nothing.
            const r = parser(str) orelse return Res.init(null, str);
            return Res.init(r.value, r.rest);
        }
    }.func;
}
// opt wraps the result in an optional; failure yields null and consumes nothing.
test "opt" {
    const parser1 = comptime opt(range('a', 'z'));
    testParser(@as(?u8, 'a'), "", parser1("a"));
    testParser(@as(?u8, 'a'), "a", parser1("aa"));
    testParser(@as(?u8, null), "1", parser1("1"));
}
/// Collect the result types of a tuple of parsers, skipping `void`
/// results (parsers that match without producing a value).
fn ParsersTypes(comptime parsers: var) []const type {
    var types: []const type = &[_]type{};
    inline for (parsers) |parser| {
        const T = ParserResult(@TypeOf(parser));
        if (T != void)
            types = types ++ [_]type{T};
    }
    return types;
}
/// Result type of `combine`: void when no parser produces a value,
/// the single type when exactly one does, otherwise a Tuple of all.
fn Combine(comptime parsers: var) type {
    const types = ParsersTypes(parsers);
    return switch (types.len) {
        0 => void,
        1 => types[0],
        else => Tuple(types),
    };
}
/// Takes a tuple of `Parser(any)` and constructs a parser that
/// only succeeds if all parsers succeed to parse. The parsers
/// will be called in order and parser `N` will use the `rest`
/// from parser `N-1`. The parsers result will be a `Tuple` of
/// all parser not of type `Parser(void)`. If only one parser
/// is not of type `Parser(void)` then this parsers result is
/// returned instead of a tuple.
pub fn combine(comptime parsers: var) Parser(Combine(parsers)) {
    return struct {
        const types = ParsersTypes(parsers);
        const Res = Result(Combine(parsers));
        fn func(str: []const u8) ?Res {
            var res: Res = undefined;
            res.rest = str;
            // i walks the parsers; j walks the non-void result slots.
            comptime var i = 0;
            comptime var j = 0;
            inline while (i < parsers.len) : (i += 1) {
                // Any sub-parser failing fails the whole combination.
                const r = parsers[i](res.rest) orelse return null;
                res.rest = r.rest;
                if (@TypeOf(r.value) != void) {
                    if (types.len == 1) {
                        // Single non-void result: stored directly, no tuple.
                        res.value = r.value;
                    } else {
                        res.value.at(j).* = r.value;
                        j += 1;
                    }
                }
            }
            return res;
        }
    }.func;
}
// combine threads `rest` between parsers; void results are dropped.
test "combine" {
    const parser1 = comptime combine(.{ opt(range('a', 'b')), opt(range('d', 'e')) });
    const Res = ParserResult(@TypeOf(parser1));
    testParser(Res.init(.{ 'a', 'd' }), "", parser1("ad"));
    testParser(Res.init(.{ 'a', null }), "a", parser1("aa"));
    testParser(Res.init(.{ null, 'd' }), "a", parser1("da"));
    testParser(Res.init(.{ null, null }), "qa", parser1("qa"));
    const parser2 = comptime combine(.{ opt(range('a', 'b')), char('d') });
    testParser('a', "", parser2("ad"));
    testParser('a', "a", parser2("ada"));
    testParser(@as(?u8, null), "a", parser2("da"));
    testParser(null, "", parser2("qa"));
}
/// Takes a tuple of `Parser(T)` and constructs a parser that
/// only succeeds if one of the parsers succeed to parse. The
/// parsers will be called in order all with `str` as input.
/// The parser will return with the result of the first parser
/// that succeeded. The parsers result will be `Result(T)`
pub fn oneOf(comptime parsers: var) Parser(ParserResult(@TypeOf(parsers[0]))) {
    return struct {
        const Res = Result(ParserResult(@TypeOf(parsers[0])));
        fn func(str: []const u8) ?Res {
            inline for (parsers) |parser| {
                const r = parser(str);
                if (r != null)
                    return r;
            }
            return null;
        }
    }.func;
}
// oneOf returns the first alternative that matches, or null if none do.
test "oneOf" {
    const parser1 = comptime oneOf(.{ range('a', 'b'), range('d', 'e') });
    testParser('a', "", parser1("a"));
    testParser('b', "", parser1("b"));
    testParser('d', "", parser1("d"));
    testParser('e', "", parser1("e"));
    testParser('a', "a", parser1("aa"));
    testParser('b', "a", parser1("ba"));
    testParser('d', "a", parser1("da"));
    testParser('e', "a", parser1("ea"));
    testParser(null, "", parser1("q"));
}
/// Takes any parser (preferable not of type `Parser([]const u8)`)
/// and converts it to a parser where the result is a string that
/// contains all characters parsed by `parser`.
pub fn asStr(comptime parser: var) Parser([]const u8) {
    return struct {
        const Res = Result([]const u8);
        fn func(str: []const u8) ?Res {
            if (parser(str)) |r| {
                // Everything the inner parser consumed, as one input slice.
                const consumed = str[0 .. str.len - r.rest.len];
                return Res.init(consumed, r.rest);
            }
            return null;
        }
    }.func;
}
// asStr returns the consumed input as a string, whatever the inner result was.
test "asStr" {
    const parser1 = comptime asStr(char('a'));
    testParser("a", "", parser1("a"));
    testParser("a", "a", parser1("aa"));
    testParser(null, "", parser1("ba"));
    const parser2 = comptime asStr(combine(.{ opt(range('a', 'b')), opt(range('d', 'e')) }));
    testParser("ad", "", parser2("ad"));
    testParser("a", "a", parser2("aa"));
    testParser("d", "a", parser2("da"));
    testParser("", "qa", parser2("qa"));
}
/// Constructs a parser that has its result converted with the
/// `conv` function. The `conv` functions signature is
/// `fn (ParserResult(parser)) ?T`. The parser constructed will
/// fail if `conv` fails.
pub fn convert(
    comptime T: type,
    comptime conv: var,
    comptime parser: var,
) Parser(T) {
    return struct {
        const Res = Result(T);
        fn func(str: []const u8) ?Res {
            if (parser(str)) |r| {
                if (conv(r.value)) |v|
                    return Res.init(v, r.rest);
            }
            // Either the parse or the conversion failed.
            return null;
        }
    }.func;
}
/// Constructs a convert function for `convert` that takes a
/// string and parses it to an int of type `Int`.
/// Any parse error (overflow, invalid digit, empty) maps to null.
pub fn toInt(comptime Int: type, comptime base: u8) fn ([]const u8) ?Int {
    return struct {
        fn parse(str: []const u8) ?Int {
            const parsed = fmt.parseInt(Int, str, base) catch return null;
            return parsed;
        }
    }.parse;
}
/// A convert function for `convert` that takes a string and
/// returns the first character, but only if `string.len == 1`.
pub fn toChar(str: []const u8) ?u8 {
    return if (str.len == 1) str[0] else null;
}
/// Constructs a convert function for `convert` that takes a
/// string and converts it to an `Enum` with `std.meta.stringToEnum`.
/// Returns null when the string names no field of `Enum`.
pub fn toEnum(comptime Enum: type) fn ([]const u8) ?Enum {
    return struct {
        fn func(str: []const u8) ?Enum {
            return std.meta.stringToEnum(Enum, str);
        }
    }.func;
}
/// A convert function for `convert` that takes a string
/// and returns `true` if it is `"true"` and `false` if it
/// is `"false"`. Anything else yields null.
pub fn toBool(str: []const u8) ?bool {
    if (mem.eql(u8, str, "true"))
        return true;
    if (mem.eql(u8, str, "false"))
        return false;
    return null;
}
// convert applies toInt/toChar/toBool on top of string parsers.
test "convert" {
    const parser1 = comptime convert(u8, toInt(u8, 10), asStr(string("123")));
    testParser(123, "", parser1("123"));
    testParser(123, "a", parser1("123a"));
    testParser(null, "", parser1("12"));
    const parser2 = comptime convert(u8, toChar, asStr(string("a")));
    testParser('a', "", parser2("a"));
    testParser('a', "a", parser2("aa"));
    testParser(null, "", parser2("b"));
    const parser3 = comptime convert(bool, toBool, any);
    testParser(true, "", parser3("true"));
    testParser(false, "", parser3("false"));
    testParser(null, "", parser3("b"));
}
/// Construct a parser that succeeds if it parser an integer of
/// `base`. The result of this parser will be the string containing
/// the match.
/// Accepts an optional leading '-' followed by one or more digits.
pub fn intToken(comptime base: u8) Parser([]const u8) {
    return comptime asStr(combine(.{
        opt(char('-')),
        manyRange(1, math.maxInt(usize), digit(base)),
    }));
}
/// Same as `intToken` but also converts the parsed string
/// to an integer.
/// Fails (null) when the token overflows `Int` or has no digits.
pub fn int(comptime Int: type, comptime base: u8) Parser(Int) {
    return comptime convert(Int, toInt(Int, base), intToken(base));
}
// int parses, converts, and rejects out-of-range values (e.g. 256 for u8).
test "int" {
    const parser1 = int(u8, 10);
    testParser(0, "", parser1("0"));
    testParser(1, "", parser1("1"));
    testParser(1, "a", parser1("1a"));
    testParser(255, "", parser1("255"));
    testParser(null, "", parser1("256"));
    const parser2 = int(u8, 16);
    testParser(0x00, "", parser2("0"));
    testParser(0x01, "", parser2("1"));
    testParser(0x1a, "", parser2("1a"));
    testParser(0x01, "g", parser2("1g"));
    testParser(0xff, "", parser2("ff"));
    testParser(0xff, "", parser2("FF"));
    testParser(null, "", parser2("100"));
}
/// Test helper: assert that parse result `m_res` matches `expected_value`
/// and leaves exactly `rest` unconsumed. Pass `null` as `expected_value`
/// to assert the parse failed.
fn testParser(expected_value: var, rest: []const u8, m_res: var) void {
    switch (@typeInfo(@TypeOf(expected_value))) {
        // A comptime null literal means the parser must have failed.
        .Null => testing.expect(m_res == null),
        else => {
            testing.expect(m_res != null);
            testing.expectEqualSlices(u8, rest, m_res.?.rest);
            const T = @TypeOf(m_res.?.value);
            switch (T) {
                // Slices are compared element-wise, everything else by value.
                []u8,
                []const u8,
                => testing.expectEqualSlices(u8, expected_value, m_res.?.value),
                else => testing.expectEqual(@as(T, expected_value), m_res.?.value),
            }
        },
    }
}
|
mecha.zig
|
const std = @import("std");
const codes = @import("./codes.zig");
const color = @import("./Color.zig");
const Color = color.Color;
/// Write the ANSI sequence that resets all graphics attributes.
pub inline fn printResetGraphics(writer: anytype) !void {
    const seq = codes.resetEscapeSequence();
    try writer.print("{s}", .{seq});
}
/// Write the ANSI sequence that resets only the foreground color.
pub inline fn printResetForeground(writer: anytype) !void {
    const seq = codes.resetForegroundEscapeSequence();
    try writer.print("{s}", .{seq});
}
/// Write the ANSI sequence that resets only the background color.
pub inline fn printResetBackground(writer: anytype) !void {
    const seq = codes.resetBackgroundEscapeSequence();
    try writer.print("{s}", .{seq});
}
/// Write the ANSI escape sequence that sets the foreground color to `c`.
pub fn printForegroundColor(writer: anytype, c: Color) !void {
    const prefix = codes.EscapePrefix;
    const suffix = codes.graphics.SetModeSuffix;
    const color_code: []const u8 = color.colorToForegroundCode(c);
    try writer.print("{s}{s}{s}", .{ prefix, color_code, suffix });
}
/// Write the ANSI escape sequence that sets the background color to `c`.
pub fn printBackgroundColor(writer: anytype, c: Color) !void {
    // Local renamed from `colorCode` to snake_case for consistency with
    // `printForegroundColor` and Zig naming conventions.
    const color_code: []const u8 = color.colorToBackgroundCode(c);
    try writer.print("{s}{s}{s}", .{
        codes.EscapePrefix,
        color_code,
        codes.graphics.SetModeSuffix,
    });
}
// Verify printForegroundColor emits the color code, the text, and the reset.
test "printForegroundColor works" {
    const TestCase = struct {
        input_text: []const u8,
        input_color: Color,
    };
    const test_cases = [_]TestCase {
        TestCase{ .input_text = "printForegroundColor works for red", .input_color = Color.Red },
        TestCase{ .input_text = "printForegroundColor works for green", .input_color = Color.Green },
        TestCase{ .input_text = "printForegroundColor works for blue", .input_color = Color.Blue },
    };
    const buf_size: usize = 100;
    for (test_cases) |tc| {
        var buf: [buf_size]u8 = [_]u8{0} ** buf_size;
        // BUGFIX: the stream must live in a mutable local. Taking `.writer()`
        // directly off the temporary returned by `fixedBufferStream` binds the
        // writer to a vanishing copy, so nothing was written into `buf`.
        var fbs = std.io.fixedBufferStream(&buf);
        const writer = fbs.writer();
        try printForegroundColor(writer, tc.input_color);
        try writer.writeAll(tc.input_text);
        try printResetForeground(writer);
        // Output must contain the ANSI escape code for the input color, the
        // input text and the correct reset escaping suffix.
        std.testing.expect(std.mem.indexOf(u8, buf[0..], color.colorToForegroundCode(tc.input_color)) != null);
        std.testing.expect(std.mem.indexOf(u8, buf[0..], tc.input_text) != null);
        std.testing.expect(std.mem.indexOf(u8, buf[0..], codes.resetForegroundEscapeSequence()) != null);
    }
}
// Verify printBackgroundColor emits the color code, the text, and the reset.
test "printBackgroundColor works" {
    const TestCase = struct {
        input_text: []const u8,
        input_color: Color,
    };
    const test_cases = [_]TestCase {
        TestCase{ .input_text = "printBackgroundColor works for red", .input_color = Color.Red },
        TestCase{ .input_text = "printBackgroundColor works for green", .input_color = Color.Green },
        TestCase{ .input_text = "printBackgroundColor works for blue", .input_color = Color.Blue },
    };
    const buf_size: usize = 100;
    for (test_cases) |tc| {
        var buf: [buf_size]u8 = [_]u8{0} ** buf_size;
        // BUGFIX: the stream must live in a mutable local. Taking `.writer()`
        // directly off the temporary returned by `fixedBufferStream` binds the
        // writer to a vanishing copy, so nothing was written into `buf`.
        var fbs = std.io.fixedBufferStream(&buf);
        const writer = fbs.writer();
        try printBackgroundColor(writer, tc.input_color);
        try writer.writeAll(tc.input_text);
        try printResetBackground(writer);
        // Output must contain the ANSI escape code for the input color, the
        // input text and the correct reset escaping suffix.
        std.testing.expect(std.mem.indexOf(u8, buf[0..], color.colorToBackgroundCode(tc.input_color)) != null);
        std.testing.expect(std.mem.indexOf(u8, buf[0..], tc.input_text) != null);
        std.testing.expect(std.mem.indexOf(u8, buf[0..], codes.resetBackgroundEscapeSequence()) != null);
    }
}
|
src/io.zig
|
const std = @import("std");
const mem = std.mem;
const TitleMap = @This();
// Allocator backing `map`; stored so callers can deinit later.
allocator: *mem.Allocator,
// Maps a codepoint to its titlecase counterpart (populated in `init`).
map: std.AutoHashMap(u21, u21),
pub fn init(allocator: *mem.Allocator) !TitleMap {
var instance = TitleMap{
.allocator = allocator,
.map = std.AutoHashMap(u21, u21).init(allocator),
};
try instance.map.put(0x0061, 0x0041);
try instance.map.put(0x0062, 0x0042);
try instance.map.put(0x0063, 0x0043);
try instance.map.put(0x0064, 0x0044);
try instance.map.put(0x0065, 0x0045);
try instance.map.put(0x0066, 0x0046);
try instance.map.put(0x0067, 0x0047);
try instance.map.put(0x0068, 0x0048);
try instance.map.put(0x0069, 0x0049);
try instance.map.put(0x006A, 0x004A);
try instance.map.put(0x006B, 0x004B);
try instance.map.put(0x006C, 0x004C);
try instance.map.put(0x006D, 0x004D);
try instance.map.put(0x006E, 0x004E);
try instance.map.put(0x006F, 0x004F);
try instance.map.put(0x0070, 0x0050);
try instance.map.put(0x0071, 0x0051);
try instance.map.put(0x0072, 0x0052);
try instance.map.put(0x0073, 0x0053);
try instance.map.put(0x0074, 0x0054);
try instance.map.put(0x0075, 0x0055);
try instance.map.put(0x0076, 0x0056);
try instance.map.put(0x0077, 0x0057);
try instance.map.put(0x0078, 0x0058);
try instance.map.put(0x0079, 0x0059);
try instance.map.put(0x007A, 0x005A);
try instance.map.put(0x00B5, 0x039C);
try instance.map.put(0x00E0, 0x00C0);
try instance.map.put(0x00E1, 0x00C1);
try instance.map.put(0x00E2, 0x00C2);
try instance.map.put(0x00E3, 0x00C3);
try instance.map.put(0x00E4, 0x00C4);
try instance.map.put(0x00E5, 0x00C5);
try instance.map.put(0x00E6, 0x00C6);
try instance.map.put(0x00E7, 0x00C7);
try instance.map.put(0x00E8, 0x00C8);
try instance.map.put(0x00E9, 0x00C9);
try instance.map.put(0x00EA, 0x00CA);
try instance.map.put(0x00EB, 0x00CB);
try instance.map.put(0x00EC, 0x00CC);
try instance.map.put(0x00ED, 0x00CD);
try instance.map.put(0x00EE, 0x00CE);
try instance.map.put(0x00EF, 0x00CF);
try instance.map.put(0x00F0, 0x00D0);
try instance.map.put(0x00F1, 0x00D1);
try instance.map.put(0x00F2, 0x00D2);
try instance.map.put(0x00F3, 0x00D3);
try instance.map.put(0x00F4, 0x00D4);
try instance.map.put(0x00F5, 0x00D5);
try instance.map.put(0x00F6, 0x00D6);
try instance.map.put(0x00F8, 0x00D8);
try instance.map.put(0x00F9, 0x00D9);
try instance.map.put(0x00FA, 0x00DA);
try instance.map.put(0x00FB, 0x00DB);
try instance.map.put(0x00FC, 0x00DC);
try instance.map.put(0x00FD, 0x00DD);
try instance.map.put(0x00FE, 0x00DE);
try instance.map.put(0x00FF, 0x0178);
try instance.map.put(0x0101, 0x0100);
try instance.map.put(0x0103, 0x0102);
try instance.map.put(0x0105, 0x0104);
try instance.map.put(0x0107, 0x0106);
try instance.map.put(0x0109, 0x0108);
try instance.map.put(0x010B, 0x010A);
try instance.map.put(0x010D, 0x010C);
try instance.map.put(0x010F, 0x010E);
try instance.map.put(0x0111, 0x0110);
try instance.map.put(0x0113, 0x0112);
try instance.map.put(0x0115, 0x0114);
try instance.map.put(0x0117, 0x0116);
try instance.map.put(0x0119, 0x0118);
try instance.map.put(0x011B, 0x011A);
try instance.map.put(0x011D, 0x011C);
try instance.map.put(0x011F, 0x011E);
try instance.map.put(0x0121, 0x0120);
try instance.map.put(0x0123, 0x0122);
try instance.map.put(0x0125, 0x0124);
try instance.map.put(0x0127, 0x0126);
try instance.map.put(0x0129, 0x0128);
try instance.map.put(0x012B, 0x012A);
try instance.map.put(0x012D, 0x012C);
try instance.map.put(0x012F, 0x012E);
try instance.map.put(0x0131, 0x0049);
try instance.map.put(0x0133, 0x0132);
try instance.map.put(0x0135, 0x0134);
try instance.map.put(0x0137, 0x0136);
try instance.map.put(0x013A, 0x0139);
try instance.map.put(0x013C, 0x013B);
try instance.map.put(0x013E, 0x013D);
try instance.map.put(0x0140, 0x013F);
try instance.map.put(0x0142, 0x0141);
try instance.map.put(0x0144, 0x0143);
try instance.map.put(0x0146, 0x0145);
try instance.map.put(0x0148, 0x0147);
try instance.map.put(0x014B, 0x014A);
try instance.map.put(0x014D, 0x014C);
try instance.map.put(0x014F, 0x014E);
try instance.map.put(0x0151, 0x0150);
try instance.map.put(0x0153, 0x0152);
try instance.map.put(0x0155, 0x0154);
try instance.map.put(0x0157, 0x0156);
try instance.map.put(0x0159, 0x0158);
try instance.map.put(0x015B, 0x015A);
try instance.map.put(0x015D, 0x015C);
try instance.map.put(0x015F, 0x015E);
try instance.map.put(0x0161, 0x0160);
try instance.map.put(0x0163, 0x0162);
try instance.map.put(0x0165, 0x0164);
try instance.map.put(0x0167, 0x0166);
try instance.map.put(0x0169, 0x0168);
try instance.map.put(0x016B, 0x016A);
try instance.map.put(0x016D, 0x016C);
try instance.map.put(0x016F, 0x016E);
try instance.map.put(0x0171, 0x0170);
try instance.map.put(0x0173, 0x0172);
try instance.map.put(0x0175, 0x0174);
try instance.map.put(0x0177, 0x0176);
try instance.map.put(0x017A, 0x0179);
try instance.map.put(0x017C, 0x017B);
try instance.map.put(0x017E, 0x017D);
try instance.map.put(0x017F, 0x0053);
try instance.map.put(0x0180, 0x0243);
try instance.map.put(0x0183, 0x0182);
try instance.map.put(0x0185, 0x0184);
try instance.map.put(0x0188, 0x0187);
try instance.map.put(0x018C, 0x018B);
try instance.map.put(0x0192, 0x0191);
try instance.map.put(0x0195, 0x01F6);
try instance.map.put(0x0199, 0x0198);
try instance.map.put(0x019A, 0x023D);
try instance.map.put(0x019E, 0x0220);
try instance.map.put(0x01A1, 0x01A0);
try instance.map.put(0x01A3, 0x01A2);
try instance.map.put(0x01A5, 0x01A4);
try instance.map.put(0x01A8, 0x01A7);
try instance.map.put(0x01AD, 0x01AC);
try instance.map.put(0x01B0, 0x01AF);
try instance.map.put(0x01B4, 0x01B3);
try instance.map.put(0x01B6, 0x01B5);
try instance.map.put(0x01B9, 0x01B8);
try instance.map.put(0x01BD, 0x01BC);
try instance.map.put(0x01BF, 0x01F7);
try instance.map.put(0x01C4, 0x01C5);
try instance.map.put(0x01C5, 0x01C5);
try instance.map.put(0x01C6, 0x01C5);
try instance.map.put(0x01C7, 0x01C8);
try instance.map.put(0x01C8, 0x01C8);
try instance.map.put(0x01C9, 0x01C8);
try instance.map.put(0x01CA, 0x01CB);
try instance.map.put(0x01CB, 0x01CB);
try instance.map.put(0x01CC, 0x01CB);
try instance.map.put(0x01CE, 0x01CD);
try instance.map.put(0x01D0, 0x01CF);
try instance.map.put(0x01D2, 0x01D1);
try instance.map.put(0x01D4, 0x01D3);
try instance.map.put(0x01D6, 0x01D5);
try instance.map.put(0x01D8, 0x01D7);
try instance.map.put(0x01DA, 0x01D9);
try instance.map.put(0x01DC, 0x01DB);
try instance.map.put(0x01DD, 0x018E);
try instance.map.put(0x01DF, 0x01DE);
try instance.map.put(0x01E1, 0x01E0);
try instance.map.put(0x01E3, 0x01E2);
try instance.map.put(0x01E5, 0x01E4);
try instance.map.put(0x01E7, 0x01E6);
try instance.map.put(0x01E9, 0x01E8);
try instance.map.put(0x01EB, 0x01EA);
try instance.map.put(0x01ED, 0x01EC);
try instance.map.put(0x01EF, 0x01EE);
try instance.map.put(0x01F1, 0x01F2);
try instance.map.put(0x01F2, 0x01F2);
try instance.map.put(0x01F3, 0x01F2);
try instance.map.put(0x01F5, 0x01F4);
try instance.map.put(0x01F9, 0x01F8);
try instance.map.put(0x01FB, 0x01FA);
try instance.map.put(0x01FD, 0x01FC);
try instance.map.put(0x01FF, 0x01FE);
try instance.map.put(0x0201, 0x0200);
try instance.map.put(0x0203, 0x0202);
try instance.map.put(0x0205, 0x0204);
try instance.map.put(0x0207, 0x0206);
try instance.map.put(0x0209, 0x0208);
try instance.map.put(0x020B, 0x020A);
try instance.map.put(0x020D, 0x020C);
try instance.map.put(0x020F, 0x020E);
try instance.map.put(0x0211, 0x0210);
try instance.map.put(0x0213, 0x0212);
try instance.map.put(0x0215, 0x0214);
try instance.map.put(0x0217, 0x0216);
try instance.map.put(0x0219, 0x0218);
try instance.map.put(0x021B, 0x021A);
try instance.map.put(0x021D, 0x021C);
try instance.map.put(0x021F, 0x021E);
try instance.map.put(0x0223, 0x0222);
try instance.map.put(0x0225, 0x0224);
try instance.map.put(0x0227, 0x0226);
try instance.map.put(0x0229, 0x0228);
try instance.map.put(0x022B, 0x022A);
try instance.map.put(0x022D, 0x022C);
try instance.map.put(0x022F, 0x022E);
try instance.map.put(0x0231, 0x0230);
try instance.map.put(0x0233, 0x0232);
try instance.map.put(0x023C, 0x023B);
try instance.map.put(0x023F, 0x2C7E);
try instance.map.put(0x0240, 0x2C7F);
try instance.map.put(0x0242, 0x0241);
try instance.map.put(0x0247, 0x0246);
try instance.map.put(0x0249, 0x0248);
try instance.map.put(0x024B, 0x024A);
try instance.map.put(0x024D, 0x024C);
try instance.map.put(0x024F, 0x024E);
try instance.map.put(0x0250, 0x2C6F);
try instance.map.put(0x0251, 0x2C6D);
try instance.map.put(0x0252, 0x2C70);
try instance.map.put(0x0253, 0x0181);
try instance.map.put(0x0254, 0x0186);
try instance.map.put(0x0256, 0x0189);
try instance.map.put(0x0257, 0x018A);
try instance.map.put(0x0259, 0x018F);
try instance.map.put(0x025B, 0x0190);
try instance.map.put(0x025C, 0xA7AB);
try instance.map.put(0x0260, 0x0193);
try instance.map.put(0x0261, 0xA7AC);
try instance.map.put(0x0263, 0x0194);
try instance.map.put(0x0265, 0xA78D);
try instance.map.put(0x0266, 0xA7AA);
try instance.map.put(0x0268, 0x0197);
try instance.map.put(0x0269, 0x0196);
try instance.map.put(0x026A, 0xA7AE);
try instance.map.put(0x026B, 0x2C62);
try instance.map.put(0x026C, 0xA7AD);
try instance.map.put(0x026F, 0x019C);
try instance.map.put(0x0271, 0x2C6E);
try instance.map.put(0x0272, 0x019D);
try instance.map.put(0x0275, 0x019F);
try instance.map.put(0x027D, 0x2C64);
try instance.map.put(0x0280, 0x01A6);
try instance.map.put(0x0282, 0xA7C5);
try instance.map.put(0x0283, 0x01A9);
try instance.map.put(0x0287, 0xA7B1);
try instance.map.put(0x0288, 0x01AE);
try instance.map.put(0x0289, 0x0244);
try instance.map.put(0x028A, 0x01B1);
try instance.map.put(0x028B, 0x01B2);
try instance.map.put(0x028C, 0x0245);
try instance.map.put(0x0292, 0x01B7);
try instance.map.put(0x029D, 0xA7B2);
try instance.map.put(0x029E, 0xA7B0);
try instance.map.put(0x0345, 0x0399);
try instance.map.put(0x0371, 0x0370);
try instance.map.put(0x0373, 0x0372);
try instance.map.put(0x0377, 0x0376);
try instance.map.put(0x037B, 0x03FD);
try instance.map.put(0x037C, 0x03FE);
try instance.map.put(0x037D, 0x03FF);
try instance.map.put(0x03AC, 0x0386);
try instance.map.put(0x03AD, 0x0388);
try instance.map.put(0x03AE, 0x0389);
try instance.map.put(0x03AF, 0x038A);
try instance.map.put(0x03B1, 0x0391);
try instance.map.put(0x03B2, 0x0392);
try instance.map.put(0x03B3, 0x0393);
try instance.map.put(0x03B4, 0x0394);
try instance.map.put(0x03B5, 0x0395);
try instance.map.put(0x03B6, 0x0396);
try instance.map.put(0x03B7, 0x0397);
try instance.map.put(0x03B8, 0x0398);
try instance.map.put(0x03B9, 0x0399);
try instance.map.put(0x03BA, 0x039A);
try instance.map.put(0x03BB, 0x039B);
try instance.map.put(0x03BC, 0x039C);
try instance.map.put(0x03BD, 0x039D);
try instance.map.put(0x03BE, 0x039E);
try instance.map.put(0x03BF, 0x039F);
try instance.map.put(0x03C0, 0x03A0);
try instance.map.put(0x03C1, 0x03A1);
try instance.map.put(0x03C2, 0x03A3);
try instance.map.put(0x03C3, 0x03A3);
try instance.map.put(0x03C4, 0x03A4);
try instance.map.put(0x03C5, 0x03A5);
try instance.map.put(0x03C6, 0x03A6);
try instance.map.put(0x03C7, 0x03A7);
try instance.map.put(0x03C8, 0x03A8);
try instance.map.put(0x03C9, 0x03A9);
try instance.map.put(0x03CA, 0x03AA);
try instance.map.put(0x03CB, 0x03AB);
try instance.map.put(0x03CC, 0x038C);
try instance.map.put(0x03CD, 0x038E);
try instance.map.put(0x03CE, 0x038F);
try instance.map.put(0x03D0, 0x0392);
try instance.map.put(0x03D1, 0x0398);
try instance.map.put(0x03D5, 0x03A6);
try instance.map.put(0x03D6, 0x03A0);
try instance.map.put(0x03D7, 0x03CF);
try instance.map.put(0x03D9, 0x03D8);
try instance.map.put(0x03DB, 0x03DA);
try instance.map.put(0x03DD, 0x03DC);
try instance.map.put(0x03DF, 0x03DE);
try instance.map.put(0x03E1, 0x03E0);
try instance.map.put(0x03E3, 0x03E2);
try instance.map.put(0x03E5, 0x03E4);
try instance.map.put(0x03E7, 0x03E6);
try instance.map.put(0x03E9, 0x03E8);
try instance.map.put(0x03EB, 0x03EA);
try instance.map.put(0x03ED, 0x03EC);
try instance.map.put(0x03EF, 0x03EE);
try instance.map.put(0x03F0, 0x039A);
try instance.map.put(0x03F1, 0x03A1);
try instance.map.put(0x03F2, 0x03F9);
try instance.map.put(0x03F3, 0x037F);
try instance.map.put(0x03F5, 0x0395);
try instance.map.put(0x03F8, 0x03F7);
try instance.map.put(0x03FB, 0x03FA);
try instance.map.put(0x0430, 0x0410);
try instance.map.put(0x0431, 0x0411);
try instance.map.put(0x0432, 0x0412);
try instance.map.put(0x0433, 0x0413);
try instance.map.put(0x0434, 0x0414);
try instance.map.put(0x0435, 0x0415);
try instance.map.put(0x0436, 0x0416);
try instance.map.put(0x0437, 0x0417);
try instance.map.put(0x0438, 0x0418);
try instance.map.put(0x0439, 0x0419);
try instance.map.put(0x043A, 0x041A);
try instance.map.put(0x043B, 0x041B);
try instance.map.put(0x043C, 0x041C);
try instance.map.put(0x043D, 0x041D);
try instance.map.put(0x043E, 0x041E);
try instance.map.put(0x043F, 0x041F);
try instance.map.put(0x0440, 0x0420);
try instance.map.put(0x0441, 0x0421);
try instance.map.put(0x0442, 0x0422);
try instance.map.put(0x0443, 0x0423);
try instance.map.put(0x0444, 0x0424);
try instance.map.put(0x0445, 0x0425);
try instance.map.put(0x0446, 0x0426);
try instance.map.put(0x0447, 0x0427);
try instance.map.put(0x0448, 0x0428);
try instance.map.put(0x0449, 0x0429);
try instance.map.put(0x044A, 0x042A);
try instance.map.put(0x044B, 0x042B);
try instance.map.put(0x044C, 0x042C);
try instance.map.put(0x044D, 0x042D);
try instance.map.put(0x044E, 0x042E);
try instance.map.put(0x044F, 0x042F);
try instance.map.put(0x0450, 0x0400);
try instance.map.put(0x0451, 0x0401);
try instance.map.put(0x0452, 0x0402);
try instance.map.put(0x0453, 0x0403);
try instance.map.put(0x0454, 0x0404);
try instance.map.put(0x0455, 0x0405);
try instance.map.put(0x0456, 0x0406);
try instance.map.put(0x0457, 0x0407);
try instance.map.put(0x0458, 0x0408);
try instance.map.put(0x0459, 0x0409);
try instance.map.put(0x045A, 0x040A);
try instance.map.put(0x045B, 0x040B);
try instance.map.put(0x045C, 0x040C);
try instance.map.put(0x045D, 0x040D);
try instance.map.put(0x045E, 0x040E);
try instance.map.put(0x045F, 0x040F);
try instance.map.put(0x0461, 0x0460);
try instance.map.put(0x0463, 0x0462);
try instance.map.put(0x0465, 0x0464);
try instance.map.put(0x0467, 0x0466);
try instance.map.put(0x0469, 0x0468);
try instance.map.put(0x046B, 0x046A);
try instance.map.put(0x046D, 0x046C);
try instance.map.put(0x046F, 0x046E);
try instance.map.put(0x0471, 0x0470);
try instance.map.put(0x0473, 0x0472);
try instance.map.put(0x0475, 0x0474);
try instance.map.put(0x0477, 0x0476);
try instance.map.put(0x0479, 0x0478);
try instance.map.put(0x047B, 0x047A);
try instance.map.put(0x047D, 0x047C);
try instance.map.put(0x047F, 0x047E);
try instance.map.put(0x0481, 0x0480);
try instance.map.put(0x048B, 0x048A);
try instance.map.put(0x048D, 0x048C);
try instance.map.put(0x048F, 0x048E);
try instance.map.put(0x0491, 0x0490);
try instance.map.put(0x0493, 0x0492);
try instance.map.put(0x0495, 0x0494);
try instance.map.put(0x0497, 0x0496);
try instance.map.put(0x0499, 0x0498);
try instance.map.put(0x049B, 0x049A);
try instance.map.put(0x049D, 0x049C);
try instance.map.put(0x049F, 0x049E);
try instance.map.put(0x04A1, 0x04A0);
try instance.map.put(0x04A3, 0x04A2);
try instance.map.put(0x04A5, 0x04A4);
try instance.map.put(0x04A7, 0x04A6);
try instance.map.put(0x04A9, 0x04A8);
try instance.map.put(0x04AB, 0x04AA);
try instance.map.put(0x04AD, 0x04AC);
try instance.map.put(0x04AF, 0x04AE);
try instance.map.put(0x04B1, 0x04B0);
try instance.map.put(0x04B3, 0x04B2);
try instance.map.put(0x04B5, 0x04B4);
try instance.map.put(0x04B7, 0x04B6);
try instance.map.put(0x04B9, 0x04B8);
try instance.map.put(0x04BB, 0x04BA);
try instance.map.put(0x04BD, 0x04BC);
try instance.map.put(0x04BF, 0x04BE);
try instance.map.put(0x04C2, 0x04C1);
try instance.map.put(0x04C4, 0x04C3);
try instance.map.put(0x04C6, 0x04C5);
try instance.map.put(0x04C8, 0x04C7);
try instance.map.put(0x04CA, 0x04C9);
try instance.map.put(0x04CC, 0x04CB);
try instance.map.put(0x04CE, 0x04CD);
try instance.map.put(0x04CF, 0x04C0);
try instance.map.put(0x04D1, 0x04D0);
try instance.map.put(0x04D3, 0x04D2);
try instance.map.put(0x04D5, 0x04D4);
try instance.map.put(0x04D7, 0x04D6);
try instance.map.put(0x04D9, 0x04D8);
try instance.map.put(0x04DB, 0x04DA);
try instance.map.put(0x04DD, 0x04DC);
try instance.map.put(0x04DF, 0x04DE);
try instance.map.put(0x04E1, 0x04E0);
try instance.map.put(0x04E3, 0x04E2);
try instance.map.put(0x04E5, 0x04E4);
try instance.map.put(0x04E7, 0x04E6);
try instance.map.put(0x04E9, 0x04E8);
try instance.map.put(0x04EB, 0x04EA);
try instance.map.put(0x04ED, 0x04EC);
try instance.map.put(0x04EF, 0x04EE);
try instance.map.put(0x04F1, 0x04F0);
try instance.map.put(0x04F3, 0x04F2);
try instance.map.put(0x04F5, 0x04F4);
try instance.map.put(0x04F7, 0x04F6);
try instance.map.put(0x04F9, 0x04F8);
try instance.map.put(0x04FB, 0x04FA);
try instance.map.put(0x04FD, 0x04FC);
try instance.map.put(0x04FF, 0x04FE);
try instance.map.put(0x0501, 0x0500);
try instance.map.put(0x0503, 0x0502);
try instance.map.put(0x0505, 0x0504);
try instance.map.put(0x0507, 0x0506);
try instance.map.put(0x0509, 0x0508);
try instance.map.put(0x050B, 0x050A);
try instance.map.put(0x050D, 0x050C);
try instance.map.put(0x050F, 0x050E);
try instance.map.put(0x0511, 0x0510);
try instance.map.put(0x0513, 0x0512);
try instance.map.put(0x0515, 0x0514);
try instance.map.put(0x0517, 0x0516);
try instance.map.put(0x0519, 0x0518);
try instance.map.put(0x051B, 0x051A);
try instance.map.put(0x051D, 0x051C);
try instance.map.put(0x051F, 0x051E);
try instance.map.put(0x0521, 0x0520);
try instance.map.put(0x0523, 0x0522);
try instance.map.put(0x0525, 0x0524);
try instance.map.put(0x0527, 0x0526);
try instance.map.put(0x0529, 0x0528);
try instance.map.put(0x052B, 0x052A);
try instance.map.put(0x052D, 0x052C);
try instance.map.put(0x052F, 0x052E);
try instance.map.put(0x0561, 0x0531);
try instance.map.put(0x0562, 0x0532);
try instance.map.put(0x0563, 0x0533);
try instance.map.put(0x0564, 0x0534);
try instance.map.put(0x0565, 0x0535);
try instance.map.put(0x0566, 0x0536);
try instance.map.put(0x0567, 0x0537);
try instance.map.put(0x0568, 0x0538);
try instance.map.put(0x0569, 0x0539);
try instance.map.put(0x056A, 0x053A);
try instance.map.put(0x056B, 0x053B);
try instance.map.put(0x056C, 0x053C);
try instance.map.put(0x056D, 0x053D);
try instance.map.put(0x056E, 0x053E);
try instance.map.put(0x056F, 0x053F);
try instance.map.put(0x0570, 0x0540);
try instance.map.put(0x0571, 0x0541);
try instance.map.put(0x0572, 0x0542);
try instance.map.put(0x0573, 0x0543);
try instance.map.put(0x0574, 0x0544);
try instance.map.put(0x0575, 0x0545);
try instance.map.put(0x0576, 0x0546);
try instance.map.put(0x0577, 0x0547);
try instance.map.put(0x0578, 0x0548);
try instance.map.put(0x0579, 0x0549);
try instance.map.put(0x057A, 0x054A);
try instance.map.put(0x057B, 0x054B);
try instance.map.put(0x057C, 0x054C);
try instance.map.put(0x057D, 0x054D);
try instance.map.put(0x057E, 0x054E);
try instance.map.put(0x057F, 0x054F);
try instance.map.put(0x0580, 0x0550);
try instance.map.put(0x0581, 0x0551);
try instance.map.put(0x0582, 0x0552);
try instance.map.put(0x0583, 0x0553);
try instance.map.put(0x0584, 0x0554);
try instance.map.put(0x0585, 0x0555);
try instance.map.put(0x0586, 0x0556);
try instance.map.put(0x10D0, 0x10D0);
try instance.map.put(0x10D1, 0x10D1);
try instance.map.put(0x10D2, 0x10D2);
try instance.map.put(0x10D3, 0x10D3);
try instance.map.put(0x10D4, 0x10D4);
try instance.map.put(0x10D5, 0x10D5);
try instance.map.put(0x10D6, 0x10D6);
try instance.map.put(0x10D7, 0x10D7);
try instance.map.put(0x10D8, 0x10D8);
try instance.map.put(0x10D9, 0x10D9);
try instance.map.put(0x10DA, 0x10DA);
try instance.map.put(0x10DB, 0x10DB);
try instance.map.put(0x10DC, 0x10DC);
try instance.map.put(0x10DD, 0x10DD);
try instance.map.put(0x10DE, 0x10DE);
try instance.map.put(0x10DF, 0x10DF);
try instance.map.put(0x10E0, 0x10E0);
try instance.map.put(0x10E1, 0x10E1);
try instance.map.put(0x10E2, 0x10E2);
try instance.map.put(0x10E3, 0x10E3);
try instance.map.put(0x10E4, 0x10E4);
try instance.map.put(0x10E5, 0x10E5);
try instance.map.put(0x10E6, 0x10E6);
try instance.map.put(0x10E7, 0x10E7);
try instance.map.put(0x10E8, 0x10E8);
try instance.map.put(0x10E9, 0x10E9);
try instance.map.put(0x10EA, 0x10EA);
try instance.map.put(0x10EB, 0x10EB);
try instance.map.put(0x10EC, 0x10EC);
try instance.map.put(0x10ED, 0x10ED);
try instance.map.put(0x10EE, 0x10EE);
try instance.map.put(0x10EF, 0x10EF);
try instance.map.put(0x10F0, 0x10F0);
try instance.map.put(0x10F1, 0x10F1);
try instance.map.put(0x10F2, 0x10F2);
try instance.map.put(0x10F3, 0x10F3);
try instance.map.put(0x10F4, 0x10F4);
try instance.map.put(0x10F5, 0x10F5);
try instance.map.put(0x10F6, 0x10F6);
try instance.map.put(0x10F7, 0x10F7);
try instance.map.put(0x10F8, 0x10F8);
try instance.map.put(0x10F9, 0x10F9);
try instance.map.put(0x10FA, 0x10FA);
try instance.map.put(0x10FD, 0x10FD);
try instance.map.put(0x10FE, 0x10FE);
try instance.map.put(0x10FF, 0x10FF);
try instance.map.put(0x13F8, 0x13F0);
try instance.map.put(0x13F9, 0x13F1);
try instance.map.put(0x13FA, 0x13F2);
try instance.map.put(0x13FB, 0x13F3);
try instance.map.put(0x13FC, 0x13F4);
try instance.map.put(0x13FD, 0x13F5);
try instance.map.put(0x1C80, 0x0412);
try instance.map.put(0x1C81, 0x0414);
try instance.map.put(0x1C82, 0x041E);
try instance.map.put(0x1C83, 0x0421);
try instance.map.put(0x1C84, 0x0422);
try instance.map.put(0x1C85, 0x0422);
try instance.map.put(0x1C86, 0x042A);
try instance.map.put(0x1C87, 0x0462);
try instance.map.put(0x1C88, 0xA64A);
try instance.map.put(0x1D79, 0xA77D);
try instance.map.put(0x1D7D, 0x2C63);
try instance.map.put(0x1D8E, 0xA7C6);
try instance.map.put(0x1E01, 0x1E00);
try instance.map.put(0x1E03, 0x1E02);
try instance.map.put(0x1E05, 0x1E04);
try instance.map.put(0x1E07, 0x1E06);
try instance.map.put(0x1E09, 0x1E08);
try instance.map.put(0x1E0B, 0x1E0A);
try instance.map.put(0x1E0D, 0x1E0C);
try instance.map.put(0x1E0F, 0x1E0E);
try instance.map.put(0x1E11, 0x1E10);
try instance.map.put(0x1E13, 0x1E12);
try instance.map.put(0x1E15, 0x1E14);
try instance.map.put(0x1E17, 0x1E16);
try instance.map.put(0x1E19, 0x1E18);
try instance.map.put(0x1E1B, 0x1E1A);
try instance.map.put(0x1E1D, 0x1E1C);
try instance.map.put(0x1E1F, 0x1E1E);
try instance.map.put(0x1E21, 0x1E20);
try instance.map.put(0x1E23, 0x1E22);
try instance.map.put(0x1E25, 0x1E24);
try instance.map.put(0x1E27, 0x1E26);
try instance.map.put(0x1E29, 0x1E28);
try instance.map.put(0x1E2B, 0x1E2A);
try instance.map.put(0x1E2D, 0x1E2C);
try instance.map.put(0x1E2F, 0x1E2E);
try instance.map.put(0x1E31, 0x1E30);
try instance.map.put(0x1E33, 0x1E32);
try instance.map.put(0x1E35, 0x1E34);
try instance.map.put(0x1E37, 0x1E36);
try instance.map.put(0x1E39, 0x1E38);
try instance.map.put(0x1E3B, 0x1E3A);
try instance.map.put(0x1E3D, 0x1E3C);
try instance.map.put(0x1E3F, 0x1E3E);
try instance.map.put(0x1E41, 0x1E40);
try instance.map.put(0x1E43, 0x1E42);
try instance.map.put(0x1E45, 0x1E44);
try instance.map.put(0x1E47, 0x1E46);
try instance.map.put(0x1E49, 0x1E48);
try instance.map.put(0x1E4B, 0x1E4A);
try instance.map.put(0x1E4D, 0x1E4C);
try instance.map.put(0x1E4F, 0x1E4E);
try instance.map.put(0x1E51, 0x1E50);
try instance.map.put(0x1E53, 0x1E52);
try instance.map.put(0x1E55, 0x1E54);
try instance.map.put(0x1E57, 0x1E56);
try instance.map.put(0x1E59, 0x1E58);
try instance.map.put(0x1E5B, 0x1E5A);
try instance.map.put(0x1E5D, 0x1E5C);
try instance.map.put(0x1E5F, 0x1E5E);
try instance.map.put(0x1E61, 0x1E60);
try instance.map.put(0x1E63, 0x1E62);
try instance.map.put(0x1E65, 0x1E64);
try instance.map.put(0x1E67, 0x1E66);
try instance.map.put(0x1E69, 0x1E68);
try instance.map.put(0x1E6B, 0x1E6A);
try instance.map.put(0x1E6D, 0x1E6C);
try instance.map.put(0x1E6F, 0x1E6E);
try instance.map.put(0x1E71, 0x1E70);
try instance.map.put(0x1E73, 0x1E72);
try instance.map.put(0x1E75, 0x1E74);
try instance.map.put(0x1E77, 0x1E76);
try instance.map.put(0x1E79, 0x1E78);
try instance.map.put(0x1E7B, 0x1E7A);
try instance.map.put(0x1E7D, 0x1E7C);
try instance.map.put(0x1E7F, 0x1E7E);
try instance.map.put(0x1E81, 0x1E80);
try instance.map.put(0x1E83, 0x1E82);
try instance.map.put(0x1E85, 0x1E84);
try instance.map.put(0x1E87, 0x1E86);
try instance.map.put(0x1E89, 0x1E88);
try instance.map.put(0x1E8B, 0x1E8A);
try instance.map.put(0x1E8D, 0x1E8C);
try instance.map.put(0x1E8F, 0x1E8E);
try instance.map.put(0x1E91, 0x1E90);
try instance.map.put(0x1E93, 0x1E92);
try instance.map.put(0x1E95, 0x1E94);
try instance.map.put(0x1E9B, 0x1E60);
try instance.map.put(0x1EA1, 0x1EA0);
try instance.map.put(0x1EA3, 0x1EA2);
try instance.map.put(0x1EA5, 0x1EA4);
try instance.map.put(0x1EA7, 0x1EA6);
try instance.map.put(0x1EA9, 0x1EA8);
try instance.map.put(0x1EAB, 0x1EAA);
try instance.map.put(0x1EAD, 0x1EAC);
try instance.map.put(0x1EAF, 0x1EAE);
try instance.map.put(0x1EB1, 0x1EB0);
try instance.map.put(0x1EB3, 0x1EB2);
try instance.map.put(0x1EB5, 0x1EB4);
try instance.map.put(0x1EB7, 0x1EB6);
try instance.map.put(0x1EB9, 0x1EB8);
try instance.map.put(0x1EBB, 0x1EBA);
try instance.map.put(0x1EBD, 0x1EBC);
try instance.map.put(0x1EBF, 0x1EBE);
try instance.map.put(0x1EC1, 0x1EC0);
try instance.map.put(0x1EC3, 0x1EC2);
try instance.map.put(0x1EC5, 0x1EC4);
try instance.map.put(0x1EC7, 0x1EC6);
try instance.map.put(0x1EC9, 0x1EC8);
try instance.map.put(0x1ECB, 0x1ECA);
try instance.map.put(0x1ECD, 0x1ECC);
try instance.map.put(0x1ECF, 0x1ECE);
try instance.map.put(0x1ED1, 0x1ED0);
try instance.map.put(0x1ED3, 0x1ED2);
try instance.map.put(0x1ED5, 0x1ED4);
try instance.map.put(0x1ED7, 0x1ED6);
try instance.map.put(0x1ED9, 0x1ED8);
try instance.map.put(0x1EDB, 0x1EDA);
try instance.map.put(0x1EDD, 0x1EDC);
try instance.map.put(0x1EDF, 0x1EDE);
try instance.map.put(0x1EE1, 0x1EE0);
try instance.map.put(0x1EE3, 0x1EE2);
try instance.map.put(0x1EE5, 0x1EE4);
try instance.map.put(0x1EE7, 0x1EE6);
try instance.map.put(0x1EE9, 0x1EE8);
try instance.map.put(0x1EEB, 0x1EEA);
try instance.map.put(0x1EED, 0x1EEC);
try instance.map.put(0x1EEF, 0x1EEE);
try instance.map.put(0x1EF1, 0x1EF0);
try instance.map.put(0x1EF3, 0x1EF2);
try instance.map.put(0x1EF5, 0x1EF4);
try instance.map.put(0x1EF7, 0x1EF6);
try instance.map.put(0x1EF9, 0x1EF8);
try instance.map.put(0x1EFB, 0x1EFA);
try instance.map.put(0x1EFD, 0x1EFC);
try instance.map.put(0x1EFF, 0x1EFE);
try instance.map.put(0x1F00, 0x1F08);
try instance.map.put(0x1F01, 0x1F09);
try instance.map.put(0x1F02, 0x1F0A);
try instance.map.put(0x1F03, 0x1F0B);
try instance.map.put(0x1F04, 0x1F0C);
try instance.map.put(0x1F05, 0x1F0D);
try instance.map.put(0x1F06, 0x1F0E);
try instance.map.put(0x1F07, 0x1F0F);
try instance.map.put(0x1F10, 0x1F18);
try instance.map.put(0x1F11, 0x1F19);
try instance.map.put(0x1F12, 0x1F1A);
try instance.map.put(0x1F13, 0x1F1B);
try instance.map.put(0x1F14, 0x1F1C);
try instance.map.put(0x1F15, 0x1F1D);
try instance.map.put(0x1F20, 0x1F28);
try instance.map.put(0x1F21, 0x1F29);
try instance.map.put(0x1F22, 0x1F2A);
try instance.map.put(0x1F23, 0x1F2B);
try instance.map.put(0x1F24, 0x1F2C);
try instance.map.put(0x1F25, 0x1F2D);
try instance.map.put(0x1F26, 0x1F2E);
try instance.map.put(0x1F27, 0x1F2F);
try instance.map.put(0x1F30, 0x1F38);
try instance.map.put(0x1F31, 0x1F39);
try instance.map.put(0x1F32, 0x1F3A);
try instance.map.put(0x1F33, 0x1F3B);
try instance.map.put(0x1F34, 0x1F3C);
try instance.map.put(0x1F35, 0x1F3D);
try instance.map.put(0x1F36, 0x1F3E);
try instance.map.put(0x1F37, 0x1F3F);
try instance.map.put(0x1F40, 0x1F48);
try instance.map.put(0x1F41, 0x1F49);
try instance.map.put(0x1F42, 0x1F4A);
try instance.map.put(0x1F43, 0x1F4B);
try instance.map.put(0x1F44, 0x1F4C);
try instance.map.put(0x1F45, 0x1F4D);
try instance.map.put(0x1F51, 0x1F59);
try instance.map.put(0x1F53, 0x1F5B);
try instance.map.put(0x1F55, 0x1F5D);
try instance.map.put(0x1F57, 0x1F5F);
try instance.map.put(0x1F60, 0x1F68);
try instance.map.put(0x1F61, 0x1F69);
try instance.map.put(0x1F62, 0x1F6A);
try instance.map.put(0x1F63, 0x1F6B);
try instance.map.put(0x1F64, 0x1F6C);
try instance.map.put(0x1F65, 0x1F6D);
try instance.map.put(0x1F66, 0x1F6E);
try instance.map.put(0x1F67, 0x1F6F);
try instance.map.put(0x1F70, 0x1FBA);
try instance.map.put(0x1F71, 0x1FBB);
try instance.map.put(0x1F72, 0x1FC8);
try instance.map.put(0x1F73, 0x1FC9);
try instance.map.put(0x1F74, 0x1FCA);
try instance.map.put(0x1F75, 0x1FCB);
try instance.map.put(0x1F76, 0x1FDA);
try instance.map.put(0x1F77, 0x1FDB);
try instance.map.put(0x1F78, 0x1FF8);
try instance.map.put(0x1F79, 0x1FF9);
try instance.map.put(0x1F7A, 0x1FEA);
try instance.map.put(0x1F7B, 0x1FEB);
try instance.map.put(0x1F7C, 0x1FFA);
try instance.map.put(0x1F7D, 0x1FFB);
try instance.map.put(0x1F80, 0x1F88);
try instance.map.put(0x1F81, 0x1F89);
try instance.map.put(0x1F82, 0x1F8A);
try instance.map.put(0x1F83, 0x1F8B);
try instance.map.put(0x1F84, 0x1F8C);
try instance.map.put(0x1F85, 0x1F8D);
try instance.map.put(0x1F86, 0x1F8E);
try instance.map.put(0x1F87, 0x1F8F);
try instance.map.put(0x1F90, 0x1F98);
try instance.map.put(0x1F91, 0x1F99);
try instance.map.put(0x1F92, 0x1F9A);
try instance.map.put(0x1F93, 0x1F9B);
try instance.map.put(0x1F94, 0x1F9C);
try instance.map.put(0x1F95, 0x1F9D);
try instance.map.put(0x1F96, 0x1F9E);
try instance.map.put(0x1F97, 0x1F9F);
try instance.map.put(0x1FA0, 0x1FA8);
try instance.map.put(0x1FA1, 0x1FA9);
try instance.map.put(0x1FA2, 0x1FAA);
try instance.map.put(0x1FA3, 0x1FAB);
try instance.map.put(0x1FA4, 0x1FAC);
try instance.map.put(0x1FA5, 0x1FAD);
try instance.map.put(0x1FA6, 0x1FAE);
try instance.map.put(0x1FA7, 0x1FAF);
try instance.map.put(0x1FB0, 0x1FB8);
try instance.map.put(0x1FB1, 0x1FB9);
try instance.map.put(0x1FB3, 0x1FBC);
try instance.map.put(0x1FBE, 0x0399);
try instance.map.put(0x1FC3, 0x1FCC);
try instance.map.put(0x1FD0, 0x1FD8);
try instance.map.put(0x1FD1, 0x1FD9);
try instance.map.put(0x1FE0, 0x1FE8);
try instance.map.put(0x1FE1, 0x1FE9);
try instance.map.put(0x1FE5, 0x1FEC);
try instance.map.put(0x1FF3, 0x1FFC);
try instance.map.put(0x214E, 0x2132);
try instance.map.put(0x2170, 0x2160);
try instance.map.put(0x2171, 0x2161);
try instance.map.put(0x2172, 0x2162);
try instance.map.put(0x2173, 0x2163);
try instance.map.put(0x2174, 0x2164);
try instance.map.put(0x2175, 0x2165);
try instance.map.put(0x2176, 0x2166);
try instance.map.put(0x2177, 0x2167);
try instance.map.put(0x2178, 0x2168);
try instance.map.put(0x2179, 0x2169);
try instance.map.put(0x217A, 0x216A);
try instance.map.put(0x217B, 0x216B);
try instance.map.put(0x217C, 0x216C);
try instance.map.put(0x217D, 0x216D);
try instance.map.put(0x217E, 0x216E);
try instance.map.put(0x217F, 0x216F);
try instance.map.put(0x2184, 0x2183);
try instance.map.put(0x24D0, 0x24B6);
try instance.map.put(0x24D1, 0x24B7);
try instance.map.put(0x24D2, 0x24B8);
try instance.map.put(0x24D3, 0x24B9);
try instance.map.put(0x24D4, 0x24BA);
try instance.map.put(0x24D5, 0x24BB);
try instance.map.put(0x24D6, 0x24BC);
try instance.map.put(0x24D7, 0x24BD);
try instance.map.put(0x24D8, 0x24BE);
try instance.map.put(0x24D9, 0x24BF);
try instance.map.put(0x24DA, 0x24C0);
try instance.map.put(0x24DB, 0x24C1);
try instance.map.put(0x24DC, 0x24C2);
try instance.map.put(0x24DD, 0x24C3);
try instance.map.put(0x24DE, 0x24C4);
try instance.map.put(0x24DF, 0x24C5);
try instance.map.put(0x24E0, 0x24C6);
try instance.map.put(0x24E1, 0x24C7);
try instance.map.put(0x24E2, 0x24C8);
try instance.map.put(0x24E3, 0x24C9);
try instance.map.put(0x24E4, 0x24CA);
try instance.map.put(0x24E5, 0x24CB);
try instance.map.put(0x24E6, 0x24CC);
try instance.map.put(0x24E7, 0x24CD);
try instance.map.put(0x24E8, 0x24CE);
try instance.map.put(0x24E9, 0x24CF);
try instance.map.put(0x2C30, 0x2C00);
try instance.map.put(0x2C31, 0x2C01);
try instance.map.put(0x2C32, 0x2C02);
try instance.map.put(0x2C33, 0x2C03);
try instance.map.put(0x2C34, 0x2C04);
try instance.map.put(0x2C35, 0x2C05);
try instance.map.put(0x2C36, 0x2C06);
try instance.map.put(0x2C37, 0x2C07);
try instance.map.put(0x2C38, 0x2C08);
try instance.map.put(0x2C39, 0x2C09);
try instance.map.put(0x2C3A, 0x2C0A);
try instance.map.put(0x2C3B, 0x2C0B);
try instance.map.put(0x2C3C, 0x2C0C);
try instance.map.put(0x2C3D, 0x2C0D);
try instance.map.put(0x2C3E, 0x2C0E);
try instance.map.put(0x2C3F, 0x2C0F);
try instance.map.put(0x2C40, 0x2C10);
try instance.map.put(0x2C41, 0x2C11);
try instance.map.put(0x2C42, 0x2C12);
try instance.map.put(0x2C43, 0x2C13);
try instance.map.put(0x2C44, 0x2C14);
try instance.map.put(0x2C45, 0x2C15);
try instance.map.put(0x2C46, 0x2C16);
try instance.map.put(0x2C47, 0x2C17);
try instance.map.put(0x2C48, 0x2C18);
try instance.map.put(0x2C49, 0x2C19);
try instance.map.put(0x2C4A, 0x2C1A);
try instance.map.put(0x2C4B, 0x2C1B);
try instance.map.put(0x2C4C, 0x2C1C);
try instance.map.put(0x2C4D, 0x2C1D);
try instance.map.put(0x2C4E, 0x2C1E);
try instance.map.put(0x2C4F, 0x2C1F);
try instance.map.put(0x2C50, 0x2C20);
try instance.map.put(0x2C51, 0x2C21);
try instance.map.put(0x2C52, 0x2C22);
try instance.map.put(0x2C53, 0x2C23);
try instance.map.put(0x2C54, 0x2C24);
try instance.map.put(0x2C55, 0x2C25);
try instance.map.put(0x2C56, 0x2C26);
try instance.map.put(0x2C57, 0x2C27);
try instance.map.put(0x2C58, 0x2C28);
try instance.map.put(0x2C59, 0x2C29);
try instance.map.put(0x2C5A, 0x2C2A);
try instance.map.put(0x2C5B, 0x2C2B);
try instance.map.put(0x2C5C, 0x2C2C);
try instance.map.put(0x2C5D, 0x2C2D);
try instance.map.put(0x2C5E, 0x2C2E);
try instance.map.put(0x2C61, 0x2C60);
try instance.map.put(0x2C65, 0x023A);
try instance.map.put(0x2C66, 0x023E);
try instance.map.put(0x2C68, 0x2C67);
try instance.map.put(0x2C6A, 0x2C69);
try instance.map.put(0x2C6C, 0x2C6B);
try instance.map.put(0x2C73, 0x2C72);
try instance.map.put(0x2C76, 0x2C75);
try instance.map.put(0x2C81, 0x2C80);
try instance.map.put(0x2C83, 0x2C82);
try instance.map.put(0x2C85, 0x2C84);
try instance.map.put(0x2C87, 0x2C86);
try instance.map.put(0x2C89, 0x2C88);
try instance.map.put(0x2C8B, 0x2C8A);
try instance.map.put(0x2C8D, 0x2C8C);
try instance.map.put(0x2C8F, 0x2C8E);
try instance.map.put(0x2C91, 0x2C90);
try instance.map.put(0x2C93, 0x2C92);
try instance.map.put(0x2C95, 0x2C94);
try instance.map.put(0x2C97, 0x2C96);
try instance.map.put(0x2C99, 0x2C98);
try instance.map.put(0x2C9B, 0x2C9A);
try instance.map.put(0x2C9D, 0x2C9C);
try instance.map.put(0x2C9F, 0x2C9E);
try instance.map.put(0x2CA1, 0x2CA0);
try instance.map.put(0x2CA3, 0x2CA2);
try instance.map.put(0x2CA5, 0x2CA4);
try instance.map.put(0x2CA7, 0x2CA6);
try instance.map.put(0x2CA9, 0x2CA8);
try instance.map.put(0x2CAB, 0x2CAA);
try instance.map.put(0x2CAD, 0x2CAC);
try instance.map.put(0x2CAF, 0x2CAE);
try instance.map.put(0x2CB1, 0x2CB0);
try instance.map.put(0x2CB3, 0x2CB2);
try instance.map.put(0x2CB5, 0x2CB4);
try instance.map.put(0x2CB7, 0x2CB6);
try instance.map.put(0x2CB9, 0x2CB8);
try instance.map.put(0x2CBB, 0x2CBA);
try instance.map.put(0x2CBD, 0x2CBC);
try instance.map.put(0x2CBF, 0x2CBE);
try instance.map.put(0x2CC1, 0x2CC0);
try instance.map.put(0x2CC3, 0x2CC2);
try instance.map.put(0x2CC5, 0x2CC4);
try instance.map.put(0x2CC7, 0x2CC6);
try instance.map.put(0x2CC9, 0x2CC8);
try instance.map.put(0x2CCB, 0x2CCA);
try instance.map.put(0x2CCD, 0x2CCC);
try instance.map.put(0x2CCF, 0x2CCE);
try instance.map.put(0x2CD1, 0x2CD0);
try instance.map.put(0x2CD3, 0x2CD2);
try instance.map.put(0x2CD5, 0x2CD4);
try instance.map.put(0x2CD7, 0x2CD6);
try instance.map.put(0x2CD9, 0x2CD8);
try instance.map.put(0x2CDB, 0x2CDA);
try instance.map.put(0x2CDD, 0x2CDC);
try instance.map.put(0x2CDF, 0x2CDE);
try instance.map.put(0x2CE1, 0x2CE0);
try instance.map.put(0x2CE3, 0x2CE2);
try instance.map.put(0x2CEC, 0x2CEB);
try instance.map.put(0x2CEE, 0x2CED);
try instance.map.put(0x2CF3, 0x2CF2);
try instance.map.put(0x2D00, 0x10A0);
try instance.map.put(0x2D01, 0x10A1);
try instance.map.put(0x2D02, 0x10A2);
try instance.map.put(0x2D03, 0x10A3);
try instance.map.put(0x2D04, 0x10A4);
try instance.map.put(0x2D05, 0x10A5);
try instance.map.put(0x2D06, 0x10A6);
try instance.map.put(0x2D07, 0x10A7);
try instance.map.put(0x2D08, 0x10A8);
try instance.map.put(0x2D09, 0x10A9);
try instance.map.put(0x2D0A, 0x10AA);
try instance.map.put(0x2D0B, 0x10AB);
try instance.map.put(0x2D0C, 0x10AC);
try instance.map.put(0x2D0D, 0x10AD);
try instance.map.put(0x2D0E, 0x10AE);
try instance.map.put(0x2D0F, 0x10AF);
try instance.map.put(0x2D10, 0x10B0);
try instance.map.put(0x2D11, 0x10B1);
try instance.map.put(0x2D12, 0x10B2);
try instance.map.put(0x2D13, 0x10B3);
try instance.map.put(0x2D14, 0x10B4);
try instance.map.put(0x2D15, 0x10B5);
try instance.map.put(0x2D16, 0x10B6);
try instance.map.put(0x2D17, 0x10B7);
try instance.map.put(0x2D18, 0x10B8);
try instance.map.put(0x2D19, 0x10B9);
try instance.map.put(0x2D1A, 0x10BA);
try instance.map.put(0x2D1B, 0x10BB);
try instance.map.put(0x2D1C, 0x10BC);
try instance.map.put(0x2D1D, 0x10BD);
try instance.map.put(0x2D1E, 0x10BE);
try instance.map.put(0x2D1F, 0x10BF);
try instance.map.put(0x2D20, 0x10C0);
try instance.map.put(0x2D21, 0x10C1);
try instance.map.put(0x2D22, 0x10C2);
try instance.map.put(0x2D23, 0x10C3);
try instance.map.put(0x2D24, 0x10C4);
try instance.map.put(0x2D25, 0x10C5);
try instance.map.put(0x2D27, 0x10C7);
try instance.map.put(0x2D2D, 0x10CD);
try instance.map.put(0xA641, 0xA640);
try instance.map.put(0xA643, 0xA642);
try instance.map.put(0xA645, 0xA644);
try instance.map.put(0xA647, 0xA646);
try instance.map.put(0xA649, 0xA648);
try instance.map.put(0xA64B, 0xA64A);
try instance.map.put(0xA64D, 0xA64C);
try instance.map.put(0xA64F, 0xA64E);
try instance.map.put(0xA651, 0xA650);
try instance.map.put(0xA653, 0xA652);
try instance.map.put(0xA655, 0xA654);
try instance.map.put(0xA657, 0xA656);
try instance.map.put(0xA659, 0xA658);
try instance.map.put(0xA65B, 0xA65A);
try instance.map.put(0xA65D, 0xA65C);
try instance.map.put(0xA65F, 0xA65E);
try instance.map.put(0xA661, 0xA660);
try instance.map.put(0xA663, 0xA662);
try instance.map.put(0xA665, 0xA664);
try instance.map.put(0xA667, 0xA666);
try instance.map.put(0xA669, 0xA668);
try instance.map.put(0xA66B, 0xA66A);
try instance.map.put(0xA66D, 0xA66C);
try instance.map.put(0xA681, 0xA680);
try instance.map.put(0xA683, 0xA682);
try instance.map.put(0xA685, 0xA684);
try instance.map.put(0xA687, 0xA686);
try instance.map.put(0xA689, 0xA688);
try instance.map.put(0xA68B, 0xA68A);
try instance.map.put(0xA68D, 0xA68C);
try instance.map.put(0xA68F, 0xA68E);
try instance.map.put(0xA691, 0xA690);
try instance.map.put(0xA693, 0xA692);
try instance.map.put(0xA695, 0xA694);
try instance.map.put(0xA697, 0xA696);
try instance.map.put(0xA699, 0xA698);
try instance.map.put(0xA69B, 0xA69A);
try instance.map.put(0xA723, 0xA722);
try instance.map.put(0xA725, 0xA724);
try instance.map.put(0xA727, 0xA726);
try instance.map.put(0xA729, 0xA728);
try instance.map.put(0xA72B, 0xA72A);
try instance.map.put(0xA72D, 0xA72C);
try instance.map.put(0xA72F, 0xA72E);
try instance.map.put(0xA733, 0xA732);
try instance.map.put(0xA735, 0xA734);
try instance.map.put(0xA737, 0xA736);
try instance.map.put(0xA739, 0xA738);
try instance.map.put(0xA73B, 0xA73A);
try instance.map.put(0xA73D, 0xA73C);
try instance.map.put(0xA73F, 0xA73E);
try instance.map.put(0xA741, 0xA740);
try instance.map.put(0xA743, 0xA742);
try instance.map.put(0xA745, 0xA744);
try instance.map.put(0xA747, 0xA746);
try instance.map.put(0xA749, 0xA748);
try instance.map.put(0xA74B, 0xA74A);
try instance.map.put(0xA74D, 0xA74C);
try instance.map.put(0xA74F, 0xA74E);
try instance.map.put(0xA751, 0xA750);
try instance.map.put(0xA753, 0xA752);
try instance.map.put(0xA755, 0xA754);
try instance.map.put(0xA757, 0xA756);
try instance.map.put(0xA759, 0xA758);
try instance.map.put(0xA75B, 0xA75A);
try instance.map.put(0xA75D, 0xA75C);
try instance.map.put(0xA75F, 0xA75E);
try instance.map.put(0xA761, 0xA760);
try instance.map.put(0xA763, 0xA762);
try instance.map.put(0xA765, 0xA764);
try instance.map.put(0xA767, 0xA766);
try instance.map.put(0xA769, 0xA768);
try instance.map.put(0xA76B, 0xA76A);
try instance.map.put(0xA76D, 0xA76C);
try instance.map.put(0xA76F, 0xA76E);
try instance.map.put(0xA77A, 0xA779);
try instance.map.put(0xA77C, 0xA77B);
try instance.map.put(0xA77F, 0xA77E);
try instance.map.put(0xA781, 0xA780);
try instance.map.put(0xA783, 0xA782);
try instance.map.put(0xA785, 0xA784);
try instance.map.put(0xA787, 0xA786);
try instance.map.put(0xA78C, 0xA78B);
try instance.map.put(0xA791, 0xA790);
try instance.map.put(0xA793, 0xA792);
try instance.map.put(0xA794, 0xA7C4);
try instance.map.put(0xA797, 0xA796);
try instance.map.put(0xA799, 0xA798);
try instance.map.put(0xA79B, 0xA79A);
try instance.map.put(0xA79D, 0xA79C);
try instance.map.put(0xA79F, 0xA79E);
try instance.map.put(0xA7A1, 0xA7A0);
try instance.map.put(0xA7A3, 0xA7A2);
try instance.map.put(0xA7A5, 0xA7A4);
try instance.map.put(0xA7A7, 0xA7A6);
try instance.map.put(0xA7A9, 0xA7A8);
try instance.map.put(0xA7B5, 0xA7B4);
try instance.map.put(0xA7B7, 0xA7B6);
try instance.map.put(0xA7B9, 0xA7B8);
try instance.map.put(0xA7BB, 0xA7BA);
try instance.map.put(0xA7BD, 0xA7BC);
try instance.map.put(0xA7BF, 0xA7BE);
try instance.map.put(0xA7C3, 0xA7C2);
try instance.map.put(0xA7C8, 0xA7C7);
try instance.map.put(0xA7CA, 0xA7C9);
try instance.map.put(0xA7F6, 0xA7F5);
try instance.map.put(0xAB53, 0xA7B3);
try instance.map.put(0xAB70, 0x13A0);
try instance.map.put(0xAB71, 0x13A1);
try instance.map.put(0xAB72, 0x13A2);
try instance.map.put(0xAB73, 0x13A3);
try instance.map.put(0xAB74, 0x13A4);
try instance.map.put(0xAB75, 0x13A5);
try instance.map.put(0xAB76, 0x13A6);
try instance.map.put(0xAB77, 0x13A7);
try instance.map.put(0xAB78, 0x13A8);
try instance.map.put(0xAB79, 0x13A9);
try instance.map.put(0xAB7A, 0x13AA);
try instance.map.put(0xAB7B, 0x13AB);
try instance.map.put(0xAB7C, 0x13AC);
try instance.map.put(0xAB7D, 0x13AD);
try instance.map.put(0xAB7E, 0x13AE);
try instance.map.put(0xAB7F, 0x13AF);
try instance.map.put(0xAB80, 0x13B0);
try instance.map.put(0xAB81, 0x13B1);
try instance.map.put(0xAB82, 0x13B2);
try instance.map.put(0xAB83, 0x13B3);
try instance.map.put(0xAB84, 0x13B4);
try instance.map.put(0xAB85, 0x13B5);
try instance.map.put(0xAB86, 0x13B6);
try instance.map.put(0xAB87, 0x13B7);
try instance.map.put(0xAB88, 0x13B8);
try instance.map.put(0xAB89, 0x13B9);
try instance.map.put(0xAB8A, 0x13BA);
try instance.map.put(0xAB8B, 0x13BB);
try instance.map.put(0xAB8C, 0x13BC);
try instance.map.put(0xAB8D, 0x13BD);
try instance.map.put(0xAB8E, 0x13BE);
try instance.map.put(0xAB8F, 0x13BF);
try instance.map.put(0xAB90, 0x13C0);
try instance.map.put(0xAB91, 0x13C1);
try instance.map.put(0xAB92, 0x13C2);
try instance.map.put(0xAB93, 0x13C3);
try instance.map.put(0xAB94, 0x13C4);
try instance.map.put(0xAB95, 0x13C5);
try instance.map.put(0xAB96, 0x13C6);
try instance.map.put(0xAB97, 0x13C7);
try instance.map.put(0xAB98, 0x13C8);
try instance.map.put(0xAB99, 0x13C9);
try instance.map.put(0xAB9A, 0x13CA);
try instance.map.put(0xAB9B, 0x13CB);
try instance.map.put(0xAB9C, 0x13CC);
try instance.map.put(0xAB9D, 0x13CD);
try instance.map.put(0xAB9E, 0x13CE);
try instance.map.put(0xAB9F, 0x13CF);
try instance.map.put(0xABA0, 0x13D0);
try instance.map.put(0xABA1, 0x13D1);
try instance.map.put(0xABA2, 0x13D2);
try instance.map.put(0xABA3, 0x13D3);
try instance.map.put(0xABA4, 0x13D4);
try instance.map.put(0xABA5, 0x13D5);
try instance.map.put(0xABA6, 0x13D6);
try instance.map.put(0xABA7, 0x13D7);
try instance.map.put(0xABA8, 0x13D8);
try instance.map.put(0xABA9, 0x13D9);
try instance.map.put(0xABAA, 0x13DA);
try instance.map.put(0xABAB, 0x13DB);
try instance.map.put(0xABAC, 0x13DC);
try instance.map.put(0xABAD, 0x13DD);
try instance.map.put(0xABAE, 0x13DE);
try instance.map.put(0xABAF, 0x13DF);
try instance.map.put(0xABB0, 0x13E0);
try instance.map.put(0xABB1, 0x13E1);
try instance.map.put(0xABB2, 0x13E2);
try instance.map.put(0xABB3, 0x13E3);
try instance.map.put(0xABB4, 0x13E4);
try instance.map.put(0xABB5, 0x13E5);
try instance.map.put(0xABB6, 0x13E6);
try instance.map.put(0xABB7, 0x13E7);
try instance.map.put(0xABB8, 0x13E8);
try instance.map.put(0xABB9, 0x13E9);
try instance.map.put(0xABBA, 0x13EA);
try instance.map.put(0xABBB, 0x13EB);
try instance.map.put(0xABBC, 0x13EC);
try instance.map.put(0xABBD, 0x13ED);
try instance.map.put(0xABBE, 0x13EE);
try instance.map.put(0xABBF, 0x13EF);
try instance.map.put(0xFF41, 0xFF21);
try instance.map.put(0xFF42, 0xFF22);
try instance.map.put(0xFF43, 0xFF23);
try instance.map.put(0xFF44, 0xFF24);
try instance.map.put(0xFF45, 0xFF25);
try instance.map.put(0xFF46, 0xFF26);
try instance.map.put(0xFF47, 0xFF27);
try instance.map.put(0xFF48, 0xFF28);
try instance.map.put(0xFF49, 0xFF29);
try instance.map.put(0xFF4A, 0xFF2A);
try instance.map.put(0xFF4B, 0xFF2B);
try instance.map.put(0xFF4C, 0xFF2C);
try instance.map.put(0xFF4D, 0xFF2D);
try instance.map.put(0xFF4E, 0xFF2E);
try instance.map.put(0xFF4F, 0xFF2F);
try instance.map.put(0xFF50, 0xFF30);
try instance.map.put(0xFF51, 0xFF31);
try instance.map.put(0xFF52, 0xFF32);
try instance.map.put(0xFF53, 0xFF33);
try instance.map.put(0xFF54, 0xFF34);
try instance.map.put(0xFF55, 0xFF35);
try instance.map.put(0xFF56, 0xFF36);
try instance.map.put(0xFF57, 0xFF37);
try instance.map.put(0xFF58, 0xFF38);
try instance.map.put(0xFF59, 0xFF39);
try instance.map.put(0xFF5A, 0xFF3A);
try instance.map.put(0x10428, 0x10400);
try instance.map.put(0x10429, 0x10401);
try instance.map.put(0x1042A, 0x10402);
try instance.map.put(0x1042B, 0x10403);
try instance.map.put(0x1042C, 0x10404);
try instance.map.put(0x1042D, 0x10405);
try instance.map.put(0x1042E, 0x10406);
try instance.map.put(0x1042F, 0x10407);
try instance.map.put(0x10430, 0x10408);
try instance.map.put(0x10431, 0x10409);
try instance.map.put(0x10432, 0x1040A);
try instance.map.put(0x10433, 0x1040B);
try instance.map.put(0x10434, 0x1040C);
try instance.map.put(0x10435, 0x1040D);
try instance.map.put(0x10436, 0x1040E);
try instance.map.put(0x10437, 0x1040F);
try instance.map.put(0x10438, 0x10410);
try instance.map.put(0x10439, 0x10411);
try instance.map.put(0x1043A, 0x10412);
try instance.map.put(0x1043B, 0x10413);
try instance.map.put(0x1043C, 0x10414);
try instance.map.put(0x1043D, 0x10415);
try instance.map.put(0x1043E, 0x10416);
try instance.map.put(0x1043F, 0x10417);
try instance.map.put(0x10440, 0x10418);
try instance.map.put(0x10441, 0x10419);
try instance.map.put(0x10442, 0x1041A);
try instance.map.put(0x10443, 0x1041B);
try instance.map.put(0x10444, 0x1041C);
try instance.map.put(0x10445, 0x1041D);
try instance.map.put(0x10446, 0x1041E);
try instance.map.put(0x10447, 0x1041F);
try instance.map.put(0x10448, 0x10420);
try instance.map.put(0x10449, 0x10421);
try instance.map.put(0x1044A, 0x10422);
try instance.map.put(0x1044B, 0x10423);
try instance.map.put(0x1044C, 0x10424);
try instance.map.put(0x1044D, 0x10425);
try instance.map.put(0x1044E, 0x10426);
try instance.map.put(0x1044F, 0x10427);
try instance.map.put(0x104D8, 0x104B0);
try instance.map.put(0x104D9, 0x104B1);
try instance.map.put(0x104DA, 0x104B2);
try instance.map.put(0x104DB, 0x104B3);
try instance.map.put(0x104DC, 0x104B4);
try instance.map.put(0x104DD, 0x104B5);
try instance.map.put(0x104DE, 0x104B6);
try instance.map.put(0x104DF, 0x104B7);
try instance.map.put(0x104E0, 0x104B8);
try instance.map.put(0x104E1, 0x104B9);
try instance.map.put(0x104E2, 0x104BA);
try instance.map.put(0x104E3, 0x104BB);
try instance.map.put(0x104E4, 0x104BC);
try instance.map.put(0x104E5, 0x104BD);
try instance.map.put(0x104E6, 0x104BE);
try instance.map.put(0x104E7, 0x104BF);
try instance.map.put(0x104E8, 0x104C0);
try instance.map.put(0x104E9, 0x104C1);
try instance.map.put(0x104EA, 0x104C2);
try instance.map.put(0x104EB, 0x104C3);
try instance.map.put(0x104EC, 0x104C4);
try instance.map.put(0x104ED, 0x104C5);
try instance.map.put(0x104EE, 0x104C6);
try instance.map.put(0x104EF, 0x104C7);
try instance.map.put(0x104F0, 0x104C8);
try instance.map.put(0x104F1, 0x104C9);
try instance.map.put(0x104F2, 0x104CA);
try instance.map.put(0x104F3, 0x104CB);
try instance.map.put(0x104F4, 0x104CC);
try instance.map.put(0x104F5, 0x104CD);
try instance.map.put(0x104F6, 0x104CE);
try instance.map.put(0x104F7, 0x104CF);
try instance.map.put(0x104F8, 0x104D0);
try instance.map.put(0x104F9, 0x104D1);
try instance.map.put(0x104FA, 0x104D2);
try instance.map.put(0x104FB, 0x104D3);
try instance.map.put(0x10CC0, 0x10C80);
try instance.map.put(0x10CC1, 0x10C81);
try instance.map.put(0x10CC2, 0x10C82);
try instance.map.put(0x10CC3, 0x10C83);
try instance.map.put(0x10CC4, 0x10C84);
try instance.map.put(0x10CC5, 0x10C85);
try instance.map.put(0x10CC6, 0x10C86);
try instance.map.put(0x10CC7, 0x10C87);
try instance.map.put(0x10CC8, 0x10C88);
try instance.map.put(0x10CC9, 0x10C89);
try instance.map.put(0x10CCA, 0x10C8A);
try instance.map.put(0x10CCB, 0x10C8B);
try instance.map.put(0x10CCC, 0x10C8C);
try instance.map.put(0x10CCD, 0x10C8D);
try instance.map.put(0x10CCE, 0x10C8E);
try instance.map.put(0x10CCF, 0x10C8F);
try instance.map.put(0x10CD0, 0x10C90);
try instance.map.put(0x10CD1, 0x10C91);
try instance.map.put(0x10CD2, 0x10C92);
try instance.map.put(0x10CD3, 0x10C93);
try instance.map.put(0x10CD4, 0x10C94);
try instance.map.put(0x10CD5, 0x10C95);
try instance.map.put(0x10CD6, 0x10C96);
try instance.map.put(0x10CD7, 0x10C97);
try instance.map.put(0x10CD8, 0x10C98);
try instance.map.put(0x10CD9, 0x10C99);
try instance.map.put(0x10CDA, 0x10C9A);
try instance.map.put(0x10CDB, 0x10C9B);
try instance.map.put(0x10CDC, 0x10C9C);
try instance.map.put(0x10CDD, 0x10C9D);
try instance.map.put(0x10CDE, 0x10C9E);
try instance.map.put(0x10CDF, 0x10C9F);
try instance.map.put(0x10CE0, 0x10CA0);
try instance.map.put(0x10CE1, 0x10CA1);
try instance.map.put(0x10CE2, 0x10CA2);
try instance.map.put(0x10CE3, 0x10CA3);
try instance.map.put(0x10CE4, 0x10CA4);
try instance.map.put(0x10CE5, 0x10CA5);
try instance.map.put(0x10CE6, 0x10CA6);
try instance.map.put(0x10CE7, 0x10CA7);
try instance.map.put(0x10CE8, 0x10CA8);
try instance.map.put(0x10CE9, 0x10CA9);
try instance.map.put(0x10CEA, 0x10CAA);
try instance.map.put(0x10CEB, 0x10CAB);
try instance.map.put(0x10CEC, 0x10CAC);
try instance.map.put(0x10CED, 0x10CAD);
try instance.map.put(0x10CEE, 0x10CAE);
try instance.map.put(0x10CEF, 0x10CAF);
try instance.map.put(0x10CF0, 0x10CB0);
try instance.map.put(0x10CF1, 0x10CB1);
try instance.map.put(0x10CF2, 0x10CB2);
try instance.map.put(0x118C0, 0x118A0);
try instance.map.put(0x118C1, 0x118A1);
try instance.map.put(0x118C2, 0x118A2);
try instance.map.put(0x118C3, 0x118A3);
try instance.map.put(0x118C4, 0x118A4);
try instance.map.put(0x118C5, 0x118A5);
try instance.map.put(0x118C6, 0x118A6);
try instance.map.put(0x118C7, 0x118A7);
try instance.map.put(0x118C8, 0x118A8);
try instance.map.put(0x118C9, 0x118A9);
try instance.map.put(0x118CA, 0x118AA);
try instance.map.put(0x118CB, 0x118AB);
try instance.map.put(0x118CC, 0x118AC);
try instance.map.put(0x118CD, 0x118AD);
try instance.map.put(0x118CE, 0x118AE);
try instance.map.put(0x118CF, 0x118AF);
try instance.map.put(0x118D0, 0x118B0);
try instance.map.put(0x118D1, 0x118B1);
try instance.map.put(0x118D2, 0x118B2);
try instance.map.put(0x118D3, 0x118B3);
try instance.map.put(0x118D4, 0x118B4);
try instance.map.put(0x118D5, 0x118B5);
try instance.map.put(0x118D6, 0x118B6);
try instance.map.put(0x118D7, 0x118B7);
try instance.map.put(0x118D8, 0x118B8);
try instance.map.put(0x118D9, 0x118B9);
try instance.map.put(0x118DA, 0x118BA);
try instance.map.put(0x118DB, 0x118BB);
try instance.map.put(0x118DC, 0x118BC);
try instance.map.put(0x118DD, 0x118BD);
try instance.map.put(0x118DE, 0x118BE);
try instance.map.put(0x118DF, 0x118BF);
try instance.map.put(0x16E60, 0x16E40);
try instance.map.put(0x16E61, 0x16E41);
try instance.map.put(0x16E62, 0x16E42);
try instance.map.put(0x16E63, 0x16E43);
try instance.map.put(0x16E64, 0x16E44);
try instance.map.put(0x16E65, 0x16E45);
try instance.map.put(0x16E66, 0x16E46);
try instance.map.put(0x16E67, 0x16E47);
try instance.map.put(0x16E68, 0x16E48);
try instance.map.put(0x16E69, 0x16E49);
try instance.map.put(0x16E6A, 0x16E4A);
try instance.map.put(0x16E6B, 0x16E4B);
try instance.map.put(0x16E6C, 0x16E4C);
try instance.map.put(0x16E6D, 0x16E4D);
try instance.map.put(0x16E6E, 0x16E4E);
try instance.map.put(0x16E6F, 0x16E4F);
try instance.map.put(0x16E70, 0x16E50);
try instance.map.put(0x16E71, 0x16E51);
try instance.map.put(0x16E72, 0x16E52);
try instance.map.put(0x16E73, 0x16E53);
try instance.map.put(0x16E74, 0x16E54);
try instance.map.put(0x16E75, 0x16E55);
try instance.map.put(0x16E76, 0x16E56);
try instance.map.put(0x16E77, 0x16E57);
try instance.map.put(0x16E78, 0x16E58);
try instance.map.put(0x16E79, 0x16E59);
try instance.map.put(0x16E7A, 0x16E5A);
try instance.map.put(0x16E7B, 0x16E5B);
try instance.map.put(0x16E7C, 0x16E5C);
try instance.map.put(0x16E7D, 0x16E5D);
try instance.map.put(0x16E7E, 0x16E5E);
try instance.map.put(0x16E7F, 0x16E5F);
try instance.map.put(0x1E922, 0x1E900);
try instance.map.put(0x1E923, 0x1E901);
try instance.map.put(0x1E924, 0x1E902);
try instance.map.put(0x1E925, 0x1E903);
try instance.map.put(0x1E926, 0x1E904);
try instance.map.put(0x1E927, 0x1E905);
try instance.map.put(0x1E928, 0x1E906);
try instance.map.put(0x1E929, 0x1E907);
try instance.map.put(0x1E92A, 0x1E908);
try instance.map.put(0x1E92B, 0x1E909);
try instance.map.put(0x1E92C, 0x1E90A);
try instance.map.put(0x1E92D, 0x1E90B);
try instance.map.put(0x1E92E, 0x1E90C);
try instance.map.put(0x1E92F, 0x1E90D);
try instance.map.put(0x1E930, 0x1E90E);
try instance.map.put(0x1E931, 0x1E90F);
try instance.map.put(0x1E932, 0x1E910);
try instance.map.put(0x1E933, 0x1E911);
try instance.map.put(0x1E934, 0x1E912);
try instance.map.put(0x1E935, 0x1E913);
try instance.map.put(0x1E936, 0x1E914);
try instance.map.put(0x1E937, 0x1E915);
try instance.map.put(0x1E938, 0x1E916);
try instance.map.put(0x1E939, 0x1E917);
try instance.map.put(0x1E93A, 0x1E918);
try instance.map.put(0x1E93B, 0x1E919);
try instance.map.put(0x1E93C, 0x1E91A);
try instance.map.put(0x1E93D, 0x1E91B);
try instance.map.put(0x1E93E, 0x1E91C);
try instance.map.put(0x1E93F, 0x1E91D);
try instance.map.put(0x1E940, 0x1E91E);
try instance.map.put(0x1E941, 0x1E91F);
try instance.map.put(0x1E942, 0x1E920);
try instance.map.put(0x1E943, 0x1E921);
// Placeholder: 0. Method suffix, 1. Struct name
return instance;
}
    /// Frees the backing hash map. The TitleMap must not be used afterwards.
    pub fn deinit(self: *TitleMap) void {
        self.map.deinit();
    }
/// toTitle maps the code point to the desired case or returns the same code point if no mapping exists.
pub fn toTitle(self: TitleMap, cp: u21) u21 {
return if (self.map.get(cp)) |mcp| mcp else cp;
}
|
src/components/autogen/UnicodeData/TitleMap.zig
|
const std = @import("std");
const mem = std.mem;
const Allocator = mem.Allocator;
const json = @import("json");
// fn parseHexFormatColorRgb(str: []const u8) ![3]u8 {
// const r = try std.fmt.parseInt(u8, str[0..2], 16);
// const g = try std.fmt.parseInt(u8, str[2..4], 16);
// const b = try std.fmt.parseInt(u8, str[4..6], 16);
// return [_]u8{ r, g, b };
// }
//
// fn parseHexFormatColorRgba(str: []const u8) ![4]u8 {
// const r = try std.fmt.parseInt(u8, str[0..2], 16);
// const g = try std.fmt.parseInt(u8, str[2..4], 16);
// const b = try std.fmt.parseInt(u8, str[4..6], 16);
// const a = try std.fmt.parseInt(u8, str[6..8], 16);
// return [_]u8{ r, g, b, a };
// }
// TODO probably change the implementation and just do it the 'normal way'
/// Tile-layer payload: either decoded global tile IDs or still-encoded
/// base64 data, depending on the layer's `encoding` field.
pub const Data = union(enum) {
    gids: []u64,
    base64_data: []u8,
};
/// A rectangular piece of a tile layer (used by chunked/streamed maps).
pub const Chunk = struct {
    data: Data,
    x: i64,
    y: i64,
    width: i64,
    height: i64,
};
/// Value of a custom property; the active variant mirrors the JSON type.
pub const PropertyValue = union(enum) {
    string: []u8,
    integer: i64,
    float: f64,
    boolean: bool,
};
/// A named custom property attached to a map element.
pub const Property = struct {
    name: []u8,
    @"type": []u8,
    value: PropertyValue,
};
/// A layer of tile data parsed from Tiled JSON.
/// Optional fields default to null when the key is absent from the document.
pub const TileLayer = struct {
    chunks: ?[]Chunk = null,
    compression: ?[]u8 = null,
    data: Data,
    encoding: ?[]u8 = null,
    id: i64,
    name: []u8,
    offsetx: ?f64 = null,
    offsety: ?f64 = null,
    opacity: f64,
    properties: ?[]Property = null,
    startx: ?i64 = null,
    starty: ?i64 = null,
    tintcolor: ?[]u8 = null,
    @"type": []u8,
    visible: bool,
    x: i64,
    y: i64,
    width: ?i64 = null,
    height: ?i64 = null,
};
/// A 2D point, used by polygon and polyline objects.
pub const Point = struct {
    x: f64,
    y: f64,
};
/// Text content of a text object; all styling fields are optional.
pub const Text = struct {
    bold: ?bool = null,
    color: ?[]u8 = null,
    fontfamily: ?[]u8 = null,
    halign: ?[]u8 = null,
    italic: ?bool = null,
    kerning: ?bool = null,
    pixelsize: ?i64 = null,
    strikeout: ?bool = null,
    text: []u8,
    underline: ?bool = null,
    valign: ?[]u8 = null,
    wrap: ?bool = null,
};
/// A placed map object. Exactly which of the optional shape fields
/// (ellipse/point/polygon/polyline/text/gid) is set determines its kind.
pub const Object = struct {
    ellipse: ?bool = null,
    height: f64,
    gid: ?i64 = null,
    id: i64,
    name: []u8,
    point: ?bool = null,
    polygon: ?[]Point = null,
    polyline: ?[]Point = null,
    properties: ?[]Property = null,
    rotation: f64,
    template: ?[]u8 = null,
    text: ?Text = null,
    @"type": []u8,
    visible: bool,
    width: f64,
    x: f64,
    y: f64,
};
/// A layer that holds free-standing objects rather than tiles.
pub const ObjectGroup = struct {
    draworder: []u8,
    id: i64,
    name: []u8,
    objects: []Object,
    offsetx: ?f64 = null,
    offsety: ?f64 = null,
    opacity: f64,
    properties: ?[]Property = null,
    startx: ?i64 = null,
    starty: ?i64 = null,
    tintcolor: ?[]u8 = null,
    @"type": []u8,
    visible: bool,
    x: i64,
    y: i64,
    width: ?i64 = null,
    height: ?i64 = null,
};
/// A layer consisting of a single image.
pub const ImageLayer = struct {
    id: i64,
    image: []u8,
    name: []u8,
    offsetx: ?f64 = null,
    offsety: ?f64 = null,
    opacity: f64,
    properties: ?[]Property = null,
    startx: ?i64 = null,
    starty: ?i64 = null,
    tintcolor: ?[]u8 = null,
    transparentcolor: ?[]u8 = null,
    @"type": []u8,
    visible: bool,
    x: i64,
    y: i64,
    width: ?i64 = null,
    height: ?i64 = null,
};
/// A group layer that nests other layers.
/// Consistency fix: the optional fields now carry `= null` defaults like
/// every sibling layer struct (TileLayer/ObjectGroup/ImageLayer), so a
/// Group can be parsed when those keys are absent from the JSON.
pub const Group = struct {
    id: i64,
    layers: []Layer,
    name: []u8,
    offsetx: f64,
    offsety: f64,
    opacity: f64,
    properties: ?[]Property = null,
    startx: i64,
    starty: i64,
    tintcolor: ?[]u8 = null,
    @"type": []u8,
    visible: bool,
    x: i64,
    y: i64,
    width: i64,
    height: i64,
};
/// Discriminated union over the four layer kinds a map can contain.
pub const Layer = union(enum) {
    TileLayer: TileLayer,
    ObjectGroup: ObjectGroup,
    ImageLayer: ImageLayer,
    Group: Group,
};
/// One frame of a tile animation: which tile to show and for how long.
pub const Frame = struct {
    tileid: i64,
    duration: i64,
};
/// Per-tile metadata within a tileset (image, collision shapes, animation).
pub const Tile = struct {
    id: i64,
    image: []u8,
    imagewidth: i64,
    imageheight: i64,
    properties: ?[]Property = null,
    object_group: ?ObjectGroup = null,
    animation: ?[]Frame = null,
    tile_type: ?[]u8 = null,
    probability: ?f64 = null,
};
/// Grid settings of a tileset.
pub const Grid = struct {
    height: i64,
    orientation: ?[]u8 = null,
    width: i64,
};
/// Pixel offset applied when drawing tiles from a tileset.
pub const TileOffset = struct {
    x: i64,
    y: i64,
};
/// A terrain type defined by a tileset.
pub const Terrain = struct {
    name: []u8,
    properties: ?[]Property = null,
    tile: i64,
};
/// A tileset referenced by the map; optional fields default to null when
/// absent from the JSON.
pub const Tileset = struct {
    backgroundcolor: ?[]u8 = null,
    columns: i64,
    firstgid: i64,
    grid: ?Grid = null,
    image: []u8,
    imagewidth: i64,
    imageheight: i64,
    margin: i64,
    name: []u8,
    objectalignment: ?[]u8 = null,
    properties: ?[]Property = null,
    source: ?[]u8 = null,
    spacing: i64,
    terrains: ?[]Terrain = null,
    tilecount: i64,
    tiledversion: ?[]u8 = null,
    tileheight: i64,
    tileoffset: ?TileOffset = null,
    tiles: ?[]Tile = null,
    tilewidth: i64,
    transparentcolor: ?[]u8 = null,
    @"type": ?[]u8 = null,
    version: ?f64 = null,
    // TODO
    // wangsets
};
/// Root of a Tiled JSON map document.
pub const Map = struct {
    const Self = @This();
    backgroundcolor: ?[]u8 = null,
    compressionlevel: i64,
    height: i64,
    hexsidelength: ?i64 = null,
    infinite: bool,
    layers: []Layer,
    nextlayerid: i64,
    nextobjectid: i64,
    orientation: []u8,
    properties: ?[]Property = null,
    renderorder: ?[]u8 = null,
    staggeraxis: ?[]u8 = null,
    staggerindex: ?[]u8 = null,
    tiledversion: []u8,
    tileheight: i64,
    tilewidth: i64,
    tilesets: []Tileset,
    @"type": []u8,
    version: []u8,
    width: i64,
    /// Parses a Tiled JSON document. Caller owns the result and must call
    /// deinit with the same allocator.
    pub fn init(allocator: *Allocator, json_str: []const u8) !Self {
        // json.parse recurses through the whole schema at comptime;
        // the default branch quota is too small for it.
        @setEvalBranchQuota(7000);
        var stream = json.TokenStream.init(json_str);
        return json.parse(Map, &stream, .{ .allocator = allocator });
    }
    /// Frees everything allocated by init.
    pub fn deinit(self: *Self, allocator: *Allocator) void {
        json.parseFree(Self, self.*, .{ .allocator = allocator });
    }
};
// Smoke test: loads a real Tiled JSON map from disk and inspects a few fields.
test "tiled" {
    var alloc = std.testing.allocator;
    // TODO read file differently
    var file = try std.fs.cwd().openFile("tests/debug.json", .{ .read = true });
    defer file.close();
    const sz = try file.getEndPos();
    var buf = try alloc.alloc(u8, sz);
    defer alloc.free(buf);
    // Bug fix: the original discarded the error union returned by readAll,
    // so read failures were silently ignored. Propagate the error and
    // verify the whole file was consumed.
    const read = try file.readAll(buf);
    try std.testing.expectEqual(buf.len, read);
    // std.log.warn("\n{}\n", .{buf});
    var m = try Map.init(alloc, buf);
    defer m.deinit(alloc);
    std.log.warn("image name: {}", .{m.layers[0].ImageLayer.image});
    std.log.warn("compression level: {}", .{m.compressionlevel});
    std.log.warn("height: {}", .{m.layers[2].TileLayer.height});
    std.log.warn("len: {}", .{m.layers[2].TileLayer.data.gids.len});
}
|
src/tiled.zig
|
const builtin = @import("builtin");
const std = @import("std");
const input = @import("input.zig");
/// Runs both parts of day 22 against inputs/day22, printing results and
/// asserting the known-correct answers. Part 2 only runs outside Debug
/// builds (it is comparatively expensive).
pub fn run(allocator: std.mem.Allocator, stdout: anytype) anyerror!void {
    {
        var input_ = try input.readFile("inputs/day22");
        defer input_.deinit();
        const result = try part1(allocator, &input_);
        try stdout.print("22a: {}\n", .{ result });
        std.debug.assert(result == 537042);
    }
    if (builtin.mode != .Debug) {
        var input_ = try input.readFile("inputs/day22");
        defer input_.deinit();
        const result = try part2(allocator, &input_);
        try stdout.print("22b: {}\n", .{ result });
        std.debug.assert(result == 1304385553084863);
    }
}
/// Part 1: restrict the problem to the initialization region [-50, 50]
/// on every axis.
fn part1(allocator: std.mem.Allocator, input_: anytype) !usize {
    return solve(allocator, input_, -50, 50);
}
/// Part 2: use the full coordinate range representable by Dimension.
fn part2(allocator: std.mem.Allocator, input_: anytype) !usize {
    return solve(allocator, input_, min_dimension, max_dimension - 1);
}
// Coordinate bounds assumed to cover the puzzle input; Dimension is the
// smallest integer type able to hold them.
const min_dimension = -200000;
const max_dimension = 200000;
const Dimension = std.math.IntFittingRange(min_dimension, max_dimension);
// Upper bound on the number of reboot steps kept; sized for the puzzle input.
const max_num_steps = 1 + 420;
/// One parsed reboot step: a cuboid with half-open ranges on each axis
/// (start inclusive, end exclusive — see parseRange) and the on/off state
/// it sets.
const Step = struct {
    state: bool,
    x_start: Dimension,
    x_end: Dimension,
    y_start: Dimension,
    y_end: Dimension,
    z_start: Dimension,
    z_end: Dimension,
};
/// Parses reboot steps from `input_`, clamps every cuboid to [min, max] on
/// each axis (steps that become empty are dropped), collects the distinct
/// boundary coordinates per axis, and hands everything to solve_inner for
/// the coordinate-compressed count of "on" cells.
fn solve(allocator: std.mem.Allocator, input_: anytype, min: Dimension, max: Dimension) !usize {
    var steps = std.BoundedArray(Step, max_num_steps).init(0) catch unreachable;
    while (try input_.next()) |line| {
        var state: bool = undefined;
        var rest: []const u8 = undefined;
        // Every line starts with either "on " or "off ".
        if (std.mem.startsWith(u8, line, "on ")) {
            state = true;
            rest = line[("on ".len)..];
        }
        else if (std.mem.startsWith(u8, line, "off ")) {
            state = false;
            rest = line[("off ".len)..];
        }
        else {
            return error.InvalidInput;
        }
        var parts = std.mem.split(u8, rest, ",");
        // Parse each axis, then clamp to [min, max + 1); ranges are
        // half-open so an empty intersection means start >= end.
        var x_start: Dimension = undefined;
        var x_end: Dimension = undefined;
        try parseRange(parts.next() orelse return error.InvalidInput, 'x', &x_start, &x_end);
        x_start = std.math.max(x_start, min);
        x_end = std.math.min(x_end, max + 1);
        if (x_start >= x_end) {
            continue;
        }
        var y_start: Dimension = undefined;
        var y_end: Dimension = undefined;
        try parseRange(parts.next() orelse return error.InvalidInput, 'y', &y_start, &y_end);
        y_start = std.math.max(y_start, min);
        y_end = std.math.min(y_end, max + 1);
        if (y_start >= y_end) {
            continue;
        }
        var z_start: Dimension = undefined;
        var z_end: Dimension = undefined;
        try parseRange(parts.next() orelse return error.InvalidInput, 'z', &z_start, &z_end);
        z_start = std.math.max(z_start, min);
        z_end = std.math.min(z_end, max + 1);
        if (z_start >= z_end) {
            continue;
        }
        try steps.append(.{
            .state = state,
            .x_start = x_start,
            .x_end = x_end,
            .y_start = y_start,
            .y_end = y_end,
            .z_start = z_start,
            .z_end = z_end,
        });
    }
    // Deduplicated boundary coordinates per axis ("interest points").
    var interest_points_x = std.BoundedArray(Dimension, max_num_steps * 2 + 2).init(0) catch unreachable;
    var interest_points_y = std.BoundedArray(Dimension, max_num_steps * 2 + 2).init(0) catch unreachable;
    var interest_points_z = std.BoundedArray(Dimension, max_num_steps * 2 + 2).init(0) catch unreachable;
    for (steps.constSlice()) |step| {
        try append(&interest_points_x, step.x_start);
        try append(&interest_points_x, step.x_end);
        try append(&interest_points_y, step.y_start);
        try append(&interest_points_y, step.y_end);
        try append(&interest_points_z, step.z_start);
        try append(&interest_points_z, step.z_end);
    }
    // Sorted order is required by the binary searches in solve_inner.
    std.sort.sort(Dimension, interest_points_x.slice(), {}, comptime std.sort.asc(Dimension));
    std.sort.sort(Dimension, interest_points_y.slice(), {}, comptime std.sort.asc(Dimension));
    std.sort.sort(Dimension, interest_points_z.slice(), {}, comptime std.sort.asc(Dimension));
    return solve_inner(
        allocator,
        steps.constSlice(),
        interest_points_x.constSlice(),
        interest_points_y.constSlice(),
        interest_points_z.constSlice(),
    );
}
/// Counts the cells left "on" after applying `steps`, using coordinate
/// compression: each axis is reduced to its sorted interest points and a
/// bitset tracks the state of each compressed (x, y, z) region. The final
/// answer re-expands each set region to its real volume.
fn solve_inner(
    allocator: std.mem.Allocator,
    steps: []const Step,
    interest_points_x: []const Dimension,
    interest_points_y: []const Dimension,
    interest_points_z: []const Dimension,
) !usize {
    // One bit per compressed region, laid out x-major.
    var regions = try std.DynamicBitSet.initEmpty(allocator, interest_points_x.len * interest_points_y.len * interest_points_z.len);
    defer regions.deinit();
    const x_stride = interest_points_y.len * interest_points_z.len;
    const y_stride = interest_points_z.len;
    for (steps) |step| {
        // Every step boundary was inserted into the interest point lists
        // by the caller, so each binarySearch is guaranteed to hit (.?).
        const x_start_i = std.sort.binarySearch(Dimension, step.x_start, interest_points_x, {}, orderDimension).?;
        const x_start = x_start_i * x_stride;
        const x_end_i = std.sort.binarySearch(Dimension, step.x_end, interest_points_x[(x_start_i + 1)..], {}, orderDimension).? + x_start_i + 1;
        const x_end = x_end_i * x_stride;
        const y_start_i = std.sort.binarySearch(Dimension, step.y_start, interest_points_y, {}, orderDimension).?;
        const y_start = y_start_i * y_stride;
        const y_end_i = std.sort.binarySearch(Dimension, step.y_end, interest_points_y[(y_start_i + 1)..], {}, orderDimension).? + y_start_i + 1;
        const y_end = y_end_i * y_stride;
        const z_start_i = std.sort.binarySearch(Dimension, step.z_start, interest_points_z, {}, orderDimension).?;
        const z_start = z_start_i;
        // Consistency fix: use z_start_i in the slice like the x/y axes do.
        // (z_start == z_start_i here, so behavior is unchanged.)
        const z_end_i = std.sort.binarySearch(Dimension, step.z_end, interest_points_z[(z_start_i + 1)..], {}, orderDimension).? + z_start_i + 1;
        const z_end = z_end_i;
        // Set or clear every compressed region covered by this step.
        var x = x_start;
        while (x < x_end) : (x += x_stride) {
            var y = x + y_start;
            while (y < x + y_end) : (y += y_stride) {
                var z = y + z_start;
                while (z < y + z_end) : (z += 1) {
                    regions.setValue(z, step.state);
                }
            }
        }
    }
    // Sum the real-world volume of every region that is still on.
    var result: usize = 0;
    var set_regions = regions.iterator(.{});
    while (set_regions.next()) |*i| {
        const x = i.* / x_stride;
        i.* %= x_stride;
        const y = i.* / y_stride;
        i.* %= y_stride;
        const z = i.*;
        result +=
            @intCast(usize, interest_points_x[x + 1] - interest_points_x[x]) *
            @intCast(usize, interest_points_y[y + 1] - interest_points_y[y]) *
            @intCast(usize, interest_points_z[z + 1] - interest_points_z[z]);
    }
    return result;
}
/// Parses one axis range of the form "<coord>=<start>..<end>" (e.g.
/// "x=10..12"). Writes an inclusive start and an EXCLUSIVE end (input +1).
/// Returns error.InvalidInput on any malformed input.
fn parseRange(s: []const u8, expected_coord: u8, start: *Dimension, end: *Dimension) !void {
    // Robustness fix: guard the length before indexing — the original
    // panicked (index out of bounds) on inputs shorter than 2 bytes.
    if (s.len < 2 or s[0] != expected_coord or s[1] != '=') {
        return error.InvalidInput;
    }
    var parts = std.mem.split(u8, s[2..], "..");
    const start_s = parts.next() orelse return error.InvalidInput;
    start.* = try std.fmt.parseInt(Dimension, start_s, 10);
    const end_s = parts.next() orelse return error.InvalidInput;
    // Store a half-open upper bound so range arithmetic needs no +1s later.
    end.* = (try std.fmt.parseInt(Dimension, end_s, 10)) + 1;
}
/// Appends `point` to the interest point list unless it is already present.
fn append(interest_points: *std.BoundedArray(Dimension, max_num_steps * 2 + 2), point: Dimension) !void {
    for (interest_points.constSlice()) |existing| {
        if (existing == point) return;
    }
    try interest_points.append(point);
}
/// Three-way comparison of Dimension values, for std.sort.binarySearch.
fn orderDimension(context: void, lhs: Dimension, rhs: Dimension) std.math.Order {
    _ = context;
    if (lhs < rhs) return .lt;
    if (rhs < lhs) return .gt;
    return .eq;
}
// Small worked example from the puzzle statement (part 1 only).
test "day 22 example 1" {
    const input_ =
        \\on x=10..12,y=10..12,z=10..12
        \\on x=11..13,y=11..13,z=11..13
        \\off x=9..11,y=9..11,z=9..11
        \\on x=10..10,y=10..10,z=10..10
    ;
    try std.testing.expectEqual(@as(usize, 39), try part1(std.testing.allocator, &input.readString(input_)));
}
// Larger part 1 example; cuboids outside the -50..50 region are clamped away.
test "day 22 example 2" {
    const input_ =
        \\on x=-20..26,y=-36..17,z=-47..7
        \\on x=-20..33,y=-21..23,z=-26..28
        \\on x=-22..28,y=-29..23,z=-38..16
        \\on x=-46..7,y=-6..46,z=-50..-1
        \\on x=-49..1,y=-3..46,z=-24..28
        \\on x=2..47,y=-22..22,z=-23..27
        \\on x=-27..23,y=-28..26,z=-21..29
        \\on x=-39..5,y=-6..47,z=-3..44
        \\on x=-30..21,y=-8..43,z=-13..34
        \\on x=-22..26,y=-27..20,z=-29..19
        \\off x=-48..-32,y=26..41,z=-47..-37
        \\on x=-12..35,y=6..50,z=-50..-2
        \\off x=-48..-32,y=-32..-16,z=-15..-5
        \\on x=-18..26,y=-33..15,z=-7..46
        \\off x=-40..-22,y=-38..-28,z=23..41
        \\on x=-16..35,y=-41..10,z=-47..6
        \\off x=-32..-23,y=11..30,z=-14..3
        \\on x=-49..-5,y=-3..45,z=-29..18
        \\off x=18..30,y=-20..-8,z=-3..13
        \\on x=-41..9,y=-7..43,z=-33..15
        \\on x=-54112..-39298,y=-85059..-49293,z=-27449..7877
        \\on x=967..23432,y=45373..81175,z=27513..53682
    ;
    try std.testing.expectEqual(@as(usize, 590784), try part1(std.testing.allocator, &input.readString(input_)));
}
// Full example covering both parts with the large coordinate space.
test "day 22 example 3" {
    const input_ =
        \\on x=-5..47,y=-31..22,z=-19..33
        \\on x=-44..5,y=-27..21,z=-14..35
        \\on x=-49..-1,y=-11..42,z=-10..38
        \\on x=-20..34,y=-40..6,z=-44..1
        \\off x=26..39,y=40..50,z=-2..11
        \\on x=-41..5,y=-41..6,z=-36..8
        \\off x=-43..-33,y=-45..-28,z=7..25
        \\on x=-33..15,y=-32..19,z=-34..11
        \\off x=35..47,y=-46..-34,z=-11..5
        \\on x=-14..36,y=-6..44,z=-16..29
        \\on x=-57795..-6158,y=29564..72030,z=20435..90618
        \\on x=36731..105352,y=-21140..28532,z=16094..90401
        \\on x=30999..107136,y=-53464..15513,z=8553..71215
        \\on x=13528..83982,y=-99403..-27377,z=-24141..23996
        \\on x=-72682..-12347,y=18159..111354,z=7391..80950
        \\on x=-1060..80757,y=-65301..-20884,z=-103788..-16709
        \\on x=-83015..-9461,y=-72160..-8347,z=-81239..-26856
        \\on x=-52752..22273,y=-49450..9096,z=54442..119054
        \\on x=-29982..40483,y=-108474..-28371,z=-24328..38471
        \\on x=-4958..62750,y=40422..118853,z=-7672..65583
        \\on x=55694..108686,y=-43367..46958,z=-26781..48729
        \\on x=-98497..-18186,y=-63569..3412,z=1232..88485
        \\on x=-726..56291,y=-62629..13224,z=18033..85226
        \\on x=-110886..-34664,y=-81338..-8658,z=8914..63723
        \\on x=-55829..24974,y=-16897..54165,z=-121762..-28058
        \\on x=-65152..-11147,y=22489..91432,z=-58782..1780
        \\on x=-120100..-32970,y=-46592..27473,z=-11695..61039
        \\on x=-18631..37533,y=-124565..-50804,z=-35667..28308
        \\on x=-57817..18248,y=49321..117703,z=5745..55881
        \\on x=14781..98692,y=-1341..70827,z=15753..70151
        \\on x=-34419..55919,y=-19626..40991,z=39015..114138
        \\on x=-60785..11593,y=-56135..2999,z=-95368..-26915
        \\on x=-32178..58085,y=17647..101866,z=-91405..-8878
        \\on x=-53655..12091,y=50097..105568,z=-75335..-4862
        \\on x=-111166..-40997,y=-71714..2688,z=5609..50954
        \\on x=-16602..70118,y=-98693..-44401,z=5197..76897
        \\on x=16383..101554,y=4615..83635,z=-44907..18747
        \\off x=-95822..-15171,y=-19987..48940,z=10804..104439
        \\on x=-89813..-14614,y=16069..88491,z=-3297..45228
        \\on x=41075..99376,y=-20427..49978,z=-52012..13762
        \\on x=-21330..50085,y=-17944..62733,z=-112280..-30197
        \\on x=-16478..35915,y=36008..118594,z=-7885..47086
        \\off x=-98156..-27851,y=-49952..43171,z=-99005..-8456
        \\off x=2032..69770,y=-71013..4824,z=7471..94418
        \\on x=43670..120875,y=-42068..12382,z=-24787..38892
        \\off x=37514..111226,y=-45862..25743,z=-16714..54663
        \\off x=25699..97951,y=-30668..59918,z=-15349..69697
        \\off x=-44271..17935,y=-9516..60759,z=49131..112598
        \\on x=-61695..-5813,y=40978..94975,z=8655..80240
        \\off x=-101086..-9439,y=-7088..67543,z=33935..83858
        \\off x=18020..114017,y=-48931..32606,z=21474..89843
        \\off x=-77139..10506,y=-89994..-18797,z=-80..59318
        \\off x=8476..79288,y=-75520..11602,z=-96624..-24783
        \\on x=-47488..-1262,y=24338..100707,z=16292..72967
        \\off x=-84341..13987,y=2429..92914,z=-90671..-1318
        \\off x=-37810..49457,y=-71013..-7894,z=-105357..-13188
        \\off x=-27365..46395,y=31009..98017,z=15428..76570
        \\off x=-70369..-16548,y=22648..78696,z=-1892..86821
        \\on x=-53470..21291,y=-120233..-33476,z=-44150..38147
        \\off x=-93533..-4276,y=-16170..68771,z=-104985..-24507
    ;
    try std.testing.expectEqual(@as(usize, 474140), try part1(std.testing.allocator, &input.readString(input_)));
    try std.testing.expectEqual(@as(usize, 2758514936282235), try part2(std.testing.allocator, &input.readString(input_)));
}
|
src/day22.zig
|
const std = @import("std");
const Allocator = mem.Allocator;
const ascii = std.ascii;
const build_options = @import("build_options");
const builtin = std.builtin;
const fs = std.fs;
const io = std.io;
const mem = std.mem;
const nkeys = @import("nkeys");
const process = std.process;
const testing = std.testing;
/// Prints "error: <message>" to stderr and terminates the process with
/// exit code 1. Never returns.
pub fn fatal(comptime format: []const u8, args: anytype) noreturn {
    std.debug.print("error: " ++ format ++ "\n", args);
    process.exit(1);
}
/// Prints an informational message (with trailing newline) to stderr.
pub fn info(comptime format: []const u8, args: anytype) void {
    std.debug.print(format ++ "\n", args);
}
// Top-level usage text shown by `help` and on invalid invocations.
const usage =
    \\Usage: znk [command] [options]
    \\
    \\Commands:
    \\
    \\  gen      Generate a new key pair
    \\  help     Print this help and exit
    \\  sign     Sign a file
    \\  verify   Verify a file with a signature
    \\  version  Print version number and exit
    \\
    \\General Options:
    \\
    \\  -h, --help  Print this help and exit
    \\
;
/// Entry point: a leak-checked GeneralPurposeAllocator backs an arena that
/// owns argv (and all per-invocation allocations) for the process lifetime.
/// NOTE(review): `&instance.allocator` field access is pre-0.9 std style —
/// confirm against the targeted Zig version before upgrading.
pub fn main() anyerror!void {
    var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
    // deinit() reports leaks; assert none occurred.
    defer std.debug.assert(!general_purpose_allocator.deinit());
    const gpa = &general_purpose_allocator.allocator;
    var arena_instance = std.heap.ArenaAllocator.init(gpa);
    defer arena_instance.deinit();
    const arena = &arena_instance.allocator;
    const args = try process.argsAlloc(arena);
    return mainArgs(arena, args);
}
/// Dispatches to the subcommand named in args[1], passing args[2..] along.
/// Prints usage and exits (via fatal) for missing or unknown commands.
pub fn mainArgs(arena: *Allocator, args: []const []const u8) !void {
    if (args.len <= 1) {
        info("{s}", .{usage});
        fatal("expected command argument", .{});
    }
    const cmd = args[1];
    const cmd_args = args[2..];
    if (mem.eql(u8, cmd, "gen")) {
        return cmdGen(arena, cmd_args);
    } else if (mem.eql(u8, cmd, "sign")) {
        return cmdSign(arena, cmd_args);
    } else if (mem.eql(u8, cmd, "verify")) {
        return cmdVerify(arena, cmd_args);
    } else if (mem.eql(u8, cmd, "version")) {
        return io.getStdOut().writeAll(build_options.version ++ "\n");
    } else if (mem.eql(u8, cmd, "help") or mem.eql(u8, cmd, "-h") or mem.eql(u8, cmd, "--help")) {
        return io.getStdOut().writeAll(usage);
    } else {
        info("{s}", .{usage});
        fatal("unknown command: {s}", .{cmd});
    }
}
// Usage text for the `gen` subcommand.
const usage_gen =
    \\Usage: znk gen [options] <role>
    \\
    \\Supported Roles:
    \\
    \\  account
    \\  cluster
    \\  operator
    \\  server
    \\  user
    \\
    \\General Options:
    \\
    \\  -h, --help  Print this help and exit
    \\
    \\Generate Options:
    \\
    \\  -e, --entropy  Path of file to get entropy from
    \\  -o, --pub-out  Print the public key to stdout
    \\  -p, --prefix   Vanity public key prefix, turns -o on
    \\
;
/// `znk gen`: generates a key pair for the requested role.
/// Parses the flags documented in usage_gen, then either brute-forces a
/// vanity key matching --prefix or generates a single seed, printing it
/// (and optionally the public key) to stdout.
pub fn cmdGen(arena: *Allocator, args: []const []const u8) !void {
    const stdin = io.getStdIn();
    const stdout = io.getStdOut();
    var role: ?nkeys.Role = null;
    var pub_out: bool = false;
    var prefix: ?[]const u8 = null;
    var entropy: ?fs.File = null;
    // NOTE(review): a file opened via --entropy is never closed; acceptable
    // for a short-lived CLI, but worth confirming intent.
    var i: usize = 0;
    while (i < args.len) : (i += 1) {
        const arg = args[i];
        if (mem.startsWith(u8, arg, "-")) {
            if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
                return stdout.writeAll(usage_gen);
            } else if (mem.eql(u8, arg, "-o") or mem.eql(u8, arg, "--pub-out")) {
                pub_out = true;
            } else if (mem.eql(u8, arg, "-p") or mem.eql(u8, arg, "--prefix")) {
                if (i + 1 >= args.len)
                    fatal("expected argument after '{s}'", .{arg});
                i += 1;
                // Bug fix: report the offending prefix value (args[i]),
                // not the flag name (arg) as the original did.
                if (args[i].len > nkeys.text_public_len - 1)
                    fatal("public key prefix '{s}' is too long", .{args[i]});
                prefix = args[i];
            } else if (mem.eql(u8, arg, "-e") or mem.eql(u8, arg, "--entropy")) {
                if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg});
                i += 1;
                if (entropy != null) fatal("parameter '{s}' provided more than once", .{arg});
                // "-" means read entropy from stdin.
                if (std.mem.eql(u8, args[i], "-")) {
                    entropy = stdin;
                } else {
                    entropy = fs.cwd().openFile(args[i], .{}) catch {
                        fatal("could not open entropy file at {s}", .{args[i]});
                    };
                }
            } else {
                fatal("unrecognized parameter: '{s}'", .{arg});
            }
        } else if (role != null) {
            fatal("more than one role to generate for provided", .{});
        } else if (mem.eql(u8, arg, "account")) {
            role = .account;
        } else if (mem.eql(u8, arg, "cluster")) {
            role = .cluster;
        } else if (mem.eql(u8, arg, "operator")) {
            role = .operator;
        } else if (mem.eql(u8, arg, "server")) {
            role = .server;
        } else if (mem.eql(u8, arg, "user")) {
            role = .user;
        } else {
            fatal("unrecognized extra parameter: '{s}'", .{arg});
        }
    }
    if (role == null) {
        info("{s}", .{usage_gen});
        fatal("no role to generate seed for provided", .{});
    }
    if (prefix != null) {
        // Vanity mode: search for a key whose public text starts with the
        // (upper-cased) prefix.
        const capitalized_prefix = try toUpper(arena, prefix.?);
        const entropy_reader = if (entropy) |e| e.reader() else null;
        const Generator = PrefixKeyGenerator(@TypeOf(entropy_reader.?));
        var generator = Generator.init(arena, role.?, capitalized_prefix, entropy_reader);
        generator.generate() catch {
            fatal("failed to generate key", .{});
        };
    } else {
        var gen_result = res: {
            if (entropy) |e| {
                break :res nkeys.SeedKeyPair.generateWithCustomEntropy(role.?, e.reader());
            } else {
                break :res nkeys.SeedKeyPair.generate(role.?);
            }
        };
        var kp = gen_result catch fatal("could not generate seed", .{});
        // Zero key material before the stack frame is reused.
        defer kp.wipe();
        try stdout.writeAll(&kp.seedText());
        try stdout.writeAll("\n");
        var public_key = kp.publicKeyText();
        if (pub_out) {
            try stdout.writeAll(&public_key);
            try stdout.writeAll("\n");
        }
    }
}
/// Help/usage text for the `znk sign` subcommand, printed for `-h`/`--help`.
const usage_sign =
    \\Usage: znk sign -k <file> [options] <file>
    \\
    \\General Options:
    \\
    \\ -h, --help Print this help and exit
    \\
    \\Sign Options:
    \\
    \\ -k, --key Path of private key/seed to sign with
    \\
;
/// Implements `znk sign`: reads a seed or private key (`-k`) and a file,
/// then prints the base64-encoded Ed25519 signature of the file's contents
/// to stdout. A lone `-` selects stdin for either input (but not both).
/// All allocations go through `arena`; errors terminate via `fatal`.
pub fn cmdSign(arena: *Allocator, args: []const []const u8) !void {
    const stdin = io.getStdIn();
    const stdout = io.getStdOut();
    var file_stdin = false;
    var key_stdin = false;
    var file: ?fs.File = null;
    var key: ?fs.File = null;
    // Close whichever files we opened ourselves; never close stdin.
    // BUGFIX: the guards were swapped (`file` was guarded by `key_stdin` and
    // `key` by `file_stdin`), which could close stdin and leak the real file.
    defer if (!file_stdin) if (file) |f| f.close();
    defer if (!key_stdin) if (key) |f| f.close();
    var i: usize = 0;
    while (i < args.len) : (i += 1) {
        const arg = args[i];
        // A lone "-" means stdin, so only treat "-x..." as an option flag.
        if (mem.startsWith(u8, arg, "-") and arg.len > 1) {
            if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
                return stdout.writeAll(usage_sign);
            } else if (mem.eql(u8, arg, "-k") or mem.eql(u8, arg, "--key")) {
                if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg});
                i += 1;
                if (key != null) fatal("parameter '{s}' provided more than once", .{arg});
                if (std.mem.eql(u8, args[i], "-")) {
                    key = stdin;
                    key_stdin = true;
                } else {
                    key = fs.cwd().openFile(args[i], .{}) catch {
                        fatal("could not open key file at {s}", .{args[i]});
                    };
                }
            } else {
                fatal("unrecognized parameter: '{s}'", .{arg});
            }
        } else if (file != null) {
            fatal("more than one file to generate a signature for provided", .{});
        } else if (mem.eql(u8, args[i], "-")) {
            file = stdin;
            file_stdin = true;
        } else {
            file = fs.cwd().openFile(args[i], .{}) catch {
                fatal("could not open file to generate signature for (at {s})", .{args[i]});
            };
        }
    }
    if (file == null) {
        info("{s}", .{usage_sign});
        fatal("no file to generate a signature for provided", .{});
    }
    if (key == null) {
        info("{s}", .{usage_sign});
        fatal("no key to sign with provided", .{});
    }
    // Both inputs cannot come from the same stream.
    if (file_stdin and key_stdin) {
        fatal("can't use stdin for reading multiple files", .{});
    }
    const content = file.?.readToEndAlloc(arena, std.math.maxInt(usize)) catch {
        fatal("could not read file to generate signature for", .{});
    };
    var nkey = readKeyFile(arena, key.?) orelse fatal("could not find a valid key", .{});
    // A bare public key cannot sign anything.
    if (nkey == .public_key) fatal("key was provided but is not a seed or private key", .{});
    defer nkey.wipe();
    const sig = nkey.sign(content) catch fatal("could not generate signature", .{});
    var encoded_sig = try arena.alloc(u8, std.base64.standard.Encoder.calcSize(sig.len));
    _ = std.base64.standard.Encoder.encode(encoded_sig, &sig);
    try stdout.writeAll(encoded_sig);
    try stdout.writeAll("\n");
}
/// Help/usage text for the `znk verify` subcommand, printed for `-h`/`--help`.
const usage_verify =
    \\Usage: znk verify [options] <file>
    \\
    \\General Options:
    \\
    \\ -h, --help Print this help and exit
    \\
    \\Verify Options:
    \\
    \\ -k, --key Path of key to verify with
    \\ -s, --sig Path of signature to verify
    \\
;
/// Implements `znk verify`: checks a base64-encoded Ed25519 signature (`-s`)
/// of a file against a key (`-k`) and prints "good signature" on success.
/// A lone `-` selects stdin for at most one of the three inputs.
/// All allocations go through `arena`; failures terminate via `fatal`.
pub fn cmdVerify(arena: *Allocator, args: []const []const u8) !void {
    const stdin = io.getStdIn();
    const stdout = io.getStdOut();
    var file_stdin = false;
    var key_stdin = false;
    var sig_stdin = false;
    var key: ?fs.File = null;
    var file: ?fs.File = null;
    var sig: ?fs.File = null;
    // Close whichever files we opened ourselves; never close stdin.
    defer if (!file_stdin) if (file) |f| f.close();
    defer if (!key_stdin) if (key) |f| f.close();
    defer if (!sig_stdin) if (sig) |f| f.close();
    var i: usize = 0;
    while (i < args.len) : (i += 1) {
        const arg = args[i];
        // A lone "-" means stdin, so only treat "-x..." as an option flag.
        if (mem.startsWith(u8, arg, "-") and arg.len > 1) {
            if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
                return stdout.writeAll(usage_verify);
            } else if (mem.eql(u8, arg, "-k") or mem.eql(u8, arg, "--key")) {
                if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg});
                i += 1;
                if (key != null) fatal("parameter '{s}' provided more than once", .{arg});
                if (std.mem.eql(u8, args[i], "-")) {
                    key = stdin;
                    key_stdin = true;
                } else {
                    key = fs.cwd().openFile(args[i], .{}) catch {
                        fatal("could not open file of key to verify with (at {s})", .{args[i]});
                    };
                }
            } else if (mem.eql(u8, arg, "-s") or mem.eql(u8, arg, "--sig")) {
                if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg});
                i += 1;
                if (sig != null) fatal("parameter '{s}' provided more than once", .{arg});
                if (std.mem.eql(u8, args[i], "-")) {
                    sig = stdin;
                    sig_stdin = true;
                } else {
                    sig = fs.cwd().openFile(args[i], .{}) catch {
                        fatal("could not open signature file at {s}", .{args[i]});
                    };
                }
            } else {
                fatal("unrecognized parameter: '{s}'", .{arg});
            }
        } else if (file != null) {
            fatal("more than one file to verify signature of provided", .{});
        } else if (mem.eql(u8, args[i], "-")) {
            file = stdin;
            file_stdin = true;
        } else {
            file = fs.cwd().openFile(args[i], .{}) catch {
                fatal("could not open file to verify signature of (at {s})", .{args[i]});
            };
        }
    }
    if (file == null) {
        info("{s}", .{usage_verify});
        fatal("no file to verify signature of provided", .{});
    }
    if (key == null) {
        info("{s}", .{usage_verify});
        fatal("no key to verify signature with provided", .{});
    }
    if (sig == null) {
        info("{s}", .{usage_verify});
        // BUGFIX: message was copy-pasted from `znk sign` and wrongly talked
        // about generating a signature instead of the missing -s argument.
        fatal("no signature to verify provided", .{});
    }
    // At most one of the three inputs may come from stdin.
    if ((file_stdin and key_stdin) or (file_stdin and sig_stdin) or (key_stdin and sig_stdin)) {
        fatal("can't use stdin for reading multiple files", .{});
    }
    const content = file.?.readToEndAlloc(arena, std.math.maxInt(usize)) catch {
        // BUGFIX: message previously said "generate signature for".
        fatal("could not read file to verify signature of", .{});
    };
    const signature_b64 = sig.?.readToEndAlloc(arena, std.math.maxInt(usize)) catch {
        fatal("could not read signature", .{});
    };
    var k = readKeyFile(arena, key.?) orelse fatal("could not find a valid key", .{});
    defer k.wipe();
    // Signature files commonly end with trailing whitespace/newlines.
    const trimmed_signature_b64 = mem.trim(u8, signature_b64, " \n\t\r");
    const decoded_len = std.base64.standard.Decoder.calcSizeForSlice(trimmed_signature_b64) catch {
        fatal("invalid signature encoding", .{});
    };
    if (decoded_len != std.crypto.sign.Ed25519.signature_length)
        fatal("invalid signature length", .{});
    const signature = try arena.alloc(u8, decoded_len);
    _ = std.base64.standard.Decoder.decode(signature, trimmed_signature_b64) catch {
        fatal("invalid signature encoding", .{});
    };
    k.verify(content, signature[0..std.crypto.sign.Ed25519.signature_length].*) catch {
        fatal("bad signature", .{});
    };
    try stdout.writeAll("good signature\n");
}
/// Returns a generator type that brute-forces key pairs until one's public
/// key starts with a wanted prefix. `EntropyReaderType` is the reader type
/// of an optional custom entropy source; when the instance's `entropy` field
/// is null, the library's default entropy is used instead.
fn PrefixKeyGenerator(comptime EntropyReaderType: type) type {
    return struct {
        role: nkeys.Role,
        // Desired public-key prefix (matched after the leading role byte).
        prefix: []const u8,
        allocator: *Allocator,
        // Set once one worker thread finds a match; stops all the others.
        done: std.atomic.Atomic(bool),
        entropy: ?EntropyReaderType,

        const Self = @This();

        pub fn init(allocator: *Allocator, role: nkeys.Role, prefix: []const u8, entropy: ?EntropyReaderType) Self {
            return .{
                .role = role,
                .prefix = prefix,
                .allocator = allocator,
                .done = std.atomic.Atomic(bool).init(false),
                .entropy = entropy,
            };
        }

        /// Worker loop: generate-and-test key pairs until either this thread
        /// finds a match (printed via `info`) or another thread has won.
        fn generatePrivate(self: *Self) !void {
            while (true) {
                if (self.done.load(.SeqCst)) return;
                var gen_result = res: {
                    if (self.entropy) |entropy| {
                        break :res nkeys.SeedKeyPair.generateWithCustomEntropy(self.role, entropy);
                    } else {
                        break :res nkeys.SeedKeyPair.generate(self.role);
                    }
                };
                var kp = gen_result catch fatal("could not generate seed", .{});
                defer kp.wipe();
                var public_key = kp.publicKeyText();
                // Skip the first character: it encodes the key's role.
                if (!mem.startsWith(u8, public_key[1..], self.prefix)) continue;
                // swap() returning true means another thread already won the race.
                if (self.done.swap(true, .SeqCst)) return; // another thread is already done
                info("{s}", .{kp.seedText()});
                info("{s}", .{public_key});
                return;
            }
        }

        // Single-threaded builds run the search inline; otherwise one worker
        // per CPU core races to find a matching key.
        pub usingnamespace if (builtin.single_threaded) struct {
            pub fn generate(self: *Self) !void {
                return self.generatePrivate();
            }
        } else struct {
            pub fn generate(self: *Self) !void {
                var cpu_count = try std.Thread.getCpuCount();
                var threads = try self.allocator.alloc(std.Thread, cpu_count);
                defer self.allocator.free(threads);
                for (threads) |*thread| thread.* = try std.Thread.spawn(.{}, Self.generatePrivate, .{self});
                for (threads) |thread| thread.join();
            }
        };
    };
}
/// Returns a newly allocated upper-cased copy of `slice` (ASCII only).
/// Caller owns the returned buffer.
fn toUpper(allocator: *Allocator, slice: []const u8) ![]u8 {
    const upper = try allocator.alloc(u8, slice.len);
    var i: usize = 0;
    while (i < slice.len) : (i += 1) {
        upper[i] = ascii.toUpper(slice[i]);
    }
    return upper;
}
/// A parsed nkey of any flavor: a full seed key pair, a bare public key,
/// or a bare private key. Which variant is active is determined by the
/// first character of the textual encoding (see `fromText`).
pub const Nkey = union(enum) {
    const Self = @This();

    seed_key_pair: nkeys.SeedKeyPair,
    public_key: nkeys.PublicKey,
    private_key: nkeys.PrivateKey,

    /// Zeroes the sensitive material held by the active variant.
    pub fn wipe(self: *Self) void {
        switch (self.*) {
            .seed_key_pair => |*kp| kp.wipe(),
            .private_key => |*pk| pk.wipe(),
            .public_key => |*pk| pk.wipe(),
        }
    }

    /// Verifies `sig` over `msg`; every variant can verify.
    pub fn verify(
        self: *const Self,
        msg: []const u8,
        sig: [std.crypto.sign.Ed25519.signature_length]u8,
    ) !void {
        switch (self.*) {
            .seed_key_pair => |*kp| return kp.verify(msg, sig),
            .private_key => |*pk| return pk.verify(msg, sig),
            .public_key => |*pk| return pk.verify(msg, sig),
        }
    }

    /// Signs `msg`; fails with `error.CantSign` for a bare public key.
    pub fn sign(
        self: *const Self,
        msg: []const u8,
    ) ![std.crypto.sign.Ed25519.signature_length]u8 {
        switch (self.*) {
            .seed_key_pair => |*kp| return kp.sign(msg),
            .private_key => |*pk| return pk.sign(msg),
            .public_key => return error.CantSign,
        }
    }

    /// Parses a textual nkey. The leading character selects the variant:
    /// 'S' for a seed, 'P' for a private key, anything else a public key.
    pub fn fromText(text: []const u8) !Self {
        if (!nkeys.isValidEncoding(text)) return error.InvalidEncoding;
        const first = text[0];
        if (first == 'S') {
            // It's a seed.
            if (text.len != nkeys.text_seed_len) return error.InvalidSeed;
            return Self{ .seed_key_pair = try nkeys.SeedKeyPair.fromTextSeed(text[0..nkeys.text_seed_len]) };
        }
        if (first == 'P') {
            // It's a private key.
            if (text.len != nkeys.text_private_len) return error.InvalidPrivateKey;
            return Self{ .private_key = try nkeys.PrivateKey.fromTextPrivateKey(text[0..nkeys.text_private_len]) };
        }
        // It should be a public key.
        if (text.len != nkeys.text_public_len) return error.InvalidEncoding;
        return Self{ .public_key = try nkeys.PublicKey.fromTextPublicKey(text[0..nkeys.text_public_len]) };
    }
};
/// Scans `file` line by line and returns the first line that decodes to a
/// valid seed-length nkey, or null when no line qualifies. The raw file
/// contents are zeroed and freed before returning.
pub fn readKeyFile(allocator: *Allocator, file: fs.File) ?Nkey {
    var contents = file.readToEndAlloc(allocator, std.math.maxInt(usize)) catch fatal("could not read key file", .{});
    defer {
        // Scrub potential key material before releasing the buffer.
        std.mem.set(u8, contents, 0);
        allocator.free(contents);
    }
    var lines = mem.split(u8, contents, "\n");
    while (lines.next()) |line| {
        if (!nkeys.isValidEncoding(line) or line.len != nkeys.text_seed_len) continue;
        var key = Nkey.fromText(line) catch continue;
        // The return value is copied out before this wipe of the local runs.
        defer key.wipe();
        return key;
    }
    return null;
}
// Smoke test: forces semantic analysis of every declaration (including the
// generic PrefixKeyGenerator instantiated with a file reader) so `zig test`
// catches compile errors in rarely exercised paths.
test "reference all declarations" {
    testing.refAllDecls(@This());
    testing.refAllDecls(Nkey);
    testing.refAllDecls(PrefixKeyGenerator(std.fs.File.Reader));
}
|
tool/znk.zig
|
const std = @import("std");
const panic = std.debug.panic;
const Allocator = std.mem.Allocator;
//Core types
usingnamespace @import("zalgebra");
usingnamespace @import("camera.zig");
usingnamespace @import("transform.zig");
usingnamespace @import("chunk/chunk.zig");
usingnamespace @import("world/world.zig");
usingnamespace @import("collision/aabb.zig");
usingnamespace @import("test_box.zig");
const c = @import("c.zig");
const glfw = @import("glfw_platform.zig");
const opengl = @import("opengl_renderer.zig");
const png = @import("png.zig");
/// Entry point: creates a window, loads the block spritesheet and the two
/// shader programs, fills one test chunk with layered blocks, then runs the
/// render/update loop (camera movement, test-box physics, chunk rendering).
pub fn main() !void {
    const stdout = std.io.getStdOut().writer();
    // Leak-checking allocator; panics on exit if anything leaked.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer {
        const leaked = gpa.deinit();
        if (leaked) panic("Error: memory leaked", .{});
    }
    glfw.init();
    defer glfw.deinit();
    var window = glfw.createWindow(1600, 900, "ZigCraft V0.1");
    defer glfw.destoryWindow(window); // (sic: the platform API spells it "destory")
    //glfw.setMouseCaptured(window, true);
    glfw.maximizeWindow(window);
    // Block texture atlas, decoded from the embedded PNG.
    var png_file = @embedFile("spritesheet.png");
    var png_image = try png.Png.initMemory(png_file);
    defer png_image.deinit();
    var png_texture = opengl.Texture.init(png_image.size, png_image.data);
    defer png_texture.deinit();
    // Camera: fov 64 (presumably degrees — confirm in camera.zig), near 0.1, far 1000.
    var camera = Camera.new(64.0, 0.1, 1000.0);
    var camera_transform = Transform.zero();
    camera_transform.move(vec3.new(0.0, 1.0, -5.0));
    // Textured shader for the world, flat-color shader for debug boxes.
    var texture_vertex_code = @embedFile("texture.vert.glsl");
    var texture_fragment_code = @embedFile("texture.frag.glsl");
    var texture_shader = opengl.Shader.init(texture_vertex_code, texture_fragment_code);
    defer texture_shader.deinit();
    var color_vertex_code = @embedFile("color.vert.glsl");
    var color_fragment_code = @embedFile("color.frag.glsl");
    var color_shader = opengl.Shader.init(color_vertex_code, color_fragment_code);
    defer color_shader.deinit();
    //Uniform Indexes
    var view_projection_matrix_index = c.glGetUniformLocation(texture_shader.shader_program, "view_projection_matrix");
    var model_matrix_index = c.glGetUniformLocation(texture_shader.shader_program, "model_matrix");
    var texture_index = c.glGetUniformLocation(texture_shader.shader_program, "block_texture");
    //World stuff
    var world = World.init(&gpa.allocator);
    defer world.deinit();
    var chunk = TestChunk.init(vec3i.zero());
    defer chunk.deinit();
    // Fill the test chunk in horizontal layers. Block ids 3/2/1 are
    // presumably surface/middle/bottom materials — confirm in chunk.zig.
    {
        var index: vec3i = vec3i.zero();
        while (index.x < TestChunk.Size.x) : (index.x += 1) {
            index.y = 0;
            while (index.y < TestChunk.Size.y) : (index.y += 1) {
                index.z = 0;
                while (index.z < TestChunk.Size.z) : (index.z += 1) {
                    if (index.y == 24) {
                        chunk.setBlock(&index, 3);
                    } else if (index.y < 24 and index.y > 18) {
                        chunk.setBlock(&index, 2);
                    } else if (index.y <= 18) {
                        chunk.setBlock(&index, 1);
                    }
                }
            }
        }
    }
    chunk.generateChunkMesh(&gpa.allocator);
    // Two colliding debug boxes: box1 falls onto box2.
    var test_box1 = TestBox.init(vec3.new(0.0, 3.0, 0.0), vec3.one(), vec3.new(1.0, 0.0, 0.0));
    defer test_box1.deinit();
    var test_box2 = TestBox.init(vec3.zero(), vec3.one().scale(1.2), vec3.one());
    defer test_box2.deinit();
    var frameCount: u32 = 0;
    var lastTime = glfw.getTime();
    // NOTE(review): loops while shouldCloseWindow(...) is true — presumably
    // this wrapper actually means "should keep running"; verify against
    // glfw_platform.zig.
    while (glfw.shouldCloseWindow(window)) {
        glfw.update();
        if (glfw.getMouseCaptured(window)) {
            //Free mouse if escape is at all pressed
            if (glfw.input.getKeyDown(c.GLFW_KEY_ESCAPE)) {
                glfw.setMouseCaptured(window, false);
            }
        } else {
            // Recapture the mouse on click.
            if (glfw.input.getMousePressed(c.GLFW_MOUSE_BUTTON_LEFT)) {
                glfw.setMouseCaptured(window, true);
            }
        }
        var windowSize = glfw.getWindowSize(window);
        opengl.setViewport(windowSize);
        opengl.init3dRendering();
        opengl.clearFramebuffer();
        //world.update(&camera_transform.position);
        // Fixed 1/60 s timestep regardless of actual frame time.
        moveCamera(1.0 / 60.0, &camera_transform);
        // Simple gravity on box1, then collision response against box2.
        test_box1.aabb.position = test_box1.aabb.position.add(vec3.new(0.0, -0.1 / 60.0, 0.0));
        test_box1.update(&test_box2);
        c.glUseProgram(texture_shader.shader_program);
        //View Projection Matrix
        var projection_matrix = camera.getPerspective(@intToFloat(f32, windowSize[0]) / @intToFloat(f32, windowSize[1]));
        var view_matrix = camera_transform.getViewMatrix();
        var view_projection_matrix = mat4.mult(projection_matrix, view_matrix);
        //World render
        {
            c.glUniformMatrix4fv(view_projection_matrix_index, 1, c.GL_FALSE, view_projection_matrix.get_data());
            //Texture
            const bind_point = 0;
            png_texture.bind(bind_point);
            c.glUniform1i(texture_index, bind_point);
            chunk.render(model_matrix_index);
            //world.render(model_matrix_index);
        }
        //TestBox Renders
        {
            var box_model_index = c.glGetUniformLocation(color_shader.shader_program, "model_matrix");
            var box_color_index = c.glGetUniformLocation(color_shader.shader_program, "color");
            c.glUseProgram(color_shader.shader_program);
            c.glUniformMatrix4fv(c.glGetUniformLocation(color_shader.shader_program, "view_projection_matrix"), 1, c.GL_FALSE, view_projection_matrix.get_data());
            test_box1.render(box_model_index, box_color_index);
            test_box2.render(box_model_index, box_color_index);
        }
        glfw.refreshWindow(window);
        // Once-per-second FPS counter (reporting is currently commented out).
        frameCount += 1;
        var currentTime = glfw.getTime();
        if ((currentTime - 1.0) > lastTime) {
            //try stdout.print("FPS: {}\n", .{frameCount});
            frameCount = 0;
            lastTime = currentTime;
        }
    }
    try stdout.print("Hello, {s}!\n", .{"world"});
}
/// Applies one tick of free-fly camera controls to `transform`:
/// A/D, space/shift and W/S translate along the camera's local axes;
/// mouse motion (plus arrow keys) pitches around the local left axis and
/// yaws around world up.
fn moveCamera(timeStep: f32, transform: *Transform) void {
    const moveSpeed: f32 = 3.0; //Meters
    const rotateSpeed: f32 = 45.0; //Degrees
    const left = transform.getLeft();
    const up = transform.getUp();
    const forward = transform.getForward();

    // Translation: each axis gets a -1/0/+1 keyboard input scaled into a move.
    {
        var strafe: f32 = 0.0;
        if (glfw.input.getKeyDown(c.GLFW_KEY_A)) strafe += 1.0;
        if (glfw.input.getKeyDown(c.GLFW_KEY_D)) strafe -= 1.0;
        transform.move(left.scale(strafe * moveSpeed * timeStep));
    }
    {
        var lift: f32 = 0.0;
        if (glfw.input.getKeyDown(c.GLFW_KEY_SPACE)) lift += 1.0;
        if (glfw.input.getKeyDown(c.GLFW_KEY_LEFT_SHIFT)) lift -= 1.0;
        transform.move(up.scale(lift * moveSpeed * timeStep));
    }
    {
        var advance: f32 = 0.0;
        if (glfw.input.getKeyDown(c.GLFW_KEY_W)) advance += 1.0;
        if (glfw.input.getKeyDown(c.GLFW_KEY_S)) advance -= 1.0;
        transform.move(forward.scale(advance * moveSpeed * timeStep));
    }

    // Rotation: mouse deltas (scaled down by sensitivity) plus arrow-key nudges.
    const mouse_axes = glfw.input.getMouseAxes();
    const mouse_sensitivity: f32 = 25.0;
    {
        var pitch: f32 = mouse_axes[1] / mouse_sensitivity;
        if (glfw.input.getKeyDown(c.GLFW_KEY_UP)) pitch += 1.0;
        if (glfw.input.getKeyDown(c.GLFW_KEY_DOWN)) pitch -= 1.0;
        transform.rotate(quat.from_axis(pitch * rotateSpeed * timeStep, transform.getLeft()));
    }
    {
        var yaw: f32 = -mouse_axes[0] / mouse_sensitivity;
        if (glfw.input.getKeyDown(c.GLFW_KEY_LEFT)) yaw += 1.0;
        if (glfw.input.getKeyDown(c.GLFW_KEY_RIGHT)) yaw -= 1.0;
        transform.rotate(quat.from_axis(yaw * rotateSpeed * timeStep, vec3.up()));
    }
}
|
src/main.zig
|
const std = @import("std");
// Embedded dictionary: newline-separated five-letter words, so every entry
// occupies exactly 6 bytes (5 letters + '\n').
const word_string = @embedFile("5lw.txt");
const word_count = word_string.len / 6;
// Reused input buffer for reading one guess line from stdin.
var buffer = [_]u8{0} ** 100;
// ANSI escape sequences (CSI prefix + SGR color codes) for terminal output.
const csi = "\x1b[";
const ansi_red = csi ++ "31;1m";
const ansi_green = csi ++ "32;1m";
const ansi_yellow = csi ++ "33;1m";
const ansi_reset = csi ++ "0m";
/// True when `string` occurs in the embedded word list. Because entries are
/// 5 letters separated by newlines, any newline-free 5-char match is
/// necessarily aligned to a whole dictionary word.
fn isWord(string: []const u8) bool {
    if (std.mem.indexOf(u8, word_string, string)) |_| return true;
    return false;
}
/// True when every byte of `string` is an ASCII lowercase letter
/// (vacuously true for an empty string).
fn isLower(string: []const u8) bool {
    var i: usize = 0;
    while (i < string.len) : (i += 1) {
        if (!std.ascii.isLower(string[i])) return false;
    }
    return true;
}
/// Runs one game of terminal Wordle: picks a random word from the embedded
/// list, then gives the player up to six guesses with ANSI-colored feedback
/// (green = right letter/right spot, yellow = letter present elsewhere).
pub fn main() anyerror!void {
    var stdout = std.io.getStdOut().writer();
    var stdin = std.io.getStdIn().reader();
    // Seed the PRNG from the wall clock (milliseconds, bit-cast to u64).
    const seed = @bitCast(u64, std.time.milliTimestamp());
    // NOTE(review): `.random()` is called on a temporary Prng; on some Zig
    // versions the returned interface holds a pointer into that temporary —
    // confirm this is safe on the targeted compiler version.
    var random = std.rand.DefaultPrng.init(seed).random();
    const word_index = random.uintLessThan(usize, word_count);
    // Each dictionary entry is 6 bytes (5 letters + newline).
    const random_word = word_string[6 * word_index .. 6 * word_index + 5];
    var did_win: bool = false;
    var guesses: u8 = 0;
    while (guesses < 6 and !did_win) {
        // Print the prompt "N. _____" and move the cursor back 5 columns
        // so typing overwrites the placeholder.
        try stdout.print(csi ++ "0K" ++ "{}. _____" ++ csi ++ "5D", .{guesses + 1});
        const guess = try stdin.readUntilDelimiter(&buffer, '\n');
        // Invalid guesses print an error on the next line and rewind the
        // cursor two lines (csi 2F) so the same attempt can be retried.
        if (!isLower(guess)) {
            try stdout.print(csi ++ "0K" ++ ansi_red ++ "Your guess should only contain lower case letters a-z.\n" ++ ansi_reset ++ csi ++ "2F", .{});
        } else if (guess.len != 5) {
            try stdout.print(csi ++ "0K" ++ ansi_red ++ "Your guess isn't 5 letters long.\n" ++ ansi_reset ++ csi ++ "2F", .{});
        } else if (!isWord(guess)) {
            try stdout.print(csi ++ "0K" ++ ansi_red ++ "{s} is not a known word.\n" ++ ansi_reset ++ csi ++ "2F", .{guess});
        } else {
            // Valid guess: reposition onto the prompt line and re-print the
            // guess letter by letter with per-letter coloring.
            try stdout.writeAll(csi ++ "1F" ++ csi ++ "4G");
            did_win = true;
            for (guess) |c, i| {
                if (random_word[i] == c) {
                    try stdout.writeAll(ansi_green);
                } else if (std.mem.indexOfScalar(u8, random_word, c) != null) {
                    try stdout.writeAll(ansi_yellow);
                    did_win = false;
                } else {
                    try stdout.writeAll(ansi_reset);
                    did_win = false;
                }
                try stdout.writeByte(c);
            }
            try stdout.writeAll(ansi_reset ++ "\n");
            // Only valid guesses count toward the six attempts.
            guesses += 1;
        }
    }
    if (did_win) {
        try stdout.print("You won! Your score is {}/6.\n", .{guesses});
    } else {
        try stdout.print("You lost. The word was {s}.\n", .{random_word});
    }
}
|
src/main.zig
|
const std = @import("std");
const tools = @import("tools");
// Compile-time switch: set to true to enable debug tracing output.
const with_trace = false;
/// Prints `fmt`/`args` to stderr, but only when `with_trace` is enabled;
/// compiles away to nothing otherwise.
fn trace(comptime fmt: []const u8, args: anytype) void {
    if (with_trace) std.debug.print(fmt, args);
}
const assert = std.debug.assert;
// Entry point wired up by the shared AoC harness; `run` is defined elsewhere
// in this file.
pub const main = tools.defaultMain("2021/day22.txt", run);
const Vec3 = @Vector(3, i32);

/// An axis-aligned cuboid of reactor cells with inclusive corners, the
/// on/off state it writes, and its position in the input (`order`); later
/// boxes override earlier ones.
const Box = struct {
    min: Vec3,
    max: Vec3,
    on: bool,
    order: u32,

    /// Sort helper: orders boxes by ascending min z only.
    fn lessThan2(ctx: void, a: @This(), b: @This()) bool {
        _ = ctx;
        return a.min[2] < b.min[2];
    }

    /// Priority helper: a higher `order` (more recent box) compares smaller,
    /// so a priority queue pops the most recent box first.
    fn compareOrder(_: void, a: @This(), b: @This()) std.math.Order {
        if (b.order < a.order) return .lt;
        if (b.order > a.order) return .gt;
        return .eq;
    }
};
/// Compares two box lists element-wise by `order` alone; asserts that equal
/// orders imply fully equal boxes (orders are unique per input box).
fn boxListEqual(as: []const Box, bs: []const Box) bool {
    if (as.len != bs.len) return false;
    var i: usize = 0;
    while (i < as.len) : (i += 1) {
        const a = as[i];
        const b = bs[i];
        if (a.order != b.order) return false;
        assert(a.on == b.on and @reduce(.And, a.min == b.min) and @reduce(.And, a.max == b.max));
    }
    return true;
}
/// Counts lit cells in the inclusive cube [min, max] by brute-force
/// rasterization, memoized at two levels: when the set of boxes overlapping
/// the current x-plane (or the current (x, y) row) is unchanged from the
/// previous one, the previously computed count is reused instead of
/// re-sweeping.
/// NOTE(review): the z sweep activates boxes with `min[2] <= p[2]` in list
/// order — assumes `sorted_list` is sorted by ascending min z (Box.lessThan2);
/// confirm at the call site.
fn countNbLit(allocator: std.mem.Allocator, min: Vec3, max: Vec3, sorted_list: []const Box) !u64 {
    // Boxes intersecting the current x-plane / current (x, y) row.
    var sublist1 = std.ArrayList(Box).init(allocator);
    defer sublist1.deinit();
    var sublist2 = std.ArrayList(Box).init(allocator);
    defer sublist2.deinit();
    // Count and box set of the previously processed x-plane.
    var nb_lit_1: u64 = 0;
    var cursublist1 = std.ArrayList(Box).init(allocator);
    defer cursublist1.deinit();
    // Count and box set of the previously processed (x, y) row.
    var nb_lit_2: u64 = 0;
    var cursublist2 = std.ArrayList(Box).init(allocator);
    defer cursublist2.deinit();
    // Boxes currently covering p[2]; the most recent (highest order) is on top.
    var stack = std.PriorityQueue(Box, void, Box.compareOrder).init(allocator, {});
    defer stack.deinit();
    var nb_lit: u64 = 0;
    var p = min;
    while (p[0] <= max[0]) : (p += Vec3{ 1, 0, 0 }) {
        // Boxes overlapping this x-plane.
        sublist1.clearRetainingCapacity();
        for (sorted_list) |box| {
            if (box.min[0] <= p[0] and p[0] <= box.max[0]) try sublist1.append(box);
        }
        //trace("p.x={} {} boxes\n", .{ p[0], sublist1.items.len });
        if (!boxListEqual(cursublist1.items, sublist1.items)) {
            cursublist1.clearRetainingCapacity();
            try cursublist1.appendSlice(sublist1.items);
            nb_lit_1 = 0;
            nb_lit_2 = 0;
            p[1] = min[1];
            while (p[1] <= max[1]) : (p += Vec3{ 0, 1, 0 }) {
                // Boxes overlapping this (x, y) row.
                sublist2.clearRetainingCapacity();
                for (sublist1.items) |box| {
                    if (box.min[1] <= p[1] and p[1] <= box.max[1]) try sublist2.append(box);
                }
                // trace(" p.y={} {} boxes\n", .{ p[1], sublist2.items.len });
                if (!boxListEqual(cursublist2.items, sublist2.items)) {
                    cursublist2.clearRetainingCapacity();
                    try cursublist2.appendSlice(sublist2.items);
                    //if (p[0] == 20 and p[1]==-10)
                    // trace("boxes={any}\n", .{ sublist2.items });
                    // Drain the active-box queue before sweeping z afresh.
                    while (stack.removeOrNull()) |_| {}
                    nb_lit_2 = 0;
                    var idx: u32 = 0;
                    p[2] = min[2];
                    while (p[2] <= max[2]) : (p += Vec3{ 0, 0, 1 }) {
                        // Activate boxes whose z-range has started.
                        while (idx < sublist2.items.len and sublist2.items[idx].min[2] <= p[2]) : (idx += 1)
                            try stack.add(sublist2.items[idx]);
                        // Retire boxes whose z-range has ended; removal shifts
                        // queue indices, so restart iteration after each one.
                        var it = stack.iterator();
                        var i: u32 = 0;
                        while (it.next()) |box| {
                            assert(@reduce(.And, p >= box.min));
                            if (p[2] > box.max[2]) {
                                _ = stack.removeIndex(i);
                                it.reset();
                                i = 0;
                            } else {
                                assert(@reduce(.And, p <= box.max));
                                i += 1;
                            }
                        }
                        if (@reduce(.And, p == Vec3{ 0, 0, 0 }))
                            trace("raster: pointlit00={}\n", .{(if (stack.peek()) |b| b.on else false)});
                        // The queue's top is the most recently added box
                        // covering p, i.e. the last write — it wins.
                        nb_lit_2 += @boolToInt(if (stack.peek()) |b| b.on else false);
                    }
                }
                nb_lit_1 += nb_lit_2;
            }
        }
        nb_lit += nb_lit_1;
    }
    return nb_lit;
}
/// Counts lit cells in the inclusive cube [min, max] by querying the octree
/// at every integer point. Used as a slow reference for cross-checking.
fn countNbLit_octree_rasterize(min: Vec3, max: Vec3, octree: *const Octree) u64 {
    var total: u64 = 0;
    var x = min[0];
    while (x <= max[0]) : (x += 1) {
        var y = min[1];
        while (y <= max[1]) : (y += 1) {
            var z = min[2];
            while (z <= max[2]) : (z += 1) {
                total += @boolToInt(octree.isPointLit(Vec3{ x, y, z }));
            }
        }
    }
    return total;
}
/// Sparse spatial index over the i32 grid. Internal nodes split space at a
/// point into 8 octants; leaves state that their whole region is uniformly
/// lit or unlit. Child subtrees are freely shared between octants, so the
/// structure is a DAG; all nodes live in one arena.
/// Boxes are inserted with half-open bounds [min, max).
const Octree = struct {
    const Node = union(enum) {
        // Leaf: the entire region is uniformly lit (true) or unlit (false).
        leave_islit: bool,
        node: struct {
            // Split point; the child index is the 3-bit mask
            // (x>=p)·4 + (y>=p)·2 + (z>=p)·1.
            p: Vec3,
            childs: [8]*Node,
        },
    };
    root: *Node,
    arena: std.heap.ArenaAllocator,

    pub fn init(allocator: std.mem.Allocator) !@This() {
        var arena = std.heap.ArenaAllocator.init(allocator);
        errdefer arena.deinit();
        const root = try arena.allocator().create(Node);
        // Everything starts unlit.
        root.* = Node{ .leave_islit = false };
        return Octree{
            .arena = arena,
            .root = root,
        };
    }

    /// Frees every node at once (arena-backed).
    pub fn deinit(self: *const @This()) void {
        self.arena.deinit();
    }

    /// Writes `is_lit` over the half-open box [min, max) within the subtree
    /// `cur` and returns the (possibly replaced) subtree root.
    fn addBoxRecurse(self: *@This(), min: Vec3, max: Vec3, is_lit: bool, cur: *Node) std.mem.Allocator.Error!*Node {
        // Empty box in any dimension: nothing to change.
        if (@reduce(.Or, max <= min)) return cur;
        const allocator = self.arena.allocator();
        if (cur.* == .leave_islit) {
            // Uniform leaf already in the right state: keep sharing it.
            if (is_lit == cur.leave_islit) return cur;
            // Carve the box out of the leaf: split at `min`, then at `max`.
            // Only the innermost octant (>= min in the outer node, < max in
            // the inner one) becomes the new state; all other octants keep
            // pointing at the old leaf.
            const newleave = try allocator.create(Node);
            const newnode1 = try allocator.create(Node);
            const newnode2 = try allocator.create(Node);
            newnode1.* = Node{ .node = .{ .p = min, .childs = undefined } };
            newnode2.* = Node{ .node = .{ .p = max, .childs = undefined } };
            newleave.* = Node{ .leave_islit = is_lit };
            newnode1.node.childs[0b000] = cur;
            newnode1.node.childs[0b001] = cur;
            newnode1.node.childs[0b010] = cur;
            newnode1.node.childs[0b011] = cur;
            newnode1.node.childs[0b100] = cur;
            newnode1.node.childs[0b101] = cur;
            newnode1.node.childs[0b110] = cur;
            newnode1.node.childs[0b111] = newnode2;
            newnode2.node.childs[0b000] = newleave;
            newnode2.node.childs[0b001] = cur;
            newnode2.node.childs[0b010] = cur;
            newnode2.node.childs[0b011] = cur;
            newnode2.node.childs[0b100] = cur;
            newnode2.node.childs[0b101] = cur;
            newnode2.node.childs[0b110] = cur;
            newnode2.node.childs[0b111] = cur;
            return newnode1;
        } else {
            // Internal node: clip the box against the split point and recurse
            // into each of the 8 octants.
            const min_000 = @maximum(min, Vec3{ min[0], min[1], min[2] });
            const min_001 = @maximum(min, Vec3{ min[0], min[1], cur.node.p[2] });
            const min_010 = @maximum(min, Vec3{ min[0], cur.node.p[1], min[2] });
            const min_011 = @maximum(min, Vec3{ min[0], cur.node.p[1], cur.node.p[2] });
            const min_100 = @maximum(min, Vec3{ cur.node.p[0], min[1], min[2] });
            const min_101 = @maximum(min, Vec3{ cur.node.p[0], min[1], cur.node.p[2] });
            const min_110 = @maximum(min, Vec3{ cur.node.p[0], cur.node.p[1], min[2] });
            const min_111 = @maximum(min, Vec3{ cur.node.p[0], cur.node.p[1], cur.node.p[2] });
            const max_000 = @minimum(max, Vec3{ cur.node.p[0], cur.node.p[1], cur.node.p[2] });
            const max_001 = @minimum(max, Vec3{ cur.node.p[0], cur.node.p[1], max[2] });
            const max_010 = @minimum(max, Vec3{ cur.node.p[0], max[1], cur.node.p[2] });
            const max_011 = @minimum(max, Vec3{ cur.node.p[0], max[1], max[2] });
            const max_100 = @minimum(max, Vec3{ max[0], cur.node.p[1], cur.node.p[2] });
            const max_101 = @minimum(max, Vec3{ max[0], cur.node.p[1], max[2] });
            const max_110 = @minimum(max, Vec3{ max[0], max[1], cur.node.p[2] });
            const max_111 = @minimum(max, Vec3{ max[0], max[1], max[2] });
            cur.node.childs[0b000] = try addBoxRecurse(self, min_000, max_000, is_lit, cur.node.childs[0b000]);
            cur.node.childs[0b001] = try addBoxRecurse(self, min_001, max_001, is_lit, cur.node.childs[0b001]);
            cur.node.childs[0b010] = try addBoxRecurse(self, min_010, max_010, is_lit, cur.node.childs[0b010]);
            cur.node.childs[0b011] = try addBoxRecurse(self, min_011, max_011, is_lit, cur.node.childs[0b011]);
            cur.node.childs[0b100] = try addBoxRecurse(self, min_100, max_100, is_lit, cur.node.childs[0b100]);
            cur.node.childs[0b101] = try addBoxRecurse(self, min_101, max_101, is_lit, cur.node.childs[0b101]);
            cur.node.childs[0b110] = try addBoxRecurse(self, min_110, max_110, is_lit, cur.node.childs[0b110]);
            cur.node.childs[0b111] = try addBoxRecurse(self, min_111, max_111, is_lit, cur.node.childs[0b111]);
            // Disabled experiment: collapse a fully-uniform subtree back into
            // a single leaf.
            //const volume = @reduce(.Mul, @intCast(@Vector(3, u64), max - min));
            //if (computeVolumeRecurse(self, is_lit, min, max, cur) == volume) {
            // const newleave = try allocator.create(Node);
            // newleave.* = Node{ .leave_islit = is_lit };
            // trace("collapsing to leave.\n", .{});
            // return newleave;
            //} else {
            // return cur;
            //}
            return cur;
        }
    }

    /// Inserts `box` (inclusive corners) by converting to half-open bounds.
    pub fn addBox(self: *@This(), box: Box) !void {
        self.root = try addBoxRecurse(self, box.min, box.max + Vec3{ 1, 1, 1 }, box.on, self.root);
    }

    fn isPointLitRecurse(self: *const @This(), p: Vec3, cur: *const Node) bool {
        //trace(" node: {}\n", .{cur.*});
        switch (cur.*) {
            .leave_islit => |on| return on,
            .node => |n| {
                // Select the octant containing p via the 3-bit comparison mask.
                const quadrant: u3 = @boolToInt(p[0] >= n.p[0]) * @as(u3, 0b100) + @boolToInt(p[1] >= n.p[1]) * @as(u3, 0b010) + @boolToInt(p[2] >= n.p[2]) * @as(u3, 0b001);
                return isPointLitRecurse(self, p, n.childs[quadrant]);
            },
        }
    }

    /// Returns whether the single cell at `p` is currently lit.
    pub fn isPointLit(self: *const @This(), p: Vec3) bool {
        //trace("octree: ispointlit@{}\n", .{p});
        return isPointLitRecurse(self, p, self.root);
    }

    /// Sums the volume of cells matching `is_lit` inside the half-open box
    /// [min, max) under `cur`, clipping the query box at each split point.
    fn computeVolumeRecurse(self: *const @This(), is_lit: bool, min: Vec3, max: Vec3, cur: *const Node) u64 {
        if (@reduce(.Or, max <= min)) return 0;
        switch (cur.*) {
            .leave_islit => |on| {
                //trace("volume+= {} * {}..{}\n", .{@boolToInt(on), min, max });
                return @boolToInt(on == is_lit) * @reduce(.Mul, @intCast(@Vector(3, u64), max - min));
            },
            .node => |n| {
                const vol000 = computeVolumeRecurse(self, is_lit, @maximum(min, Vec3{ min[0], min[1], min[2] }), @minimum(max, Vec3{ n.p[0], n.p[1], n.p[2] }), n.childs[0b000]);
                const vol001 = computeVolumeRecurse(self, is_lit, @maximum(min, Vec3{ min[0], min[1], n.p[2] }), @minimum(max, Vec3{ n.p[0], n.p[1], max[2] }), n.childs[0b001]);
                const vol010 = computeVolumeRecurse(self, is_lit, @maximum(min, Vec3{ min[0], n.p[1], min[2] }), @minimum(max, Vec3{ n.p[0], max[1], n.p[2] }), n.childs[0b010]);
                const vol011 = computeVolumeRecurse(self, is_lit, @maximum(min, Vec3{ min[0], n.p[1], n.p[2] }), @minimum(max, Vec3{ n.p[0], max[1], max[2] }), n.childs[0b011]);
                const vol100 = computeVolumeRecurse(self, is_lit, @maximum(min, Vec3{ n.p[0], min[1], min[2] }), @minimum(max, Vec3{ max[0], n.p[1], n.p[2] }), n.childs[0b100]);
                const vol101 = computeVolumeRecurse(self, is_lit, @maximum(min, Vec3{ n.p[0], min[1], n.p[2] }), @minimum(max, Vec3{ max[0], n.p[1], max[2] }), n.childs[0b101]);
                const vol110 = computeVolumeRecurse(self, is_lit, @maximum(min, Vec3{ n.p[0], n.p[1], min[2] }), @minimum(max, Vec3{ max[0], max[1], n.p[2] }), n.childs[0b110]);
                const vol111 = computeVolumeRecurse(self, is_lit, @maximum(min, Vec3{ n.p[0], n.p[1], n.p[2] }), @minimum(max, Vec3{ max[0], max[1], max[2] }), n.childs[0b111]);
                return vol000 + vol001 + vol010 + vol011 + vol100 + vol101 + vol110 + vol111;
            },
        }
    }

    /// Total lit volume inside the half-open box [min, max).
    pub fn computeVolumeLit(self: *const @This(), min: Vec3, max: Vec3) u64 {
        return computeVolumeRecurse(self, true, min, max, self.root);
    }
};
test "octree" {
    var octree = try Octree.init(std.testing.allocator);
    defer octree.deinit();
    const world_min = Vec3{ -10000, -10000, -10000 };
    const world_max = Vec3{ 10000, 10000, 10000 };
    // Helpers so each stage below reads as data rather than a wall of asserts.
    const check = struct {
        fn lit(tree: *const Octree, points: []const Vec3) !void {
            for (points) |p| try std.testing.expect(tree.isPointLit(p));
        }
        fn dark(tree: *const Octree, points: []const Vec3) !void {
            for (points) |p| try std.testing.expect(!tree.isPointLit(p));
        }
    };

    // Empty tree: everything is dark.
    try check.dark(&octree, &[_]Vec3{Vec3{ 1, 2, 3 }});
    try std.testing.expectEqual(@as(u64, 0), octree.computeVolumeLit(world_min, world_max));

    // One lit box.
    try octree.addBox(Box{ .min = Vec3{ -1, -2, -3 }, .max = Vec3{ 1, 2, 3 }, .on = true, .order = 0 });
    try check.lit(&octree, &[_]Vec3{ Vec3{ 1, 2, 3 }, Vec3{ -1, -2, -3 }, Vec3{ 0, 0, 0 }, Vec3{ -1, -1, -1 } });
    try check.dark(&octree, &[_]Vec3{ Vec3{ 2, 2, 3 }, Vec3{ 1, 3, 3 }, Vec3{ 1, 2, 4 }, Vec3{ -3, -3, -3 }, Vec3{ -2, -2, -2 }, Vec3{ -10, 0, 10 } });
    try std.testing.expectEqual(@as(u64, 8), octree.computeVolumeLit(Vec3{ -1, -1, -1 }, Vec3{ 1, 1, 1 }));
    try std.testing.expectEqual(@as(u64, 105), octree.computeVolumeLit(world_min, world_max));

    // A larger lit box covering the first one.
    try octree.addBox(Box{ .min = Vec3{ -3, -3, -3 }, .max = Vec3{ 3, 3, 3 }, .on = true, .order = 0 });
    try check.lit(&octree, &[_]Vec3{ Vec3{ 1, 2, 3 }, Vec3{ -1, -2, -3 }, Vec3{ 0, 0, 0 }, Vec3{ 2, 2, 3 }, Vec3{ 1, 3, 3 }, Vec3{ -3, -3, -3 }, Vec3{ -2, -2, -2 }, Vec3{ -1, -1, -1 } });
    try check.dark(&octree, &[_]Vec3{ Vec3{ 1, 2, 4 }, Vec3{ -10, 0, 10 } });
    try std.testing.expectEqual(@as(u64, 8), octree.computeVolumeLit(Vec3{ -1, -1, -1 }, Vec3{ 1, 1, 1 }));
    try std.testing.expectEqual(@as(u64, 343), octree.computeVolumeLit(world_min, world_max));

    // Switch one corner cell off.
    try octree.addBox(Box{ .min = Vec3{ -3, -3, -3 }, .max = Vec3{ -2, -2, -2 }, .on = false, .order = 0 });
    try check.lit(&octree, &[_]Vec3{ Vec3{ 1, 2, 3 }, Vec3{ -1, -2, -3 }, Vec3{ 0, 0, 0 }, Vec3{ 2, 2, 3 }, Vec3{ 1, 3, 3 }, Vec3{ -1, -1, -1 }, Vec3{ 0, 1, 0 }, Vec3{ 0, 1, 1 } });
    try check.dark(&octree, &[_]Vec3{ Vec3{ 1, 2, 4 }, Vec3{ -3, -3, -3 }, Vec3{ -2, -2, -2 }, Vec3{ -10, 0, 10 } });
    try std.testing.expectEqual(@as(u64, 8), octree.computeVolumeLit(Vec3{ -1, -1, -1 }, Vec3{ 1, 1, 1 }));
    try std.testing.expectEqual(@as(u64, 343 - 8), octree.computeVolumeLit(world_min, world_max));

    // Punch a hole in the middle.
    try octree.addBox(Box{ .min = Vec3{ 0, 0, 0 }, .max = Vec3{ 1, 1, 1 }, .on = false, .order = 0 });
    try check.lit(&octree, &[_]Vec3{ Vec3{ 1, 2, 3 }, Vec3{ -1, -2, -3 }, Vec3{ -1, 0, 0 }, Vec3{ 2, 0, 0 }, Vec3{ 2, 1, 1 }, Vec3{ 2, 2, 3 }, Vec3{ 1, 3, 3 }, Vec3{ -1, -1, -1 } });
    try check.dark(&octree, &[_]Vec3{ Vec3{ 0, 0, 0 }, Vec3{ 0, 1, 0 }, Vec3{ 0, 1, 1 }, Vec3{ 1, 1, 1 }, Vec3{ 1, 2, 4 }, Vec3{ -3, -3, -3 }, Vec3{ -2, -2, -2 }, Vec3{ -10, 0, 10 } });
    try std.testing.expectEqual(@as(u64, 7), octree.computeVolumeLit(Vec3{ -1, -1, -1 }, Vec3{ 1, 1, 1 }));
    try std.testing.expectEqual(@as(u64, 343 - 8 - 8), octree.computeVolumeLit(world_min, world_max));

    // Toggle a single cell on / off / on again.
    try octree.addBox(Box{ .min = Vec3{ 0, 1, 0 }, .max = Vec3{ 0, 1, 0 }, .on = true, .order = 0 });
    try octree.addBox(Box{ .min = Vec3{ 0, 1, 0 }, .max = Vec3{ 0, 1, 0 }, .on = false, .order = 0 });
    try octree.addBox(Box{ .min = Vec3{ 0, 1, 0 }, .max = Vec3{ 0, 1, 0 }, .on = true, .order = 0 });
    try check.lit(&octree, &[_]Vec3{ Vec3{ 1, 2, 3 }, Vec3{ -1, -2, -3 }, Vec3{ 0, 1, 0 }, Vec3{ -1, 0, 0 }, Vec3{ 2, 0, 0 }, Vec3{ 2, 1, 1 }, Vec3{ 2, 2, 3 }, Vec3{ 1, 3, 3 }, Vec3{ -1, -1, -1 } });
    try check.dark(&octree, &[_]Vec3{ Vec3{ 0, 0, 0 }, Vec3{ 0, 1, 1 }, Vec3{ 1, 1, 1 }, Vec3{ 1, 2, 4 }, Vec3{ -3, -3, -3 }, Vec3{ -2, -2, -2 }, Vec3{ -10, 0, 10 }, Vec3{ 10, 10000, -10 } });
    try std.testing.expectEqual(@as(u64, 7), octree.computeVolumeLit(Vec3{ -1, -1, -1 }, Vec3{ 1, 1, 1 }));
    try std.testing.expectEqual(@as(u64, 343 - 8 - 8 + 1), octree.computeVolumeLit(world_min, world_max));

    // Large lit slab.
    try octree.addBox(Box{ .min = Vec3{ -100, -100, -100 }, .max = Vec3{ 100, 100, 0 }, .on = true, .order = 0 });
    try check.lit(&octree, &[_]Vec3{ Vec3{ 1, 2, 3 }, Vec3{ -1, -2, -3 }, Vec3{ 0, 0, 0 }, Vec3{ 0, 1, 0 }, Vec3{ -1, 0, 0 }, Vec3{ 2, 0, 0 }, Vec3{ 2, 1, 1 }, Vec3{ 2, 2, 3 }, Vec3{ 1, 3, 3 }, Vec3{ -3, -3, -3 }, Vec3{ -2, -2, -2 }, Vec3{ -1, -1, -1 } });
    try check.dark(&octree, &[_]Vec3{ Vec3{ 0, 1, 1 }, Vec3{ 1, 1, 1 }, Vec3{ 1, 2, 4 }, Vec3{ -10, 0, 10 }, Vec3{ 10, 10000, -10 } });
    try std.testing.expectEqual(@as(u64, 8), octree.computeVolumeLit(Vec3{ -1, -1, -1 }, Vec3{ 1, 1, 1 }));
    try std.testing.expectEqual(@as(u64, 4080644), octree.computeVolumeLit(world_min, world_max));

    // Same slab turned off again.
    try octree.addBox(Box{ .min = Vec3{ -100, -100, -100 }, .max = Vec3{ 100, 100, 0 }, .on = false, .order = 0 });
    try check.lit(&octree, &[_]Vec3{ Vec3{ 1, 2, 3 }, Vec3{ 2, 1, 1 }, Vec3{ 2, 2, 3 }, Vec3{ 1, 3, 3 } });
    try check.dark(&octree, &[_]Vec3{ Vec3{ -1, -2, -3 }, Vec3{ 0, 0, 0 }, Vec3{ 0, 1, 0 }, Vec3{ 0, 1, 1 }, Vec3{ 1, 1, 1 }, Vec3{ -1, 0, 0 }, Vec3{ 2, 0, 0 }, Vec3{ 1, 2, 4 }, Vec3{ -3, -3, -3 }, Vec3{ -2, -2, -2 }, Vec3{ -1, -1, -1 }, Vec3{ -10, 0, 10 }, Vec3{ 10, 10000, -10 } });
    try std.testing.expectEqual(@as(u64, 0), octree.computeVolumeLit(Vec3{ -1, -1, -1 }, Vec3{ 1, 1, 1 }));
    try std.testing.expectEqual(@as(u64, 143), octree.computeVolumeLit(world_min, world_max));

    // Clear everything.
    try octree.addBox(Box{ .min = Vec3{ -1000, -1000, -1000 }, .max = Vec3{ 1000, 1000, 1000 }, .on = false, .order = 0 });
    try std.testing.expectEqual(@as(u64, 0), octree.computeVolumeLit(Vec3{ -1, -1, -1 }, Vec3{ 1, 1, 1 }));
    try std.testing.expectEqual(@as(u64, 0), octree.computeVolumeLit(world_min, world_max));
}
/// Parses reboot-step cuboids from `input`, applies them to the octree, and
/// returns the two puzzle answers formatted as allocated strings
/// (caller frees both with `gpa`).
pub fn run(input: []const u8, gpa: std.mem.Allocator) tools.RunError![2][]const u8 {
    // var arena_alloc = std.heap.ArenaAllocator.init(gpa);
    // defer arena_alloc.deinit();
    // const arena = arena_alloc.allocator();
    var list = std.ArrayList(Box).init(gpa);
    defer list.deinit();
    {
        // Parse one "on/off x=..,y=..,z=.." cuboid per line; `order`
        // records the input sequence for the list-based solver.
        var it = std.mem.tokenize(u8, input, "\n");
        while (it.next()) |line| {
            if (tools.match_pattern("on x={}..{},y={}..{},z={}..{}", line)) |val| {
                const min = Vec3{ @intCast(i32, val[0].imm), @intCast(i32, val[2].imm), @intCast(i32, val[4].imm) };
                const max = Vec3{ @intCast(i32, val[1].imm), @intCast(i32, val[3].imm), @intCast(i32, val[5].imm) };
                try list.append(Box{ .min = min, .max = max, .on = true, .order = @intCast(u32, list.items.len) });
            } else if (tools.match_pattern("off x={}..{},y={}..{},z={}..{}", line)) |val| {
                const min = Vec3{ @intCast(i32, val[0].imm), @intCast(i32, val[2].imm), @intCast(i32, val[4].imm) };
                const max = Vec3{ @intCast(i32, val[1].imm), @intCast(i32, val[3].imm), @intCast(i32, val[5].imm) };
                try list.append(Box{ .min = min, .max = max, .on = false, .order = @intCast(u32, list.items.len) });
            } else {
                trace("skipping '{s}'...\n", .{line});
            }
        }
    }
    // Apply every step to the octree in input order.
    var octree = try Octree.init(gpa);
    defer octree.deinit();
    for (list.items) |box| {
        try octree.addBox(box);
    }
    std.sort.sort(Box, list.items, {}, Box.lessThan2);
    // Part 1: lit cells inside the -50..50 core, computed two ways as a
    // cross-check (sorted-list rasterization vs. octree rasterization).
    const ans1 = try countNbLit(gpa, Vec3{ -50, -50, -50 }, Vec3{ 50, 50, 50 }, list.items);
    const ans1b = countNbLit_octree_rasterize(Vec3{ -50, -50, -50 }, Vec3{ 50, 50, 50 }, &octree);
    trace("ans1a={}, ans1b={}\n", .{ ans1, ans1b });
    assert(ans1 == ans1b);
    //const ans2 = ans: {
    //    var min = Vec3{ 0, 0, 0 };
    //    var max = Vec3{ 0, 0, 0 };
    //    for (list.items) |b| {
    //        min = @minimum(min, b.min);
    //        max = @maximum(max, b.max);
    //    }
    //    min -= Vec3{ 1, 1, 1 };
    //    max += Vec3{ 1, 1, 1 };
    //    trace("range={}...{}, boxes={}\n", .{ min, max, list.items.len });
    //    break :ans try countNbLit(gpa, min, max, list.items); // 1227348091315801 too high
    //};
    // Part 2: total lit volume; the bounds comfortably contain every input box.
    const ans2b = octree.computeVolumeLit(Vec3{ -1000000, -1000000, -1000000 }, Vec3{ 1000000, 1000000, 1000000 });
    //trace("ans2a={}, ans2b={}\n", .{ ans2, ans2b });
    //assert(ans2 == ans2b);
    return [_][]const u8{
        try std.fmt.allocPrint(gpa, "{}", .{ans1b}),
        try std.fmt.allocPrint(gpa, "{}", .{ans2b}),
    };
}
// Both sample checks are disabled (`if (false)`): they exercise the full
// solver on the AoC example inputs, which is slow. Flip to `true` to run
// them manually; the code is still compiled either way.
test {
    if (false) {
        const res = try run(
            \\on x=-20..26,y=-36..17,z=-47..7
            \\on x=-20..33,y=-21..23,z=-26..28
            \\on x=-22..28,y=-29..23,z=-38..16
            \\on x=-46..7,y=-6..46,z=-50..-1
            \\on x=-49..1,y=-3..46,z=-24..28
            \\on x=2..47,y=-22..22,z=-23..27
            \\on x=-27..23,y=-28..26,z=-21..29
            \\on x=-39..5,y=-6..47,z=-3..44
            \\on x=-30..21,y=-8..43,z=-13..34
            \\on x=-22..26,y=-27..20,z=-29..19
            \\off x=-48..-32,y=26..41,z=-47..-37
            \\on x=-12..35,y=6..50,z=-50..-2
            \\off x=-48..-32,y=-32..-16,z=-15..-5
            \\on x=-18..26,y=-33..15,z=-7..46
            \\off x=-40..-22,y=-38..-28,z=23..41
            \\on x=-16..35,y=-41..10,z=-47..6
            \\off x=-32..-23,y=11..30,z=-14..3
            \\on x=-49..-5,y=-3..45,z=-29..18
            \\off x=18..30,y=-20..-8,z=-3..13
            \\on x=-41..9,y=-7..43,z=-33..15
            \\on x=-54112..-39298,y=-85059..-49293,z=-27449..7877
            \\on x=967..23432,y=45373..81175,z=27513..53682
        , std.testing.allocator);
        defer std.testing.allocator.free(res[0]);
        defer std.testing.allocator.free(res[1]);
        try std.testing.expectEqualStrings("590784", res[0]);
        try std.testing.expectEqualStrings("39769202357779", res[1]);
    }
    if (false) {
        const res = try run(
            \\on x=-5..47,y=-31..22,z=-19..33
            \\on x=-44..5,y=-27..21,z=-14..35
            \\on x=-49..-1,y=-11..42,z=-10..38
            \\on x=-20..34,y=-40..6,z=-44..1
            \\off x=26..39,y=40..50,z=-2..11
            \\on x=-41..5,y=-41..6,z=-36..8
            \\off x=-43..-33,y=-45..-28,z=7..25
            \\on x=-33..15,y=-32..19,z=-34..11
            \\off x=35..47,y=-46..-34,z=-11..5
            \\on x=-14..36,y=-6..44,z=-16..29
            \\on x=-57795..-6158,y=29564..72030,z=20435..90618
            \\on x=36731..105352,y=-21140..28532,z=16094..90401
            \\on x=30999..107136,y=-53464..15513,z=8553..71215
            \\on x=13528..83982,y=-99403..-27377,z=-24141..23996
            \\on x=-72682..-12347,y=18159..111354,z=7391..80950
            \\on x=-1060..80757,y=-65301..-20884,z=-103788..-16709
            \\on x=-83015..-9461,y=-72160..-8347,z=-81239..-26856
            \\on x=-52752..22273,y=-49450..9096,z=54442..119054
            \\on x=-29982..40483,y=-108474..-28371,z=-24328..38471
            \\on x=-4958..62750,y=40422..118853,z=-7672..65583
            \\on x=55694..108686,y=-43367..46958,z=-26781..48729
            \\on x=-98497..-18186,y=-63569..3412,z=1232..88485
            \\on x=-726..56291,y=-62629..13224,z=18033..85226
            \\on x=-110886..-34664,y=-81338..-8658,z=8914..63723
            \\on x=-55829..24974,y=-16897..54165,z=-121762..-28058
            \\on x=-65152..-11147,y=22489..91432,z=-58782..1780
            \\on x=-120100..-32970,y=-46592..27473,z=-11695..61039
            \\on x=-18631..37533,y=-124565..-50804,z=-35667..28308
            \\on x=-57817..18248,y=49321..117703,z=5745..55881
            \\on x=14781..98692,y=-1341..70827,z=15753..70151
            \\on x=-34419..55919,y=-19626..40991,z=39015..114138
            \\on x=-60785..11593,y=-56135..2999,z=-95368..-26915
            \\on x=-32178..58085,y=17647..101866,z=-91405..-8878
            \\on x=-53655..12091,y=50097..105568,z=-75335..-4862
            \\on x=-111166..-40997,y=-71714..2688,z=5609..50954
            \\on x=-16602..70118,y=-98693..-44401,z=5197..76897
            \\on x=16383..101554,y=4615..83635,z=-44907..18747
            \\off x=-95822..-15171,y=-19987..48940,z=10804..104439
            \\on x=-89813..-14614,y=16069..88491,z=-3297..45228
            \\on x=41075..99376,y=-20427..49978,z=-52012..13762
            \\on x=-21330..50085,y=-17944..62733,z=-112280..-30197
            \\on x=-16478..35915,y=36008..118594,z=-7885..47086
            \\off x=-98156..-27851,y=-49952..43171,z=-99005..-8456
            \\off x=2032..69770,y=-71013..4824,z=7471..94418
            \\on x=43670..120875,y=-42068..12382,z=-24787..38892
            \\off x=37514..111226,y=-45862..25743,z=-16714..54663
            \\off x=25699..97951,y=-30668..59918,z=-15349..69697
            \\off x=-44271..17935,y=-9516..60759,z=49131..112598
            \\on x=-61695..-5813,y=40978..94975,z=8655..80240
            \\off x=-101086..-9439,y=-7088..67543,z=33935..83858
            \\off x=18020..114017,y=-48931..32606,z=21474..89843
            \\off x=-77139..10506,y=-89994..-18797,z=-80..59318
            \\off x=8476..79288,y=-75520..11602,z=-96624..-24783
            \\on x=-47488..-1262,y=24338..100707,z=16292..72967
            \\off x=-84341..13987,y=2429..92914,z=-90671..-1318
            \\off x=-37810..49457,y=-71013..-7894,z=-105357..-13188
            \\off x=-27365..46395,y=31009..98017,z=15428..76570
            \\off x=-70369..-16548,y=22648..78696,z=-1892..86821
            \\on x=-53470..21291,y=-120233..-33476,z=-44150..38147
            \\off x=-93533..-4276,y=-16170..68771,z=-104985..-24507
        , std.testing.allocator);
        defer std.testing.allocator.free(res[0]);
        defer std.testing.allocator.free(res[1]);
        try std.testing.expectEqualStrings("474140", res[0]);
        try std.testing.expectEqualStrings("2758514936282235", res[1]);
    }
}
|
2021/day22.zig
|
const std = @import("std");
const tools = @import("tools");
const with_trace = true;
const assert = std.debug.assert;
/// Debug printf to stderr; becomes a no-op when `with_trace` is disabled.
fn trace(comptime fmt: []const u8, args: anytype) void {
    if (!with_trace) return;
    std.debug.print(fmt, args);
}
/// AoC 2016 day 11: move every microchip/generator to the target floor using
/// a best-first search over complete building states, printing each improving
/// solution as it is found.
pub fn main() anyerror!void {
    const stdout = std.io.getStdOut().writer();
    // Arena: the search allocates heavily and everything is freed at exit.
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
    // Number of element pairs; objects[floor][0..num] are microchips and
    // objects[floor][num..2*num] the matching generators.
    const num = 7;
    const State = struct {
        elevator: u32,
        objects: [4][num * 2]bool,
        // Sanity check: every chip and every generator exists on exactly
        // one floor.
        fn is_consistent(s: *const @This()) bool {
            {
                var i: u32 = 0;
                while (i < num) : (i += 1) {
                    var countm: u32 = 0;
                    var countg: u32 = 0;
                    for (s.objects) |floor| {
                        if (floor[0 + i]) countm += 1;
                        if (floor[num + i]) countg += 1;
                    }
                    if (countm != 1 or countg != 1)
                        return false;
                }
            }
            return true;
        }
        // A state is valid when no unshielded chip (own generator absent)
        // shares a floor with any other generator.
        fn is_valid(s: *const @This()) bool {
            assert(s.is_consistent());
            for (s.objects) |floor| {
                var i: u32 = 0;
                while (i < num) : (i += 1) {
                    const haschip = floor[0 + i];
                    const hasgen = floor[num + i];
                    if (!haschip or hasgen)
                        continue; // safe.
                    var j: u32 = 0;
                    while (j < num) : (j += 1) {
                        if (j == i)
                            continue;
                        if (floor[num + j])
                            return false; // unshiled + other generator
                    }
                }
            }
            return true;
        }
        // Solved when every object sits on floor 0.
        fn is_done(s: *const @This()) bool {
            for (s.objects[0]) |o| {
                if (!o) return false;
            }
            return true;
        }
        // Debug dump of the building layout ('*' = object present).
        fn tracefloors(s: *const @This()) void {
            for (s.objects) |floor, i| {
                trace("F{} ", .{i});
                if (i == s.elevator) {
                    trace("E ", .{});
                } else {
                    trace("  ", .{});
                }
                for (floor) |o| {
                    const char: u8 = if (o) '*' else '.';
                    trace("{c} ", .{char});
                }
                trace("\n", .{});
            }
        }
        // Progress measure: sum of floor indices over all objects
        // (0 == everything on floor 0, i.e. solved).
        fn floorsum(s: *const @This()) u32 {
            var sum: usize = 0;
            for (s.objects) |floor, i| {
                for (floor) |o| {
                    if (o) sum += i;
                }
            }
            return @intCast(u32, sum);
        }
    };
    // Puzzle input; the elevator starts on floor index 3.
    const initial_state = State{
        .elevator = 3,
        .objects = .{
            // .{ false, false, false, false },
            // .{ false, false, false, true },
            // .{ false, false, true, false },
            // .{ true, true, false, false },
            //M: thu plu str pro rut ele dil |G: thu plu str pro rut ele dil
            .{ false, false, false, false, false, false, false, false, false, false, false, false, false, false },
            .{ false, false, false, true, true, false, false, false, false, false, true, true, false, false },
            .{ false, true, true, false, false, false, false, false, false, false, false, false, false, false },
            .{ true, false, false, false, false, true, true, true, true, true, false, false, true, true },
        },
    };
    assert(initial_state.is_valid());
    const BFS = tools.BestFirstSearch(State, void);
    var searcher = BFS.init(allocator);
    defer searcher.deinit();
    try searcher.insert(BFS.Node{
        .rating = 0,
        .steps = 0,
        .state = initial_state,
        .trace = {},
    });
    // trace_dep: best (lowest) floorsum reported so far; best: shortest
    // solution length found so far, used to prune.
    var trace_dep: usize = 999999;
    var best: u32 = 999999;
    while (searcher.pop()) |node| {
        // Log whenever the search reaches a state closer to the goal than
        // any seen before.
        if (node.state.floorsum() < trace_dep) {
            trace_dep = node.state.floorsum();
            trace("examining (floorsum={}, agenda={}, visisted={})\n", .{ trace_dep, searcher.agenda.items.len, searcher.visited.count() });
            node.state.tracefloors();
        }
        if (node.state.is_done()) {
            try stdout.print("go solution. steps={}, elevator={}, agenda={}, visisted={}\n", .{ node.steps, node.state.elevator, searcher.agenda.items.len, searcher.visited.count() });
            if (node.steps < best) {
                best = node.steps;
            }
            continue;
        }
        // Prune paths that cannot beat the best known solution.
        if (node.steps >= best)
            continue;
        var next = BFS.Node{
            .rating = node.rating + 1,
            .steps = node.steps + 1,
            .state = undefined,
            .trace = {},
        };
        const s = &node.state;
        // Generate successors: move the elevator one floor up or down...
        for ([2]bool{ true, false }) |goup| {
            if (goup and s.elevator == 0) continue;
            if (!goup and s.elevator == 3) continue;
            const ns = &next.state;
            ns.elevator = if (goup) s.elevator - 1 else s.elevator + 1;
            const floor = s.objects[s.elevator];
            // ...carrying either one object (obj1) or a pair (obj1 + obj2).
            for (floor) |has1, obj1| {
                if (!has1)
                    continue;
                ns.objects = s.objects;
                assert(ns.is_valid());
                ns.objects[s.elevator][obj1] = false;
                ns.objects[ns.elevator][obj1] = true;
                if (ns.is_valid()) {
                    //trace("...inserting solo {}\n", .{obj1});
                    next.rating = @intCast(i32, next.steps + 3 * ns.floorsum());
                    try searcher.insert(next);
                }
                for (floor[obj1 + 1 ..]) |has2, index2| {
                    if (!has2)
                        continue;
                    const obj2 = obj1 + 1 + index2;
                    assert(ns.is_consistent());
                    // Also move obj2; the defers undo it so the next
                    // candidate pair starts from the solo-move state.
                    ns.objects[s.elevator][obj2] = false;
                    defer ns.objects[s.elevator][obj2] = true;
                    ns.objects[ns.elevator][obj2] = true;
                    defer ns.objects[ns.elevator][obj2] = false;
                    if (ns.is_valid()) {
                        //trace("...inserting pair {},{}\n", .{ obj1, obj2 });
                        next.rating = @intCast(i32, next.steps + 3 * ns.floorsum());
                        try searcher.insert(next);
                    }
                }
            }
        }
    }
    // try stdout.print("outputs[{}, {}, {}] = {}\n", .{ outputs[0], outputs[1], outputs[2], outputs[0] * outputs[1] * outputs[2] });
}
|
2016/day11.zig
|
const wlr = @import("../wlroots.zig");
const wayland = @import("wayland");
const wl = wayland.server.wl;
/// Manual Zig binding for wlroots' `struct wlr_renderer`.
/// NOTE(review): the field layout must mirror the C struct exactly —
/// verify against the wlroots headers of the targeted version.
pub const Renderer = extern struct {
    const Impl = opaque {};
    impl: *const Impl,
    rendering: bool,
    rendering_with_buffer: bool,
    events: extern struct {
        destroy: wl.Signal(*Renderer),
    },
    // wlr_renderer functions:
    extern fn wlr_renderer_autocreate(backend: *wlr.Backend) ?*Renderer;
    /// Creates a renderer for `backend`; returns
    /// `error.RendererCreateFailed` when the C call yields NULL.
    pub fn autocreate(backend: *wlr.Backend) !*Renderer {
        return wlr_renderer_autocreate(backend) orelse error.RendererCreateFailed;
    }
    extern fn wlr_renderer_begin(renderer: *Renderer, width: u32, height: u32) void;
    pub const begin = wlr_renderer_begin;
    extern fn wlr_renderer_begin_with_buffer(renderer: *Renderer, buffer: *wlr.Buffer) bool;
    pub const beginWithBuffer = wlr_renderer_begin_with_buffer;
    pub extern fn wlr_renderer_end(renderer: *Renderer) void;
    pub const end = wlr_renderer_end;
    extern fn wlr_renderer_clear(renderer: *Renderer, color: *const [4]f32) void;
    pub const clear = wlr_renderer_clear;
    extern fn wlr_renderer_init_wl_display(renderer: *Renderer, server: *wl.Server) bool;
    /// Wires the renderer into a Wayland display; converts the C boolean
    /// failure into `error.RenderInitFailed`.
    pub fn initServer(renderer: *Renderer, server: *wl.Server) !void {
        if (!wlr_renderer_init_wl_display(renderer, server)) {
            return error.RenderInitFailed;
        }
    }
    extern fn wlr_renderer_scissor(renderer: *Renderer, box: ?*wlr.Box) void;
    pub const scissor = wlr_renderer_scissor;
    extern fn wlr_renderer_get_shm_texture_formats(renderer: *Renderer, len: *usize) [*]const u32;
    pub const getShmTextureFormats = wlr_renderer_get_shm_texture_formats;
    // TODO:
    //extern fn wlr_renderer_get_dmabuf_texture_formats(renderer: *Renderer) [*c]const struct_wlr_drm_format_set;
    //pub const getDmabufFormats = wlr_renderer_get_dmabuf_formats;
    extern fn wlr_renderer_read_pixels(
        renderer: *Renderer,
        fmt: u32,
        flags: ?*u32,
        stride: u32,
        width: u32,
        height: u32,
        src_x: u32,
        src_y: u32,
        dst_x: u32,
        dst_y: u32,
        data: [*]u8,
    ) bool;
    pub const readPixels = wlr_renderer_read_pixels;
    extern fn wlr_renderer_get_drm_fd(renderer: *Renderer) c_int;
    pub const getDrmFd = wlr_renderer_get_drm_fd;
    extern fn wlr_renderer_destroy(renderer: *Renderer) void;
    pub const destroy = wlr_renderer_destroy;
    // wlr_render functions:
    extern fn wlr_render_texture(
        renderer: *Renderer,
        texture: *wlr.Texture,
        projection: *const [9]f32,
        x: c_int,
        y: c_int,
        alpha: f32,
    ) bool;
    /// Draws `texture` at (x, y); converts the C boolean failure into
    /// `error.RenderFailed`.
    pub fn renderTexture(
        renderer: *Renderer,
        texture: *wlr.Texture,
        projection: *const [9]f32,
        x: c_int,
        y: c_int,
        alpha: f32,
    ) !void {
        if (!wlr_render_texture(renderer, texture, projection, x, y, alpha))
            return error.RenderFailed;
    }
    extern fn wlr_render_texture_with_matrix(
        renderer: *Renderer,
        texture: *wlr.Texture,
        matrix: *const [9]f32,
        alpha: f32,
    ) bool;
    /// Draws `texture` transformed by a 3x3 `matrix`.
    pub fn renderTextureWithMatrix(
        renderer: *Renderer,
        texture: *wlr.Texture,
        matrix: *const [9]f32,
        alpha: f32,
    ) !void {
        if (!wlr_render_texture_with_matrix(renderer, texture, matrix, alpha))
            return error.RenderFailed;
    }
    extern fn wlr_render_subtexture_with_matrix(
        renderer: *Renderer,
        texture: *wlr.Texture,
        box: *const wlr.FBox,
        matrix: *const [9]f32,
        alpha: f32,
    ) bool;
    /// Draws the `box` region of `texture` transformed by `matrix`.
    pub fn renderSubtextureWithMatrix(
        renderer: *Renderer,
        texture: *wlr.Texture,
        box: *const wlr.FBox,
        matrix: *const [9]f32,
        alpha: f32,
    ) !void {
        if (!wlr_render_subtexture_with_matrix(renderer, texture, box, matrix, alpha))
            return error.RenderFailed;
    }
    extern fn wlr_render_rect(
        renderer: *Renderer,
        box: *const wlr.Box,
        color: *const [4]f32,
        projection: *const [9]f32,
    ) void;
    pub const renderRect = wlr_render_rect;
    extern fn wlr_render_quad_with_matrix(
        renderer: *Renderer,
        color: *const [4]f32,
        matrix: *const [9]f32,
    ) void;
    pub const renderQuadWithMatrix = wlr_render_quad_with_matrix;
};
|
src/render/renderer.zig
|
const std = @import("std");
/// Puzzle result pair: @"0" = part 1 (max apex height), @"1" = part 2 (hit count).
const Answer = struct { @"0": u32, @"1": u32 };
/// Inclusive integer interval [lo, hi].
const Range = struct { lo: i32, hi: i32 };
/// Axis-aligned target rectangle in the x/y plane.
const Target = struct { x: Range, y: Range };
/// Whether `x` lies inside the inclusive interval `range`.
fn in_range(x: i32, range: Range) bool {
    return x >= range.lo and x <= range.hi;
}
/// Whether the probe position (x, y) is inside the target rectangle.
fn in_target(y: i32, x: i32, target: Target) bool {
    if (!in_range(x, target.x)) return false;
    return in_range(y, target.y);
}
/// Steps the probe from the origin with initial velocity (xv_, yv_) under
/// AoC 2021 day 17 physics (gravity -1 per step, horizontal drag toward 0).
/// Returns true if the probe ever lands inside `target`, false once it has
/// fallen below or flown past the target area.
/// (Removed an unused `step_num` counter from the original.)
fn simulate(yv_: i32, xv_: i32, target: Target) bool {
    var yv = yv_;
    var xv = xv_;
    var y: i32 = 0;
    var x: i32 = 0;
    while (true) {
        y += yv;
        x += xv;
        if (in_target(y, x, target)) {
            return true;
        } else if (y < target.y.lo or x > target.x.hi) {
            // Below the bottom edge or beyond the right edge: can never
            // come back (assumes target right of and below the origin).
            return false;
        }
        // Gravity pulls down; drag moves horizontal velocity toward zero.
        yv -= 1;
        if (xv > 0) {
            xv -= 1;
        } else if (xv < 0) {
            xv += 1;
        }
    }
}
/// Parses "target area: x=a..b, y=c..d" from `filename`, then brute-forces
/// every plausible launch velocity. Returns the maximum apex height (part 1)
/// and the number of velocities that hit the target (part 2).
/// (Removed an unused GeneralPurposeAllocator the original declared but
/// never allocated from.)
fn run(filename: []const u8) !Answer {
    const file = try std.fs.cwd().openFile(filename, .{ .read = true });
    defer file.close();
    var reader = std.io.bufferedReader(file.reader()).reader();
    var buffer: [4096]u8 = undefined;
    var target: Target = undefined;
    while (try reader.readUntilDelimiterOrEof(&buffer, '\n')) |line| {
        // Split "target area: x=lo..hi, y=lo..hi" on '=', ',', '.' and ' '.
        var tokens = std.mem.tokenize(u8, line, "=,. ");
        _ = tokens.next().?; // target
        _ = tokens.next().?; // area
        _ = tokens.next().?; // x
        const x_lo = try std.fmt.parseInt(i32, tokens.next().?, 10);
        const x_hi = try std.fmt.parseInt(i32, tokens.next().?, 10);
        const x_range = Range{ .lo = x_lo, .hi = x_hi };
        _ = tokens.next().?; // y
        const y_lo = try std.fmt.parseInt(i32, tokens.next().?, 10);
        const y_hi = try std.fmt.parseInt(i32, tokens.next().?, 10);
        const y_range = Range{ .lo = y_lo, .hi = y_hi };
        target = Target{ .x = x_range, .y = y_range };
    }
    // The search bounds below assume the target lies right of and below
    // the origin.
    std.debug.assert(target.x.lo > 0);
    std.debug.assert(target.y.hi < 0);
    var counter: u32 = 0;
    var max_yv: i32 = 0;
    {
        // Any yv >= -y.lo overshoots on the way down; any xv > x.hi
        // overshoots on the very first step.
        var yv: i32 = target.y.lo;
        while (yv < -target.y.lo) : (yv += 1) {
            var xv: i32 = 0;
            while (xv <= target.x.hi) : (xv += 1) {
                if (simulate(yv, xv, target)) {
                    max_yv = @maximum(yv, max_yv);
                    counter += 1;
                }
            }
        }
    }
    std.debug.assert(max_yv > 0);
    // Apex of a shot with initial vy = max_yv is the triangular number.
    const max_y = @intCast(u32, max_yv) * (@intCast(u32, max_yv) + 1) / 2;
    return Answer{ .@"0" = max_y, .@"1" = counter };
}
/// Solves the puzzle against the real input and prints both answers.
pub fn main() !void {
    const answers = try run("inputs/" ++ @typeName(@This()) ++ ".txt");
    const part1 = answers.@"0";
    const part2 = answers.@"1";
    std.debug.print("{d}\n", .{part1});
    std.debug.print("{d}\n", .{part2});
}
// Checks both parts against the AoC sample input.
test {
    const answer = try run("test-inputs/" ++ @typeName(@This()) ++ ".txt");
    try std.testing.expectEqual(@as(u32, 45), answer.@"0");
    try std.testing.expectEqual(@as(u32, 112), answer.@"1");
}
|
src/day17.zig
|
const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const epsilon: f32 = 0.00001;
/// Wraps `in_angle` (radians) into the range [-pi, pi).
/// Port of DirectXMath's XMScalarModAngle.
pub fn modAngle(in_angle: f32) f32 {
    // Shift by pi so truncation below lands in the right period.
    const angle = in_angle + math.pi;
    var temp: f32 = math.fabs(angle);
    // Reduce modulo 2*pi. BUG FIX: the original divided by pi instead of
    // 2*pi, producing results far outside [-pi, pi) (e.g. modAngle(0)
    // returned -2*pi and modAngle(0.5) returned about -5.78).
    temp = temp - (2.0 * math.pi * @intToFloat(f32, @floatToInt(i32, temp / (2.0 * math.pi))));
    // Undo the shift.
    temp = temp - math.pi;
    if (angle < 0.0) {
        temp = -temp;
    }
    return temp;
}
/// 2-component float vector with componentwise math helpers.
/// `extern` layout: exactly two contiguous f32, safe to pass to C/GPU code.
pub const Vec2 = extern struct {
    c: [2]f32,
    pub inline fn init(x: f32, y: f32) Vec2 {
        return .{ .c = [_]f32{ x, y } };
    }
    /// Splat: both components set to `s`.
    pub inline fn initS(s: f32) Vec2 {
        return init(s, s);
    }
    pub inline fn initZero() Vec2 {
        const static = struct {
            const zero = init(0.0, 0.0);
        };
        return static.zero;
    }
    /// Extends to 3D with z = 0.
    pub inline fn toVec3(v: Vec2) Vec3 {
        return Vec3.init(v.c[0], v.c[1], 0.0);
    }
    /// Extends to homogeneous 4D with z = 0, w = 1.
    pub inline fn toVec4(v: Vec2) Vec4 {
        return Vec4.init(v.c[0], v.c[1], 0.0, 1.0);
    }
    /// Componentwise absolute-tolerance comparison.
    pub inline fn approxEq(v0: Vec2, v1: Vec2, eps: f32) bool {
        return math.approxEqAbs(f32, v0.c[0], v1.c[0], eps) and
            math.approxEqAbs(f32, v0.c[1], v1.c[1], eps);
    }
    pub inline fn add(v0: Vec2, v1: Vec2) Vec2 {
        return init(
            v0.c[0] + v1.c[0],
            v0.c[1] + v1.c[1],
        );
    }
    pub inline fn sub(v0: Vec2, v1: Vec2) Vec2 {
        return init(
            v0.c[0] - v1.c[0],
            v0.c[1] - v1.c[1],
        );
    }
    pub inline fn mul(v0: Vec2, v1: Vec2) Vec2 {
        return init(
            v0.c[0] * v1.c[0],
            v0.c[1] * v1.c[1],
        );
    }
    /// Componentwise division; asserts the divisor is not (near) zero.
    pub inline fn div(v0: Vec2, v1: Vec2) Vec2 {
        assert(!approxEq(v1, initZero(), epsilon));
        return init(
            v0.c[0] / v1.c[0],
            v0.c[1] / v1.c[1],
        );
    }
    pub inline fn scale(v: Vec2, s: f32) Vec2 {
        return init(
            v.c[0] * s,
            v.c[1] * s,
        );
    }
    pub inline fn neg(v: Vec2) Vec2 {
        return init(
            -v.c[0],
            -v.c[1],
        );
    }
    /// v0 * v1 + v2, componentwise.
    pub inline fn mulAdd(v0: Vec2, v1: Vec2, v2: Vec2) Vec2 {
        return init(
            v0.c[0] * v1.c[0] + v2.c[0],
            v0.c[1] * v1.c[1] + v2.c[1],
        );
    }
    /// -(v0 * v1) + v2, componentwise.
    pub inline fn negMulAdd(v0: Vec2, v1: Vec2, v2: Vec2) Vec2 {
        return init(
            -v0.c[0] * v1.c[0] + v2.c[0],
            -v0.c[1] * v1.c[1] + v2.c[1],
        );
    }
    /// v0 * v1 - v2, componentwise.
    pub inline fn mulSub(v0: Vec2, v1: Vec2, v2: Vec2) Vec2 {
        return init(
            v0.c[0] * v1.c[0] - v2.c[0],
            v0.c[1] * v1.c[1] - v2.c[1],
        );
    }
    /// -(v0 * v1) - v2, componentwise.
    pub inline fn negMulSub(v0: Vec2, v1: Vec2, v2: Vec2) Vec2 {
        return init(
            -v0.c[0] * v1.c[0] - v2.c[0],
            -v0.c[1] * v1.c[1] - v2.c[1],
        );
    }
    /// Linear interpolation: v0 + (v1 - v0) * t.
    pub inline fn lerp(v0: Vec2, v1: Vec2, t: f32) Vec2 {
        const tt = initS(t);
        const v0v1 = v1.sub(v0);
        return v0v1.mulAdd(tt, v0);
    }
    /// Componentwise reciprocal; asserts `v` is not (near) zero.
    pub inline fn rcp(v: Vec2) Vec2 {
        assert(!approxEq(v, initZero(), epsilon));
        return init(
            1.0 / v.c[0],
            1.0 / v.c[1],
        );
    }
    pub inline fn dot(v0: Vec2, v1: Vec2) f32 {
        return v0.c[0] * v1.c[0] +
            v0.c[1] * v1.c[1];
    }
    pub inline fn length(v: Vec2) f32 {
        return math.sqrt(dot(v, v));
    }
    pub inline fn lengthSq(v: Vec2) f32 {
        return dot(v, v);
    }
    /// Unit-length copy of `v`; asserts `v` is not (near) zero-length.
    pub inline fn normalize(v: Vec2) Vec2 {
        const len = length(v);
        assert(!math.approxEqAbs(f32, len, 0.0, epsilon));
        const rcplen = 1.0 / len;
        return v.scale(rcplen);
    }
    /// Point transform: applies rotation/scale rows 0-1 of `m` plus the
    /// translation row 3 (w assumed 1).
    pub inline fn transform(v: Vec2, m: Mat4) Vec2 {
        return init(
            v.c[0] * m.r[0].c[0] + v.c[1] * m.r[1].c[0] + m.r[3].c[0],
            v.c[0] * m.r[0].c[1] + v.c[1] * m.r[1].c[1] + m.r[3].c[1],
        );
    }
    /// Direction transform: like `transform` but without translation.
    pub inline fn transformNormal(v: Vec2, m: Mat4) Vec2 {
        return init(
            v.c[0] * m.r[0].c[0] + v.c[1] * m.r[1].c[0],
            v.c[0] * m.r[0].c[1] + v.c[1] * m.r[1].c[1],
        );
    }
};
/// Three-component f32 vector with C-compatible layout, row-vector convention.
pub const Vec3 = extern struct {
    c: [3]f32,

    /// Build from individual components.
    pub inline fn init(x: f32, y: f32, z: f32) Vec3 {
        return .{ .c = [_]f32{ x, y, z } };
    }
    /// Splat a scalar into all three components.
    pub inline fn initS(s: f32) Vec3 {
        return init(s, s, s);
    }
    /// The zero vector.
    pub inline fn initZero() Vec3 {
        const static = struct {
            const zero = init(0.0, 0.0, 0.0);
        };
        return static.zero;
    }
    /// Drop the z component.
    pub inline fn toVec2(v: Vec3) Vec2 {
        return Vec2.init(v.c[0], v.c[1]);
    }
    /// Extend to a homogeneous point (w = 1).
    pub inline fn toVec4(v: Vec3) Vec4 {
        return Vec4.init(v.c[0], v.c[1], v.c[2], 1.0);
    }
    /// Componentwise comparison with absolute tolerance `eps`.
    pub inline fn approxEq(v0: Vec3, v1: Vec3, eps: f32) bool {
        return math.approxEqAbs(f32, v0.c[0], v1.c[0], eps) and
            math.approxEqAbs(f32, v0.c[1], v1.c[1], eps) and
            math.approxEqAbs(f32, v0.c[2], v1.c[2], eps);
    }
    /// Dot product.
    pub inline fn dot(v0: Vec3, v1: Vec3) f32 {
        return v0.c[0] * v1.c[0] +
            v0.c[1] * v1.c[1] +
            v0.c[2] * v1.c[2];
    }
    /// Cross product.
    pub inline fn cross(v0: Vec3, v1: Vec3) Vec3 {
        return init(
            v0.c[1] * v1.c[2] - v0.c[2] * v1.c[1],
            v0.c[2] * v1.c[0] - v0.c[0] * v1.c[2],
            v0.c[0] * v1.c[1] - v0.c[1] * v1.c[0],
        );
    }
    /// Componentwise addition.
    pub inline fn add(v0: Vec3, v1: Vec3) Vec3 {
        return init(
            v0.c[0] + v1.c[0],
            v0.c[1] + v1.c[1],
            v0.c[2] + v1.c[2],
        );
    }
    /// Componentwise subtraction.
    pub inline fn sub(v0: Vec3, v1: Vec3) Vec3 {
        return init(
            v0.c[0] - v1.c[0],
            v0.c[1] - v1.c[1],
            v0.c[2] - v1.c[2],
        );
    }
    /// Componentwise multiplication.
    pub inline fn mul(v0: Vec3, v1: Vec3) Vec3 {
        return init(
            v0.c[0] * v1.c[0],
            v0.c[1] * v1.c[1],
            v0.c[2] * v1.c[2],
        );
    }
    /// Componentwise division.
    /// Debug-asserts every divisor component is bounded away from zero; the
    /// previous all-zero-vector check let divisors like (1, 0, 1) through.
    pub inline fn div(v0: Vec3, v1: Vec3) Vec3 {
        assert(!math.approxEqAbs(f32, v1.c[0], 0.0, epsilon));
        assert(!math.approxEqAbs(f32, v1.c[1], 0.0, epsilon));
        assert(!math.approxEqAbs(f32, v1.c[2], 0.0, epsilon));
        return init(
            v0.c[0] / v1.c[0],
            v0.c[1] / v1.c[1],
            v0.c[2] / v1.c[2],
        );
    }
    /// Multiply each component by the scalar s.
    pub inline fn scale(v: Vec3, s: f32) Vec3 {
        return init(
            v.c[0] * s,
            v.c[1] * s,
            v.c[2] * s,
        );
    }
    /// Componentwise negation.
    pub inline fn neg(v: Vec3) Vec3 {
        return init(
            -v.c[0],
            -v.c[1],
            -v.c[2],
        );
    }
    /// Componentwise v0 * v1 + v2.
    pub inline fn mulAdd(v0: Vec3, v1: Vec3, v2: Vec3) Vec3 {
        return init(
            v0.c[0] * v1.c[0] + v2.c[0],
            v0.c[1] * v1.c[1] + v2.c[1],
            v0.c[2] * v1.c[2] + v2.c[2],
        );
    }
    /// Componentwise (-v0) * v1 + v2.
    pub inline fn negMulAdd(v0: Vec3, v1: Vec3, v2: Vec3) Vec3 {
        return init(
            -v0.c[0] * v1.c[0] + v2.c[0],
            -v0.c[1] * v1.c[1] + v2.c[1],
            -v0.c[2] * v1.c[2] + v2.c[2],
        );
    }
    /// Componentwise v0 * v1 - v2.
    pub inline fn mulSub(v0: Vec3, v1: Vec3, v2: Vec3) Vec3 {
        return init(
            v0.c[0] * v1.c[0] - v2.c[0],
            v0.c[1] * v1.c[1] - v2.c[1],
            v0.c[2] * v1.c[2] - v2.c[2],
        );
    }
    /// Componentwise (-v0) * v1 - v2.
    pub inline fn negMulSub(v0: Vec3, v1: Vec3, v2: Vec3) Vec3 {
        return init(
            -v0.c[0] * v1.c[0] - v2.c[0],
            -v0.c[1] * v1.c[1] - v2.c[1],
            -v0.c[2] * v1.c[2] - v2.c[2],
        );
    }
    /// Linear interpolation: v0 + (v1 - v0) * t.
    pub inline fn lerp(v0: Vec3, v1: Vec3, t: f32) Vec3 {
        const ttt = initS(t); // unqualified call, consistent with Vec2.lerp
        const v0v1 = v1.sub(v0);
        return v0v1.mulAdd(ttt, v0);
    }
    /// Componentwise reciprocal (1 / v).
    /// Debug-asserts every component is bounded away from zero; the previous
    /// all-zero-vector check let inputs like (0, 5, 1) through.
    pub inline fn rcp(v: Vec3) Vec3 {
        assert(!math.approxEqAbs(f32, v.c[0], 0.0, epsilon));
        assert(!math.approxEqAbs(f32, v.c[1], 0.0, epsilon));
        assert(!math.approxEqAbs(f32, v.c[2], 0.0, epsilon));
        return init(
            1.0 / v.c[0],
            1.0 / v.c[1],
            1.0 / v.c[2],
        );
    }
    /// Euclidean length.
    pub inline fn length(v: Vec3) f32 {
        return math.sqrt(dot(v, v));
    }
    /// Squared length; avoids the sqrt when only comparing magnitudes.
    pub inline fn lengthSq(v: Vec3) f32 {
        return dot(v, v);
    }
    /// Scale to unit length; debug-asserts v is not (near) zero-length.
    pub inline fn normalize(v: Vec3) Vec3 {
        const len = length(v);
        assert(!math.approxEqAbs(f32, len, 0.0, epsilon));
        const rcplen = 1.0 / len;
        return v.scale(rcplen);
    }
    /// Point transform under the row-vector convention: v is treated as
    /// (x, y, z, 1), so row 3 of m contributes the translation.
    pub inline fn transform(v: Vec3, m: Mat4) Vec3 {
        return init(
            v.c[0] * m.r[0].c[0] + v.c[1] * m.r[1].c[0] + v.c[2] * m.r[2].c[0] + m.r[3].c[0],
            v.c[0] * m.r[0].c[1] + v.c[1] * m.r[1].c[1] + v.c[2] * m.r[2].c[1] + m.r[3].c[1],
            v.c[0] * m.r[0].c[2] + v.c[1] * m.r[1].c[2] + v.c[2] * m.r[2].c[2] + m.r[3].c[2],
        );
    }
    /// Direction transform: like transform() but without the translation row.
    pub inline fn transformNormal(v: Vec3, m: Mat4) Vec3 {
        return init(
            v.c[0] * m.r[0].c[0] + v.c[1] * m.r[1].c[0] + v.c[2] * m.r[2].c[0],
            v.c[0] * m.r[0].c[1] + v.c[1] * m.r[1].c[1] + v.c[2] * m.r[2].c[1],
            v.c[0] * m.r[0].c[2] + v.c[1] * m.r[1].c[2] + v.c[2] * m.r[2].c[2],
        );
    }
};
/// Four-component f32 vector with C-compatible layout, row-vector convention.
pub const Vec4 = extern struct {
    c: [4]f32,

    /// Build from individual components.
    pub inline fn init(x: f32, y: f32, z: f32, w: f32) Vec4 {
        return .{ .c = [_]f32{ x, y, z, w } };
    }
    /// Splat a scalar into all four components.
    pub inline fn initS(s: f32) Vec4 {
        return init(s, s, s, s);
    }
    /// The zero vector.
    pub inline fn initZero() Vec4 {
        const static = struct {
            const zero = init(0.0, 0.0, 0.0, 0.0);
        };
        return static.zero;
    }
    /// Drop the z and w components.
    pub inline fn toVec2(v: Vec4) Vec2 {
        return Vec2.init(v.c[0], v.c[1]);
    }
    /// Drop the w component.
    pub inline fn toVec3(v: Vec4) Vec3 {
        return Vec3.init(v.c[0], v.c[1], v.c[2]);
    }
    /// Componentwise comparison with absolute tolerance `eps`.
    pub inline fn approxEq(v0: Vec4, v1: Vec4, eps: f32) bool {
        return math.approxEqAbs(f32, v0.c[0], v1.c[0], eps) and
            math.approxEqAbs(f32, v0.c[1], v1.c[1], eps) and
            math.approxEqAbs(f32, v0.c[2], v1.c[2], eps) and
            math.approxEqAbs(f32, v0.c[3], v1.c[3], eps);
    }
    /// Componentwise addition.
    pub inline fn add(v0: Vec4, v1: Vec4) Vec4 {
        return init(
            v0.c[0] + v1.c[0],
            v0.c[1] + v1.c[1],
            v0.c[2] + v1.c[2],
            v0.c[3] + v1.c[3],
        );
    }
    /// Componentwise subtraction.
    pub inline fn sub(v0: Vec4, v1: Vec4) Vec4 {
        return init(
            v0.c[0] - v1.c[0],
            v0.c[1] - v1.c[1],
            v0.c[2] - v1.c[2],
            v0.c[3] - v1.c[3],
        );
    }
    /// Componentwise multiplication.
    pub inline fn mul(v0: Vec4, v1: Vec4) Vec4 {
        return init(
            v0.c[0] * v1.c[0],
            v0.c[1] * v1.c[1],
            v0.c[2] * v1.c[2],
            v0.c[3] * v1.c[3],
        );
    }
    /// Componentwise division.
    /// Debug-asserts every divisor component is bounded away from zero; the
    /// previous all-zero-vector check let divisors with a single zero
    /// component through.
    pub inline fn div(v0: Vec4, v1: Vec4) Vec4 {
        assert(!math.approxEqAbs(f32, v1.c[0], 0.0, epsilon));
        assert(!math.approxEqAbs(f32, v1.c[1], 0.0, epsilon));
        assert(!math.approxEqAbs(f32, v1.c[2], 0.0, epsilon));
        assert(!math.approxEqAbs(f32, v1.c[3], 0.0, epsilon));
        return init(
            v0.c[0] / v1.c[0],
            v0.c[1] / v1.c[1],
            v0.c[2] / v1.c[2],
            v0.c[3] / v1.c[3],
        );
    }
    /// Multiply each component by the scalar s.
    pub inline fn scale(v: Vec4, s: f32) Vec4 {
        return init(
            v.c[0] * s,
            v.c[1] * s,
            v.c[2] * s,
            v.c[3] * s,
        );
    }
    /// Componentwise negation.
    pub inline fn neg(v: Vec4) Vec4 {
        return init(
            -v.c[0],
            -v.c[1],
            -v.c[2],
            -v.c[3],
        );
    }
    /// Componentwise v0 * v1 + v2.
    pub inline fn mulAdd(v0: Vec4, v1: Vec4, v2: Vec4) Vec4 {
        return init(
            v0.c[0] * v1.c[0] + v2.c[0],
            v0.c[1] * v1.c[1] + v2.c[1],
            v0.c[2] * v1.c[2] + v2.c[2],
            v0.c[3] * v1.c[3] + v2.c[3],
        );
    }
    /// Componentwise (-v0) * v1 + v2.
    pub inline fn negMulAdd(v0: Vec4, v1: Vec4, v2: Vec4) Vec4 {
        return init(
            -v0.c[0] * v1.c[0] + v2.c[0],
            -v0.c[1] * v1.c[1] + v2.c[1],
            -v0.c[2] * v1.c[2] + v2.c[2],
            -v0.c[3] * v1.c[3] + v2.c[3],
        );
    }
    /// Componentwise v0 * v1 - v2.
    pub inline fn mulSub(v0: Vec4, v1: Vec4, v2: Vec4) Vec4 {
        return init(
            v0.c[0] * v1.c[0] - v2.c[0],
            v0.c[1] * v1.c[1] - v2.c[1],
            v0.c[2] * v1.c[2] - v2.c[2],
            v0.c[3] * v1.c[3] - v2.c[3],
        );
    }
    /// Componentwise (-v0) * v1 - v2.
    pub inline fn negMulSub(v0: Vec4, v1: Vec4, v2: Vec4) Vec4 {
        return init(
            -v0.c[0] * v1.c[0] - v2.c[0],
            -v0.c[1] * v1.c[1] - v2.c[1],
            -v0.c[2] * v1.c[2] - v2.c[2],
            -v0.c[3] * v1.c[3] - v2.c[3],
        );
    }
    /// Linear interpolation: v0 + (v1 - v0) * t.
    pub inline fn lerp(v0: Vec4, v1: Vec4, t: f32) Vec4 {
        const tttt = initS(t); // unqualified call, consistent with Vec2.lerp
        const v0v1 = v1.sub(v0);
        return v0v1.mulAdd(tttt, v0);
    }
    /// Componentwise reciprocal (1 / v).
    /// Debug-asserts every component is bounded away from zero; the previous
    /// all-zero-vector check let inputs with a single zero component through.
    pub inline fn rcp(v: Vec4) Vec4 {
        assert(!math.approxEqAbs(f32, v.c[0], 0.0, epsilon));
        assert(!math.approxEqAbs(f32, v.c[1], 0.0, epsilon));
        assert(!math.approxEqAbs(f32, v.c[2], 0.0, epsilon));
        assert(!math.approxEqAbs(f32, v.c[3], 0.0, epsilon));
        return init(
            1.0 / v.c[0],
            1.0 / v.c[1],
            1.0 / v.c[2],
            1.0 / v.c[3],
        );
    }
    /// Dot product.
    pub inline fn dot(v0: Vec4, v1: Vec4) f32 {
        return v0.c[0] * v1.c[0] +
            v0.c[1] * v1.c[1] +
            v0.c[2] * v1.c[2] +
            v0.c[3] * v1.c[3];
    }
    /// Euclidean length.
    pub inline fn length(v: Vec4) f32 {
        return math.sqrt(dot(v, v));
    }
    /// Squared length; avoids the sqrt when only comparing magnitudes.
    pub inline fn lengthSq(v: Vec4) f32 {
        return dot(v, v);
    }
    /// Scale to unit length; debug-asserts v is not (near) zero-length.
    pub inline fn normalize(v: Vec4) Vec4 {
        const len = length(v);
        assert(!math.approxEqAbs(f32, len, 0.0, epsilon));
        const rcplen = 1.0 / len;
        return v.scale(rcplen);
    }
    /// Full row-vector matrix product: v * m, using all four components.
    pub inline fn transform(v: Vec4, m: Mat4) Vec4 {
        return init(
            v.c[0] * m.r[0].c[0] + v.c[1] * m.r[1].c[0] + v.c[2] * m.r[2].c[0] + v.c[3] * m.r[3].c[0],
            v.c[0] * m.r[0].c[1] + v.c[1] * m.r[1].c[1] + v.c[2] * m.r[2].c[1] + v.c[3] * m.r[3].c[1],
            v.c[0] * m.r[0].c[2] + v.c[1] * m.r[1].c[2] + v.c[2] * m.r[2].c[2] + v.c[3] * m.r[3].c[2],
            v.c[0] * m.r[0].c[3] + v.c[1] * m.r[1].c[3] + v.c[2] * m.r[2].c[3] + v.c[3] * m.r[3].c[3],
        );
    }
};
/// Quaternion stored as [x, y, z, w] (w is the scalar part), C-compatible layout.
pub const Quat = extern struct {
    q: [4]f32,
    /// Build from individual components.
    pub inline fn init(x: f32, y: f32, z: f32, w: f32) Quat {
        return .{ .q = [_]f32{ x, y, z, w } };
    }
    /// The all-zero quaternion (not a valid rotation).
    pub inline fn initZero() Quat {
        const static = struct {
            const zero = init(0.0, 0.0, 0.0, 0.0);
        };
        return static.zero;
    }
    /// The identity rotation (0, 0, 0, 1).
    pub inline fn initIdentity() Quat {
        const static = struct {
            const identity = init(0.0, 0.0, 0.0, 1.0);
        };
        return static.identity;
    }
    /// Componentwise comparison with absolute tolerance `eps`.
    /// NOTE(review): q and -q represent the same rotation but compare unequal here.
    pub inline fn approxEq(a: Quat, b: Quat, eps: f32) bool {
        return math.approxEqAbs(f32, a.q[0], b.q[0], eps) and
            math.approxEqAbs(f32, a.q[1], b.q[1], eps) and
            math.approxEqAbs(f32, a.q[2], b.q[2], eps) and
            math.approxEqAbs(f32, a.q[3], b.q[3], eps);
    }
    /// Componentwise addition.
    pub inline fn add(a: Quat, b: Quat) Quat {
        return init(
            a.q[0] + b.q[0],
            a.q[1] + b.q[1],
            a.q[2] + b.q[2],
            a.q[3] + b.q[3],
        );
    }
    pub inline fn mul(a: Quat, b: Quat) Quat {
        // Returns the product b * a (which is the concatenation of a rotation 'a' followed by the rotation 'b').
        return init(
            (b.q[3] * a.q[0]) + (b.q[0] * a.q[3]) + (b.q[1] * a.q[2]) - (b.q[2] * a.q[1]),
            (b.q[3] * a.q[1]) - (b.q[0] * a.q[2]) + (b.q[1] * a.q[3]) + (b.q[2] * a.q[0]),
            (b.q[3] * a.q[2]) + (b.q[0] * a.q[1]) - (b.q[1] * a.q[0]) + (b.q[2] * a.q[3]),
            (b.q[3] * a.q[3]) - (b.q[0] * a.q[0]) - (b.q[1] * a.q[1]) - (b.q[2] * a.q[2]),
        );
    }
    /// Multiply each component by the scalar b.
    pub inline fn scale(a: Quat, b: f32) Quat {
        return init(
            a.q[0] * b,
            a.q[1] * b,
            a.q[2] * b,
            a.q[3] * b,
        );
    }
    /// Four-component dot product.
    pub inline fn dot(a: Quat, b: Quat) f32 {
        return a.q[0] * b.q[0] +
            a.q[1] * b.q[1] +
            a.q[2] * b.q[2] +
            a.q[3] * b.q[3];
    }
    /// Euclidean norm.
    pub inline fn length(a: Quat) f32 {
        return math.sqrt(dot(a, a));
    }
    /// Squared norm.
    pub inline fn lengthSq(a: Quat) f32 {
        return dot(a, a);
    }
    /// Scale to unit norm; debug-asserts a is not (near) zero.
    pub inline fn normalize(a: Quat) Quat {
        const len = length(a);
        assert(!math.approxEqAbs(f32, len, 0.0, epsilon));
        const rcplen = 1.0 / len;
        return a.scale(rcplen);
    }
    /// Conjugate: negate the vector part, keep w.
    pub inline fn conj(a: Quat) Quat {
        return init(
            -a.q[0],
            -a.q[1],
            -a.q[2],
            a.q[3],
        );
    }
    /// Inverse: conj(a) / |a|^2. Debug-asserts a is not (near) zero.
    pub inline fn inv(a: Quat) Quat {
        const lensq = lengthSq(a);
        const con = conj(a);
        assert(!math.approxEqAbs(f32, lensq, 0.0, epsilon));
        const rcp_lensq = 1.0 / lensq;
        return con.scale(rcp_lensq);
    }
    /// Spherical linear interpolation from a (t = 0) to b (t = 1).
    /// NOTE(review): asserts when a and b are (nearly) identical, since then
    /// angle ~ 0 and sin(angle) ~ 0; there is no lerp fallback. Also does not
    /// negate b when dot(a, b) < 0, so interpolation may take the long way
    /// around. Confirm callers guarantee these preconditions.
    pub inline fn slerp(a: Quat, b: Quat, t: f32) Quat {
        const cos_angle = dot(a, b);
        const angle = math.acos(cos_angle);
        const fa = math.sin((1.0 - t) * angle);
        const fb = math.sin(t * angle);
        const sin_angle = math.sin(angle);
        assert(!math.approxEqAbs(f32, sin_angle, 0.0, epsilon));
        const rcp_sin_angle = 1.0 / sin_angle;
        const ra = a.scale(fa);
        const rb = b.scale(fb);
        return ra.add(rb).scale(rcp_sin_angle);
    }
    /// Extract a rotation quaternion from the upper-left 3x3 of m.
    /// Branches on which quaternion component is largest (per the inline
    /// squared-magnitude comparisons) so the division below is well-conditioned.
    pub fn initRotationMat4(m: Mat4) Quat {
        var q: Quat = undefined;
        const r22 = m.r[2].c[2];
        if (r22 <= 0.0) // x^2 + y^2 >= z^2 + w^2
        {
            const dif10 = m.r[1].c[1] - m.r[0].c[0];
            const omr22 = 1.0 - r22;
            if (dif10 <= 0.0) // x^2 >= y^2
            {
                const fourXSqr = omr22 - dif10;
                const inv4x = 0.5 / math.sqrt(fourXSqr);
                q.q[0] = fourXSqr * inv4x;
                q.q[1] = (m.r[0].c[1] + m.r[1].c[0]) * inv4x;
                q.q[2] = (m.r[0].c[2] + m.r[2].c[0]) * inv4x;
                q.q[3] = (m.r[1].c[2] - m.r[2].c[1]) * inv4x;
            } else // y^2 >= x^2
            {
                const fourYSqr = omr22 + dif10;
                const inv4y = 0.5 / math.sqrt(fourYSqr);
                q.q[0] = (m.r[0].c[1] + m.r[1].c[0]) * inv4y;
                q.q[1] = fourYSqr * inv4y;
                q.q[2] = (m.r[1].c[2] + m.r[2].c[1]) * inv4y;
                q.q[3] = (m.r[2].c[0] - m.r[0].c[2]) * inv4y;
            }
        } else // z^2 + w^2 >= x^2 + y^2
        {
            const sum10 = m.r[1].c[1] + m.r[0].c[0];
            const opr22 = 1.0 + r22;
            if (sum10 <= 0.0) // z^2 >= w^2
            {
                const fourZSqr = opr22 - sum10;
                const inv4z = 0.5 / math.sqrt(fourZSqr);
                q.q[0] = (m.r[0].c[2] + m.r[2].c[0]) * inv4z;
                q.q[1] = (m.r[1].c[2] + m.r[2].c[1]) * inv4z;
                q.q[2] = fourZSqr * inv4z;
                q.q[3] = (m.r[0].c[1] - m.r[1].c[0]) * inv4z;
            } else // w^2 >= z^2
            {
                const fourWSqr = opr22 + sum10;
                const inv4w = 0.5 / math.sqrt(fourWSqr);
                q.q[0] = (m.r[1].c[2] - m.r[2].c[1]) * inv4w;
                q.q[1] = (m.r[2].c[0] - m.r[0].c[2]) * inv4w;
                q.q[2] = (m.r[0].c[1] - m.r[1].c[0]) * inv4w;
                q.q[3] = fourWSqr * inv4w;
            }
        }
        return q;
    }
    /// Rotation of `angle` radians about an axis that must already be unit length.
    pub fn initRotationNormal(normal_axis: Vec3, angle: f32) Quat {
        const half_angle = 0.5 * angle;
        const sinv = math.sin(half_angle);
        const cosv = math.cos(half_angle);
        return init(
            normal_axis.c[0] * sinv,
            normal_axis.c[1] * sinv,
            normal_axis.c[2] * sinv,
            cosv,
        );
    }
    /// Rotation of `angle` radians about an arbitrary (non-zero) axis;
    /// normalizes the axis first.
    pub fn initRotationAxis(axis: Vec3, angle: f32) Quat {
        assert(!axis.approxEq(Vec3.initZero(), epsilon));
        const n = axis.normalize();
        return initRotationNormal(n, angle);
    }
};
/// 4x4 f32 matrix stored as four row vectors, C-compatible layout.
/// Uses the row-vector convention (see Vec3.transform): translation lives in row 3.
pub const Mat4 = extern struct {
    r: [4]Vec4,
    /// Build from 16 scalars, row by row.
    pub fn init(
        // zig fmt: off
        r0x: f32, r0y: f32, r0z: f32, r0w: f32,
        r1x: f32, r1y: f32, r1z: f32, r1w: f32,
        r2x: f32, r2y: f32, r2z: f32, r2w: f32,
        r3x: f32, r3y: f32, r3z: f32, r3w: f32,
        // zig fmt: on
    ) Mat4 {
        return initVec4(
            Vec4.init(r0x, r0y, r0z, r0w),
            Vec4.init(r1x, r1y, r1z, r1w),
            Vec4.init(r2x, r2y, r2z, r2w),
            Vec4.init(r3x, r3y, r3z, r3w),
        );
    }
    /// Build from four row vectors.
    pub fn initVec4(r0: Vec4, r1: Vec4, r2: Vec4, r3: Vec4) Mat4 {
        return .{ .r = [4]Vec4{ r0, r1, r2, r3 } };
    }
    /// Build from a 4x3 array; the fourth column becomes (0, 0, 0, 1).
    pub fn initArray4x3(a: [4][3]f32) Mat4 {
        // zig fmt: off
        return init(
            a[0][0], a[0][1], a[0][2], 0.0,
            a[1][0], a[1][1], a[1][2], 0.0,
            a[2][0], a[2][1], a[2][2], 0.0,
            a[3][0], a[3][1], a[3][2], 1.0,
        );
        // zig fmt: on
    }
    /// Extract the first three columns of each row (drops the w column).
    pub fn toArray4x3(m: Mat4) [4][3]f32 {
        return [4][3]f32{
            [3]f32{ m.r[0].c[0], m.r[0].c[1], m.r[0].c[2] },
            [3]f32{ m.r[1].c[0], m.r[1].c[1], m.r[1].c[2] },
            [3]f32{ m.r[2].c[0], m.r[2].c[1], m.r[2].c[2] },
            [3]f32{ m.r[3].c[0], m.r[3].c[1], m.r[3].c[2] },
        };
    }
    /// Elementwise comparison with absolute tolerance `eps`.
    pub inline fn approxEq(a: Mat4, b: Mat4, eps: f32) bool {
        return math.approxEqAbs(f32, a.r[0].c[0], b.r[0].c[0], eps) and
            math.approxEqAbs(f32, a.r[0].c[1], b.r[0].c[1], eps) and
            math.approxEqAbs(f32, a.r[0].c[2], b.r[0].c[2], eps) and
            math.approxEqAbs(f32, a.r[0].c[3], b.r[0].c[3], eps) and
            math.approxEqAbs(f32, a.r[1].c[0], b.r[1].c[0], eps) and
            math.approxEqAbs(f32, a.r[1].c[1], b.r[1].c[1], eps) and
            math.approxEqAbs(f32, a.r[1].c[2], b.r[1].c[2], eps) and
            math.approxEqAbs(f32, a.r[1].c[3], b.r[1].c[3], eps) and
            math.approxEqAbs(f32, a.r[2].c[0], b.r[2].c[0], eps) and
            math.approxEqAbs(f32, a.r[2].c[1], b.r[2].c[1], eps) and
            math.approxEqAbs(f32, a.r[2].c[2], b.r[2].c[2], eps) and
            math.approxEqAbs(f32, a.r[2].c[3], b.r[2].c[3], eps) and
            math.approxEqAbs(f32, a.r[3].c[0], b.r[3].c[0], eps) and
            math.approxEqAbs(f32, a.r[3].c[1], b.r[3].c[1], eps) and
            math.approxEqAbs(f32, a.r[3].c[2], b.r[3].c[2], eps) and
            math.approxEqAbs(f32, a.r[3].c[3], b.r[3].c[3], eps);
    }
    /// Matrix transpose.
    pub fn transpose(a: Mat4) Mat4 {
        return initVec4(
            Vec4.init(a.r[0].c[0], a.r[1].c[0], a.r[2].c[0], a.r[3].c[0]),
            Vec4.init(a.r[0].c[1], a.r[1].c[1], a.r[2].c[1], a.r[3].c[1]),
            Vec4.init(a.r[0].c[2], a.r[1].c[2], a.r[2].c[2], a.r[3].c[2]),
            Vec4.init(a.r[0].c[3], a.r[1].c[3], a.r[2].c[3], a.r[3].c[3]),
        );
    }
    /// Multiply every element by the scalar s.
    pub fn scale(m: Mat4, s: f32) Mat4 {
        return initVec4(
            m.r[0].scale(s),
            m.r[1].scale(s),
            m.r[2].scale(s),
            m.r[3].scale(s),
        );
    }
    /// Matrix product a * b (row i of result = row i of a times b).
    pub fn mul(a: Mat4, b: Mat4) Mat4 {
        // zig fmt: off
        return init(
            a.r[0].c[0] * b.r[0].c[0] + a.r[0].c[1] * b.r[1].c[0] + a.r[0].c[2] * b.r[2].c[0] + a.r[0].c[3] * b.r[3].c[0],
            a.r[0].c[0] * b.r[0].c[1] + a.r[0].c[1] * b.r[1].c[1] + a.r[0].c[2] * b.r[2].c[1] + a.r[0].c[3] * b.r[3].c[1],
            a.r[0].c[0] * b.r[0].c[2] + a.r[0].c[1] * b.r[1].c[2] + a.r[0].c[2] * b.r[2].c[2] + a.r[0].c[3] * b.r[3].c[2],
            a.r[0].c[0] * b.r[0].c[3] + a.r[0].c[1] * b.r[1].c[3] + a.r[0].c[2] * b.r[2].c[3] + a.r[0].c[3] * b.r[3].c[3],
            a.r[1].c[0] * b.r[0].c[0] + a.r[1].c[1] * b.r[1].c[0] + a.r[1].c[2] * b.r[2].c[0] + a.r[1].c[3] * b.r[3].c[0],
            a.r[1].c[0] * b.r[0].c[1] + a.r[1].c[1] * b.r[1].c[1] + a.r[1].c[2] * b.r[2].c[1] + a.r[1].c[3] * b.r[3].c[1],
            a.r[1].c[0] * b.r[0].c[2] + a.r[1].c[1] * b.r[1].c[2] + a.r[1].c[2] * b.r[2].c[2] + a.r[1].c[3] * b.r[3].c[2],
            a.r[1].c[0] * b.r[0].c[3] + a.r[1].c[1] * b.r[1].c[3] + a.r[1].c[2] * b.r[2].c[3] + a.r[1].c[3] * b.r[3].c[3],
            a.r[2].c[0] * b.r[0].c[0] + a.r[2].c[1] * b.r[1].c[0] + a.r[2].c[2] * b.r[2].c[0] + a.r[2].c[3] * b.r[3].c[0],
            a.r[2].c[0] * b.r[0].c[1] + a.r[2].c[1] * b.r[1].c[1] + a.r[2].c[2] * b.r[2].c[1] + a.r[2].c[3] * b.r[3].c[1],
            a.r[2].c[0] * b.r[0].c[2] + a.r[2].c[1] * b.r[1].c[2] + a.r[2].c[2] * b.r[2].c[2] + a.r[2].c[3] * b.r[3].c[2],
            a.r[2].c[0] * b.r[0].c[3] + a.r[2].c[1] * b.r[1].c[3] + a.r[2].c[2] * b.r[2].c[3] + a.r[2].c[3] * b.r[3].c[3],
            a.r[3].c[0] * b.r[0].c[0] + a.r[3].c[1] * b.r[1].c[0] + a.r[3].c[2] * b.r[2].c[0] + a.r[3].c[3] * b.r[3].c[0],
            a.r[3].c[0] * b.r[0].c[1] + a.r[3].c[1] * b.r[1].c[1] + a.r[3].c[2] * b.r[2].c[1] + a.r[3].c[3] * b.r[3].c[1],
            a.r[3].c[0] * b.r[0].c[2] + a.r[3].c[1] * b.r[1].c[2] + a.r[3].c[2] * b.r[2].c[2] + a.r[3].c[3] * b.r[3].c[2],
            a.r[3].c[0] * b.r[0].c[3] + a.r[3].c[1] * b.r[1].c[3] + a.r[3].c[2] * b.r[2].c[3] + a.r[3].c[3] * b.r[3].c[3],
        );
        // zig fmt: on
    }
    /// Matrix inverse.
    /// Writes the determinant through `out_det` when non-null. If the
    /// determinant is within epsilon of zero, returns the ZERO matrix
    /// (callers must check the determinant, no error is raised).
    /// The body computes cofactors via Vec4 lane shuffles of the transpose;
    /// the exact statement order is load-bearing — do not reorder.
    pub fn inv(m: Mat4, out_det: ?*f32) Mat4 {
        const mt = m.transpose();
        var v0: [4]Vec4 = undefined;
        var v1: [4]Vec4 = undefined;
        // 2x2 sub-determinant products (d0, d1, d2).
        v0[0] = Vec4.init(mt.r[2].c[0], mt.r[2].c[0], mt.r[2].c[1], mt.r[2].c[1]);
        v1[0] = Vec4.init(mt.r[3].c[2], mt.r[3].c[3], mt.r[3].c[2], mt.r[3].c[3]);
        v0[1] = Vec4.init(mt.r[0].c[0], mt.r[0].c[0], mt.r[0].c[1], mt.r[0].c[1]);
        v1[1] = Vec4.init(mt.r[1].c[2], mt.r[1].c[3], mt.r[1].c[2], mt.r[1].c[3]);
        v0[2] = Vec4.init(mt.r[2].c[0], mt.r[2].c[2], mt.r[0].c[0], mt.r[0].c[2]);
        v1[2] = Vec4.init(mt.r[3].c[1], mt.r[3].c[3], mt.r[1].c[1], mt.r[1].c[3]);
        var d0 = v0[0].mul(v1[0]);
        var d1 = v0[1].mul(v1[1]);
        var d2 = v0[2].mul(v1[2]);
        v0[0] = Vec4.init(mt.r[2].c[2], mt.r[2].c[3], mt.r[2].c[2], mt.r[2].c[3]);
        v1[0] = Vec4.init(mt.r[3].c[0], mt.r[3].c[0], mt.r[3].c[1], mt.r[3].c[1]);
        v0[1] = Vec4.init(mt.r[0].c[2], mt.r[0].c[3], mt.r[0].c[2], mt.r[0].c[3]);
        v1[1] = Vec4.init(mt.r[1].c[0], mt.r[1].c[0], mt.r[1].c[1], mt.r[1].c[1]);
        v0[2] = Vec4.init(mt.r[2].c[1], mt.r[2].c[3], mt.r[0].c[1], mt.r[0].c[3]);
        v1[2] = Vec4.init(mt.r[3].c[0], mt.r[3].c[2], mt.r[1].c[0], mt.r[1].c[2]);
        d0 = v0[0].negMulAdd(v1[0], d0);
        d1 = v0[1].negMulAdd(v1[1], d1);
        d2 = v0[2].negMulAdd(v1[2], d2);
        // Cofactor accumulation (c0..c7 interleave the adjugate's columns).
        v0[0] = Vec4.init(mt.r[1].c[1], mt.r[1].c[2], mt.r[1].c[0], mt.r[1].c[1]);
        v1[0] = Vec4.init(d2.c[1], d0.c[1], d0.c[3], d0.c[0]);
        v0[1] = Vec4.init(mt.r[0].c[2], mt.r[0].c[0], mt.r[0].c[1], mt.r[0].c[0]);
        v1[1] = Vec4.init(d0.c[3], d2.c[1], d0.c[1], d0.c[2]);
        v0[2] = Vec4.init(mt.r[3].c[1], mt.r[3].c[2], mt.r[3].c[0], mt.r[3].c[1]);
        v1[2] = Vec4.init(d2.c[3], d1.c[1], d1.c[3], d1.c[0]);
        v0[3] = Vec4.init(mt.r[2].c[2], mt.r[2].c[0], mt.r[2].c[1], mt.r[2].c[0]);
        v1[3] = Vec4.init(d1.c[3], d2.c[3], d1.c[1], d1.c[2]);
        var c0 = v0[0].mul(v1[0]);
        var c2 = v0[1].mul(v1[1]);
        var c4 = v0[2].mul(v1[2]);
        var c6 = v0[3].mul(v1[3]);
        v0[0] = Vec4.init(mt.r[1].c[2], mt.r[1].c[3], mt.r[1].c[1], mt.r[1].c[2]);
        v1[0] = Vec4.init(d0.c[3], d0.c[0], d0.c[1], d2.c[0]);
        v0[1] = Vec4.init(mt.r[0].c[3], mt.r[0].c[2], mt.r[0].c[3], mt.r[0].c[1]);
        v1[1] = Vec4.init(d0.c[2], d0.c[1], d2.c[0], d0.c[0]);
        v0[2] = Vec4.init(mt.r[3].c[2], mt.r[3].c[3], mt.r[3].c[1], mt.r[3].c[2]);
        v1[2] = Vec4.init(d1.c[3], d1.c[0], d1.c[1], d2.c[2]);
        v0[3] = Vec4.init(mt.r[2].c[3], mt.r[2].c[2], mt.r[2].c[3], mt.r[2].c[1]);
        v1[3] = Vec4.init(d1.c[2], d1.c[1], d2.c[2], d1.c[0]);
        c0 = v0[0].negMulAdd(v1[0], c0);
        c2 = v0[1].negMulAdd(v1[1], c2);
        c4 = v0[2].negMulAdd(v1[2], c4);
        c6 = v0[3].negMulAdd(v1[3], c6);
        v0[0] = Vec4.init(mt.r[1].c[3], mt.r[1].c[0], mt.r[1].c[3], mt.r[1].c[0]);
        v1[0] = Vec4.init(d0.c[2], d2.c[1], d2.c[0], d0.c[2]);
        v0[1] = Vec4.init(mt.r[0].c[1], mt.r[0].c[3], mt.r[0].c[0], mt.r[0].c[2]);
        v1[1] = Vec4.init(d2.c[1], d0.c[0], d0.c[3], d2.c[0]);
        v0[2] = Vec4.init(mt.r[3].c[3], mt.r[3].c[0], mt.r[3].c[3], mt.r[3].c[0]);
        v1[2] = Vec4.init(d1.c[2], d2.c[3], d2.c[2], d1.c[2]);
        v0[3] = Vec4.init(mt.r[2].c[1], mt.r[2].c[3], mt.r[2].c[0], mt.r[2].c[2]);
        v1[3] = Vec4.init(d2.c[3], d1.c[0], d1.c[3], d2.c[2]);
        const c1 = v0[0].negMulAdd(v1[0], c0);
        c0 = v0[0].mulAdd(v1[0], c0);
        const c3 = v0[1].mulAdd(v1[1], c2);
        c2 = v0[1].negMulAdd(v1[1], c2);
        const c5 = v0[2].negMulAdd(v1[2], c4);
        c4 = v0[2].mulAdd(v1[2], c4);
        const c7 = v0[3].mulAdd(v1[3], c6);
        c6 = v0[3].negMulAdd(v1[3], c6);
        // Adjugate assembled from alternating cofactor lanes.
        var mr = Mat4.initVec4(
            Vec4.init(c0.c[0], c1.c[1], c0.c[2], c1.c[3]),
            Vec4.init(c2.c[0], c3.c[1], c2.c[2], c3.c[3]),
            Vec4.init(c4.c[0], c5.c[1], c4.c[2], c5.c[3]),
            Vec4.init(c6.c[0], c7.c[1], c6.c[2], c7.c[3]),
        );
        // Determinant = first adjugate row dotted with first transposed row.
        const d = mr.r[0].dot(mt.r[0]);
        if (out_det != null) {
            out_det.?.* = d;
        }
        if (math.approxEqAbs(f32, d, 0.0, epsilon)) {
            return initZero();
        }
        return mr.scale(1.0 / d);
    }
    /// Determinant, via cofactor expansion along row 0 with alternating signs.
    pub fn det(a: Mat4) f32 {
        const static = struct {
            const sign = Vec4.init(1.0, -1.0, 1.0, -1.0);
        };
        // p0/p1/p2 hold 2x2 sub-determinants of rows 2 and 3.
        var v0 = Vec4.init(a.r[2].c[1], a.r[2].c[0], a.r[2].c[0], a.r[2].c[0]);
        var v1 = Vec4.init(a.r[3].c[2], a.r[3].c[2], a.r[3].c[1], a.r[3].c[1]);
        var v2 = Vec4.init(a.r[2].c[1], a.r[2].c[0], a.r[2].c[0], a.r[2].c[0]);
        var v3 = Vec4.init(a.r[3].c[3], a.r[3].c[3], a.r[3].c[3], a.r[3].c[2]);
        var v4 = Vec4.init(a.r[2].c[2], a.r[2].c[2], a.r[2].c[1], a.r[2].c[1]);
        var v5 = Vec4.init(a.r[3].c[3], a.r[3].c[3], a.r[3].c[3], a.r[3].c[2]);
        var p0 = v0.mul(v1);
        var p1 = v2.mul(v3);
        var p2 = v4.mul(v5);
        v0 = Vec4.init(a.r[2].c[2], a.r[2].c[2], a.r[2].c[1], a.r[2].c[1]);
        v1 = Vec4.init(a.r[3].c[1], a.r[3].c[0], a.r[3].c[0], a.r[3].c[0]);
        v2 = Vec4.init(a.r[2].c[3], a.r[2].c[3], a.r[2].c[3], a.r[2].c[2]);
        v3 = Vec4.init(a.r[3].c[1], a.r[3].c[0], a.r[3].c[0], a.r[3].c[0]);
        v4 = Vec4.init(a.r[2].c[3], a.r[2].c[3], a.r[2].c[3], a.r[2].c[2]);
        v5 = Vec4.init(a.r[3].c[2], a.r[3].c[2], a.r[3].c[1], a.r[3].c[1]);
        p0 = v0.negMulAdd(v1, p0);
        p1 = v2.negMulAdd(v3, p1);
        p2 = v4.negMulAdd(v5, p2);
        // Combine with row 1 to form the four 3x3 cofactors, then dot with
        // the sign-alternated row 0.
        v0 = Vec4.init(a.r[1].c[3], a.r[1].c[3], a.r[1].c[3], a.r[1].c[2]);
        v1 = Vec4.init(a.r[1].c[2], a.r[1].c[2], a.r[1].c[1], a.r[1].c[1]);
        v2 = Vec4.init(a.r[1].c[1], a.r[1].c[0], a.r[1].c[0], a.r[1].c[0]);
        // NOTE(review): 's' is never reassigned and could be const.
        var s = a.r[0].mul(static.sign);
        var r = v0.mul(p0);
        r = v1.negMulAdd(p1, r);
        r = v2.mulAdd(p2, r);
        return s.dot(r);
    }
    /// Rotation about the x axis by `angle` radians.
    pub fn initRotationX(angle: f32) Mat4 {
        const sinv = math.sin(angle);
        const cosv = math.cos(angle);
        // zig fmt: off
        return init(
            1.0, 0.0, 0.0, 0.0,
            0.0, cosv, sinv, 0.0,
            0.0, -sinv, cosv, 0.0,
            0.0, 0.0, 0.0, 1.0,
        );
        // zig fmt: on
    }
    /// Rotation about the y axis by `angle` radians.
    pub fn initRotationY(angle: f32) Mat4 {
        const sinv = math.sin(angle);
        const cosv = math.cos(angle);
        // zig fmt: off
        return init(
            cosv, 0.0, -sinv, 0.0,
            0.0, 1.0, 0.0, 0.0,
            sinv, 0.0, cosv, 0.0,
            0.0, 0.0, 0.0, 1.0,
        );
        // zig fmt: on
    }
    /// Rotation about the z axis by `angle` radians.
    pub fn initRotationZ(angle: f32) Mat4 {
        const sinv = math.sin(angle);
        const cosv = math.cos(angle);
        // zig fmt: off
        return init(
            cosv, sinv, 0.0, 0.0,
            -sinv, cosv, 0.0, 0.0,
            0.0, 0.0, 1.0, 0.0,
            0.0, 0.0, 0.0, 1.0,
        );
        // zig fmt: on
    }
    /// Rotation matrix from a quaternion. Assembled from lane shuffles of
    /// q + q and q * (q + q); statement order is load-bearing.
    pub fn initRotationQuat(q: Quat) Mat4 {
        const static = struct {
            const const1110 = Vec4.init(1.0, 1.0, 1.0, 0.0);
        };
        const vin = Vec4.init(q.q[0], q.q[1], q.q[2], q.q[3]);
        const q0 = vin.add(vin);
        const q1 = vin.mul(q0);
        var v0 = Vec4.init(q1.c[1], q1.c[0], q1.c[0], static.const1110.c[3]);
        var v1 = Vec4.init(q1.c[2], q1.c[2], q1.c[1], static.const1110.c[3]);
        var r0 = static.const1110.sub(v0);
        r0 = r0.sub(v1);
        v0 = Vec4.init(vin.c[0], vin.c[0], vin.c[1], vin.c[3]);
        v1 = Vec4.init(q0.c[2], q0.c[1], q0.c[2], q0.c[3]);
        v0 = v0.mul(v1);
        v1 = Vec4.init(vin.c[3], vin.c[3], vin.c[3], vin.c[3]);
        var v2 = Vec4.init(q0.c[1], q0.c[2], q0.c[0], q0.c[3]);
        v1 = v1.mul(v2);
        var r1 = v0.add(v1);
        var r2 = v0.sub(v1);
        v0 = Vec4.init(r1.c[1], r2.c[0], r2.c[1], r1.c[2]);
        v1 = Vec4.init(r1.c[0], r2.c[2], r1.c[0], r2.c[2]);
        return Mat4.initVec4(
            Vec4.init(r0.c[0], v0.c[0], v0.c[1], r0.c[3]),
            Vec4.init(v0.c[2], r0.c[1], v0.c[3], r0.c[3]),
            Vec4.init(v1.c[0], v1.c[1], r0.c[2], r0.c[3]),
            Vec4.init(0.0, 0.0, 0.0, 1.0),
        );
    }
    /// Left-handed perspective projection.
    /// `fovy` is the vertical field of view in radians. By the r / (-r*near)
    /// terms below, z = near maps to depth 0 and z = far maps to depth 1.
    pub fn initPerspectiveFovLh(fovy: f32, aspect: f32, near: f32, far: f32) Mat4 {
        const sinfov = math.sin(0.5 * fovy);
        const cosfov = math.cos(0.5 * fovy);
        assert(near > 0.0 and far > 0.0 and far > near);
        assert(!math.approxEqAbs(f32, sinfov, 0.0, 0.001));
        assert(!math.approxEqAbs(f32, far, near, 0.001));
        assert(!math.approxEqAbs(f32, aspect, 0.0, 0.01));
        const h = cosfov / sinfov;
        const w = h / aspect;
        const r = far / (far - near);
        // zig fmt: off
        return init(
            w, 0.0, 0.0, 0.0,
            0.0, h, 0.0, 0.0,
            0.0, 0.0, r, 1.0,
            0.0, 0.0, -r * near, 0.0,
        );
        // zig fmt: on
    }
    /// The all-zero matrix.
    pub inline fn initZero() Mat4 {
        const static = struct {
            // zig fmt: off
            const zero = init(
                0.0, 0.0, 0.0, 0.0,
                0.0, 0.0, 0.0, 0.0,
                0.0, 0.0, 0.0, 0.0,
                0.0, 0.0, 0.0, 0.0,
            );
            // zig fmt: on
        };
        return static.zero;
    }
    /// The identity matrix.
    pub inline fn initIdentity() Mat4 {
        const static = struct {
            // zig fmt: off
            const identity = init(
                1.0, 0.0, 0.0, 0.0,
                0.0, 1.0, 0.0, 0.0,
                0.0, 0.0, 1.0, 0.0,
                0.0, 0.0, 0.0, 1.0,
            );
            // zig fmt: on
        };
        return static.identity;
    }
    /// Translation matrix (translation stored in row 3, per the
    /// row-vector convention).
    pub fn initTranslation(v: Vec3) Mat4 {
        // zig fmt: off
        return init(
            1.0, 0.0, 0.0, 0.0,
            0.0, 1.0, 0.0, 0.0,
            0.0, 0.0, 1.0, 0.0,
            v.c[0], v.c[1], v.c[2], 1.0,
        );
        // zig fmt: on
    }
    /// Non-uniform scaling matrix.
    pub fn initScaling(v: Vec3) Mat4 {
        // zig fmt: off
        return init(
            v.c[0], 0.0, 0.0, 0.0,
            0.0, v.c[1], 0.0, 0.0,
            0.0, 0.0, v.c[2], 0.0,
            0.0, 0.0, 0.0, 1.0,
        );
        // zig fmt: on
    }
    /// Diagonal matrix with s on all four diagonal entries (note: scales w too).
    pub fn initDiagonal(s: f32) Mat4 {
        // zig fmt: off
        return init(
            s, 0.0, 0.0, 0.0,
            0.0, s, 0.0, 0.0,
            0.0, 0.0, s, 0.0,
            0.0, 0.0, 0.0, s,
        );
        // zig fmt: on
    }
    /// Left-handed view matrix from an eye position and a view DIRECTION.
    pub fn initLookToLh(eye_pos: Vec3, eye_dir: Vec3, up_dir: Vec3) Mat4 {
        const az = Vec3.normalize(eye_dir);
        const ax = Vec3.normalize(Vec3.cross(up_dir, az));
        const ay = Vec3.normalize(Vec3.cross(az, ax));
        // zig fmt: off
        return init(
            ax.c[0], ay.c[0], az.c[0], 0.0,
            ax.c[1], ay.c[1], az.c[1], 0.0,
            ax.c[2], ay.c[2], az.c[2], 0.0,
            -Vec3.dot(ax, eye_pos), -Vec3.dot(ay, eye_pos), -Vec3.dot(az, eye_pos), 1.0,
        );
        // zig fmt: on
    }
    /// Left-handed view matrix from an eye position and a TARGET point.
    pub inline fn initLookAtLh(eye_pos: Vec3, focus_pos: Vec3, up_dir: Vec3) Mat4 {
        return initLookToLh(eye_pos, focus_pos.sub(eye_pos), up_dir);
    }
    /// Left-handed off-center orthographic projection.
    /// By the range / (-range*near_z) terms, z = near_z maps to depth 0 and
    /// z = far_z maps to depth 1.
    pub fn initOrthoOffCenterLh(
        view_left: f32,
        view_right: f32,
        view_bottom: f32,
        view_top: f32,
        near_z: f32,
        far_z: f32,
    ) Mat4 {
        assert(!math.approxEqAbs(f32, view_right, view_left, 0.001));
        assert(!math.approxEqAbs(f32, view_top, view_bottom, 0.001));
        assert(!math.approxEqAbs(f32, far_z, near_z, 0.001));
        const rcp_w = 1.0 / (view_right - view_left);
        const rcp_h = 1.0 / (view_top - view_bottom);
        const range = 1.0 / (far_z - near_z);
        // zig fmt: off
        return init(
            rcp_w + rcp_w, 0.0, 0.0, 0.0,
            0.0, rcp_h + rcp_h, 0.0, 0.0,
            0.0, 0.0, range, 0.0,
            -(view_left + view_right) * rcp_w, -(view_top + view_bottom) * rcp_h, -range * near_z, 1.0,
        );
        // zig fmt: on
    }
};
test "dot" {
    // Hand-computed dot products for each vector width.
    {
        const p = Vec2.init(1.0, 2.0);
        const q = Vec2.init(3.0, 4.0);
        assert(math.approxEqAbs(f32, Vec2.dot(p, q), 11.0, epsilon));
    }
    {
        const p = Vec3.init(1.0, 2.0, 3.0);
        const q = Vec3.init(4.0, 5.0, 6.0);
        assert(math.approxEqAbs(f32, Vec3.dot(p, q), 32.0, epsilon));
    }
    {
        const p = Vec4.init(1.0, 2.0, 3.0, 4.0);
        const q = Vec4.init(5.0, 6.0, 7.0, 8.0);
        assert(math.approxEqAbs(f32, Vec4.dot(p, q), 70.0, epsilon));
    }
}
test "cross" {
    // Unit-axis case and one non-axis case, both hand-computed.
    {
        const x_axis = Vec3.init(1.0, 0.0, 0.0);
        const y_axis = Vec3.init(0.0, 1.0, 0.0);
        assert(Vec3.cross(x_axis, y_axis).approxEq(Vec3.init(0.0, 0.0, 1.0), epsilon));
    }
    {
        const u = Vec3.init(0.0, 0.0, -1.0);
        const w = Vec3.init(1.0, 0.0, 0.0);
        assert(Vec3.cross(u, w).approxEq(Vec3.init(0.0, -1.0, 0.0), epsilon));
    }
}
test "VecN add, sub, scale" {
    // Componentwise arithmetic across Vec2/Vec3/Vec4, hand-computed expectations.
    // add:
    {
        const a = Vec2.init(1.0, 2.0);
        const b = Vec2.init(3.0, 4.0);
        assert(a.add(b).approxEq(Vec2.init(4.0, 6.0), epsilon));
    }
    {
        const a = Vec3.init(1.0, 2.0, 3.0);
        const b = Vec3.init(3.0, 4.0, 5.0);
        assert(a.add(b).approxEq(Vec3.init(4.0, 6.0, 8.0), epsilon));
    }
    {
        const a = Vec4.init(1.0, 2.0, 3.0, -1.0);
        const b = Vec4.init(3.0, 4.0, 5.0, 2.0);
        assert(a.add(b).approxEq(Vec4.init(4.0, 6.0, 8.0, 1.0), epsilon));
    }
    // sub:
    {
        const a = Vec2.init(1.0, 2.0);
        const b = Vec2.init(3.0, 4.0);
        assert(a.sub(b).approxEq(Vec2.init(-2.0, -2.0), epsilon));
    }
    {
        const a = Vec3.init(1.0, 2.0, 3.0);
        const b = Vec3.init(3.0, 4.0, 5.0);
        assert(a.sub(b).approxEq(Vec3.init(-2.0, -2.0, -2.0), epsilon));
    }
    {
        const a = Vec4.init(1.0, 2.0, 3.0, -1.0);
        const b = Vec4.init(3.0, 4.0, 5.0, 2.0);
        assert(a.sub(b).approxEq(Vec4.init(-2.0, -2.0, -2.0, -3.0), epsilon));
    }
    // scale (including negative factors):
    {
        const a = Vec2.init(1.0, 2.0);
        assert(a.scale(2.0).approxEq(Vec2.init(2.0, 4.0), epsilon));
    }
    {
        const a = Vec3.init(1.0, 2.0, 3.0);
        assert(a.scale(-1.0).approxEq(Vec3.init(-1.0, -2.0, -3.0), epsilon));
    }
    {
        const a = Vec4.init(1.0, 2.0, 3.0, -1.0);
        assert(a.scale(3.0).approxEq(Vec4.init(3.0, 6.0, 9.0, -3.0), epsilon));
    }
}
test "length, normalize" {
    // Lengths against hand-computed square roots; normalize against
    // pre-computed unit vectors. Quat.normalize is covered last.
    {
        const a = Vec2.init(2.0, 3.0).length();
        assert(math.approxEqAbs(f32, a, 3.60555, epsilon)); // sqrt(13)
    }
    {
        const a = Vec3.init(1.0, 1.0, 1.0).length();
        assert(math.approxEqAbs(f32, a, 1.73205, epsilon)); // sqrt(3)
    }
    {
        const a = Vec4.init(1.0, 1.0, 1.0, 1.0).length();
        assert(math.approxEqAbs(f32, a, 2.0, epsilon)); // sqrt(4)
    }
    {
        const a = Vec2.init(2.0, 4.0).normalize();
        assert(Vec2.approxEq(a, Vec2.init(0.447214, 0.894427), epsilon));
    }
    {
        const a = Vec3.init(2.0, -5.0, 4.0).normalize();
        assert(Vec3.approxEq(a, Vec3.init(0.298142, -0.745356, 0.596285), epsilon));
    }
    {
        const a = Vec4.init(-1.0, 2.0, -5.0, 4.0).normalize();
        assert(Vec4.approxEq(a, Vec4.init(-0.147442, 0.294884, -0.73721, 0.589768), epsilon));
    }
    {
        const a = Quat.init(-1.0, 2.0, -5.0, 4.0).normalize();
        assert(Quat.approxEq(a, Quat.init(-0.147442, 0.294884, -0.73721, 0.589768), epsilon));
    }
}
test "Mat4 transpose" {
    // Transposing the 1..16 matrix must swap rows and columns.
    const original = Mat4.initVec4(
        Vec4.init(1.0, 2.0, 3.0, 4.0),
        Vec4.init(5.0, 6.0, 7.0, 8.0),
        Vec4.init(9.0, 10.0, 11.0, 12.0),
        Vec4.init(13.0, 14.0, 15.0, 16.0),
    );
    const expected = Mat4.initVec4(
        Vec4.init(1.0, 5.0, 9.0, 13.0),
        Vec4.init(2.0, 6.0, 10.0, 14.0),
        Vec4.init(3.0, 7.0, 11.0, 15.0),
        Vec4.init(4.0, 8.0, 12.0, 16.0),
    );
    assert(original.transpose().approxEq(expected, epsilon));
}
test "Mat4 mul" {
    // a * b against a hand-computed product.
    const a = Mat4.initVec4(
        Vec4.init(0.1, 0.2, 0.3, 0.4),
        Vec4.init(0.5, 0.6, 0.7, 0.8),
        Vec4.init(0.9, 1.0, 1.1, 1.2),
        Vec4.init(1.3, 1.4, 1.5, 1.6),
    );
    const b = Mat4.initVec4(
        Vec4.init(1.7, 1.8, 1.9, 2.0),
        Vec4.init(2.1, 2.2, 2.3, 2.4),
        Vec4.init(2.5, 2.6, 2.7, 2.8),
        Vec4.init(2.9, 3.0, 3.1, 3.2),
    );
    const c = a.mul(b);
    assert(c.approxEq(
        Mat4.initVec4(
            Vec4.init(2.5, 2.6, 2.7, 2.8),
            Vec4.init(6.18, 6.44, 6.7, 6.96),
            Vec4.init(9.86, 10.28, 10.7, 11.12),
            Vec4.init(13.54, 14.12, 14.7, 15.28),
        ),
        epsilon,
    ));
}
test "Mat4 inv, det" {
    // Checks det() standalone, then inv() — which must report the same
    // determinant through its out parameter and match the reference inverse.
    var m = Mat4.initVec4(
        Vec4.init(10.0, -9.0, -12.0, 1.0),
        Vec4.init(7.0, -12.0, 11.0, 1.0),
        Vec4.init(-10.0, 10.0, 3.0, 1.0),
        Vec4.init(1.0, 2.0, 3.0, 4.0),
    );
    assert(math.approxEqAbs(f32, m.det(), 2939.0, epsilon));
    var det: f32 = 0.0;
    m = m.inv(&det);
    assert(math.approxEqAbs(f32, det, 2939.0, epsilon));
    assert(m.approxEq(
        Mat4.initVec4(
            Vec4.init(-0.170806, -0.13576, -0.349439, 0.164001),
            Vec4.init(-0.163661, -0.14801, -0.253147, 0.141204),
            Vec4.init(-0.0871045, 0.00646478, -0.0785982, 0.0398095),
            Vec4.init(0.18986, 0.103096, 0.272882, 0.10854),
        ),
        epsilon,
    ));
}
test "Quat mul, inv" {
    // Quaternion multiplication is non-commutative: both orders are pinned.
    {
        const a = Quat.init(2.0, 3.0, 4.0, 1.0);
        const b = Quat.init(6.0, 7.0, 8.0, 5.0);
        assert(a.mul(b).approxEq(Quat.init(20.0, 14.0, 32.0, -60.0), epsilon));
        assert(b.mul(a).approxEq(Quat.init(12.0, 30.0, 24.0, -60.0), epsilon));
    }
    // inv() must leave its argument untouched and return conj(a) / |a|^2.
    {
        const a = Quat.init(2.0, 3.0, 4.0, 1.0);
        const b = a.inv();
        assert(a.approxEq(Quat.init(2.0, 3.0, 4.0, 1.0), epsilon));
        assert(b.approxEq(Quat.init(-0.0666667, -0.1, -0.133333, 0.0333333), epsilon));
    }
}
test "Mat4 transforms" {
    // Compose translate(+x), rotate about y by pi/2, translate(+y), and
    // apply the product to (1, 0, 0); expected result hand-derived.
    const translate_x = Mat4.initTranslation(Vec3.init(1.0, 0.0, 0.0));
    const rotate_y = Mat4.initRotationY(math.pi * 0.5);
    const translate_y = Mat4.initTranslation(Vec3.init(0.0, 1.0, 0.0));
    const point = Vec3.init(1.0, 0.0, 0.0);
    const result = point.transform(translate_x.mul(rotate_y).mul(translate_y));
    assert(result.approxEq(Vec3.init(0.0, 1.0, -2.0), epsilon));
}
test "Mat4 <-> Quat" {
    // Round-trips between rotation matrices and quaternions.
    // Identity round-trips to identity.
    {
        const a = Quat.initRotationMat4(Mat4.initIdentity());
        assert(a.approxEq(Quat.initIdentity(), epsilon));
        const b = Mat4.initRotationQuat(a);
        assert(b.approxEq(Mat4.initIdentity(), epsilon));
    }
    // Translation has no rotation part: extraction yields identity.
    {
        const a = Quat.initRotationMat4(Mat4.initTranslation(Vec3.init(1.0, 2.0, 3.0)));
        assert(a.approxEq(Quat.initIdentity(), epsilon));
        const b = Mat4.initRotationQuat(a);
        assert(b.approxEq(Mat4.initIdentity(), epsilon));
    }
    // Matrix -> quat -> matrix reproduces the original rotation.
    {
        const a = Quat.initRotationMat4(Mat4.initRotationY(math.pi * 0.5));
        const b = Mat4.initRotationQuat(a);
        assert(b.approxEq(Mat4.initRotationY(math.pi * 0.5), epsilon));
    }
    // Extraction agrees with the direct axis-angle constructor.
    {
        const a = Quat.initRotationMat4(Mat4.initRotationY(math.pi * 0.25));
        const b = Quat.initRotationAxis(Vec3.init(0.0, 1.0, 0.0), math.pi * 0.25);
        assert(a.approxEq(b, epsilon));
    }
    // Composition of rotations agrees in both representations.
    {
        const m0 = Mat4.initRotationX(math.pi * 0.125);
        const m1 = Mat4.initRotationY(math.pi * 0.25);
        const m2 = Mat4.initRotationZ(math.pi * 0.5);
        const q0 = Quat.initRotationMat4(m0);
        const q1 = Quat.initRotationMat4(m1);
        const q2 = Quat.initRotationMat4(m2);
        const mr = m0.mul(m1).mul(m2);
        const qr = q0.mul(q1).mul(q2);
        assert(mr.approxEq(Mat4.initRotationQuat(qr), epsilon));
        assert(qr.approxEq(Quat.initRotationMat4(mr), epsilon));
    }
}
test "slerp" {
    // Midpoint interpolation from the identity to a second unit quaternion;
    // expected components hand-derived.
    const start = Quat.init(0.0, 0.0, 0.0, 1.0);
    const finish = Quat.init(0.5, 0.5, -0.5, 0.5);
    const halfway = start.slerp(finish, 0.5);
    assert(halfway.approxEq(Quat.init(0.28867513, 0.28867513, -0.28867513, 0.86602540), epsilon));
}
|
libs/common/src/vectormath.zig
|
// C-ABI declarations mirroring stb_image.h (single-file image decoder).
// NOTE(review): this struct layout matches the MSVC CRT's FILE/_iobuf;
// confirm it is only used opaquely (via [*c]FILE) on other C runtimes.
const struct__iobuf = extern struct {
    _ptr: [*c]u8,
    _cnt: c_int,
    _base: [*c]u8,
    _flag: c_int,
    _file: c_int,
    _charbuf: c_int,
    _bufsiz: c_int,
    _tmpfname: [*c]u8,
};
const FILE = struct__iobuf;
// Pixel component types: 8-bit and 16-bit channels.
pub const stbi_uc = u8;
pub const stbi_us = c_ushort;
// Callback-based input source (read/skip/eof supplied by the caller).
pub const stbi_io_callbacks = extern struct {
    read: ?fn (?*anyopaque, [*c]u8, c_int) callconv(.C) c_int,
    skip: ?fn (?*anyopaque, c_int) callconv(.C) void,
    eof: ?fn (?*anyopaque) callconv(.C) c_int,
};
// 8-bit-per-channel loaders. NOTE(review): per upstream stb_image docs these
// return NULL on failure and the result is released with stbi_image_free —
// confirm against the vendored header version.
pub extern fn stbi_load_from_memory(buffer: [*c]const stbi_uc, len: c_int, x: [*c]c_int, y: [*c]c_int, channels_in_file: [*c]c_int, desired_channels: c_int) [*c]stbi_uc;
pub extern fn stbi_load_from_callbacks(clbk: [*c]const stbi_io_callbacks, user: ?*anyopaque, x: [*c]c_int, y: [*c]c_int, channels_in_file: [*c]c_int, desired_channels: c_int) [*c]stbi_uc;
pub extern fn stbi_load(filename: [*c]const u8, x: [*c]c_int, y: [*c]c_int, channels_in_file: [*c]c_int, desired_channels: c_int) [*c]stbi_uc;
pub extern fn stbi_load_from_file(f: [*c]FILE, x: [*c]c_int, y: [*c]c_int, channels_in_file: [*c]c_int, desired_channels: c_int) [*c]stbi_uc;
pub extern fn stbi_load_gif_from_memory(buffer: [*c]const stbi_uc, len: c_int, delays: [*c][*c]c_int, x: [*c]c_int, y: [*c]c_int, z: [*c]c_int, comp: [*c]c_int, req_comp: c_int) [*c]stbi_uc;
// 16-bit-per-channel loaders.
pub extern fn stbi_load_16_from_memory(buffer: [*c]const stbi_uc, len: c_int, x: [*c]c_int, y: [*c]c_int, channels_in_file: [*c]c_int, desired_channels: c_int) [*c]stbi_us;
pub extern fn stbi_load_16_from_callbacks(clbk: [*c]const stbi_io_callbacks, user: ?*anyopaque, x: [*c]c_int, y: [*c]c_int, channels_in_file: [*c]c_int, desired_channels: c_int) [*c]stbi_us;
pub extern fn stbi_load_16(filename: [*c]const u8, x: [*c]c_int, y: [*c]c_int, channels_in_file: [*c]c_int, desired_channels: c_int) [*c]stbi_us;
pub extern fn stbi_load_from_file_16(f: [*c]FILE, x: [*c]c_int, y: [*c]c_int, channels_in_file: [*c]c_int, desired_channels: c_int) [*c]stbi_us;
// Float (HDR) loaders.
pub extern fn stbi_loadf_from_memory(buffer: [*c]const stbi_uc, len: c_int, x: [*c]c_int, y: [*c]c_int, channels_in_file: [*c]c_int, desired_channels: c_int) [*c]f32;
pub extern fn stbi_loadf_from_callbacks(clbk: [*c]const stbi_io_callbacks, user: ?*anyopaque, x: [*c]c_int, y: [*c]c_int, channels_in_file: [*c]c_int, desired_channels: c_int) [*c]f32;
pub extern fn stbi_loadf(filename: [*c]const u8, x: [*c]c_int, y: [*c]c_int, channels_in_file: [*c]c_int, desired_channels: c_int) [*c]f32;
pub extern fn stbi_loadf_from_file(f: [*c]FILE, x: [*c]c_int, y: [*c]c_int, channels_in_file: [*c]c_int, desired_channels: c_int) [*c]f32;
// HDR <-> LDR conversion tuning (global state inside stb_image).
pub extern fn stbi_hdr_to_ldr_gamma(gamma: f32) void;
pub extern fn stbi_hdr_to_ldr_scale(scale: f32) void;
pub extern fn stbi_ldr_to_hdr_gamma(gamma: f32) void;
pub extern fn stbi_ldr_to_hdr_scale(scale: f32) void;
// Metadata / capability queries (no pixel decoding).
pub extern fn stbi_is_hdr_from_callbacks(clbk: [*c]const stbi_io_callbacks, user: ?*anyopaque) c_int;
pub extern fn stbi_is_hdr_from_memory(buffer: [*c]const stbi_uc, len: c_int) c_int;
pub extern fn stbi_is_hdr(filename: [*c]const u8) c_int;
pub extern fn stbi_is_hdr_from_file(f: [*c]FILE) c_int;
pub extern fn stbi_failure_reason() [*c]const u8;
pub extern fn stbi_image_free(retval_from_stbi_load: ?*anyopaque) void;
pub extern fn stbi_info_from_memory(buffer: [*c]const stbi_uc, len: c_int, x: [*c]c_int, y: [*c]c_int, comp: [*c]c_int) c_int;
pub extern fn stbi_info_from_callbacks(clbk: [*c]const stbi_io_callbacks, user: ?*anyopaque, x: [*c]c_int, y: [*c]c_int, comp: [*c]c_int) c_int;
pub extern fn stbi_is_16_bit_from_memory(buffer: [*c]const stbi_uc, len: c_int) c_int;
pub extern fn stbi_is_16_bit_from_callbacks(clbk: [*c]const stbi_io_callbacks, user: ?*anyopaque) c_int;
pub extern fn stbi_info(filename: [*c]const u8, x: [*c]c_int, y: [*c]c_int, comp: [*c]c_int) c_int;
pub extern fn stbi_info_from_file(f: [*c]FILE, x: [*c]c_int, y: [*c]c_int, comp: [*c]c_int) c_int;
pub extern fn stbi_is_16_bit(filename: [*c]const u8) c_int;
pub extern fn stbi_is_16_bit_from_file(f: [*c]FILE) c_int;
// Global decode options.
pub extern fn stbi_set_unpremultiply_on_load(flag_true_if_should_unpremultiply: c_int) void;
pub extern fn stbi_convert_iphone_png_to_rgb(flag_true_if_should_convert: c_int) void;
pub extern fn stbi_set_flip_vertically_on_load(flag_true_if_should_flip: c_int) void;
pub extern fn stbi_set_flip_vertically_on_load_thread(flag_true_if_should_flip: c_int) void;
// Raw zlib helpers exposed by stb_image.
pub extern fn stbi_zlib_decode_malloc_guesssize(buffer: [*c]const u8, len: c_int, initial_size: c_int, outlen: [*c]c_int) [*c]u8;
pub extern fn stbi_zlib_decode_malloc_guesssize_headerflag(buffer: [*c]const u8, len: c_int, initial_size: c_int, outlen: [*c]c_int, parse_header: c_int) [*c]u8;
pub extern fn stbi_zlib_decode_malloc(buffer: [*c]const u8, len: c_int, outlen: [*c]c_int) [*c]u8;
pub extern fn stbi_zlib_decode_buffer(obuffer: [*c]u8, olen: c_int, ibuffer: [*c]const u8, ilen: c_int) c_int;
pub extern fn stbi_zlib_decode_noheader_malloc(buffer: [*c]const u8, len: c_int, outlen: [*c]c_int) [*c]u8;
pub extern fn stbi_zlib_decode_noheader_buffer(obuffer: [*c]u8, olen: c_int, ibuffer: [*c]const u8, ilen: c_int) c_int;
|
src/pkg/stb_image.zig
|
const std = @import("std");
const zua = @import("zua");
const parse = zua.parse;
// Prunes the input/output pairs used by fuzzed_parse to exclude inputs
// that cause a lexer error (as opposed to a parser error), since those
// pairs will just be overlap with the fuzzed_lex corpus.
//
// Expects @import("build_options").fuzzed_parse_inputs_dir to be a path to
// a directory containing a corpus of inputs to test and
// @import("build_options").fuzzed_parse_outputs_dir to be a path to a
// directory containing the bytecode obtained by running the input
// through the Lua parser and dumper (or an error string).
const build_options = @import("build_options");
const inputs_dir_path = build_options.fuzzed_parse_inputs_dir;
const outputs_dir_path = build_options.fuzzed_parse_outputs_dir;
/// Prunes input/output pairs whose parse failure is really a lexer error,
/// since those overlap with the fuzzed_lex corpus. Deletes the matching
/// files from both the inputs and outputs directories.
pub fn main() !void {
    const allocator = std.testing.allocator;

    var inputs_dir = try std.fs.cwd().openDir(inputs_dir_path, .{ .iterate = true });
    defer inputs_dir.close();
    var outputs_dir = try std.fs.cwd().openDir(outputs_dir_path, .{});
    defer outputs_dir.close();

    var paths_to_remove = std.ArrayList([]const u8).init(allocator);
    // Fix: the duped file names were previously leaked; free each one
    // before releasing the list itself.
    defer {
        for (paths_to_remove.items) |duped| allocator.free(duped);
        paths_to_remove.deinit();
    }

    var inputs_iterator = inputs_dir.iterate();
    while (try inputs_iterator.next()) |entry| {
        if (entry.kind != .File) continue;

        const contents = try inputs_dir.readFileAlloc(allocator, entry.name, std.math.maxInt(usize));
        defer allocator.free(contents);
        const expectedContents = try outputs_dir.readFileAlloc(allocator, entry.name, std.math.maxInt(usize));
        defer allocator.free(expectedContents);

        // apparently luac can miscompile certain inputs which gives a blank output file
        if (expectedContents.len == 0) {
            continue;
        }
        // An output starting with the bytecode signature means the input both
        // lexed and parsed successfully; such pairs are kept.
        const is_bytecode_expected = std.mem.startsWith(u8, expectedContents, zua.dump.signature);
        if (is_bytecode_expected) {
            continue;
        }

        var lexer = zua.lex.Lexer.init(contents, "fuzz");
        var parser = zua.parse.Parser.init(&lexer);
        if (parser.parse(allocator)) |tree| {
            tree.deinit();
        } else |err| {
            // Only prune pairs whose failure originates in the lexer.
            if (isInErrorSet(err, zua.lex.LexError)) {
                const duped_path = try allocator.dupe(u8, entry.name);
                // Fix: don't leak the dupe if append itself fails.
                errdefer allocator.free(duped_path);
                try paths_to_remove.append(duped_path);
            }
        }
    }

    std.debug.print("removing {d} pairs...", .{paths_to_remove.items.len});
    for (paths_to_remove.items) |path_to_remove| {
        // Missing files are fine — the pair may already be partially removed.
        inputs_dir.deleteFile(path_to_remove) catch |err| switch (err) {
            error.FileNotFound => {},
            else => return err,
        };
        outputs_dir.deleteFile(path_to_remove) catch |err| switch (err) {
            error.FileNotFound => {},
            else => return err,
        };
    }
    std.debug.print(" done\n", .{});
}
/// Returns true if `err`'s name matches any error declared in the error set `T`.
/// (Name-based membership test: works across distinct error-set types.)
pub fn isInErrorSet(err: anyerror, comptime T: type) bool {
    const err_name = @errorName(err);
    inline for (std.meta.fields(T)) |candidate| {
        if (std.mem.eql(u8, err_name, candidate.name)) return true;
    }
    return false;
}
|
test/fuzzed_parse_prune.zig
|
const std = @import("std");
const utils = @import("utils.zig");
const dif = @import("dif.zig");
const ial = @import("indexedarraylist.zig");
const testing = std.testing;
const any = utils.any;
const DifNode = dif.DifNode;
const debug = std.debug.print;
const DifNodeMap = std.StringHashMap(ial.Entry(DifNode));
const SemaError = error {
NodeWithNoName, Duplicate, OutOfMemory, InvalidField, InvalidReference, InvalidValue
};
/// Context carried through the sema passes: owns the name->node lookup maps
/// (populated by doSema) and knows how to print source-anchored errors.
pub fn SemaContext() type {
    return struct {
        const Self = @This();
        allocator: std.mem.Allocator,
        // Root of the dif-tree this context operates on.
        dif_root: ial.Entry(DifNode),
        // Name -> defining node indexes, one per definition kind.
        node_map: DifNodeMap,
        edge_map: DifNodeMap,
        instance_map: DifNodeMap,
        group_map: DifNodeMap,
        /// Creates a context with empty maps; must be released with deinit().
        pub fn init(allocator: std.mem.Allocator, dif_root: ial.Entry(DifNode)) Self {
            return Self{
                .allocator = allocator,
                .dif_root = dif_root,
                .node_map = DifNodeMap.init(allocator),
                .edge_map = DifNodeMap.init(allocator),
                .instance_map = DifNodeMap.init(allocator),
                .group_map = DifNodeMap.init(allocator),
            };
        }
        /// Walks parent links until it reaches the enclosing .Unit node
        /// (which carries the source buffer needed for error reporting).
        fn findUnit(node: *ial.Entry(DifNode)) !*ial.Entry(DifNode) {
            var current = node;
            while(current.get().node_type != .Unit) {
                if(current.get().parent) |*parent| {
                    current = parent;
                } else {
                    // Ending up here is a bug
                    return error.NoUnitFound;
                }
            }
            return current;
        }
        /// Prints a compiler-style "file:line:col: error: ..." message to stderr,
        /// followed by the offending source line and a caret marker.
        /// Best-effort: all write errors are deliberately ignored.
        fn printError(self: *Self, node: *ial.Entry(DifNode), comptime fmt: []const u8, args: anytype) void {
            _ = self;
            const err_writer = std.io.getStdErr().writer();
            const unit = findUnit(node) catch {
                err_writer.print("BUG: Could not find unit associated with node\n", .{}) catch {};
                unreachable;
            };
            const src_buf = unit.get().data.Unit.src_buf;
            // Translate the node's token offset into a line/column position.
            const lc = utils.idxToLineCol(src_buf, node.get().initial_token.?.start);
            err_writer.print("{s}:{d}:{d}: error: ", .{unit.get().name.?, lc.line, lc.col}) catch {};
            err_writer.print(fmt, args) catch {};
            err_writer.print("\n", .{}) catch {};
            utils.dumpSrcChunkRef(@TypeOf(err_writer), err_writer, src_buf, node.get().initial_token.?.start);
            err_writer.print("\n", .{}) catch {};
            // Print ^ at start of symbol
            err_writer.writeByteNTimes(' ', lc.col-1) catch {};
            err_writer.print("^\n", .{}) catch {};
        }
        /// Releases the lookup maps. The dif-tree itself is owned elsewhere.
        pub fn deinit(self: *Self) void {
            self.node_map.deinit();
            self.edge_map.deinit();
            self.instance_map.deinit();
            self.group_map.deinit();
        }
    };
}
/// Extracts a set of predefined key/values, based on the particular ParamsType.
/// Walks the sibling chain starting at `first_sibling`, and for every .Value
/// node whose name matches a known parameter of `ParamsType`, copies the value
/// into `result`. Unknown parameter names are silently ignored here
/// (field validation happens separately in verifyFields).
fn getFieldsFromChildSet(ctx: *SemaContext(), comptime ParamsType: type, first_sibling: *ial.Entry(DifNode), result: *ParamsType) !void {
    var node_ref = first_sibling;
    while (true) {
        const node = node_ref.get();
        // check node type: We're only looking for value-types
        if (node.node_type == .Value) {
            if (node.name) |param_name| {
                const value = node.data.Value.value;
                switch (ParamsType) {
                    dif.NodeParams => {
                        if (std.mem.eql(u8, "label", param_name)) {
                            result.label = value;
                        } else if (std.mem.eql(u8, "bgcolor", param_name)) {
                            result.bgcolor = value;
                        } else if (std.mem.eql(u8, "fgcolor", param_name)) {
                            result.fgcolor = value;
                        } else if (std.mem.eql(u8, "shape", param_name)) {
                            result.shape = value;
                        } else if (std.mem.eql(u8, "note", param_name)) {
                            result.note = value;
                        }
                    },
                    dif.EdgeParams => {
                        if (std.mem.eql(u8, "label", param_name)) {
                            result.label = value;
                        } else if (std.mem.eql(u8, "edge_style", param_name)) {
                            result.edge_style = dif.EdgeStyle.fromString(value) catch |e| {
                                ctx.printError(node_ref, "Invalid value for field 'edge_style': {s}", .{value});
                                return e;
                            };
                        } else if (std.mem.eql(u8, "source_symbol", param_name)) {
                            result.source_symbol = dif.EdgeEndStyle.fromString(value) catch |e| {
                                ctx.printError(node_ref, "Invalid value for field 'source_symbol': {s}", .{value});
                                return e;
                            };
                        } else if (std.mem.eql(u8, "target_symbol", param_name)) {
                            result.target_symbol = dif.EdgeEndStyle.fromString(value) catch |e| {
                                ctx.printError(node_ref, "Invalid value for field 'target_symbol': {s}", .{value});
                                return e;
                            };
                        } else if (std.mem.eql(u8, "source_label", param_name)) {
                            result.source_label = value;
                        } else if (std.mem.eql(u8, "target_label", param_name)) {
                            result.target_label = value;
                        }
                    },
                    dif.GroupParams => {
                        if (std.mem.eql(u8, "label", param_name)) {
                            result.label = value;
                        } else if (std.mem.eql(u8, "bgcolor", param_name)) {
                            result.bgcolor = value;
                        } else if (std.mem.eql(u8, "layout", param_name)) {
                            result.layout = value;
                        } else if (std.mem.eql(u8, "note", param_name)) {
                            result.note = value;
                        }
                    },
                    else => {
                        // Fix: a `type` cannot be concatenated into a string literal;
                        // the original `++ ParamsType ++` would itself fail to compile
                        // whenever this branch was instantiated. Use @typeName.
                        @compileError("ERROR: Unsupported ParamsType " ++ @typeName(ParamsType) ++ ". Most likely a bug.\n");
                    },
                }
            }
        }
        if (node.next_sibling) |*next| {
            node_ref = next;
        } else {
            break;
        }
    }
}
/// The sema (temporary name - not sure it's accurate enough, there's possibly processing happening at some point too) step will clean up and verify the integrity of the dif-tree.
///
/// Responsibilities:
/// Ensure there are no name-collisions (e.g. duplicate definitions, or collisions between nodes and groups)
/// Validate parameters for node-type (Node, Edge, Instance, Relationship, Group)
/// Possibly:
/// Support partials: name-collisions are allowed, fields will just be updated
///
/// It will also populate a set of indexes to look up the different node-types by name
/// The result shall either be error, or a well-defined tree that can be easily converted to
/// the desired output format (e.g. dot).
///
/// Returned SemaContext must be .deinit()'ed
pub fn doSema(ctx: *SemaContext()) SemaError!void {
    // Pass 1: reject duplicate definitions / invalid fields, and fill the
    // per-kind name maps on ctx.
    try processNoDupesRecursively(ctx, &ctx.dif_root);
    // Pass 2: resolve cross-references via the maps populated in pass 1,
    // so the ordering of these two calls matters.
    try processVerifyAndPopulateReferences(ctx, &ctx.dif_root);
}
/// Returns true if `field` is a recognized parameter name for node definitions.
fn isValidNodeField(field: []const u8) bool {
    // Idiom fix: `const` instead of `comptime var` — the table is never
    // mutated, and modern compilers reject an unmutated `var`.
    const valid_fields = [_][]const u8{"label", "bgcolor", "fgcolor", "shape", "note"};
    return any(valid_fields[0..], field);
}
/// Returns true if `field` is a recognized parameter name for edge definitions.
fn isValidEdgeField(field: []const u8) bool {
    // Idiom fix: `const` instead of `comptime var` — the table is never
    // mutated, and modern compilers reject an unmutated `var`.
    const valid_fields = [_][]const u8{"label", "edge_style", "source_symbol", "target_symbol", "source_label", "target_label"};
    return any(valid_fields[0..], field);
}
/// Verifies that all siblings' names passes the 'verificator'.
/// Returns error.InvalidField (after printing a source-anchored error)
/// on the first sibling that is not a .Value node or whose name fails
/// the verificator.
fn verifyFields(ctx: *SemaContext(), first_sibling: *ial.Entry(DifNode), verificator: fn(field: []const u8) bool) !void {
    var current_ref = first_sibling;
    // Iterate over sibling set
    while(true) {
        const current = current_ref.get();
        switch(current.node_type) {
            .Value => {
                if(!verificator(current.name.?)) {
                    ctx.printError(current_ref, "Unsupported parameter: '{s}'", .{current.name});
                    return error.InvalidField;
                }
            },
            else => {
                // Fix: print the tag name of the offending node type.
                // @TypeOf(current.node_type) yields the enum *type*, not the
                // active tag, which is what this message intends to show.
                ctx.printError(current_ref, "Unsupported child-type '{s}' for: {s}", .{@tagName(current.node_type), current.name});
                return error.InvalidField;
            }
        }
        if (current.next_sibling) |*next| {
            current_ref = next;
        } else {
            break;
        }
    }
}
/// Traverses the dif-graph and populates the .data-field of each node with
/// cross-references (resolved via the maps built by processNoDupesRecursively)
/// and parameter-overrides parsed from the node's children.
/// Fix: restores `&current.` where the source had been corrupted into the
/// mojibake sequence `¤t.` (HTML entity &curren; = U+00A4), which is not
/// valid Zig and cannot compile.
fn processVerifyAndPopulateReferences(ctx: *SemaContext(), node: *ial.Entry(DifNode)) SemaError!void {
    var current_ref = node;
    while(true) {
        const current = current_ref.get();
        switch (current.node_type) {
            .Node => {
                var data = &current.data.Node;
                if(current_ref.get().first_child) |*child| try getFieldsFromChildSet(ctx, dif.NodeParams, child, &data.params);
            },
            .Edge => {
                var data = &current.data.Edge;
                if(current_ref.get().first_child) |*child| try getFieldsFromChildSet(ctx, dif.EdgeParams, child, &data.params);
            },
            .Instantiation => {
                // set data.node_type_ref by looking up the instantiation target
                var data = &current.data.Instantiation;
                if(current_ref.get().first_child) |*child| try getFieldsFromChildSet(ctx, dif.NodeParams, child, &data.params);
                const node_type_name = current.data.Instantiation.target;
                data.node_type_ref = ctx.node_map.get(node_type_name) orelse {
                    ctx.printError(current_ref, "No node '{s}' found\n", .{node_type_name});
                    return error.InvalidReference;
                };
            },
            .Relationship => {
                // set data.source_ref, data.edge_ref, data.target_ref;
                // source/target may name either an instance or a group.
                var data = &current.data.Relationship;
                if(current_ref.get().first_child) |*child| try getFieldsFromChildSet(ctx, dif.EdgeParams, child, &data.params);
                const node_name_source = current.name.?;
                const edge_name = current.data.Relationship.edge;
                const node_name_target = current.data.Relationship.target;
                data.source_ref = ctx.instance_map.get(node_name_source) orelse ctx.group_map.get(node_name_source) orelse {
                    ctx.printError(current_ref, "No instance or group '{s}' found\n", .{node_name_source});
                    return error.InvalidReference;
                };
                data.target_ref = ctx.instance_map.get(node_name_target) orelse ctx.group_map.get(node_name_target) orelse {
                    ctx.printError(current_ref, "No instance or group '{s}' found\n", .{node_name_target});
                    return error.InvalidReference;
                };
                data.edge_ref = ctx.edge_map.get(edge_name) orelse {
                    ctx.printError(current_ref, "No edge '{s}' found\n", .{edge_name});
                    return error.InvalidReference;
                };
            },
            .Group => {
                var data = &current.data.Group;
                if(current_ref.get().first_child) |*child| try getFieldsFromChildSet(ctx, dif.GroupParams, child, &data.params);
            },
            .Unit => {
                var data = &current.data.Unit;
                if(current_ref.get().first_child) |*child| try getFieldsFromChildSet(ctx, dif.GroupParams, child, &data.params);
            },
            else => {},
        }
        // Depth-first: recurse into children, then continue along siblings.
        if (current.first_child) |*child| {
            try processVerifyAndPopulateReferences(ctx, child);
        }
        if (current.next_sibling) |*next| {
            current_ref = next;
        } else {
            break;
        }
    }
}
/// Verify integrity of dif-graph. Fails on duplicate definitions and invalid fields.
/// Aborts at first error.
/// Also populates ctx's node/edge/instance/group maps as a side effect, which
/// the later reference-resolution pass depends on.
fn processNoDupesRecursively(ctx: *SemaContext(), node: *ial.Entry(DifNode)) SemaError!void {
    var current_ref = node;
    while(true) {
        const current = current_ref.get();
        const node_name = current.name orelse {
            // Found node with no name... is it even possible at this stage? Bug, most likely
            return error.NodeWithNoName;
        };
        switch (current.node_type) {
            .Node => {
                if(ctx.node_map.get(node_name)) |*conflict_ref| {
                    ctx.printError(current_ref, "Duplicate node definition, '{s}' already defined.", .{node_name});
                    ctx.printError(conflict_ref, "Previous definition was here.", .{});
                    return error.Duplicate;
                }
                if(current.first_child) |*child_ref| {
                    try verifyFields(ctx, child_ref, isValidNodeField);
                }
                try ctx.node_map.put(node_name, current_ref.*);
            },
            .Edge => {
                if(ctx.edge_map.get(node_name)) |*conflict_ref| {
                    ctx.printError(current_ref, "Duplicate edge definition, '{s}' already defined.", .{node_name});
                    ctx.printError(conflict_ref, "Previous definition was here.", .{});
                    return error.Duplicate;
                }
                if(current.first_child) |*child_ref| {
                    try verifyFields(ctx, child_ref, isValidEdgeField);
                }
                try ctx.edge_map.put(node_name, current_ref.*);
            },
            .Instantiation => {
                // Instances and groups share a namespace: check both maps.
                if(ctx.instance_map.get(node_name)) |*conflict_ref| {
                    ctx.printError(current_ref, "Duplicate instantiation, '{s}' already defined.", .{node_name});
                    ctx.printError(conflict_ref, "Previous instantiation was here.", .{});
                    return error.Duplicate;
                } else if(ctx.group_map.get(node_name)) |*conflict_ref| {
                    ctx.printError(current_ref, "A group with name '{s}' already defined, can't create instance with same name.", .{node_name});
                    ctx.printError(conflict_ref, "Previous definition was here.", .{});
                    return error.Duplicate;
                }
                if(current.first_child) |*child_ref| {
                    try verifyFields(ctx, child_ref, isValidNodeField);
                }
                try ctx.instance_map.put(node_name, current_ref.*);
            },
            .Group => {
                // Same shared namespace check, from the group side.
                if(ctx.group_map.get(node_name)) |*conflict_ref| {
                    ctx.printError(current_ref, "Duplicate group definition, {s} already defined.", .{node_name});
                    ctx.printError(conflict_ref, "Previous definition was here.", .{});
                    return error.Duplicate;
                } else if(ctx.instance_map.get(node_name)) |*conflict_ref| {
                    ctx.printError(current_ref, "An instance with name '{s}' already defined, can't create group with same name.", .{node_name});
                    ctx.printError(conflict_ref, "Previous definition was here.", .{});
                    return error.Duplicate;
                }
                // TODO: Verify valid group fields
                try ctx.group_map.put(node_name, current_ref.*);
            },
            .Relationship => {
                // Relationships are not indexed by name; only their fields are checked.
                if(current.first_child) |*child_ref| {
                    try verifyFields(ctx, child_ref, isValidEdgeField);
                }
            },
            else => {},
        }
        // Depth-first: recurse into children, then continue along siblings.
        if (current.first_child) |*child| {
            try processNoDupesRecursively(ctx, child);
        }
        if (current.next_sibling) |*next| {
            current_ref = next;
        } else {
            break;
        }
    }
}
// Yes, these tests are integration-y
// Might at some point rewrite them to use internal dif-format, but I find it more
// likely that the dif will change (and the work connected to it) than the daya-syntax.
/// Test helper: tokenize `buf`, build a dif-tree, and run sema over it.
/// Propagates any tokenizer/dif/sema error to the caller.
fn testSema(buf: []const u8) !void {
    const tokenizer = @import("tokenizer.zig");
    var tok = tokenizer.Tokenizer.init(buf);

    var node_pool = ial.IndexedArrayList(DifNode).init(std.testing.allocator);
    defer node_pool.deinit();

    const root_node = try dif.tokensToDif(&node_pool, &tok, "test");

    var ctx = SemaContext().init(std.testing.allocator, root_node);
    // Single defer replaces the original errdefer/defer pair: the context is
    // torn down exactly once whether doSema succeeds or fails.
    defer ctx.deinit();

    try doSema(&ctx);
}
// Duplicate-detection tests: node/edge/instance/group names must be unique,
// and instances and groups share a single namespace.
test "sema fails on duplicate edge" {
    try testing.expectError(error.Duplicate, testSema(
        \\edge owns;
        \\edge owns;
    ));
}
test "sema fails on duplicate node" {
    try testing.expectError(error.Duplicate, testSema(
        \\node Component;
        \\node Component;
    ));
}
test "sema fails on duplicate instantiation" {
    // Duplicate instance name, even when instantiating different node types.
    try testing.expectError(error.Duplicate, testSema(
        \\node Component;
        \\node Library;
        \\compA: Component;
        \\compA: Library;
    ));
}
test "sema fails on duplicate group" {
    try testing.expectError(error.Duplicate, testSema(
        \\group MyGroup;
        \\group MyGroup;
    ));
}
test "sema fails on group with same name as instance" {
    try testing.expectError(error.Duplicate, testSema(
        \\node Component;
        \\compA: Component;
        \\group compA;
    ));
}
test "sema fails on instance with same name as group" {
    try testing.expectError(error.Duplicate, testSema(
        \\node Component;
        \\group compA;
        \\compA: Component;
    ));
}
// Happy-path and field-validation tests: a well-formed document passes,
// and a misspelled parameter name ("lable") is rejected for every
// definition kind that carries fields.
test "sema does not fail on well-formed daya" {
    try testing.expectEqual({}, try testSema(
        \\node Component;
        \\node Library;
        \\edge uses;
        \\edge depends_on;
        \\compA: Component;
        \\compB: Component;
        \\libA: Library;
        \\compA uses libA;
        \\compB depends_on compA;
    ));
}
test "sema fails on invalid fields for node" {
    try testing.expectError(error.InvalidField, testSema(
        \\node Component {
        \\  lable="misspelled";
        \\}
    ));
}
test "sema fails on invalid fields for instance" {
    try testing.expectError(error.InvalidField, testSema(
        \\node Component {
        \\  label="label here";
        \\}
        \\mynode: Component {
        \\  lable="misspelled";
        \\}
    ));
}
test "sema fails on invalid fields for edge" {
    try testing.expectError(error.InvalidField, testSema(
        \\edge owns {
        \\  lable="misspelled";
        \\}
    ));
}
test "sema fails on invalid fields for relationship" {
    try testing.expectError(error.InvalidField, testSema(
        \\node Component;
        \\edge owns;
        \\mynode: Component;
        \\mynode2: Component;
        \\mynode owns mynode2 {
        \\  lable="misspelled";
        \\}
    ));
}
// TODO: Add tests to verify that invalid references fails
//
|
libdaya/src/sema.zig
|
const zang = @import("zang");
const common = @import("common.zig");
const c = @import("common/c.zig");
const Instrument = @import("modules.zig").NiceInstrument;
pub const AUDIO_FORMAT: zang.AudioFormat = .signed16_lsb;
pub const AUDIO_SAMPLE_RATE = 48000;
pub const AUDIO_BUFFER_SIZE = 1024;
pub const DESCRIPTION =
\\example_polyphony
\\
\\Play an instrument with the keyboard. You can hold
\\down multiple notes.
\\
\\Press spacebar to cycle through various amounts of
\\decimation (artificial sample rate reduction).
;
const a4 = 220.0;
// Polyphonic synth: one independent voice (note queue + instrument + trigger)
// per key binding, all painted into a single mono output.
const Polyphony = struct {
    pub const num_outputs = 1;
    pub const num_temps = 2;
    pub const Params = struct {
        sample_rate: f32,
        // One held-state flag per key binding; index-aligned with `voices`.
        note_held: [common.key_bindings.len]bool,
    };
    const Voice = struct {
        // Last key state this voice acted on; used to detect press/release edges.
        down: bool,
        iq: zang.Notes(Instrument.Params).ImpulseQueue,
        idgen: zang.IdGenerator,
        instrument: Instrument,
        trigger: zang.Trigger(Instrument.Params),
    };
    voices: [common.key_bindings.len]Voice,
    // Builds one voice per key binding, all initially released.
    fn init() Polyphony {
        var self: Polyphony = .{
            .voices = undefined,
        };
        var i: usize = 0;
        while (i < common.key_bindings.len) : (i += 1) {
            self.voices[i] = Voice{
                .down = false,
                .iq = zang.Notes(Instrument.Params).ImpulseQueue.init(),
                .idgen = zang.IdGenerator.init(),
                .instrument = Instrument.init(0.3),
                .trigger = zang.Trigger(Instrument.Params).init(),
            };
        }
        return self;
    }
    fn paint(
        self: *Polyphony,
        span: zang.Span,
        outputs: [num_outputs][]f32,
        temps: [num_temps][]f32,
        params: Params,
    ) void {
        // Pass 1: for every key whose held state changed since the previous
        // call, push a note-on/off impulse and remember the new state.
        var i: usize = 0;
        while (i < common.key_bindings.len) : (i += 1) {
            if (params.note_held[i] != self.voices[i].down) {
                self.voices[i].iq.push(0, self.voices[i].idgen.nextId(), .{
                    .sample_rate = params.sample_rate,
                    // Key frequency is relative to the a4 base constant.
                    .freq = a4 * common.key_bindings[i].rel_freq,
                    .note_on = params.note_held[i],
                });
                self.voices[i].down = params.note_held[i];
            }
        }
        // Pass 2: paint every voice into the shared output buffers,
        // consuming each voice's queued impulses via its trigger.
        for (self.voices) |*voice| {
            var ctr = voice.trigger.counter(span, voice.iq.consume());
            while (voice.trigger.next(&ctr)) |result| {
                voice.instrument.paint(
                    result.span,
                    outputs,
                    temps,
                    result.note_id_changed,
                    result.params,
                );
            }
        }
    }
};
// Parameter struct for the decimator (only a bypass flag).
// NOTE(review): not referenced anywhere in this file — possibly leftover;
// confirm before removing.
const MyDecimatorParams = struct {
    bypass: bool,
};
// Top-level module wired into the example framework: routes keyboard input
// into the Polyphony synth and optionally runs its output through a
// sample-rate decimator.
pub const MainModule = struct {
    pub const num_outputs = 1;
    pub const num_temps = 3;
    pub const output_audio = common.AudioOut{ .mono = 0 };
    pub const output_visualize = 0;
    // Current key-held snapshot; re-pushed in full on every key event.
    current_params: Polyphony.Params,
    iq: zang.Notes(Polyphony.Params).ImpulseQueue,
    idgen: zang.IdGenerator,
    polyphony: Polyphony,
    trigger: zang.Trigger(Polyphony.Params),
    dec: zang.Decimator,
    // 0 = bypass; 1..6 select progressively lower fake sample rates.
    dec_mode: u32,
    pub fn init() MainModule {
        return .{
            .current_params = .{
                .sample_rate = AUDIO_SAMPLE_RATE,
                .note_held = [1]bool{false} ** common.key_bindings.len,
            },
            .iq = zang.Notes(Polyphony.Params).ImpulseQueue.init(),
            .idgen = zang.IdGenerator.init(),
            .polyphony = Polyphony.init(),
            .trigger = zang.Trigger(Polyphony.Params).init(),
            .dec = zang.Decimator.init(),
            .dec_mode = 0,
        };
    }
    pub fn paint(
        self: *MainModule,
        span: zang.Span,
        outputs: [num_outputs][]f32,
        temps: [num_temps][]f32,
    ) void {
        // temps[2] holds the raw synth mix; temps[0..1] are Polyphony's scratch.
        zang.zero(span, temps[2]);
        var ctr = self.trigger.counter(span, self.iq.consume());
        while (self.trigger.next(&ctr)) |result| {
            self.polyphony.paint(
                result.span,
                .{temps[2]},
                .{ temps[0], temps[1] },
                result.params,
            );
        }
        if (self.dec_mode > 0) {
            // Decimate temps[2] into the real output at a reduced fake rate.
            self.dec.paint(span, outputs, .{}, false, .{
                .sample_rate = AUDIO_SAMPLE_RATE,
                .input = temps[2],
                .fake_sample_rate = switch (self.dec_mode) {
                    1 => 6000.0,
                    2 => 5000.0,
                    3 => 4000.0,
                    4 => 3000.0,
                    5 => 2000.0,
                    6 => 1000.0,
                    else => unreachable,
                },
            });
        } else {
            // Bypass: pass the synth mix straight through.
            zang.addInto(span, outputs[0], temps[2]);
        }
    }
    // Returns false when the event was consumed as a mode toggle (spacebar),
    // true otherwise.
    pub fn keyEvent(self: *MainModule, key: i32, down: bool, impulse_frame: usize) bool {
        if (key == c.SDLK_SPACE and down) {
            // Cycle through bypass + the 6 decimation settings.
            self.dec_mode = (self.dec_mode + 1) % 7;
            return false;
        }
        for (common.key_bindings) |kb, i| {
            if (kb.key == key) {
                self.current_params.note_held[i] = down;
                self.iq.push(impulse_frame, self.idgen.nextId(), self.current_params);
            }
        }
        return true;
    }
};
|
examples/example_polyphony.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
pub const PageAlign = 4096;
/// A single 4KiB page: its backing physical memory, the virtual address it
/// should be mapped at, and its ELF program-header flags (PF_R/PF_W/PF_X).
pub const Page = struct {
    pmem: *align(PageAlign) [4096]u8,
    vaddr: u32,
    flags: u32,
    const Self = @This();
    /// Installs this page into the page tables: finds (or creates) the page
    /// table covering `vaddr` and writes the PTE pointing at `pmem`.
    pub fn map(self: *const Self, allocator: *Allocator) !void {
        var pt = try getPageTable(allocator, self.vaddr);
        // Keep only bits 21..12 of vaddr: the index within the page table
        // (the top 10 bits select the PDE, handled by getPageTable).
        const Mask = ~@as(u32, 0xffc00000);
        const pte_idx = (self.vaddr & Mask) >> 12;
        var pte = &pt[pte_idx];
        // attr bit 0 = present; bit 1 = writable (set only for PF_W pages).
        const attr: u32 = if (self.flags & std.elf.PF_W != 0) 3 else 1;
        // NOTE(review): assumes pmem's address is a physical address
        // (identity-mapped) — confirm for this kernel's memory model.
        pte.* = @ptrToInt(self.pmem) | attr;
    }
    /// std.fmt integration: prints "padr ... | vadr ... | RWX" flags.
    pub fn format(
        self: *const Self,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        var flags: [3]u8 = .{'-'} ** 3;
        if (self.flags & std.elf.PF_R != 0) flags[0] = 'R';
        if (self.flags & std.elf.PF_W != 0) flags[1] = 'W';
        if (self.flags & std.elf.PF_X != 0) flags[2] = 'X';
        try writer.print("padr 0x{x:0>8} | vadr 0x{x:0>8} | {s}", .{
            @ptrToInt(self.pmem),
            self.vaddr,
            flags,
        });
    }
};
/// Page Directory
pub var pd: ?*align(PageAlign) [1024]u32 = null;
const PageTable = *align(PageAlign) [1024]u32;
/// One-time paging setup: allocates the page directory, identity-maps the
/// first 4MiB, loads CR3, and enables paging (CR0.PG). Panics if called twice.
pub fn init(allocator: *Allocator) !void {
    if (pd != null) @panic("Page Directory already init.");
    // Allocate PD
    pd = (try allocator.allocAdvanced(u32, PageAlign, 1024, .exact))[0..1024];
    // Set each PDE to not present
    for (pd.?) |*pde| {
        // This sets the following flags to the pages:
        // Supervisor: Only kernel-mode can access them
        // Write Enabled: It can be both read from and written to
        // Not Present: The page table is not present
        pde.* = 0x0000_0002;
    }
    var pt = try allocator.allocAdvanced(u32, PageAlign, 1024, .exact);
    // Mapping first 4MiB
    for (pt) |*pte, i| {
        // attributes: supervisor level, read/write, present.
        const attr = 3;
        // Identity mapping: PTE i covers physical address i * 4KiB.
        pte.* = (i * 0x1000) | attr;
    }
    // Load to page directory
    // attributes: supervisor level, read/write, present
    pd.?[0] = @ptrToInt(pt.ptr) | 3;
    loadPD();
    // Enable paging: set the PG bit (bit 31, 0x80000000) in CR0.
    asm volatile (
        \\mov %%cr0, %%eax
        \\or $0x80000000, %%eax
        \\mov %%eax, %%cr0
        ::: "eax"
    );
}
/// Loads the page directory's address into CR3 (also flushes the TLB for
/// non-global entries, per the x86 architecture).
/// NOTE(review): CR3 expects a *physical* address; this passes the pd
/// pointer's value directly, which assumes identity mapping — confirm.
pub fn loadPD() void {
    asm volatile ("mov %%eax, %%cr3"
        :: [pd] "{eax}" (@ptrToInt(pd))
        : "eax"
    );
}
/// Resets every PDE except entry 0 (the first-4MiB mapping installed by
/// init) back to "supervisor, writable, not present", then reloads CR3
/// so the change takes effect.
pub fn clearPD() void {
    var idx: usize = 1;
    while (idx < 1024) : (idx += 1) {
        pd.?[idx] = 0x0000_0002;
    }
    loadPD();
}
/// Returns the page table covering `vaddr`, allocating and installing a new
/// (zeroed) one if the corresponding PDE is not present yet.
/// Errors with PageDirectoryNull if init() has not run.
fn getPageTable(allocator: *Allocator, vaddr: u32) !PageTable {
    if (pd == null) return error.PageDirectoryNull;
    // Top 10 bits of the virtual address select the PDE.
    const pde_idx = vaddr >> 22;
    var pde: *u32 = &pd.?[pde_idx];
    var pt: PageTable = undefined;
    // if PDE not present (bit 0 clear), create new page table
    if (pde.* & 1 == 0) {
        pt = (try allocator.allocAdvanced(u32, PageAlign, 1024, .exact))[0..1024];
        // Zero all PTEs so every page starts out not-present.
        for (pt) |*pte| {
            pte.* = 0;
        }
        // Add PDE with new page table
        // attributes: supervisor level, read/write, present
        pde.* = @ptrToInt(pt) | 3;
    }
    else {
        // PDE present: strip the attribute bits to recover the table address.
        const Mask = ~@as(u32, 0xFFF);
        pt = @intToPtr(PageTable, pde.* & Mask);
    }
    return pt;
}
/// Reads the CR0 control register (protection/paging control bits).
pub fn readCR0() u32 {
    return asm volatile ("mov %%cr0, %[ret]"
        : [ret] "={eax}" (-> u32)
        :: "eax"
    );
}
/// Reads CR2, which the CPU loads with the faulting linear address on a
/// page fault (per the x86 architecture).
pub fn readCR2() u32 {
    return asm volatile ("mov %%cr2, %[ret]"
        : [ret] "={eax}" (-> u32)
        :: "eax"
    );
}
/// Reads CR3 (the page-directory base register).
pub fn readCR3() u32 {
    return asm volatile ("mov %%cr3, %[ret]"
        : [ret] "={eax}" (-> u32)
        :: "eax"
    );
}
|
src/utils/paging.zig
|
const std = @import("std");
/// Build script for the mon13 library: builds the shared library plus unit,
/// property-based (libtheft), Lua-FFI, Python-ctypes, and JS/WASM test steps.
pub fn build(b: *std.build.Builder) void {
    // Standard release options allow the person running `zig build` to select
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
    const mode = b.standardReleaseOptions();
    const target = b.standardTargetOptions(.{});
    const mon13Pkg = std.build.Pkg{
        .name = "mon13",
        .path = .{ .path = "src/mon13.zig" },
    };
    // User-tunable options for the test steps below.
    const rawSeed = b.option(u64, "seed", "Seed for ptest");
    const rawSubmoduleTheft = b.option(bool, "submodule-theft", "Use submodule libtheft instead of system libtheft");
    const luaFfiExe = b.option([]const u8, "lua-ffi-exe", "Lua interpreter with LuaJIT-style FFI") orelse "luajit";
    const pyCtypesExe = b.option([]const u8, "py-ctypes-exe", "Python3 interpreter with ctypes") orelse "python3";
    const jsWasmExe = b.option([]const u8, "js-exe", "NodeJS with WASM") orelse "node";
    // Platform-native path separator for paths handed to external interpreters.
    const sep = [_]u8{std.fs.path.sep};
    const test_path = "test" ++ sep;
    const ld_path = b.lib_dir;
    const bind_path = "binding" ++ sep;
    //Library files
    const sharedLib = b.addSharedLibrary("mon13", "binding/c/bindc.zig", b.version(0, 7, 1));
    sharedLib.addPackage(mon13Pkg);
    sharedLib.setBuildMode(mode);
    sharedLib.setTarget(target);
    sharedLib.install();
    //Unit tests
    var unitTests = b.addTest("test/unit.zig");
    unitTests.addPackage(mon13Pkg);
    unitTests.setBuildMode(mode);
    const testStep = b.step("utest", "Run unit tests");
    testStep.dependOn(&unitTests.step);
    //Property-based tests (using theft library - POSIX only)
    // Optionally build libtheft from the git submodule first.
    const submoduleInit = b.addSystemCommand(&[_][]const u8{ "git", "submodule", "init" });
    const submoduleUpdate = b.addSystemCommand(&[_][]const u8{ "git", "submodule", "update" });
    submoduleUpdate.step.dependOn(&submoduleInit.step);
    const makeTheft = b.addSystemCommand(&[_][]const u8{ "make", "-C", "theft" });
    makeTheft.step.dependOn(&submoduleUpdate.step);
    const propertyTestExe = b.addExecutable("ptest", "test/ptest.c");
    propertyTestExe.linkLibC();
    propertyTestExe.linkSystemLibraryName("m");
    propertyTestExe.linkSystemLibraryName("theft");
    propertyTestExe.linkLibrary(sharedLib);
    // When -Dsubmodule-theft is set, use the submodule's lib/include paths
    // and make the build depend on compiling it.
    if (rawSubmoduleTheft orelse false) {
        propertyTestExe.addLibPath("theft/build");
    }
    propertyTestExe.addIncludeDir("binding/c");
    propertyTestExe.addIncludeDir("test");
    if (rawSubmoduleTheft orelse false) {
        propertyTestExe.addIncludeDir("theft/inc");
    }
    propertyTestExe.setBuildMode(mode);
    if (rawSubmoduleTheft orelse false) {
        propertyTestExe.step.dependOn(&makeTheft.step);
    }
    const propertyTestRun = propertyTestExe.run();
    // Forward an explicit seed to the ptest binary when provided.
    if (rawSeed) |seed| {
        var seed_buf: [100]u8 = undefined;
        var seed_arg = std.fmt.bufPrint(
            seed_buf[0..seed_buf.len],
            "{d}",
            .{seed},
        ) catch unreachable;
        propertyTestRun.addArg(seed_arg);
    }
    const propertyTestStep = b.step("ptest", "Run property-based tests (using libtheft - POSIX only)");
    propertyTestStep.dependOn(&propertyTestRun.step);
    //Lua FFI test
    var luajitTestCmd = b.addSystemCommand(&[_][]const u8{ luaFfiExe, test_path ++ "test.lua" });
    luajitTestCmd.setEnvironmentVariable("LD_LIBRARY_PATH", ld_path);
    luajitTestCmd.setEnvironmentVariable("LUA_PATH", bind_path ++ "lua_ffi" ++ sep ++ "?.lua");
    const luajitTestStep = b.step("lutest", "Run Lua FFI tests");
    // FFI tests need the installed shared library, hence getInstallStep().
    luajitTestStep.dependOn(b.getInstallStep());
    luajitTestStep.dependOn(&luajitTestCmd.step);
    //Python FFI test
    var pyTestCmd = b.addSystemCommand(&[_][]const u8{ pyCtypesExe, test_path ++ "test.py" });
    pyTestCmd.setEnvironmentVariable("LD_LIBRARY_PATH", ld_path);
    pyTestCmd.setEnvironmentVariable("PYTHONPATH", bind_path ++ "py_ctypes");
    const pyTestStep = b.step("pytest", "Run Python ctypes tests");
    pyTestStep.dependOn(b.getInstallStep());
    pyTestStep.dependOn(&pyTestCmd.step);
    //JavaScript WASM test
    const jsWasmStep = b.step("jstest", "Run JavaScript WASM tests (if -Dtarget=wasm32-freestanding)");
    // Only pass the library path when targeting wasm32-freestanding.
    var wasmTestPossible = false;
    if (target.os_tag) |os_tag| {
        if (target.cpu_arch) |cpu_arch| {
            if (os_tag == std.Target.Os.Tag.freestanding and cpu_arch == std.Target.Cpu.Arch.wasm32) {
                wasmTestPossible = true;
            }
        }
    }
    var jsWasmCmd = b.addSystemCommand(&[_][]const u8{ jsWasmExe, test_path ++ "test.js" });
    if (wasmTestPossible) {
        jsWasmCmd.addArg(ld_path);
    }
    jsWasmStep.dependOn(b.getInstallStep());
    jsWasmStep.dependOn(&jsWasmCmd.step);
}
|
build.zig
|
const clap = @import("clap");
const format = @import("format");
const it = @import("ziter");
const std = @import("std");
const ston = @import("ston");
const util = @import("util");
const ascii = std.ascii;
const debug = std.debug;
const fmt = std.fmt;
const fs = std.fs;
const heap = std.heap;
const io = std.io;
const log = std.log;
const math = std.math;
const mem = std.mem;
const os = std.os;
const rand = std.rand;
const testing = std.testing;
const escape = util.escape;
const Program = @This();
// Injected dependencies and parsed command line options.
allocator: mem.Allocator,
random: rand.Random,
options: Options,

// Data collected from the input stream by `useGame`.
pokedex: Set = Set{},
pokemons: Pokemons = Pokemons{},
trainers: Trainers = Trainers{},
trainer_names: TrainerNames = TrainerNames{},
moves: Moves = Moves{},
held_items: Set = Set{},

// Precomputed data for later use. Initialized in `run`
species: Set = undefined,
species_by_ability: SpeciesBy = undefined,
species_by_type: SpeciesBy = undefined,
stats: MinMax(u16) = undefined,

// Containers we reuse often enough that keeping them around with
// their preallocated capacity is worth the hassle.
simular: std.ArrayListUnmanaged(u16) = std.ArrayListUnmanaged(u16){},
intersection: Set = Set{},
/// All command line options after parsing and normalization in `init`.
const Options = struct {
    seed: u64,
    abilities: ThemeOption,
    held_items: HeldItemOption,
    moves: MoveOption,
    // Party sizes are clamped/picked within [party_size_min, party_size_max].
    party_size_max: u8,
    party_size_min: u8,
    party_size: PartySizeOption,
    party_pokemons: PartyPokemonsOption,
    stats: StatsOption,
    types: ThemeOption,
    // Name globs; already lowercased by `init` for case-insensitive matching.
    included_trainers: []const []const u8,
    excluded_trainers: []const []const u8,
    excluded_pokemons: []const []const u8,
};
/// How party members' held items are picked.
const HeldItemOption = enum {
    none,
    unchanged,
    random,
};
/// How party members' moves are picked.
const MoveOption = enum {
    none,
    unchanged,
    // Best moves regardless of level (see `fillWithBestMovesForLevel`).
    best,
    // Best moves learnable at the member's level.
    best_for_level,
    // Random moves from the member's level-up learnset.
    random_learnable,
    // Random moves from all moves in the game.
    random,
};
/// How types/abilities are picked for randomized party members: keep the
/// member's current one, pick fully at random, or pick one "theme" shared
/// by the trainer's whole party (see `Themes`).
const ThemeOption = enum {
    same,
    random,
    themed,
};
/// How replacement species are constrained by total stats (see
/// `randomizePartyMember`). Note: "simular" is the spelling used throughout
/// this codebase.
const StatsOption = enum {
    random,
    simular,
    follow_level,
};
/// How each trainer's party size is determined (always bounded by
/// `party_size_min`/`party_size_max`).
const PartySizeOption = enum {
    unchanged,
    minimum,
    random,
};
/// Whether the species of party members are randomized at all.
const PartyPokemonsOption = enum {
    unchanged,
    randomize,
};
// Entry point generated by `util.generateMain` from this `Program` type
// (calls `init` and then `run`).
pub const main = util.generateMain(Program);
pub const version = "0.0.0";
pub const description =
    \\Randomizes trainer parties.
    \\
;
/// Command line parameters understood by this program. Defaults documented
/// here mirror the `orelse` fallbacks in `init`.
pub const params = &[_]clap.Param(clap.Help){
    clap.parseParam(
        "-h, --help " ++
            "Display this help text and exit.",
    ) catch unreachable,
    clap.parseParam(
        " --held-items <none|unchanged|random> " ++
            "The method used for picking held items. (default: unchanged)",
    ) catch unreachable,
    clap.parseParam(
        " --moves <none|unchanged|best|best_for_level|random_learnable|random> " ++
            " The method used for picking moves. (default: unchanged)",
    ) catch unreachable,
    clap.parseParam(
        "-p, --party-pokemons <unchanged|randomize> " ++
            "Whether the trainers pokemons should be randomized. (default: unchanged)",
    ) catch unreachable,
    clap.parseParam(
        "-m, --party-size-min <INT> " ++
            "The minimum size each trainers party is allowed to be. (default: 1)",
    ) catch unreachable,
    clap.parseParam(
        "-M, --party-size-max <INT> " ++
            "The maximum size each trainers party is allowed to be. (default: 6)",
    ) catch unreachable,
    clap.parseParam(
        " --party-size <unchanged|minimum|random> " ++
            "The method used to pick the trainer party size. (default: unchanged)",
    ) catch unreachable,
    clap.parseParam(
        "-s, --seed <INT> " ++
            "The seed to use for random numbers. A random seed will be picked if this is not " ++
            "specified.",
    ) catch unreachable,
    clap.parseParam(
        "-S, --stats <random|simular|follow_level> " ++
            "The total stats the picked pokemon should have if pokemons are randomized. " ++
            "(default: random)",
    ) catch unreachable,
    clap.parseParam(
        "-t, --types <random|same|themed> " ++
            "Which types each trainer should use if pokemons are randomized. " ++
            "(default: random)",
    ) catch unreachable,
    clap.parseParam(
        "-a, --abilities <random|same|themed> " ++
            "Which ability each party member should have. (default: random)",
    ) catch unreachable,
    clap.parseParam(
        " --exclude-trainer <STRING>... " ++
            "List of trainers to not change. Case insensitive. Supports wildcards like 'grunt*'.",
    ) catch unreachable,
    clap.parseParam(
        " --include-trainer <STRING>... " ++
            "List of trainers to change, ignoring --exclude-trainer. Case insensitive. " ++
            "Supports wildcards like 'grunt*'.",
    ) catch unreachable,
    clap.parseParam(
        " --exclude-pokemon <STRING>... " ++
            "List of pokemons to never pick. Case insensitive. Supports wildcards like 'nido*'.",
    ) catch unreachable,
    clap.parseParam(
        "-v, --version " ++
            "Output version information and exit.",
    ) catch unreachable,
};
/// Returns lowercased copies of `strings`, allocated with `allocator`.
/// Caller owns the outer slice and each string.
fn dupLowerStrings(allocator: mem.Allocator, strings: []const []const u8) ![][]const u8 {
    var res = try allocator.alloc([]const u8, strings.len);
    for (strings) |string, i|
        res[i] = try ascii.allocLowerString(allocator, string);
    return res;
}

/// Parses command line arguments into a `Program`. The `random` field is
/// left undefined here; it is initialized in `run` once the seed is known.
pub fn init(allocator: mem.Allocator, args: anytype) !Program {
    // Name globs are matched case insensitively, so lowercase all filter
    // lists up front instead of on every comparison.
    const excluded_pokemons = try dupLowerStrings(allocator, args.options("--exclude-pokemon"));
    const excluded_trainers = try dupLowerStrings(allocator, args.options("--exclude-trainer"));
    const included_trainers = try dupLowerStrings(allocator, args.options("--include-trainer"));
    const options = Options{
        .seed = try util.args.seed(args),
        .party_size_min = (try util.args.int(args, "--party-size-min", u8)) orelse 1,
        .party_size_max = (try util.args.int(args, "--party-size-max", u8)) orelse 6,
        .party_size = (try util.args.enumeration(args, "--party-size", PartySizeOption)) orelse .unchanged,
        .party_pokemons = (try util.args.enumeration(args, "--party-pokemons", PartyPokemonsOption)) orelse .unchanged,
        .abilities = (try util.args.enumeration(args, "--abilities", ThemeOption)) orelse .random,
        .moves = (try util.args.enumeration(args, "--moves", MoveOption)) orelse .unchanged,
        .held_items = (try util.args.enumeration(args, "--held-items", HeldItemOption)) orelse .unchanged,
        .stats = (try util.args.enumeration(args, "--stats", StatsOption)) orelse .random,
        .types = (try util.args.enumeration(args, "--types", ThemeOption)) orelse .random,
        .excluded_pokemons = excluded_pokemons,
        .included_trainers = included_trainers,
        .excluded_trainers = excluded_trainers,
    };
    return Program{
        .allocator = allocator,
        .random = undefined, // Initialized in `run`
        .options = options,
    };
}
/// Main pipeline: parse the input into `program` (via `useGame`), precompute
/// lookup sets, randomize the trainers, then serialize them to `stdio.out`.
pub fn run(
    program: *Program,
    comptime Reader: type,
    comptime Writer: type,
    stdio: util.CustomStdIoStreams(Reader, Writer),
) anyerror!void {
    const allocator = program.allocator;
    // Populates the program's maps/sets through the `useGame` callback.
    try format.io(allocator, stdio.in, stdio.out, program, useGame);
    // Species that are actually pickable: catchable, present in the pokedex,
    // and not excluded on the command line.
    const species = try pokedexPokemons(
        allocator,
        program.pokedex,
        program.pokemons,
        program.options.excluded_pokemons,
    );
    program.random = rand.DefaultPrng.init(program.options.seed).random();
    program.species = species;
    program.species_by_ability = try speciesByAbility(allocator, program.pokemons, species);
    program.species_by_type = try speciesByType(allocator, program.pokemons, species);
    program.stats = minMaxStats(program.pokemons, species);
    try program.randomize();
    try program.output(stdio.out);
}
/// Serializes the (possibly randomized) trainers to `writer`.
fn output(program: *Program, writer: anytype) !void {
    const trainers = program.trainers;
    try ston.serialize(writer, .{ .trainers = trainers });
}
/// Callback for `format.io`. Records the parts of one parsed line of game
/// data that this randomizer cares about. `error.DidNotConsumeData` signals
/// that the line was not consumed (presumably so `format.io` can forward it
/// unchanged — see `format.io`); a plain return consumes it.
fn useGame(program: *Program, parsed: format.Game) !void {
    const allocator = program.allocator;
    switch (parsed) {
        .pokedex => |pokedex| {
            _ = try program.pokedex.put(allocator, pokedex.index, {});
            return error.DidNotConsumeData;
        },
        .pokemons => |pokemons| {
            const pokemon = (try program.pokemons.getOrPutValue(allocator, pokemons.index, .{}))
                .value_ptr;
            switch (pokemons.value) {
                .catch_rate => |catch_rate| pokemon.catch_rate = catch_rate,
                .pokedex_entry => |pokedex_entry| pokemon.pokedex_entry = pokedex_entry,
                .name => |name| {
                    // Names are matched case insensitively against the
                    // exclusion globs, so store them lowercased.
                    pokemon.name = try escape.default.unescapeAlloc(allocator, name);
                    for (pokemon.name) |*c|
                        c.* = ascii.toLower(c.*);
                },
                .stats => |stats| {
                    // Keep `total_stats` in sync as individual stats change.
                    const stat = @enumToInt(stats);
                    if (pokemon.stats[stat] > stats.value()) {
                        pokemon.total_stats -= pokemon.stats[stat] - stats.value();
                    } else {
                        pokemon.total_stats += stats.value() - pokemon.stats[stat];
                    }
                    pokemon.stats[stat] = stats.value();
                },
                .types => |types| _ = try pokemon.types.put(allocator, types.value, {}),
                .abilities => |ability| _ = try pokemon.abilities.put(
                    allocator,
                    ability.index,
                    ability.value,
                ),
                .moves => |moves| {
                    const move = (try pokemon.lvl_up_moves.getOrPutValue(
                        allocator,
                        moves.index,
                        .{},
                    )).value_ptr;
                    format.setField(move, moves.value);
                },
                .base_exp_yield,
                .ev_yield,
                .items,
                .gender_ratio,
                .egg_cycles,
                .base_friendship,
                .growth_rate,
                .egg_groups,
                .color,
                .evos,
                .tms,
                .hms,
                => return error.DidNotConsumeData,
            }
            // Pokemon data is only read, never modified, so the line is
            // never consumed.
            return error.DidNotConsumeData;
        },
        .trainers => |trainers| {
            const trainer = (try program.trainers.getOrPutValue(allocator, trainers.index, .{}))
                .value_ptr;
            switch (trainers.value) {
                .party_size => |party_size| trainer.party_size = party_size,
                .party_type => |party_type| trainer.party_type = party_type,
                .party => |party| {
                    const member = (try trainer.party.getOrPutValue(allocator, party.index, .{}))
                        .value_ptr;
                    switch (party.value) {
                        .species => |species| member.species = species,
                        .level => |level| member.level = level,
                        .item => |item| member.item = item,
                        .ability => |ability| member.ability = ability,
                        .moves => |moves| _ = try member.moves.put(
                            allocator,
                            moves.index,
                            moves.value,
                        ),
                    }
                },
                .name => |_name| {
                    // Trainer names are only used for the include/exclude
                    // globs; store lowercased and pass the line through.
                    const name = try escape.default.unescapeAlloc(allocator, _name);
                    for (name) |*c|
                        c.* = ascii.toLower(c.*);
                    _ = try program.trainer_names.getOrPutValue(allocator, trainers.index, name);
                    return error.DidNotConsumeData;
                },
                .class,
                .encounter_music,
                .trainer_picture,
                .items,
                => return error.DidNotConsumeData,
            }
            // Trainer data is what gets randomized and re-serialized in
            // `output`, so consume the line here.
            return;
        },
        .items => |items| switch (items.value) {
            .battle_effect => |effect| {
                // Only items with a battle effect are candidates for
                // randomly picked held items.
                if (effect != 0)
                    _ = try program.held_items.put(allocator, items.index, {});
                return error.DidNotConsumeData;
            },
            .name,
            .description,
            .price,
            .pocket,
            => return error.DidNotConsumeData,
        },
        .moves => |moves| {
            const move = (try program.moves.getOrPutValue(allocator, moves.index, .{}))
                .value_ptr;
            switch (moves.value) {
                .power => |power| move.power = power,
                .type => |_type| move.type = _type,
                .pp => |pp| move.pp = pp,
                .accuracy => |accuracy| move.accuracy = accuracy,
                .name,
                .description,
                .effect,
                .target,
                .priority,
                .category,
                => {},
            }
            return error.DidNotConsumeData;
        },
        .version,
        .game_title,
        .gamecode,
        .instant_text,
        .starters,
        .text_delays,
        .abilities,
        .types,
        .tms,
        .hms,
        .maps,
        .wild_pokemons,
        .static_pokemons,
        .given_pokemons,
        .pokeball_items,
        .hidden_hollows,
        .text,
        => return error.DidNotConsumeData,
    }
    // Every branch above returns.
    unreachable;
}
/// Randomizes all trainers according to `program.options`, skipping invalid
/// trainers (party size 0) and trainers excluded by name globs.
fn randomize(program: *Program) !void {
    if (program.species_by_type.count() == 0) {
        std.log.err("No types were found. Cannot randomize.", .{});
        return;
    }
    for (program.trainers.values()) |*trainer, i| {
        // Trainers with 0 party members are considered "invalid" trainers
        // and will not be randomized.
        if (trainer.party_size == 0)
            continue;
        const key = program.trainers.keys()[i];
        if (program.trainer_names.get(key)) |name| {
            // --include-trainer takes precedence over --exclude-trainer.
            if (util.glob.matchesOneOf(name, program.options.included_trainers) == null and
                util.glob.matchesOneOf(name, program.options.excluded_trainers) != null)
                continue;
        }
        try program.randomizeTrainer(trainer);
    }
}
/// Randomizes a single trainer in-place: party size, party type (whether
/// members carry items/moves), and each member's species/item/moves per
/// `program.options`.
fn randomizeTrainer(program: *Program, trainer: *Trainer) !void {
    const allocator = program.allocator;
    // When themed, one type/ability is picked per trainer and shared by the
    // whole party. Otherwise the field is left undefined and must not be
    // read (see `Themes`).
    const themes = Themes{
        .type = switch (program.options.types) {
            .themed => util.random.item(program.random, program.species_by_type.keys()).?.*,
            else => undefined,
        },
        .ability = switch (program.options.abilities) {
            .themed => util.random.item(program.random, program.species_by_ability.keys()).?.*,
            else => undefined,
        },
    };
    const wants_moves = switch (program.options.moves) {
        .unchanged => trainer.party_type.haveMoves(),
        .none => false,
        .best,
        .best_for_level,
        .random_learnable,
        .random,
        => true,
    };
    const wants_items = switch (program.options.held_items) {
        .unchanged => trainer.party_type.haveItem(),
        .none => false,
        .random => true,
    };
    trainer.party_size = switch (program.options.party_size) {
        .unchanged => math.clamp(
            trainer.party_size,
            program.options.party_size_min,
            program.options.party_size_max,
        ),
        .random => program.random.intRangeAtMost(
            u8,
            program.options.party_size_min,
            program.options.party_size_max,
        ),
        .minimum => program.options.party_size_min,
    };
    // Party type encodes which extra fields each member serializes.
    trainer.party_type = switch (wants_moves) {
        true => switch (wants_items) {
            true => format.PartyType.both,
            false => format.PartyType.moves,
        },
        false => switch (wants_items) {
            true => format.PartyType.item,
            false => format.PartyType.none,
        },
    };
    // Fill trainer party with more Pokémons until `party_size` have been
    // reached. The Pokémons we fill the party with are Pokémons that are
    // already in the party. This code assumes that at least 1 Pokémon
    // is in the party, which is always true as we don't randomize trainers
    // with a party size of 0.
    const party_member_max = trainer.party.count();
    var party_member: u8 = 0;
    var i: u8 = 0;
    while (i < trainer.party_size) : (i += 1) {
        const result = try trainer.party.getOrPut(allocator, i);
        if (!result.found_existing) {
            // Clone an existing member (cycling through the party) into the
            // empty slot.
            const member = trainer.party.values()[party_member];
            result.value_ptr.* = .{
                .species = member.species,
                .item = member.item,
                .level = member.level,
                .moves = try member.moves.clone(allocator),
            };
            party_member += 1;
            party_member %= @intCast(u8, party_member_max);
        }
    }
    for (trainer.party.values()[0..trainer.party_size]) |*member| {
        switch (program.options.party_pokemons) {
            .randomize => try randomizePartyMember(program, themes, trainer.*, member),
            .unchanged => {},
        }
        switch (program.options.held_items) {
            .unchanged => {},
            .none => member.item = null,
            .random => member.item = util.random.item(
                program.random,
                program.held_items.keys(),
            ).?.*,
        }
        switch (program.options.moves) {
            .none, .unchanged => {},
            .best, .best_for_level, .random_learnable, .random => {
                // Ensure that the pokemon has 4 moves
                _ = try member.moves.getOrPutValue(program.allocator, 0, 0);
                _ = try member.moves.getOrPutValue(program.allocator, 1, 0);
                _ = try member.moves.getOrPutValue(program.allocator, 2, 0);
                _ = try member.moves.getOrPutValue(program.allocator, 3, 0);
            },
        }
        switch (program.options.moves) {
            .none, .unchanged => {},
            .best, .best_for_level => if (member.species) |species| {
                const pokemon = program.pokemons.get(species).?;
                // `best` ignores the member's level; `best_for_level` only
                // considers moves learnable at the member's level.
                const level = switch (program.options.moves) {
                    .best => math.maxInt(u8),
                    .best_for_level => member.level orelse math.maxInt(u8),
                    else => unreachable,
                };
                fillWithBestMovesForLevel(
                    program.moves,
                    pokemon,
                    level,
                    &member.moves,
                );
            },
            .random_learnable => if (member.species) |species| {
                const pokemon = program.pokemons.get(species).?;
                fillWithRandomLevelUpMoves(program.random, pokemon.lvl_up_moves, &member.moves);
            },
            .random => fillWithRandomMoves(program.random, program.moves, &member.moves),
        }
    }
}
/// Replaces the contents of `moves` with the best moves (ordered by
/// `RelativeMove.lessThan`) the pokemon can learn by leveling up to `level`.
fn fillWithBestMovesForLevel(
    all_moves: Moves,
    pokemon: Pokemon,
    level: u8,
    moves: *MemberMoves,
) void {
    // Before picking best moves, we make sure the Pokémon has no moves.
    mem.set(u16, moves.values(), 0);
    // Go over all level up moves, and replace the current moves with better moves
    // as we find them
    for (pokemon.lvl_up_moves.values()) |lvl_up_move| {
        // Move id 0 is the null move.
        if (lvl_up_move.id == 0)
            continue;
        if (level < lvl_up_move.level)
            continue;
        // Pokémon already has this move. We don't want to have the same move twice.
        if (hasMove(moves.values(), lvl_up_move.id))
            continue;
        const this_move = all_moves.get(lvl_up_move.id) orelse continue;
        const this_move_r = RelativeMove.from(pokemon, this_move);
        for (moves.values()) |*move| {
            const prev_move = all_moves.get(move.*) orelse {
                // Could not find info about this move. Assume it's an invalid or bad
                // move and replace it.
                move.* = lvl_up_move.id;
                break;
            };
            const prev_move_r = RelativeMove.from(pokemon, prev_move);
            if (!this_move_r.lessThan(prev_move_r)) {
                // We found a move that is better than what the Pokémon already has!
                move.* = lvl_up_move.id;
                break;
            }
        }
    }
}
/// Fills `moves` with unique random picks from `all_moves`. Slots that
/// cannot be filled with a unique real move are set to 0 (the null move).
fn fillWithRandomMoves(random: rand.Random, all_moves: Moves, moves: *MemberMoves) void {
    const has_null_move = all_moves.get(0) != null;
    for (moves.values()) |*move, i| {
        // We need to have more moves in the game than the party member can have,
        // otherwise, we cannot pick only unique moves. Also, move `0` is the
        // `null` move, so we don't count that as a move we can pick from.
        if (all_moves.count() - @boolToInt(has_null_move) <= i) {
            move.* = 0;
            continue;
        }
        // Loop until we have picked a move that the party member does not already
        // have. Termination is guaranteed by the count check above.
        move.* = while (true) {
            const pick = util.random.item(random, all_moves.keys()).?.*;
            if (pick != 0 and !hasMove(moves.values()[0..i], pick))
                break pick;
        } else unreachable;
    }
}
/// Fills `moves` with unique random picks from the pokemon's level-up
/// learnset. Slots that cannot be filled uniquely are set to 0.
fn fillWithRandomLevelUpMoves(
    random: rand.Random,
    lvl_up_moves: LvlUpMoves,
    moves: *MemberMoves,
) void {
    for (moves.values()) |*move, i| {
        // We need to have more moves in the learnset than the party member can have,
        // otherwise, we cannot pick only unique moves.
        // TODO: This code does not take into account that `lvl_up_moves` can contain
        //       duplicates or moves with `id == null`. We need to do a count of
        //       "valid moves" from the learnset and do this check against that
        //       instead.
        if (lvl_up_moves.count() <= i) {
            move.* = 0;
            continue;
        }
        // Loop until we have picked a move that the party member does not already
        // have.
        // NOTE(review): per the TODO above, if the learnset holds too few
        // distinct non-zero ids this loop will not terminate — confirm the
        // input data rules this out.
        move.* = while (true) {
            const pick = util.random.item(random, lvl_up_moves.values()).?.id;
            if (pick != 0 and !hasMove(moves.values()[0..i], pick))
                break pick;
        } else unreachable;
    }
}
/// Per-trainer theme picks, computed once in `randomizeTrainer`. A field is
/// only meaningful when the matching option is `ThemeOption.themed`;
/// otherwise it is left `undefined` and must not be read.
const Themes = struct {
    type: u16,
    ability: u16,
};
/// Picks a new species (and, when picking by ability, a matching ability
/// index) for a single party member, honoring the type/ability theme options
/// and the stats option.
fn randomizePartyMember(
    program: *Program,
    themes: Themes,
    trainer: Trainer,
    member: *PartyMember,
) !void {
    const allocator = program.allocator;
    const species = member.species orelse return;
    const level = member.level orelse trainer.partyAverageLevel();
    const ability_index = member.ability orelse 0;
    // Candidate species satisfying the --types option. Falls back to all
    // species when the current species/type cannot be resolved.
    const type_set = switch (program.options.types) {
        .same => blk: {
            const pokemon = program.pokemons.get(species) orelse
                break :blk program.species;
            if (pokemon.types.count() == 0)
                break :blk program.species;
            const t = util.random.item(program.random, pokemon.types.keys()).?.*;
            break :blk program.species_by_type.get(t).?;
        },
        .themed => program.species_by_type.get(themes.type).?,
        .random => program.species,
    };
    // Set when the new member must end up with a specific ability; consumed
    // by the `defer` below.
    var new_ability: ?u16 = null;
    // Candidate species satisfying the --abilities option.
    const ability_set = switch (program.options.abilities) {
        .same => blk: {
            const pokemon = program.pokemons.get(species) orelse
                break :blk program.species;
            const ability = pokemon.abilities.get(ability_index) orelse
                break :blk program.species;
            if (ability == 0)
                break :blk program.species;
            new_ability = ability;
            break :blk program.species_by_ability.get(ability).?;
        },
        .themed => blk: {
            new_ability = themes.ability;
            break :blk program.species_by_ability.get(themes.ability).?;
        },
        .random => program.species,
    };
    if (program.options.abilities != .random and program.options.types != .random) {
        // The intersection between the type_set and ability_set will give
        // us all pokémons that have a certain type+ability pair. This is
        // the set we will pick from.
        var intersection = program.intersection.promote(allocator);
        intersection.clearRetainingCapacity();
        try util.set.intersectInline(&intersection, ability_set, type_set);
        program.intersection = intersection.unmanaged;
    }
    // Pick the first set that has items in it.
    const pick_from = if (program.intersection.count() != 0)
        program.intersection
    else if (program.options.abilities != .random and ability_set.count() != 0)
        ability_set
    else if (program.options.types != .random and type_set.count() != 0)
        type_set
    else
        program.species;
    // When we have picked a new species for our Pokémon we also need
    // to fix the ability the Pokémon have, if we're picking Pokémons
    // based on ability.
    defer if (member.species) |new_species| done: {
        const ability_to_find = new_ability orelse break :done;
        const pokemon = program.pokemons.get(new_species) orelse break :done;
        // Find the index of the ability we want the party member to
        // have. If we don't find the ability. The best we can do is
        // just let the Pokémon keep the ability it already has.
        if (findAbility(pokemon.abilities.iterator(), ability_to_find)) |entry|
            member.ability = entry.key_ptr.*;
    };
    switch (program.options.stats) {
        .follow_level => {
            member.species = try randomSpeciesWithSimularTotalStats(
                program,
                pick_from,
                levelScaling(program.stats.min, program.stats.max, level),
            );
            return;
        },
        .simular => if (program.pokemons.get(species)) |pokemon| {
            member.species = try randomSpeciesWithSimularTotalStats(
                program,
                pick_from,
                pokemon.total_stats,
            );
            return;
        } else {},
        .random => {},
    }
    // If the above switch didn't return, the best we can do is just pick a
    // random Pokémon from the pick_from set
    member.species = util.random.item(program.random, pick_from.keys()).?.*;
}
/// Picks a random species from `pick_from` whose total stats are close to
/// `total_stats`, widening the accepted range by 5 in each direction until
/// at least 25 candidates are collected.
/// NOTE(review): candidates already in range are appended again on every
/// widening pass, so closer matches are weighted more heavily — confirm
/// this bias is intentional. Also assumes `pick_from` is non-empty;
/// otherwise the loop never terminates.
fn randomSpeciesWithSimularTotalStats(program: *Program, pick_from: Set, total_stats: u16) !u16 {
    const allocator = program.allocator;
    // isize so the lower bound may go negative while widening.
    var min = @intCast(isize, total_stats);
    var max = min;
    // `program.simular` is reused across calls to keep its capacity.
    program.simular.shrinkRetainingCapacity(0);
    while (program.simular.items.len < 25) : ({
        min -= 5;
        max += 5;
    }) {
        for (pick_from.keys()) |s| {
            const p = program.pokemons.get(s).?;
            const total = @intCast(isize, p.total_stats);
            if (min <= total and total <= max)
                try program.simular.append(allocator, s);
        }
    }
    return util.random.item(program.random, program.simular.items).?.*;
}
/// Maps `level` onto the stat range [min, max] with the quadratic
/// -0.0001*x^2 + 0.02*x scaled by (max - min): fast growth at low levels,
/// reaching 75% of the range at level 50 and 100% at level 100.
fn levelScaling(min: u16, max: u16, level: u16) u16 {
    const lo = @intToFloat(f64, min);
    const span = @intToFloat(f64, max) - lo;
    const x = @intToFloat(f64, level);
    const quadratic = -0.0001 * span * std.math.pow(f64, x, 2);
    const linear = 0.02 * span * x;
    return @floatToInt(u16, quadratic + linear + lo);
}
/// Returns true if `id` occurs in `moves`. Uses the standard library scan
/// instead of the third-party `ziter` helper.
fn hasMove(moves: []const u16, id: u16) bool {
    return std.mem.indexOfScalar(u16, moves, id) != null;
}
/// Returns the first entry in `abilities` whose value equals `ability`, or
/// null if none matches. Plain loop instead of the third-party `ziter`
/// helper.
fn findAbility(abilities: Abilities.Iterator, ability: u16) ?Abilities.Entry {
    var iter = abilities;
    while (iter.next()) |entry| {
        if (entry.value_ptr.* == ability)
            return entry;
    }
    return null;
}
/// An inclusive `min`/`max` pair of type `T`.
fn MinMax(comptime T: type) type {
    return struct {
        min: T,
        max: T,
    };
}
// Convenience aliases for the map/set types used throughout. All are array
// hash maps, which preserve insertion order — keeping iteration (and thus
// output for a given seed) deterministic.
const Abilities = std.AutoArrayHashMapUnmanaged(u8, u16);
const LvlUpMoves = std.AutoArrayHashMapUnmanaged(u16, LvlUpMove);
const MemberMoves = std.AutoArrayHashMapUnmanaged(u8, u16);
const Moves = std.AutoArrayHashMapUnmanaged(u16, Move);
const Party = std.AutoArrayHashMapUnmanaged(u8, PartyMember);
const Pokemons = std.AutoArrayHashMapUnmanaged(u16, Pokemon);
// A set is a map from id to nothing.
const Set = std.AutoArrayHashMapUnmanaged(u16, void);
const SpeciesBy = std.AutoArrayHashMapUnmanaged(u16, Set);
const TrainerNames = std.AutoArrayHashMapUnmanaged(u16, []const u8);
const Trainers = std.AutoArrayHashMapUnmanaged(u16, Trainer);
/// Collects the set of pickable species: catchable (non-zero catch rate),
/// present in `pokedex`, and not matching any `excluded_pokemons` glob.
/// Caller owns the returned set.
fn pokedexPokemons(
    allocator: mem.Allocator,
    pokedex: Set,
    pokemons: Pokemons,
    excluded_pokemons: []const []const u8,
) !Set {
    var res = Set{};
    errdefer res.deinit(allocator);
    var iter = pokemons.iterator();
    while (iter.next()) |entry| {
        const pokemon = entry.value_ptr.*;
        if (pokemon.catch_rate == 0)
            continue;
        if (pokedex.get(pokemon.pokedex_entry) == null)
            continue;
        if (util.glob.matchesOneOf(pokemon.name, excluded_pokemons) != null)
            continue;
        _ = try res.put(allocator, entry.key_ptr.*, {});
    }
    return res;
}
/// Returns the smallest and largest `total_stats` among the given species.
/// With an empty `species` set this yields the identity range
/// {maxInt(u16), 0}.
fn minMaxStats(pokemons: Pokemons, species: Set) MinMax(u16) {
    var result = MinMax(u16){
        .min = math.maxInt(u16),
        .max = 0,
    };
    for (species.keys()) |key| {
        const total = pokemons.get(key).?.total_stats;
        if (total < result.min)
            result.min = total;
        if (total > result.max)
            result.max = total;
    }
    return result;
}
/// Builds a map from type id to the set of species (out of `species`) that
/// have that type. Caller owns the result, including the nested sets.
fn speciesByType(allocator: mem.Allocator, pokemons: Pokemons, species: Set) !SpeciesBy {
    var result = SpeciesBy{};
    errdefer {
        for (result.values()) |*type_set|
            type_set.deinit(allocator);
        result.deinit(allocator);
    }
    for (species.keys()) |species_id| {
        const pokemon = pokemons.get(species_id).?;
        for (pokemon.types.keys()) |type_id| {
            const entry = try result.getOrPutValue(allocator, type_id, .{});
            _ = try entry.value_ptr.put(allocator, species_id, {});
        }
    }
    return result;
}
/// Builds a map from ability id to the set of species (out of `species`)
/// that can have that ability. Ability id 0 is treated as "no ability" and
/// skipped. Caller owns the result, including the nested sets.
fn speciesByAbility(allocator: mem.Allocator, pokemons: Pokemons, species: Set) !SpeciesBy {
    var result = SpeciesBy{};
    errdefer {
        for (result.values()) |*ability_set|
            ability_set.deinit(allocator);
        result.deinit(allocator);
    }
    for (species.keys()) |species_id| {
        const pokemon = pokemons.get(species_id).?;
        for (pokemon.abilities.values()) |ability| {
            if (ability == 0)
                continue;
            const entry = try result.getOrPutValue(allocator, ability, .{});
            _ = try entry.value_ptr.put(allocator, species_id, {});
        }
    }
    return result;
}
const Trainer = struct {
    party_size: u8 = 0,
    party_type: format.PartyType = .none,
    party: Party = Party{},

    /// Average level of the party members that have a level set.
    /// Returns 2 when no member has a level.
    fn partyAverageLevel(trainer: Trainer) u8 {
        var total: u16 = 0;
        var with_level: u16 = 0;
        for (trainer.party.values()) |member| {
            if (member.level) |level| {
                total += level;
                with_level += 1;
            }
        }
        if (with_level == 0)
            return 2;
        return @intCast(u8, total / with_level);
    }
};
/// One member of a trainer's party. `null` fields were not present in the
/// input.
const PartyMember = struct {
    species: ?u16 = null,
    item: ?u16 = null,
    level: ?u8 = null,
    // Index into the species' `abilities` map, not an ability id.
    ability: ?u8 = null,
    // Move slot index -> move id.
    moves: MemberMoves = MemberMoves{},
};
/// A move learned on level-up. Id 0 is the null move.
const LvlUpMove = struct {
    level: u16 = 0,
    id: u16 = 0,
};
/// The subset of move data this randomizer reads (see `useGame`).
const Move = struct {
    power: u8 = 0,
    accuracy: u8 = 0,
    pp: u8 = 0,
    // Defaults to maxInt as a sentinel — presumably no real type id uses
    // it, so an unset type never matches a pokemon's types. TODO confirm.
    type: u16 = math.maxInt(u16),
};
// A move's strength relative to the pokemon that uses it: the same-type
// attack bonus is folded into `power`.
const RelativeMove = struct {
    power: u16,
    accuracy: u8,
    pp: u8,

    fn from(p: Pokemon, m: Move) RelativeMove {
        // Same-type attack bonus: +50% power when the pokemon shares the
        // move's type.
        const is_stab = p.types.get(m.type) != null;
        const stab_bonus: u8 = if (is_stab) m.power / 2 else 0;
        return RelativeMove{
            .power = @as(u16, m.power) + stab_bonus,
            .accuracy = m.accuracy,
            .pp = m.pp,
        };
    }

    /// Strict ordering by power, then accuracy, then pp.
    fn lessThan(a: RelativeMove, b: RelativeMove) bool {
        return switch (math.order(a.power, b.power)) {
            .lt => true,
            .gt => false,
            .eq => switch (math.order(a.accuracy, b.accuracy)) {
                .lt => true,
                .gt => false,
                .eq => a.pp < b.pp,
            },
        };
    }
};
/// The subset of pokemon data this randomizer reads (populated in
/// `useGame`).
const Pokemon = struct {
    // Individual stats, indexed via @enumToInt of the stats tag in
    // `useGame`.
    stats: [6]u8 = [_]u8{0} ** 6,
    // Sum of `stats`; kept in sync incrementally by `useGame`.
    total_stats: u16 = 0,
    types: Set = Set{},
    // Ability index -> ability id.
    abilities: Abilities = Abilities{},
    lvl_up_moves: LvlUpMoves = LvlUpMoves{},
    // A catch rate of 0 makes the species unpickable (see
    // `pokedexPokemons`).
    catch_rate: usize = 1,
    pokedex_entry: u16 = math.maxInt(u16),
    // Lowercased name, used for --exclude-pokemon glob matching.
    name: []u8 = "",
};
// Number of different seeds exercised by the exclusion test below.
const number_of_seeds = 40;
const Pattern = util.testing.Pattern;
test {
const test_case = try util.testing.filter(util.testing.test_case, &.{
".items[*].battle_effect=*",
".moves[*].power=*",
".moves[*].type=*",
".moves[*].pp=*",
".moves[*].accuracy=*",
".pokemons[*].catch_rate=*",
".pokemons[*].pokedex_entry=*",
".pokemons[*].name=*",
".pokemons[*].stats.*",
".pokemons[*].types[*]=*",
".pokemons[*].abilities[*]=*",
".pokemons[*].moves[*].*",
".pokedex[*].*",
".trainers[*].*",
});
defer testing.allocator.free(test_case);
try util.testing.runProgramFindPatterns(Program, .{
.in = test_case,
.args = &[_][]const u8{"--seed=0"},
.patterns = &[_]Pattern{
Pattern.glob(91, 91, ".trainers[*].party_type=both"),
Pattern.glob(14, 14, ".trainers[*].party_type=item"),
Pattern.glob(87, 87, ".trainers[*].party_type=moves"),
Pattern.glob(621, 621, ".trainers[*].party_type=none"),
Pattern.glob(408, 408, ".trainers[*].party[*].item=*"),
Pattern.glob(2400, 2400, ".trainers[*].party[*].moves[*]=*"),
Pattern.glob(252, 252, ".trainers[*].party_size=1"),
Pattern.glob(296, 296, ".trainers[*].party_size=2"),
Pattern.glob(187, 187, ".trainers[*].party_size=3"),
Pattern.glob(26, 26, ".trainers[*].party_size=4"),
Pattern.glob(13, 13, ".trainers[*].party_size=5"),
Pattern.glob(39, 39, ".trainers[*].party_size=6"),
},
});
// Held items
try util.testing.runProgramFindPatterns(Program, .{
.in = test_case,
.args = &[_][]const u8{ "--held-items=none", "--seed=0" },
.patterns = &[_]Pattern{
Pattern.glob(0, 0, ".trainers[*].party_type=both"),
Pattern.glob(0, 0, ".trainers[*].party_type=item"),
Pattern.glob(178, 178, ".trainers[*].party_type=moves"),
Pattern.glob(635, 635, ".trainers[*].party_type=none"),
},
});
try util.testing.runProgramFindPatterns(Program, .{
.in = test_case,
.args = &[_][]const u8{ "--held-items=unchanged", "--seed=0" },
.patterns = &[_]Pattern{
Pattern.glob(91, 91, ".trainers[*].party_type=both"),
Pattern.glob(14, 14, ".trainers[*].party_type=item"),
Pattern.glob(87, 87, ".trainers[*].party_type=moves"),
Pattern.glob(621, 621, ".trainers[*].party_type=none"),
},
});
try util.testing.runProgramFindPatterns(Program, .{
.in = test_case,
.args = &[_][]const u8{ "--held-items=random", "--seed=0" },
.patterns = &[_]Pattern{
Pattern.glob(178, 178, ".trainers[*].party_type=both"),
Pattern.glob(635, 635, ".trainers[*].party_type=item"),
Pattern.glob(0, 0, ".trainers[*].party_type=moves"),
Pattern.glob(0, 0, ".trainers[*].party_type=none"),
Pattern.glob(1808, 1808, ".trainers[*].party[*].item=*"),
},
});
// Moves
// TODO: best|best_for_level|random_learnable
try util.testing.runProgramFindPatterns(Program, .{
.in = test_case,
.args = &[_][]const u8{ "--moves=none", "--seed=0" },
.patterns = &[_]Pattern{
Pattern.glob(0, 0, ".trainers[*].party_type=both"),
Pattern.glob(0, 0, ".trainers[*].party_type=moves"),
Pattern.glob(105, 105, ".trainers[*].party_type=item"),
Pattern.glob(708, 708, ".trainers[*].party_type=none"),
},
});
try util.testing.runProgramFindPatterns(Program, .{
.in = test_case,
.args = &[_][]const u8{ "--moves=unchanged", "--seed=0" },
.patterns = &[_]Pattern{
Pattern.glob(91, 91, ".trainers[*].party_type=both"),
Pattern.glob(14, 14, ".trainers[*].party_type=item"),
Pattern.glob(87, 87, ".trainers[*].party_type=moves"),
Pattern.glob(621, 621, ".trainers[*].party_type=none"),
},
});
try util.testing.runProgramFindPatterns(Program, .{
.in = test_case,
.args = &[_][]const u8{ "--moves=random", "--seed=0" },
.patterns = &[_]Pattern{
Pattern.glob(105, 105, ".trainers[*].party_type=both"),
Pattern.glob(0, 0, ".trainers[*].party_type=item"),
Pattern.glob(708, 708, ".trainers[*].party_type=moves"),
Pattern.glob(0, 0, ".trainers[*].party_type=none"),
Pattern.glob(7232, 7232, ".trainers[*].party[*].moves[*]=*"),
},
});
// Moves + Held items
try util.testing.runProgramFindPatterns(Program, .{
.in = test_case,
.args = &[_][]const u8{ "--held-items=none", "--moves=none", "--seed=0" },
.patterns = &[_]Pattern{
Pattern.glob(0, 0, ".trainers[*].party_type=both"),
Pattern.glob(0, 0, ".trainers[*].party_type=item"),
Pattern.glob(0, 0, ".trainers[*].party_type=moves"),
Pattern.glob(813, 813, ".trainers[*].party_type=none"),
},
});
// Party size
try util.testing.runProgramFindPatterns(Program, .{
.in = test_case,
.args = &[_][]const u8{ "--party-size-min=2", "--party-size-max=5", "--seed=0" },
.patterns = &[_]Pattern{
Pattern.glob(0, 0, ".trainers[*].party_size=1"),
Pattern.glob(548, 548, ".trainers[*].party_size=2"),
Pattern.glob(187, 187, ".trainers[*].party_size=3"),
Pattern.glob(26, 26, ".trainers[*].party_size=4"),
Pattern.glob(52, 52, ".trainers[*].party_size=5"),
Pattern.glob(0, 0, ".trainers[*].party_size=6"),
},
});
try util.testing.runProgramFindPatterns(Program, .{
.in = test_case,
.args = &[_][]const u8{
"--party-size=minimum", "--party-size-min=2",
"--party-size-max=5", "--seed=0",
},
.patterns = &[_]Pattern{
Pattern.glob(0, 0, ".trainers[*].party_size=1"),
Pattern.glob(813, 813, ".trainers[*].party_size=2"),
Pattern.glob(0, 0, ".trainers[*].party_size=3"),
Pattern.glob(0, 0, ".trainers[*].party_size=4"),
Pattern.glob(0, 0, ".trainers[*].party_size=5"),
Pattern.glob(0, 0, ".trainers[*].party_size=6"),
},
});
try util.testing.runProgramFindPatterns(Program, .{
.in = test_case,
.args = &[_][]const u8{
"--party-size=random", "--party-size-min=2",
"--party-size-max=5", "--seed=0",
},
.patterns = &[_]Pattern{
Pattern.glob(0, 0, ".trainers[*].party_size=1"),
Pattern.glob(0, 813, ".trainers[*].party_size=2"),
Pattern.glob(0, 813, ".trainers[*].party_size=3"),
Pattern.glob(0, 813, ".trainers[*].party_size=4"),
Pattern.glob(0, 813, ".trainers[*].party_size=5"),
Pattern.glob(0, 0, ".trainers[*].party_size=6"),
},
});
// Test excluding system by running 100 seeds and checking that none of them pick the
// excluded pokemons
var seed: u8 = 0;
while (seed < number_of_seeds) : (seed += 1) {
try util.testing.runProgramFindPatterns(Program, .{
.in = test_case,
.args = &[_][]const u8{
"--exclude-pokemon=Gible",
"--exclude-pokemon=Weedle",
"--party-pokemons=randomize",
(try util.testing.boundPrint(16, "--seed={}", .{seed})).slice(),
},
.patterns = &[_]Pattern{
Pattern.glob(0, 0, ".trainers[*].party[*].species=13"),
Pattern.glob(0, 0, ".trainers[*].party[*].species=443"),
},
});
}
}
// TODO: Test these parameters
// "-p, --party-pokemons <unchanged|randomize> " ++
// "Wether the trainers pokemons should be randomized. (default: unchanged)",
// "-S, --stats <random|simular|follow_level> " ++
// "The total stats the picked pokemon should have if pokemons are randomized. " ++
// "(default: random)",
// "-t, --types <random|same|themed> " ++
// "Which types each trainer should use if pokemons are randomized. " ++
// "(default: random)",
// "-a, --abilities <random|same|themed> " ++
// "Which ability each party member should have. (default: random)",
|
src/randomizers/tm35-rand-trainers.zig
|
const std = @import("std");
/// Returns the names of all fields of enum type `e`, in declaration order.
/// Evaluated at comptime; the returned array is a value (no allocation).
pub fn enumFieldNames(comptime e: type) [@typeInfo(e).Enum.fields.len][]const u8 {
    const info = @typeInfo(e).Enum;
    var names: [info.fields.len][]const u8 = undefined;
    inline for (info.fields) |field, idx| {
        names[idx] = field.name;
    }
    return names;
}
test "util.enumFieldNames" {
const e = enum {
cool,
amazing,
};
const names = comptime enumFieldNames(e);
std.debug.assert(std.mem.eql(u8, names[0], "cool"));
std.debug.assert(std.mem.eql(u8, names[1], "amazing"));
}
/// Builds one instance of union `u` for every union field whose payload type
/// is `t`, each initialized with `val`, and returns them as a slice.
/// Must be evaluated at comptime: `index` is a comptime_int and the returned
/// slice points into function-local storage.
pub fn unionCreateFieldsWithType(comptime u: type, comptime t: type, comptime val: t) []u {
    const len = @typeInfo(u).Union.fields.len;
    var fields: [len]u = undefined;
    var index: comptime_int = 0;
    // Only the field itself is needed; the positional index capture of the
    // original was unused and has been dropped.
    inline for (@typeInfo(u).Union.fields) |f| {
        if (f.field_type == t) {
            fields[index] = @unionInit(u, f.name, val);
            index += 1;
        }
    }
    return fields[0..index];
}
/// Case-insensitive equality of two slices. Only meaningful for T == u8,
/// since elements are passed through toLower (ASCII only).
pub fn eqlNoCase(comptime T: type, a: []const T, b: []const T) bool {
    if (a.len != b.len) return false;
    // Identical backing memory (and equal lengths) means equal contents.
    if (a.ptr == b.ptr) return true;
    var idx: usize = 0;
    while (idx < a.len) : (idx += 1) {
        if (toLower(a[idx]) != toLower(b[idx])) return false;
    }
    return true;
}
/// Case-insensitive substring search (ASCII only). Returns the index of the
/// first occurrence of `needle` in `haystack`, or null when absent.
pub fn indexOfNoCase(comptime T: type, haystack: []const T, needle: []const T) ?usize {
    // Guard: the original computed `haystack.len - needle.len` unconditionally,
    // which underflows usize when the needle is longer than the haystack.
    if (needle.len > haystack.len) return null;
    var i: usize = 0;
    const end = haystack.len - needle.len;
    while (i <= end) : (i += 1) {
        if (eqlNoCase(T, haystack[i .. i + needle.len], needle)) return i;
    }
    return null;
}
/// A homogeneous two-element tuple type: both members have type T.
pub fn Pair(comptime T: type) type {
    return struct {
        a: T,
        b: T,
    };
}
/// Counts the nodes of a singly-linked list-like queue by walking the `next`
/// chain starting from `tail_queue.first`. O(n).
pub fn tailQueueLen(tail_queue: anytype) usize {
    var count: usize = 0;
    var node = tail_queue.first;
    while (node) |n| {
        count += 1;
        node = n.next;
    }
    return count;
}
/// Lower-cases an ASCII byte buffer in place.
pub fn toLowerStr(buffer: []u8) void {
    for (buffer) |*letter| {
        // 'A'..'Z' maps down by 0x20; everything else is left untouched.
        if ('A' <= letter.* and letter.* <= 'Z') letter.* += 32;
    }
}
/// Upper-cases an ASCII byte buffer in place.
pub fn toUpperStr(buffer: []u8) void {
    for (buffer) |*letter| {
        // 'a'..'z' maps up by 0x20; everything else is left untouched.
        if ('a' <= letter.* and letter.* <= 'z') letter.* -= 32;
    }
}
/// Lower-cases a single ASCII byte; non-uppercase bytes pass through.
pub fn toLower(char: u8) u8 {
    return if ('A' <= char and char <= 'Z') char + 32 else char;
}
/// Upper-cases a single ASCII byte; non-lowercase bytes pass through.
pub fn toUpper(char: u8) u8 {
    return if ('a' <= char and char <= 'z') char - 32 else char;
}
/// True for ASCII letters (A-Z or a-z).
pub fn isAlpha(char: u8) bool {
    return ('A' <= char and char <= 'Z') or ('a' <= char and char <= 'z');
}
/// True for ASCII uppercase letters (A-Z).
pub fn isUpper(char: u8) bool {
    return char >= 'A' and char <= 'Z';
}
/// True for ASCII lowercase letters (a-z).
pub fn isLower(char: u8) bool {
    return char >= 'a' and char <= 'z';
}
test "util.toLower and util.toUpper" {
const str = "THIS IS A TesT.";
var buffer: [100]u8 = undefined;
std.mem.copy(u8, buffer[0..str.len], str);
toLowerStr(buffer[0..str.len]);
std.testing.expect(std.mem.eql(u8,"this is a test.", buffer[0..str.len]));
toUpperStr(buffer[0..str.len]);
std.testing.expect(std.mem.eql(u8,"THIS IS A TEST.", buffer[0..str.len]));
}
|
src/util.zig
|
const std = @import("std");
const c = @cImport({
@cInclude("uv.h");
});
const util = @import("util.zig");
// Global allocator shared by all libuv callbacks; leak-checked on exit in main.
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const alloc = &gpa.allocator;
// The three libuv pipes of the tee: stdin is read, and every chunk is
// duplicated to both stdout and the output file.
var stdin_pipe: c.uv_pipe_t = undefined;
var stdout_pipe: c.uv_pipe_t = undefined;
var file_pipe: c.uv_pipe_t = undefined;
// Read parts
// libuv allocation callback: hands libuv a freshly allocated buffer of the
// suggested size. The buffer is released again in read_stdin.
export fn alloc_buffer(handle: ?*c.uv_handle_t, suggested_size: usize, buf: ?*c.uv_buf_t) void {
    std.debug.assert(buf != null);
    buf.?.* = c.uv_buf_init((alloc.alloc(u8, suggested_size) catch unreachable).ptr, @intCast(u32, suggested_size));
}
// libuv read callback for stdin: duplicates every chunk to both the stdout
// pipe and the file pipe (the "tee"), then releases the read buffer.
export fn read_stdin(stream: ?*c.uv_stream_t, nread: isize, buf: ?*const c.uv_buf_t) void {
    std.debug.assert(stream != null);
    std.debug.assert(buf != null);
    if (nread < 0) {
        // Negative nread is an error code; UV_EOF means stdin is exhausted,
        // so close all three pipes and let the loop wind down.
        // NOTE(review): other negative error codes are silently ignored.
        if (nread == c.UV_EOF) {
            _ = c.uv_close(@ptrCast(?*c.uv_handle_t, &stdin_pipe), null);
            _ = c.uv_close(@ptrCast(?*c.uv_handle_t, &stdout_pipe), null);
            _ = c.uv_close(@ptrCast(?*c.uv_handle_t, &file_pipe), null);
        }
    } else if (nread > 0) {
        // write_data copies the bytes into its own buffer, so this read
        // buffer can be freed immediately below.
        write_data(@ptrCast(?*c.uv_stream_t, &stdout_pipe), @intCast(u32, nread), buf.?.*, on_stdout_write);
        write_data(@ptrCast(?*c.uv_stream_t, &file_pipe), @intCast(u32, nread), buf.?.*, on_file_write);
    }
    // The buffer was handed out by alloc_buffer with len == suggested_size.
    if (buf.?.base != null) {
        alloc.free(buf.?.base[0..buf.?.len]);
    }
}
// Write parts
// A libuv write request plus the copied buffer it owns; both are freed
// together in free_write_req once the write completes.
const WriteReq = extern struct {
    req: c.uv_write_t,
    buf: c.uv_buf_t,
};
// Recovers the WriteReq wrapper from the embedded uv_write_t and frees the
// copied data buffer as well as the request itself.
fn free_write_req(req: *c.uv_write_t) void {
    const write_req = @fieldParentPtr(WriteReq, "req", req);
    alloc.free(write_req.buf.base[0..write_req.buf.len]);
    alloc.destroy(write_req);
}
// Write-completion callback: releases the request regardless of outcome.
// NOTE(review): a negative `status` (failed write) is silently ignored here.
export fn on_stdout_write(req: ?*c.uv_write_t, status: i32) void {
    if (req) |r| {
        free_write_req(r);
    }
}
// The file pipe uses the exact same completion logic as stdout.
const on_file_write = on_stdout_write;
// Queues an asynchronous write of `size` bytes to `dest`. The data is copied
// into a freshly allocated buffer owned by the WriteReq, so the caller may
// free `buf` as soon as this returns; the copy is freed in free_write_req.
export fn write_data(dest: ?*c.uv_stream_t, size: u32, buf: c.uv_buf_t, cb: c.uv_write_cb) void {
    std.debug.assert(size > 0);
    var req = alloc.create(WriteReq) catch unreachable;
    req.buf = c.uv_buf_init((alloc.alloc(u8, @intCast(usize, size)) catch unreachable).ptr, size);
    std.mem.copy(u8, req.buf.base[0..size], buf.base[0..size]);
    _ = c.uv_write(&req.req, @ptrCast(?*c.uv_stream_t, dest), &req.buf, 1, cb);
}
/// Entry point: tees stdin to both stdout and the file named by argv[1],
/// driven by the default libuv event loop.
pub fn main() !void {
    defer _ = gpa.deinit();
    const args = try std.process.argsAlloc(alloc);
    defer std.process.argsFree(alloc, args);
    if (args.len != 2) {
        std.log.warn("Usage: uv_test <outfile>", .{});
        return error.missing_args;
    }
    // libuv wants a NUL-terminated path.
    const fname = try alloc.dupeZ(u8, args[1]);
    defer alloc.free(fname);
    const loop = c.uv_default_loop();
    _ = c.uv_pipe_init(loop, &stdin_pipe, 0);
    defer _ = c.uv_loop_close(c.uv_default_loop());
    // Wrap fds 0 (stdin) and 1 (stdout) as libuv pipes.
    _ = c.uv_pipe_open(&stdin_pipe, 0);
    _ = c.uv_pipe_init(loop, &stdout_pipe, 0);
    _ = c.uv_pipe_open(&stdout_pipe, 1);
    // Open (or create) the output file synchronously (null callback).
    var file_req: c.uv_fs_t = undefined;
    var fd = c.uv_fs_open(loop, &file_req, fname.ptr, c.O_CREAT | c.O_RDWR, 0o644, null);
    // NOTE(review): fd is not checked for a negative (error) value before use
    // — confirm whether open failure should abort here.
    std.log.info("Opened fd: {d}", .{fd});
    _ = c.uv_pipe_init(loop, &file_pipe, 0);
    _ = c.uv_pipe_open(&file_pipe, fd);
    // Start the read loop; read_stdin fans the data out to the other pipes.
    _ = c.uv_read_start(@ptrCast(*c.uv_stream_t, &stdin_pipe), alloc_buffer, read_stdin);
    _ = c.uv_run(loop, util.castCEnum(c.uv_run, 1, c.UV_RUN_DEFAULT));
}
|
src/uv_examples/uvtee.zig
|
const std = @import("std");
const Version = std.builtin.Version;
const mem = std.mem;
const log = std.log;
const fs = std.fs;
const fmt = std.fmt;
const assert = std.debug.assert;
/// Reads the binary "abilists" file named by argv[1] and prints a
/// human-readable dump of its libraries, versions, targets, functions and
/// objects to stdout. All allocations live in a single arena freed on exit.
pub fn main() !void {
    var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena_instance.deinit();
    const arena = arena_instance.allocator();

    const args = try std.process.argsAlloc(arena);
    const abilists_file_path = args[1];

    // Fixed-size scratch tables; the file format's counts are assumed to fit.
    // NOTE(review): counts larger than these buffers would index out of
    // bounds — confirm the format guarantees these maxima.
    var lib_names_buffer: [8][]const u8 = undefined;
    var target_names_buffer: [32][]const u8 = undefined;
    var versions_buffer: [64]Version = undefined;

    var file = try std.fs.cwd().openFile(abilists_file_path, .{});
    defer file.close();

    const stdout = std.io.getStdOut();
    var bw = std.io.bufferedWriter(stdout.writer());
    const w = bw.writer();
    var br = std.io.bufferedReader(file.reader());
    const r = br.reader();

    // Header: list of NUL-terminated library names, prefixed by a count byte.
    const all_libs = b: {
        try w.writeAll("Libraries:\n");
        const libs_len = try r.readByte();
        var i: u8 = 0;
        while (i < libs_len) : (i += 1) {
            const lib_name = try r.readUntilDelimiterAlloc(arena, 0, 100);
            try w.print(" {d} lib{s}.so\n", .{ i, lib_name });
            lib_names_buffer[i] = lib_name;
        }
        break :b lib_names_buffer[0..libs_len];
    };

    // Header: glibc versions as (major, minor, patch) byte triples.
    const all_versions = b: {
        try w.writeAll("Versions:\n");
        const versions_len = try r.readByte();
        var i: u8 = 0;
        while (i < versions_len) : (i += 1) {
            const major = try r.readByte();
            const minor = try r.readByte();
            const patch = try r.readByte();
            if (patch == 0) {
                try w.print(" {d} GLIBC_{d}.{d}\n", .{ i, major, minor });
            } else {
                try w.print(" {d} GLIBC_{d}.{d}.{d}\n", .{ i, major, minor, patch });
            }
            versions_buffer[i] = .{ .major = major, .minor = minor, .patch = patch };
        }
        break :b versions_buffer[0..versions_len];
    };

    // Header: list of NUL-terminated target triple names.
    const all_targets = b: {
        try w.writeAll("Targets:\n");
        const targets_len = try r.readByte();
        var i: u8 = 0;
        while (i < targets_len) : (i += 1) {
            const target_name = try r.readUntilDelimiterAlloc(arena, 0, 100);
            try w.print(" {d} {s}\n", .{ i, target_name });
            target_names_buffer[i] = target_name;
        }
        break :b target_names_buffer[0..targets_len];
    };

    // The two symbol sections share an identical layout except that object
    // entries additionally carry a 16-bit size field.
    try w.writeAll("Functions:\n");
    try dumpSymbols(arena, r, w, all_libs, all_versions, all_targets, false);
    try w.writeAll("Objects:\n");
    try dumpSymbols(arena, r, w, all_libs, all_versions, all_targets, true);

    try bw.flush();
}

/// Reads and prints one symbol section (functions or objects).
/// Entries sharing a symbol name are run-length encoded: the name appears
/// only before the first entry of a run, and bit 31 of the `targets` bitset
/// marks the last entry of the run. When `with_size` is true each entry also
/// carries a little-endian u16 size (object symbols).
fn dumpSymbols(
    arena: mem.Allocator,
    r: anytype,
    w: anytype,
    all_libs: []const []const u8,
    all_versions: []const Version,
    all_targets: []const []const u8,
    with_size: bool,
) !void {
    const entries_len = try r.readIntLittle(u16);
    var i: u16 = 0;
    var opt_symbol_name: ?[]const u8 = null;
    while (i < entries_len) : (i += 1) {
        const symbol_name = opt_symbol_name orelse n: {
            const name = try r.readUntilDelimiterAlloc(arena, 0, 100);
            opt_symbol_name = name;
            break :n name;
        };
        try w.print(" {s}:\n", .{symbol_name});
        const targets = try r.readIntLittle(u32);
        const size: u16 = if (with_size) try r.readIntLittle(u16) else 0;
        const lib_index = try r.readByte();
        // Bit 31 terminates the run for this symbol name.
        const is_terminal = (targets & (1 << 31)) != 0;
        if (is_terminal) opt_symbol_name = null;
        // Version indices are 7-bit values; the high bit ends the list.
        var ver_buf: [50]u8 = undefined;
        var ver_buf_index: usize = 0;
        while (true) {
            const byte = try r.readByte();
            const last = (byte & 0b1000_0000) != 0;
            ver_buf[ver_buf_index] = @truncate(u7, byte);
            ver_buf_index += 1;
            if (last) break;
        }
        const versions = ver_buf[0..ver_buf_index];
        if (with_size) try w.print(" size: {d}\n", .{size});
        try w.print(" library: lib{s}.so\n", .{all_libs[lib_index]});
        try w.writeAll(" versions:");
        for (versions) |ver_index| {
            const ver = all_versions[ver_index];
            if (ver.patch == 0) {
                try w.print(" {d}.{d}", .{ ver.major, ver.minor });
            } else {
                try w.print(" {d}.{d}.{d}", .{ ver.major, ver.minor, ver.patch });
            }
        }
        try w.writeAll("\n");
        try w.writeAll(" targets:");
        for (all_targets) |target, target_i| {
            if ((targets & (@as(u32, 1) << @intCast(u5, target_i))) != 0) {
                try w.print(" {s}", .{target});
            }
        }
        try w.writeAll("\n");
    }
}
|
list_symbols.zig
|
const std = @import("std");
/// Iterative radix-2 FFT with a precomputed sine lookup table.
/// Create with init(max_length); transforms of any power-of-two length up to
/// max_length can then be run with fft (complex) or fftr (real input).
pub const FFT = struct {
    const Self = @This();
    allocator: *std.mem.Allocator,
    // Maximum supported transform length; also the period, in table entries,
    // of the sine table below.
    length: usize,
    // Sine table covering 3/4 of a period plus one entry, so cos(x) can be
    // read as sin(x + length/4) without a separate cosine table.
    sin: []f32,
    /// Allocates the sine table for transforms up to `max_length` points.
    /// Caller must call deinit() to release it.
    pub fn init(allocator: *std.mem.Allocator, max_length: usize) !Self {
        const sin_table_size = max_length - (max_length / 4) + 1;
        var sin = try allocator.alloc(f32, sin_table_size);
        sin[0] = 0;
        // Angular step per table entry: 2*pi / max_length.
        const a = std.math.pi / @intToFloat(f32, max_length) * 2;
        var i: usize = 1;
        while (i < sin_table_size) : (i += 1) {
            sin[i] = std.math.sin(a * @intToFloat(f32, i));
        }
        return Self{
            .allocator = allocator,
            .length = max_length,
            .sin = sin,
        };
    }
    /// Frees the sine table.
    pub fn deinit(self: *Self) void {
        self.allocator.free(self.sin);
    }
    /// In-place FFT of a real-valued sequence stored in `real` (imag must be
    /// the same length; its input content is assumed zero). Errors if the
    /// lengths differ, exceed max_length, or are not a power of two.
    pub fn fftr(self: Self, real: []f32, imag: []f32) !void {
        if (real.len != imag.len) return error.DataSizeMismatch;
        if (real.len > self.length or !std.math.isPowerOfTwo(real.len)) return error.InvalidSize;
        // we assume that imag is zero, but neither assert nor set zeros.
        // step 1: even-odd shuffle.
        // to calculate FFT of a 2-N point real valued sequence, we can split it
        // into even and odd components for "real" and "imaginary" parts of a
        // complex sequence.
        //
        // real=(x0 x1 x2 x3 x4 x5 ... xN xN+1 xN+2 ... x2N), imag=(0)
        // becomes
        // real=(x0 x2 x4 x6 ... x2N), imag=(1, 3, 5, 7, ... x2N-1)
        const m = real.len;
        var i: usize = 0;
        while (i < m) : (i += 1) {
            if (i % 2 == 0) {
                real[i / 2] = real[i];
            } else {
                imag[(i - 1) / 2] = real[i];
            }
        }
        // step 2: run a half-size complex FFT on the shuffled data.
        var x = real[0 .. m / 2];
        var y = imag[0 .. m / 2];
        try self.fft(x, y);
        // step 3: split operations — untangle the half-size complex result
        // into the spectrum of the original real sequence.
        const sin_step = self.length / m;
        var sin_idx: usize = 0;
        var cos_idx: usize = self.length / 4;
        i = m / 2 - 1;
        while (i > 0) : (i -= 1) {
            sin_idx += sin_step;
            cos_idx += sin_step;
            const sin = self.sin[sin_idx];
            const cos = self.sin[cos_idx];
            const tmp_imag = imag[m / 2 - i] + imag[i];
            const tmp_real = real[m / 2 - i] - real[i];
            real[m / 2 + i] = (real[m / 2 - i] + real[i] + cos * tmp_imag - sin * tmp_real) * 0.5;
            imag[m / 2 + i] = (imag[i] - imag[m / 2 - i] + sin * tmp_imag + cos * tmp_real) * 0.5;
        }
        // DC and Nyquist bins are purely real.
        real[m / 2] = real[0] - imag[0];
        imag[m / 2] = 0;
        real[0] = real[0] + imag[0];
        imag[0] = 0;
        // Use complex conjugate properties to get the rest of the transform.
        i = 1;
        while (i < m / 2) : (i += 1) {
            real[i] = real[m - i];
            imag[i] = -imag[m - i];
        }
    }
    /// In-place complex FFT over `real`/`imag`. Same size constraints as fftr.
    pub fn fft(self: Self, real: []f32, imag: []f32) !void {
        if (real.len != imag.len) return error.DataSizeMismatch;
        if (real.len > self.length or !std.math.isPowerOfTwo(real.len)) return error.InvalidSize;
        // iterative, in-place radix-2 fft
        var sin_step: usize = self.length / real.len;
        var n: usize = real.len;
        var m: usize = undefined;
        // Butterfly stages, halving the span each pass until span == 2.
        while (true) {
            m = n;
            n /= 2;
            if (n <= 1) break;
            var sin_idx: usize = 0;
            var cos_idx: usize = self.length / 4;
            var j: usize = 0;
            while (j < n) : (j += 1) {
                var p = j;
                var k: usize = m;
                const sinval = self.sin[sin_idx];
                const cosval = self.sin[cos_idx];
                while (k <= real.len) : (k += m) {
                    const t1 = real[p] - real[p + n];
                    const t2 = imag[p] - imag[p + n];
                    real[p] += real[p + n];
                    imag[p] += imag[p + n];
                    real[p + n] = cosval * t1 + sinval * t2;
                    imag[p + n] = cosval * t2 - sinval * t1;
                    p += m;
                }
                sin_idx += sin_step;
                cos_idx += sin_step;
            }
            sin_step += sin_step;
        }
        // Final stage: span-2 butterflies (twiddle factor is 1).
        n = real.len;
        var p: usize = 0;
        var k: usize = n / 2;
        while (k > 0) : (k -= 1) {
            const t1 = real[p] - real[p + 1];
            const t2 = imag[p] - imag[p + 1];
            real[p] += real[p + 1];
            imag[p] += imag[p + 1];
            real[p + 1] = t1;
            imag[p + 1] = t2;
            p += 2;
        }
        // bit reversal — decimation-in-frequency leaves results in
        // bit-reversed order, so swap each pair (i, reverse(i)) once.
        const n_bits = std.math.log2_int(usize, n);
        const shiftr = @intCast(@TypeOf(n_bits), @bitSizeOf(usize) - 1) - n_bits + 1;
        var i: usize = 0;
        while (i < n) : (i += 1) {
            var j: usize = @bitReverse(usize, i);
            j >>= shiftr;
            if (i <= j) {
                continue;
            }
            const t1 = real[i];
            const t2 = imag[i];
            real[i] = real[j];
            imag[i] = imag[j];
            real[j] = t1;
            imag[j] = t2;
        }
    }
};
test "fft" {
var f = try FFT.init(std.testing.allocator, 32);
defer f.deinit();
var real = [_]f32{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0 };
var imag = [_]f32{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
try f.fftr(real[0..], imag[0..]);
var truth = [_]f32{ 45, -25.452, 10.364, -9.06406, 4, -1.27908, -2.36396, 3.79513, -5, 3.79513, -2.36396, -1.27908, 4, -9.06406, 10.364, -25.452, 0, -16.6652, 3.29289, 2.32849, -5, 5.6422, -4.70711, 2.6485, 0, -2.6485, 4.70711, -5.6422, 5, -2.32849, -3.29289, 16.6652 };
var real_truth = truth[0..16];
var imag_truth = truth[16..];
var i: usize = 0;
while (i < real.len) : (i += 1) {
try std.testing.expectApproxEqRel(real_truth[i], real[i], 0.001);
try std.testing.expectApproxEqRel(imag_truth[i], imag[i], 0.001);
}
}
test "fft file" {
var f = try FFT.init(std.testing.allocator, 256);
defer f.deinit();
var d = std.io.fixedBufferStream(@embedFile("testdata/test_fft.256.frame"));
var data: [512]f32 = undefined;
var data_u8 = std.mem.sliceAsBytes(data[0..256]);
try d.reader().readNoEof(data_u8);
var t = std.io.fixedBufferStream(@embedFile("testdata/test_fft.256.fft"));
var truth: [512]f32 = undefined;
var truth_u8 = std.mem.sliceAsBytes(truth[0..]);
try t.reader().readNoEof(truth_u8);
try f.fftr(data[0..256], data[256..]);
for (data) |x, idx| {
try std.testing.expectApproxEqRel(truth[idx], x, 0.001);
}
}
|
src/dsp/fft.zig
|
const upaya = @import("upaya");
const Texture = upaya.Texture;
const std = @import("std");
// Page allocator backs both the texture cache map and the duplicated keys.
const allocator = std.heap.page_allocator;
// Cache from image path (as given by the caller) to its loaded texture.
var path2tex = std.StringHashMap(*upaya.Texture).init(allocator);
// TODO: I am not sure anymore whether it is OK to have this cache alive across
// multiple load / save cycles
/// Loads (or returns the cached) texture for image path `p`. When `refpath`
/// is given, `p` is resolved relative to refpath's directory (the slideshow
/// file). Returns null only if texFromFile produced no texture.
pub fn getImg(p: []const u8, refpath: ?[]const u8) !?*upaya.Texture {
    // Fast path: already loaded this path before.
    if (path2tex.count() > 0) {
        if (path2tex.contains(p)) {
            return path2tex.get(p).?;
        }
    }
    // The buffer must live at function scope: `absp` may point into it and is
    // used below. (The original declared the buffer inside the inner else
    // block, leaving `absp` dangling once that block ended; it also declared
    // an unused `pwd_b` buffer.)
    var path_buf: [1024]u8 = undefined;
    var absp: []const u8 = undefined;
    if (refpath) |rp| {
        // Capture the non-null dirname instead of formatting the optional.
        if (std.fs.path.dirname(rp)) |pwd| {
            absp = try std.fmt.bufPrint(&path_buf, "{s}{c}{s}", .{ pwd, std.fs.path.sep, p });
        } else {
            // refpath has no directory component; use p unchanged.
            absp = p;
        }
    } else {
        absp = p;
    }
    var tex: ?*upaya.Texture = try texFromFile(absp, .nearest);
    if (tex) |okTexture| {
        // The map does not copy keys; duplicate so the key outlives the call.
        const key = try std.mem.dupe(allocator, u8, p);
        try path2tex.put(key, okTexture);
    }
    return tex;
}
/// Decodes an image file into a heap-allocated Texture, forcing RGBA output.
/// Returns null when Texture.initWithData yields no texture; caller owns the
/// returned pointer.
pub fn texFromFile(file: []const u8, filter: Texture.Filter) !?*Texture {
    const image_contents = try upaya.fs.read(allocator, file);
    // NOTE(review): image_contents appears to be heap-allocated and is never
    // freed here — confirm the ownership semantics of upaya.fs.read.
    var w: c_int = undefined;
    var h: c_int = undefined;
    var channels: c_int = undefined;
    const load_res = upaya.stb.stbi_load_from_memory(image_contents.ptr, @intCast(c_int, image_contents.len), &w, &h, &channels, 4);
    if (load_res == null) return error.ImageLoadFailed;
    defer upaya.stb.stbi_image_free(load_res);
    var ret: ?*Texture = null;
    // We requested 4 components, so stb returns exactly w*h*4 bytes;
    // `channels` reports the file's ORIGINAL channel count, which may be < 4.
    // The original sliced with `channels`, under-reading e.g. RGB images.
    var tex: ?Texture = Texture.initWithData(load_res[0..@intCast(usize, w * h * 4)], w, h, filter);
    if (tex) |okTexture| {
        ret = try allocator.create(Texture);
        ret.?.* = okTexture;
    }
    return ret;
}
|
src/texturecache.zig
|
const std = @import("std.zig");
const cstr = std.cstr;
const unicode = std.unicode;
const io = std.io;
const fs = std.fs;
const os = std.os;
const process = std.process;
const File = std.fs.File;
const windows = os.windows;
const mem = std.mem;
const debug = std.debug;
const BufMap = std.BufMap;
const Buffer = std.Buffer;
const builtin = @import("builtin");
const Os = builtin.Os;
const TailQueue = std.TailQueue;
const maxInt = std.math.maxInt;
pub const ChildProcess = struct {
    /// Child process id on POSIX; unused (void) on Windows — see `handle`.
    pid: if (builtin.os == .windows) void else i32,
    /// Process handle on Windows; unused (void) on POSIX.
    handle: if (builtin.os == .windows) windows.HANDLE else void,
    thread_handle: if (builtin.os == .windows) windows.HANDLE else void,
    allocator: *mem.Allocator,
    /// Parent's end of the child's stdio when the behavior is .Pipe; null otherwise.
    stdin: ?File,
    stdout: ?File,
    stderr: ?File,
    /// Set once the child has been waited for; null while it is still running.
    term: ?(SpawnError!Term),
    argv: []const []const u8,
    /// Leave as null to use the current env map using the supplied allocator.
    env_map: ?*const BufMap,
    stdin_behavior: StdIo,
    stdout_behavior: StdIo,
    stderr_behavior: StdIo,
    /// Set to change the user id when spawning the child process.
    uid: if (builtin.os == .windows) void else ?u32,
    /// Set to change the group id when spawning the child process.
    gid: if (builtin.os == .windows) void else ?u32,
    /// Set to change the current working directory when spawning the child process.
    cwd: ?[]const u8,
    /// POSIX-only pipe used to report errors from the forked child back to
    /// the parent (see spawnPosix and cleanupAfterWait).
    err_pipe: if (builtin.os == .windows) void else [2]os.fd_t,
    llnode: if (builtin.os == .windows) void else TailQueue(*ChildProcess).Node,
    pub const SpawnError = error{
        OutOfMemory,
        /// POSIX-only. `StdIo.Ignore` was selected and opening `/dev/null` returned ENODEV.
        NoDevice,
        /// Windows-only. One of:
        /// * `cwd` was provided and it could not be re-encoded into UTF16LE, or
        /// * The `PATH` or `PATHEXT` environment variable contained invalid UTF-8.
        InvalidUtf8,
        /// Windows-only. `cwd` was provided, but the path did not exist when spawning the child process.
        CurrentWorkingDirectoryUnlinked,
    } || os.ExecveError || os.SetIdError || os.ChangeCurDirError || windows.CreateProcessError || windows.WaitForSingleObjectError;
    /// How the child terminated, with the exit code / signal number payload.
    pub const Term = union(enum) {
        Exited: u32,
        Signal: u32,
        Stopped: u32,
        Unknown: u32,
    };
    /// Per-stream disposition for the child's stdin/stdout/stderr.
    pub const StdIo = enum {
        Inherit,
        Ignore,
        Pipe,
        Close,
    };
/// First argument in argv is the executable.
/// On success must call deinit.
pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess {
const child = try allocator.create(ChildProcess);
child.* = ChildProcess{
.allocator = allocator,
.argv = argv,
.pid = undefined,
.handle = undefined,
.thread_handle = undefined,
.err_pipe = undefined,
.llnode = undefined,
.term = null,
.env_map = null,
.cwd = null,
.uid = if (builtin.os == .windows) {} else null,
.gid = if (builtin.os == .windows) {} else null,
.stdin = null,
.stdout = null,
.stderr = null,
.stdin_behavior = StdIo.Inherit,
.stdout_behavior = StdIo.Inherit,
.stderr_behavior = StdIo.Inherit,
};
errdefer allocator.destroy(child);
return child;
}
    /// Looks up `name` in the user database and sets uid/gid accordingly.
    pub fn setUserName(self: *ChildProcess, name: []const u8) !void {
        const user_info = try os.getUserInfo(name);
        self.uid = user_info.uid;
        self.gid = user_info.gid;
    }
    /// On success must call `kill` or `wait`.
    pub fn spawn(self: *ChildProcess) SpawnError!void {
        if (builtin.os == .windows) {
            return self.spawnWindows();
        } else {
            return self.spawnPosix();
        }
    }
    /// Convenience: spawn, then block until the child exits.
    pub fn spawnAndWait(self: *ChildProcess) SpawnError!Term {
        try self.spawn();
        return self.wait();
    }
    /// Forcibly terminates child process and then cleans up all resources.
    pub fn kill(self: *ChildProcess) !Term {
        if (builtin.os == .windows) {
            return self.killWindows(1);
        } else {
            return self.killPosix();
        }
    }
    /// Windows kill: TerminateProcess with the given exit code, then reap.
    pub fn killWindows(self: *ChildProcess, exit_code: windows.UINT) !Term {
        // Already reaped (e.g. by a prior wait/kill): just return the result.
        if (self.term) |term| {
            self.cleanupStreams();
            return term;
        }
        try windows.TerminateProcess(self.handle, exit_code);
        try self.waitUnwrappedWindows();
        return self.term.?;
    }
    /// POSIX kill: SIGTERM, then reap via waitpid.
    pub fn killPosix(self: *ChildProcess) !Term {
        if (self.term) |term| {
            self.cleanupStreams();
            return term;
        }
        try os.kill(self.pid, os.SIGTERM);
        self.waitUnwrapped();
        return self.term.?;
    }
    /// Blocks until child process terminates and then cleans up all resources.
    pub fn wait(self: *ChildProcess) !Term {
        if (builtin.os == .windows) {
            return self.waitWindows();
        } else {
            return self.waitPosix();
        }
    }
    /// Result of `exec`: how the child terminated plus its captured output.
    pub const ExecResult = struct {
        term: Term,
        stdout: []u8,
        stderr: []u8,
    };
    /// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
    /// If it succeeds, the caller owns result.stdout and result.stderr memory.
    pub fn exec(
        allocator: *mem.Allocator,
        argv: []const []const u8,
        cwd: ?[]const u8,
        env_map: ?*const BufMap,
        max_output_size: usize,
    ) !ExecResult {
        const child = try ChildProcess.init(argv, allocator);
        defer child.deinit();
        child.stdin_behavior = ChildProcess.StdIo.Ignore;
        child.stdout_behavior = ChildProcess.StdIo.Pipe;
        child.stderr_behavior = ChildProcess.StdIo.Pipe;
        child.cwd = cwd;
        child.env_map = env_map;
        try child.spawn();
        // NOTE(review): if reading either stream fails below, the child is
        // neither killed nor waited for before returning — confirm whether a
        // zombie can be left behind on that error path.
        var stdout = Buffer.initNull(allocator);
        var stderr = Buffer.initNull(allocator);
        defer Buffer.deinit(&stdout);
        defer Buffer.deinit(&stderr);
        var stdout_file_in_stream = child.stdout.?.inStream();
        var stderr_file_in_stream = child.stderr.?.inStream();
        try stdout_file_in_stream.stream.readAllBuffer(&stdout, max_output_size);
        try stderr_file_in_stream.stream.readAllBuffer(&stderr, max_output_size);
        return ExecResult{
            .term = try child.wait(),
            .stdout = stdout.toOwnedSlice(),
            .stderr = stderr.toOwnedSlice(),
        };
    }
    /// Windows wait: returns the cached term if already reaped, else blocks.
    fn waitWindows(self: *ChildProcess) !Term {
        if (self.term) |term| {
            self.cleanupStreams();
            return term;
        }
        try self.waitUnwrappedWindows();
        return self.term.?;
    }
    /// POSIX wait: returns the cached term if already reaped, else blocks.
    fn waitPosix(self: *ChildProcess) !Term {
        if (self.term) |term| {
            self.cleanupStreams();
            return term;
        }
        self.waitUnwrapped();
        return self.term.?;
    }
    /// Frees the ChildProcess structure itself (streams are closed by wait/kill).
    pub fn deinit(self: *ChildProcess) void {
        self.allocator.destroy(self);
    }
    /// Blocks on the process handle, records the exit code into self.term,
    /// and closes the process/thread handles and stdio streams.
    fn waitUnwrappedWindows(self: *ChildProcess) !void {
        const result = windows.WaitForSingleObjectEx(self.handle, windows.INFINITE, false);
        self.term = @as(SpawnError!Term, x: {
            var exit_code: windows.DWORD = undefined;
            if (windows.kernel32.GetExitCodeProcess(self.handle, &exit_code) == 0) {
                break :x Term{ .Unknown = 0 };
            } else {
                break :x Term{ .Exited = exit_code };
            }
        });
        os.close(self.handle);
        os.close(self.thread_handle);
        self.cleanupStreams();
        return result;
    }
    /// POSIX reap: waitpid, close streams, then translate the status word.
    fn waitUnwrapped(self: *ChildProcess) void {
        const status = os.waitpid(self.pid, 0);
        self.cleanupStreams();
        self.handleWaitResult(status);
    }
    /// Stores the decoded wait status into self.term.
    fn handleWaitResult(self: *ChildProcess, status: u32) void {
        // TODO https://github.com/ziglang/zig/issues/3190
        var term = self.cleanupAfterWait(status);
        self.term = term;
    }
fn cleanupStreams(self: *ChildProcess) void {
if (self.stdin) |*stdin| {
stdin.close();
self.stdin = null;
}
if (self.stdout) |*stdout| {
stdout.close();
self.stdout = null;
}
if (self.stderr) |*stderr| {
stderr.close();
self.stderr = null;
}
}
    /// Drains the error pipe that the forked child may have written an error
    /// code into, returning that error if present; otherwise translates the
    /// raw wait status into a Term.
    fn cleanupAfterWait(self: *ChildProcess, status: u32) !Term {
        defer destroyPipe(self.err_pipe);
        if (builtin.os == .linux) {
            // On Linux the "pipe" is actually an eventfd (see spawnPosix).
            var fd = [1]std.os.pollfd{std.os.pollfd{
                .fd = self.err_pipe[0],
                .events = std.os.POLLIN,
                .revents = undefined,
            }};
            // Check if the eventfd buffer stores a non-zero value by polling
            // it, that's the error code returned by the child process.
            _ = std.os.poll(&fd, 0) catch unreachable;
            // According to eventfd(2) the descriptor is readable if the counter
            // has a value greater than 0
            if ((fd[0].revents & std.os.POLLIN) != 0) {
                const err_int = try readIntFd(self.err_pipe[0]);
                return @errSetCast(SpawnError, @intToError(err_int));
            }
        } else {
            // Write maxInt(ErrInt) to the write end of the err_pipe. This is after
            // waitpid, so this write is guaranteed to be after the child
            // pid potentially wrote an error. This way we can do a blocking
            // read on the error pipe and either get maxInt(ErrInt) (no error) or
            // an error code.
            try writeIntFd(self.err_pipe[1], maxInt(ErrInt));
            const err_int = try readIntFd(self.err_pipe[0]);
            // Here we potentially return the fork child's error from the parent
            // pid.
            if (err_int != maxInt(ErrInt)) {
                return @errSetCast(SpawnError, @intToError(err_int));
            }
        }
        return statusToTerm(status);
    }
fn statusToTerm(status: u32) Term {
return if (os.WIFEXITED(status))
Term{ .Exited = os.WEXITSTATUS(status) }
else if (os.WIFSIGNALED(status))
Term{ .Signal = os.WTERMSIG(status) }
else if (os.WIFSTOPPED(status))
Term{ .Stopped = os.WSTOPSIG(status) }
else
Term{ .Unknown = status };
}
    /// POSIX spawn: sets up the requested pipes, forks, wires up the child's
    /// stdio, and execs argv. Errors in the child are reported back through
    /// err_pipe and surface from wait()/cleanupAfterWait.
    fn spawnPosix(self: *ChildProcess) SpawnError!void {
        // Create a pipe per stream that asked for one; `undefined` otherwise.
        const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try os.pipe() else undefined;
        errdefer if (self.stdin_behavior == StdIo.Pipe) {
            destroyPipe(stdin_pipe);
        };
        const stdout_pipe = if (self.stdout_behavior == StdIo.Pipe) try os.pipe() else undefined;
        errdefer if (self.stdout_behavior == StdIo.Pipe) {
            destroyPipe(stdout_pipe);
        };
        const stderr_pipe = if (self.stderr_behavior == StdIo.Pipe) try os.pipe() else undefined;
        errdefer if (self.stderr_behavior == StdIo.Pipe) {
            destroyPipe(stderr_pipe);
        };
        // Streams set to Ignore are redirected to /dev/null.
        const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
        const dev_null_fd = if (any_ignore)
            os.openC("/dev/null", os.O_RDWR, 0) catch |err| switch (err) {
                error.PathAlreadyExists => unreachable,
                error.NoSpaceLeft => unreachable,
                error.FileTooBig => unreachable,
                error.DeviceBusy => unreachable,
                else => |e| return e,
            }
        else
            undefined;
        defer {
            if (any_ignore) os.close(dev_null_fd);
        }
        // Use the caller-supplied env map, or snapshot the current one.
        var env_map_owned: BufMap = undefined;
        var we_own_env_map: bool = undefined;
        const env_map = if (self.env_map) |env_map| x: {
            we_own_env_map = false;
            break :x env_map;
        } else x: {
            we_own_env_map = true;
            env_map_owned = try process.getEnvMap(self.allocator);
            break :x &env_map_owned;
        };
        defer {
            if (we_own_env_map) env_map_owned.deinit();
        }
        // This pipe is used to communicate errors between the time of fork
        // and execve from the child process to the parent process.
        const err_pipe = blk: {
            if (builtin.os == .linux) {
                const fd = try os.eventfd(0, 0);
                // There's no distinction between the readable and the writeable
                // end with eventfd
                break :blk [2]os.fd_t{ fd, fd };
            } else {
                break :blk try os.pipe();
            }
        };
        errdefer destroyPipe(err_pipe);
        const pid_result = try os.fork();
        if (pid_result == 0) {
            // we are the child
            setUpChildIo(self.stdin_behavior, stdin_pipe[0], os.STDIN_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err);
            setUpChildIo(self.stdout_behavior, stdout_pipe[1], os.STDOUT_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err);
            setUpChildIo(self.stderr_behavior, stderr_pipe[1], os.STDERR_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err);
            // The fds were dup'd onto 0/1/2 above; close the originals.
            if (self.stdin_behavior == .Pipe) {
                os.close(stdin_pipe[0]);
                os.close(stdin_pipe[1]);
            }
            if (self.stdout_behavior == .Pipe) {
                os.close(stdout_pipe[0]);
                os.close(stdout_pipe[1]);
            }
            if (self.stderr_behavior == .Pipe) {
                os.close(stderr_pipe[0]);
                os.close(stderr_pipe[1]);
            }
            if (self.cwd) |cwd| {
                os.chdir(cwd) catch |err| forkChildErrReport(err_pipe[1], err);
            }
            // Drop group before user: setreuid may remove the permission to setregid.
            if (self.gid) |gid| {
                os.setregid(gid, gid) catch |err| forkChildErrReport(err_pipe[1], err);
            }
            if (self.uid) |uid| {
                os.setreuid(uid, uid) catch |err| forkChildErrReport(err_pipe[1], err);
            }
            // execvpe only returns on failure; report it to the parent.
            const err = os.execvpe(self.allocator, self.argv, env_map);
            forkChildErrReport(err_pipe[1], err);
        }
        // we are the parent
        const pid = @intCast(i32, pid_result);
        if (self.stdin_behavior == StdIo.Pipe) {
            self.stdin = File.openHandle(stdin_pipe[1]);
        } else {
            self.stdin = null;
        }
        if (self.stdout_behavior == StdIo.Pipe) {
            self.stdout = File.openHandle(stdout_pipe[0]);
        } else {
            self.stdout = null;
        }
        if (self.stderr_behavior == StdIo.Pipe) {
            self.stderr = File.openHandle(stderr_pipe[0]);
        } else {
            self.stderr = null;
        }
        self.pid = pid;
        self.err_pipe = err_pipe;
        self.llnode = TailQueue(*ChildProcess).Node.init(self);
        self.term = null;
        // Close the child-side pipe ends in the parent.
        if (self.stdin_behavior == StdIo.Pipe) {
            os.close(stdin_pipe[0]);
        }
        if (self.stdout_behavior == StdIo.Pipe) {
            os.close(stdout_pipe[1]);
        }
        if (self.stderr_behavior == StdIo.Pipe) {
            os.close(stderr_pipe[1]);
        }
    }
/// Spawn the child process on Windows.
/// Creates inheritable pipe/NUL/std handles per the configured StdIo behaviors,
/// builds the command line and environment block, launches via CreateProcessW
/// (falling back to a PATH/PATHEXT search when the executable is not found),
/// then stores the child-side handles on `self` and closes the ends the parent
/// does not keep.
fn spawnWindows(self: *ChildProcess) SpawnError!void {
    const saAttr = windows.SECURITY_ATTRIBUTES{
        .nLength = @sizeOf(windows.SECURITY_ATTRIBUTES),
        .bInheritHandle = windows.TRUE,
        .lpSecurityDescriptor = null,
    };
    const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
    // A single NUL handle is shared by every stream marked Ignore.
    // TODO use CreateFileW here since we are using a string literal for the path
    const nul_handle = if (any_ignore)
        windows.CreateFile(
            "NUL",
            windows.GENERIC_READ,
            windows.FILE_SHARE_READ,
            null,
            windows.OPEN_EXISTING,
            windows.FILE_ATTRIBUTE_NORMAL,
            null,
        ) catch |err| switch (err) {
            error.SharingViolation => unreachable, // not possible for "NUL"
            error.PathAlreadyExists => unreachable, // not possible for "NUL"
            error.PipeBusy => unreachable, // not possible for "NUL"
            error.InvalidUtf8 => unreachable, // not possible for "NUL"
            error.BadPathName => unreachable, // not possible for "NUL"
            error.FileNotFound => unreachable, // not possible for "NUL"
            error.AccessDenied => unreachable, // not possible for "NUL"
            error.NameTooLong => unreachable, // not possible for "NUL"
            else => |e| return e,
        }
    else
        undefined;
    defer {
        if (any_ignore) os.close(nul_handle);
    }
    if (any_ignore) {
        try windows.SetHandleInformation(nul_handle, windows.HANDLE_FLAG_INHERIT, 0);
    }
    var g_hChildStd_IN_Rd: ?windows.HANDLE = null;
    var g_hChildStd_IN_Wr: ?windows.HANDLE = null;
    switch (self.stdin_behavior) {
        StdIo.Pipe => {
            try windowsMakePipeIn(&g_hChildStd_IN_Rd, &g_hChildStd_IN_Wr, &saAttr);
        },
        StdIo.Ignore => {
            g_hChildStd_IN_Rd = nul_handle;
        },
        StdIo.Inherit => {
            g_hChildStd_IN_Rd = windows.GetStdHandle(windows.STD_INPUT_HANDLE) catch null;
        },
        StdIo.Close => {
            g_hChildStd_IN_Rd = null;
        },
    }
    errdefer if (self.stdin_behavior == StdIo.Pipe) {
        windowsDestroyPipe(g_hChildStd_IN_Rd, g_hChildStd_IN_Wr);
    };
    var g_hChildStd_OUT_Rd: ?windows.HANDLE = null;
    var g_hChildStd_OUT_Wr: ?windows.HANDLE = null;
    switch (self.stdout_behavior) {
        StdIo.Pipe => {
            try windowsMakePipeOut(&g_hChildStd_OUT_Rd, &g_hChildStd_OUT_Wr, &saAttr);
        },
        StdIo.Ignore => {
            g_hChildStd_OUT_Wr = nul_handle;
        },
        StdIo.Inherit => {
            g_hChildStd_OUT_Wr = windows.GetStdHandle(windows.STD_OUTPUT_HANDLE) catch null;
        },
        StdIo.Close => {
            g_hChildStd_OUT_Wr = null;
        },
    }
    // BUG FIX: this previously tested `self.stdin_behavior`, leaking the
    // stdout pipe on the error path whenever stdin was not piped.
    errdefer if (self.stdout_behavior == StdIo.Pipe) {
        windowsDestroyPipe(g_hChildStd_OUT_Rd, g_hChildStd_OUT_Wr);
    };
    var g_hChildStd_ERR_Rd: ?windows.HANDLE = null;
    var g_hChildStd_ERR_Wr: ?windows.HANDLE = null;
    switch (self.stderr_behavior) {
        StdIo.Pipe => {
            try windowsMakePipeOut(&g_hChildStd_ERR_Rd, &g_hChildStd_ERR_Wr, &saAttr);
        },
        StdIo.Ignore => {
            g_hChildStd_ERR_Wr = nul_handle;
        },
        StdIo.Inherit => {
            g_hChildStd_ERR_Wr = windows.GetStdHandle(windows.STD_ERROR_HANDLE) catch null;
        },
        StdIo.Close => {
            g_hChildStd_ERR_Wr = null;
        },
    }
    // BUG FIX: this previously tested `self.stdin_behavior`, leaking the
    // stderr pipe on the error path whenever stdin was not piped.
    errdefer if (self.stderr_behavior == StdIo.Pipe) {
        windowsDestroyPipe(g_hChildStd_ERR_Rd, g_hChildStd_ERR_Wr);
    };
    const cmd_line = try windowsCreateCommandLine(self.allocator, self.argv);
    defer self.allocator.free(cmd_line);
    var siStartInfo = windows.STARTUPINFOW{
        .cb = @sizeOf(windows.STARTUPINFOW),
        .hStdError = g_hChildStd_ERR_Wr,
        .hStdOutput = g_hChildStd_OUT_Wr,
        .hStdInput = g_hChildStd_IN_Rd,
        .dwFlags = windows.STARTF_USESTDHANDLES,
        .lpReserved = null,
        .lpDesktop = null,
        .lpTitle = null,
        .dwX = 0,
        .dwY = 0,
        .dwXSize = 0,
        .dwYSize = 0,
        .dwXCountChars = 0,
        .dwYCountChars = 0,
        .dwFillAttribute = 0,
        .wShowWindow = 0,
        .cbReserved2 = 0,
        .lpReserved2 = null,
    };
    var piProcInfo: windows.PROCESS_INFORMATION = undefined;
    const cwd_w = if (self.cwd) |cwd| try unicode.utf8ToUtf16LeWithNull(self.allocator, cwd) else null;
    defer if (cwd_w) |cwd| self.allocator.free(cwd);
    const cwd_w_ptr = if (cwd_w) |cwd| cwd.ptr else null;
    const maybe_envp_buf = if (self.env_map) |env_map| try createWindowsEnvBlock(self.allocator, env_map) else null;
    defer if (maybe_envp_buf) |envp_buf| self.allocator.free(envp_buf);
    const envp_ptr = if (maybe_envp_buf) |envp_buf| envp_buf.ptr else null;
    // the cwd set in ChildProcess is in effect when choosing the executable path
    // to match posix semantics
    const app_name = x: {
        if (self.cwd) |cwd| {
            const resolved = try fs.path.resolve(self.allocator, &[_][]const u8{ cwd, self.argv[0] });
            defer self.allocator.free(resolved);
            break :x try cstr.addNullByte(self.allocator, resolved);
        } else {
            break :x try cstr.addNullByte(self.allocator, self.argv[0]);
        }
    };
    defer self.allocator.free(app_name);
    const app_name_w = try unicode.utf8ToUtf16LeWithNull(self.allocator, app_name);
    defer self.allocator.free(app_name_w);
    const cmd_line_w = try unicode.utf8ToUtf16LeWithNull(self.allocator, cmd_line);
    defer self.allocator.free(cmd_line_w);
    windowsCreateProcess(app_name_w.ptr, cmd_line_w.ptr, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo) catch |no_path_err| {
        if (no_path_err != error.FileNotFound) return no_path_err;
        // Executable not found at the literal path: retry against each PATH
        // entry, with each PATHEXT extension appended.
        var free_path = true;
        const PATH = process.getEnvVarOwned(self.allocator, "PATH") catch |err| switch (err) {
            error.EnvironmentVariableNotFound => blk: {
                free_path = false;
                break :blk "";
            },
            else => |e| return e,
        };
        defer if (free_path) self.allocator.free(PATH);
        var free_path_ext = true;
        const PATHEXT = process.getEnvVarOwned(self.allocator, "PATHEXT") catch |err| switch (err) {
            error.EnvironmentVariableNotFound => blk: {
                free_path_ext = false;
                break :blk "";
            },
            else => |e| return e,
        };
        defer if (free_path_ext) self.allocator.free(PATHEXT);
        var it = mem.tokenize(PATH, ";");
        retry: while (it.next()) |search_path| {
            const path_no_ext = try fs.path.join(self.allocator, &[_][]const u8{ search_path, app_name });
            defer self.allocator.free(path_no_ext);
            var ext_it = mem.tokenize(PATHEXT, ";");
            while (ext_it.next()) |app_ext| {
                const joined_path = try mem.concat(self.allocator, u8, &[_][]const u8{ path_no_ext, app_ext });
                defer self.allocator.free(joined_path);
                const joined_path_w = try unicode.utf8ToUtf16LeWithNull(self.allocator, joined_path);
                defer self.allocator.free(joined_path_w);
                if (windowsCreateProcess(joined_path_w.ptr, cmd_line_w.ptr, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo)) |_| {
                    break :retry;
                } else |err| switch (err) {
                    error.FileNotFound => continue,
                    error.AccessDenied => continue,
                    else => return err,
                }
            }
        } else {
            return no_path_err; // return the original error
        }
    };
    // Parent keeps the far ends of each pipe (write side of stdin, read side
    // of stdout/stderr); a null handle means the stream was not piped.
    if (g_hChildStd_IN_Wr) |h| {
        self.stdin = File.openHandle(h);
    } else {
        self.stdin = null;
    }
    if (g_hChildStd_OUT_Rd) |h| {
        self.stdout = File.openHandle(h);
    } else {
        self.stdout = null;
    }
    if (g_hChildStd_ERR_Rd) |h| {
        self.stderr = File.openHandle(h);
    } else {
        self.stderr = null;
    }
    self.handle = piProcInfo.hProcess;
    self.thread_handle = piProcInfo.hThread;
    self.term = null;
    // Close the child-side ends now owned by the spawned process.
    if (self.stdin_behavior == StdIo.Pipe) {
        os.close(g_hChildStd_IN_Rd.?);
    }
    if (self.stderr_behavior == StdIo.Pipe) {
        os.close(g_hChildStd_ERR_Wr.?);
    }
    if (self.stdout_behavior == StdIo.Pipe) {
        os.close(g_hChildStd_OUT_Wr.?);
    }
}
/// In the forked child: point `std_fileno` at the right file descriptor for
/// the requested behavior. Pipe -> dup the pipe end, Ignore -> dup /dev/null,
/// Close -> close the stream, Inherit -> leave it untouched.
fn setUpChildIo(stdio: StdIo, pipe_fd: i32, std_fileno: i32, dev_null_fd: i32) !void {
    switch (stdio) {
        .Pipe => try os.dup2(pipe_fd, std_fileno),
        .Close => os.close(std_fileno),
        .Inherit => {},
        .Ignore => try os.dup2(dev_null_fd, std_fileno),
    }
}
};
/// Thin wrapper over CreateProcessW with the flags this module always uses.
/// Handles are inherited (bInheritHandles = TRUE) so the std handles set up
/// in `siStartInfo` reach the child.
fn windowsCreateProcess(app_name: [*:0]u16, cmd_line: [*:0]u16, envp_ptr: ?[*]u16, cwd_ptr: ?[*:0]u16, lpStartupInfo: *windows.STARTUPINFOW, lpProcessInformation: *windows.PROCESS_INFORMATION) !void {
    // TODO the docs for the environment pointer say that if lpEnvironment is
    // NULL the child inherits the parent's environment block, and that
    // CREATE_UNICODE_ENVIRONMENT must be set whenever the effective block
    // contains Unicode characters. That seems to imply we would need to know
    // whether our own parent passed CREATE_UNICODE_ENVIRONMENT before we
    // could safely pass NULL here — which would mean never passing NULL.
    // However that would also mean -DUNICODE programs could not pass
    // environment variables to non-Unicode programs, which seems unlikely.
    // More investigation is needed.
    const creation_flags = windows.CREATE_UNICODE_ENVIRONMENT;
    const env_block = @ptrCast(?*c_void, envp_ptr);
    return windows.CreateProcessW(
        app_name,
        cmd_line,
        null,
        null,
        windows.TRUE,
        creation_flags,
        env_block,
        cwd_ptr,
        lpStartupInfo,
        lpProcessInformation,
    );
}
/// Caller must dealloc.
/// Guarantees a null byte at result[result.len].
/// Joins `argv` into a single Windows command-line string using the C runtime
/// quoting convention: arguments containing whitespace or quotes are wrapped
/// in double quotes, with backslashes doubled only when they precede a quote.
fn windowsCreateCommandLine(allocator: *mem.Allocator, argv: []const []const u8) ![]u8 {
    var buf = try Buffer.initSize(allocator, 0);
    defer buf.deinit();
    var buf_stream = &io.BufferOutStream.init(&buf).stream;
    for (argv) |arg, arg_i| {
        // Arguments are separated by single spaces.
        if (arg_i != 0) try buf.appendByte(' ');
        // Fast path: no whitespace or quote characters means no quoting needed.
        if (mem.indexOfAny(u8, arg, " \t\n\"") == null) {
            try buf.append(arg);
            continue;
        }
        try buf.appendByte('"');
        // Count consecutive backslashes; their escaping depends on what
        // follows them, so they are flushed lazily.
        var backslash_count: usize = 0;
        for (arg) |byte| {
            switch (byte) {
                '\\' => backslash_count += 1,
                '"' => {
                    // Backslashes before a quote must be doubled, plus one
                    // more backslash to escape the quote itself.
                    try buf_stream.writeByteNTimes('\\', backslash_count * 2 + 1);
                    try buf.appendByte('"');
                    backslash_count = 0;
                },
                else => {
                    // Backslashes not followed by a quote are literal.
                    try buf_stream.writeByteNTimes('\\', backslash_count);
                    try buf.appendByte(byte);
                    backslash_count = 0;
                },
            }
        }
        // Trailing backslashes precede the closing quote, so double them.
        try buf_stream.writeByteNTimes('\\', backslash_count * 2);
        try buf.appendByte('"');
    }
    return buf.toOwnedSlice();
}
/// Close both ends of a Windows pipe; either end may already be null.
fn windowsDestroyPipe(rd: ?windows.HANDLE, wr: ?windows.HANDLE) void {
    for ([_]?windows.HANDLE{ rd, wr }) |maybe_handle| {
        if (maybe_handle) |handle| os.close(handle);
    }
}
/// Create a pipe the child reads from (its stdin). The parent-held write end
/// is marked non-inheritable so it does not leak into the child.
fn windowsMakePipeIn(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const windows.SECURITY_ATTRIBUTES) !void {
    var read_handle: windows.HANDLE = undefined;
    var write_handle: windows.HANDLE = undefined;
    try windows.CreatePipe(&read_handle, &write_handle, sattr);
    errdefer windowsDestroyPipe(read_handle, write_handle);
    try windows.SetHandleInformation(write_handle, windows.HANDLE_FLAG_INHERIT, 0);
    rd.* = read_handle;
    wr.* = write_handle;
}
/// Create a pipe the child writes into (its stdout/stderr). The parent-held
/// read end is marked non-inheritable so it does not leak into the child.
fn windowsMakePipeOut(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const windows.SECURITY_ATTRIBUTES) !void {
    var read_handle: windows.HANDLE = undefined;
    var write_handle: windows.HANDLE = undefined;
    try windows.CreatePipe(&read_handle, &write_handle, sattr);
    errdefer windowsDestroyPipe(read_handle, write_handle);
    try windows.SetHandleInformation(read_handle, windows.HANDLE_FLAG_INHERIT, 0);
    rd.* = read_handle;
    wr.* = write_handle;
}
/// Close both ends of a POSIX pipe pair. When both slots hold the same fd
/// (the eventfd case, where one fd serves as both ends) close it only once.
fn destroyPipe(pipe: [2]os.fd_t) void {
    const read_end = pipe[0];
    const write_end = pipe[1];
    os.close(read_end);
    if (write_end != read_end) os.close(write_end);
}
// Child of fork calls this to report an error to the fork parent.
// Then the child exits.
fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
    const code = @as(ErrInt, @errorToInt(err));
    // Best-effort write: there is nothing useful to do if it fails.
    writeIntFd(fd, code) catch {};
    os.exit(1);
}
// Unsigned integer wide enough to hold any `anyerror` value when sent over the pipe.
const ErrInt = @IntType(false, @sizeOf(anyerror) * 8);
/// Write `value` to `fd` as a native-endian u64. Any write failure is
/// collapsed into error.SystemResources.
fn writeIntFd(fd: i32, value: ErrInt) !void {
    const stream = &File.openHandle(fd).outStream().stream;
    const widened = @intCast(u64, value);
    stream.writeIntNative(u64, widened) catch return error.SystemResources;
}
/// Read a native-endian u64 from `fd` and narrow it to ErrInt. Any read
/// failure is collapsed into error.SystemResources.
fn readIntFd(fd: i32) !ErrInt {
    const stream = &File.openHandle(fd).inStream().stream;
    const raw = stream.readIntNative(u64) catch return error.SystemResources;
    return @intCast(ErrInt, raw);
}
/// Caller must free result.
/// Builds a Windows environment block: "KEY=VALUE\0" pairs encoded as UTF-16,
/// terminated by four zero code units.
pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) ![]u16 {
    // Upper bound on UTF-16 code units needed: a UTF-8 byte never expands to
    // more than one UTF-16 code unit, plus '=' and NUL per pair, plus the
    // final 4 terminator code units.
    const max_chars_needed = blk: {
        var total: usize = 4;
        var count_it = env_map.iterator();
        while (count_it.next()) |pair| {
            total += pair.key.len + pair.value.len + 2;
        }
        break :blk total;
    };
    const result = try allocator.alloc(u16, max_chars_needed);
    errdefer allocator.free(result);
    var it = env_map.iterator();
    var i: usize = 0;
    while (it.next()) |pair| {
        i += try unicode.utf8ToUtf16Le(result[i..], pair.key);
        result[i] = '=';
        i += 1;
        i += try unicode.utf8ToUtf16Le(result[i..], pair.value);
        result[i] = 0;
        i += 1;
    }
    // Terminate the block with four zero code units.
    var pad: usize = 0;
    while (pad < 4) : (pad += 1) {
        result[i] = 0;
        i += 1;
    }
    return allocator.shrink(result, i);
}
|
lib/std/child_process.zig
|
const root = @import("build.zig");
const std = @import("std");
const io = std.io;
const fmt = std.fmt;
const Builder = std.build.Builder;
const Pkg = std.build.Pkg;
const InstallArtifactStep = std.build.InstallArtifactStep;
const LibExeObjStep = std.build.LibExeObjStep;
const ArrayList = std.ArrayList;
///! This is a modified build runner to extract information out of build.zig
///! Modified from the std.special.build_runner
pub fn main() !void {
    // All allocations live for the length of the run; free them in one shot.
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = &arena.allocator;

    const builder = try Builder.create(allocator, "", "", "");
    defer builder.destroy();
    try runBuild(builder);

    const stdout_stream = io.getStdOut().outStream();
    // TODO: We currently add packages from every LibExeObj step that the install step depends on.
    // Should we error out or keep one step or something similar?
    // We also flatten them, we should probably keep the nested structure.
    for (builder.top_level_steps.items) |tls| {
        for (tls.step.dependencies.items) |dependency| {
            try processStep(stdout_stream, dependency);
        }
    }
}
/// Print the packages attached to `step`. Install-artifact and lib/exe/obj
/// steps carry packages directly; any other kind of step is searched
/// recursively through its dependencies.
fn processStep(stdout_stream: anytype, step: *std.build.Step) anyerror!void {
    if (step.cast(InstallArtifactStep)) |install_step| {
        for (install_step.artifact.packages.items) |pkg| {
            try processPackage(stdout_stream, pkg);
        }
        return;
    }
    if (step.cast(LibExeObjStep)) |lib_exe_obj| {
        for (lib_exe_obj.packages.items) |pkg| {
            try processPackage(stdout_stream, pkg);
        }
        return;
    }
    for (step.dependencies.items) |child_step| {
        try processStep(stdout_stream, child_step);
    }
}
/// Emit one "name\0path\n" record for `pkg`, then recurse into its
/// dependencies (depth-first).
fn processPackage(out_stream: anytype, pkg: Pkg) anyerror!void {
    try out_stream.print("{}\x00{}\n", .{ pkg.name, pkg.path });
    const deps = pkg.dependencies orelse return;
    for (deps) |dep| {
        try processPackage(out_stream, dep);
    }
}
/// Invoke the user's `build` function from build.zig. Accepts either a plain
/// `void` return or an error union; any other return type is a compile error.
fn runBuild(builder: *Builder) anyerror!void {
    switch (@typeInfo(@TypeOf(root.build).ReturnType)) {
        .Void => root.build(builder),
        .ErrorUnion => try root.build(builder),
        else => @compileError("expected return type of build to be 'void' or '!void'"),
    }
}
|
src/special/build_runner.zig
|
const std = @import("std.zig");
const builtin = std.builtin;
const root = @import("root");
//! std.log is a standardized interface for logging which allows for the logging
//! of programs and libraries using this interface to be formatted and filtered
//! by the implementer of the root.log function.
//!
//! Each log message has an associated scope enum, which can be used to give
//! context to the logging. The logging functions in std.log implicitly use a
//! scope of .default.
//!
//! A logging namespace using a custom scope can be created using the
//! std.log.scoped function, passing the scope as an argument; the logging
//! functions in the resulting struct use the provided scope parameter.
//! For example, a library called 'libfoo' might use
//! `const log = std.log.scoped(.libfoo);` to use .libfoo as the scope of its
//! log messages.
//!
//! An example root.log might look something like this:
//!
//! ```
//! const std = @import("std");
//!
//! // Set the log level to warning
//! pub const log_level: std.log.Level = .warn;
//!
//! // Define root.log to override the std implementation
//! pub fn log(
//! comptime level: std.log.Level,
//! comptime scope: @TypeOf(.EnumLiteral),
//! comptime format: []const u8,
//! args: anytype,
//! ) void {
//! // Ignore all non-critical logging from sources other than
//! // .my_project, .nice_library and .default
//! const scope_prefix = "(" ++ switch (scope) {
//! .my_project, .nice_library, .default => @tagName(scope),
//! else => if (@enumToInt(level) <= @enumToInt(std.log.Level.crit))
//! @tagName(scope)
//! else
//! return,
//! } ++ "): ";
//!
//! const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix;
//!
//! // Print the message to stderr, silently ignoring any errors
//! const held = std.debug.getStderrMutex().acquire();
//! defer held.release();
//! const stderr = std.io.getStdErr().writer();
//! nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return;
//! }
//!
//! pub fn main() void {
//! // Using the default scope:
//! std.log.info("Just a simple informational log message", .{}); // Won't be printed as log_level is .warn
//! std.log.warn("Flux capacitor is starting to overheat", .{});
//!
//! // Using scoped logging:
//! const my_project_log = std.log.scoped(.my_project);
//! const nice_library_log = std.log.scoped(.nice_library);
//! const verbose_lib_log = std.log.scoped(.verbose_lib);
//!
//! my_project_log.info("Starting up", .{}); // Won't be printed as log_level is .warn
//! nice_library_log.err("Something went very wrong, sorry", .{});
//! verbose_lib_log.err("Added 1 + 1: {}", .{1 + 1}); // Won't be printed as it gets filtered out by our log function
//! }
//! ```
//! Which produces the following output:
//! ```
//! [warn] (default): Flux capacitor is starting to overheat
//! [err] (nice_library): Something went very wrong, sorry
//! ```
pub const Level = enum {
    // NOTE: declaration order matters — the filter in `log` compares
    // @enumToInt(message_level) <= @enumToInt(level), so more severe levels
    // must stay first.
    /// Emergency: a condition that cannot be handled, usually followed by a
    /// panic.
    emerg,
    /// Alert: a condition that should be corrected immediately (e.g. database
    /// corruption).
    alert,
    /// Critical: A bug has been detected or something has gone wrong and it
    /// will have an effect on the operation of the program.
    crit,
    /// Error: A bug has been detected or something has gone wrong but it is
    /// recoverable.
    err,
    /// Warning: it is uncertain if something has gone wrong or not, but the
    /// circumstances would be worth investigating.
    warn,
    /// Notice: non-error but significant conditions.
    notice,
    /// Informational: general messages about the state of the program.
    info,
    /// Debug: messages only useful for debugging.
    debug,
};
/// The default log level is based on build mode. Note that in ReleaseSmall
/// builds the default level is emerg but no messages will be stored/logged
/// by the default logger to save space.
pub const default_level: Level = switch (builtin.mode) {
    .Debug => .debug,
    .ReleaseSafe => .notice,
    .ReleaseFast => .err,
    .ReleaseSmall => .emerg,
};
/// The current log level. This is set to root.log_level if present, otherwise
/// log.default_level. Resolved at comptime, so filtered-out calls compile away.
pub const level: Level = if (@hasDecl(root, "log_level"))
    root.log_level
else
    default_level;
/// Core dispatch for all log helpers: filter by the active `level`, then
/// route to root.log when the root module defines one, otherwise fall back
/// to the default stderr logger (except on freestanding and ReleaseSmall).
fn log(
    comptime message_level: Level,
    comptime scope: @Type(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    // Drop messages less severe than the configured level.
    if (@enumToInt(message_level) > @enumToInt(level)) return;
    if (@hasDecl(root, "log")) {
        root.log(message_level, scope, format, args);
        return;
    }
    if (std.Target.current.os.tag == .freestanding) {
        // On freestanding one must provide a log function; we do not have
        // any I/O configured.
        return;
    }
    // The default ReleaseSmall logger emits nothing, to save space.
    if (builtin.mode == .ReleaseSmall) return;
    const held = std.debug.getStderrMutex().acquire();
    defer held.release();
    const stderr = std.io.getStdErr().writer();
    nosuspend stderr.print(format ++ "\n", args) catch return;
}
/// Returns a scoped logging namespace that logs all messages using the scope
/// provided here.
pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
    return struct {
        /// Emergency: for conditions that cannot be handled; usually followed
        /// by a panic.
        pub fn emerg(comptime format: []const u8, args: anytype) void {
            @setCold(true);
            log(.emerg, scope, format, args);
        }

        /// Alert: for conditions that should be corrected immediately
        /// (e.g. database corruption).
        pub fn alert(comptime format: []const u8, args: anytype) void {
            @setCold(true);
            log(.alert, scope, format, args);
        }

        /// Critical: a bug has been detected or something has gone wrong and
        /// it will have an effect on the operation of the program.
        pub fn crit(comptime format: []const u8, args: anytype) void {
            @setCold(true);
            log(.crit, scope, format, args);
        }

        /// Error: a bug has been detected or something has gone wrong, but it
        /// is recoverable.
        pub fn err(comptime format: []const u8, args: anytype) void {
            @setCold(true);
            log(.err, scope, format, args);
        }

        /// Warning: it is uncertain whether something has gone wrong, but the
        /// circumstances would be worth investigating.
        pub fn warn(comptime format: []const u8, args: anytype) void {
            log(.warn, scope, format, args);
        }

        /// Notice: non-error but significant conditions.
        pub fn notice(comptime format: []const u8, args: anytype) void {
            log(.notice, scope, format, args);
        }

        /// Info: general messages about the state of the program.
        pub fn info(comptime format: []const u8, args: anytype) void {
            log(.info, scope, format, args);
        }

        /// Debug: messages only useful for debugging.
        pub fn debug(comptime format: []const u8, args: anytype) void {
            log(.debug, scope, format, args);
        }
    };
}
/// The default scoped logging namespace.
pub const default = scoped(.default);
// Convenience aliases so callers can write `std.log.info(...)` etc. without
// naming a scope explicitly; all of them use the `.default` scope above.
/// Log an emergency message using the default scope. This log level is
/// intended to be used for conditions that cannot be handled and is usually
/// followed by a panic.
pub const emerg = default.emerg;
/// Log an alert message using the default scope. This log level is intended to
/// be used for conditions that should be corrected immediately (e.g. database
/// corruption).
pub const alert = default.alert;
/// Log a critical message using the default scope. This log level is intended
/// to be used when a bug has been detected or something has gone wrong and it
/// will have an effect on the operation of the program.
pub const crit = default.crit;
/// Log an error message using the default scope. This log level is intended to
/// be used when a bug has been detected or something has gone wrong but it is
/// recoverable.
pub const err = default.err;
/// Log a warning message using the default scope. This log level is intended
/// to be used if it is uncertain whether something has gone wrong or not, but
/// the circumstances would be worth investigating.
pub const warn = default.warn;
/// Log a notice message using the default scope. This log level is intended to
/// be used for non-error but significant conditions.
pub const notice = default.notice;
/// Log an info message using the default scope. This log level is intended to
/// be used for general messages about the state of the program.
pub const info = default.info;
/// Log a debug message using the default scope. This log level is intended to
/// be used for messages which are only useful for debugging.
pub const debug = default.debug;
|
lib/std/log.zig
|
const std = @import("std");
const builtin = @import("builtin");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Ast = std.zig.Ast;
const Zir = @This();
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const TypedValue = @import("TypedValue.zig");
const Module = @import("Module.zig");
const LazySrcLoc = Module.LazySrcLoc;
/// The ZIR instructions, stored struct-of-arrays (tag and data in parallel
/// columns) for density.
instructions: std.MultiArrayList(Inst).Slice,
/// In order to store references to strings in fewer bytes, we copy all
/// string bytes into here. String bytes can be null. It is up to whomever
/// is referencing the data here whether they want to store both index and length,
/// thus allowing null bytes, or store only index, and use null-termination. The
/// `string_bytes` array is agnostic to either usage.
/// Indexes 0 and 1 are reserved for special cases.
string_bytes: []u8,
/// The meaning of this data is determined by `Inst.Tag` value.
/// The first few indexes are reserved. See `ExtraIndex` for the values.
extra: []u32,
/// The data stored at byte offset 0 when ZIR is stored in a file.
pub const Header = extern struct {
    // Lengths of the three top-level arrays, so a reader can slice the file.
    instructions_len: u32,
    string_bytes_len: u32,
    extra_len: u32,
    // File metadata of the source — presumably used for cache staleness
    // detection (inode/size/mtime); confirm against the cache code.
    stat_inode: std.fs.File.INode,
    stat_size: u64,
    stat_mtime: i128,
};
/// Well-known reserved slots at the start of `extra`; a value of 0 in a slot
/// means "none".
pub const ExtraIndex = enum(u32) {
    /// If this is 0, no compile errors. Otherwise there is a `CompileErrors`
    /// payload at this index.
    compile_errors,
    /// If this is 0, this file contains no imports. Otherwise there is a `Imports`
    /// payload at this index.
    imports,
    _,
};
/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
/// Decodes one u32 per field of `T`, converting each according to the field's type.
pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, end: usize } {
    const fields = @typeInfo(T).Struct.fields;
    var result: T = undefined;
    var cursor: usize = index;
    inline for (fields) |field| {
        const raw = code.extra[cursor];
        @field(result, field.name) = switch (field.field_type) {
            u32 => raw,
            Inst.Ref => @intToEnum(Inst.Ref, raw),
            i32 => @bitCast(i32, raw),
            Inst.Call.Flags => @bitCast(Inst.Call.Flags, raw),
            Inst.BuiltinCall.Flags => @bitCast(Inst.BuiltinCall.Flags, raw),
            Inst.SwitchBlock.Bits => @bitCast(Inst.SwitchBlock.Bits, raw),
            Inst.FuncFancy.Bits => @bitCast(Inst.FuncFancy.Bits, raw),
            else => @compileError("bad field type"),
        };
        cursor += 1;
    }
    return .{
        .data = result,
        .end = cursor,
    };
}
/// Given an index into `string_bytes` returns the null-terminated string found there.
pub fn nullTerminatedString(code: Zir, index: usize) [:0]const u8 {
    const bytes = code.string_bytes;
    // Scan forward to the terminating zero byte.
    var end = index;
    while (bytes[end] != 0) : (end += 1) {}
    return bytes[index..end :0];
}
/// Reinterpret `len` u32s of `extra` starting at `start` as a slice of `Inst.Ref`.
pub fn refSlice(code: Zir, start: usize, len: usize) []Inst.Ref {
    const raw = code.extra[start .. start + len];
    // TODO we should be able to directly `@ptrCast` the slice to the other slice type.
    return @ptrCast([*]Inst.Ref, raw.ptr)[0..len];
}
/// True when the reserved `compile_errors` slot of `extra` points at a payload.
pub fn hasCompileErrors(code: Zir) bool {
    const payload_index = code.extra[@enumToInt(ExtraIndex.compile_errors)];
    return payload_index != 0;
}
/// Release all memory owned by this ZIR and poison the value.
pub fn deinit(code: *Zir, gpa: Allocator) void {
    gpa.free(code.extra);
    gpa.free(code.string_bytes);
    code.instructions.deinit(gpa);
    code.* = undefined;
}
/// ZIR is structured so that the outermost "main" struct of any file
/// is always at index 0.
pub const main_struct_inst: Inst.Index = 0;
/// These are untyped instructions generated from an Abstract Syntax Tree.
/// The data here is immutable because it is possible to have multiple
/// analyses on the same ZIR happening at the same time.
pub const Inst = struct {
tag: Tag,
data: Data,
/// These names are used directly as the instruction names in the text format.
/// See `data_field_map` for a list of which `Data` fields are used by each `Tag`.
pub const Tag = enum(u8) {
/// Arithmetic addition, asserts no integer overflow.
/// Uses the `pl_node` union field. Payload is `Bin`.
add,
/// Twos complement wrapping integer addition.
/// Uses the `pl_node` union field. Payload is `Bin`.
addwrap,
/// Saturating addition.
/// Uses the `pl_node` union field. Payload is `Bin`.
add_sat,
/// Arithmetic subtraction. Asserts no integer overflow.
/// Uses the `pl_node` union field. Payload is `Bin`.
sub,
/// Twos complement wrapping integer subtraction.
/// Uses the `pl_node` union field. Payload is `Bin`.
subwrap,
/// Saturating subtraction.
/// Uses the `pl_node` union field. Payload is `Bin`.
sub_sat,
/// Arithmetic multiplication. Asserts no integer overflow.
/// Uses the `pl_node` union field. Payload is `Bin`.
mul,
/// Twos complement wrapping integer multiplication.
/// Uses the `pl_node` union field. Payload is `Bin`.
mulwrap,
/// Saturating multiplication.
/// Uses the `pl_node` union field. Payload is `Bin`.
mul_sat,
/// Implements the `@divExact` builtin.
/// Uses the `pl_node` union field with payload `Bin`.
div_exact,
/// Implements the `@divFloor` builtin.
/// Uses the `pl_node` union field with payload `Bin`.
div_floor,
/// Implements the `@divTrunc` builtin.
/// Uses the `pl_node` union field with payload `Bin`.
div_trunc,
/// Implements the `@mod` builtin.
/// Uses the `pl_node` union field with payload `Bin`.
mod,
/// Implements the `@rem` builtin.
/// Uses the `pl_node` union field with payload `Bin`.
rem,
/// Ambiguously remainder division or modulus. If the computation would possibly have
/// a different value depending on whether the operation is remainder division or modulus,
/// a compile error is emitted. Otherwise the computation is performed.
/// Uses the `pl_node` union field. Payload is `Bin`.
mod_rem,
/// Integer shift-left. Zeroes are shifted in from the right hand side.
/// Uses the `pl_node` union field. Payload is `Bin`.
shl,
/// Implements the `@shlExact` builtin.
/// Uses the `pl_node` union field with payload `Bin`.
shl_exact,
/// Saturating shift-left.
/// Uses the `pl_node` union field. Payload is `Bin`.
shl_sat,
/// Integer shift-right. Arithmetic or logical depending on the signedness of
/// the integer type.
/// Uses the `pl_node` union field. Payload is `Bin`.
shr,
/// Implements the `@shrExact` builtin.
/// Uses the `pl_node` union field with payload `Bin`.
shr_exact,
/// Declares a parameter of the current function. Used for:
/// * debug info
/// * checking shadowing against declarations in the current namespace
/// * parameter type expressions referencing other parameters
/// These occur in the block outside a function body (the same block as
/// contains the func instruction).
/// Uses the `pl_tok` field. Token is the parameter name, payload is a `Param`.
param,
/// Same as `param` except the parameter is marked comptime.
param_comptime,
/// Same as `param` except the parameter is marked anytype.
/// Uses the `str_tok` field. Token is the parameter name. String is the parameter name.
param_anytype,
/// Same as `param` except the parameter is marked both comptime and anytype.
/// Uses the `str_tok` field. Token is the parameter name. String is the parameter name.
param_anytype_comptime,
/// Array concatenation. `a ++ b`
/// Uses the `pl_node` union field. Payload is `Bin`.
array_cat,
/// Array multiplication `a ** b`
/// Uses the `pl_node` union field. Payload is `Bin`.
array_mul,
/// `[N]T` syntax. No source location provided.
/// Uses the `bin` union field. lhs is length, rhs is element type.
array_type,
/// `[N:S]T` syntax. Source location is the array type expression node.
/// Uses the `pl_node` union field. Payload is `ArrayTypeSentinel`.
array_type_sentinel,
/// `@Vector` builtin.
/// Uses the `pl_node` union field with `Bin` payload.
/// lhs is length, rhs is element type.
vector_type,
/// Given an indexable type, returns the type of the element at given index.
/// Uses the `bin` union field. lhs is the indexable type, rhs is the index.
elem_type_index,
/// Given a pointer to an indexable object, returns the len property. This is
/// used by for loops. This instruction also emits a for-loop specific compile
/// error if the indexable object is not indexable.
/// Uses the `un_node` field. The AST node is the for loop node.
indexable_ptr_len,
/// Create a `anyframe->T` type.
/// Uses the `un_node` field.
anyframe_type,
/// Type coercion. No source location attached.
/// Uses the `bin` field.
as,
/// Type coercion to the function's return type.
/// Uses the `pl_node` field. Payload is `As`. AST node could be many things.
as_node,
/// Bitwise AND. `&`
bit_and,
/// Reinterpret the memory representation of a value as a different type.
/// Uses the pl_node field with payload `Bin`.
bitcast,
/// Bitwise NOT. `~`
/// Uses `un_node`.
bit_not,
/// Bitwise OR. `|`
bit_or,
/// A labeled block of code, which can return a value.
/// Uses the `pl_node` union field. Payload is `Block`.
block,
/// A list of instructions which are analyzed in the parent context, without
/// generating a runtime block. Must terminate with an "inline" variant of
/// a noreturn instruction.
/// Uses the `pl_node` union field. Payload is `Block`.
block_inline,
/// Implements `suspend {...}`.
/// Uses the `pl_node` union field. Payload is `Block`.
suspend_block,
/// Boolean NOT. See also `bit_not`.
/// Uses the `un_node` field.
bool_not,
/// Short-circuiting boolean `and`. `lhs` is a boolean `Ref` and the other operand
/// is a block, which is evaluated if `lhs` is `true`.
/// Uses the `bool_br` union field.
bool_br_and,
/// Short-circuiting boolean `or`. `lhs` is a boolean `Ref` and the other operand
/// is a block, which is evaluated if `lhs` is `false`.
/// Uses the `bool_br` union field.
bool_br_or,
/// Return a value from a block.
/// Uses the `break` union field.
/// Uses the source information from previous instruction.
@"break",
/// Return a value from a block. This instruction is used as the terminator
/// of a `block_inline`. It allows using the return value from `Sema.analyzeBody`.
/// This instruction may also be used when it is known that there is only one
/// break instruction in a block, and the target block is the parent.
/// Uses the `break` union field.
break_inline,
/// Function call.
/// Uses the `pl_node` union field with payload `Call`.
/// AST node is the function call.
call,
/// Implements the `@call` builtin.
/// Uses the `pl_node` union field with payload `BuiltinCall`.
/// AST node is the builtin call.
builtin_call,
/// `<`
/// Uses the `pl_node` union field. Payload is `Bin`.
cmp_lt,
/// `<=`
/// Uses the `pl_node` union field. Payload is `Bin`.
cmp_lte,
/// `==`
/// Uses the `pl_node` union field. Payload is `Bin`.
cmp_eq,
/// `>=`
/// Uses the `pl_node` union field. Payload is `Bin`.
cmp_gte,
/// `>`
/// Uses the `pl_node` union field. Payload is `Bin`.
cmp_gt,
/// `!=`
/// Uses the `pl_node` union field. Payload is `Bin`.
cmp_neq,
/// Coerces a result location pointer to a new element type. It is evaluated "backwards"-
/// as type coercion from the new element type to the old element type.
/// Uses the `bin` union field.
/// LHS is destination element type, RHS is result pointer.
coerce_result_ptr,
/// Conditional branch. Splits control flow based on a boolean condition value.
/// Uses the `pl_node` union field. AST node is an if, while, for, etc.
/// Payload is `CondBr`.
condbr,
/// Same as `condbr`, except the condition is coerced to a comptime value, and
/// only the taken branch is analyzed. The then block and else block must
/// terminate with an "inline" variant of a noreturn instruction.
condbr_inline,
/// Given an operand which is an error union, splits control flow. In
/// case of error, control flow goes into the block that is part of this
/// instruction, which is guaranteed to end with a return instruction
/// and never breaks out of the block.
/// In the case of non-error, control flow proceeds to the next instruction
/// after the `try`, with the result of this instruction being the unwrapped
/// payload value, as if `err_union_payload_unsafe` was executed on the operand.
/// Uses the `pl_node` union field. Payload is `Try`.
@"try",
///// Same as `try` except the operand is coerced to a comptime value, and
///// only the taken branch is analyzed. The block must terminate with an "inline"
///// variant of a noreturn instruction.
//try_inline,
/// Same as `try` except the operand is a pointer and the result is a pointer.
try_ptr,
///// Same as `try_inline` except the operand is a pointer and the result is a pointer.
//try_ptr_inline,
/// An error set type definition. Contains a list of field names.
/// Uses the `pl_node` union field. Payload is `ErrorSetDecl`.
error_set_decl,
/// Same as `error_set_decl` but with an anonymous-style declaration name.
/// NOTE(review): semantics inferred from the `_anon` suffix naming pattern
/// used elsewhere in this enum — confirm against AstGen's emission sites.
error_set_decl_anon,
/// Same as `error_set_decl` but with a name derived from the owning function.
/// NOTE(review): semantics inferred from the `_func` suffix naming pattern
/// used elsewhere in this enum — confirm against AstGen's emission sites.
error_set_decl_func,
/// Declares the beginning of a statement. Used for debug info.
/// Uses the `dbg_stmt` union field. The line and column are offset
/// from the parent declaration.
dbg_stmt,
/// Marks a variable declaration. Used for debug info.
/// Uses the `str_op` union field. The string is the local variable name,
/// and the operand is the pointer to the variable's location. The local
/// may be a const or a var.
dbg_var_ptr,
/// Same as `dbg_var_ptr` but the local is always a const and the operand
/// is the local's value.
dbg_var_val,
/// Marks the beginning of a semantic scope for debug info variables.
dbg_block_begin,
/// Marks the end of a semantic scope for debug info variables.
dbg_block_end,
/// Uses a name to identify a Decl and takes a pointer to it.
/// Uses the `str_tok` union field.
decl_ref,
/// Uses a name to identify a Decl and uses it as a value.
/// Uses the `str_tok` union field.
decl_val,
/// Load the value from a pointer. Assumes `x.*` syntax.
/// Uses `un_node` field. AST node is the `x.*` syntax.
load,
/// Arithmetic division. Asserts no integer overflow.
/// Uses the `pl_node` union field. Payload is `Bin`.
div,
/// Given a pointer to an array, slice, or pointer, returns a pointer to the element at
/// the provided index. Uses the `bin` union field. Source location is implied
/// to be the same as the previous instruction.
elem_ptr,
/// Same as `elem_ptr` except also stores a source location node.
/// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`.
elem_ptr_node,
/// Same as `elem_ptr_node` except the index is stored immediately rather than
/// as a reference to another ZIR instruction.
/// Uses the `pl_node` union field. AST node is an element inside array initialization
/// syntax. Payload is `ElemPtrImm`.
elem_ptr_imm,
/// Given an array, slice, or pointer, returns the element at the provided index.
/// Uses the `bin` union field. Source location is implied to be the same
/// as the previous instruction.
elem_val,
/// Same as `elem_val` except also stores a source location node.
/// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`.
elem_val_node,
/// Emits a compile error if the operand is not `void`.
/// Uses the `un_node` field.
ensure_result_used,
/// Emits a compile error if an error is ignored.
/// Uses the `un_node` field.
ensure_result_non_error,
/// Create a `E!T` type.
/// Uses the `pl_node` field with `Bin` payload.
error_union_type,
/// `error.Foo` syntax. Uses the `str_tok` field of the Data union.
error_value,
/// Implements the `@export` builtin function, based on either an identifier to a Decl,
/// or field access of a Decl. The thing being exported is the Decl.
/// Uses the `pl_node` union field. Payload is `Export`.
@"export",
/// Implements the `@export` builtin function, based on a comptime-known value.
/// The thing being exported is the comptime-known value which is the operand.
/// Uses the `pl_node` union field. Payload is `ExportValue`.
export_value,
/// Given a pointer to a struct or object that contains virtual fields, returns a pointer
/// to the named field. The field name is stored in string_bytes. Used by a.b syntax.
/// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
field_ptr,
/// Given a struct or object that contains virtual fields, returns the named field.
/// The field name is stored in string_bytes. Used by a.b syntax.
/// This instruction also accepts a pointer.
/// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
field_val,
/// Given a pointer to a struct or object that contains virtual fields, returns the
/// named field. If there is no named field, searches in the type for a decl that
/// matches the field name. The decl is resolved and we ensure that it's a function
/// which can accept the object as the first parameter, with one pointer fixup. If
/// all of that works, this instruction produces a special "bound function" value
/// which contains both the function and the saved first parameter value.
/// Bound functions may only be used as the function parameter to a `call` or
/// `builtin_call` instruction. Any other use is invalid zir and may crash the compiler.
field_call_bind,
/// Given a pointer to a struct or object that contains virtual fields, returns a pointer
/// to the named field. The field name is a comptime instruction. Used by @field.
/// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
field_ptr_named,
/// Given a struct or object that contains virtual fields, returns the named field.
/// The field name is a comptime instruction. Used by @field.
/// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
field_val_named,
/// Returns a function type, or a function instance, depending on whether
/// the body_len is 0. Calling convention is auto.
/// Uses the `pl_node` union field. `payload_index` points to a `Func`.
func,
/// Same as `func` but has an inferred error set.
func_inferred,
/// Represents a function declaration or function prototype, depending on
/// whether body_len is 0.
/// Uses the `pl_node` union field. `payload_index` points to a `FuncFancy`.
func_fancy,
/// Implements the `@import` builtin.
/// Uses the `str_tok` field.
import,
/// Integer literal that fits in a u64. Uses the `int` union field.
int,
/// Arbitrary sized integer literal. Uses the `str` union field.
int_big,
/// A float literal that fits in a f64. Uses the float union value.
float,
/// A float literal that fits in a f128. Uses the `pl_node` union value.
/// Payload is `Float128`.
float128,
/// Make an integer type out of signedness and bit count.
/// Payload is `int_type`
int_type,
/// Return a boolean false if an optional is null. `x != null`
/// Uses the `un_node` field.
is_non_null,
/// Return a boolean false if an optional is null. `x.* != null`
/// Uses the `un_node` field.
is_non_null_ptr,
/// Return a boolean false if value is an error
/// Uses the `un_node` field.
is_non_err,
/// Return a boolean false if dereferenced pointer is an error
/// Uses the `un_node` field.
is_non_err_ptr,
/// A labeled block of code that loops forever. At the end of the body will have either
/// a `repeat` instruction or a `repeat_inline` instruction.
/// Uses the `pl_node` field. The AST node is either a for loop or while loop.
/// This ZIR instruction is needed because AIR does not (yet?) match ZIR, and Sema
/// needs to emit more than 1 AIR block for this instruction.
/// The payload is `Block`.
loop,
/// Sends runtime control flow back to the beginning of the current block.
/// Uses the `node` field.
repeat,
/// Sends comptime control flow back to the beginning of the current block.
/// Uses the `node` field.
repeat_inline,
/// Merge two error sets into one, `E1 || E2`.
/// Uses the `pl_node` field with payload `Bin`.
merge_error_sets,
/// Given a reference to a function and a parameter index, returns the
/// type of the parameter. The only usage of this instruction is for the
/// result location of parameters of function calls. In the case of a function's
/// parameter type being `anytype`, it is the type coercion's job to detect this
/// scenario and skip the coercion, so that semantic analysis of this instruction
/// is not in a position where it must create an invalid type.
/// Uses the `param_type` union field.
param_type,
/// Turns an R-Value into a const L-Value. In other words, it takes a value,
/// stores it in a memory location, and returns a const pointer to it. If the value
/// is `comptime`, the memory location is global static constant data. Otherwise,
/// the memory location is in the stack frame, local to the scope containing the
/// instruction.
/// Uses the `un_tok` union field.
ref,
/// Sends control flow back to the function's callee.
/// Includes an operand as the return value.
/// Includes an AST node source location.
/// Uses the `un_node` union field.
ret_node,
/// Sends control flow back to the function's callee.
/// The operand is a `ret_ptr` instruction, where the return value can be found.
/// Includes an AST node source location.
/// Uses the `un_node` union field.
ret_load,
/// Sends control flow back to the function's callee.
/// Includes an operand as the return value.
/// Includes a token source location.
/// Uses the `un_tok` union field.
ret_tok,
/// Sends control flow back to the function's callee.
/// The return operand is `error.foo` where `foo` is given by the string.
/// If the current function has an inferred error set, the error given by the
/// name is added to it.
/// Uses the `str_tok` union field.
ret_err_value,
/// A string name is provided which is an anonymous error set value.
/// If the current function has an inferred error set, the error given by the
/// name is added to it.
/// Results in the error code. Note that control flow is not diverted with
/// this instruction; a following 'ret' instruction will do the diversion.
/// Uses the `str_tok` union field.
ret_err_value_code,
/// Obtains a pointer to the return value.
/// Uses the `node` union field.
ret_ptr,
/// Obtains the return type of the in-scope function.
/// Uses the `node` union field.
ret_type,
/// Create a pointer type that does not have a sentinel, alignment, address space, or bit range specified.
/// Uses the `ptr_type_simple` union field.
ptr_type_simple,
/// Create a pointer type which can have a sentinel, alignment, address space, and/or bit range.
/// Uses the `ptr_type` union field.
ptr_type,
/// Slice operation `lhs[rhs..]`. No sentinel and no end offset.
/// Returns a pointer to the subslice.
/// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceStart`.
slice_start,
/// Slice operation `array_ptr[start..end]`. No sentinel.
/// Returns a pointer to the subslice.
/// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceEnd`.
slice_end,
/// Slice operation `array_ptr[start..end:sentinel]`.
/// Returns a pointer to the subslice.
/// Uses the `pl_node` field. AST node is the slice syntax. Payload is `SliceSentinel`.
slice_sentinel,
/// Write a value to a pointer. For loading, see `load`.
/// Source location is assumed to be same as previous instruction.
/// Uses the `bin` union field.
store,
/// Same as `store` except provides a source location.
/// Uses the `pl_node` union field. Payload is `Bin`.
store_node,
/// This instruction is not really supposed to be emitted from AstGen; nevertheless it
/// is sometimes emitted due to deficiencies in AstGen. When Sema sees this instruction,
/// it must clean up after AstGen's mess by looking at various context clues and
/// then treating it as one of the following:
/// * no-op
/// * store_to_inferred_ptr
/// * store
/// Uses the `bin` union field with LHS as the pointer to store to.
store_to_block_ptr,
/// Same as `store` but the type of the value being stored will be used to infer
/// the pointer type.
/// Uses the `bin` union field - Astgen.zig depends on the ability to change
/// the tag of an instruction from `store_to_block_ptr` to `store_to_inferred_ptr`
/// without changing the data.
store_to_inferred_ptr,
/// String Literal. Makes an anonymous Decl and then takes a pointer to it.
/// Uses the `str` union field.
str,
/// Arithmetic negation. Asserts no integer overflow.
/// Same as sub with a lhs of 0, split into a separate instruction to save memory.
/// Uses `un_node`.
negate,
/// Twos complement wrapping integer negation.
/// Same as subwrap with a lhs of 0, split into a separate instruction to save memory.
/// Uses `un_node`.
negate_wrap,
/// Returns the type of a value.
/// Uses the `un_node` field.
typeof,
/// Implements `@TypeOf` for one operand.
/// Uses the `pl_node` field.
typeof_builtin,
/// Given a value, look at the type of it, which must be an integer type.
/// Returns the integer type for the RHS of a shift operation.
/// Uses the `un_node` field.
typeof_log2_int_type,
/// Given an integer type, returns the integer type for the RHS of a shift operation.
/// Uses the `un_node` field.
log2_int_type,
/// Asserts control-flow will not reach this instruction (`unreachable`).
/// Uses the `unreachable` union field.
@"unreachable",
/// Bitwise XOR. `^`
/// Uses the `pl_node` union field. Payload is `Bin`.
xor,
/// Create an optional type '?T'
/// Uses the `un_node` field.
optional_type,
/// ?T => T with safety.
/// Given an optional value, returns the payload value, with a safety check that
/// the value is non-null. Used for `orelse`, `if` and `while`.
/// Uses the `un_node` field.
optional_payload_safe,
/// ?T => T without safety.
/// Given an optional value, returns the payload value. No safety checks.
/// Uses the `un_node` field.
optional_payload_unsafe,
/// *?T => *T with safety.
/// Given a pointer to an optional value, returns a pointer to the payload value,
/// with a safety check that the value is non-null. Used for `orelse`, `if` and `while`.
/// Uses the `un_node` field.
optional_payload_safe_ptr,
/// *?T => *T without safety.
/// Given a pointer to an optional value, returns a pointer to the payload value.
/// No safety checks.
/// Uses the `un_node` field.
optional_payload_unsafe_ptr,
/// E!T => T with safety.
/// Given an error union value, returns the payload value, with a safety check
/// that the value is not an error. Used for catch, if, and while.
/// Uses the `un_node` field.
err_union_payload_safe,
/// E!T => T without safety.
/// Given an error union value, returns the payload value. No safety checks.
/// Uses the `un_node` field.
err_union_payload_unsafe,
/// *E!T => *T with safety.
/// Given a pointer to an error union value, returns a pointer to the payload value,
/// with a safety check that the value is not an error. Used for catch, if, and while.
/// Uses the `un_node` field.
err_union_payload_safe_ptr,
/// *E!T => *T without safety.
/// Given a pointer to a error union value, returns a pointer to the payload value.
/// No safety checks.
/// Uses the `un_node` field.
err_union_payload_unsafe_ptr,
/// E!T => E without safety.
/// Given an error union value, returns the error code. No safety checks.
/// Uses the `un_node` field.
err_union_code,
/// *E!T => E without safety.
/// Given a pointer to an error union value, returns the error code. No safety checks.
/// Uses the `un_node` field.
err_union_code_ptr,
/// Takes a *E!T and raises a compiler error if T != void
/// Uses the `un_tok` field.
ensure_err_payload_void,
/// An enum literal. Uses the `str_tok` union field.
enum_literal,
/// A switch expression. Uses the `pl_node` union field.
/// AST node is the switch, payload is `SwitchBlock`.
switch_block,
/// Produces the value that will be switched on. For example, for
/// integers, it returns the integer with no modifications. For tagged unions, it
/// returns the active enum tag.
/// Uses the `un_node` union field.
switch_cond,
/// Same as `switch_cond`, except the input operand is a pointer to
/// what will be switched on.
/// Uses the `un_node` union field.
switch_cond_ref,
/// Produces the capture value for a switch prong.
/// Uses the `switch_capture` field.
/// If the `prong_index` field is max int, it means this is the capture
/// for the else/`_` prong.
switch_capture,
/// Produces the capture value for a switch prong.
/// Result is a pointer to the value.
/// Uses the `switch_capture` field.
/// If the `prong_index` field is max int, it means this is the capture
/// for the else/`_` prong.
switch_capture_ref,
/// Produces the capture value for a switch prong.
/// The prong is one of the multi cases.
/// Uses the `switch_capture` field.
switch_capture_multi,
/// Produces the capture value for a switch prong.
/// The prong is one of the multi cases.
/// Result is a pointer to the value.
/// Uses the `switch_capture` field.
switch_capture_multi_ref,
/// Given a
/// *A returns *A
/// *E!A returns *A
/// *?A returns *A
/// Uses the `un_node` field.
array_base_ptr,
/// Given a
/// *S returns *S
/// *E!S returns *S
/// *?S returns *S
/// Uses the `un_node` field.
field_base_ptr,
/// Checks that the type supports array init syntax.
/// Uses the `un_node` field.
validate_array_init_ty,
/// Checks that the type supports struct init syntax.
/// Uses the `un_node` field.
validate_struct_init_ty,
/// Given a set of `field_ptr` instructions, assumes they are all part of a struct
/// initialization expression, and emits compile errors for duplicate fields
/// as well as missing fields, if applicable.
/// This instruction asserts that there is at least one field_ptr instruction,
/// because it must use one of them to find out the struct type.
/// Uses the `pl_node` field. Payload is `Block`.
validate_struct_init,
/// Same as `validate_struct_init` but additionally communicates that the
/// resulting struct initialization value is within a comptime scope.
validate_struct_init_comptime,
/// Given a set of `elem_ptr_imm` instructions, assumes they are all part of an
/// array initialization expression, and emits a compile error if the number of
/// elements does not match the array type.
/// This instruction asserts that there is at least one `elem_ptr_imm` instruction,
/// because it must use one of them to find out the array type.
/// Uses the `pl_node` field. Payload is `Block`.
validate_array_init,
/// Same as `validate_array_init` but additionally communicates that the
/// resulting array initialization value is within a comptime scope.
validate_array_init_comptime,
/// A struct literal with a specified type, with no fields.
/// Uses the `un_node` field.
struct_init_empty,
/// Given a struct or union, and a field name as a string index,
/// returns the field type. Uses the `pl_node` field. Payload is `FieldType`.
field_type,
/// Given a struct or union, and a field name as a Ref,
/// returns the field type. Uses the `pl_node` field. Payload is `FieldTypeRef`.
field_type_ref,
/// Finalizes a typed struct or union initialization, performs validation, and returns the
/// struct or union value.
/// Uses the `pl_node` field. Payload is `StructInit`.
struct_init,
/// Struct initialization syntax, make the result a pointer.
/// Uses the `pl_node` field. Payload is `StructInit`.
struct_init_ref,
/// Struct initialization without a type.
/// Uses the `pl_node` field. Payload is `StructInitAnon`.
struct_init_anon,
/// Anonymous struct initialization syntax, make the result a pointer.
/// Uses the `pl_node` field. Payload is `StructInitAnon`.
struct_init_anon_ref,
/// Array initialization syntax.
/// Uses the `pl_node` field. Payload is `MultiOp`.
array_init,
/// Anonymous array initialization syntax.
/// Uses the `pl_node` field. Payload is `MultiOp`.
array_init_anon,
/// Array initialization syntax, make the result a pointer.
/// Uses the `pl_node` field. Payload is `MultiOp`.
array_init_ref,
/// Anonymous array initialization syntax, make the result a pointer.
/// Uses the `pl_node` field. Payload is `MultiOp`.
array_init_anon_ref,
/// Implements the `@unionInit` builtin.
/// Uses the `pl_node` field. Payload is `UnionInit`.
union_init,
/// Implements the `@typeInfo` builtin. Uses `un_node`.
type_info,
/// Implements the `@sizeOf` builtin. Uses `un_node`.
size_of,
/// Implements the `@bitSizeOf` builtin. Uses `un_node`.
bit_size_of,
/// Implement builtin `@ptrToInt`. Uses `un_node`.
/// Convert a pointer to a `usize` integer.
ptr_to_int,
/// Implement builtin `@errToInt`. Uses `un_node`.
error_to_int,
/// Implement builtin `@intToError`. Uses `un_node`.
int_to_error,
/// Emit an error message and fail compilation.
/// Uses the `un_node` field.
compile_error,
/// Changes the maximum number of backwards branches that compile-time
/// code execution can use before giving up and making a compile error.
/// Uses the `un_node` union field.
set_eval_branch_quota,
/// Converts an enum value into an integer. Resulting type will be the tag type
/// of the enum. Uses `un_node`.
enum_to_int,
/// Implement builtin `@alignOf`. Uses `un_node`.
align_of,
/// Implement builtin `@boolToInt`. Uses `un_node`.
bool_to_int,
/// Implement builtin `@embedFile`. Uses `un_node`.
embed_file,
/// Implement builtin `@errorName`. Uses `un_node`.
error_name,
/// Implement builtin `@panic`. Uses `un_node`.
panic,
/// Implement builtin `@setCold`. Uses `un_node`.
set_cold,
/// Implement builtin `@setRuntimeSafety`. Uses `un_node`.
set_runtime_safety,
/// Implement builtin `@sqrt`. Uses `un_node`.
sqrt,
/// Implement builtin `@sin`. Uses `un_node`.
sin,
/// Implement builtin `@cos`. Uses `un_node`.
cos,
/// Implement builtin `@tan`. Uses `un_node`.
tan,
/// Implement builtin `@exp`. Uses `un_node`.
exp,
/// Implement builtin `@exp2`. Uses `un_node`.
exp2,
/// Implement builtin `@log`. Uses `un_node`.
log,
/// Implement builtin `@log2`. Uses `un_node`.
log2,
/// Implement builtin `@log10`. Uses `un_node`.
log10,
/// Implement builtin `@fabs`. Uses `un_node`.
fabs,
/// Implement builtin `@floor`. Uses `un_node`.
floor,
/// Implement builtin `@ceil`. Uses `un_node`.
ceil,
/// Implement builtin `@trunc`. Uses `un_node`.
trunc,
/// Implement builtin `@round`. Uses `un_node`.
round,
/// Implement builtin `@tagName`. Uses `un_node`.
tag_name,
/// Implement builtin `@Type`. Uses `un_node`.
reify,
/// Implement builtin `@typeName`. Uses `un_node`.
type_name,
/// Implement builtin `@Frame`. Uses `un_node`.
frame_type,
/// Implement builtin `@frameSize`. Uses `un_node`.
frame_size,
/// Implements the `@floatToInt` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
float_to_int,
/// Implements the `@intToFloat` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
int_to_float,
/// Implements the `@intToPtr` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
int_to_ptr,
/// Converts an integer into an enum value.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
int_to_enum,
/// Convert a larger float type to any other float type, possibly causing
/// a loss of precision.
/// Uses the `pl_node` field. AST is the `@floatCast` syntax.
/// Payload is `Bin` with lhs as the dest type, rhs the operand.
float_cast,
/// Implements the `@intCast` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
/// Convert an integer value to another integer type, asserting that the destination type
/// can hold the same mathematical value.
int_cast,
/// Implements the `@ptrCast` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
ptr_cast,
/// Implements the `@truncate` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
truncate,
/// Implements the `@alignCast` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest alignment, `rhs` is operand.
align_cast,
/// Implements the `@hasDecl` builtin.
/// Uses the `pl_node` union field. Payload is `Bin`.
has_decl,
/// Implements the `@hasField` builtin.
/// Uses the `pl_node` union field. Payload is `Bin`.
has_field,
/// Implements the `@clz` builtin. Uses the `un_node` union field.
clz,
/// Implements the `@ctz` builtin. Uses the `un_node` union field.
ctz,
/// Implements the `@popCount` builtin. Uses the `un_node` union field.
pop_count,
/// Implements the `@byteSwap` builtin. Uses the `un_node` union field.
byte_swap,
/// Implements the `@bitReverse` builtin. Uses the `un_node` union field.
bit_reverse,
/// Implements the `@bitOffsetOf` builtin.
/// Uses the `pl_node` union field with payload `Bin`.
bit_offset_of,
/// Implements the `@offsetOf` builtin.
/// Uses the `pl_node` union field with payload `Bin`.
offset_of,
/// Implements the `@cmpxchgStrong` builtin.
/// Uses the `pl_node` union field with payload `Cmpxchg`.
cmpxchg_strong,
/// Implements the `@cmpxchgWeak` builtin.
/// Uses the `pl_node` union field with payload `Cmpxchg`.
cmpxchg_weak,
/// Implements the `@splat` builtin.
/// Uses the `pl_node` union field with payload `Bin`.
splat,
/// Implements the `@reduce` builtin.
/// Uses the `pl_node` union field with payload `Bin`.
reduce,
/// Implements the `@shuffle` builtin.
/// Uses the `pl_node` union field with payload `Shuffle`.
shuffle,
/// Implements the `@select` builtin.
/// Uses the `pl_node` union field with payload `Select`.
select,
/// Implements the `@atomicLoad` builtin.
/// Uses the `pl_node` union field with payload `AtomicLoad`.
atomic_load,
/// Implements the `@atomicRmw` builtin.
/// Uses the `pl_node` union field with payload `AtomicRmw`.
atomic_rmw,
/// Implements the `@atomicStore` builtin.
/// Uses the `pl_node` union field with payload `AtomicStore`.
atomic_store,
/// Implements the `@mulAdd` builtin.
/// Uses the `pl_node` union field with payload `MulAdd`.
/// The addend communicates the type of the builtin.
/// The mulends need to be coerced to the same type.
mul_add,
/// Implements the `@fieldParentPtr` builtin.
/// Uses the `pl_node` union field with payload `FieldParentPtr`.
field_parent_ptr,
/// Implements the `@memcpy` builtin.
/// Uses the `pl_node` union field with payload `Memcpy`.
memcpy,
/// Implements the `@memset` builtin.
/// Uses the `pl_node` union field with payload `Memset`.
memset,
/// Implements the `@minimum` builtin.
/// Uses the `pl_node` union field with payload `Bin`
minimum,
/// Implements the `@maximum` builtin.
/// Uses the `pl_node` union field with payload `Bin`
maximum,
/// Implements the `@asyncCall` builtin.
/// Uses the `pl_node` union field with payload `AsyncCall`.
builtin_async_call,
/// Implements the `@cImport` builtin.
/// Uses the `pl_node` union field with payload `Block`.
c_import,
/// Allocates stack local memory.
/// Uses the `un_node` union field. The operand is the type of the allocated object.
/// The node source location points to a var decl node.
alloc,
/// Same as `alloc` except mutable.
alloc_mut,
/// Allocates comptime-mutable memory.
/// Uses the `un_node` union field. The operand is the type of the allocated object.
/// The node source location points to a var decl node.
alloc_comptime_mut,
/// Same as `alloc` except the type is inferred.
/// Uses the `node` union field.
alloc_inferred,
/// Same as `alloc_inferred` except mutable.
alloc_inferred_mut,
/// Allocates comptime const memory.
/// Uses the `node` union field. The type of the allocated object is inferred.
/// The node source location points to a var decl node.
alloc_inferred_comptime,
/// Same as `alloc_comptime_mut` except the type is inferred.
alloc_inferred_comptime_mut,
/// Each `store_to_inferred_ptr` puts the type of the stored value into a set,
/// and then `resolve_inferred_alloc` triggers peer type resolution on the set.
/// The operand is a `alloc_inferred` or `alloc_inferred_mut` instruction, which
/// is the allocation that needs to have its type inferred.
/// Uses the `un_node` field. The AST node is the var decl.
resolve_inferred_alloc,
/// Turns a pointer coming from an `alloc`, `alloc_inferred`, `alloc_inferred_comptime` or
/// `Extended.alloc` into a constant version of the same pointer.
/// Uses the `un_node` union field.
make_ptr_const,
/// Implements `resume` syntax. Uses `un_node` field.
@"resume",
/// Implements `await` syntax.
/// NOTE(review): likely uses the `un_node` field like `resume` (both take a
/// single frame operand with an AST node location) — confirm field usage.
@"await",
/// When a type or function refers to a comptime value from an outer
/// scope, that forms a closure over comptime value. The outer scope
/// will record a capture of that value, which encodes its current state
/// and marks it to persist. Uses `un_tok` field. Operand is the
/// instruction value to capture.
closure_capture,
/// The inner scope of a closure uses closure_get to retrieve the value
/// stored by the outer scope. Uses `inst_node` field. Operand is the
/// closure_capture instruction ref.
closure_get,
/// The ZIR instruction tag is one of the `Extended` ones.
/// Uses the `extended` union field.
extended,
/// Returns whether the instruction is one of the control flow "noreturn" types.
/// Function calls do not count.
pub fn isNoReturn(tag: Tag) bool {
// Exhaustive switch (no `else`): adding a new `Tag` is a compile error
// here until the new tag is explicitly categorized.
return switch (tag) {
// These instructions produce a value or fall through to the next
// instruction; they are not control-flow terminators.
.param,
.param_comptime,
.param_anytype,
.param_anytype_comptime,
.add,
.addwrap,
.add_sat,
.alloc,
.alloc_mut,
.alloc_comptime_mut,
.alloc_inferred,
.alloc_inferred_mut,
.alloc_inferred_comptime,
.alloc_inferred_comptime_mut,
.make_ptr_const,
.array_cat,
.array_mul,
.array_type,
.array_type_sentinel,
.vector_type,
.elem_type_index,
.indexable_ptr_len,
.anyframe_type,
.as,
.as_node,
.bit_and,
.bitcast,
.bit_or,
.block,
.block_inline,
.suspend_block,
.loop,
.bool_br_and,
.bool_br_or,
.bool_not,
.call,
.cmp_lt,
.cmp_lte,
.cmp_eq,
.cmp_gte,
.cmp_gt,
.cmp_neq,
.coerce_result_ptr,
.error_set_decl,
.error_set_decl_anon,
.error_set_decl_func,
.dbg_stmt,
.dbg_var_ptr,
.dbg_var_val,
.dbg_block_begin,
.dbg_block_end,
.decl_ref,
.decl_val,
.load,
.div,
.elem_ptr,
.elem_val,
.elem_ptr_node,
.elem_ptr_imm,
.elem_val_node,
.ensure_result_used,
.ensure_result_non_error,
.@"export",
.export_value,
.field_ptr,
.field_val,
.field_call_bind,
.field_ptr_named,
.field_val_named,
.func,
.func_inferred,
.func_fancy,
.has_decl,
.int,
.int_big,
.float,
.float128,
.int_type,
.is_non_null,
.is_non_null_ptr,
.is_non_err,
.is_non_err_ptr,
.mod_rem,
.mul,
.mulwrap,
.mul_sat,
.param_type,
.ref,
.shl,
.shl_sat,
.shr,
.store,
.store_node,
.store_to_block_ptr,
.store_to_inferred_ptr,
.str,
.sub,
.subwrap,
.sub_sat,
.negate,
.negate_wrap,
.typeof,
.typeof_builtin,
.xor,
.optional_type,
.optional_payload_safe,
.optional_payload_unsafe,
.optional_payload_safe_ptr,
.optional_payload_unsafe_ptr,
.err_union_payload_safe,
.err_union_payload_unsafe,
.err_union_payload_safe_ptr,
.err_union_payload_unsafe_ptr,
.err_union_code,
.err_union_code_ptr,
.error_to_int,
.int_to_error,
.ptr_type,
.ptr_type_simple,
.ensure_err_payload_void,
.enum_literal,
.merge_error_sets,
.error_union_type,
.bit_not,
.error_value,
.slice_start,
.slice_end,
.slice_sentinel,
.import,
.typeof_log2_int_type,
.log2_int_type,
.resolve_inferred_alloc,
.set_eval_branch_quota,
.switch_capture,
.switch_capture_ref,
.switch_capture_multi,
.switch_capture_multi_ref,
.switch_block,
.switch_cond,
.switch_cond_ref,
.array_base_ptr,
.field_base_ptr,
.validate_array_init_ty,
.validate_struct_init_ty,
.validate_struct_init,
.validate_struct_init_comptime,
.validate_array_init,
.validate_array_init_comptime,
.struct_init_empty,
.struct_init,
.struct_init_ref,
.struct_init_anon,
.struct_init_anon_ref,
.array_init,
.array_init_anon,
.array_init_ref,
.array_init_anon_ref,
.union_init,
.field_type,
.field_type_ref,
.int_to_enum,
.enum_to_int,
.type_info,
.size_of,
.bit_size_of,
.ptr_to_int,
.align_of,
.bool_to_int,
.embed_file,
.error_name,
.set_cold,
.set_runtime_safety,
.sqrt,
.sin,
.cos,
.tan,
.exp,
.exp2,
.log,
.log2,
.log10,
.fabs,
.floor,
.ceil,
.trunc,
.round,
.tag_name,
.reify,
.type_name,
.frame_type,
.frame_size,
.float_to_int,
.int_to_float,
.int_to_ptr,
.float_cast,
.int_cast,
.ptr_cast,
.truncate,
.align_cast,
.has_field,
.clz,
.ctz,
.pop_count,
.byte_swap,
.bit_reverse,
.div_exact,
.div_floor,
.div_trunc,
.mod,
.rem,
.shl_exact,
.shr_exact,
.bit_offset_of,
.offset_of,
.cmpxchg_strong,
.cmpxchg_weak,
.splat,
.reduce,
.shuffle,
.select,
.atomic_load,
.atomic_rmw,
.atomic_store,
.mul_add,
.builtin_call,
.field_parent_ptr,
.maximum,
.memcpy,
.memset,
.minimum,
.builtin_async_call,
.c_import,
.@"resume",
.@"await",
.ret_err_value_code,
.extended,
.closure_get,
.closure_capture,
.ret_ptr,
.ret_type,
.@"try",
.try_ptr,
//.try_inline,
//.try_ptr_inline,
=> false,
// Control-flow terminators: execution never continues past these.
.@"break",
.break_inline,
.condbr,
.condbr_inline,
.compile_error,
.ret_node,
.ret_load,
.ret_tok,
.ret_err_value,
.@"unreachable",
.repeat,
.repeat_inline,
.panic,
=> true,
};
}
/// Returns true when `tag` is one of the four function-parameter
/// declaration instructions (`param`, `param_comptime`, `param_anytype`,
/// `param_anytype_comptime`).
pub fn isParam(tag: Tag) bool {
    return tag == .param or
        tag == .param_comptime or
        tag == .param_anytype or
        tag == .param_anytype_comptime;
}
/// AstGen uses this to find out if `Ref.void_value` should be used in place
/// of the result of a given instruction. This allows Sema to forego adding
/// the instruction to the map after analysis.
pub fn isAlwaysVoid(tag: Tag, data: Data) bool {
// Exhaustive switch (no `else`): adding a new `Tag` is a compile error
// here until the new tag is explicitly categorized.
return switch (tag) {
// These instructions always evaluate to void.
.dbg_stmt,
.dbg_var_ptr,
.dbg_var_val,
.dbg_block_begin,
.dbg_block_end,
.ensure_result_used,
.ensure_result_non_error,
.ensure_err_payload_void,
.set_eval_branch_quota,
.atomic_store,
.store,
.store_node,
.store_to_block_ptr,
.store_to_inferred_ptr,
.resolve_inferred_alloc,
.validate_array_init_ty,
.validate_struct_init_ty,
.validate_struct_init,
.validate_struct_init_comptime,
.validate_array_init,
.validate_array_init_comptime,
.@"export",
.export_value,
.set_cold,
.set_runtime_safety,
.memcpy,
.memset,
=> true,
// These instructions may produce a non-void result.
.param,
.param_comptime,
.param_anytype,
.param_anytype_comptime,
.add,
.addwrap,
.add_sat,
.alloc,
.alloc_mut,
.alloc_comptime_mut,
.alloc_inferred,
.alloc_inferred_mut,
.alloc_inferred_comptime,
.alloc_inferred_comptime_mut,
.make_ptr_const,
.array_cat,
.array_mul,
.array_type,
.array_type_sentinel,
.vector_type,
.elem_type_index,
.indexable_ptr_len,
.anyframe_type,
.as,
.as_node,
.bit_and,
.bitcast,
.bit_or,
.block,
.block_inline,
.suspend_block,
.loop,
.bool_br_and,
.bool_br_or,
.bool_not,
.call,
.cmp_lt,
.cmp_lte,
.cmp_eq,
.cmp_gte,
.cmp_gt,
.cmp_neq,
.coerce_result_ptr,
.error_set_decl,
.error_set_decl_anon,
.error_set_decl_func,
.decl_ref,
.decl_val,
.load,
.div,
.elem_ptr,
.elem_val,
.elem_ptr_node,
.elem_ptr_imm,
.elem_val_node,
.field_ptr,
.field_val,
.field_call_bind,
.field_ptr_named,
.field_val_named,
.func,
.func_inferred,
.func_fancy,
.has_decl,
.int,
.int_big,
.float,
.float128,
.int_type,
.is_non_null,
.is_non_null_ptr,
.is_non_err,
.is_non_err_ptr,
.mod_rem,
.mul,
.mulwrap,
.mul_sat,
.param_type,
.ref,
.shl,
.shl_sat,
.shr,
.str,
.sub,
.subwrap,
.sub_sat,
.negate,
.negate_wrap,
.typeof,
.typeof_builtin,
.xor,
.optional_type,
.optional_payload_safe,
.optional_payload_unsafe,
.optional_payload_safe_ptr,
.optional_payload_unsafe_ptr,
.err_union_payload_safe,
.err_union_payload_unsafe,
.err_union_payload_safe_ptr,
.err_union_payload_unsafe_ptr,
.err_union_code,
.err_union_code_ptr,
.error_to_int,
.int_to_error,
.ptr_type,
.ptr_type_simple,
.enum_literal,
.merge_error_sets,
.error_union_type,
.bit_not,
.error_value,
.slice_start,
.slice_end,
.slice_sentinel,
.import,
.typeof_log2_int_type,
.log2_int_type,
.switch_capture,
.switch_capture_ref,
.switch_capture_multi,
.switch_capture_multi_ref,
.switch_block,
.switch_cond,
.switch_cond_ref,
.array_base_ptr,
.field_base_ptr,
.struct_init_empty,
.struct_init,
.struct_init_ref,
.struct_init_anon,
.struct_init_anon_ref,
.array_init,
.array_init_anon,
.array_init_ref,
.array_init_anon_ref,
.union_init,
.field_type,
.field_type_ref,
.int_to_enum,
.enum_to_int,
.type_info,
.size_of,
.bit_size_of,
.ptr_to_int,
.align_of,
.bool_to_int,
.embed_file,
.error_name,
.sqrt,
.sin,
.cos,
.tan,
.exp,
.exp2,
.log,
.log2,
.log10,
.fabs,
.floor,
.ceil,
.trunc,
.round,
.tag_name,
.reify,
.type_name,
.frame_type,
.frame_size,
.float_to_int,
.int_to_float,
.int_to_ptr,
.float_cast,
.int_cast,
.ptr_cast,
.truncate,
.align_cast,
.has_field,
.clz,
.ctz,
.pop_count,
.byte_swap,
.bit_reverse,
.div_exact,
.div_floor,
.div_trunc,
.mod,
.rem,
.shl_exact,
.shr_exact,
.bit_offset_of,
.offset_of,
.cmpxchg_strong,
.cmpxchg_weak,
.splat,
.reduce,
.shuffle,
.select,
.atomic_load,
.atomic_rmw,
.mul_add,
.builtin_call,
.field_parent_ptr,
.maximum,
.minimum,
.builtin_async_call,
.c_import,
.@"resume",
.@"await",
.ret_err_value_code,
.closure_get,
.closure_capture,
.@"break",
.break_inline,
.condbr,
.condbr_inline,
.compile_error,
.ret_node,
.ret_load,
.ret_tok,
.ret_err_value,
.ret_ptr,
.ret_type,
.@"unreachable",
.repeat,
.repeat_inline,
.panic,
.@"try",
.try_ptr,
//.try_inline,
//.try_ptr_inline,
=> false,
// Among extended opcodes, only these two are always void; the data
// payload is needed to read the opcode.
.extended => switch (data.extended.opcode) {
.breakpoint, .fence => true,
else => false,
},
};
}
/// Used by debug safety-checking code.
/// Maps every `Tag` to the `Data.FieldEnum` tag naming which `Data` union
/// field that instruction stores its payload in.
pub const data_tags = list: {
// The default comptime branch quota is not enough for this table.
@setEvalBranchQuota(2000);
break :list std.enums.directEnumArray(Tag, Data.FieldEnum, 0, .{
.add = .pl_node,
.addwrap = .pl_node,
.add_sat = .pl_node,
.sub = .pl_node,
.subwrap = .pl_node,
.sub_sat = .pl_node,
.mul = .pl_node,
.mulwrap = .pl_node,
.mul_sat = .pl_node,
.param_type = .param_type,
.param = .pl_tok,
.param_comptime = .pl_tok,
.param_anytype = .str_tok,
.param_anytype_comptime = .str_tok,
.array_cat = .pl_node,
.array_mul = .pl_node,
.array_type = .bin,
.array_type_sentinel = .pl_node,
.vector_type = .pl_node,
.elem_type_index = .bin,
.indexable_ptr_len = .un_node,
.anyframe_type = .un_node,
.as = .bin,
.as_node = .pl_node,
.bit_and = .pl_node,
.bitcast = .pl_node,
.bit_not = .un_node,
.bit_or = .pl_node,
.block = .pl_node,
.block_inline = .pl_node,
.suspend_block = .pl_node,
.bool_not = .un_node,
.bool_br_and = .bool_br,
.bool_br_or = .bool_br,
.@"break" = .@"break",
.break_inline = .@"break",
.call = .pl_node,
.cmp_lt = .pl_node,
.cmp_lte = .pl_node,
.cmp_eq = .pl_node,
.cmp_gte = .pl_node,
.cmp_gt = .pl_node,
.cmp_neq = .pl_node,
.coerce_result_ptr = .bin,
.condbr = .pl_node,
.condbr_inline = .pl_node,
.@"try" = .pl_node,
.try_ptr = .pl_node,
//.try_inline = .pl_node,
//.try_ptr_inline = .pl_node,
.error_set_decl = .pl_node,
.error_set_decl_anon = .pl_node,
.error_set_decl_func = .pl_node,
.dbg_stmt = .dbg_stmt,
.dbg_var_ptr = .str_op,
.dbg_var_val = .str_op,
.dbg_block_begin = .tok,
.dbg_block_end = .tok,
.decl_ref = .str_tok,
.decl_val = .str_tok,
.load = .un_node,
.div = .pl_node,
.elem_ptr = .bin,
.elem_ptr_node = .pl_node,
.elem_ptr_imm = .pl_node,
.elem_val = .bin,
.elem_val_node = .pl_node,
.ensure_result_used = .un_node,
.ensure_result_non_error = .un_node,
.error_union_type = .pl_node,
.error_value = .str_tok,
.@"export" = .pl_node,
.export_value = .pl_node,
.field_ptr = .pl_node,
.field_val = .pl_node,
.field_ptr_named = .pl_node,
.field_val_named = .pl_node,
.field_call_bind = .pl_node,
.func = .pl_node,
.func_inferred = .pl_node,
.func_fancy = .pl_node,
.import = .str_tok,
.int = .int,
.int_big = .str,
.float = .float,
.float128 = .pl_node,
.int_type = .int_type,
.is_non_null = .un_node,
.is_non_null_ptr = .un_node,
.is_non_err = .un_node,
.is_non_err_ptr = .un_node,
.loop = .pl_node,
.repeat = .node,
.repeat_inline = .node,
.merge_error_sets = .pl_node,
.mod_rem = .pl_node,
.ref = .un_tok,
.ret_node = .un_node,
.ret_load = .un_node,
.ret_tok = .un_tok,
.ret_err_value = .str_tok,
.ret_err_value_code = .str_tok,
.ret_ptr = .node,
.ret_type = .node,
.ptr_type_simple = .ptr_type_simple,
.ptr_type = .ptr_type,
.slice_start = .pl_node,
.slice_end = .pl_node,
.slice_sentinel = .pl_node,
.store = .bin,
.store_node = .pl_node,
.store_to_block_ptr = .bin,
.store_to_inferred_ptr = .bin,
.str = .str,
.negate = .un_node,
.negate_wrap = .un_node,
.typeof = .un_node,
.typeof_log2_int_type = .un_node,
.log2_int_type = .un_node,
.@"unreachable" = .@"unreachable",
.xor = .pl_node,
.optional_type = .un_node,
.optional_payload_safe = .un_node,
.optional_payload_unsafe = .un_node,
.optional_payload_safe_ptr = .un_node,
.optional_payload_unsafe_ptr = .un_node,
.err_union_payload_safe = .un_node,
.err_union_payload_unsafe = .un_node,
.err_union_payload_safe_ptr = .un_node,
.err_union_payload_unsafe_ptr = .un_node,
.err_union_code = .un_node,
.err_union_code_ptr = .un_node,
.ensure_err_payload_void = .un_tok,
.enum_literal = .str_tok,
.switch_block = .pl_node,
.switch_cond = .un_node,
.switch_cond_ref = .un_node,
.switch_capture = .switch_capture,
.switch_capture_ref = .switch_capture,
.switch_capture_multi = .switch_capture,
.switch_capture_multi_ref = .switch_capture,
.array_base_ptr = .un_node,
.field_base_ptr = .un_node,
.validate_array_init_ty = .un_node,
.validate_struct_init_ty = .un_node,
.validate_struct_init = .pl_node,
.validate_struct_init_comptime = .pl_node,
.validate_array_init = .pl_node,
.validate_array_init_comptime = .pl_node,
.struct_init_empty = .un_node,
.field_type = .pl_node,
.field_type_ref = .pl_node,
.struct_init = .pl_node,
.struct_init_ref = .pl_node,
.struct_init_anon = .pl_node,
.struct_init_anon_ref = .pl_node,
.array_init = .pl_node,
.array_init_anon = .pl_node,
.array_init_ref = .pl_node,
.array_init_anon_ref = .pl_node,
.union_init = .pl_node,
.type_info = .un_node,
.size_of = .un_node,
.bit_size_of = .un_node,
.ptr_to_int = .un_node,
.error_to_int = .un_node,
.int_to_error = .un_node,
.compile_error = .un_node,
.set_eval_branch_quota = .un_node,
.enum_to_int = .un_node,
.align_of = .un_node,
.bool_to_int = .un_node,
.embed_file = .un_node,
.error_name = .un_node,
.panic = .un_node,
.set_cold = .un_node,
.set_runtime_safety = .un_node,
.sqrt = .un_node,
.sin = .un_node,
.cos = .un_node,
.tan = .un_node,
.exp = .un_node,
.exp2 = .un_node,
.log = .un_node,
.log2 = .un_node,
.log10 = .un_node,
.fabs = .un_node,
.floor = .un_node,
.ceil = .un_node,
.trunc = .un_node,
.round = .un_node,
.tag_name = .un_node,
.reify = .un_node,
.type_name = .un_node,
.frame_type = .un_node,
.frame_size = .un_node,
.float_to_int = .pl_node,
.int_to_float = .pl_node,
.int_to_ptr = .pl_node,
.int_to_enum = .pl_node,
.float_cast = .pl_node,
.int_cast = .pl_node,
.ptr_cast = .pl_node,
.truncate = .pl_node,
.align_cast = .pl_node,
.typeof_builtin = .pl_node,
.has_decl = .pl_node,
.has_field = .pl_node,
.clz = .un_node,
.ctz = .un_node,
.pop_count = .un_node,
.byte_swap = .un_node,
.bit_reverse = .un_node,
.div_exact = .pl_node,
.div_floor = .pl_node,
.div_trunc = .pl_node,
.mod = .pl_node,
.rem = .pl_node,
.shl = .pl_node,
.shl_exact = .pl_node,
.shl_sat = .pl_node,
.shr = .pl_node,
.shr_exact = .pl_node,
.bit_offset_of = .pl_node,
.offset_of = .pl_node,
.cmpxchg_strong = .pl_node,
.cmpxchg_weak = .pl_node,
.splat = .pl_node,
.reduce = .pl_node,
.shuffle = .pl_node,
.select = .pl_node,
.atomic_load = .pl_node,
.atomic_rmw = .pl_node,
.atomic_store = .pl_node,
.mul_add = .pl_node,
.builtin_call = .pl_node,
.field_parent_ptr = .pl_node,
.maximum = .pl_node,
.memcpy = .pl_node,
.memset = .pl_node,
.minimum = .pl_node,
.builtin_async_call = .pl_node,
.c_import = .pl_node,
.alloc = .un_node,
.alloc_mut = .un_node,
.alloc_comptime_mut = .un_node,
.alloc_inferred = .node,
.alloc_inferred_mut = .node,
.alloc_inferred_comptime = .node,
.alloc_inferred_comptime_mut = .node,
.resolve_inferred_alloc = .un_node,
.make_ptr_const = .un_node,
.@"resume" = .un_node,
.@"await" = .un_node,
.closure_capture = .un_tok,
.closure_get = .inst_node,
.extended = .extended,
});
};
// Uncomment to view how many tag slots are available.
//comptime {
// @compileLog("ZIR tags left: ", 256 - @typeInfo(Tag).Enum.fields.len);
//}
};
/// Rarer instructions are here; ones that do not fit in the 8-bit `Tag` enum.
/// `noreturn` instructions may not go here; they must be part of the main `Tag` enum.
pub const Extended = enum(u16) {
/// Declares a global variable.
/// `operand` is payload index to `ExtendedVar`.
/// `small` is `ExtendedVar.Small`.
variable,
/// A struct type definition. Contains references to ZIR instructions for
/// the field types, defaults, and alignments.
/// `operand` is payload index to `StructDecl`.
/// `small` is `StructDecl.Small`.
struct_decl,
/// An enum type definition. Contains references to ZIR instructions for
/// the field value expressions and optional type tag expression.
/// `operand` is payload index to `EnumDecl`.
/// `small` is `EnumDecl.Small`.
enum_decl,
/// A union type definition. Contains references to ZIR instructions for
/// the field types and optional type tag expression.
/// `operand` is payload index to `UnionDecl`.
/// `small` is `UnionDecl.Small`.
union_decl,
/// An opaque type definition. Contains references to decls and captures.
/// `operand` is payload index to `OpaqueDecl`.
/// `small` is `OpaqueDecl.Small`.
opaque_decl,
/// Implements the `@This` builtin.
/// `operand` is `src_node: i32`.
this,
/// Implements the `@returnAddress` builtin.
/// `operand` is `src_node: i32`.
ret_addr,
/// Implements the `@src` builtin.
/// `operand` is payload index to `LineColumn`.
builtin_src,
/// Implements the `@errorReturnTrace` builtin.
/// `operand` is `src_node: i32`.
error_return_trace,
/// Implements the `@frame` builtin.
/// `operand` is `src_node: i32`.
frame,
/// Implements the `@frameAddress` builtin.
/// `operand` is `src_node: i32`.
frame_address,
/// Same as `alloc` from `Tag` but may contain an alignment instruction.
/// `operand` is payload index to `AllocExtended`.
/// `small`:
/// * 0b000X - has type
/// * 0b00X0 - has alignment
/// * 0b0X00 - 1=const, 0=var
/// * 0bX000 - is comptime
alloc,
/// The `@extern` builtin.
/// `operand` is payload index to `BinNode`.
builtin_extern,
/// Inline assembly.
/// `small`:
/// * 0b00000000_000XXXXX - `outputs_len`.
/// * 0b000000XX_XXX00000 - `inputs_len`.
/// * 0b0XXXXX00_00000000 - `clobbers_len`.
/// * 0bX0000000_00000000 - is volatile
/// `operand` is payload index to `Asm`.
@"asm",
/// Log compile time variables and emit an error message.
/// `operand` is payload index to `NodeMultiOp`.
/// `small` is `operands_len`.
/// The AST node is the compile log builtin call.
compile_log,
/// The builtin `@TypeOf` which returns the type after Peer Type Resolution
/// of one or more params.
/// `operand` is payload index to `NodeMultiOp`.
/// `small` is `operands_len`.
/// The AST node is the builtin call.
typeof_peer,
/// Implements the `@addWithOverflow` builtin.
/// `operand` is payload index to `OverflowArithmetic`.
/// `small` is unused.
add_with_overflow,
/// Implements the `@subWithOverflow` builtin.
/// `operand` is payload index to `OverflowArithmetic`.
/// `small` is unused.
sub_with_overflow,
/// Implements the `@mulWithOverflow` builtin.
/// `operand` is payload index to `OverflowArithmetic`.
/// `small` is unused.
mul_with_overflow,
/// Implements the `@shlWithOverflow` builtin.
/// `operand` is payload index to `OverflowArithmetic`.
/// `small` is unused.
shl_with_overflow,
/// `operand` is payload index to `UnNode`.
c_undef,
/// `operand` is payload index to `UnNode`.
c_include,
/// `operand` is payload index to `BinNode`.
c_define,
/// `operand` is payload index to `UnNode`.
wasm_memory_size,
/// `operand` is payload index to `BinNode`.
wasm_memory_grow,
/// The `@prefetch` builtin.
/// `operand` is payload index to `BinNode`.
prefetch,
/// Given a pointer to a struct or object that contains virtual fields, returns the
/// named field. If there is no named field, searches in the type for a decl that
/// matches the field name. The decl is resolved and we ensure that it's a function
/// which can accept the object as the first parameter, with one pointer fixup. If
/// all of that works, this instruction produces a special "bound function" value
/// which contains both the function and the saved first parameter value.
/// Bound functions may only be used as the function parameter to a `call` or
/// `builtin_call` instruction. Any other use is invalid zir and may crash the compiler.
/// Uses `pl_node` field. The AST node is the `@field` builtin. Payload is FieldNamedNode.
field_call_bind_named,
/// Implements the `@fence` builtin.
/// `operand` is payload index to `UnNode`.
fence,
/// Implement builtin `@setFloatMode`.
/// `operand` is payload index to `UnNode`.
set_float_mode,
/// Implement builtin `@setAlignStack`.
/// `operand` is payload index to `UnNode`.
set_align_stack,
/// Implements the `@errSetCast` builtin.
/// `operand` is payload index to `BinNode`. `lhs` is dest type, `rhs` is operand.
err_set_cast,
/// `operand` is payload index to `UnNode`.
await_nosuspend,
/// `operand` is `src_node: i32`.
breakpoint,
/// The payload carried by a `Tag.extended` instruction's `Data.extended` field.
pub const InstData = struct {
/// Which extended instruction this is.
opcode: Extended,
/// Meaning depends on `opcode`; see the per-opcode doc comments above.
small: u16,
/// Meaning depends on `opcode`; typically a payload index or `src_node: i32`.
operand: u32,
};
};
/// The position of a ZIR instruction within the `Zir` instructions array.
/// Stored in `Data` fields such as `inst_node.inst`, `@"break".block_inst`,
/// and `switch_capture.switch_inst`.
pub const Index = u32;
/// A reference to a TypedValue or ZIR instruction.
///
/// If the Ref has a tag in this enum, it refers to a TypedValue which may be
/// retrieved with Ref.toTypedValue().
///
/// If the value of a Ref does not have a tag, it refers to a ZIR instruction.
///
/// The first values after the the last tag refer to ZIR instructions which may
/// be derived by subtracting `typed_value_map.len`.
///
/// When adding a tag to this enum, consider adding a corresponding entry to
/// `primitives` in astgen.
///
/// The tag type is specified so that it is safe to bitcast between `[]u32`
/// and `[]Ref`.
pub const Ref = enum(u32) {
/// This Ref does not correspond to any ZIR instruction or constant
/// value and may instead be used as a sentinel to indicate null.
none,
u1_type,
u8_type,
i8_type,
u16_type,
i16_type,
u29_type,
u32_type,
i32_type,
u64_type,
i64_type,
u128_type,
i128_type,
usize_type,
isize_type,
c_short_type,
c_ushort_type,
c_int_type,
c_uint_type,
c_long_type,
c_ulong_type,
c_longlong_type,
c_ulonglong_type,
c_longdouble_type,
f16_type,
f32_type,
f64_type,
f80_type,
f128_type,
anyopaque_type,
bool_type,
void_type,
type_type,
anyerror_type,
comptime_int_type,
comptime_float_type,
noreturn_type,
anyframe_type,
null_type,
undefined_type,
enum_literal_type,
atomic_order_type,
atomic_rmw_op_type,
calling_convention_type,
address_space_type,
float_mode_type,
reduce_op_type,
call_options_type,
prefetch_options_type,
export_options_type,
extern_options_type,
type_info_type,
manyptr_u8_type,
manyptr_const_u8_type,
fn_noreturn_no_args_type,
fn_void_no_args_type,
fn_naked_noreturn_no_args_type,
fn_ccc_void_no_args_type,
single_const_pointer_to_comptime_int_type,
const_slice_u8_type,
anyerror_void_error_union_type,
generic_poison_type,
/// `undefined` (untyped)
undef,
/// `0` (comptime_int)
zero,
/// `1` (comptime_int)
one,
/// `{}`
void_value,
/// `unreachable` (noreturn type)
unreachable_value,
/// `null` (untyped)
null_value,
/// `true`
bool_true,
/// `false`
bool_false,
/// `.{}` (untyped)
empty_struct,
/// `0` (usize)
zero_usize,
/// `1` (usize)
one_usize,
/// `std.builtin.CallingConvention.C`
calling_convention_c,
/// `std.builtin.CallingConvention.Inline`
calling_convention_inline,
/// Used for generic parameters where the type and value
/// is not known until generic function instantiation.
generic_poison,
// Non-exhaustive: values beyond the named tags refer to ZIR
// instructions, as described by the doc comment above.
_,
/// Maps every named tag to its `TypedValue`. The `.none` entry is
/// intentionally left `undefined`.
pub const typed_value_map = std.enums.directEnumArray(Ref, TypedValue, 0, .{
.none = undefined,
.u1_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u1_type),
},
.u8_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u8_type),
},
.i8_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.i8_type),
},
.u16_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u16_type),
},
.i16_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.i16_type),
},
.u29_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u29_type),
},
.u32_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u32_type),
},
.i32_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.i32_type),
},
.u64_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u64_type),
},
.i64_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.i64_type),
},
.u128_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u128_type),
},
.i128_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.i128_type),
},
.usize_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.usize_type),
},
.isize_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.isize_type),
},
.c_short_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_short_type),
},
.c_ushort_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_ushort_type),
},
.c_int_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_int_type),
},
.c_uint_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_uint_type),
},
.c_long_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_long_type),
},
.c_ulong_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_ulong_type),
},
.c_longlong_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_longlong_type),
},
.c_ulonglong_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_ulonglong_type),
},
.c_longdouble_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_longdouble_type),
},
.f16_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.f16_type),
},
.f32_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.f32_type),
},
.f64_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.f64_type),
},
.f80_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.f80_type),
},
.f128_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.f128_type),
},
.anyopaque_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.anyopaque_type),
},
.bool_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.bool_type),
},
.void_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.void_type),
},
.type_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.type_type),
},
.anyerror_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.anyerror_type),
},
.comptime_int_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.comptime_int_type),
},
.comptime_float_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.comptime_float_type),
},
.noreturn_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.noreturn_type),
},
.anyframe_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.anyframe_type),
},
.null_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.null_type),
},
.undefined_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.undefined_type),
},
.fn_noreturn_no_args_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.fn_noreturn_no_args_type),
},
.fn_void_no_args_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.fn_void_no_args_type),
},
.fn_naked_noreturn_no_args_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.fn_naked_noreturn_no_args_type),
},
.fn_ccc_void_no_args_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.fn_ccc_void_no_args_type),
},
.single_const_pointer_to_comptime_int_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.single_const_pointer_to_comptime_int_type),
},
.const_slice_u8_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.const_slice_u8_type),
},
.anyerror_void_error_union_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.anyerror_void_error_union_type),
},
.generic_poison_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.generic_poison_type),
},
.enum_literal_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.enum_literal_type),
},
.manyptr_u8_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.manyptr_u8_type),
},
.manyptr_const_u8_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.manyptr_const_u8_type),
},
.atomic_order_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.atomic_order_type),
},
.atomic_rmw_op_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.atomic_rmw_op_type),
},
.calling_convention_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.calling_convention_type),
},
.address_space_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.address_space_type),
},
.float_mode_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.float_mode_type),
},
.reduce_op_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.reduce_op_type),
},
.call_options_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.call_options_type),
},
.prefetch_options_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.prefetch_options_type),
},
.export_options_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.export_options_type),
},
.extern_options_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.extern_options_type),
},
.type_info_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.type_info_type),
},
.undef = .{
.ty = Type.initTag(.@"undefined"),
.val = Value.initTag(.undef),
},
.zero = .{
.ty = Type.initTag(.comptime_int),
.val = Value.initTag(.zero),
},
.zero_usize = .{
.ty = Type.initTag(.usize),
.val = Value.initTag(.zero),
},
.one = .{
.ty = Type.initTag(.comptime_int),
.val = Value.initTag(.one),
},
.one_usize = .{
.ty = Type.initTag(.usize),
.val = Value.initTag(.one),
},
.void_value = .{
.ty = Type.initTag(.void),
.val = Value.initTag(.void_value),
},
.unreachable_value = .{
.ty = Type.initTag(.noreturn),
.val = Value.initTag(.unreachable_value),
},
.null_value = .{
.ty = Type.initTag(.@"null"),
.val = Value.initTag(.null_value),
},
.bool_true = .{
.ty = Type.initTag(.bool),
.val = Value.initTag(.bool_true),
},
.bool_false = .{
.ty = Type.initTag(.bool),
.val = Value.initTag(.bool_false),
},
.empty_struct = .{
.ty = Type.initTag(.empty_struct_literal),
.val = Value.initTag(.empty_struct_value),
},
.calling_convention_c = .{
.ty = Type.initTag(.calling_convention),
.val = .{ .ptr_otherwise = &calling_convention_c_payload.base },
},
.calling_convention_inline = .{
.ty = Type.initTag(.calling_convention),
.val = .{ .ptr_otherwise = &calling_convention_inline_payload.base },
},
.generic_poison = .{
.ty = Type.initTag(.generic_poison),
.val = Value.initTag(.generic_poison),
},
});
};
/// We would like this to be const but `Value` wants a mutable pointer for
/// its payload field. Nothing should mutate this though.
/// Backs the `Ref.typed_value_map` entry for `calling_convention_c`.
var calling_convention_c_payload: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
.data = @enumToInt(std.builtin.CallingConvention.C),
};
/// We would like this to be const but `Value` wants a mutable pointer for
/// its payload field. Nothing should mutate this though.
/// Backs the `Ref.typed_value_map` entry for `calling_convention_inline`.
var calling_convention_inline_payload: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
.data = @enumToInt(std.builtin.CallingConvention.Inline),
};
/// All instructions have an 8-byte payload, which is contained within
/// this union. `Tag` determines which union field is active, as well as
/// how to interpret the data within.
pub const Data = union {
/// Used for `Tag.extended`. The extended opcode determines the meaning
/// of the `small` and `operand` fields.
extended: Extended.InstData,
/// Used for unary operators, with an AST node source location.
un_node: struct {
/// Offset from Decl AST node index.
src_node: i32,
/// The meaning of this operand depends on the corresponding `Tag`.
operand: Ref,
pub fn src(self: @This()) LazySrcLoc {
return LazySrcLoc.nodeOffset(self.src_node);
}
},
/// Used for unary operators, with a token source location.
un_tok: struct {
/// Offset from Decl AST token index.
src_tok: Ast.TokenIndex,
/// The meaning of this operand depends on the corresponding `Tag`.
operand: Ref,
pub fn src(self: @This()) LazySrcLoc {
return .{ .token_offset = self.src_tok };
}
},
/// Used for instructions with an AST node source location and a payload in `extra`.
pl_node: struct {
/// Offset from Decl AST node index.
/// `Tag` determines which kind of AST node this points to.
src_node: i32,
/// index into extra.
/// `Tag` determines what lives there.
payload_index: u32,
pub fn src(self: @This()) LazySrcLoc {
return LazySrcLoc.nodeOffset(self.src_node);
}
},
/// Used for instructions with a token source location and a payload in `extra`.
pl_tok: struct {
/// Offset from Decl AST token index.
src_tok: Ast.TokenIndex,
/// index into extra.
/// `Tag` determines what lives there.
payload_index: u32,
pub fn src(self: @This()) LazySrcLoc {
return .{ .token_offset = self.src_tok };
}
},
/// Two operands; their meaning is determined by the corresponding `Tag`.
bin: Bin,
/// For strings which may contain null bytes.
str: struct {
/// Offset into `string_bytes`.
start: u32,
/// Number of bytes in the string.
len: u32,
pub fn get(self: @This(), code: Zir) []const u8 {
return code.string_bytes[self.start..][0..self.len];
}
},
/// A null-terminated string together with a token source location.
str_tok: struct {
/// Offset into `string_bytes`. Null-terminated.
start: u32,
/// Offset from Decl AST token index.
src_tok: u32,
pub fn get(self: @This(), code: Zir) [:0]const u8 {
return code.nullTerminatedString(self.start);
}
pub fn src(self: @This()) LazySrcLoc {
return .{ .token_offset = self.src_tok };
}
},
/// Offset from Decl AST token index.
tok: Ast.TokenIndex,
/// Offset from Decl AST node index.
node: i32,
/// An unsigned integer immediate value.
int: u64,
/// A floating point immediate value.
float: f64,
/// Pointer type with none of the `extra`-payload attributes
/// (sentinel/align/addrspace/bit-range); see `ptr_type` for those.
ptr_type_simple: struct {
is_allowzero: bool,
is_mutable: bool,
is_volatile: bool,
size: std.builtin.Type.Pointer.Size,
elem_type: Ref,
},
/// Pointer type whose optional attributes live in `extra` (see `PtrType`);
/// the `flags` bits say which trailing fields are present.
ptr_type: struct {
flags: packed struct {
is_allowzero: bool,
is_mutable: bool,
is_volatile: bool,
has_sentinel: bool,
has_align: bool,
has_addrspace: bool,
has_bit_range: bool,
_: u1 = undefined,
},
size: std.builtin.Type.Pointer.Size,
/// Index into extra. See `PtrType`.
payload_index: u32,
},
/// An arbitrary-width integer type such as `u7` or `i32`.
int_type: struct {
/// Offset from Decl AST node index.
/// `Tag` determines which kind of AST node this points to.
src_node: i32,
signedness: std.builtin.Signedness,
bit_count: u16,
pub fn src(self: @This()) LazySrcLoc {
return LazySrcLoc.nodeOffset(self.src_node);
}
},
/// Short-circuiting boolean operation: an LHS value plus the block
/// computing the RHS.
bool_br: struct {
lhs: Ref,
/// Points to a `Block`.
payload_index: u32,
},
/// References a parameter of `callee` by positional index.
param_type: struct {
callee: Ref,
param_index: u32,
},
@"unreachable": struct {
/// Offset from Decl AST node index.
/// `Tag` determines which kind of AST node this points to.
src_node: i32,
force_comptime: bool,
pub fn src(self: @This()) LazySrcLoc {
return LazySrcLoc.nodeOffset(self.src_node);
}
},
/// Terminates `block_inst`, providing `operand` as its result value.
@"break": struct {
block_inst: Index,
operand: Ref,
},
/// Identifies a prong of a switch instruction, for capturing its value.
switch_capture: struct {
switch_inst: Index,
prong_index: u32,
},
/// Source position of a debug statement.
dbg_stmt: LineColumn,
/// Used for unary operators which reference an inst,
/// with an AST node source location.
inst_node: struct {
/// Offset from Decl AST node index.
src_node: i32,
/// The meaning of this operand depends on the corresponding `Tag`.
inst: Index,
pub fn src(self: @This()) LazySrcLoc {
return LazySrcLoc.nodeOffset(self.src_node);
}
},
/// A null-terminated string plus a single operand.
str_op: struct {
/// Offset into `string_bytes`. Null-terminated.
str: u32,
operand: Ref,
pub fn getStr(self: @This(), zir: Zir) [:0]const u8 {
return zir.nullTerminatedString(self.str);
}
},
// Make sure we don't accidentally add a field to make this union
// bigger than expected. Note that in Debug builds, Zig is allowed
// to insert a secret field for safety checks.
comptime {
if (builtin.mode != .Debug) {
assert(@sizeOf(Data) == 8);
}
}
/// TODO this has to be kept in sync with `Data` which we want to be an untagged
/// union. There is some kind of language awkwardness here and it has to do with
/// deserializing an untagged union (in this case `Data`) from a file, and trying
/// to preserve the hidden safety field.
pub const FieldEnum = enum {
extended,
un_node,
un_tok,
pl_node,
pl_tok,
bin,
str,
str_tok,
tok,
node,
int,
float,
ptr_type_simple,
ptr_type,
int_type,
bool_br,
param_type,
@"unreachable",
@"break",
switch_capture,
dbg_stmt,
inst_node,
str_op,
};
};
/// Trailing:
/// 0. Output for every outputs_len
/// 1. Input for every inputs_len
/// 2. clobber: u32 // index into string_bytes (null terminated) for every clobbers_len.
/// Extra data for an inline assembly expression.
pub const Asm = struct {
src_node: i32,
// null-terminated string index
asm_source: u32,
/// 1 bit for each outputs_len: whether it uses `-> T` or not.
/// 0b0 - operand is a pointer to where to store the output.
/// 0b1 - operand is a type; asm expression has the output as the result.
/// 0b0X is the first output, 0bX0 is the second, etc.
output_type_bits: u32,
pub const Output = struct {
/// index into string_bytes (null terminated)
name: u32,
/// index into string_bytes (null terminated)
constraint: u32,
/// How to interpret this is determined by `output_type_bits`.
operand: Ref,
};
pub const Input = struct {
/// index into string_bytes (null terminated)
name: u32,
/// index into string_bytes (null terminated)
constraint: u32,
operand: Ref,
};
};
/// Trailing:
/// if (ret_body_len == 1) {
/// 0. return_type: Ref
/// }
/// if (ret_body_len > 1) {
/// 1. return_type: Index // for each ret_body_len
/// }
/// 2. body: Index // for each body_len
/// 3. src_locs: SrcLocs // if body_len != 0
/// Extra data for a function declaration (see also `FuncFancy` for the
/// variant with align/addrspace/section/cc/ret-type expressions).
pub const Func = struct {
/// If this is 0 it means a void return type.
/// If this is 1 it means return_type is a simple Ref
ret_body_len: u32,
/// Points to the block that contains the param instructions for this function.
param_block: Index,
body_len: u32,
/// Source positions of the function body's braces, used for debug info.
pub const SrcLocs = struct {
/// Line index in the source file relative to the parent decl.
lbrace_line: u32,
/// Line index in the source file relative to the parent decl.
rbrace_line: u32,
/// lbrace_column is least significant bits u16
/// rbrace_column is most significant bits u16
columns: u32,
};
};
/// Trailing:
/// 0. lib_name: u32, // null terminated string index, if has_lib_name is set
/// if (has_align_ref and !has_align_body) {
/// 1. align: Ref,
/// }
/// if (has_align_body) {
/// 2. align_body_len: u32
/// 3. align_body: u32 // for each align_body_len
/// }
/// if (has_addrspace_ref and !has_addrspace_body) {
/// 4. addrspace: Ref,
/// }
/// if (has_addrspace_body) {
/// 5. addrspace_body_len: u32
/// 6. addrspace_body: u32 // for each addrspace_body_len
/// }
/// if (has_section_ref and !has_section_body) {
/// 7. section: Ref,
/// }
/// if (has_section_body) {
/// 8. section_body_len: u32
/// 9. section_body: u32 // for each section_body_len
/// }
/// if (has_cc_ref and !has_cc_body) {
/// 10. cc: Ref,
/// }
/// if (has_cc_body) {
/// 11. cc_body_len: u32
/// 12. cc_body: u32 // for each cc_body_len
/// }
/// if (has_ret_ty_ref and !has_ret_ty_body) {
/// 13. ret_ty: Ref,
/// }
/// if (has_ret_ty_body) {
/// 14. ret_ty_body_len: u32
/// 15. ret_ty_body: u32 // for each ret_ty_body_len
/// }
/// 16. noalias_bits: u32 // if has_any_noalias
/// - each bit starting with LSB corresponds to parameter indexes
/// 17. body: Index // for each body_len
/// 18. src_locs: Func.SrcLocs // if body_len != 0
pub const FuncFancy = struct {
/// Points to the block that contains the param instructions for this function.
param_block: Index,
body_len: u32,
bits: Bits,
/// If both has_cc_ref and has_cc_body are false, it means auto calling convention.
/// If both has_align_ref and has_align_body are false, it means default alignment.
/// If both has_ret_ty_ref and has_ret_ty_body are false, it means void return type.
/// If both has_section_ref and has_section_body are false, it means default section.
/// If both has_addrspace_ref and has_addrspace_body are false, it means default addrspace.
pub const Bits = packed struct {
is_var_args: bool,
is_inferred_error: bool,
is_test: bool,
is_extern: bool,
has_align_ref: bool,
has_align_body: bool,
has_addrspace_ref: bool,
has_addrspace_body: bool,
has_section_ref: bool,
has_section_body: bool,
has_cc_ref: bool,
has_cc_body: bool,
has_ret_ty_ref: bool,
has_ret_ty_body: bool,
has_lib_name: bool,
has_any_noalias: bool,
// Pads the packed struct to 32 bits.
_: u16 = undefined,
};
};
/// Trailing:
/// 0. lib_name: u32, // null terminated string index, if has_lib_name is set
/// 1. align: Ref, // if has_align is set
/// 2. init: Ref // if has_init is set
/// The source node is obtained from the containing `block_inline`.
pub const ExtendedVar = struct {
var_type: Ref,
pub const Small = packed struct {
has_lib_name: bool,
has_align: bool,
has_init: bool,
is_extern: bool,
is_threadlocal: bool,
// Pads `Small` to the 16 bits of `extended.small`.
_: u11 = undefined,
};
};
/// This data is stored inside extra, with trailing operands according to `operands_len`.
/// Each operand is a `Ref`.
pub const MultiOp = struct {
operands_len: u32,
};
/// Trailing: operand: Ref, // for each `operands_len` (stored in `small`).
pub const NodeMultiOp = struct {
src_node: i32,
};
/// This data is stored inside extra, with trailing operands according to `body_len`.
/// Each operand is an `Index`.
pub const Block = struct {
body_len: u32,
};
/// Stored inside extra, with trailing arguments according to `args_len`.
/// Each argument is a `Ref`.
pub const Call = struct {
// Note: Flags *must* come first so that unusedResultExpr
// can find it when it goes to modify them.
flags: Flags,
callee: Ref,
pub const Flags = packed struct {
/// std.builtin.CallOptions.Modifier in packed form
pub const PackedModifier = u3;
pub const PackedArgsLen = u28;
packed_modifier: PackedModifier,
ensure_result_used: bool = false,
args_len: PackedArgsLen,
comptime {
if (@sizeOf(Flags) != 4 or @bitSizeOf(Flags) != 32)
@compileError("Layout of Call.Flags needs to be updated!");
if (@bitSizeOf(std.builtin.CallOptions.Modifier) != @bitSizeOf(PackedModifier))
@compileError("Call.Flags.PackedModifier needs to be updated!");
}
};
};
/// Peer type resolution over multiple operands; `body_len` instructions
/// starting at `body_index` in `extra` presumably compute the peers — confirm.
pub const TypeOfPeer = struct {
src_node: i32,
body_len: u32,
body_index: u32,
};
pub const BuiltinCall = struct {
// Note: Flags *must* come first so that unusedResultExpr
// can find it when it goes to modify them.
flags: Flags,
options: Ref,
callee: Ref,
args: Ref,
pub const Flags = packed struct {
is_nosuspend: bool,
is_comptime: bool,
ensure_result_used: bool,
_: u29 = undefined,
comptime {
if (@sizeOf(Flags) != 4 or @bitSizeOf(Flags) != 32)
@compileError("Layout of BuiltinCall.Flags needs to be updated!");
}
};
};
/// This data is stored inside extra, with two sets of trailing `Ref`:
/// * 0. the then body, according to `then_body_len`.
/// * 1. the else body, according to `else_body_len`.
pub const CondBr = struct {
condition: Ref,
then_body_len: u32,
else_body_len: u32,
};
/// This data is stored inside extra, trailed by:
/// * 0. body: Index // for each `body_len`.
pub const Try = struct {
/// The error union to unwrap.
operand: Ref,
body_len: u32,
};
/// Stored in extra. Depending on the flags in Data, there will be up to 5
/// trailing Ref fields:
/// 0. sentinel: Ref // if `has_sentinel` flag is set
/// 1. align: Ref // if `has_align` flag is set
/// 2. address_space: Ref // if `has_addrspace` flag is set
/// 3. bit_start: Ref // if `has_bit_range` flag is set
/// 4. host_size: Ref // if `has_bit_range` flag is set
pub const PtrType = struct {
elem_type: Ref,
};
/// Array type with a sentinel: `[len:sentinel]elem_type`.
pub const ArrayTypeSentinel = struct {
len: Ref,
sentinel: Ref,
elem_type: Ref,
};
/// Slice with a start only: `lhs[start..]`.
pub const SliceStart = struct {
lhs: Ref,
start: Ref,
};
/// Slice with explicit bounds: `lhs[start..end]`.
pub const SliceEnd = struct {
lhs: Ref,
start: Ref,
end: Ref,
};
/// Slice with bounds and a sentinel: `lhs[start..end :sentinel]`.
pub const SliceSentinel = struct {
lhs: Ref,
start: Ref,
end: Ref,
sentinel: Ref,
};
/// The meaning of these operands depends on the corresponding `Tag`.
pub const Bin = struct {
lhs: Ref,
rhs: Ref,
};
/// Two operands plus an AST node offset.
pub const BinNode = struct {
node: i32,
lhs: Ref,
rhs: Ref,
};
/// One operand plus an AST node offset.
pub const UnNode = struct {
node: i32,
operand: Ref,
};
/// Element pointer with a comptime-known immediate index.
pub const ElemPtrImm = struct {
ptr: Ref,
index: u32,
};
/// 0. multi_cases_len: u32 // If has_multi_cases is set.
/// 1. else_body { // If has_else or has_under is set.
/// body_len: u32,
/// body member Index for every body_len
/// }
/// 2. scalar_cases: { // for every scalar_cases_len
/// item: Ref,
/// body_len: u32,
/// body member Index for every body_len
/// }
/// 3. multi_cases: { // for every multi_cases_len
/// items_len: u32,
/// ranges_len: u32,
/// body_len: u32,
/// item: Ref // for every items_len
/// ranges: { // for every ranges_len
/// item_first: Ref,
/// item_last: Ref,
/// }
/// body member Index for every body_len
/// }
pub const SwitchBlock = struct {
/// This is always a `switch_cond` or `switch_cond_ref` instruction.
/// If it is a `switch_cond_ref` instruction, bits.is_ref is always true.
/// If it is a `switch_cond` instruction, bits.is_ref is always false.
/// Both `switch_cond` and `switch_cond_ref` return a value, not a pointer,
/// that is useful for the case items, but cannot be used for capture values.
/// For the capture values, Sema is expected to find the operand of this operand
/// and use that.
operand: Ref,
bits: Bits,
pub const Bits = packed struct {
/// If true, one or more prongs have multiple items.
has_multi_cases: bool,
/// If true, there is an else prong. This is mutually exclusive with `has_under`.
has_else: bool,
/// If true, there is an underscore prong. This is mutually exclusive with `has_else`.
has_under: bool,
/// If true, the `operand` is a pointer to the value being switched on.
/// TODO this flag is redundant with the tag of operand and can be removed.
is_ref: bool,
scalar_cases_len: ScalarCasesLen,
pub const ScalarCasesLen = u28;
/// Decode the mutually-exclusive has_else/has_under bits into an enum.
pub fn specialProng(bits: Bits) SpecialProng {
const has_else: u2 = @boolToInt(bits.has_else);
const has_under: u2 = @boolToInt(bits.has_under);
return switch ((has_else << 1) | has_under) {
0b00 => .none,
0b01 => .under,
0b10 => .@"else",
// Mutually exclusive per the field docs above.
0b11 => unreachable,
};
}
};
pub const ScalarProng = struct {
item: Ref,
body: []const Index,
};
/// TODO performance optimization: instead of having this helper method
/// change the definition of switch_capture instruction to store extra_index
/// instead of prong_index. This way, Sema won't be doing O(N^2) iterations
/// over the switch prongs.
/// Walks the trailing data (layout documented above `SwitchBlock`) to find
/// the scalar prong at `prong_index`. `extra_end` is the index in `extra`
/// just past the `SwitchBlock` fields themselves.
pub fn getScalarProng(
self: SwitchBlock,
zir: Zir,
extra_end: usize,
prong_index: usize,
) ScalarProng {
var extra_index: usize = extra_end;
// Skip the optional multi_cases_len word.
if (self.bits.has_multi_cases) {
extra_index += 1;
}
// Skip the else/underscore prong body, if present.
if (self.bits.specialProng() != .none) {
const body_len = zir.extra[extra_index];
extra_index += 1;
const body = zir.extra[extra_index..][0..body_len];
extra_index += body.len;
}
// Scan scalar prongs (item + body_len + body) until we reach the target.
var scalar_i: usize = 0;
while (true) : (scalar_i += 1) {
const item = @intToEnum(Ref, zir.extra[extra_index]);
extra_index += 1;
const body_len = zir.extra[extra_index];
extra_index += 1;
const body = zir.extra[extra_index..][0..body_len];
extra_index += body.len;
if (scalar_i < prong_index) continue;
return .{
.item = item,
.body = body,
};
}
}
pub const MultiProng = struct {
items: []const Ref,
body: []const Index,
};
/// Like `getScalarProng`, but for a multi-item prong. Only valid when
/// `bits.has_multi_cases` is set.
pub fn getMultiProng(
self: SwitchBlock,
zir: Zir,
extra_end: usize,
prong_index: usize,
) MultiProng {
// +1 for self.bits.has_multi_cases == true
var extra_index: usize = extra_end + 1;
// Skip the else/underscore prong body, if present.
if (self.bits.specialProng() != .none) {
const body_len = zir.extra[extra_index];
extra_index += 1;
const body = zir.extra[extra_index..][0..body_len];
extra_index += body.len;
}
// Skip all scalar prongs: item (1 word) + body_len (1 word) + body.
var scalar_i: usize = 0;
while (scalar_i < self.bits.scalar_cases_len) : (scalar_i += 1) {
extra_index += 1;
const body_len = zir.extra[extra_index];
extra_index += 1;
extra_index += body_len;
}
// Scan multi prongs until the target index.
var multi_i: u32 = 0;
while (true) : (multi_i += 1) {
const items_len = zir.extra[extra_index];
// Advances past both items_len and ranges_len.
extra_index += 2;
const body_len = zir.extra[extra_index];
extra_index += 1;
const items = zir.refSlice(extra_index, items_len);
extra_index += items_len;
// NOTE(review): the range pairs (item_first/item_last, ranges_len of them)
// documented in the trailing layout are never skipped here, which seems to
// assume prongs reached via this helper have ranges_len == 0 — confirm.
const body = zir.extra[extra_index..][0..body_len];
extra_index += body_len;
if (multi_i < prong_index) continue;
return .{
.items = items,
.body = body,
};
}
}
};
/// Field access by name, where the name is comptime-known.
pub const Field = struct {
lhs: Ref,
/// Offset into `string_bytes`.
field_name_start: u32,
};
/// Field access where the field name is a runtime `Ref`.
pub const FieldNamed = struct {
lhs: Ref,
field_name: Ref,
};
/// Like `FieldNamed`, plus an AST node offset.
pub const FieldNamedNode = struct {
node: i32,
lhs: Ref,
field_name: Ref,
};
/// Coercion of `operand` to `dest_type` (`@as`).
pub const As = struct {
dest_type: Ref,
operand: Ref,
};
/// Trailing:
/// 0. src_node: i32, // if has_src_node
/// 1. body_len: u32, // if has_body_len
/// 2. fields_len: u32, // if has_fields_len
/// 3. decls_len: u32, // if has_decls_len
/// 4. decl_bits: u32 // for every 8 decls
/// - sets of 4 bits:
/// 0b000X: whether corresponding decl is pub
/// 0b00X0: whether corresponding decl is exported
/// 0b0X00: whether corresponding decl has an align expression
/// 0bX000: whether corresponding decl has a linksection or an address space expression
/// 5. decl: { // for every decls_len
/// src_hash: [4]u32, // hash of source bytes
/// line: u32, // line number of decl, relative to parent
/// name: u32, // null terminated string index
/// - 0 means comptime or usingnamespace decl.
/// - if name == 0 `is_exported` determines which one: 0=comptime,1=usingnamespace
/// - 1 means test decl with no name.
/// - 2 means that the test is a decltest, doc_comment gives the name of the identifier
/// - if there is a 0 byte at the position `name` indexes, it indicates
/// this is a test decl, and the name starts at `name+1`.
/// value: Index,
/// doc_comment: u32, 0 if no doc comment, if this is a decltest, doc_comment references the decl name in the string table
/// align: Ref, // if corresponding bit is set
/// link_section_or_address_space: { // if corresponding bit is set.
/// link_section: Ref,
/// address_space: Ref,
/// }
/// }
/// 6. inst: Index // for every body_len
/// 7. flags: u32 // for every 8 fields
/// - sets of 4 bits:
/// 0b000X: whether corresponding field has an align expression
/// 0b00X0: whether corresponding field has a default expression
/// 0b0X00: whether corresponding field is comptime
/// 0bX000: unused
/// 8. fields: { // for every fields_len
/// field_name: u32,
/// field_type: Ref,
/// - if none, means `anytype`.
/// doc_comment: u32, // 0 if no doc comment
/// align: Ref, // if corresponding bit is set
/// default_value: Ref, // if corresponding bit is set
/// }
pub const StructDecl = struct {
/// Round-trips through `extended.small` via @bitCast, so it must stay 16 bits.
pub const Small = packed struct {
has_src_node: bool,
has_body_len: bool,
has_fields_len: bool,
has_decls_len: bool,
known_non_opv: bool,
known_comptime_only: bool,
name_strategy: NameStrategy,
layout: std.builtin.Type.ContainerLayout,
// Pads `Small` to 16 bits.
_: u6 = undefined,
};
};
/// How a container declaration derives its type name.
pub const NameStrategy = enum(u2) {
/// Use the same name as the parent declaration name.
/// e.g. `const Foo = struct {...};`.
parent,
/// Use the name of the currently executing comptime function call,
/// with the current parameters. e.g. `ArrayList(i32)`.
func,
/// Create an anonymous name for this declaration.
/// Like this: "ParentDeclName_struct_69"
anon,
};
/// Trailing:
/// 0. src_node: i32, // if has_src_node
/// 1. tag_type: Ref, // if has_tag_type
/// 2. body_len: u32, // if has_body_len
/// 3. fields_len: u32, // if has_fields_len
/// 4. decls_len: u32, // if has_decls_len
/// 5. decl_bits: u32 // for every 8 decls
/// - sets of 4 bits:
/// 0b000X: whether corresponding decl is pub
/// 0b00X0: whether corresponding decl is exported
/// 0b0X00: whether corresponding decl has an align expression
/// 0bX000: whether corresponding decl has a linksection or an address space expression
/// 6. decl: { // for every decls_len
/// src_hash: [4]u32, // hash of source bytes
/// line: u32, // line number of decl, relative to parent
/// name: u32, // null terminated string index
/// - 0 means comptime or usingnamespace decl.
/// - if name == 0 `is_exported` determines which one: 0=comptime,1=usingnamespace
/// - 1 means test decl with no name.
/// - if there is a 0 byte at the position `name` indexes, it indicates
/// this is a test decl, and the name starts at `name+1`.
/// value: Index,
/// doc_comment: u32, // 0 if no doc_comment
/// align: Ref, // if corresponding bit is set
/// link_section_or_address_space: { // if corresponding bit is set.
/// link_section: Ref,
/// address_space: Ref,
/// }
/// }
/// 7. inst: Index // for every body_len
/// 8. has_bits: u32 // for every 32 fields
/// - the bit is whether corresponding field has an value expression
/// 9. fields: { // for every fields_len
/// field_name: u32,
/// doc_comment: u32, // 0 if no doc_comment
/// value: Ref, // if corresponding bit is set
/// }
pub const EnumDecl = struct {
/// Round-trips through `extended.small` via @bitCast, so it must stay 16 bits.
pub const Small = packed struct {
has_src_node: bool,
has_tag_type: bool,
has_body_len: bool,
has_fields_len: bool,
has_decls_len: bool,
name_strategy: NameStrategy,
nonexhaustive: bool,
// Pads `Small` to 16 bits.
_: u8 = undefined,
};
};
/// Trailing:
/// 0. src_node: i32, // if has_src_node
/// 1. tag_type: Ref, // if has_tag_type
/// 2. body_len: u32, // if has_body_len
/// 3. fields_len: u32, // if has_fields_len
/// 4. decls_len: u32, // if has_decls_len
/// 5. decl_bits: u32 // for every 8 decls
/// - sets of 4 bits:
/// 0b000X: whether corresponding decl is pub
/// 0b00X0: whether corresponding decl is exported
/// 0b0X00: whether corresponding decl has an align expression
/// 0bX000: whether corresponding decl has a linksection or an address space expression
/// 6. decl: { // for every decls_len
/// src_hash: [4]u32, // hash of source bytes
/// line: u32, // line number of decl, relative to parent
/// name: u32, // null terminated string index
/// - 0 means comptime or usingnamespace decl.
/// - if name == 0 `is_exported` determines which one: 0=comptime,1=usingnamespace
/// - 1 means test decl with no name.
/// - if there is a 0 byte at the position `name` indexes, it indicates
/// this is a test decl, and the name starts at `name+1`.
/// value: Index,
/// doc_comment: u32, // 0 if no doc comment
/// align: Ref, // if corresponding bit is set
/// link_section_or_address_space: { // if corresponding bit is set.
/// link_section: Ref,
/// address_space: Ref,
/// }
/// }
/// 7. inst: Index // for every body_len
/// 8. has_bits: u32 // for every 8 fields
/// - sets of 4 bits:
/// 0b000X: whether corresponding field has a type expression
/// 0b00X0: whether corresponding field has a align expression
/// 0b0X00: whether corresponding field has a tag value expression
/// 0bX000: unused
/// 9. fields: { // for every fields_len
/// field_name: u32, // null terminated string index
/// doc_comment: u32, // 0 if no doc comment
/// field_type: Ref, // if corresponding bit is set
/// - if none, means `anytype`.
/// align: Ref, // if corresponding bit is set
/// tag_value: Ref, // if corresponding bit is set
/// }
pub const UnionDecl = struct {
/// Round-trips through `extended.small` via @bitCast, so it must stay 16 bits.
pub const Small = packed struct {
has_src_node: bool,
has_tag_type: bool,
has_body_len: bool,
has_fields_len: bool,
has_decls_len: bool,
name_strategy: NameStrategy,
layout: std.builtin.Type.ContainerLayout,
/// has_tag_type | auto_enum_tag | result
/// -------------------------------------
/// false | false | union { }
/// false | true | union(enum) { }
/// true | true | union(enum(T)) { }
/// true | false | union(T) { }
auto_enum_tag: bool,
// Pads `Small` to 16 bits.
_: u6 = undefined,
};
};
/// Trailing:
/// 0. src_node: i32, // if has_src_node
/// 1. decls_len: u32, // if has_decls_len
/// 2. decl_bits: u32 // for every 8 decls
/// - sets of 4 bits:
/// 0b000X: whether corresponding decl is pub
/// 0b00X0: whether corresponding decl is exported
/// 0b0X00: whether corresponding decl has an align expression
/// 0bX000: whether corresponding decl has a linksection or an address space expression
/// 3. decl: { // for every decls_len
/// src_hash: [4]u32, // hash of source bytes
/// line: u32, // line number of decl, relative to parent
/// name: u32, // null terminated string index
/// - 0 means comptime or usingnamespace decl.
/// - if name == 0 `is_exported` determines which one: 0=comptime,1=usingnamespace
/// - 1 means test decl with no name.
/// - if there is a 0 byte at the position `name` indexes, it indicates
/// this is a test decl, and the name starts at `name+1`.
/// value: Index,
/// doc_comment: u32, // 0 if no doc comment,
/// align: Ref, // if corresponding bit is set
/// link_section_or_address_space: { // if corresponding bit is set.
/// link_section: Ref,
/// address_space: Ref,
/// }
/// }
pub const OpaqueDecl = struct {
/// Round-trips through `extended.small` via @bitCast, so it must stay 16 bits.
pub const Small = packed struct {
has_src_node: bool,
has_decls_len: bool,
name_strategy: NameStrategy,
// Pads `Small` to 16 bits.
_: u12 = undefined,
};
};
/// Trailing:
/// { // for every fields_len
/// field_name: u32 // null terminated string index
/// doc_comment: u32 // null terminated string index
/// }
pub const ErrorSetDecl = struct {
fields_len: u32,
};
/// An f128 constant, stored as four u32 words, least significant first.
pub const Float128 = struct {
piece0: u32,
piece1: u32,
piece2: u32,
piece3: u32,

/// Reassemble the four 32-bit words into the original f128 bit pattern.
pub fn get(self: Float128) f128 {
// Fold the words in from most significant to least significant.
var bits: u128 = self.piece3;
bits = (bits << 32) | self.piece2;
bits = (bits << 32) | self.piece1;
bits = (bits << 32) | self.piece0;
return @bitCast(f128, bits);
}
};
/// Trailing is an item per field.
pub const StructInit = struct {
fields_len: u32,
pub const Item = struct {
/// The `field_type` ZIR instruction for this field init.
field_type: Index,
/// The field init expression to be used as the field value.
init: Ref,
};
};
/// Trailing is an Item per field.
/// TODO make this instead array of inits followed by array of names because
/// it will be simpler Sema code and better for CPU cache.
pub const StructInitAnon = struct {
fields_len: u32,
pub const Item = struct {
/// Null-terminated string table index.
field_name: u32,
/// The field init expression to be used as the field value.
init: Ref,
};
};
/// A field of `container_type`, identified by a comptime-known name.
pub const FieldType = struct {
container_type: Ref,
/// Offset into `string_bytes`, null terminated.
name_start: u32,
};
/// Like `FieldType`, but the field name is a runtime `Ref`.
pub const FieldTypeRef = struct {
container_type: Ref,
field_name: Ref,
};
/// Overflow arithmetic builtins: lhs/rhs operands plus the pointer
/// receiving the overflow result.
pub const OverflowArithmetic = struct {
node: i32,
lhs: Ref,
rhs: Ref,
ptr: Ref,
};
/// Operands of `@cmpxchgWeak`/`@cmpxchgStrong`.
pub const Cmpxchg = struct {
ptr: Ref,
expected_value: Ref,
new_value: Ref,
success_order: Ref,
failure_order: Ref,
};
/// Operands of `@atomicRmw`.
pub const AtomicRmw = struct {
ptr: Ref,
operation: Ref,
operand: Ref,
ordering: Ref,
};
/// Operands of `@unionInit`.
pub const UnionInit = struct {
union_type: Ref,
field_name: Ref,
init: Ref,
};
/// Operands of `@atomicStore`.
pub const AtomicStore = struct {
ptr: Ref,
operand: Ref,
ordering: Ref,
};
/// Operands of `@atomicLoad`.
pub const AtomicLoad = struct {
elem_type: Ref,
ptr: Ref,
ordering: Ref,
};
/// Operands of `@mulAdd`.
pub const MulAdd = struct {
mulend1: Ref,
mulend2: Ref,
addend: Ref,
};
/// Operands of `@fieldParentPtr`.
pub const FieldParentPtr = struct {
parent_type: Ref,
field_name: Ref,
field_ptr: Ref,
};
/// Operands of `@memcpy`.
pub const Memcpy = struct {
dest: Ref,
source: Ref,
byte_count: Ref,
};
/// Operands of `@memset`.
pub const Memset = struct {
dest: Ref,
byte: Ref,
byte_count: Ref,
};
/// Operands of `@shuffle`.
pub const Shuffle = struct {
elem_type: Ref,
a: Ref,
b: Ref,
mask: Ref,
};
/// Operands of `@select`.
pub const Select = struct {
elem_type: Ref,
pred: Ref,
a: Ref,
b: Ref,
};
/// Operands of an async function call with an explicit frame.
pub const AsyncCall = struct {
frame_buffer: Ref,
result_ptr: Ref,
fn_ptr: Ref,
args: Ref,
};
/// Trailing: inst: Index // for every body_len
pub const Param = struct {
/// Null-terminated string index.
name: u32,
/// 0 if no doc comment
doc_comment: u32,
/// The body contains the type of the parameter.
body_len: u32,
};
/// Trailing:
/// 0. type_inst: Ref, // if small 0b000X is set
/// 1. align_inst: Ref, // if small 0b00X0 is set
pub const AllocExtended = struct {
src_node: i32,
/// Round-trips through `extended.small`, so it must stay 16 bits.
pub const Small = packed struct {
has_type: bool,
has_align: bool,
is_const: bool,
is_comptime: bool,
// Pads `Small` to 16 bits.
_: u12 = undefined,
};
};
/// Operands of `@export` of a declaration.
pub const Export = struct {
/// If present, this is referring to a Decl via field access, e.g. `a.b`.
/// If omitted, this is referring to a Decl via identifier, e.g. `a`.
namespace: Ref,
/// Null-terminated string index.
decl_name: u32,
options: Ref,
};
/// Operands of `@export` of a comptime value.
pub const ExportValue = struct {
/// The comptime value to export.
operand: Ref,
options: Ref,
};
/// Trailing: `CompileErrors.Item` for each `items_len`.
pub const CompileErrors = struct {
items_len: u32,
/// Trailing: `note_payload_index: u32` for each `notes_len`.
/// It's a payload index of another `Item`.
pub const Item = struct {
/// null terminated string index
msg: u32,
node: Ast.Node.Index,
/// If node is 0 then this will be populated.
token: Ast.TokenIndex,
/// Can be used in combination with `token`.
byte_offset: u32,
/// 0 or a payload index of a `Block`, each is a payload
/// index of another `Item`.
notes: u32,
};
};
/// Trailing: for each `imports_len` there is an Item
pub const Imports = struct {
imports_len: Inst.Index,
pub const Item = struct {
/// null terminated string index
name: u32,
/// points to the import name
token: Ast.TokenIndex,
};
};
/// A 1-based(?) line/column pair for debug statements — confirm base against emitter.
pub const LineColumn = struct {
line: u32,
column: u32,
};
};
pub const SpecialProng = enum { none, @"else", under };
/// Iterates the decl records trailing a container declaration in `extra`.
/// Construct via `declIterator`/`declIteratorInner`.
pub const DeclIterator = struct {
/// Next `extra` index to read a decl record from.
extra_index: usize,
/// Index of the next decl_bits bag to load.
bit_bag_index: usize,
/// Remaining flag bits of the currently loaded bag.
cur_bit_bag: u32,
/// Number of decls yielded so far.
decl_i: u32,
decls_len: u32,
zir: Zir,
pub const Item = struct {
name: [:0]const u8,
/// `extra` index of the start of this decl's record.
sub_index: u32,
};
pub fn next(it: *DeclIterator) ?Item {
if (it.decl_i >= it.decls_len) return null;
// Each u32 bag packs 4 flag bits for each of 8 decls; reload every 8th decl.
if (it.decl_i % 8 == 0) {
it.cur_bit_bag = it.zir.extra[it.bit_bag_index];
it.bit_bag_index += 1;
}
it.decl_i += 1;
// Consume this decl's 4 flag bits (pub/export/align/linksection-or-addrspace).
const flags = @truncate(u4, it.cur_bit_bag);
it.cur_bit_bag >>= 4;
const sub_index = @intCast(u32, it.extra_index);
it.extra_index += 5; // src_hash(4) + line(1)
const name = it.zir.nullTerminatedString(it.zir.extra[it.extra_index]);
it.extra_index += 3; // name(1) + value(1) + doc_comment(1)
// Skip the optional align word (bit 2) and linksection/addrspace word (bit 3).
// NOTE(review): the container layout docs describe link_section_or_address_space
// as holding two Refs, yet only one word is skipped here — confirm which is right.
it.extra_index += @truncate(u1, flags >> 2);
it.extra_index += @truncate(u1, flags >> 3);
return Item{
.sub_index = sub_index,
.name = name,
};
}
};
/// Returns an iterator over the decls of the container declared by `decl_inst`.
/// Walks past the optional leading fields of each container kind (as documented
/// on `StructDecl`/`EnumDecl`/`UnionDecl`/`OpaqueDecl`) to find decls_len and
/// the start of the decl_bits bags.
pub fn declIterator(zir: Zir, decl_inst: u32) DeclIterator {
const tags = zir.instructions.items(.tag);
const datas = zir.instructions.items(.data);
switch (tags[decl_inst]) {
// Functions are allowed and yield no iterations.
// There is one case matching this in the extended instruction set below.
.func, .func_inferred, .func_fancy => return declIteratorInner(zir, 0, 0),
.extended => {
const extended = datas[decl_inst].extended;
switch (extended.opcode) {
.struct_decl => {
const small = @bitCast(Inst.StructDecl.Small, extended.small);
var extra_index: usize = extended.operand;
// Skip optional src_node, body_len, fields_len to reach decls_len.
extra_index += @boolToInt(small.has_src_node);
extra_index += @boolToInt(small.has_body_len);
extra_index += @boolToInt(small.has_fields_len);
const decls_len = if (small.has_decls_len) decls_len: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :decls_len decls_len;
} else 0;
return declIteratorInner(zir, extra_index, decls_len);
},
.enum_decl => {
const small = @bitCast(Inst.EnumDecl.Small, extended.small);
var extra_index: usize = extended.operand;
// Skip optional src_node, tag_type, body_len, fields_len.
extra_index += @boolToInt(small.has_src_node);
extra_index += @boolToInt(small.has_tag_type);
extra_index += @boolToInt(small.has_body_len);
extra_index += @boolToInt(small.has_fields_len);
const decls_len = if (small.has_decls_len) decls_len: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :decls_len decls_len;
} else 0;
return declIteratorInner(zir, extra_index, decls_len);
},
.union_decl => {
const small = @bitCast(Inst.UnionDecl.Small, extended.small);
var extra_index: usize = extended.operand;
// Skip optional src_node, tag_type, body_len, fields_len.
extra_index += @boolToInt(small.has_src_node);
extra_index += @boolToInt(small.has_tag_type);
extra_index += @boolToInt(small.has_body_len);
extra_index += @boolToInt(small.has_fields_len);
const decls_len = if (small.has_decls_len) decls_len: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :decls_len decls_len;
} else 0;
return declIteratorInner(zir, extra_index, decls_len);
},
.opaque_decl => {
const small = @bitCast(Inst.OpaqueDecl.Small, extended.small);
var extra_index: usize = extended.operand;
// Opaque containers only have the optional src_node before decls_len.
extra_index += @boolToInt(small.has_src_node);
const decls_len = if (small.has_decls_len) decls_len: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :decls_len decls_len;
} else 0;
return declIteratorInner(zir, extra_index, decls_len);
},
else => unreachable,
}
},
else => unreachable,
}
}
/// Build a `DeclIterator` given the `extra` index of the first decl_bits bag
/// and the number of decls. The decl records begin after the bit bags.
pub fn declIteratorInner(zir: Zir, extra_index: usize, decls_len: u32) DeclIterator {
    // Each u32 bit-bag covers 8 decls, so round the bag count up.
    const bag_count = (@as(usize, decls_len) + 7) / 8;
    return DeclIterator{
        .zir = zir,
        .bit_bag_index = extra_index,
        .extra_index = extra_index + bag_count,
        .cur_bit_bag = undefined,
        .decl_i = 0,
        .decls_len = decls_len,
    };
}
/// The iterator would have to allocate memory anyway to iterate. So here we populate
/// an ArrayList as the result.
pub fn findDecls(zir: Zir, list: *std.ArrayList(Inst.Index), decl_sub_index: u32) !void {
    list.clearRetainingCapacity();
    // The decl record layout is src_hash(4) + line(1) + name(1), so the
    // decl's value instruction sits at offset 6 (see DeclIterator.next).
    const value_inst = zir.extra[decl_sub_index + 6];
    return zir.findDeclsInner(list, value_inst);
}
/// Recursively walks the ZIR instruction `inst`, appending every
/// declaration-bearing instruction (functions and container type decls) to
/// `list`, and descending into any nested bodies that could contain more.
/// Instructions with no nested bodies are ignored.
fn findDeclsInner(
    zir: Zir,
    list: *std.ArrayList(Inst.Index),
    inst: Inst.Index,
) Allocator.Error!void {
    const tags = zir.instructions.items(.tag);
    const datas = zir.instructions.items(.data);
    switch (tags[inst]) {
        // Functions instructions are interesting and have a body.
        .func,
        .func_inferred,
        => {
            try list.append(inst);
            const inst_data = datas[inst].pl_node;
            const extra = zir.extraData(Inst.Func, inst_data.payload_index);
            var extra_index: usize = extra.end;
            // ret_body_len == 1 encodes a single Ref (skipped); > 1 encodes
            // an inline return-type body which must itself be searched.
            switch (extra.data.ret_body_len) {
                0 => {},
                1 => extra_index += 1,
                else => {
                    const body = zir.extra[extra_index..][0..extra.data.ret_body_len];
                    extra_index += body.len;
                    try zir.findDeclsBody(list, body);
                },
            }
            // The function body follows the (optional) return-type body.
            const body = zir.extra[extra_index..][0..extra.data.body_len];
            return zir.findDeclsBody(list, body);
        },
        .func_fancy => {
            try list.append(inst);
            const inst_data = datas[inst].pl_node;
            const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index);
            var extra_index: usize = extra.end;
            extra_index += @boolToInt(extra.data.bits.has_lib_name);
            // Each optional clause (align, addrspace, section, cc, ret_ty) is
            // either a length-prefixed inline body (recursed into) or a
            // single Ref (skipped). The order here mirrors the encoding.
            if (extra.data.bits.has_align_body) {
                const body_len = zir.extra[extra_index];
                extra_index += 1;
                const body = zir.extra[extra_index..][0..body_len];
                try zir.findDeclsBody(list, body);
                extra_index += body.len;
            } else if (extra.data.bits.has_align_ref) {
                extra_index += 1;
            }
            if (extra.data.bits.has_addrspace_body) {
                const body_len = zir.extra[extra_index];
                extra_index += 1;
                const body = zir.extra[extra_index..][0..body_len];
                try zir.findDeclsBody(list, body);
                extra_index += body.len;
            } else if (extra.data.bits.has_addrspace_ref) {
                extra_index += 1;
            }
            if (extra.data.bits.has_section_body) {
                const body_len = zir.extra[extra_index];
                extra_index += 1;
                const body = zir.extra[extra_index..][0..body_len];
                try zir.findDeclsBody(list, body);
                extra_index += body.len;
            } else if (extra.data.bits.has_section_ref) {
                extra_index += 1;
            }
            if (extra.data.bits.has_cc_body) {
                const body_len = zir.extra[extra_index];
                extra_index += 1;
                const body = zir.extra[extra_index..][0..body_len];
                try zir.findDeclsBody(list, body);
                extra_index += body.len;
            } else if (extra.data.bits.has_cc_ref) {
                extra_index += 1;
            }
            if (extra.data.bits.has_ret_ty_body) {
                const body_len = zir.extra[extra_index];
                extra_index += 1;
                const body = zir.extra[extra_index..][0..body_len];
                try zir.findDeclsBody(list, body);
                extra_index += body.len;
            } else if (extra.data.bits.has_ret_ty_ref) {
                extra_index += 1;
            }
            extra_index += @boolToInt(extra.data.bits.has_any_noalias);
            // Finally, the main function body.
            const body = zir.extra[extra_index..][0..extra.data.body_len];
            return zir.findDeclsBody(list, body);
        },
        .extended => {
            const extended = datas[inst].extended;
            switch (extended.opcode) {
                // Decl instructions are interesting but have no body.
                // TODO yes they do have a body actually. recurse over them just like block instructions.
                .struct_decl,
                .union_decl,
                .enum_decl,
                .opaque_decl,
                => return list.append(inst),
                else => return,
            }
        },
        // Block instructions, recurse over the bodies.
        .block, .block_inline => {
            const inst_data = datas[inst].pl_node;
            const extra = zir.extraData(Inst.Block, inst_data.payload_index);
            const body = zir.extra[extra.end..][0..extra.data.body_len];
            return zir.findDeclsBody(list, body);
        },
        .condbr, .condbr_inline => {
            const inst_data = datas[inst].pl_node;
            const extra = zir.extraData(Inst.CondBr, inst_data.payload_index);
            // The else body is laid out immediately after the then body.
            const then_body = zir.extra[extra.end..][0..extra.data.then_body_len];
            const else_body = zir.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
            try zir.findDeclsBody(list, then_body);
            try zir.findDeclsBody(list, else_body);
        },
        .@"try", .try_ptr => {
            const inst_data = datas[inst].pl_node;
            const extra = zir.extraData(Inst.Try, inst_data.payload_index);
            const body = zir.extra[extra.end..][0..extra.data.body_len];
            try zir.findDeclsBody(list, body);
        },
        // Switch prong bodies have their own layout; handled separately.
        .switch_block => return findDeclsSwitch(zir, list, inst),
        .suspend_block => @panic("TODO iterate suspend block"),
        else => return, // Regular instruction, not interesting.
    }
}
/// Walks all prong bodies of a `switch_block` instruction, feeding each one
/// to `findDeclsBody`. The traversal must skip over the encoded case items
/// and ranges exactly as they are laid out in `zir.extra`.
fn findDeclsSwitch(
    zir: Zir,
    list: *std.ArrayList(Inst.Index),
    inst: Inst.Index,
) Allocator.Error!void {
    const inst_data = zir.instructions.items(.data)[inst].pl_node;
    const extra = zir.extraData(Inst.SwitchBlock, inst_data.payload_index);
    var extra_index: usize = extra.end;
    // Optional leading u32: the number of multi-prong cases.
    const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
        const multi_cases_len = zir.extra[extra_index];
        extra_index += 1;
        break :blk multi_cases_len;
    } else 0;
    // Special prong (else/underscore), if present, comes first.
    const special_prong = extra.data.bits.specialProng();
    if (special_prong != .none) {
        const body_len = zir.extra[extra_index];
        extra_index += 1;
        const body = zir.extra[extra_index..][0..body_len];
        extra_index += body.len;
        try zir.findDeclsBody(list, body);
    }
    {
        // Scalar cases: one item Ref (skipped), then a length-prefixed body.
        const scalar_cases_len = extra.data.bits.scalar_cases_len;
        var scalar_i: usize = 0;
        while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
            extra_index += 1;
            const body_len = zir.extra[extra_index];
            extra_index += 1;
            const body = zir.extra[extra_index..][0..body_len];
            extra_index += body_len;
            try zir.findDeclsBody(list, body);
        }
    }
    {
        // Multi cases: items_len, ranges_len, body_len, then the items,
        // then two u32s per range (presumably first/last bound Refs —
        // not needed here, only skipped), then the body.
        var multi_i: usize = 0;
        while (multi_i < multi_cases_len) : (multi_i += 1) {
            const items_len = zir.extra[extra_index];
            extra_index += 1;
            const ranges_len = zir.extra[extra_index];
            extra_index += 1;
            const body_len = zir.extra[extra_index];
            extra_index += 1;
            const items = zir.refSlice(extra_index, items_len);
            extra_index += items_len;
            _ = items;
            var range_i: usize = 0;
            while (range_i < ranges_len) : (range_i += 1) {
                extra_index += 1;
                extra_index += 1;
            }
            const body = zir.extra[extra_index..][0..body_len];
            extra_index += body_len;
            try zir.findDeclsBody(list, body);
        }
    }
}
/// Applies `findDeclsInner` to every instruction of `body`, accumulating
/// any declarations found into `list`.
fn findDeclsBody(
    zir: Zir,
    list: *std.ArrayList(Inst.Index),
    body: []const Inst.Index,
) Allocator.Error!void {
    for (body) |body_inst| try zir.findDeclsInner(list, body_inst);
}
/// Decoded view of a function instruction, as produced by `getFnInfo`.
pub const FnInfo = struct {
    /// Instructions of the block declaring the function's parameters.
    param_body: []const Inst.Index,
    /// The block (or block_inline) instruction containing `param_body`.
    param_body_inst: Inst.Index,
    /// Inline body computing the return type; empty when the return type
    /// is given directly by `ret_ty_ref`.
    ret_ty_body: []const Inst.Index,
    /// The function body instructions.
    body: []const Inst.Index,
    /// Direct return type reference; meaningful when `ret_ty_body` is empty.
    ret_ty_ref: Zir.Inst.Ref,
    /// Count of param* instructions found in `param_body`.
    total_params_len: u32,
};
/// Decodes a function instruction (`func`, `func_inferred`, or `func_fancy`)
/// into its parameter block, return type information, and body.
/// Asserts that `fn_inst` is one of those tags, and that its parameter block
/// is a `block` or `block_inline` instruction.
pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
    const tags = zir.instructions.items(.tag);
    const datas = zir.instructions.items(.data);
    const info: struct {
        param_block: Inst.Index,
        body: []const Inst.Index,
        ret_ty_ref: Inst.Ref,
        ret_ty_body: []const Inst.Index,
    } = switch (tags[fn_inst]) {
        .func, .func_inferred => blk: {
            const inst_data = datas[fn_inst].pl_node;
            const extra = zir.extraData(Inst.Func, inst_data.payload_index);
            var extra_index: usize = extra.end;
            var ret_ty_ref: Inst.Ref = .none;
            var ret_ty_body: []const Inst.Index = &.{};
            // ret_body_len == 1 encodes a single Ref; > 1 encodes an inline
            // return-type body; 0 means void.
            switch (extra.data.ret_body_len) {
                0 => {
                    ret_ty_ref = .void_type;
                },
                1 => {
                    ret_ty_ref = @intToEnum(Inst.Ref, zir.extra[extra_index]);
                    extra_index += 1;
                },
                else => {
                    ret_ty_body = zir.extra[extra_index..][0..extra.data.ret_body_len];
                    extra_index += ret_ty_body.len;
                },
            }
            const body = zir.extra[extra_index..][0..extra.data.body_len];
            extra_index += body.len;
            break :blk .{
                .param_block = extra.data.param_block,
                .ret_ty_ref = ret_ty_ref,
                .ret_ty_body = ret_ty_body,
                .body = body,
            };
        },
        .func_fancy => blk: {
            const inst_data = datas[fn_inst].pl_node;
            const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index);
            var extra_index: usize = extra.end;
            var ret_ty_ref: Inst.Ref = .void_type;
            var ret_ty_body: []const Inst.Index = &.{};
            extra_index += @boolToInt(extra.data.bits.has_lib_name);
            // Skip the align/addrspace/section/cc clauses without inspecting
            // them: a body is length-prefixed (len + 1 words), a ref is one
            // word. The order mirrors the encoding in findDeclsInner.
            if (extra.data.bits.has_align_body) {
                extra_index += zir.extra[extra_index] + 1;
            } else if (extra.data.bits.has_align_ref) {
                extra_index += 1;
            }
            if (extra.data.bits.has_addrspace_body) {
                extra_index += zir.extra[extra_index] + 1;
            } else if (extra.data.bits.has_addrspace_ref) {
                extra_index += 1;
            }
            if (extra.data.bits.has_section_body) {
                extra_index += zir.extra[extra_index] + 1;
            } else if (extra.data.bits.has_section_ref) {
                extra_index += 1;
            }
            if (extra.data.bits.has_cc_body) {
                extra_index += zir.extra[extra_index] + 1;
            } else if (extra.data.bits.has_cc_ref) {
                extra_index += 1;
            }
            // The return type clause is the one we actually extract.
            if (extra.data.bits.has_ret_ty_body) {
                const body_len = zir.extra[extra_index];
                extra_index += 1;
                ret_ty_body = zir.extra[extra_index..][0..body_len];
                extra_index += ret_ty_body.len;
            } else if (extra.data.bits.has_ret_ty_ref) {
                ret_ty_ref = @intToEnum(Inst.Ref, zir.extra[extra_index]);
                extra_index += 1;
            }
            extra_index += @boolToInt(extra.data.bits.has_any_noalias);
            const body = zir.extra[extra_index..][0..extra.data.body_len];
            extra_index += body.len;
            break :blk .{
                .param_block = extra.data.param_block,
                .ret_ty_ref = ret_ty_ref,
                .ret_ty_body = ret_ty_body,
                .body = body,
            };
        },
        else => unreachable,
    };
    assert(tags[info.param_block] == .block or tags[info.param_block] == .block_inline);
    const param_block = zir.extraData(Inst.Block, datas[info.param_block].pl_node.payload_index);
    const param_body = zir.extra[param_block.end..][0..param_block.data.body_len];
    // Count only the parameter-declaring instructions in the param block.
    var total_params_len: u32 = 0;
    for (param_body) |inst| {
        switch (tags[inst]) {
            .param, .param_comptime, .param_anytype, .param_anytype_comptime => {
                total_params_len += 1;
            },
            else => continue,
        }
    }
    return .{
        .param_body = param_body,
        .param_body_inst = info.param_block,
        .ret_ty_body = info.ret_ty_body,
        .ret_ty_ref = info.ret_ty_ref,
        .body = info.body,
        .total_params_len = total_params_len,
    };
}
/// Refs with values below this threshold name entries in
/// `Inst.Ref.typed_value_map`; refs at or above it encode instruction indices.
const ref_start_index: u32 = Inst.Ref.typed_value_map.len;

/// Encodes an instruction index as an `Inst.Ref` by offsetting it past the
/// reserved typed-value range. Inverse of `refToIndex`.
pub fn indexToRef(inst: Inst.Index) Inst.Ref {
    const shifted = ref_start_index + inst;
    return @intToEnum(Inst.Ref, shifted);
}
/// Decodes an `Inst.Ref` back into an instruction index, or returns null
/// when the ref falls in the reserved typed-value range. Inverse of
/// `indexToRef`.
pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
    const ref_int = @enumToInt(inst);
    if (ref_int < ref_start_index) return null;
    return ref_int - ref_start_index;
}
|
src/Zir.zig
|