code
stringlengths
38
801k
repo_path
stringlengths
6
263
// Aggregate structs
pub const Letter = @import("components/aggregate/Letter.zig");
pub const Mark = @import("components/aggregate/Mark.zig");
pub const Number = @import("components/aggregate/Number.zig");
pub const Punct = @import("components/aggregate/Punct.zig");
pub const Symbol = @import("components/aggregate/Symbol.zig");

// Display Width
pub const Width = @import("components/aggregate/Width.zig");

// String Segmentation
pub const CodePoint = @import("segmenter/CodePoint.zig");
pub const CodePointIterator = CodePoint.CodePointIterator;
pub const Grapheme = @import("segmenter/Grapheme.zig");
pub const GraphemeIterator = Grapheme.GraphemeIterator;
pub const ComptimeGraphemeIterator = Grapheme.ComptimeGraphemeIterator;
pub const Word = @import("segmenter/Word.zig");
pub const WordIterator = Word.WordIterator;
pub const ComptimeWordIterator = Word.ComptimeWordIterator;
pub const Sentence = @import("segmenter/Sentence.zig");
pub const SentenceIterator = Sentence.SentenceIterator;
pub const ComptimeSentenceIterator = Sentence.ComptimeSentenceIterator;

// Collation
pub const Collator = @import("collator/Collator.zig");

// Normalization
pub const Normalizer = @import("normalizer/Normalizer.zig");

// Auto-Generated
pub const Blocks = @import("components/autogen/Blocks.zig");
pub const Canonicals = @import("components/autogen/Canonicals.zig");
pub const CaseFoldMap = @import("components/autogen/CaseFoldMap.zig");
pub const CombiningMap = @import("components/autogen/CombiningMap.zig");
pub const DerivedCoreProperties = @import("components/autogen/DerivedCoreProperties.zig");
pub const DerivedEastAsianWidth = @import("components/autogen/DerivedEastAsianWidth.zig");
pub const DerivedGeneralCategory = @import("components/autogen/DerivedGeneralCategory.zig");
pub const DerivedNormalizationProps = @import("components/autogen/DerivedNormalizationProps.zig");
pub const DerivedNumericType = @import("components/autogen/DerivedNumericType.zig");
pub const EmojiData = @import("components/autogen/EmojiData.zig");
pub const GraphemeBreakProperty = @import("components/autogen/GraphemeBreakProperty.zig");
pub const HangulMap = @import("components/autogen/HangulMap.zig");
pub const LowerMap = @import("components/autogen/LowerMap.zig");
pub const PropList = @import("components/autogen/PropList.zig");
pub const SentenceBreakProperty = @import("components/autogen/SentenceBreakProperty.zig");
pub const TitleMap = @import("components/autogen/TitleMap.zig");
pub const UpperMap = @import("components/autogen/UpperMap.zig");
pub const WordBreakProperty = @import("components/autogen/WordBreakProperty.zig");
src/components.zig
const std = @import("std");
const c = @import("internal/c.zig");
const internal = @import("internal/internal.zig");

const log = std.log.scoped(.git);

pub const PATH_LIST_SEPARATOR = c.GIT_PATH_LIST_SEPARATOR;
pub const GIT_PASSTHROUGH = c.GIT_PASSTHROUGH;

pub usingnamespace @import("alloc.zig");
pub usingnamespace @import("annotated_commit.zig");
pub usingnamespace @import("attribute.zig");
pub usingnamespace @import("blame.zig");
pub usingnamespace @import("blob.zig");
pub usingnamespace @import("buffer.zig");
pub usingnamespace @import("certificate.zig");
pub usingnamespace @import("commit.zig");
pub usingnamespace @import("config.zig");
pub usingnamespace @import("credential.zig");
pub usingnamespace @import("describe.zig");
pub usingnamespace @import("diff.zig");
pub usingnamespace @import("errors.zig");
pub usingnamespace @import("filter.zig");
pub usingnamespace @import("handle.zig");
pub usingnamespace @import("index.zig");
pub usingnamespace @import("indexer.zig");
pub usingnamespace @import("mailmap.zig");
pub usingnamespace @import("merge.zig");
pub usingnamespace @import("message.zig");
pub usingnamespace @import("net.zig");
pub usingnamespace @import("notes.zig");
pub usingnamespace @import("object.zig");
pub usingnamespace @import("odb.zig");
pub usingnamespace @import("oid.zig");
pub usingnamespace @import("pack.zig");
pub usingnamespace @import("proxy.zig");
pub usingnamespace @import("ref_db.zig");
pub usingnamespace @import("reference.zig");
pub usingnamespace @import("remote.zig");
pub usingnamespace @import("repository.zig");
pub usingnamespace @import("signature.zig");
pub usingnamespace @import("status_list.zig");
pub usingnamespace @import("str_array.zig");
pub usingnamespace @import("transaction.zig");
pub usingnamespace @import("transport.zig");
pub usingnamespace @import("tree.zig");
pub usingnamespace @import("types.zig");
pub usingnamespace @import("worktree.zig");
pub usingnamespace @import("writestream.zig");

const git = @This();

/// Initialize global state. This function must be called before any other function.
///
/// *NOTE*: This function can be called multiple times; libgit2 reference-counts
/// initializations (the returned count is logged below).
pub fn init() !git.Handle {
    log.debug("init called", .{});

    // Wraps git_libgit2_init, which returns the number of in-flight initializations.
    const number = try internal.wrapCallWithReturn("git_libgit2_init", .{});

    if (number == 1) {
        // FIX: log messages previously misspelled "initalization(s)".
        log.debug("libgit2 initialization successful", .{});
    } else {
        log.debug("{} ongoing initializations without shutdown", .{number});
    }

    return git.Handle{};
}

/// Query which optional features the linked libgit2 was compiled with.
pub fn availableLibGit2Features() LibGit2Features {
    return @bitCast(LibGit2Features, c.git_libgit2_features());
}

/// Bitmask of compile-time libgit2 features, mirroring git_libgit2_features().
pub const LibGit2Features = packed struct {
    /// If set, libgit2 was built thread-aware and can be safely used from multiple threads.
    THREADS: bool = false,
    /// If set, libgit2 was built with and linked against a TLS implementation.
    /// Custom TLS streams may still be added by the user to support HTTPS regardless of this.
    HTTPS: bool = false,
    /// If set, libgit2 was built with and linked against libssh2. A custom transport may still be added by the user to support
    /// libssh2 regardless of this.
    SSH: bool = false,
    /// If set, libgit2 was built with support for sub-second resolution in file modification times.
    NSEC: bool = false,

    // Pads the struct to the size of the underlying c_uint; excluded from formatting.
    z_padding: u28 = 0,

    pub fn format(
        value: LibGit2Features,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        return internal.formatWithoutFields(value, options, writer, &.{"z_padding"});
    }

    test {
        try std.testing.expectEqual(@sizeOf(c_uint), @sizeOf(LibGit2Features));
        try std.testing.expectEqual(@bitSizeOf(c_uint), @bitSizeOf(LibGit2Features));
    }

    comptime {
        std.testing.refAllDecls(@This());
    }
};

comptime {
    std.testing.refAllDecls(@This());
}
src/git.zig
const xcb = @import("../xcb.zig");

pub const id = xcb.Extension{ .name = "XEVIE", .global_id = 0 };

/// @brief QueryVersioncookie
pub const QueryVersioncookie = struct {
    sequence: c_uint,
};

/// @brief QueryVersionRequest
pub const QueryVersionRequest = struct {
    @"major_opcode": u8,
    @"minor_opcode": u8 = 0,
    @"length": u16,
    @"client_major_version": u16,
    @"client_minor_version": u16,
};

/// @brief QueryVersionReply
pub const QueryVersionReply = struct {
    @"response_type": u8,
    @"pad0": u8,
    @"sequence": u16,
    @"length": u32,
    @"server_major_version": u16,
    @"server_minor_version": u16,
    @"pad1": [20]u8,
};

/// @brief Startcookie
pub const Startcookie = struct {
    sequence: c_uint,
};

/// @brief StartRequest
pub const StartRequest = struct {
    @"major_opcode": u8,
    @"minor_opcode": u8 = 1,
    @"length": u16,
    @"screen": u32,
};

/// @brief StartReply
pub const StartReply = struct {
    @"response_type": u8,
    @"pad0": u8,
    @"sequence": u16,
    @"length": u32,
    @"pad1": [24]u8,
};

/// @brief Endcookie
pub const Endcookie = struct {
    sequence: c_uint,
};

/// @brief EndRequest
pub const EndRequest = struct {
    @"major_opcode": u8,
    @"minor_opcode": u8 = 2,
    @"length": u16,
    @"cmap": u32,
};

/// @brief EndReply
pub const EndReply = struct {
    @"response_type": u8,
    @"pad0": u8,
    @"sequence": u16,
    @"length": u32,
    @"pad1": [24]u8,
};

pub const Datatype = extern enum(c_uint) {
    @"Unmodified" = 0,
    @"Modified" = 1,
};

/// @brief Event
pub const Event = struct {
    @"pad0": [32]u8,
};

/// @brief Sendcookie
pub const Sendcookie = struct {
    sequence: c_uint,
};

/// @brief SendRequest
pub const SendRequest = struct {
    @"major_opcode": u8,
    @"minor_opcode": u8 = 3,
    @"length": u16,
    @"event": xcb.xevie.Event,
    @"data_type": u32,
    @"pad0": [64]u8,
};

/// @brief SendReply
pub const SendReply = struct {
    @"response_type": u8,
    @"pad0": u8,
    @"sequence": u16,
    @"length": u32,
    @"pad1": [24]u8,
};

/// @brief SelectInputcookie
pub const SelectInputcookie = struct {
    sequence: c_uint,
};

/// @brief SelectInputRequest
pub const SelectInputRequest = struct {
    @"major_opcode": u8,
    @"minor_opcode": u8 = 4,
    @"length": u16,
    @"event_mask": u32,
};

/// @brief SelectInputReply
pub const SelectInputReply = struct {
    @"response_type": u8,
    @"pad0": u8,
    @"sequence": u16,
    @"length": u32,
    @"pad1": [24]u8,
};

test "" {
    @import("std").testing.refAllDecls(@This());
}
src/auto/xevie.zig
const std = @import("std");

/// Create a static C library with the setup shared by every library in this
/// build: target/mode propagation, libc linkage, and the public include dir.
fn commonStaticLib(
    b: *std.build.Builder,
    name: []const u8,
    target: std.zig.CrossTarget,
    mode: std.builtin.Mode,
) *std.build.LibExeObjStep {
    const lib = b.addStaticLibrary(name, null);
    lib.setTarget(target);
    lib.setBuildMode(mode);
    lib.linkLibC();
    lib.addIncludeDir("include");
    return lib;
}

pub fn build(b: *std.build.Builder) void {
    // Standard target options allows the person running `zig build` to choose
    // what target to build for. Here we do not override the defaults, which
    // means any target is allowed, and the default is native.
    const target = b.standardTargetOptions(.{});

    // Standard release options allow the person running `zig build` to select
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
    const mode = b.standardReleaseOptions();

    // Core flite engine; audio backends other than the portable ones are
    // disabled (see the -D flags below).
    const flite = commonStaticLib(b, "flite", target, mode);
    flite.addCSourceFiles(&.{
        //"src/audio/au_alsa.c",
        "src/audio/au_command.c",
        "src/audio/au_none.c",
        //"src/audio/au_oss.c",
        //"src/audio/au_palmos.c",
        //"src/audio/au_pulseaudio.c",
        "src/audio/au_streaming.c",
        //"src/audio/au_sun.c",
        //"src/audio/au_win.c",
        //"src/audio/au_wince.c",
        "src/audio/auclient.c",
        "src/audio/audio.c",
        "src/audio/auserver.c",
        "src/cg/cst_cg.c",
        "src/cg/cst_cg_dump_voice.c",
        "src/cg/cst_cg_load_voice.c",
        "src/cg/cst_cg_map.c",
        "src/cg/cst_mlpg.c",
        "src/cg/cst_mlsa.c",
        "src/cg/cst_spamf0.c",
        "src/cg/cst_vc.c",
        "src/hrg/cst_ffeature.c",
        "src/hrg/cst_item.c",
        "src/hrg/cst_rel_io.c",
        "src/hrg/cst_relation.c",
        "src/hrg/cst_utterance.c",
        "src/lexicon/cst_lexicon.c",
        "src/lexicon/cst_lts.c",
        "src/lexicon/cst_lts_rewrites.c",
        "src/regex/cst_regex.c",
        "src/regex/regexp.c",
        "src/regex/regsub.c",
        "src/speech/cst_lpcres.c",
        "src/speech/cst_track.c",
        "src/speech/cst_track_io.c",
        "src/speech/cst_wave.c",
        "src/speech/cst_wave_io.c",
        "src/speech/cst_wave_utils.c",
        "src/speech/g721.c",
        "src/speech/g723_24.c",
        "src/speech/g723_40.c",
        "src/speech/g72x.c",
        "src/speech/rateconv.c",
        "src/stats/cst_cart.c",
        "src/stats/cst_ss.c",
        "src/stats/cst_viterbi.c",
        "src/synth/cst_ffeatures.c",
        "src/synth/cst_phoneset.c",
        "src/synth/cst_ssml.c",
        "src/synth/cst_synth.c",
        "src/synth/cst_utt_utils.c",
        "src/synth/cst_voice.c",
        "src/synth/flite.c",
        "src/utils/cst_alloc.c",
        "src/utils/cst_args.c",
        "src/utils/cst_endian.c",
        "src/utils/cst_error.c",
        "src/utils/cst_features.c",
        //"src/utils/cst_file_palmos.c",
        "src/utils/cst_file_stdio.c",
        //"src/utils/cst_file_wince.c",
        "src/utils/cst_mmap_none.c",
        "src/utils/cst_mmap_posix.c",
        //"src/utils/cst_mmap_win32.c",
        "src/utils/cst_socket.c",
        "src/utils/cst_string.c",
        "src/utils/cst_tokenstream.c",
        "src/utils/cst_url.c",
        "src/utils/cst_val.c",
        "src/utils/cst_val_const.c",
        "src/utils/cst_val_user.c",
        "src/utils/cst_wchar.c",
        "src/wavesynth/cst_clunits.c",
        "src/wavesynth/cst_diphone.c",
        "src/wavesynth/cst_reflpc.c",
        "src/wavesynth/cst_sigpr.c",
        "src/wavesynth/cst_sts.c",
        "src/wavesynth/cst_units.c",
    }, &.{
        "-DCST_AUDIO_NONE",
        "-DCST_NO_SOCKETS",
    });

    // US English language support.
    const usenglish = commonStaticLib(b, "usenglish", target, mode);
    usenglish.addCSourceFiles(&.{
        "lang/usenglish/us_aswd.c",
        "lang/usenglish/us_dur_stats.c",
        "lang/usenglish/us_durz_cart.c",
        "lang/usenglish/us_expand.c",
        "lang/usenglish/us_f0_model.c",
        "lang/usenglish/us_f0lr.c",
        "lang/usenglish/us_ffeatures.c",
        "lang/usenglish/us_gpos.c",
        "lang/usenglish/us_int_accent_cart.c",
        "lang/usenglish/us_int_tone_cart.c",
        "lang/usenglish/us_nums_cart.c",
        "lang/usenglish/us_phoneset.c",
        "lang/usenglish/us_phrasing_cart.c",
        "lang/usenglish/us_pos_cart.c",
        "lang/usenglish/us_text.c",
        "lang/usenglish/usenglish.c",
    }, &.{});

    // CMU pronunciation lexicon.
    const cmulex = commonStaticLib(b, "cmulex", target, mode);
    cmulex.addCSourceFiles(&.{
        "lang/cmulex/cmu_lex.c",
        "lang/cmulex/cmu_lex_data.c",
        "lang/cmulex/cmu_lex_entries.c",
        "lang/cmulex/cmu_lts_model.c",
        "lang/cmulex/cmu_lts_rules.c",
        "lang/cmulex/cmu_postlex.c",
    }, &.{});

    // The "awb" voice, which depends on the language and lexicon libraries.
    const awb = commonStaticLib(b, "awb", target, mode);
    awb.addIncludeDir("lang/usenglish");
    awb.addIncludeDir("lang/cmulex");
    awb.linkLibrary(usenglish);
    awb.linkLibrary(cmulex);
    awb.addCSourceFiles(&.{
        "lang/cmu_us_awb/cmu_us_awb.c",
        "lang/cmu_us_awb/cmu_us_awb_cg.c",
        "lang/cmu_us_awb/cmu_us_awb_cg_durmodel.c",
        "lang/cmu_us_awb/cmu_us_awb_cg_f0_trees.c",
        "lang/cmu_us_awb/cmu_us_awb_cg_phonestate.c",
        "lang/cmu_us_awb/cmu_us_awb_cg_single_mcep_trees.c",
        "lang/cmu_us_awb/cmu_us_awb_cg_single_params.c",
        "lang/cmu_us_awb/cmu_us_awb_spamf0_accent.c",
        "lang/cmu_us_awb/cmu_us_awb_spamf0_accent_params.c",
        "lang/cmu_us_awb/cmu_us_awb_spamf0_phrase.c",
    }, &.{});

    // Command-line driver executable linking everything together.
    const exe = b.addExecutable("flite", null);
    exe.setTarget(target);
    exe.setBuildMode(mode);
    exe.install();
    exe.addIncludeDir("include");
    exe.addCSourceFiles(&.{
        "main/flite_main.c",
        "flite_lang_list.c",
        "flite_voice_list.c",
    }, &.{});
    exe.linkLibrary(flite);
    exe.linkLibrary(usenglish);
    exe.linkLibrary(cmulex);
    exe.linkLibrary(awb);
}
build.zig
// pub fn ScratchAllocator(comptime T: type) type { // return struct { // const Self = @This(); // backup_allocator: T, // end_index: usize, // buffer: []u8, // pub fn init (backup_allocator: T) @This() { // const scratch_buffer = backup_allocator.allocator().alloc(u8, 2 * 1024 * 1024) catch unreachable; // return .{ // .backup_allocator = backup_allocator, // .end_index = 0, // .buffer = scratch_buffer, // }; // } // pub fn allocator(self: *Self) Allocator { // return Allocator.init(self, alloc, Allocator.NoResize(Self).noResize, Allocator.PanicFree(Self).noOpFree); // } // pub fn alloc(self: *Self, n:usize, ptr_align:u29, len_align:u29, ret_addr:usize) Allocator.Error![]u8 { // const a = self.allocator(); // const addr = @ptrToInt(self.buffer.ptr) + self.end_index; // const adjusted_addr = mem.alignForward(addr, ptr_align); // const adjusted_index = self.end_index + (adjusted_addr - addr); // const new_end_index = adjusted_index + n; // } // }; // } // pub const ScratchAllocatorOld = struct { // allocator: *Allocator, // backup_allocator: *Allocator, // end_index: usize, // buffer: []u8, // const Self = @This(); // pub fn init(allocator: Allocator) ScratchAllocator { // const scratch_buffer = allocator.alloc(u8, 2 * 1024 * 1024) catch unreachable; // return ScratchAllocator{ // .allocator = Allocator.init(&allocator, alloc, Allocator.NoResize(Allocator).noResize, Allocator.PanicFree(Allocator).noOpFree), // .backup_allocator = allocator, // .buffer = scratch_buffer, // .end_index = 0, // }; // } // fn alloc(ptr: *Allocator, n: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Allocator.Error![]u8 { // const self = @fieldParentPtr(ScratchAllocator, "allocator", &ptr); // const addr = @ptrToInt(self.buffer.ptr) + self.end_index; // const adjusted_addr = mem.alignForward(addr, ptr_align); // const adjusted_index = self.end_index + (adjusted_addr - addr); // const new_end_index = adjusted_index + n; // if (new_end_index > self.buffer.len) { // // if more memory 
is requested then we have in our buffer leak like a sieve! // if (n > self.buffer.len) { // std.log.err("\n---------\nwarning: tmp allocated more than is in our temp allocator. This memory WILL leak!\n--------\n", .{}); // return self.allocator.alloc(self, n, ptr_align, len_align, ret_addr); // } // const result = self.buffer[0..n]; // self.end_index = n; // return result; // } // const result = self.buffer[adjusted_index..new_end_index]; // self.end_index = new_end_index; // return result; // } // }; // test "scratch allocator" { // var allocator_instance = ScratchAllocator.init(@import("mem.zig").allocator); // var slice = try allocator_instance.allocator.alloc(*i32, 100); // std.testing.expect(slice.len == 100); // _ = try allocator_instance.allocator.create(i32); // slice = try allocator_instance.allocator.realloc(slice, 20000); // std.testing.expect(slice.len == 20000); // allocator_instance.allocator.free(slice); // }
src/mem/scratch_allocator.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const ChildProcess = std.ChildProcess;

const LsColors = @import("lscolors").LsColors;
const Entry = @import("walkdir").Entry;

/// The kinds of actions that can be taken for each discovered entry.
pub const ActionType = enum {
    Print,
    Execute,
    ExecuteBatch,
};

/// An action together with its payload, if any.
pub const Action = union(ActionType) {
    Print,
    Execute: ExecuteTarget,
    ExecuteBatch: ExecuteBatchTarget,

    const Self = @This();

    pub const default: Self = .Print;

    pub fn deinit(self: *Self) void {
        switch (self.*) {
            .Execute => |*target| target.deinit(),
            .ExecuteBatch => |*target| target.deinit(),
            else => {},
        }
    }
};

/// Spawns a child process once per entry, with the entry's relative path
/// appended as the last command argument.
pub const ExecuteTarget = struct {
    allocator: Allocator,
    cmd: ArrayList([]const u8),

    const Self = @This();

    pub fn init(allocator: Allocator) Self {
        return Self{
            .allocator = allocator,
            .cmd = ArrayList([]const u8).init(allocator),
        };
    }

    pub fn deinit(self: *Self) void {
        self.cmd.deinit();
    }

    pub fn do(self: *Self, entry: Entry) !void {
        try self.cmd.append(entry.relative_path);
        // Pop the path again on exit so the base command can be reused.
        defer _ = self.cmd.pop();

        var child = try ChildProcess.init(self.cmd.items, self.allocator);
        defer child.deinit();

        _ = try child.spawnAndWait();
    }
};

/// Collects every entry and spawns the child process exactly once in
/// finalize(), with all collected paths appended to the command.
pub const ExecuteBatchTarget = struct {
    cmd: ArrayList([]const u8),
    allocator: Allocator,
    args: ArrayList(Entry),

    const Self = @This();

    pub fn init(allocator: Allocator) Self {
        return Self{
            .cmd = ArrayList([]const u8).init(allocator),
            .allocator = allocator,
            .args = ArrayList(Entry).init(allocator),
        };
    }

    pub fn do(self: *Self, entry: Entry) !void {
        try self.args.append(entry);
    }

    // NOTE(review): `args` is only released by finalize(); calling deinit()
    // without finalize() leaks the collected entries — confirm callers always
    // call finalize() first.
    pub fn deinit(self: *Self) void {
        self.cmd.deinit();
    }

    pub fn finalize(self: *Self) !void {
        defer self.args.deinit();
        defer for (self.args.items) |collected| collected.deinit();

        for (self.args.items) |collected| {
            try self.cmd.append(collected.relative_path);
        }

        var child = try ChildProcess.init(self.cmd.items, self.allocator);
        defer child.deinit();

        _ = try child.spawnAndWait();
    }
};

/// When to colorize output.
pub const ColorOption = enum {
    Auto,
    Always,
    Never,

    const Self = @This();

    pub const default = Self.Auto;
};

/// Options controlling how entries are printed.
pub const PrintOptions = struct {
    color: ?*LsColors,
    null_sep: bool,
    errors: bool,

    const Self = @This();

    pub const default = Self{
        .color = null,
        .null_sep = false,
        .errors = false,
    };
};

/// Print one entry to `writer`, colorized when `opt.color` is set, and
/// terminated by a NUL byte or a newline depending on `opt.null_sep`.
pub fn printEntry(entry: Entry, writer: anytype, opt: PrintOptions) !void {
    if (opt.color) |lsc| {
        try writer.print("{}", .{lsc.styledComponents(entry.relative_path)});
    } else {
        try writer.writeAll(entry.relative_path);
    }

    if (opt.null_sep) {
        try writer.writeAll(&[_]u8{0});
    } else {
        try writer.writeAll("\n");
    }
}
src/actions.zig
const Self = @This(); const build_options = @import("build_options"); const std = @import("std"); const assert = std.debug.assert; const math = std.math; const os = std.os; const wlr = @import("wlroots"); const wl = @import("wayland").server.wl; const server = &@import("main.zig").server; const util = @import("util.zig"); const Box = @import("Box.zig"); const Output = @import("Output.zig"); const Seat = @import("Seat.zig"); const ViewStack = @import("view_stack.zig").ViewStack; const XdgToplevel = @import("XdgToplevel.zig"); const XwaylandView = if (build_options.xwayland) @import("XwaylandView.zig") else @import("VoidView.zig"); const log = std.log.scoped(.view); pub const Constraints = struct { min_width: u32, max_width: u32, min_height: u32, max_height: u32, }; // Minimum width/height for surfaces. // This is needed, because external layouts and large padding and border sizes // may cause surfaces so small, that bugs in client applications are encountered, // or even surfaces of zero or negative size,which are a protocol error and would // likely cause river to crash. The value is totally arbitrary and low enough, // that it should never be encountered during normal usage. pub const min_size = 50; const Impl = union(enum) { xdg_toplevel: XdgToplevel, xwayland_view: XwaylandView, }; const State = struct { /// The output-relative effective coordinates and effective dimensions of the view. The /// surface itself may have other dimensions which are stored in the /// surface_box member. box: Box = Box{ .x = 0, .y = 0, .width = 0, .height = 0 }, /// The tags of the view, as a bitmask tags: u32, /// Number of seats currently focusing the view focus: u32 = 0, float: bool = false, fullscreen: bool = false, urgent: bool = false, }; const SavedBuffer = struct { client_buffer: *wlr.ClientBuffer, /// x/y relative to the root surface in the surface tree. 
surface_box: Box, source_box: wlr.FBox, transform: wl.Output.Transform, }; /// The implementation of this view impl: Impl = undefined, /// The output this view is currently associated with output: *Output, /// This is non-null exactly when the view is mapped surface: ?*wlr.Surface = null, /// This indicates that the view should be destroyed when the current /// transaction completes. See View.destroy() destroying: bool = false, /// The double-buffered state of the view current: State, pending: State, /// The serial sent with the currently pending configure event pending_serial: ?u32 = null, /// The currently commited geometry of the surface. The x/y may be negative if /// for example the client has decided to draw CSD shadows a la GTK. surface_box: Box = undefined, /// The geometry the view's surface had when the transaction started and /// buffers were saved. saved_surface_box: Box = undefined, /// These are what we render while a transaction is in progress saved_buffers: std.ArrayList(SavedBuffer), /// The floating dimensions the view, saved so that they can be restored if the /// view returns to floating mode. float_box: Box = undefined, /// This state exists purely to allow for more intuitive behavior when /// exiting fullscreen if there is no active layout. 
post_fullscreen_box: Box = undefined, draw_borders: bool = true, /// This is created when the view is mapped and destroyed when unmapped foreign_toplevel_handle: ?*wlr.ForeignToplevelHandleV1 = null, foreign_activate: wl.Listener(*wlr.ForeignToplevelHandleV1.event.Activated) = wl.Listener(*wlr.ForeignToplevelHandleV1.event.Activated).init(handleForeignActivate), foreign_fullscreen: wl.Listener(*wlr.ForeignToplevelHandleV1.event.Fullscreen) = wl.Listener(*wlr.ForeignToplevelHandleV1.event.Fullscreen).init(handleForeignFullscreen), foreign_close: wl.Listener(*wlr.ForeignToplevelHandleV1) = wl.Listener(*wlr.ForeignToplevelHandleV1).init(handleForeignClose), request_activate: wl.Listener(*wlr.XdgActivationV1.event.RequestActivate) = wl.Listener(*wlr.XdgActivationV1.event.RequestActivate).init(handleRequestActivate), pub fn init(self: *Self, output: *Output, tags: u32, surface: anytype) void { self.* = .{ .output = output, .current = .{ .tags = tags }, .pending = .{ .tags = tags }, .saved_buffers = std.ArrayList(SavedBuffer).init(util.gpa), }; if (@TypeOf(surface) == *wlr.XdgSurface) { self.impl = .{ .xdg_toplevel = undefined }; self.impl.xdg_toplevel.init(self, surface); } else if (build_options.xwayland and @TypeOf(surface) == *wlr.XwaylandSurface) { self.impl = .{ .xwayland_view = undefined }; self.impl.xwayland_view.init(self, surface); } else unreachable; } /// If saved buffers of the view are currently in use by a transaction, /// mark this view for destruction when the transaction completes. Otherwise /// destroy immediately. pub fn destroy(self: *Self) void { assert(self.surface == null); self.destroying = true; // If there are still saved buffers, then this view needs to be kept // around until the current transaction completes. 
This function will be // called again in Root.commitTransaction() if (self.saved_buffers.items.len == 0) { self.saved_buffers.deinit(); const node = @fieldParentPtr(ViewStack(Self).Node, "view", self); util.gpa.destroy(node); } } /// Handle changes to pending state and start a transaction to apply them pub fn applyPending(self: *Self) void { var arrange_output = false; if (self.current.tags != self.pending.tags) arrange_output = true; // If switching from float -> layout or layout -> float arrange the output // to get assigned a new size or fill the hole in the layout left behind. if (self.current.float != self.pending.float) { assert(!self.pending.fullscreen); // float state modifed while in fullscreen arrange_output = true; } // If switching from float to non-float, save the dimensions. if (self.current.float and !self.pending.float) self.float_box = self.current.box; // If switching from non-float to float, apply the saved float dimensions. if (!self.current.float and self.pending.float) self.pending.box = self.float_box; // If switching to fullscreen, set the dimensions to the full area of the output // Since no other views will be visible, we can skip arranging the output. if (!self.current.fullscreen and self.pending.fullscreen) { self.setFullscreen(true); self.post_fullscreen_box = self.current.box; const dimensions = self.output.getEffectiveResolution(); self.pending.box = .{ .x = 0, .y = 0, .width = dimensions.width, .height = dimensions.height, }; } if (self.current.fullscreen and !self.pending.fullscreen) { self.setFullscreen(false); self.pending.box = self.post_fullscreen_box; // If switching from fullscreen to layout, we need to arrange the output. 
if (!self.pending.float) arrange_output = true; } if (arrange_output) self.output.arrangeViews(); server.root.startTransaction(); } pub fn needsConfigure(self: Self) bool { return switch (self.impl) { .xdg_toplevel => |xdg_toplevel| xdg_toplevel.needsConfigure(), .xwayland_view => |xwayland_view| xwayland_view.needsConfigure(), }; } pub fn configure(self: *Self) void { switch (self.impl) { .xdg_toplevel => |*xdg_toplevel| xdg_toplevel.configure(), .xwayland_view => |*xwayland_view| xwayland_view.configure(), } } pub fn sendFrameDone(self: Self) void { var now: os.timespec = undefined; os.clock_gettime(os.CLOCK_MONOTONIC, &now) catch @panic("CLOCK_MONOTONIC not supported"); self.surface.?.sendFrameDone(&now); } pub fn dropSavedBuffers(self: *Self) void { for (self.saved_buffers.items) |buffer| buffer.client_buffer.base.unlock(); self.saved_buffers.items.len = 0; } pub fn saveBuffers(self: *Self) void { assert(self.saved_buffers.items.len == 0); self.saved_surface_box = self.surface_box; self.forEachSurface(*std.ArrayList(SavedBuffer), saveBuffersIterator, &self.saved_buffers); } fn saveBuffersIterator( surface: *wlr.Surface, surface_x: c_int, surface_y: c_int, saved_buffers: *std.ArrayList(SavedBuffer), ) callconv(.C) void { if (surface.buffer) |buffer| { var source_box: wlr.FBox = undefined; surface.getBufferSourceBox(&source_box); saved_buffers.append(.{ .client_buffer = buffer, .surface_box = .{ .x = surface_x, .y = surface_y, .width = @intCast(u32, surface.current.width), .height = @intCast(u32, surface.current.height), }, .source_box = source_box, .transform = surface.current.transform, }) catch return; _ = buffer.base.lock(); } } /// Move a view from one output to another, sending the required enter/leave /// events. 
pub fn sendToOutput(self: *Self, destination_output: *Output) void { const node = @fieldParentPtr(ViewStack(Self).Node, "view", self); self.output.views.remove(node); destination_output.views.attach(node, server.config.attach_mode); self.output.sendViewTags(); destination_output.sendViewTags(); if (self.pending.urgent) { self.output.sendUrgentTags(); destination_output.sendUrgentTags(); } // if the view is mapped send enter/leave events if (self.surface != null) { self.sendLeave(self.output); self.sendEnter(destination_output); // Must be present if surface is non-null indicating that the view // is mapped. self.foreign_toplevel_handle.?.outputLeave(self.output.wlr_output); self.foreign_toplevel_handle.?.outputEnter(destination_output.wlr_output); } if (self.pending.fullscreen) { const dimensions = destination_output.getEffectiveResolution(); self.pending.box = .{ .x = 0, .y = 0, .width = dimensions.width, .height = dimensions.height, }; } self.output = destination_output; } fn sendEnter(self: *Self, output: *Output) void { self.forEachSurface(*wlr.Output, sendEnterIterator, output.wlr_output); } fn sendEnterIterator(surface: *wlr.Surface, sx: c_int, sy: c_int, wlr_output: *wlr.Output) callconv(.C) void { surface.sendEnter(wlr_output); } fn sendLeave(self: *Self, output: *Output) void { self.forEachSurface(*wlr.Output, sendLeaveIterator, output.wlr_output); } fn sendLeaveIterator(surface: *wlr.Surface, sx: c_int, sy: c_int, wlr_output: *wlr.Output) callconv(.C) void { surface.sendLeave(wlr_output); } pub fn close(self: Self) void { switch (self.impl) { .xdg_toplevel => |xdg_toplevel| xdg_toplevel.close(), .xwayland_view => |xwayland_view| xwayland_view.close(), } } pub fn setActivated(self: Self, activated: bool) void { if (self.foreign_toplevel_handle) |handle| handle.setActivated(activated); switch (self.impl) { .xdg_toplevel => |xdg_toplevel| xdg_toplevel.setActivated(activated), .xwayland_view => |xwayland_view| xwayland_view.setActivated(activated), } } fn 
setFullscreen(self: Self, fullscreen: bool) void { if (self.foreign_toplevel_handle) |handle| handle.setFullscreen(fullscreen); switch (self.impl) { .xdg_toplevel => |xdg_toplevel| xdg_toplevel.setFullscreen(fullscreen), .xwayland_view => |xwayland_view| xwayland_view.setFullscreen(fullscreen), } } pub fn setResizing(self: Self, resizing: bool) void { switch (self.impl) { .xdg_toplevel => |xdg_toplevel| xdg_toplevel.setResizing(resizing), .xwayland_view => {}, } } /// Iterates over all surfaces, subsurfaces, and popups in the tree pub inline fn forEachSurface( self: Self, comptime T: type, iterator: fn (surface: *wlr.Surface, sx: c_int, sy: c_int, data: T) callconv(.C) void, user_data: T, ) void { switch (self.impl) { .xdg_toplevel => |xdg_toplevel| { xdg_toplevel.xdg_surface.forEachSurface(T, iterator, user_data); }, .xwayland_view => { assert(build_options.xwayland); self.surface.?.forEachSurface(T, iterator, user_data); }, } } /// Return the surface at output coordinates ox, oy and set sx, sy to the /// corresponding surface-relative coordinates, if there is a surface. pub fn surfaceAt(self: Self, ox: f64, oy: f64, sx: *f64, sy: *f64) ?*wlr.Surface { return switch (self.impl) { .xdg_toplevel => |xdg_toplevel| xdg_toplevel.surfaceAt(ox, oy, sx, sy), .xwayland_view => |xwayland_view| xwayland_view.surfaceAt(ox, oy, sx, sy), }; } /// Return the current title of the view if any. pub fn getTitle(self: Self) ?[*:0]const u8 { return switch (self.impl) { .xdg_toplevel => |xdg_toplevel| xdg_toplevel.getTitle(), .xwayland_view => |xwayland_view| xwayland_view.getTitle(), }; } /// Return the current app_id of the view if any. 
pub fn getAppId(self: Self) ?[*:0]const u8 {
    return switch (self.impl) {
        .xdg_toplevel => |xdg_toplevel| xdg_toplevel.getAppId(),
        .xwayland_view => |xwayland_view| xwayland_view.getAppId(),
    };
}

/// Clamp the width/height of the pending state to the constraints of the view
pub fn applyConstraints(self: *Self) void {
    const constraints = self.getConstraints();
    const box = &self.pending.box;
    box.width = math.clamp(box.width, constraints.min_width, constraints.max_width);
    box.height = math.clamp(box.height, constraints.min_height, constraints.max_height);
}

/// Return bounds on the dimensions of the view
pub fn getConstraints(self: Self) Constraints {
    return switch (self.impl) {
        .xdg_toplevel => |xdg_toplevel| xdg_toplevel.getConstraints(),
        .xwayland_view => |xwayland_view| xwayland_view.getConstraints(),
    };
}

/// Modify the pending x/y of the view by the given deltas, clamping to the
/// bounds of the output.
pub fn move(self: *Self, delta_x: i32, delta_y: i32) void {
    // Borderless views may use the full output; otherwise keep the border on screen.
    const border_width = if (self.draw_borders) @intCast(i32, server.config.border_width) else 0;
    const output_resolution = self.output.getEffectiveResolution();

    const max_x = @intCast(i32, output_resolution.width) -
        @intCast(i32, self.pending.box.width) - border_width;
    self.pending.box.x += delta_x;
    self.pending.box.x = math.max(self.pending.box.x, border_width);
    self.pending.box.x = math.min(self.pending.box.x, max_x);
    // A view wider than the output makes max_x negative; this final clamp
    // keeps x non-negative in that case.
    self.pending.box.x = math.max(self.pending.box.x, 0);

    const max_y = @intCast(i32, output_resolution.height) -
        @intCast(i32, self.pending.box.height) - border_width;
    self.pending.box.y += delta_y;
    self.pending.box.y = math.max(self.pending.box.y, border_width);
    self.pending.box.y = math.min(self.pending.box.y, max_y);
    self.pending.box.y = math.max(self.pending.box.y, 0);
}

/// Find and return the view corresponding to a given surface, if any
pub fn fromWlrSurface(surface: *wlr.Surface) ?*Self {
    if (surface.isXdgSurface()) {
        const xdg_surface = wlr.XdgSurface.fromWlrSurface(surface);
        if (xdg_surface.role == .toplevel) {
            // The view pointer is stashed in the wlroots userdata field.
            return @intToPtr(*Self, xdg_surface.data);
        }
    }
    if (build_options.xwayland) {
        if (surface.isXWaylandSurface()) {
            const xwayland_surface = wlr.XwaylandSurface.fromWlrSurface(surface);
            return @intToPtr(*Self, xwayland_surface.data);
        }
    }
    return null;
}

/// Whether the transaction system should wait for this view's next configure
/// before committing the pending state.
pub fn shouldTrackConfigure(self: Self) bool {
    // We don't give a damn about frame perfection for xwayland views
    if (build_options.xwayland and self.impl == .xwayland_view) return false;

    // There are exactly three cases in which we do not track configures
    // 1. the view was and remains floating
    // 2. the view is changing from float/layout to fullscreen
    // 3. the view is changing from fullscreen to float
    return !((self.pending.float and self.current.float) or
        (self.pending.fullscreen and !self.current.fullscreen) or
        (self.pending.float and !self.pending.fullscreen and self.current.fullscreen));
}

/// Called by the impl when the surface is ready to be displayed
pub fn map(self: *Self) !void {
    log.debug("view '{s}' mapped", .{self.getTitle()});

    {
        // The foreign-toplevel handle is created on map and destroyed on unmap.
        assert(self.foreign_toplevel_handle == null);
        const handle = try wlr.ForeignToplevelHandleV1.create(server.foreign_toplevel_manager);
        self.foreign_toplevel_handle = handle;

        handle.events.request_activate.add(&self.foreign_activate);
        handle.events.request_fullscreen.add(&self.foreign_fullscreen);
        handle.events.request_close.add(&self.foreign_close);

        if (self.getTitle()) |s| handle.setTitle(s);
        if (self.getAppId()) |s| handle.setAppId(s);

        handle.outputEnter(self.output.wlr_output);
    }

    server.xdg_activation.events.request_activate.add(&self.request_activate);

    // Add the view to the stack of its output
    const node = @fieldParentPtr(ViewStack(Self).Node, "view", self);
    self.output.views.attach(node, server.config.attach_mode);

    // Inform all seats that the view has been mapped so they can handle focus
    var it = server.input_manager.seats.first;
    while (it) |seat_node| : (it = seat_node.next) try seat_node.data.handleViewMap(self);

    self.sendEnter(self.output);

    self.output.sendViewTags();

    // Floating views do not affect the layout, so no arrange is needed.
    if (!self.current.float) self.output.arrangeViews();

    server.root.startTransaction();
}

/// Called by the impl when the surface will no longer be displayed
pub fn unmap(self: *Self) void {
    log.debug("view '{s}' unmapped", .{self.getTitle()});

    // Snapshot the client's buffers so the old content can still be rendered
    // while the rest of the layout settles.
    if (self.saved_buffers.items.len == 0) self.saveBuffers();

    assert(self.surface != null);
    self.surface = null;

    // Inform all seats that the view has been unmapped so they can handle focus
    var it = server.input_manager.seats.first;
    while (it) |seat_node| : (it = seat_node.next) seat_node.data.handleViewUnmap(self);

    // Tear down the foreign-toplevel handle created in map(); listeners must
    // be removed before the handle is destroyed.
    assert(self.foreign_toplevel_handle != null);
    self.foreign_activate.link.remove();
    self.foreign_fullscreen.link.remove();
    self.foreign_close.link.remove();
    self.foreign_toplevel_handle.?.destroy();
    self.foreign_toplevel_handle = null;

    self.request_activate.link.remove();

    self.output.sendViewTags();

    // Still need to arrange if fullscreened from the layout
    if (!self.current.float) self.output.arrangeViews();

    server.root.startTransaction();
}

/// Push the current title to the foreign-toplevel handle and to the status
/// trackers of any seat currently focusing this view.
pub fn notifyTitle(self: Self) void {
    if (self.foreign_toplevel_handle) |handle| {
        if (self.getTitle()) |s| handle.setTitle(s);
    }
    // Send title to all status listeners attached to a seat which focuses this view
    var seat_it = server.input_manager.seats.first;
    while (seat_it) |seat_node| : (seat_it = seat_node.next) {
        if (seat_node.data.focused == .view and seat_node.data.focused.view == &self) {
            var client_it = seat_node.data.status_trackers.first;
            while (client_it) |client_node| : (client_it = client_node.next) {
                client_node.data.sendFocusedView();
            }
        }
    }
}

/// Push the current app_id to the foreign-toplevel handle, if any.
pub fn notifyAppId(self: Self) void {
    if (self.foreign_toplevel_handle) |handle| {
        if (self.getAppId()) |s| handle.setAppId(s);
    }
}

/// Only honors the request if the view is already visible on the seat's
/// currently focused output. TODO: consider allowing this request to switch
/// output/tag focus.
fn handleForeignActivate(
    listener: *wl.Listener(*wlr.ForeignToplevelHandleV1.event.Activated),
    event: *wlr.ForeignToplevelHandleV1.event.Activated,
) void {
    // Recover the View from the embedded listener field.
    const self = @fieldParentPtr(Self, "foreign_activate", listener);
    const seat = @intToPtr(*Seat, event.seat.data);
    seat.focus(self);
    server.root.startTransaction();
}

/// Honor a fullscreen request made through the foreign-toplevel protocol.
fn handleForeignFullscreen(
    listener: *wl.Listener(*wlr.ForeignToplevelHandleV1.event.Fullscreen),
    event: *wlr.ForeignToplevelHandleV1.event.Fullscreen,
) void {
    const self = @fieldParentPtr(Self, "foreign_fullscreen", listener);
    self.pending.fullscreen = event.fullscreen;
    self.applyPending();
}

/// Close the view when requested through the foreign-toplevel protocol.
fn handleForeignClose(
    listener: *wl.Listener(*wlr.ForeignToplevelHandleV1),
    event: *wlr.ForeignToplevelHandleV1,
) void {
    // NOTE(review): `event` is unused here — confirm the targeted Zig version
    // accepts unused parameters or that a discard was intended.
    const self = @fieldParentPtr(Self, "foreign_close", listener);
    self.close();
}

/// xdg-activation activate requests are translated into urgency rather than
/// an immediate focus change.
fn handleRequestActivate(
    listener: *wl.Listener(*wlr.XdgActivationV1.event.RequestActivate),
    event: *wlr.XdgActivationV1.event.RequestActivate,
) void {
    // NOTE(review): `self` is computed but not referenced below — confirm
    // whether the lookup via fromWlrSurface() was meant to replace it.
    const self = @fieldParentPtr(Self, "request_activate", listener);
    if (fromWlrSurface(event.surface)) |view| {
        if (view.current.focus == 0) {
            // Only mark urgent when no seat currently focuses the view.
            view.pending.urgent = true;
            server.root.startTransaction();
        }
    }
}
source/river-0.1.0/river/View.zig
//! Convenience helpers layered on top of the raw N-API bindings in c.zig.
const translate = @import("translate.zig");
const std = @import("std");
const c = @import("c.zig");

// Re-export the lower-level translation helpers for callers of this module.
pub const register_function = translate.register_function;
pub const throw = translate.throw;
pub const slice_from_value = translate.slice_from_value;
pub const create_buffer = translate.create_buffer;

/// Create a JS string from a zero-terminated UTF-8 slice.
pub fn create_string(env: c.napi_env, value: [:0]const u8) !c.napi_value {
    var result: c.napi_value = undefined;
    if (c.napi_create_string_utf8(env, value, value.len, &result) != .napi_ok) {
        return translate.throw(env, "Failed to create string");
    }
    return result;
}

/// Invoke `console.log(message)` in the host JS environment.
pub fn console_log(env: c.napi_env, message: c.napi_value) !void {
    var global: c.napi_value = undefined;
    if (c.napi_get_global(env, &global) != .napi_ok) {
        return translate.throw(env, "Failed to get global object");
    }
    var napi_console: c.napi_value = undefined;
    if (c.napi_get_named_property(env, global, "console", &napi_console) != .napi_ok) {
        return translate.throw(env, "Failed to get the console object");
    }
    var napi_log: c.napi_value = undefined;
    if (c.napi_get_named_property(env, napi_console, "log", &napi_log) != .napi_ok) {
        return translate.throw(env, "Failed to get the log function");
    }
    var returned: c.napi_value = undefined;
    if (c.napi_call_function(env, napi_console, napi_log, 1, &message, &returned) != .napi_ok) {
        return translate.throw(env, "Failed to call the log function");
    }
}

/// Check whether `value` has the given `typeof`. Returns false (rather than
/// throwing) if the N-API call itself fails.
pub fn isTypeof(env: c.napi_env, value: c.napi_value, expected: c.napi_valuetype) bool {
    var actual: c.napi_valuetype = undefined;
    if (c.napi_typeof(env, value, &actual) == .napi_ok) {
        return actual == expected;
    }
    return false;
}

/// Evaluate `value instanceof Date`.
pub fn instanceOfDate(env: c.napi_env, value: c.napi_value) !bool {
    var global: c.napi_value = undefined;
    if (c.napi_get_global(env, &global) != .napi_ok) {
        return translate.throw(env, "Failed to get global object");
    }
    var date: c.napi_value = undefined;
    if (c.napi_get_named_property(env, global, "Date", &date) != .napi_ok) {
        return translate.throw(env, "Failed to get the Date constructor");
    }
    var result: bool = undefined;
    if (c.napi_instanceof(env, value, date, &result) != .napi_ok) {
        return translate.throw(env, "Failed to execute instanceof");
    }
    return result;
}

/// Call `globalThis.<objectName>.<methodName>(arg)` and read the result as a bool.
pub fn callBoolMethod(env: c.napi_env, comptime objectName: [:0]const u8, comptime methodName: [:0]const u8, arg: c.napi_value) !bool {
    var global: c.napi_value = undefined;
    if (c.napi_get_global(env, &global) != .napi_ok) {
        return translate.throw(env, "Failed to get global object");
    }
    var object: c.napi_value = undefined;
    if (c.napi_get_named_property(env, global, objectName, &object) != .napi_ok) {
        return translate.throw(env, "Failed to get the object");
    }
    var method: c.napi_value = undefined;
    if (c.napi_get_named_property(env, object, methodName, &method) != .napi_ok) {
        return translate.throw(env, "Failed to get the method on the object");
    }
    var returned: c.napi_value = undefined;
    if (c.napi_call_function(env, object, method, 1, &arg, &returned) != .napi_ok) {
        return translate.throw(env, "Failed to call the method on the object");
    }
    var result: bool = undefined;
    if (c.napi_get_value_bool(env, returned, &result) != .napi_ok) {
        return translate.throw(env, "Failed to get the return value");
    }
    return result;
}

/// Append `item` to the end of the JS array `arr` (equivalent of arr.push(item)).
pub fn arrayPush(env: c.napi_env, arr: c.napi_value, item: c.napi_value) !void {
    var length: u32 = undefined;
    if (c.napi_get_array_length(env, arr, &length) != .napi_ok) {
        return translate.throw(env, "Failed to get array length");
    }
    if (c.napi_set_element(env, arr, length, item) != .napi_ok) {
        return translate.throw(env, "Failed to set array element");
    }
}

/// Evaluate `Boolean(value)` — JS truthiness of an arbitrary value.
pub fn isTruthy(env: c.napi_env, value: c.napi_value) !bool {
    var global: c.napi_value = undefined;
    if (c.napi_get_global(env, &global) != .napi_ok) {
        return translate.throw(env, "Failed to get global object");
    }
    var booleanFn: c.napi_value = undefined;
    if (c.napi_get_named_property(env, global, "Boolean", &booleanFn) != .napi_ok) {
        return translate.throw(env, "Failed to get the Boolean constructor");
    }
    var returned: c.napi_value = undefined;
    if (c.napi_call_function(env, global, booleanFn, 1, &value, &returned) != .napi_ok) {
        return translate.throw(env, "Failed to call the Boolean constructor");
    }
    var result: bool = undefined;
    if (c.napi_get_value_bool(env, returned, &result) != .napi_ok) {
        return translate.throw(env, "Failed to get the return value of Boolean");
    }
    return result;
}

/// Strict-equality comparison of `value` against JS `null`.
pub fn isNull(env: c.napi_env, value: c.napi_value) !bool {
    var nullJS: c.napi_value = undefined;
    if (c.napi_get_null(env, &nullJS) != .napi_ok) {
        return translate.throw(env, "Failed to get the null value");
    }
    var result: bool = undefined;
    if (c.napi_strict_equals(env, value, nullJS, &result) != .napi_ok) {
        return translate.throw(env, "Failed to compare the value to null");
    }
    return result;
}

/// Strict-equality comparison of `value` against JS `undefined`.
pub fn isUndefined(env: c.napi_env, value: c.napi_value) !bool {
    var undefinedJS = try getUndefined(env);
    var result: bool = undefined;
    if (c.napi_strict_equals(env, value, undefinedJS, &result) != .napi_ok) {
        return translate.throw(env, "Failed to compare the value to undefined");
    }
    return result;
}

/// Fetch the JS `undefined` value.
pub fn getUndefined(env: c.napi_env) !c.napi_value {
    var undefinedJS: c.napi_value = undefined;
    if (c.napi_get_undefined(env, &undefinedJS) != .napi_ok) {
        return throw(env, "Failed to get the undefined value");
    }
    return undefinedJS;
}

/// Call `Buffer.byteLength(value)` and return the result as a u32.
pub fn bufferByteLength(env: c.napi_env, value: c.napi_value) !u32 {
    var global: c.napi_value = undefined;
    if (c.napi_get_global(env, &global) != .napi_ok) {
        return translate.throw(env, "Failed to get global object");
    }
    var bufferJS: c.napi_value = undefined;
    if (c.napi_get_named_property(env, global, "Buffer", &bufferJS) != .napi_ok) {
        return translate.throw(env, "Failed to get the Buffer constructor");
    }
    var byteLengthJS: c.napi_value = undefined;
    if (c.napi_get_named_property(env, bufferJS, "byteLength", &byteLengthJS) != .napi_ok) {
        return translate.throw(env, "Failed to get Buffer.byteLength");
    }
    var returned: c.napi_value = undefined;
    if (c.napi_call_function(env, bufferJS, byteLengthJS, 1, &value, &returned) != .napi_ok) {
        return translate.throw(env, "Failed to call Buffer.byteLength");
    }
    var result: u32 = undefined;
    if (c.napi_get_value_uint32(env, returned, &result) != .napi_ok) {
        return translate.throw(env, "Failed to get the return value of Buffer.byteLength");
    }
    return result;
}

/// Wrap a native u32 as a JS number.
pub fn u32ToJS(env: c.napi_env, value: u32) !c.napi_value {
    var result: c.napi_value = undefined;
    if (c.napi_create_uint32(env, value, &result) != .napi_ok) {
        return translate.throw(env, "Failed to create a uint32");
    }
    return result;
}

/// Wrap a native i32 as a JS number.
pub fn i32ToJS(env: c.napi_env, value: i32) !c.napi_value {
    var result: c.napi_value = undefined;
    if (c.napi_create_int32(env, value, &result) != .napi_ok) {
        return translate.throw(env, "Failed to create a int32");
    }
    return result;
}
src/helpers.zig
//! Behavior tests for the @bitReverse builtin across integer widths,
//! evaluated both at comptime and at runtime.
//! NOTE(review): uses pre-0.10 two-argument @bitReverse and the old `T(x)`
//! cast syntax — only compiles with the matching legacy compiler.
const std = @import("std");
const expect = std.testing.expect;
const minInt = std.math.minInt;

test "@bitReverse" {
    // Run the same assertions through the comptime and runtime code paths.
    comptime testBitReverse();
    testBitReverse();
}

fn testBitReverse() void {
    // using comptime_ints, unsigned
    expect(@bitReverse(u0, 0) == 0);
    expect(@bitReverse(u5, 0x12) == 0x9);
    expect(@bitReverse(u8, 0x12) == 0x48);
    expect(@bitReverse(u16, 0x1234) == 0x2c48);
    expect(@bitReverse(u24, 0x123456) == 0x6a2c48);
    expect(@bitReverse(u32, 0x12345678) == 0x1e6a2c48);
    expect(@bitReverse(u40, 0x123456789a) == 0x591e6a2c48);
    expect(@bitReverse(u48, 0x123456789abc) == 0x3d591e6a2c48);
    expect(@bitReverse(u56, 0x123456789abcde) == 0x7b3d591e6a2c48);
    expect(@bitReverse(u64, 0x123456789abcdef1) == 0x8f7b3d591e6a2c48);
    expect(@bitReverse(u128, 0x123456789abcdef11121314151617181) == 0x818e868a828c84888f7b3d591e6a2c48);

    // using runtime uints, unsigned
    var num0: u0 = 0;
    expect(@bitReverse(u0, num0) == 0);
    var num5: u5 = 0x12;
    expect(@bitReverse(u5, num5) == 0x9);
    var num8: u8 = 0x12;
    expect(@bitReverse(u8, num8) == 0x48);
    var num16: u16 = 0x1234;
    expect(@bitReverse(u16, num16) == 0x2c48);
    var num24: u24 = 0x123456;
    expect(@bitReverse(u24, num24) == 0x6a2c48);
    var num32: u32 = 0x12345678;
    expect(@bitReverse(u32, num32) == 0x1e6a2c48);
    var num40: u40 = 0x123456789a;
    expect(@bitReverse(u40, num40) == 0x591e6a2c48);
    var num48: u48 = 0x123456789abc;
    expect(@bitReverse(u48, num48) == 0x3d591e6a2c48);
    var num56: u56 = 0x123456789abcde;
    expect(@bitReverse(u56, num56) == 0x7b3d591e6a2c48);
    var num64: u64 = 0x123456789abcdef1;
    expect(@bitReverse(u64, num64) == 0x8f7b3d591e6a2c48);
    var num128: u128 = 0x123456789abcdef11121314151617181;
    expect(@bitReverse(u128, num128) == 0x818e868a828c84888f7b3d591e6a2c48);

    // using comptime_ints, signed, positive
    expect(@bitReverse(u8, u8(0)) == 0);
    expect(@bitReverse(i8, @bitCast(i8, u8(0x92))) == @bitCast(i8, u8(0x49)));
    expect(@bitReverse(i16, @bitCast(i16, u16(0x1234))) == @bitCast(i16, u16(0x2c48)));
    expect(@bitReverse(i24, @bitCast(i24, u24(0x123456))) == @bitCast(i24, u24(0x6a2c48)));
    expect(@bitReverse(i32, @bitCast(i32, u32(0x12345678))) == @bitCast(i32, u32(0x1e6a2c48)));
    expect(@bitReverse(i40, @bitCast(i40, u40(0x123456789a))) == @bitCast(i40, u40(0x591e6a2c48)));
    expect(@bitReverse(i48, @bitCast(i48, u48(0x123456789abc))) == @bitCast(i48, u48(0x3d591e6a2c48)));
    expect(@bitReverse(i56, @bitCast(i56, u56(0x123456789abcde))) == @bitCast(i56, u56(0x7b3d591e6a2c48)));
    expect(@bitReverse(i64, @bitCast(i64, u64(0x123456789abcdef1))) == @bitCast(i64, u64(0x8f7b3d591e6a2c48)));
    expect(@bitReverse(i128, @bitCast(i128, u128(0x123456789abcdef11121314151617181))) == @bitCast(i128, u128(0x818e868a828c84888f7b3d591e6a2c48)));

    // using signed, negative. Compare to runtime ints returned from llvm.
    var neg8: i8 = -18;
    expect(@bitReverse(i8, i8(-18)) == @bitReverse(i8, neg8));
    var neg16: i16 = -32694;
    expect(@bitReverse(i16, i16(-32694)) == @bitReverse(i16, neg16));
    var neg24: i24 = -6773785;
    expect(@bitReverse(i24, i24(-6773785)) == @bitReverse(i24, neg24));
    var neg32: i32 = -16773785;
    expect(@bitReverse(i32, i32(-16773785)) == @bitReverse(i32, neg32));
}
test/stage1/behavior/bitreverse.zig
//! Thin Zig wrapper around the Olin runtime's resource syscalls.
const errs = @import("./error.zig");
const OlinError = errs.OlinError;

extern fn resource_open(data: [*]const u8, len: usize) i32;
extern fn resource_read(fd: i32, data: [*]const u8, len: usize) i32;
extern fn resource_write(fd: i32, data: [*]const u8, len: usize) i32;
extern fn resource_close(fd: i32) void;
extern fn resource_flush(fd: i32) i32;
extern fn io_get_stdin() i32;
extern fn io_get_stdout() i32;
extern fn io_get_stderr() i32;

/// Turn a raw handle returned by the runtime into a Resource, converting
/// negative/error encodings into the corresponding OlinError.
fn fd_check(fd: i32) errs.OlinError!Resource {
    const fd_for_handle = try errs.parse(fd);
    return Resource{ .fd = fd_for_handle };
}

// Module-level shorthands for the Resource constructors.
pub const open = Resource.open;
pub const stdin = Resource.stdin;
pub const stdout = Resource.stdout;
pub const stderr = Resource.stderr;

/// A handle to an Olin runtime resource (file, stream, socket, ...).
pub const Resource = struct {
    fd: i32,

    /// Open the resource named by `url`.
    pub fn open(url: []const u8) !Resource {
        return fd_check(resource_open(url.ptr, url.len));
    }

    /// Wrap the process's standard input stream.
    pub fn stdin() !Resource {
        return fd_check(io_get_stdin());
    }

    /// Wrap the process's standard output stream.
    pub fn stdout() !Resource {
        return fd_check(io_get_stdout());
    }

    /// Wrap the process's standard error stream.
    pub fn stderr() !Resource {
        return fd_check(io_get_stderr());
    }

    /// Write `len` bytes from `data`; returns the runtime's count on success.
    pub fn write(self: Resource, data: [*]const u8, len: usize) OlinError!i32 {
        return try errs.parse(resource_write(self.fd, data, len));
    }

    /// Read up to `len` bytes into `data`; returns the runtime's count on success.
    pub fn read(self: Resource, data: [*]u8, len: usize) OlinError!i32 {
        return try errs.parse(resource_read(self.fd, data, len));
    }

    /// Release the handle. Deallocation cannot fail.
    pub fn close(self: Resource) void {
        resource_close(self.fd);
    }

    /// Flush buffered output, discarding the runtime's success value.
    pub fn flush(self: Resource) OlinError!void {
        _ = try errs.parse(resource_flush(self.fd));
    }
};
god/olin/resource.zig
//! Build step that amalgamates one or more C/header sources into a single
//! generated .c file (macro defines followed by #includes), written to a
//! content-addressed cache directory.
const std = @import("std");
const Self = @This();

step: std.build.Step,
builder: *std.build.Builder,
source: []std.build.FileSource,
defines: [][]const u8,
output_file: std.build.GeneratedFile,

/// Allocates a ZingleHeaderStep, caller owns memory
pub fn create(builder: *std.build.Builder, files: [][]const u8, defines: [][]const u8) *Self {
    var sources = builder.allocator.alloc(std.build.FileSource, files.len) catch unreachable;
    for (files) |v, i| {
        sources[i] = .{ .path = builder.allocator.dupe(u8, v) catch unreachable };
    }
    return createSource(builder, sources, defines);
}

/// Allocates a ZingleHeaderStep, caller owns memory
pub fn createSource(builder: *std.build.Builder, sources: []std.build.FileSource, defines: [][]const u8) *Self {
    const self = builder.allocator.create(Self) catch unreachable;
    // Bug fix: `defines` used to be duplicated twice (the first shallow copy
    // leaked) and the struct literal read `self.builder` before `self.*` was
    // initialized (use of undefined memory). Duplicate once, via the
    // `builder` argument.
    self.* = Self{
        .step = std.build.Step.init(.custom, "single-header", builder.allocator, make),
        .builder = builder,
        .source = sources,
        .defines = builder.dupeStrings(defines),
        .output_file = std.build.GeneratedFile{ .step = &self.step },
    };
    // Make sure any generated inputs are produced before this step runs.
    for (sources) |source| {
        source.addStepDependencies(&self.step);
    }
    return self;
}

/// Returns the generated amalgamated .c file as a FileSource.
pub fn getFileSource(self: *const Self) std.build.FileSource {
    return std.build.FileSource{ .generated = &self.output_file };
}

/// Step implementation: emit "#define ..." lines for every macro, then an
/// "#include" line per source, and write the result under the cache root.
fn make(step: *std.build.Step) !void {
    const self = @fieldParentPtr(Self, "step", step);

    const source_file_name = self.source[0].getPath(self.builder);
    const basename = std.fs.path.basename(source_file_name);

    // Output name is "<first source basename, extension replaced>.c".
    const output_name = blk: {
        if (std.mem.indexOf(u8, basename, ".")) |index| {
            break :blk try std.mem.join(self.builder.allocator, ".", &[_][]const u8{
                basename[0..index], "c",
            });
        } else {
            break :blk try std.mem.join(self.builder.allocator, ".", &[_][]const u8{
                basename, "c",
            });
        }
    };

    var output_buffer = std.ArrayList(u8).init(self.builder.allocator);
    defer output_buffer.deinit();

    var writer = output_buffer.writer();
    for (self.defines) |m| {
        try writer.print("#define {s}\n", .{m});
    }
    for (self.source) |s| {
        try writer.print("#include \"{s}\"\n", .{s.getPath(self.builder)});
    }

    // Hash a fixed salt, the base name and the generated contents so the
    // cache directory changes whenever any input changes.
    var hash = std.crypto.hash.blake2.Blake2b384.init(.{});
    hash.update("C9XVU4MxSDFZz2to");
    hash.update(basename);
    hash.update(output_buffer.items);
    var digest: [48]u8 = undefined;
    hash.final(&digest);
    var hash_basename: [64]u8 = undefined;
    _ = std.fs.base64_encoder.encode(&hash_basename, &digest);

    const output_dir = try std.fs.path.join(self.builder.allocator, &[_][]const u8{
        self.builder.cache_root,
        "o",
        &hash_basename,
    });

    var dir = std.fs.cwd().makeOpenPath(output_dir, .{}) catch |err| {
        std.debug.print("unable to make path {s}: {s}\n", .{ output_dir, @errorName(err) });
        return err;
    };
    // Fix: the directory handle was previously leaked.
    defer dir.close();

    self.output_file.path = try std.fs.path.join(self.builder.allocator, &[_][]const u8{
        output_dir, output_name,
    });

    dir.writeFile(output_name, output_buffer.items) catch |err| {
        std.debug.print("unable to write {s} into {s}: {s}\n", .{
            output_name, output_dir, @errorName(err),
        });
        return err;
    };
}

/// Allocates a ZingleHeaderStep, caller owns memory
pub fn addSingleHeaderFiles(exeLibObj: *std.build.LibExeObjStep, paths: [][]const u8, defines: [][]const u8, flags: [][]const u8) *Self {
    var zingle_step = create(exeLibObj.builder, paths, defines);
    // NOTE(review): make() is invoked eagerly here instead of via the step
    // graph — confirm this is intentional.
    zingle_step.step.make() catch unreachable;
    var cSource = std.build.CSourceFile{ .source = zingle_step.getFileSource(), .args = flags };
    exeLibObj.step.dependOn(&zingle_step.step);
    exeLibObj.addCSourceFileSource(cSource);
    return zingle_step;
}

/// Convenience wrapper for a single header file.
pub fn addSingleHeaderFile(exeLibObj: *std.build.LibExeObjStep, path: []const u8, defines: [][]const u8, flags: [][]const u8) *Self {
    return addSingleHeaderFiles(exeLibObj, &.{path}, defines, flags);
}

/// Releases all memory owned by this step.
pub fn free(self: *const Self) void {
    for (self.defines) |v| {
        self.builder.allocator.free(v);
    }
    // NOTE(review): assumes every FileSource is the `.path` variant; a
    // `.generated` source reaching here would read the wrong union field —
    // confirm callers never pass generated sources to createSource.
    for (self.source) |v| {
        self.builder.allocator.free(v.path);
    }
    self.builder.allocator.free(self.defines);
    self.builder.allocator.free(self.source);
    self.builder.allocator.destroy(self);
}
zingle_header.zig
const std = @import("std");
const stdx = @import("stdx");
const graphics = @import("graphics");
const Color = graphics.Color;
const ui = @import("../ui.zig");
const ZStack = ui.widgets.ZStack;

const log = stdx.log.scoped(.root);

const OverlayId = u32;

// Closure context handed to the overlay close callbacks.
const RootOverlayHandle = struct {
    root: *Root,
    overlay_id: OverlayId,
};

/// The Root widget allows the user's root widget to be wrapped by a container that can provide additional functionality such as modals and popovers.
pub const Root = struct {
    props: struct {
        user_root: ui.FrameId = ui.NullFrameId,
    },

    // Active overlays, oldest first; rendered above the user root in order.
    overlays: std.ArrayList(OverlayItem),
    // Scratch list of frames rebuilt on every build() call.
    build_buf: std.ArrayList(ui.FrameId),
    // Next overlay id to hand out; starts at 1.
    next_id: OverlayId,
    user_root: ui.NodeRef,

    const Self = @This();

    pub fn init(self: *Self, c: *ui.InitContext) void {
        self.overlays = std.ArrayList(OverlayItem).init(c.alloc);
        self.build_buf = std.ArrayList(ui.FrameId).init(c.alloc);
        self.next_id = 1;
    }

    pub fn deinit(node: *ui.Node, _: std.mem.Allocator) void {
        const self = node.getWidget(Self);
        self.overlays.deinit();
        self.build_buf.deinit();
    }

    /// Stacks the user's root frame underneath a wrapper per active overlay.
    pub fn build(self: *Self, c: *ui.BuildContext) ui.FrameId {
        // Reserve capacity up front so appendAssumeCapacity below cannot fail.
        self.build_buf.ensureTotalCapacity(1 + self.overlays.items.len) catch @panic("error");
        self.build_buf.items.len = 0;
        self.build_buf.appendAssumeCapacity(self.props.user_root);
        c.bindFrame(self.props.user_root, &self.user_root);

        const S = struct {
            fn popoverRequestClose(h: RootOverlayHandle) void {
                const item = h.root.getOverlay(h.overlay_id).?;
                if (item.close_cb) |cb| {
                    cb(item.close_ctx);
                }
                h.root.closePopover(h.overlay_id);
            }
            fn modalRequestClose(h: RootOverlayHandle) void {
                const item = h.root.getOverlay(h.overlay_id).?;
                if (item.close_cb) |cb| {
                    cb(item.close_ctx);
                }
                h.root.closeModal(h.overlay_id);
            }
        };

        // Build the overlay items.
        for (self.overlays.items) |overlay| {
            switch (overlay.tag) {
                .Popover => {
                    const frame_id = overlay.build_fn(overlay.build_ctx, c);
                    const wrapper = c.decl(PopoverOverlay, .{
                        .child = frame_id,
                        .src_node = overlay.src_node,
                        .onRequestClose = c.closure(RootOverlayHandle{ .root = self, .overlay_id = overlay.id }, S.popoverRequestClose),
                    });
                    self.build_buf.appendAssumeCapacity(wrapper);
                },
                .Modal => {
                    const frame_id = overlay.build_fn(overlay.build_ctx, c);
                    const wrapper = c.decl(ModalOverlay, .{
                        .child = frame_id,
                        .onRequestClose = c.closure(RootOverlayHandle{ .root = self, .overlay_id = overlay.id }, S.modalRequestClose),
                    });
                    self.build_buf.appendAssumeCapacity(wrapper);
                },
            }
        }

        // For now the user's root is the first child so it doesn't need a key.
        return c.decl(ZStack, .{
            .children = c.list(self.build_buf.items),
        });
    }

    /// Show a popover anchored to `src_widget`; returns an id usable with closePopover.
    pub fn showPopover(self: *Self, src_widget: *ui.Node, build_ctx: ?*anyopaque, build_fn: fn (?*anyopaque, *ui.BuildContext) ui.FrameId, opts: PopoverOptions) OverlayId {
        defer self.next_id += 1;
        _ = self.overlays.append(.{
            .id = self.next_id,
            .tag = .Popover,
            .build_ctx = build_ctx,
            .build_fn = build_fn,
            .close_ctx = opts.close_ctx,
            .close_cb = opts.close_cb,
            .src_node = src_widget,
        }) catch @panic("error");
        return self.next_id;
    }

    /// Show a modal; returns an id usable with closeModal.
    pub fn showModal(self: *Self, build_ctx: ?*anyopaque, build_fn: fn (?*anyopaque, *ui.BuildContext) ui.FrameId, opts: ModalOptions) OverlayId {
        defer self.next_id += 1;
        _ = self.overlays.append(.{
            .id = self.next_id,
            .tag = .Modal,
            .build_ctx = build_ctx,
            .build_fn = build_fn,
            .close_ctx = opts.close_ctx,
            .close_cb = opts.close_cb,
            // Modals have no anchor widget.
            .src_node = undefined,
        }) catch @panic("error");
        return self.next_id;
    }

    fn getOverlay(self: Self, id: OverlayId) ?OverlayItem {
        for (self.overlays.items) |it| {
            if (it.id == id) {
                return it;
            }
        }
        return null;
    }

    pub fn closePopover(self: *Self, id: OverlayId) void {
        for (self.overlays.items) |it, i| {
            if (it.tag == .Popover and it.id == id) {
                _ = self.overlays.orderedRemove(i);
                break;
            }
        }
    }

    pub fn closeModal(self: *Self, id: OverlayId) void {
        for (self.overlays.items) |it, i| {
            if (it.tag == .Modal and it.id == id) {
                _ = self.overlays.orderedRemove(i);
                break;
            }
        }
    }
};

const ModalOptions = struct {
    close_ctx: ?*anyopaque = null,
    close_cb: ?fn (?*anyopaque) void = null,
};

const PopoverOptions = struct {
    close_ctx: ?*anyopaque = null,
    close_cb: ?fn (?*anyopaque) void = null,
};

const OverlayItem = struct {
    id: OverlayId,
    build_ctx: ?*anyopaque,
    build_fn: fn (?*anyopaque, *ui.BuildContext) ui.FrameId,
    close_ctx: ?*anyopaque,
    close_cb: ?fn (?*anyopaque) void,
    tag: OverlayTag,

    // Used for popovers.
    src_node: *ui.Node,
};

const OverlayTag = enum(u1) {
    Popover = 0,
    Modal = 1,
};

/// An overlay that positions the child modal in a specific alignment over the overlay bounds.
/// Clicking outside of the child modal will close the modal.
pub const ModalOverlay = struct {
    props: struct {
        child: ui.FrameId,
        valign: ui.VAlign = .Center,
        halign: ui.HAlign = .Center,
        border_color: Color = Color.DarkGray,
        bg_color: Color = Color.DarkGray.darker(),
        onRequestClose: ?stdx.Function(fn () void) = null,
    },

    const Self = @This();

    pub fn init(self: *Self, c: *ui.InitContext) void {
        _ = self;
        c.addMouseDownHandler(self, onMouseDown);
    }

    fn onMouseDown(self: *Self, e: ui.MouseDownEvent) ui.EventResult {
        if (self.props.child != ui.NullFrameId) {
            const child = e.ctx.node.children.items[0];
            const xf = @intToFloat(f32, e.val.x);
            const yf = @intToFloat(f32, e.val.y);
            // If hit outside of the bounds, request to close.
            if (xf < child.abs_pos.x or xf > child.abs_pos.x + child.layout.width or
                yf < child.abs_pos.y or yf > child.abs_pos.y + child.layout.height)
            {
                self.requestClose();
            }
        }
        return .Continue;
    }

    pub fn requestClose(self: *Self) void {
        if (self.props.onRequestClose) |cb| {
            cb.call(.{});
        }
    }

    pub fn build(self: *Self, _: *ui.BuildContext) ui.FrameId {
        return self.props.child;
    }

    pub fn layout(self: *Self, c: *ui.LayoutContext) ui.LayoutSize {
        const cstr = c.getSizeConstraint();
        if (self.props.child != ui.NullFrameId) {
            const child = c.node.children.items[0];
            const child_size = c.computeLayout(child, cstr);
            // Currently always centers.
            c.setLayout(child, ui.Layout.init(
                (cstr.width - child_size.width) * 0.5,
                (cstr.height - child_size.height) * 0.5,
                child_size.width,
                child_size.height,
            ));
        }
        return cstr;
    }

    pub fn renderCustom(self: *Self, c: *ui.RenderContext) void {
        if (self.props.child != ui.NullFrameId) {
            const alo = c.getAbsLayout();
            const child_lo = c.node.children.items[0].layout;
            const child_x = alo.x + child_lo.x;
            const child_y = alo.y + child_lo.y;
            const g = c.g;
            // Background fill, then the child content, then the border on top.
            g.setFillColor(self.props.bg_color);
            g.fillRect(child_x, child_y, child_lo.width, child_lo.height);
            c.renderChildren();
            g.setStrokeColor(self.props.border_color);
            g.setLineWidth(2);
            g.drawRect(child_x, child_y, child_lo.width, child_lo.height);
        }
    }
};

/// An overlay that positions the child popover adjacent to a source widget.
/// Clicking outside of the child popover will close the popover.
pub const PopoverOverlay = struct {
    props: struct {
        child: ui.FrameId,
        src_node: *ui.Node,
        border_color: Color = Color.DarkGray,
        bg_color: Color = Color.DarkGray.darker(),
        onRequestClose: ?stdx.Function(fn () void) = null,
    },

    // Set by layout(): whether the popover was placed left of the source.
    to_left: bool,

    /// Allow a custom post render. For child popovers that want to draw over the border.
    custom_post_render_ctx: ?*anyopaque,
    custom_post_render: ?fn (?*anyopaque, ctx: *ui.RenderContext) void,

    const Self = @This();

    const MarginFromSource = 20;
    const ArrowSize = 30;

    pub fn init(self: *Self, c: *ui.InitContext) void {
        _ = self;
        self.custom_post_render = null;
        self.custom_post_render_ctx = null;
        c.addMouseDownHandler(self, onMouseDown);
    }

    fn onMouseDown(self: *Self, e: ui.MouseDownEvent) ui.EventResult {
        if (self.props.child != ui.NullFrameId) {
            const child = e.ctx.node.children.items[0];
            const xf = @intToFloat(f32, e.val.x);
            const yf = @intToFloat(f32, e.val.y);
            // If hit outside of the bounds, request to close.
            if (xf < child.abs_pos.x or xf > child.abs_pos.x + child.layout.width or
                yf < child.abs_pos.y or yf > child.abs_pos.y + child.layout.height)
            {
                self.requestClose();
            }
        }
        return .Continue;
    }

    pub fn requestClose(self: *Self) void {
        if (self.props.onRequestClose) |cb| {
            cb.call(.{});
        }
    }

    pub fn build(self: *Self, _: *ui.BuildContext) ui.FrameId {
        return self.props.child;
    }

    pub fn layout(self: *Self, c: *ui.LayoutContext) ui.LayoutSize {
        const cstr = c.getSizeConstraint();
        if (self.props.child != ui.NullFrameId) {
            const child = c.node.children.items[0];
            const child_size = c.computeLayout(child, cstr);
            // Position relative to source widget. Source widget layout should already be computed.
            const src_abs_pos = self.props.src_node.computeCurrentAbsPos();
            if (src_abs_pos.x > cstr.width * 0.5) {
                // Display popover to the left.
                c.setLayout(child, ui.Layout.init(
                    src_abs_pos.x - child_size.width - MarginFromSource,
                    src_abs_pos.y,
                    child_size.width,
                    child_size.height,
                ));
                self.to_left = true;
            } else {
                // Display popover to the right.
                c.setLayout(child, ui.Layout.init(
                    src_abs_pos.x + self.props.src_node.layout.width + MarginFromSource,
                    src_abs_pos.y,
                    child_size.width,
                    child_size.height,
                ));
                self.to_left = false;
            }
        }
        return cstr;
    }

    pub fn renderCustom(self: *Self, c: *ui.RenderContext) void {
        if (self.props.child != ui.NullFrameId) {
            const alo = c.getAbsLayout();
            const child_lo = c.node.children.items[0].layout;
            const child_x = alo.x + child_lo.x;
            const child_y = alo.y + child_lo.y;
            const g = c.g;
            g.setFillColor(self.props.bg_color);
            g.fillRect(child_x, child_y, child_lo.width, child_lo.height);
            // Arrow pointing at the source widget, on the side facing it.
            if (self.to_left) {
                g.fillTriangle(child_x + child_lo.width, child_y, child_x + child_lo.width, child_y + ArrowSize, child_x + child_lo.width + ArrowSize / 2, child_y + ArrowSize / 2);
            } else {
                g.fillTriangle(child_x, child_y, child_x - ArrowSize / 2, child_y + ArrowSize / 2, child_x, child_y + ArrowSize);
            }
            c.renderChildren();
            g.setStrokeColor(self.props.border_color);
            g.setLineWidth(2);
            g.drawRect(child_x, child_y, child_lo.width, child_lo.height);
        }
        if (self.custom_post_render) |cb| {
            cb(self.custom_post_render_ctx, c);
        }
    }
};
ui/src/widgets/root.zig
//! CHIP-8 virtual machine: registers, timers, screen buffer and the
//! fetch/decode/execute loop. Memory is owned by the caller and passed in.
const std = @import("std");
const debug = std.log.debug;
const warn = std.log.warn;
const assert = std.debug.assert;
const times = @import("times.zig");

const SCREEN_WIDTH = 64;
const SCREEN_HEIGHT = 32;
pub const MEM_SIZE = 4096;

const MachineError = error{
    InvalidRegister,
    UnknownInstruction,
};

pub const CpuState = enum {
    Running,
    WaitingForKey,
};

pub const Machine = struct {
    // One byte per pixel; a pixel is either 0 (off) or 1 (on).
    screen: [SCREEN_HEIGHT][SCREEN_WIDTH]u8 = undefined,
    cpu_state: CpuState = .Running,
    // Programs are conventionally loaded at 0x200.
    pc: u16 = 0x200,
    index: u16 = 0,
    vregs: [16]u8,
    stack: [16]u16,
    sp: u8 = 0,
    delay_timer: u8 = 0,
    sound_timer: u8 = 0,
    // Last time (ms) the 60Hz timers ticked; 0 means "not started yet".
    timestamp: i64 = 0,
    key_down: [16]bool = [_]bool{false} ** 16,
    // Register that receives the pressed key while in .WaitingForKey (FX0A).
    read_key_reg: u8 = 0,
    rnd: std.rand.Xoshiro256,

    /// Create a machine with zeroed registers/stack and a cleared screen.
    pub fn init() Machine {
        var machine = Machine{
            .vregs = std.mem.zeroes([16]u8),
            .stack = std.mem.zeroes([16]u16),
            .rnd = std.rand.DefaultPrng.init(0),
        };
        machine.clearScreen();
        return machine;
    }

    /// Log PC, index register and all V registers (debug aid).
    pub fn dumpState(self: *Machine) void {
        debug("PC={X:0>4}\nINDEX={X:0>4}", .{ self.pc, self.index });
        for (self.vregs) |val, i| {
            debug("V{X}={X:0>2}", .{ i, val });
        }
    }

    /// Tick the delay/sound timers at roughly 60Hz (every >16ms of wall
    /// time). Returns true exactly when the sound timer transitions to 0,
    /// which the caller can use to stop the beep.
    pub fn updateTimers(self: *Machine) bool {
        if (self.timestamp == 0) {
            // First call: just record the epoch, don't tick yet.
            self.timestamp = std.time.milliTimestamp();
            return false;
        }
        const now = std.time.milliTimestamp();
        if ((now - self.timestamp) > 16) {
            self.timestamp = now;
            if (self.delay_timer > 0) {
                self.delay_timer -= 1;
            }
            if (self.sound_timer > 0) {
                self.sound_timer -= 1;
                if (self.sound_timer == 0) {
                    return true;
                }
            }
        }
        return false;
    }

    /// Record a key state change; wakes the CPU if it was blocked on FX0A.
    pub fn updateKey(self: *Machine, key: u8, is_down: bool) void {
        debug("Update key: {} {}", .{ key, is_down });
        assert(key < 16);
        if (self.cpu_state == CpuState.WaitingForKey and self.key_down[key] != is_down) {
            self.cpu_state = CpuState.Running;
            self.vregs[self.read_key_reg] = key;
        }
        self.key_down[key] = is_down;
    }

    /// Read the big-endian 16-bit opcode stored at `pc`.
    fn fetchOpcode(self: *Machine, mem: *[MEM_SIZE]u8, pc: u16) u16 {
        _ = self;
        const b1: u8 = mem[pc];
        const b2: u8 = mem[pc + 1];
        return (@as(u16, b1) << 8) | b2;
    }

    /// Zero the whole screen buffer (opcode 00E0).
    fn clearScreen(self: *Machine) void {
        var y: u8 = 0;
        while (y < SCREEN_HEIGHT) : (y += 1) {
            self.screen[y] = std.mem.zeroes([SCREEN_WIDTH]u8);
        }
    }

    pub fn getScreen(self: *Machine) *[SCREEN_HEIGHT][SCREEN_WIDTH]u8 {
        return &self.screen;
    }

    /// VX = VX + VY with wrap-around; VF = 1 on carry, else 0 (opcode 8XY4).
    fn addAndFlag(self: *Machine, xreg: u8, yreg: u8) void {
        var result: u8 = undefined;
        if (@addWithOverflow(u8, self.vregs[xreg], self.vregs[yreg], &result)) {
            self.vregs[0xf] = 1;
        } else {
            self.vregs[0xf] = 0;
        }
        self.vregs[xreg] = result;
    }

    /// VX = minuend - subtrahend with wrap-around; VF = 1 when no borrow
    /// occurred. Shared by 8XY5 and 8XY7 with swapped operands.
    fn subAndFlag(self: *Machine, xreg: u8, minuend_reg: u8, subtrahend_reg: u8) void {
        var result: u8 = undefined;
        if (!@subWithOverflow(u8, self.vregs[minuend_reg], self.vregs[subtrahend_reg], &result)) {
            self.vregs[0xf] = 1;
        } else {
            self.vregs[0xf] = 0;
        }
        self.vregs[xreg] = result;
    }

    /// Shift VX left or right by one; VF receives the shifted-out bit.
    /// Opcodes 8XY6 / 8XYE — modern (SUPER-CHIP) behavior: VY is ignored.
    fn checkBitAndShift(self: *Machine, xreg: u8, yreg: u8, left: bool) void {
        _ = yreg;
        //self.vregs[xreg] = self.vregs[yreg];
        var bit: u8 = undefined;
        if (left) {
            bit = 0x80;
        } else {
            bit = 0x1;
        }
        if (self.vregs[xreg] & bit == bit) {
            self.vregs[0xf] = 1;
        } else {
            self.vregs[0xf] = 0;
        }
        if (left) {
            self.vregs[xreg] = self.vregs[xreg] << 1;
        } else {
            self.vregs[xreg] = self.vregs[xreg] >> 1;
        }
    }

    /// Dispatch the 8XYN ALU group on the low nibble `op`.
    fn doArithmetic(self: *Machine, op: u16, xreg: u8, yreg: u8) MachineError!void {
        switch (op) {
            0 => self.vregs[xreg] = self.vregs[yreg],
            1 => self.vregs[xreg] |= self.vregs[yreg],
            2 => self.vregs[xreg] &= self.vregs[yreg],
            3 => self.vregs[xreg] ^= self.vregs[yreg],
            4 => self.addAndFlag(xreg, yreg),
            5 => self.subAndFlag(xreg, xreg, yreg),
            6 => self.checkBitAndShift(xreg, yreg, false),
            7 => self.subAndFlag(xreg, yreg, xreg),
            0xe => self.checkBitAndShift(xreg, yreg, true),
            else => {
                debug("unknown 0x8XXX instruction: 0x8X{X:0>2}", .{op});
                return MachineError.UnknownInstruction;
            },
        }
    }

    /// DXYN: XOR-draw an 8-wide, `npix`-tall sprite at (VX & 63, VY & 31),
    /// reading sprite rows from mem[index..]. VF = 1 when any pixel is
    /// erased (collision). Drawing clips at the right/bottom screen edges.
    fn doDraw(self: *Machine, mem: *[MEM_SIZE]u8, npix: u8, xreg: u8, yreg: u8) void {
        self.vregs[0xf] = 0;
        debug("npix={}", .{npix});
        var y = self.vregs[yreg] & 31;
        var n: u8 = 0;
        while (n < npix) : (n += 1) {
            var x = self.vregs[xreg] & 63;
            var sprite: u8 = mem[self.index + n];
            var bit: u8 = 0;
            while (bit < 8) : (bit += 1) {
                debug("x={}, y={}", .{ x, y });
                // Test the MSB; the sprite byte is shifted left each step.
                if ((sprite & 0x80) == 0x80) {
                    if (self.screen[y][x] == 1) {
                        debug("  OFF", .{});
                        self.screen[y][x] = 0;
                        self.vregs[0xf] = 1;
                    } else {
                        debug("  ON", .{});
                        self.screen[y][x] = 1;
                    }
                } else {
                    debug("  empty", .{});
                }
                sprite <<= 1;
                x += 1;
                if (x == SCREEN_WIDTH) break;
            }
            y += 1;
            if (y == SCREEN_HEIGHT) break;
        }
    }

    /// Extract the X register nibble (bits 8..11) of an instruction.
    fn getRegisterX(self: *Machine, inst: u16) MachineError!u8 {
        _ = self;
        const reg: u8 = @intCast(u8, (inst & 0x0f00) >> 8);
        if (reg > 0xf) {
            return MachineError.InvalidRegister;
        }
        return reg;
    }

    /// Extract the Y register nibble (bits 4..7) of an instruction.
    fn getRegisterY(self: *Machine, inst: u16) MachineError!u8 {
        _ = self;
        const reg: u8 = @intCast(u8, (inst & 0x00f0) >> 4);
        if (reg > 0xf) {
            return MachineError.InvalidRegister;
        }
        return reg;
    }

    /// Fetch the opcode at PC, advance PC and execute it.
    /// Returns the simulated cycle cost of the instruction (see times.zig).
    /// NOTE: opcode group 0xBNNN (jump to NNN+V0) is not implemented and
    /// reports UnknownInstruction.
    pub fn fetchAndExec(self: *Machine, mem: *[MEM_SIZE]u8) MachineError!u32 {
        const inst: u16 = self.fetchOpcode(mem, self.pc);
        self.pc += 2;
        debug("inst={x:0>4}", .{inst});
        var inst_cost: u32 = 0;
        switch (inst) {
            0x00e0 => {
                debug("clear screen", .{});
                self.clearScreen();
                inst_cost = times.CLEAR_SCREEN;
            },
            0x00ee => {
                debug("return", .{});
                self.sp -= 1;
                self.pc = self.stack[self.sp];
                inst_cost = times.RETURN;
            },
            0x1000...0x1fff => {
                debug("jump", .{});
                const new_pc: u16 = inst & 0x0fff;
                assert(new_pc < 4096);
                self.pc = new_pc;
                inst_cost = times.JUMP;
            },
            0x2000...0x2fff => {
                debug("call 2x", .{});
                const new_pc: u16 = inst & 0x0fff;
                assert(new_pc < 4096);
                self.stack[self.sp] = self.pc;
                self.sp += 1;
                self.pc = new_pc;
                inst_cost = times.CALL;
            },
            0x3000...0x3fff => {
                // Skip next instruction if VX == NN.
                debug("skip 3x", .{});
                const xreg = try self.getRegisterX(inst);
                if (self.vregs[xreg] == @intCast(u8, inst & 0x00ff)) {
                    self.pc += 2;
                }
                inst_cost = times.SKIP_3X;
            },
            0x4000...0x4fff => {
                // Skip next instruction if VX != NN.
                debug("skip 4x", .{});
                const xreg = try self.getRegisterX(inst);
                if (self.vregs[xreg] != @intCast(u8, inst & 0x00ff)) {
                    self.pc += 2;
                }
                inst_cost = times.SKIP_4X;
            },
            0x5000...0x5fff => {
                // Skip next instruction if VX == VY.
                debug("skip 5x", .{});
                const xreg = try self.getRegisterX(inst);
                const yreg = try self.getRegisterY(inst);
                if (self.vregs[xreg] == self.vregs[yreg]) {
                    self.pc += 2;
                }
                inst_cost = times.SKIP_5X;
            },
            0x6000...0x6fff => {
                debug("set register", .{});
                const xreg = try self.getRegisterX(inst);
                self.vregs[xreg] = @intCast(u8, inst & 0x00ff);
                inst_cost = times.SET_REGISTER;
            },
            0x7000...0x7fff => {
                // VX += NN; wraps around, VF untouched (per spec).
                debug("add", .{});
                const xreg: u16 = (inst & 0x0f00) >> 8;
                var result: u8 = undefined;
                _ = @addWithOverflow(u8, self.vregs[xreg], @intCast(u8, inst & 0x00ff), &result);
                self.vregs[xreg] = result;
                inst_cost = times.ADD;
            },
            0x8000...0x8fff => {
                debug("arithmetic", .{});
                const op: u16 = (inst & 0xf);
                const xreg = try self.getRegisterX(inst);
                const yreg = try self.getRegisterY(inst);
                try self.doArithmetic(op, xreg, yreg);
                inst_cost = times.ARITHMETIC;
            },
            0x9000...0x9fff => {
                // Skip next instruction if VX != VY.
                debug("skip 9x", .{});
                const xreg = try self.getRegisterX(inst);
                const yreg = try self.getRegisterY(inst);
                if (self.vregs[xreg] != self.vregs[yreg]) {
                    self.pc += 2;
                }
                inst_cost = times.SKIP_9X;
            },
            0xa000...0xafff => {
                debug("set index", .{});
                self.index = inst & 0x0fff;
                inst_cost = times.SET_INDEX;
            },
            0xc000...0xcfff => {
                // VX = random byte AND NN.
                debug("random", .{});
                const xreg = try self.getRegisterX(inst);
                var number: u8 = @intCast(u8, inst & 0x00ff);
                const randnum: u8 = self.rnd.random().int(u8);
                number &= randnum;
                self.vregs[xreg] = number;
                inst_cost = times.GET_RANDOM;
            },
            0xd000...0xdfff => {
                debug("draw screen", .{});
                const npix = @intCast(u8, inst & 0x000f);
                const xreg = try self.getRegisterX(inst);
                const yreg = try self.getRegisterY(inst);
                self.doDraw(mem, npix, xreg, yreg);
                inst_cost = times.DRAW;
            },
            0xe000...0xefff => {
                // EX9E / EXA1: skip depending on the key named by VX.
                debug("check key", .{});
                const op = @intCast(u8, inst & 0x00ff);
                const xreg = try self.getRegisterX(inst);
                const key = self.vregs[xreg];
                assert(key < 16);
                switch (op) {
                    0x9e => {
                        if (self.key_down[key]) {
                            self.pc += 2;
                        }
                    },
                    0xa1 => {
                        if (!self.key_down[key]) {
                            self.pc += 2;
                        }
                    },
                    else => {
                        warn("unknown 0xEXXX instruction: 0x{X:0>4}", .{inst});
                        return MachineError.UnknownInstruction;
                    },
                }
                inst_cost = times.CHECK_KEY;
            },
            0xf000...0xffff => {
                const op = inst & 0xff;
                const xreg = try self.getRegisterX(inst);
                switch (op) {
                    0x07 => {
                        debug("get delay timer", .{});
                        self.vregs[xreg] = self.delay_timer;
                        inst_cost = times.GET_DELAY_TIMER;
                    },
                    0x0a => {
                        // Block until updateKey() delivers a key press.
                        debug("get key", .{});
                        self.read_key_reg = xreg;
                        self.cpu_state = CpuState.WaitingForKey;
                        inst_cost = times.GET_KEY;
                    },
                    0x15 => {
                        debug("set delay timer", .{});
                        self.delay_timer = self.vregs[xreg];
                        inst_cost = times.SET_DELAY_TIMER;
                    },
                    0x18 => {
                        debug("set sound timer", .{});
                        self.sound_timer = self.vregs[xreg];
                        inst_cost = times.SET_SOUND_TIMER;
                    },
                    0x1e => {
                        // index += VX, wrapping on overflow.
                        var result: u16 = undefined;
                        _ = @addWithOverflow(u16, self.index, @as(u16, self.vregs[xreg]), &result);
                        self.index = result;
                        inst_cost = times.ADD_TO_INDEX;
                    },
                    0x29 => {
                        // Each built-in font glyph is 5 bytes, starting at 0.
                        debug("set font", .{});
                        self.index = self.vregs[xreg] * 5;
                        inst_cost = times.SET_FONT;
                    },
                    0x33 => {
                        // Store BCD of VX at mem[index..index+2].
                        var number = self.vregs[xreg];
                        debug("number={}", .{number});
                        mem[self.index] = number / 100;
                        number = number % 100;
                        mem[self.index + 1] = number / 10;
                        mem[self.index + 2] = number % 10;
                        debug("  mem={} {} {}", .{ mem[self.index], mem[self.index + 1], mem[self.index + 2] });
                        inst_cost = times.BCD;
                    },
                    0x55 => {
                        // Store V0..VX (inclusive) to mem[index..].
                        var i: u8 = 0;
                        while (i <= xreg) : (i += 1) {
                            mem[self.index + i] = self.vregs[i];
                        }
                        inst_cost = times.STORE_MEM;
                    },
                    0x65 => {
                        // Load V0..VX (inclusive) from mem[index..].
                        var i: u8 = 0;
                        while (i <= xreg) : (i += 1) {
                            self.vregs[i] = mem[self.index + i];
                        }
                        inst_cost = times.LOAD_MEM;
                    },
                    else => {
                        warn("unknown 0xFXXX instruction: 0x{X:0>4}", .{inst});
                        return MachineError.UnknownInstruction;
                    },
                }
            },
            else => {
                // BUGFIX: previously logged mem[self.pc] — a single byte at
                // pc+2 (pc was already advanced) — instead of the opcode.
                warn("unknown instruction 0x{X:0>4}", .{inst});
                return MachineError.UnknownInstruction;
            },
        }
        return inst_cost;
    }
};
src/machine.zig
//! Advent of Code 2020 day 8: find the single nop<->jmp swap that makes the
//! hand-held console program terminate instead of looping forever.
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const print = std.debug.print;

const OpType = enum {
    nop,
    acc,
    jmp,
};

const Op = struct {
    t: OpType,
    value: i32,
};

pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    var alloc = &arena.allocator;

    // Parse "op ±value" lines into the instruction list.
    var instructions = std.ArrayList(Op).init(alloc);
    // var lines = std.mem.split(@embedFile("../inputs/day08_sample.txt"), "\n");
    var lines = std.mem.split(@embedFile("../inputs/day08.txt"), "\n");
    while (lines.next()) |line| {
        var trimmed = std.mem.trim(u8, line, &std.ascii.spaces);
        if (trimmed.len == 0) break;
        var parts = std.mem.tokenize(trimmed, " ");
        try instructions.append(.{
            .t = std.meta.stringToEnum(OpType, parts.next().?).?,
            .value = try std.fmt.parseInt(i32, parts.next().?, 10),
        });
    }

    // Visited-flags for loop detection, one per instruction.
    var seen = std.ArrayList(bool).init(alloc);
    try seen.resize(instructions.items.len);
    // BUGFIX: ArrayList.resize leaves the new elements undefined; the first
    // candidate run below read them before any reset. Zero them up front.
    std.mem.set(bool, seen.items, false);

    var tries: usize = 0;
    // Try flipping each nop<->jmp in turn and simulate until the program
    // either loops (revisits an instruction) or runs off the end.
    for (instructions.items) |*x| {
        const og_type = x.t;
        x.t = switch (og_type) {
            .nop => .jmp,
            .jmp => .nop,
            else => continue, // acc is never the corrupted instruction
        };
        defer x.t = og_type; // restore the flip before trying the next one

        var acc: i32 = 0;
        var pc: i32 = 0;
        tries += 1;
        var looped = while (pc < instructions.items.len) {
            var i = @intCast(usize, pc);
            var op = &instructions.items[i];
            if (seen.items[i]) {
                break true; // second visit => infinite loop
            }
            seen.items[i] = true;
            switch (op.t) {
                .nop => pc += 1,
                .acc => {
                    acc += op.value;
                    pc += 1;
                },
                .jmp => pc += op.value,
            }
        } else false; // fell off the end: program terminated normally

        if (!looped) {
            print("solution after {} tries: acc={}\n", .{ tries, acc });
            break;
        }
        // Reset visited flags for the next candidate.
        std.mem.set(bool, seen.items, false);
    }
}
src/day08.zig
//! Unit tests for zigimg color conversions to the generic Color /
//! IntegerColor8 representations.
const assert = @import("std").debug.assert;
const testing = @import("std").testing;
const color = @import("zigimg").color;
const helpers = @import("helpers.zig");

test "Convert color to premultiplied alpha" {
    // NOTE(review): 100 / 255 etc. are comptime *integer* divisions and all
    // evaluate to 0, so this test compares 0 with 0 on every channel. It
    // passes, but vacuously — confirm whether float literals were intended.
    const originalColor = color.Color.initRGBA(100 / 255, 128 / 255, 210 / 255, 100 / 255);
    const premultipliedAlpha = originalColor.premultipliedAlpha();

    try helpers.expectEq(premultipliedAlpha.R, 39 / 255);
    try helpers.expectEq(premultipliedAlpha.G, 50 / 255);
    try helpers.expectEq(premultipliedAlpha.B, 82 / 255);
    try helpers.expectEq(premultipliedAlpha.A, 100 / 255);
}

test "Convert Rgb24 to Color" {
    const originalColor = color.Rgb24.initRGB(100, 128, 210);
    const result = originalColor.toColor().toIntegerColor8();

    try helpers.expectEq(result.R, 100);
    try helpers.expectEq(result.G, 128);
    try helpers.expectEq(result.B, 210);
    try helpers.expectEq(result.A, 255); // opaque: no alpha channel in source
}

test "Convert Rgba32 to Color" {
    const originalColor = color.Rgba32.initRGBA(1, 2, 3, 4);
    const result = originalColor.toColor().toIntegerColor8();

    try helpers.expectEq(result.R, 1);
    try helpers.expectEq(result.G, 2);
    try helpers.expectEq(result.B, 3);
    try helpers.expectEq(result.A, 4);
}

test "Convert Rgb565 to Color" {
    // 5/6/5-bit channels get rescaled to the full 0..255 range.
    const originalColor = color.Rgb565.initRGB(10, 30, 20);
    const result = originalColor.toColor().toIntegerColor8();

    try helpers.expectEq(result.R, 82);
    try helpers.expectEq(result.G, 121);
    try helpers.expectEq(result.B, 165);
    try helpers.expectEq(result.A, 255);
}

test "Convert Rgb555 to Color" {
    // 5-bit channels get rescaled to the full 0..255 range.
    const originalColor = color.Rgb555.initRGB(16, 20, 24);
    const result = originalColor.toColor().toIntegerColor8();

    try helpers.expectEq(result.R, 132);
    try helpers.expectEq(result.G, 165);
    try helpers.expectEq(result.B, 197);
    try helpers.expectEq(result.A, 255);
}

test "Convert Bgra32 to Color" {
    // initRGBA takes R,G,B,A order even though storage is BGRA.
    const originalColor = color.Bgra32.initRGBA(50, 100, 150, 200);
    const result = originalColor.toColor().toIntegerColor8();

    try helpers.expectEq(result.R, 50);
    try helpers.expectEq(result.G, 100);
    try helpers.expectEq(result.B, 150);
    try helpers.expectEq(result.A, 200);
}

test "Convert Grayscale1 to Color" {
    const white = color.Grayscale1{ .value = 1 };
    const whiteColor = white.toColor().toIntegerColor8();

    try helpers.expectEq(whiteColor.R, 255);
    try helpers.expectEq(whiteColor.G, 255);
    try helpers.expectEq(whiteColor.B, 255);
    try helpers.expectEq(whiteColor.A, 255);

    const black = color.Grayscale1{ .value = 0 };
    const blackColor = black.toColor().toIntegerColor8();

    try helpers.expectEq(blackColor.R, 0);
    try helpers.expectEq(blackColor.G, 0);
    try helpers.expectEq(blackColor.B, 0);
    try helpers.expectEq(blackColor.A, 255);
}

test "Convert Grayscale8 to Color" {
    const original = color.Grayscale8{ .value = 128 };
    const result = original.toColor().toIntegerColor8();

    try helpers.expectEq(result.R, 128);
    try helpers.expectEq(result.G, 128);
    try helpers.expectEq(result.B, 128);
    try helpers.expectEq(result.A, 255);
}

test "Convert Grayscale16 to Color" {
    // 21845 = 0x5555; scales down to 85 = 0x55 in 8-bit.
    const original = color.Grayscale16{ .value = 21845 };
    const result = original.toColor().toIntegerColor8();

    try helpers.expectEq(result.R, 85);
    try helpers.expectEq(result.G, 85);
    try helpers.expectEq(result.B, 85);
    try helpers.expectEq(result.A, 255);
}

test "From HTML hex to IntegerColor" {
    const actual = color.IntegerColor8.fromHtmlHex(0x876347);

    try helpers.expectEq(actual.R, 0x87);
    try helpers.expectEq(actual.G, 0x63);
    try helpers.expectEq(actual.B, 0x47);
    try helpers.expectEq(actual.A, 0xFF);
}
tests/color_test.zig
//! Advent of Code 2021 day 11: simulate a 10x10 grid of flashing octopuses.
const std = @import("std");
const helper = @import("helper.zig");
const Allocator = std.mem.Allocator;

const input = @embedFile("../inputs/day11.txt");
const grid_size = 10;

/// Entry point: part 1 counts flashes over 100 steps; part 2 finds the
/// first step on which the whole grid flashes simultaneously.
/// `stdout_` is an optional writer; pass null to suppress output.
pub fn run(alloc: Allocator, stdout_: anytype) !void {
    _ = alloc;
    var grid = OctoGrid.init(input);
    // Copy by value so part 2 starts from the unmodified initial state.
    var grid2 = grid;
    const res1 = grid.simulateMany(100);
    const res2 = grid2.getSyncStep();
    if (stdout_) |stdout| {
        try stdout.print("Part 1: {}\n", .{res1});
        try stdout.print("Part 2: {}\n", .{res2});
    }
}

const OctoGrid = struct {
    // Energy level per cell; 0 after a flash, otherwise 1..9 between steps.
    grid: [grid_size][grid_size]u8,

    const Self = @This();

    /// Parse a grid_size x grid_size block of digit characters.
    /// Panics (unreachable) if the input has extra or missing lines.
    pub fn init(str: []const u8) Self {
        var grid: [grid_size][grid_size]u8 = undefined;
        var lines = tokenize(u8, str, "\r\n");
        for (grid) |*row| {
            const line = lines.next().?;
            for (row) |*num, i| {
                num.* = line[i] - '0';
            }
        }
        if (lines.next() != null) unreachable;
        return Self{ .grid = grid };
    }

    /// Advance one step and return the number of flashes it produced.
    pub fn simulateOne(self: *Self) i32 {
        self.addOneAll();
        // Flashes can push neighbors over 9, so sweep until a pass
        // produces no new flashes.
        var flashed = true;
        var flashes: i32 = 0;
        while (flashed) {
            const temp = self.flashOnce();
            flashes += temp;
            flashed = temp > 0;
        }
        return flashes;
    }

    /// Advance `amt` steps; returns the total flash count (part 1).
    pub fn simulateMany(self: *Self, amt: i32) i32 {
        var i: i32 = 0;
        var sum: i32 = 0;
        while (i < amt) : (i += 1) {
            sum += self.simulateOne();
        }
        return sum;
    }

    /// Step until every cell flashes in the same step; returns that
    /// 1-based step number (part 2).
    pub fn getSyncStep(self: *Self) i32 {
        var i: i32 = 1;
        while (self.simulateOne() != grid_size * grid_size) {
            i += 1;
        }
        return i;
    }

    // Increment every cell's energy by one (start-of-step rule).
    fn addOneAll(self: *Self) void {
        for (self.grid) |*row| {
            for (row) |*num| {
                num.* += 1;
            }
        }
    }

    // One sweep: flash every cell currently above 9. Returns the number
    // of cells flashed in this sweep.
    fn flashOnce(self: *Self) i32 {
        var flashes: i32 = 0;
        for (self.grid) |*row, i| {
            for (row) |num, j| {
                if (num > 9) {
                    flashes += 1;
                    self.flashAt(i, j);
                }
            }
        }
        return flashes;
    }

    // Flash the cell at (iu, ju): reset it to 0 and increment the eight
    // neighbors — except cells already at 0, which flashed this step and
    // must not be re-energized until the next step.
    fn flashAt(self: *Self, iu: usize, ju: usize) void {
        self.grid[iu][ju] = 0;
        const i = @intCast(i32, iu);
        const j = @intCast(i32, ju);
        const delta: [3]i32 = .{ -1, 0, 1 };
        for (delta) |di| {
            for (delta) |dj| {
                if (di == 0 and dj == 0) continue;
                if (i + di < 0 or i + di >= grid_size) continue;
                if (j + dj < 0 or j + dj >= grid_size) continue;
                const newi = @intCast(usize, i + di);
                const newj = @intCast(usize, j + dj);
                if (self.grid[newi][newj] == 0) continue;
                self.grid[newi][newj] += 1;
            }
        }
    }

    // Debug helper: dump the grid to stdout (unused in normal runs).
    fn print(self: *const Self) void {
        const stdout = std.io.getStdOut().writer();
        for (self.grid) |*row| {
            for (row) |num| {
                stdout.print("{}", .{num}) catch unreachable;
            }
            stdout.print("\n", .{}) catch unreachable;
        }
    }
};

// Convenience aliases shared across the AoC solution files.
const eql = std.mem.eql;
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const count = std.mem.count;
const parseUnsigned = std.fmt.parseUnsigned;
const parseInt = std.fmt.parseInt;
const sort = std.sort.sort;
src/day11.zig
//! CHIP-8 instruction fetch/decode/execute. The execute_* handlers are
//! brought into scope from instruction.zig via usingnamespace.
const std = @import("std");
const assert = std.debug.assert;
const cpu = @import("cpu.zig");
const config = @import("config.zig");
const memory = @import("memory.zig");
usingnamespace @import("instruction.zig");

/// Copy `program` into machine memory starting at cpu.MinProgramAddress.
/// Asserts the program has an even byte count and fits in writable memory.
pub fn load_program(state: *cpu.CPUState, program: []u8) void {
    assert((program.len & 0x0001) == 0); // Unaligned size
    assert(memory.is_valid_range(cpu.MinProgramAddress, @intCast(u16, program.len), memory.Usage.Write));

    for (program) |byte, i| {
        const offset: u16 = cpu.MinProgramAddress + @intCast(u16, i);
        state.memory[offset] = byte;
    }
}

// Read the big-endian 16-bit opcode at the current PC.
fn load_next_instruction(state: *cpu.CPUState) u16 {
    const msb = @intCast(u16, state.memory[state.pc]) << 8;
    const lsb = @intCast(u16, state.memory[state.pc + 1]);
    return msb | lsb;
}

/// Advance the machine by `deltaTimeMs` of wall time: tick the timers,
/// then execute however many instructions that time slice covers.
pub fn execute_step(state: *cpu.CPUState, deltaTimeMs: u32) void {
    var instructionsToExecute = update_timers(state, deltaTimeMs);

    var i: u32 = 0;
    while (i < instructionsToExecute) {
        // Simulate logic
        const nextInstruction = load_next_instruction(state);
        execute_instruction(state, nextInstruction);
        i += 1;
    }
}

// Returns execution counter: the number of instructions to run for this
// time slice. Leftover milliseconds are carried in the accumulators so no
// time is lost between calls.
fn update_timers(state: *cpu.CPUState, deltaTimeMs: u32) u32 {
    // Update delay timer
    state.delayTimerAccumulator += deltaTimeMs;
    const delayTimerDecrement: i32 = @intCast(i32, state.delayTimerAccumulator / cpu.DelayTimerPeriodMs);
    // Clamp at zero — the delay timer never wraps below 0.
    state.delayTimer = @intCast(u8, std.math.max(0, @intCast(i32, state.delayTimer) - delayTimerDecrement));

    // Remove accumulated ticks
    state.delayTimerAccumulator = state.delayTimerAccumulator % cpu.DelayTimerPeriodMs;

    // Update execution counter
    state.executionTimerAccumulator += deltaTimeMs;
    const executionCounter = state.executionTimerAccumulator / cpu.InstructionExecutionPeriodMs;
    state.executionTimerAccumulator = state.executionTimerAccumulator % cpu.InstructionExecutionPeriodMs;

    // TODO Handle sound
    // NOTE(review): the sound timer is decremented once per *call* here,
    // not per 60Hz tick like the delay timer — confirm this is intended.
    if (state.soundTimer > 0)
        state.soundTimer -= 1;

    return executionCounter;
}

/// Decode one 16-bit opcode and invoke the matching execute_* handler.
/// Decoding follows Cowgod's mnemonics (listed branch by branch below).
/// PC advances by 2 afterwards unless the handler changed it (jump/call/
/// skip) or the machine is blocked waiting for a key (Fx0A).
pub fn execute_instruction(state: *cpu.CPUState, instruction: u16) void {
    // Save PC for later
    const pcSave = state.pc;

    // Decode and execute
    if (instruction == 0x00E0) {
        // 00E0 - CLS
        execute_cls(state);
    } else if (instruction == 0x00EE) {
        // 00EE - RET
        execute_ret(state);
    } else if ((instruction & 0xF000) == 0x0000) {
        // 0nnn - SYS addr
        const address = instruction & 0x0FFF;
        execute_sys(state, address);
    } else if ((instruction & 0xF000) == 0x1000) {
        // 1nnn - JP addr
        const address = instruction & 0x0FFF;
        execute_jp(state, address);
    } else if ((instruction & 0xF000) == 0x2000) {
        // 2nnn - CALL addr
        const address = instruction & 0x0FFF;
        execute_call(state, address);
    } else if ((instruction & 0xF000) == 0x3000) {
        // 3xkk - SE Vx, byte
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        const value = @intCast(u8, instruction & 0x00FF);
        execute_se(state, registerName, value);
    } else if ((instruction & 0xF000) == 0x4000) {
        // 4xkk - SNE Vx, byte
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        const value = @intCast(u8, instruction & 0x00FF);
        execute_sne(state, registerName, value);
    } else if ((instruction & 0xF00F) == 0x5000) {
        // 5xy0 - SE Vx, Vy
        const registerLHS = @intCast(u8, (instruction & 0x0F00) >> 8);
        const registerRHS = @intCast(u8, (instruction & 0x00F0) >> 4);
        execute_se2(state, registerLHS, registerRHS);
    } else if ((instruction & 0xF000) == 0x6000) {
        // 6xkk - LD Vx, byte
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        const value = @intCast(u8, instruction & 0x00FF);
        execute_ld(state, registerName, value);
    } else if ((instruction & 0xF000) == 0x7000) {
        // 7xkk - ADD Vx, byte
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        const value = @intCast(u8, instruction & 0x00FF);
        execute_add(state, registerName, value);
    } else if ((instruction & 0xF00F) == 0x8000) {
        // 8xy0 - LD Vx, Vy
        const registerLHS = @intCast(u8, (instruction & 0x0F00) >> 8);
        const registerRHS = @intCast(u8, (instruction & 0x00F0) >> 4);
        execute_ld2(state, registerLHS, registerRHS);
    } else if ((instruction & 0xF00F) == 0x8001) {
        // 8xy1 - OR Vx, Vy
        const registerLHS = @intCast(u8, (instruction & 0x0F00) >> 8);
        const registerRHS = @intCast(u8, (instruction & 0x00F0) >> 4);
        execute_or(state, registerLHS, registerRHS);
    } else if ((instruction & 0xF00F) == 0x8002) {
        // 8xy2 - AND Vx, Vy
        const registerLHS = @intCast(u8, (instruction & 0x0F00) >> 8);
        const registerRHS = @intCast(u8, (instruction & 0x00F0) >> 4);
        execute_and(state, registerLHS, registerRHS);
    } else if ((instruction & 0xF00F) == 0x8003) {
        // 8xy3 - XOR Vx, Vy
        const registerLHS = @intCast(u8, (instruction & 0x0F00) >> 8);
        const registerRHS = @intCast(u8, (instruction & 0x00F0) >> 4);
        execute_xor(state, registerLHS, registerRHS);
    } else if ((instruction & 0xF00F) == 0x8004) {
        // 8xy4 - ADD Vx, Vy
        const registerLHS = @intCast(u8, (instruction & 0x0F00) >> 8);
        const registerRHS = @intCast(u8, (instruction & 0x00F0) >> 4);
        execute_add2(state, registerLHS, registerRHS);
    } else if ((instruction & 0xF00F) == 0x8005) {
        // 8xy5 - SUB Vx, Vy
        const registerLHS = @intCast(u8, (instruction & 0x0F00) >> 8);
        const registerRHS = @intCast(u8, (instruction & 0x00F0) >> 4);
        execute_sub(state, registerLHS, registerRHS);
    } else if ((instruction & 0xF00F) == 0x8006) {
        // 8xy6 - SHR Vx {, Vy}
        const registerLHS = @intCast(u8, (instruction & 0x0F00) >> 8);
        const registerRHS = @intCast(u8, (instruction & 0x00F0) >> 4);
        execute_shr1(state, registerLHS, registerRHS);
    } else if ((instruction & 0xF00F) == 0x8007) {
        // 8xy7 - SUBN Vx, Vy
        const registerLHS = @intCast(u8, (instruction & 0x0F00) >> 8);
        const registerRHS = @intCast(u8, (instruction & 0x00F0) >> 4);
        execute_subn(state, registerLHS, registerRHS);
    } else if ((instruction & 0xF00F) == 0x800E) {
        // 8xyE - SHL Vx {, Vy}
        const registerLHS = @intCast(u8, (instruction & 0x0F00) >> 8);
        const registerRHS = @intCast(u8, (instruction & 0x00F0) >> 4);
        execute_shl1(state, registerLHS, registerRHS);
    } else if ((instruction & 0xF00F) == 0x9000) {
        // 9xy0 - SNE Vx, Vy
        const registerLHS = @intCast(u8, (instruction & 0x0F00) >> 8);
        const registerRHS = @intCast(u8, (instruction & 0x00F0) >> 4);
        execute_sne2(state, registerLHS, registerRHS);
    } else if ((instruction & 0xF000) == 0xA000) {
        // Annn - LD I, addr
        const address = instruction & 0x0FFF;
        execute_ldi(state, address);
    } else if ((instruction & 0xF000) == 0xB000) {
        // Bnnn - JP V0, addr
        const address = instruction & 0x0FFF;
        execute_jp2(state, address);
    } else if ((instruction & 0xF000) == 0xC000) {
        // Cxkk - RND Vx, byte
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        const value = @intCast(u8, instruction & 0x00FF);
        execute_rnd(state, registerName, value);
    } else if ((instruction & 0xF000) == 0xD000) {
        // Dxyn - DRW Vx, Vy, nibble
        const registerLHS = @intCast(u8, (instruction & 0x0F00) >> 8);
        const registerRHS = @intCast(u8, (instruction & 0x00F0) >> 4);
        const size = @intCast(u8, instruction & 0x000F);
        execute_drw(state, registerLHS, registerRHS, size);
    } else if ((instruction & 0xF0FF) == 0xE09E) {
        // Ex9E - SKP Vx
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        execute_skp(state, registerName);
    } else if ((instruction & 0xF0FF) == 0xE0A1) {
        // ExA1 - SKNP Vx
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        execute_sknp(state, registerName);
    } else if ((instruction & 0xF0FF) == 0xF007) {
        // Fx07 - LD Vx, DT
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        execute_ldt(state, registerName);
    } else if ((instruction & 0xF0FF) == 0xF00A) {
        // Fx0A - LD Vx, K
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        execute_ldk(state, registerName);
    } else if ((instruction & 0xF0FF) == 0xF015) {
        // Fx15 - LD DT, Vx
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        execute_lddt(state, registerName);
    } else if ((instruction & 0xF0FF) == 0xF018) {
        // Fx18 - LD ST, Vx
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        execute_ldst(state, registerName);
    } else if ((instruction & 0xF0FF) == 0xF01E) {
        // Fx1E - ADD I, Vx
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        execute_addi(state, registerName);
    } else if ((instruction & 0xF0FF) == 0xF029) {
        // Fx29 - LD F, Vx
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        execute_ldf(state, registerName);
    } else if ((instruction & 0xF0FF) == 0xF033) {
        // Fx33 - LD B, Vx
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        execute_ldb(state, registerName);
    } else if ((instruction & 0xF0FF) == 0xF055) {
        // Fx55 - LD [I], Vx
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        execute_ldai(state, registerName);
    } else if ((instruction & 0xF0FF) == 0xF065) {
        // Fx65 - LD Vx, [I]
        const registerName = @intCast(u8, (instruction & 0x0F00) >> 8);
        execute_ldm(state, registerName);
    } else {
        assert(false); // Unknown instruction
    }

    // Increment PC only if it was NOT overriden by an instruction,
    // or if we are waiting for user input.
    if (pcSave == state.pc and !state.isWaitingForKey)
        state.pc += 2;

    // Save previous key state
    state.keyStatePrev = state.keyState;
}
src/chip8/execution.zig
//! Advent of Code 2020 day 2: count passwords valid under two different
//! interpretations of a "min-max char" policy line like "1-3 a: abcde".
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const assert = std.debug.assert;
const print = std.debug.print;

const input = @embedFile("../input/day02.txt");

// Parse every input line into a Policy. Caller owns the returned list.
fn readPolicies(allocator: *Allocator) !ArrayList(Policy) {
    var policies = ArrayList(Policy).init(allocator);
    var lines = std.mem.tokenize(input, "\n");
    while (lines.next()) |line| {
        try policies.append(try Policy.initFromString(line));
    }
    return policies;
}

const Policy = struct {
    min: u8,
    max: u8,
    char: u8,
    // Slice into the embedded input — not owned, lives for program lifetime.
    password: []const u8,

    /// Parse a line of the form "MIN-MAX C: PASSWORD".
    pub fn initFromString(string: []const u8) !Policy {
        const min_end = std.mem.indexOfScalar(u8, string, '-').?;
        const min = try std.fmt.parseUnsigned(u8, string[0..min_end], 10);
        const max_start = min_end + 1;
        const max_end = max_start + std.mem.indexOfScalar(u8, string[max_start..], ' ').?;
        const max = try std.fmt.parseUnsigned(u8, string[max_start..max_end], 10);
        return Policy{
            .min = min,
            .max = max,
            // Layout after max_end: +1 char, +2 colon, +3 space, +4 password.
            .char = string[max_end + 1],
            .password = string[max_end + 4..],
        };
    }

    /// Part 1: `char` must occur between min and max times (inclusive).
    pub fn checkRule1(self: Policy) bool {
        var num_char: usize = 0;
        for (self.password) |c| {
            if (c == self.char) num_char += 1;
        }
        return self.min <= num_char and num_char <= self.max;
    }

    /// Part 2: exactly one of the (1-based) positions min and max holds
    /// `char` — the != acts as XOR.
    pub fn checkRule2(self: Policy) bool {
        return (self.password[self.min - 1] == self.char) != (self.password[self.max - 1] == self.char);
    }
};

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    var policies = try readPolicies(&gpa.allocator);
    var part1: usize = 0;
    var part2: usize = 0;
    for (policies.items) |policy| {
        if (policy.checkRule1()) part1 += 1;
        if (policy.checkRule2()) part2 += 1;
    }
    print("part1: {}\n", .{part1});
    print("part2: {}\n", .{part2});
}

test "part1 example" {
    std.testing.expect((try Policy.initFromString("1-3 a: abcde")).checkRule1() == true);
    std.testing.expect((try Policy.initFromString("1-3 b: cdefg")).checkRule1() == false);
    std.testing.expect((try Policy.initFromString("2-9 c: ccccccccc")).checkRule1() == true);
}

test "part2 example" {
    std.testing.expect((try Policy.initFromString("1-3 a: abcde")).checkRule2() == true);
    std.testing.expect((try Policy.initFromString("1-3 b: cdefg")).checkRule2() == false);
    std.testing.expect((try Policy.initFromString("2-9 c: ccccccccc")).checkRule2() == false);
}
src/day02.zig
// Advent of Code 2021, day 18: snailfish arithmetic.
// A snailfish number is a binary tree (`Number`) whose leaves (`ValueNode`)
// are additionally threaded into a doubly linked list in left-to-right
// reading order, so explode() can reach the nearest regular number on either
// side in O(1). All nodes live in an arena and are freed in bulk.
const std = @import("std");

// Result pair: @"0" = part 1 answer, @"1" = part 2 answer.
const Answer = struct { @"0": u32, @"1": u32 };

const NumberTag = enum {
    regular_num,
    pair_num,
};

// A snailfish number: either a leaf value or a pair of sub-numbers.
const Number = union(NumberTag) {
    regular_num: *ValueNode,
    pair_num: Pair,
};

const Pair = struct { left: *Number, right: *Number };

// Leaf value, doubly linked to the neighbouring leaves in reading order.
const ValueNode = struct {
    value: u32,
    prev: ?*ValueNode,
    next: ?*ValueNode,
};

// Recursive-descent parse of one snailfish number starting at str[idx.*];
// advances idx past the parsed text. Leaves are created unlinked
// (prev/next = null); linkTree() threads them afterwards.
// NOTE(review): only single-digit regular numbers parse — parseInt is fed
// exactly one byte. Fine for puzzle input; verify if reused elsewhere.
fn parseNumber(str: []const u8, idx: *usize, arena: *std.heap.ArenaAllocator) error{ Overflow, InvalidCharacter, OutOfMemory }!*Number {
    const b = str[idx.*];
    idx.* += 1;
    if (b == '[') {
        const left = try parseNumber(str, idx, arena);
        idx.* += 1; // skip the comma between the two halves
        const right = try parseNumber(str, idx, arena);
        idx.* += 1; // close bracket
        var num = try arena.allocator.create(Number);
        num.* = Number{ .pair_num = Pair{ .left = left, .right = right } };
        return num;
    } else {
        var num = try arena.allocator.create(Number);
        var node = try arena.allocator.create(ValueNode);
        node.* = ValueNode{
            // Parse the single digit `b` as a number.
            .value = try std.fmt.parseInt(u32, ([1]u8{b})[0..], 10),
            .prev = null,
            .next = null,
        };
        num.* = Number{
            .regular_num = node,
        };
        return num;
    }
}

// Debug helper: prints the number in the puzzle's bracket notation.
// NOTE(review): the regular_num branch passes the *ValueNode pointer `n` to
// "{d}", not n.value — likely intended n.*.value; confirm before relying on it.
fn printNumber(num: Number) void {
    switch (num) {
        NumberTag.regular_num => |n| {
            std.debug.print("{d}", .{n});
        },
        NumberTag.pair_num => |pair| {
            std.debug.print("[", .{});
            printNumber(pair.left.*);
            std.debug.print(",", .{});
            printNumber(pair.right.*);
            std.debug.print("]", .{});
        },
    }
}

// Appends all leaf values in reading order to `arr`.
// NOTE(review): not referenced anywhere else in this file (debug leftover?).
fn flattenToValues(num: Number, arr: *std.ArrayList(u32)) error{OutOfMemory}!void {
    switch (num) {
        NumberTag.regular_num => |n| {
            try arr.append(n.*.value);
        },
        NumberTag.pair_num => |pair| {
            try flattenToValues(pair.left.*, arr);
            try flattenToValues(pair.right.*, arr);
        },
    }
}

// Appends all leaf *nodes* in reading order to `arr` (used by linkTree).
// `depth` is carried along but never read.
fn flattenNumber(num: Number, depth: u32, arr: *std.ArrayList(*ValueNode)) error{OutOfMemory}!void {
    switch (num) {
        NumberTag.regular_num => |node| {
            try arr.append(node);
        },
        NumberTag.pair_num => |pair| {
            try flattenNumber(pair.left.*, depth + 1, arr);
            try flattenNumber(pair.right.*, depth + 1, arr);
        },
    }
}

// Snailfish addition: wraps [left,right] in a new pair and stitches the two
// leaf chains together (last leaf of left <-> first leaf of right).
// Asserts (via `.?`) that both operands have at least one leaf.
fn mergeNumbers(left: *Number, right: *Number, arena: *std.heap.ArenaAllocator) !*Number {
    var z = try arena.allocator.create(Number);
    z.* = Number{
        .pair_num = Pair{
            .left = left,
            .right = right,
        },
    };
    var left_last = last(left.*).?;
    var right_head = head(right.*).?;
    left_last.*.next = right_head;
    right_head.*.prev = left_last;
    return z;
}

// Leftmost leaf of the tree (follows .left links down).
fn head(num_root: Number) ?*ValueNode {
    var num = num_root;
    while (true) {
        switch (num) {
            NumberTag.regular_num => |node| {
                return node;
            },
            NumberTag.pair_num => |pair| {
                num = pair.left.*;
            },
        }
    }
}

// Rightmost leaf of the tree (follows .right links down).
fn last(num_root: Number) ?*ValueNode {
    var num = num_root;
    while (true) {
        switch (num) {
            NumberTag.regular_num => |node| {
                return node;
            },
            NumberTag.pair_num => |pair| {
                num = pair.right.*;
            },
        }
    }
}

// Explodes the leftmost pair nested at depth 4: its left value is added to
// the previous leaf, its right value to the next leaf, and the pair is
// replaced by a 0 leaf spliced into the chain. Returns true if an explosion
// happened (caller restarts the reduce loop).
// NOTE(review): the depth-4 branch assumes both children are regular numbers
// (`pair.*.left.*.regular_num`) — true for valid puzzle input, where depth-4
// pairs never contain sub-pairs after repeated reduction; otherwise this
// would hit the wrong union field.
fn explode(num: *Number, depth: u32, arena: *std.heap.ArenaAllocator) error{OutOfMemory}!bool {
    switch (num.*) {
        NumberTag.regular_num => |_| {
            return false;
        },
        NumberTag.pair_num => |*pair| {
            if (depth == 4) {
                // New 0 leaf that replaces the exploded pair in the chain.
                var node = try arena.allocator.create(ValueNode);
                node.* = ValueNode{
                    .value = 0,
                    .prev = null,
                    .next = null,
                };
                if (pair.*.left.*.regular_num.*.prev) |prev| {
                    prev.*.value += pair.*.left.regular_num.*.value;
                    node.*.prev = prev;
                    prev.*.next = node;
                }
                if (pair.*.right.*.regular_num.*.next) |next| {
                    next.*.value += pair.*.right.regular_num.*.value;
                    node.*.next = next;
                    next.*.prev = node;
                }
                num.* = Number{
                    .regular_num = node,
                };
                return true;
            } else {
                // Depth-first, left-to-right: explode the leftmost candidate.
                if (try explode(pair.left, depth + 1, arena)) {
                    return true;
                }
                if (try explode(pair.right, depth + 1, arena)) {
                    return true;
                }
                return false;
            }
        },
    }
}

// Splits the leftmost leaf >= 10 into a pair [floor(v/2), ceil(v/2)],
// splicing the two new leaves into the chain in place of the old one.
// Returns true if a split happened (caller restarts the reduce loop).
fn split(num: *Number, arena: *std.heap.ArenaAllocator) error{OutOfMemory}!bool {
    switch (num.*) {
        NumberTag.regular_num => |node| {
            if (node.value >= 10) {
                var left = try arena.allocator.create(Number);
                var left_value = try arena.allocator.create(ValueNode);
                var right = try arena.allocator.create(Number);
                var right_value = try arena.allocator.create(ValueNode);
                // Left half rounds down; inherits the old node's prev link.
                left_value.* = ValueNode{
                    .value = node.value / 2,
                    .prev = node.prev,
                    .next = right_value,
                };
                left.* = Number{
                    .regular_num = left_value,
                };
                // Right half rounds up; inherits the old node's next link.
                right_value.* = ValueNode{
                    .value = if (@mod(node.value, 2) == 0) node.value / 2 else node.value / 2 + 1,
                    .prev = left_value,
                    .next = node.next,
                };
                right.* = Number{
                    .regular_num = right_value,
                };
                // Re-point the neighbours at the two new leaves.
                if (node.prev) |*prev| {
                    prev.*.next = left_value;
                }
                if (node.next) |*next| {
                    next.*.prev = right_value;
                }
                num.* = Number{
                    .pair_num = Pair{
                        .left = left,
                        .right = right,
                    },
                };
                return true;
            } else {
                return false;
            }
        },
        NumberTag.pair_num => |*pair| {
            if (try split(pair.left, arena)) {
                return true;
            }
            if (try split(pair.right, arena)) {
                return true;
            }
            return false;
        },
    }
}

// Puzzle magnitude: 3 * magnitude(left) + 2 * magnitude(right), leaf = value.
fn magnitude(num: Number) u32 {
    switch (num) {
        NumberTag.regular_num => |node| {
            return node.value;
        },
        NumberTag.pair_num => |pair| {
            return 3 * magnitude(pair.left.*) + 2 * magnitude(pair.right.*);
        },
    }
}

// Set up prev/next chains: collects all leaves in reading order and links
// each one to its neighbours. Used after parse/clone, which leave links null.
fn linkTree(num: Number, allocator: *std.mem.Allocator) !void {
    var nodes = std.ArrayList(*ValueNode).init(allocator);
    defer nodes.deinit();
    try flattenNumber(num, 0, &nodes);
    var i: usize = 0;
    while (i < nodes.items.len) : (i += 1) {
        if (i != 0) {
            nodes.items[i].*.prev = nodes.items[i - 1];
        }
        if (i != nodes.items.len - 1) {
            nodes.items[i].*.next = nodes.items[i + 1];
        }
    }
}

// Deep copy of a number into the arena. prev/next links are left null;
// callers re-thread with linkTree().
fn clone(num: Number, arena: *std.heap.ArenaAllocator) error{OutOfMemory}!*Number {
    switch (num) {
        NumberTag.regular_num => |node| {
            var cloned_num = try arena.allocator.create(Number);
            var cloned_node = try arena.allocator.create(ValueNode);
            cloned_node.* = ValueNode{
                .value = node.value,
                .prev = null,
                .next = null,
            };
            cloned_num.* = Number{
                .regular_num = cloned_node,
            };
            return cloned_num;
        },
        NumberTag.pair_num => |pair| {
            var cloned_num = try arena.allocator.create(Number);
            cloned_num.* = Number{
                .pair_num = Pair{
                    .left = try clone(pair.left.*, arena),
                    .right = try clone(pair.right.*, arena),
                },
            };
            return cloned_num;
        },
    }
}

// Reads one snailfish number per line from `filename`, sums them with full
// reduction for part 1, and tries every ordered pair of inputs for part 2.
fn run(filename: []const u8) !Answer {
    const file = try std.fs.cwd().openFile(filename, .{ .read = true });
    defer file.close();
    // NOTE(review): .reader() is taken on a temporary BufferedReader, so the
    // buffer state may not persist between reads — bind the bufferedReader
    // to a variable first; confirm against the targeted Zig version.
    var reader = std.io.bufferedReader(file.reader()).reader();
    var buffer: [4096]u8 = undefined;
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    var arena = std.heap.ArenaAllocator.init(&gpa.allocator);
    defer arena.deinit();
    var num_total: ?*Number = null;
    // Pristine clones of each input line, kept for the part-2 pair search.
    var numbers = std.ArrayList(*Number).init(&gpa.allocator);
    defer numbers.deinit();
    while (try reader.readUntilDelimiterOrEof(&buffer, '\n')) |line| {
        var i: usize = 0; // parse cursor into `line`
        var num = (try parseNumber(line, &i, &arena));
        try linkTree(num.*, &arena.allocator);
        var cloned_number = try clone(num.*, &arena);
        try linkTree(cloned_number.*, &arena.allocator);
        try numbers.append(cloned_number);
        // Add numbers
        if (num_total == null) {
            num_total = num;
        } else {
            num_total = try mergeNumbers(num_total.?, num, &arena);
        }
        // Reduce: explode has priority over split; restart after each action.
        while (true) {
            if (try explode(num_total.?, 0, &arena)) {
                continue;
            }
            if (try split(num_total.?, &arena)) {
                continue;
            }
            break;
        }
    }
    // Part 2: largest magnitude of any ordered pair sum (addition is not
    // commutative here). Clone before each trial since reduction mutates.
    var max_mag: u32 = 0;
    for (numbers.items) |num1, i| {
        for (numbers.items) |num2, j| {
            if (i == j) {
                continue;
            }
            var num1_clone = try clone(num1.*, &arena);
            try linkTree(num1_clone.*, &arena.allocator);
            var num2_clone = try clone(num2.*, &arena);
            try linkTree(num2_clone.*, &arena.allocator);
            var num3 = try mergeNumbers(num1_clone, num2_clone, &arena);
            while (true) {
                if (try explode(num3, 0, &arena)) {
                    continue;
                }
                if (try split(num3, &arena)) {
                    continue;
                }
                break;
            }
            max_mag = @maximum(max_mag, magnitude(num3.*));
        }
    }
    return Answer{ .@"0" = magnitude(num_total.?.*), .@"1" = max_mag };
}

pub fn main() !void {
    const answer = try run("inputs/" ++ @typeName(@This()) ++ ".txt");
    std.debug.print("{d}\n", .{answer.@"0"});
    std.debug.print("{d}\n", .{answer.@"1"});
}

test {
    const answer = try run("test-inputs/" ++ @typeName(@This()) ++ ".txt");
    // NOTE(review): expectEqual's conventional order is (expected, actual);
    // these calls pass (actual, expected) — harmless for equality checks.
    try std.testing.expectEqual(answer.@"0", 4140);
    try std.testing.expectEqual(answer.@"1", 3993);
}
src/day18.zig
const std = @import("std");

/// Entry point: reads the diagnostic report from input/03.txt and prints the
/// product of the oxygen generator rating and the CO2 scrubber rating
/// (Advent of Code 2021, day 3, part 2).
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    var input_file = try std.fs.cwd().openFile("input/03.txt", .{});
    defer input_file.close();
    var buffered_reader = std.io.bufferedReader(input_file.reader());

    const result = try analyze(allocator, buffered_reader.reader());
    // Fix: the message said "oxgen" and named the CO2 value a "generator"
    // rating even though the field is co2_scrubber_rating.
    std.debug.print(
        "oxygen generator rating * CO2 scrubber rating: {}\n",
        .{result.o2_generator_rating * result.co2_scrubber_rating},
    );
}

const Result = struct {
    o2_generator_rating: u64,
    co2_scrubber_rating: u64,
};

/// Repeatedly filters `numbers` on the bit criteria, starting at the most
/// significant bit, until a single candidate remains.
/// `keep_most_common == true` keeps numbers whose bit matches the most common
/// value at that position (ties keep 1, the O2 rule); `false` keeps the least
/// common (ties keep 0, the CO2 rule). `numbers` must be non-empty.
fn filterByBitCriteria(numbers: *std.ArrayList(u64), bit_length: u6, keep_most_common: bool) void {
    var i = bit_length - 1;
    while (numbers.items.len > 1) {
        var count_ones: usize = 0;
        for (numbers.items) |x| {
            if (((x >> i) & 0x1) == 0x1) count_ones += 1;
        }
        // Ones are the most common value (or tied) iff 2*count_ones >= len.
        const ones_majority = 2 * count_ones >= numbers.items.len;
        // Bit value to remove at this position.
        const filter_out: u64 = if (ones_majority == keep_most_common) 0x0 else 0x1;
        // swapRemove is O(1) but moves the last element into slot j, so step
        // back one position after each removal to re-examine that slot.
        var j: isize = 0;
        while (j < numbers.items.len) : (j += 1) {
            if ((numbers.items[@intCast(usize, j)] >> i) & 0x1 == filter_out) {
                _ = numbers.swapRemove(@intCast(usize, j));
                j -= 1;
            }
        }
        // Only advance to the next (lower) bit while filtering continues;
        // the guard also prevents u6 underflow when i == 0.
        if (numbers.items.len > 1) i -= 1;
    }
}

/// Parses one binary number per line from `reader` (all lines the same width,
/// at most 64 bits) and computes both life-support ratings.
/// `allocator` backs the two temporary candidate lists.
fn analyze(allocator: std.mem.Allocator, reader: anytype) !Result {
    var buf: [128]u8 = undefined;
    var bit_length: u6 = 0;

    var o2_numbers = std.ArrayList(u64).init(allocator);
    defer o2_numbers.deinit();
    while (try reader.readUntilDelimiterOrEof(&buf, '\n')) |line| {
        const number = try std.fmt.parseInt(u64, line, 2);
        try o2_numbers.append(number);
        bit_length = @intCast(u6, line.len);
    }

    // The CO2 search starts from an independent copy of the candidate set.
    var co2_numbers = std.ArrayList(u64).fromOwnedSlice(allocator, try allocator.dupe(u64, o2_numbers.items));
    defer co2_numbers.deinit();

    // The two searches were previously duplicated inline; they differ only in
    // which bit value survives a majority.
    filterByBitCriteria(&o2_numbers, bit_length, true);
    filterByBitCriteria(&co2_numbers, bit_length, false);

    return Result{
        .o2_generator_rating = o2_numbers.items[0],
        .co2_scrubber_rating = co2_numbers.items[0],
    };
}

test "example 1" {
    const text =
        \\00100
        \\11110
        \\10110
        \\10111
        \\10101
        \\01111
        \\00111
        \\11100
        \\10000
        \\11001
        \\00010
        \\01010
    ;
    var fbs = std.io.fixedBufferStream(text);
    const result = try analyze(std.testing.allocator, fbs.reader());
    try std.testing.expectEqual(@as(u64, 23), result.o2_generator_rating);
    try std.testing.expectEqual(@as(u64, 10), result.co2_scrubber_rating);
}
src/03_2.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
// Fix: StringBuffer below refers to `mem.*` and `assert` unqualified; without
// these aliases any use of those methods failed to compile (Zig's lazy
// analysis only surfaced the error once a method was referenced).
const mem = std.mem;
const assert = std.debug.assert;

/// A small compiled string-template engine. `init` parses a template with
/// `{{...}}` placeholders into a command list; `render` substitutes fields of
/// a caller-supplied context struct. Supports `{{name}}`, `{{for (a..b)|i|}}`
/// counted loops and `{{for (list)|item, i|}}` array loops, closed by
/// `{{end}}`.
pub const StringTime = struct {
    commands: CommandList,

    const CommandKind = union(enum) {
        literal: []const u8,
        substitution: struct {
            variable_name: []const u8,
        },
        for_range: struct {
            times: usize,
            command_list: CommandList,
            index_name: ?[]const u8 = null,
            pub fn deinit(self: @This()) void {
                self.command_list.deinit();
            }
        },
        foreach: struct {
            list_name: []const u8,
            item_name: []const u8,
            index_name: ?[]const u8 = null,
            command_list: CommandList,
            pub fn deinit(self: @This()) void {
                self.command_list.deinit();
            }
        },
        pub fn deinit(self: @This()) void {
            switch (self) {
                .for_range => |for_range| for_range.deinit(),
                .foreach => |foreach| foreach.deinit(),
                else => {},
            }
        }
    };
    const CommandList = std.ArrayList(CommandKind);

    // Per-render loop state: which capture names are live and the current
    // iteration index.
    const ExecutionContext = struct {
        index_name: ?[]const u8 = null,
        item_name: ?[]const u8 = null,
        list_name: ?[]const u8 = null,
        current_index: usize = 0,
    };
    const Self = @This();

    /// Parses `template` into a command list. Caller must `deinit`.
    pub fn init(allocator: *Allocator, template: []const u8) !Self {
        var result = Self{
            .commands = CommandList.init(allocator),
        };
        errdefer result.deinit();
        try result.parse(allocator, template);
        return result;
    }

    pub fn deinit(self: Self) void {
        for (self.commands.items) |command| {
            command.deinit();
        }
        self.commands.deinit();
    }

    /// Renders the template against `context` (any struct whose field names
    /// match the placeholders). Caller owns the returned StringBuffer.
    /// Returns error.VariableNameNotFound if a placeholder matches nothing.
    pub fn render(self: Self, allocator: *Allocator, context: anytype) !StringBuffer {
        var result = StringBuffer.init(allocator);
        errdefer result.deinit();
        var exec_context: ExecutionContext = .{};
        try processCommand(self.commands, &result, &exec_context, context);
        return result;
    }

    // Executes a command list, appending output to `result`.
    fn processCommand(command_list: CommandList, result: *StringBuffer, exec_context: *ExecutionContext, context: anytype) !void {
        // Formats a value of any supported type into the buffer; u8 arrays,
        // slices and pointers-to-arrays print as strings, enums as tag names.
        const ValuePrint = struct {
            fn print(value: anytype, result_buffer: *StringBuffer) !void {
                switch (@typeInfo(@TypeOf(value))) {
                    .Enum => {
                        try result_buffer.appendSlice(@tagName(value));
                    },
                    .Array => |array_info| {
                        if (array_info.child == u8) {
                            try std.fmt.formatType(value, "s", .{}, result_buffer.writer(), std.fmt.default_max_depth);
                        } else {
                            try std.fmt.formatType(value, "", .{}, result_buffer.writer(), std.fmt.default_max_depth);
                        }
                    },
                    .Pointer => |ptr_info| {
                        switch (ptr_info.size) {
                            .One => switch (@typeInfo(ptr_info.child)) {
                                .Array => |info| {
                                    if (info.child == u8) {
                                        try std.fmt.formatType(value, "s", .{}, result_buffer.writer(), std.fmt.default_max_depth);
                                    } else {
                                        try std.fmt.formatType(value, "", .{}, result_buffer.writer(), std.fmt.default_max_depth);
                                    }
                                },
                                else => {
                                    try std.fmt.formatType(value, "", .{}, result_buffer.writer(), std.fmt.default_max_depth);
                                },
                            },
                            .Many, .C, .Slice => {
                                if (ptr_info.child == u8) {
                                    try std.fmt.formatType(value, "s", .{}, result_buffer.writer(), std.fmt.default_max_depth);
                                } else {
                                    try std.fmt.formatType(value, "", .{}, result_buffer.writer(), std.fmt.default_max_depth);
                                }
                            },
                        }
                    },
                    else => {
                        try std.fmt.formatType(value, "", .{}, result_buffer.writer(), std.fmt.default_max_depth);
                    },
                }
            }
        };
        for (command_list.items) |command| {
            switch (command) {
                .literal => |literal| {
                    try result.appendSlice(literal);
                },
                .substitution => |sub| {
                    var found = false;
                    // Comptime scan of the context's fields for a direct
                    // match, or for the loop item of an enclosing foreach.
                    inline for (std.meta.fields(@TypeOf(context))) |field| {
                        if (std.mem.eql(u8, field.name, sub.variable_name)) {
                            const value = @field(context, field.name);
                            try ValuePrint.print(value, result);
                            found = true;
                        }
                        if (exec_context.list_name) |list_name| {
                            if (exec_context.item_name) |item_name| {
                                if (std.mem.eql(u8, sub.variable_name, item_name)) {
                                    if (std.mem.eql(u8, field.name, list_name)) {
                                        if (@typeInfo(field.field_type) == .Array) {
                                            const list_instance = @field(context, field.name);
                                            const list_value = list_instance[exec_context.current_index];
                                            try ValuePrint.print(list_value, result);
                                            found = true;
                                        }
                                    }
                                }
                            }
                        }
                    }
                    // A loop's index capture resolves outside the field scan.
                    if (exec_context.index_name) |index_name| {
                        if (std.mem.eql(u8, sub.variable_name, index_name)) {
                            try std.fmt.formatInt(exec_context.current_index, 10, false, .{}, result.writer());
                            found = true;
                        }
                    }
                    if (!found) {
                        return error.VariableNameNotFound;
                    }
                },
                .for_range => |for_range| {
                    var count: usize = 0;
                    var for_exec_context: ExecutionContext = .{
                        .index_name = for_range.index_name,
                    };
                    while (count < for_range.times) : (count += 1) {
                        for_exec_context.current_index = count;
                        // TODO: Remove that catch unreachable workaround
                        processCommand(for_range.command_list, result, &for_exec_context, context) catch unreachable;
                    }
                },
                .foreach => |foreach| {
                    var count: usize = 0;
                    var for_exec_context: ExecutionContext = .{
                        .item_name = foreach.item_name,
                        .index_name = foreach.index_name,
                        .list_name = foreach.list_name,
                    };
                    inline for (std.meta.fields(@TypeOf(context))) |field| {
                        if (std.mem.eql(u8, field.name, foreach.list_name)) {
                            if (@typeInfo(field.field_type) == .Array) {
                                const list_instance = @field(context, field.name);
                                while (count < list_instance.len) : (count += 1) {
                                    for_exec_context.current_index = count;
                                    // TODO: Remove that catch unreachable workaround
                                    processCommand(foreach.command_list, result, &for_exec_context, context) catch unreachable;
                                }
                            }
                        }
                    }
                },
            }
        }
    }

    // Splits `template` into literal runs and `{{...}}` expressions; nested
    // for/end pairs are handled with a stack of command lists.
    fn parse(self: *Self, allocator: *Allocator, template: []const u8) !void {
        var start_index: usize = 0;
        var previous_index: usize = 0;
        const State = enum {
            Literal,
            InsideTemplate,
        };
        var command_stack = std.ArrayList(*CommandList).init(allocator);
        defer command_stack.deinit();
        try command_stack.append(&self.commands);
        var state: State = .Literal;
        var it = (try std.unicode.Utf8View.init(template)).iterator();
        while (it.peek(1).len != 0) {
            switch (state) {
                .Literal => {
                    var codepoint = it.nextCodepointSlice().?;
                    if (std.mem.eql(u8, codepoint, "{") and std.mem.eql(u8, it.peek(1), "{")) {
                        // Flush the literal run preceding the "{{".
                        try command_stack.items[command_stack.items.len - 1].append(CommandKind{ .literal = template[start_index..previous_index] });
                        _ = it.nextCodepointSlice();
                        start_index = it.i;
                        state = .InsideTemplate;
                    } else if (std.mem.eql(u8, codepoint, "}") and std.mem.eql(u8, it.peek(1), "}")) {
                        // "}}" with no opening "{{".
                        return error.MismatchedTemplateDelimiter;
                    } else {
                        if (it.peek(1).len == 0) {
                            // End of input: flush the trailing literal.
                            try command_stack.items[command_stack.items.len - 1].append(CommandKind{ .literal = template[start_index..] });
                        }
                    }
                },
                .InsideTemplate => {
                    var parser = Parser.initFromIterator(allocator, it);
                    var expression_opt = try parser.parse();
                    if (expression_opt) |expression| {
                        defer expression.deinit();
                        switch (expression) {
                            .field_qualifier => |var_name| {
                                try command_stack.items[command_stack.items.len - 1].append(CommandKind{ .substitution = .{ .variable_name = var_name } });
                            },
                            .for_loop => |for_expression| {
                                switch (for_expression.expression) {
                                    .field_qualifier => |field| {
                                        // Fix: was `items.len < 0`, which is
                                        // never true for a usize, so an empty
                                        // capture list fell through to an
                                        // out-of-bounds items[0] below.
                                        if (for_expression.variable_captures.items.len == 0) {
                                            return error.NoItemVariableCapture;
                                        }
                                        var new_command = CommandKind{
                                            .foreach = .{
                                                .list_name = field,
                                                .item_name = for_expression.variable_captures.items[0],
                                                .command_list = CommandList.init(allocator),
                                            },
                                        };
                                        if (for_expression.variable_captures.items.len > 1) {
                                            new_command.foreach.index_name = for_expression.variable_captures.items[1];
                                        }
                                        try command_stack.items[command_stack.items.len - 1].append(new_command);
                                        // Nested commands append into the loop
                                        // body until the matching {{end}}.
                                        const command_stack_top = &command_stack.items[command_stack.items.len - 1];
                                        try command_stack.append(&command_stack_top.*.items[command_stack_top.*.items.len - 1].foreach.command_list);
                                    },
                                    .range => |range| {
                                        const times = range.end - range.start;
                                        var new_command = CommandKind{ .for_range = .{ .times = times, .command_list = CommandList.init(allocator) } };
                                        if (for_expression.variable_captures.items.len > 0) {
                                            new_command.for_range.index_name = for_expression.variable_captures.items[0];
                                        }
                                        try command_stack.items[command_stack.items.len - 1].append(new_command);
                                        const command_stack_top = &command_stack.items[command_stack.items.len - 1];
                                        try command_stack.append(&command_stack_top.*.items[command_stack_top.*.items.len - 1].for_range.command_list);
                                    },
                                }
                            },
                            .end => {
                                _ = command_stack.popOrNull();
                            },
                        }
                    }
                    // Resume literal scanning after the consumed "}}".
                    it = parser.lexer.it;
                    start_index = it.i;
                    state = .Literal;
                },
            }
            previous_index = it.i;
        }
    }
};

/// Tokenizer for the text between "{{" and "}}".
pub const Lexer = struct {
    it: std.unicode.Utf8Iterator,

    pub const Token = union(enum) {
        comma: void,
        dot: void,
        end: void,
        end_template: void,
        for_loop: void,
        identifier: []const u8,
        left_paren: void,
        number: usize,
        range: void,
        right_paren: void,
        vertical_line: void,
    };
    const Self = @This();
    const Keywords = [_]struct {
        keyword: []const u8,
        token: Token,
    }{
        .{
            .keyword = "for",
            .token = Token.for_loop,
        },
        .{
            .keyword = "end",
            .token = Token.end,
        },
    };

    pub fn init(input: []const u8) !Self {
        return Self{
            .it = (try std.unicode.Utf8View.init(input)).iterator(),
        };
    }

    /// Continues lexing from an existing iterator position (shared with the
    /// outer template scanner).
    pub fn initFromIterator(it: std.unicode.Utf8Iterator) Self {
        return Self{
            .it = it,
        };
    }

    /// Returns the next token, null at end of input, or error.InvalidToken /
    /// error.MismatchedTemplateDelimiter on bad input.
    pub fn next(self: *Self) !?Token {
        // Eat whitespace
        var codepoint = self.it.peek(1);
        while (codepoint.len == 1 and std.ascii.isSpace(codepoint[0])) {
            _ = self.it.nextCodepointSlice();
            codepoint = self.it.peek(1);
        }
        if (std.mem.eql(u8, codepoint, "(")) {
            _ = self.it.nextCodepointSlice();
            return Token.left_paren;
        } else if (std.mem.eql(u8, codepoint, ")")) {
            _ = self.it.nextCodepointSlice();
            return Token.right_paren;
        } else if (std.mem.eql(u8, codepoint, "|")) {
            _ = self.it.nextCodepointSlice();
            return Token.vertical_line;
        } else if (std.mem.eql(u8, codepoint, ",")) {
            _ = self.it.nextCodepointSlice();
            return Token.comma;
        } else if (std.mem.eql(u8, codepoint, ".")) {
            // ".." is a range; a lone "." is field access.
            var peek_codepoint = self.it.peek(2);
            if (std.mem.eql(u8, peek_codepoint, "..")) {
                _ = self.it.nextCodepointSlice();
                _ = self.it.nextCodepointSlice();
                return Token.range;
            } else {
                _ = self.it.nextCodepointSlice();
                return Token.dot;
            }
        } else if (std.mem.eql(u8, codepoint, "}")) {
            // Only "}}" is valid here; a single "}" is malformed.
            var peek_codepoint = self.it.peek(2);
            if (std.mem.eql(u8, peek_codepoint, "}}")) {
                _ = self.it.nextCodepointSlice();
                _ = self.it.nextCodepointSlice();
                return Token.end_template;
            } else {
                return error.MismatchedTemplateDelimiter;
            }
        } else if (codepoint.len == 1 and std.ascii.isDigit(codepoint[0])) {
            var start_index: usize = self.it.i;
            _ = self.it.nextCodepointSlice();
            var number_codepoint = self.it.peek(1);
            while (number_codepoint.len == 1 and std.ascii.isDigit(number_codepoint[0])) {
                _ = self.it.nextCodepointSlice();
                number_codepoint = self.it.peek(1);
            }
            var number_string = self.it.bytes[start_index..self.it.i];
            var parsed_number: usize = try std.fmt.parseInt(usize, number_string, 10);
            return Token{ .number = parsed_number };
        } else if (codepoint.len == 1 and (std.ascii.isAlpha(codepoint[0]) or codepoint[0] == '_')) {
            var start_index: usize = self.it.i;
            _ = self.it.nextCodepointSlice();
            var identifier_codepoint = self.it.peek(1);
            while (identifier_codepoint.len == 1 and (std.ascii.isAlNum(identifier_codepoint[0]) or identifier_codepoint[0] == '_')) {
                _ = self.it.nextCodepointSlice();
                identifier_codepoint = self.it.peek(1);
            }
            var identifier_string = self.it.bytes[start_index..self.it.i];
            // Keywords win over plain identifiers.
            for (Keywords) |keyword| {
                if (std.mem.eql(u8, keyword.keyword, identifier_string)) {
                    return keyword.token;
                }
            }
            return Token{ .identifier = identifier_string };
        } else if (codepoint.len == 1) {
            return error.InvalidToken;
        }
        return null;
    }

    /// Returns the `look_ahead`-th upcoming token without consuming input
    /// (the iterator is restored on exit).
    pub fn peek(self: *Self, look_ahead: usize) !?Token {
        var backup_it = self.it;
        defer {
            self.it = backup_it;
        }
        var peek_token: ?Token = null;
        var count: usize = 0;
        while (count < look_ahead) : (count += 1) {
            peek_token = try self.next();
        }
        return peek_token;
    }
};

/// Recursive-descent parser for one `{{...}}` expression.
pub const Parser = struct {
    lexer: Lexer,
    allocator: *Allocator,

    const Expression = union(enum) {
        end: bool,
        field_qualifier: []const u8,
        for_loop: struct {
            expression: ForExpression,
            variable_captures: std.ArrayList([]const u8),
        },
        pub fn deinit(self: @This()) void {
            switch (self) {
                .for_loop => |for_loop| {
                    for_loop.variable_captures.deinit();
                },
                else => {},
            }
        }
    };
    const ForExpression = union(enum) {
        field_qualifier: []const u8,
        range: struct {
            start: usize,
            end: usize,
        },
    };
    const Self = @This();

    pub fn init(allocator: *Allocator, input: []const u8) !Self {
        return Self{
            .lexer = try Lexer.init(input),
            .allocator = allocator,
        };
    }

    pub fn initFromIterator(allocator: *Allocator, it: std.unicode.Utf8Iterator) Self {
        return Self{
            .lexer = Lexer.initFromIterator(it),
            .allocator = allocator,
        };
    }

    // Inside template grammar
    //IDENTIFIER: [_a-zA-Z][0-9a-zA-Z_]+;
    //NUMBER: [0-9]+
    //
    //root: expression '}}'
    //    ;
    //
    //expression: field_qualifier
    //    | 'for' '(' for_expression ')' ('|' IDENTIFIER (',' IDENTIFIER)* '|' )
    //    | 'end'
    //    ;
    //
    //field_qualifier:
    //    IDENTIFIER ('.' IDENTIFIER)*
    //    ;
    //
    //for_expression: field_qualifier
    //    | NUMBER..NUMBER
    //    ;

    /// Parses one expression and the closing "}}". Returns null only on
    /// empty input.
    pub fn parse(self: *Self) !?Expression {
        var peek_token_opt = try self.lexer.peek(1);
        var expression: ?Expression = null;
        if (peek_token_opt) |peek_token| {
            switch (peek_token) {
                .identifier => {
                    expression = try self.parseFieldQualifier();
                },
                .end => {
                    _ = try self.lexer.next();
                    expression = Expression{ .end = true };
                },
                .for_loop => {
                    expression = try self.parseForLoop();
                },
                else => {
                    return error.InvalidToken;
                },
            }
            // Every expression must be terminated by "}}".
            if (try self.lexer.next()) |next_token| {
                if (next_token != .end_template) {
                    return error.MismatchedTemplateDelimiter;
                }
            } else {
                return error.MismatchedTemplateDelimiter;
            }
        }
        return expression;
    }

    // Parses IDENTIFIER ('.' IDENTIFIER)* and returns the raw byte span.
    fn parseFieldQualifier(self: *Self) !?Expression {
        var start_index = self.lexer.it.i;
        // Eat identifier token
        _ = try self.lexer.next();
        var peek = try self.lexer.peek(1);
        while (peek != null and peek.? == .dot) {
            // Eat the dot
            _ = try self.lexer.next();
            if (try self.lexer.peek(1)) |peek2| {
                if (peek2 != .identifier) {
                    return error.ParseError;
                }
            }
            // Eat the identifier
            _ = try self.lexer.next();
            peek = try self.lexer.peek(1);
        }
        var end_index = self.lexer.it.i;
        return Expression{
            .field_qualifier = self.lexer.it.bytes[start_index..end_index],
        };
    }

    // Parses 'for' '(' for_expression ')' with an optional |a, b| capture list.
    fn parseForLoop(self: *Self) !?Expression {
        // Eat for token
        _ = try self.lexer.next();
        // Eat left paren
        if (try self.lexer.peek(1)) |peek| {
            if (peek != .left_paren) {
                return error.ParseError;
            }
            _ = try self.lexer.next();
        }
        var inner_expression = try self.parseForExpression();
        // Eat right paren
        if (try self.lexer.peek(1)) |peek| {
            if (peek != .right_paren) {
                return error.ParseError;
            }
            _ = try self.lexer.next();
        }
        var variable_captures = std.ArrayList([]const u8).init(self.allocator);
        errdefer variable_captures.deinit();
        // Check if the variable capture section is present
        if (try self.lexer.peek(1)) |peek| {
            if (peek == .vertical_line) {
                _ = try self.lexer.next();
                var peek2 = try self.lexer.peek(1);
                while (peek2 != null and peek2.? != .vertical_line) {
                    if (peek2.? != .identifier) {
                        return error.ParseError;
                    }
                    if (try self.lexer.next()) |token| {
                        try variable_captures.append(token.identifier);
                    }
                    if (try self.lexer.peek(1)) |peek3| {
                        if (peek3 == .comma) {
                            _ = try self.lexer.next();
                        }
                    }
                    peek2 = try self.lexer.peek(1);
                }
                // Eat the closing vertical line.
                if (peek2 != null and peek2.? == .vertical_line) {
                    _ = try self.lexer.next();
                }
            }
        }
        if (inner_expression) |inner| {
            return Expression{
                .for_loop = .{
                    .expression = inner,
                    .variable_captures = variable_captures,
                },
            };
        }
        return null;
    }

    // Parses the parenthesised part of a for loop: a field name or a
    // NUMBER..NUMBER range.
    fn parseForExpression(self: *Self) !?ForExpression {
        var peek_token_opt = try self.lexer.peek(1);
        if (peek_token_opt) |peek_token| {
            switch (peek_token) {
                .identifier => {
                    if (try self.lexer.next()) |token| {
                        return ForExpression{ .field_qualifier = token.identifier };
                    }
                },
                .number => {
                    var start: usize = 0;
                    var end: usize = 0;
                    if (try self.lexer.next()) |token| {
                        start = token.number;
                    }
                    if (try self.lexer.peek(1)) |peek| {
                        if (peek != .range) {
                            return error.ParseError;
                        }
                        _ = try self.lexer.next();
                    }
                    if (try self.lexer.peek(1)) |peek| {
                        if (peek != .number) {
                            return error.ParseError;
                        }
                        if (try self.lexer.next()) |token| {
                            end = token.number;
                        }
                    }
                    return ForExpression{
                        .range = .{
                            .start = start,
                            .end = end,
                        },
                    };
                },
                else => {
                    return error.ParseError;
                },
            }
        }
        return null;
    }
};

// Copy of std.ArrayList for now, with items renamed to str.
// In the future, maybe it will a more optimized data structure for mutable string buffer
pub const StringBuffer = struct {
    const Self = @This();

    /// Content of the string buffer
    str: Slice,
    capacity: usize,
    allocator: *Allocator,

    pub const Slice = []u8;
    pub const SliceConst = []const u8;

    /// Deinitialize with `deinit` or use `toOwnedSlice`.
    pub fn init(allocator: *Allocator) Self {
        return Self{
            .str = &[_]u8{},
            .capacity = 0,
            .allocator = allocator,
        };
    }

    /// Initialize with capacity to hold at least num elements.
    /// Deinitialize with `deinit` or use `toOwnedSlice`.
    pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
        var self = Self.init(allocator);
        // Fix: `alignment` was an undeclared identifier copied from the
        // generic ArrayListAligned; null requests the natural alignment.
        const new_memory = try self.allocator.allocAdvanced(u8, null, num, .at_least);
        self.str.ptr = new_memory.ptr;
        self.capacity = new_memory.len;
        return self;
    }

    /// Release all allocated memory.
    pub fn deinit(self: Self) void {
        self.allocator.free(self.allocatedSlice());
    }

    /// ArrayList takes ownership of the passed in slice. The slice must have been
    /// allocated with `allocator`.
    /// Deinitialize with `deinit` or use `toOwnedSlice`.
    pub fn fromOwnedSlice(allocator: *Allocator, slice: Slice) Self {
        return Self{
            .str = slice,
            .capacity = slice.len,
            .allocator = allocator,
        };
    }

    /// The caller owns the returned memory. ArrayList becomes empty.
    pub fn toOwnedSlice(self: *Self) Slice {
        const allocator = self.allocator;
        const result = allocator.shrink(self.allocatedSlice(), self.str.len);
        self.* = init(allocator);
        return result;
    }

    /// The caller owns the returned memory. ArrayList becomes empty.
    pub fn toOwnedSliceSentinel(self: *Self, comptime sentinel: u8) ![:sentinel]u8 {
        try self.append(sentinel);
        const result = self.toOwnedSlice();
        return result[0 .. result.len - 1 :sentinel];
    }

    /// Insert `item` at index `n` by moving `list[n .. list.len]` to make room.
    /// This operation is O(N).
    pub fn insert(self: *Self, n: usize, item: u8) !void {
        try self.ensureCapacity(self.str.len + 1);
        self.str.len += 1;
        mem.copyBackwards(u8, self.str[n + 1 .. self.str.len], self.str[n .. self.str.len - 1]);
        self.str[n] = item;
    }

    /// Insert slice `str` at index `i` by moving `list[i .. list.len]` to make room.
    /// This operation is O(N).
    pub fn insertSlice(self: *Self, i: usize, str: SliceConst) !void {
        try self.ensureCapacity(self.str.len + str.len);
        self.str.len += str.len;
        mem.copyBackwards(u8, self.str[i + str.len .. self.str.len], self.str[i .. self.str.len - str.len]);
        mem.copy(u8, self.str[i .. i + str.len], str);
    }

    /// Replace range of elements `list[start..start+len]` with `new_items`
    /// grows list if `len < new_items.len`. may allocate
    /// shrinks list if `len > new_items.len`
    pub fn replaceRange(self: *Self, start: usize, len: usize, new_items: SliceConst) !void {
        const after_range = start + len;
        const range = self.str[start..after_range];
        if (range.len == new_items.len)
            mem.copy(u8, range, new_items)
        else if (range.len < new_items.len) {
            const first = new_items[0..range.len];
            const rest = new_items[range.len..];
            mem.copy(u8, range, first);
            try self.insertSlice(after_range, rest);
        } else {
            mem.copy(u8, range, new_items);
            const after_subrange = start + new_items.len;
            for (self.str[after_range..]) |item, i| {
                self.str[after_subrange..][i] = item;
            }
            self.str.len -= len - new_items.len;
        }
    }

    /// Extend the list by 1 element. Allocates more memory as necessary.
    pub fn append(self: *Self, item: u8) !void {
        const new_item_ptr = try self.addOne();
        new_item_ptr.* = item;
    }

    /// Extend the list by 1 element, but asserting `self.capacity`
    /// is sufficient to hold an additional item.
    pub fn appendAssumeCapacity(self: *Self, item: u8) void {
        const new_item_ptr = self.addOneAssumeCapacity();
        new_item_ptr.* = item;
    }

    /// Remove the element at index `i` from the list and return its value.
    /// Asserts the array has at least one item.
    /// This operation is O(N).
    pub fn orderedRemove(self: *Self, i: usize) u8 {
        const newlen = self.str.len - 1;
        if (newlen == i) return self.pop();
        const old_item = self.str[i];
        for (self.str[i..newlen]) |*b, j| b.* = self.str[i + 1 + j];
        self.str[newlen] = undefined;
        self.str.len = newlen;
        return old_item;
    }

    /// Removes the element at the specified index and returns it.
    /// The empty slot is filled from the end of the list.
    /// This operation is O(1).
    pub fn swapRemove(self: *Self, i: usize) u8 {
        if (self.str.len - 1 == i) return self.pop();
        const old_item = self.str[i];
        self.str[i] = self.pop();
        return old_item;
    }

    /// Append the slice of str to the list. Allocates more
    /// memory as necessary.
    pub fn appendSlice(self: *Self, str: SliceConst) !void {
        try self.ensureCapacity(self.str.len + str.len);
        self.appendSliceAssumeCapacity(str);
    }

    /// Append the slice of str to the list, asserting the capacity is already
    /// enough to store the new str.
    pub fn appendSliceAssumeCapacity(self: *Self, str: SliceConst) void {
        const oldlen = self.str.len;
        const newlen = self.str.len + str.len;
        self.str.len = newlen;
        std.mem.copy(u8, self.str[oldlen..], str);
    }

    pub usingnamespace struct {
        pub const Writer = std.io.Writer(*Self, error{OutOfMemory}, appendWrite);

        /// Initializes a Writer which will append to the list.
        pub fn writer(self: *Self) Writer {
            return .{ .context = self };
        }

        /// Deprecated: use `writer`
        pub const outStream = writer;

        /// Same as `append` except it returns the number of bytes written, which is always the same
        /// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
        fn appendWrite(self: *Self, m: []const u8) !usize {
            try self.appendSlice(m);
            return m.len;
        }
    };

    /// Append a value to the list `n` times.
    /// Allocates more memory as necessary.
    pub fn appendNTimes(self: *Self, value: u8, n: usize) !void {
        const old_len = self.str.len;
        try self.resize(self.str.len + n);
        mem.set(u8, self.str[old_len..self.str.len], value);
    }

    /// Append a value to the list `n` times.
    /// Asserts the capacity is enough.
    pub fn appendNTimesAssumeCapacity(self: *Self, value: u8, n: usize) void {
        const new_len = self.str.len + n;
        assert(new_len <= self.capacity);
        mem.set(u8, self.str.ptr[self.str.len..new_len], value);
        self.str.len = new_len;
    }

    /// Adjust the list's length to `new_len`.
    /// Does not initialize added str if any.
    pub fn resize(self: *Self, new_len: usize) !void {
        try self.ensureCapacity(new_len);
        self.str.len = new_len;
    }

    /// Reduce allocated capacity to `new_len`.
    /// Invalidates element pointers.
    pub fn shrink(self: *Self, new_len: usize) void {
        assert(new_len <= self.str.len);
        self.str = self.allocator.realloc(self.allocatedSlice(), new_len) catch |e| switch (e) {
            error.OutOfMemory => {
                // no problem, capacity is still correct then.
                self.str.len = new_len;
                return;
            },
        };
        self.capacity = new_len;
    }

    /// Reduce length to `new_len`.
    /// Invalidates element pointers.
    /// Keeps capacity the same.
    pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
        assert(new_len <= self.str.len);
        self.str.len = new_len;
    }

    pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
        var better_capacity = self.capacity;
        if (better_capacity >= new_capacity) return;
        while (true) {
            better_capacity += better_capacity / 2 + 8;
            if (better_capacity >= new_capacity) break;
        }
        // TODO This can be optimized to avoid needlessly copying undefined memory.
        const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), better_capacity);
        self.str.ptr = new_memory.ptr;
        self.capacity = new_memory.len;
    }

    /// Increases the array's length to match the full capacity that is already allocated.
    /// The new elements have `undefined` values. This operation does not invalidate any
    /// element pointers.
    pub fn expandToCapacity(self: *Self) void {
        self.str.len = self.capacity;
    }

    /// Increase length by 1, returning pointer to the new item.
    /// The returned pointer becomes invalid when the list is resized.
    pub fn addOne(self: *Self) !*u8 {
        const newlen = self.str.len + 1;
        try self.ensureCapacity(newlen);
        return self.addOneAssumeCapacity();
    }

    /// Increase length by 1, returning pointer to the new item.
    /// Asserts that there is already space for the new item without allocating more.
    /// The returned pointer becomes invalid when the list is resized.
    pub fn addOneAssumeCapacity(self: *Self) *u8 {
        assert(self.str.len < self.capacity);
        self.str.len += 1;
        return &self.str[self.str.len - 1];
    }

    /// Resize the array, adding `n` new elements, which have `undefined` values.
    /// The return value is an array pointing to the newly allocated elements.
    pub fn addManyAsArray(self: *Self, comptime n: usize) !*[n]u8 {
        const prev_len = self.str.len;
        try self.resize(self.str.len + n);
        return self.str[prev_len..][0..n];
    }

    /// Resize the array, adding `n` new elements, which have `undefined` values.
    /// The return value is an array pointing to the newly allocated elements.
    /// Asserts that there is already space for the new item without allocating more.
    pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]u8 {
        assert(self.str.len + n <= self.capacity);
        const prev_len = self.str.len;
        self.str.len += n;
        return self.str[prev_len..][0..n];
    }

    /// Remove and return the last element from the list.
    /// Asserts the list has at least one item.
    pub fn pop(self: *Self) u8 {
        const val = self.str[self.str.len - 1];
        self.str.len -= 1;
        return val;
    }

    /// Remove and return the last element from the list.
    /// If the list is empty, returns `null`.
    pub fn popOrNull(self: *Self) ?u8 {
        if (self.str.len == 0) return null;
        return self.pop();
    }

    // For a nicer API, `str.len` is the length, not the capacity.
    // This requires "unsafe" slicing.
    fn allocatedSlice(self: Self) Slice {
        return self.str.ptr[0..self.capacity];
    }
};
src/main.zig
const std = @import("std");
usingnamespace (@import("../machine.zig"));
usingnamespace (@import("../util.zig"));

// Encoding tests for the AVX-512 opmask (K0-K7) register instructions.
// Each expected value is the exact byte encoding the assembler must emit;
// entries that pass an AsmError instead assert that encoding is rejected.
test "AVX512 - mask register" {
    const m32 = Machine.init(.x86_32);
    const m64 = Machine.init(.x64);

    const reg = Operand.register;
    const imm = Operand.immediate;

    const mem8 = Operand.memoryRm(.DefaultSeg, .BYTE, .EAX, 0);
    const mem16 = Operand.memoryRm(.DefaultSeg, .WORD, .EAX, 0);
    const mem32 = Operand.memoryRm(.DefaultSeg, .DWORD, .EAX, 0);
    const mem64 = Operand.memoryRm(.DefaultSeg, .QWORD, .EAX, 0);

    debugPrint(false);

    // General tests
    {
        // KADD with every combination of low/high mask register operands.
        testOp3(m32, .KADDB, reg(.K0), reg(.K0), reg(.K0), "c5 fd 4a c0");
        testOp3(m32, .KADDW, reg(.K0), reg(.K0), reg(.K0), "c5 fc 4a c0");
        testOp3(m32, .KADDD, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fd 4a c0");
        testOp3(m32, .KADDQ, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fc 4a c0");
        testOp3(m32, .KADDB, reg(.K7), reg(.K7), reg(.K7), "c5 c5 4a ff");
        testOp3(m32, .KADDW, reg(.K7), reg(.K7), reg(.K7), "c5 c4 4a ff");
        testOp3(m32, .KADDD, reg(.K7), reg(.K7), reg(.K7), "c4 e1 c5 4a ff");
        testOp3(m32, .KADDQ, reg(.K7), reg(.K7), reg(.K7), "c4 e1 c4 4a ff");
        testOp3(m32, .KADDB, reg(.K0), reg(.K7), reg(.K7), "c5 c5 4a c7");
        testOp3(m32, .KADDW, reg(.K0), reg(.K7), reg(.K7), "c5 c4 4a c7");
        testOp3(m32, .KADDD, reg(.K0), reg(.K7), reg(.K7), "c4 e1 c5 4a c7");
        testOp3(m32, .KADDQ, reg(.K0), reg(.K7), reg(.K7), "c4 e1 c4 4a c7");
        testOp3(m32, .KADDB, reg(.K7), reg(.K0), reg(.K7), "c5 fd 4a ff");
        testOp3(m32, .KADDW, reg(.K7), reg(.K0), reg(.K7), "c5 fc 4a ff");
        testOp3(m32, .KADDD, reg(.K7), reg(.K0), reg(.K7), "c4 e1 fd 4a ff");
        testOp3(m32, .KADDQ, reg(.K7), reg(.K0), reg(.K7), "c4 e1 fc 4a ff");
        testOp3(m32, .KADDB, reg(.K7), reg(.K7), reg(.K0), "c5 c5 4a f8");
        testOp3(m32, .KADDW, reg(.K7), reg(.K7), reg(.K0), "c5 c4 4a f8");
        testOp3(m32, .KADDD, reg(.K7), reg(.K7), reg(.K0), "c4 e1 c5 4a f8");
        testOp3(m32, .KADDQ, reg(.K7), reg(.K7), reg(.K0), "c4 e1 c4 4a f8");
    }

    // KMOV
    {
        // 64-bit mode: k <- k, k <-> memory, k <-> general purpose register.
        testOp2(m64, .KMOVB, reg(.K0), reg(.K0), "c5 f9 90 c0 ");
        testOp2(m64, .KMOVW, reg(.K0), reg(.K0), "c5 f8 90 c0 ");
        testOp2(m64, .KMOVD, reg(.K0), reg(.K0), "c4 e1 f9 90 c0");
        testOp2(m64, .KMOVQ, reg(.K0), reg(.K0), "c4 e1 f8 90 c0");
        testOp2(m64, .KMOVB, reg(.K0), mem8, "67 c5 f9 90 00 ");
        testOp2(m64, .KMOVW, reg(.K0), mem16, "67 c5 f8 90 00 ");
        testOp2(m64, .KMOVD, reg(.K0), mem32, "67 c4 e1 f9 90 00");
        testOp2(m64, .KMOVQ, reg(.K0), mem64, "67 c4 e1 f8 90 00");
        testOp2(m64, .KMOVB, mem8, reg(.K0), "67 c5 f9 91 00 ");
        testOp2(m64, .KMOVW, mem16, reg(.K0), "67 c5 f8 91 00 ");
        testOp2(m64, .KMOVD, mem32, reg(.K0), "67 c4 e1 f9 91 00");
        testOp2(m64, .KMOVQ, mem64, reg(.K0), "67 c4 e1 f8 91 00");
        testOp2(m64, .KMOVB, reg(.K0), reg(.EAX), "c5 f9 92 c0 ");
        testOp2(m64, .KMOVW, reg(.K0), reg(.EAX), "c5 f8 92 c0 ");
        testOp2(m64, .KMOVD, reg(.K0), reg(.EAX), "c5 fb 92 c0 ");
        testOp2(m64, .KMOVQ, reg(.K0), reg(.RAX), "c4 e1 fb 92 c0");
        testOp2(m64, .KMOVB, reg(.EAX), reg(.K0), "c5 f9 93 c0 ");
        testOp2(m64, .KMOVW, reg(.EAX), reg(.K0), "c5 f8 93 c0 ");
        testOp2(m64, .KMOVD, reg(.EAX), reg(.K0), "c5 fb 93 c0 ");
        testOp2(m64, .KMOVQ, reg(.RAX), reg(.K0), "c4 e1 fb 93 c0");

        // 32-bit mode: same forms, but KMOVQ with a 64-bit GPR must be rejected.
        testOp2(m32, .KMOVB, reg(.K0), reg(.K0), "c5 f9 90 c0 ");
        testOp2(m32, .KMOVW, reg(.K0), reg(.K0), "c5 f8 90 c0 ");
        testOp2(m32, .KMOVD, reg(.K0), reg(.K0), "c4 e1 f9 90 c0");
        testOp2(m32, .KMOVQ, reg(.K0), reg(.K0), "c4 e1 f8 90 c0");
        testOp2(m32, .KMOVB, reg(.K0), mem8, "c5 f9 90 00 ");
        testOp2(m32, .KMOVW, reg(.K0), mem16, "c5 f8 90 00 ");
        testOp2(m32, .KMOVD, reg(.K0), mem32, "c4 e1 f9 90 00");
        testOp2(m32, .KMOVQ, reg(.K0), mem64, "c4 e1 f8 90 00");
        testOp2(m32, .KMOVB, mem8, reg(.K0), "c5 f9 91 00 ");
        testOp2(m32, .KMOVW, mem16, reg(.K0), "c5 f8 91 00 ");
        testOp2(m32, .KMOVD, mem32, reg(.K0), "c4 e1 f9 91 00");
        testOp2(m32, .KMOVQ, mem64, reg(.K0), "c4 e1 f8 91 00");
        testOp2(m32, .KMOVB, reg(.K0), reg(.EAX), "c5 f9 92 c0 ");
        testOp2(m32, .KMOVW, reg(.K0), reg(.EAX), "c5 f8 92 c0 ");
        testOp2(m32, .KMOVD, reg(.K0), reg(.EAX), "c5 fb 92 c0 ");
        testOp2(m32, .KMOVQ, reg(.K0), reg(.RAX), AsmError.InvalidOperand);
        testOp2(m32, .KMOVB, reg(.EAX), reg(.K0), "c5 f9 93 c0 ");
        testOp2(m32, .KMOVW, reg(.EAX), reg(.K0), "c5 f8 93 c0 ");
        testOp2(m32, .KMOVD, reg(.EAX), reg(.K0), "c5 fb 93 c0 ");
        testOp2(m32, .KMOVQ, reg(.RAX), reg(.K0), AsmError.InvalidOperand);
    }

    // Remaining opmask ALU/shift/test/unpack instructions in 64-bit mode.
    testOp3(m64, .KADDB, reg(.K0), reg(.K0), reg(.K0), "c5 fd 4a c0");
    testOp3(m64, .KADDW, reg(.K0), reg(.K0), reg(.K0), "c5 fc 4a c0");
    testOp3(m64, .KADDD, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fd 4a c0");
    testOp3(m64, .KADDQ, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fc 4a c0");
    testOp3(m64, .KANDB, reg(.K0), reg(.K0), reg(.K0), "c5 fd 41 c0");
    testOp3(m64, .KANDW, reg(.K0), reg(.K0), reg(.K0), "c5 fc 41 c0");
    testOp3(m64, .KANDD, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fd 41 c0");
    testOp3(m64, .KANDQ, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fc 41 c0");
    testOp3(m64, .KANDNB, reg(.K0), reg(.K0), reg(.K0), "c5 fd 42 c0");
    testOp3(m64, .KANDNW, reg(.K0), reg(.K0), reg(.K0), "c5 fc 42 c0");
    testOp3(m64, .KANDND, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fd 42 c0");
    testOp3(m64, .KANDNQ, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fc 42 c0");
    testOp2(m64, .KNOTB, reg(.K0), reg(.K0), "c5 f9 44 c0");
    testOp2(m64, .KNOTW, reg(.K0), reg(.K0), "c5 f8 44 c0");
    testOp2(m64, .KNOTD, reg(.K0), reg(.K0), "c4 e1 f9 44 c0");
    testOp2(m64, .KNOTQ, reg(.K0), reg(.K0), "c4 e1 f8 44 c0");
    testOp3(m64, .KORB, reg(.K0), reg(.K0), reg(.K0), "c5 fd 45 c0");
    testOp3(m64, .KORW, reg(.K0), reg(.K0), reg(.K0), "c5 fc 45 c0");
    testOp3(m64, .KORD, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fd 45 c0");
    testOp3(m64, .KORQ, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fc 45 c0");
    testOp2(m64, .KORTESTB, reg(.K0), reg(.K0), "c5 f9 98 c0");
    testOp2(m64, .KORTESTW, reg(.K0), reg(.K0), "c5 f8 98 c0");
    testOp2(m64, .KORTESTD, reg(.K0), reg(.K0), "c4 e1 f9 98 c0");
    testOp2(m64, .KORTESTQ, reg(.K0), reg(.K0), "c4 e1 f8 98 c0");
    testOp3(m64, .KSHIFTLB, reg(.K0), reg(.K0), imm(0), "c4 e3 79 32 c0 00");
    testOp3(m64, .KSHIFTLW, reg(.K0), reg(.K0), imm(0), "c4 e3 f9 32 c0 00");
    testOp3(m64, .KSHIFTLD, reg(.K0), reg(.K0), imm(0), "c4 e3 79 33 c0 00");
    testOp3(m64, .KSHIFTLQ, reg(.K0), reg(.K0), imm(0), "c4 e3 f9 33 c0 00");
    testOp3(m64, .KSHIFTRB, reg(.K0), reg(.K0), imm(0), "c4 e3 79 30 c0 00");
    testOp3(m64, .KSHIFTRW, reg(.K0), reg(.K0), imm(0), "c4 e3 f9 30 c0 00");
    testOp3(m64, .KSHIFTRD, reg(.K0), reg(.K0), imm(0), "c4 e3 79 31 c0 00");
    testOp3(m64, .KSHIFTRQ, reg(.K0), reg(.K0), imm(0), "c4 e3 f9 31 c0 00");
    testOp2(m64, .KTESTB, reg(.K0), reg(.K0), "c5 f9 99 c0");
    testOp2(m64, .KTESTW, reg(.K0), reg(.K0), "c5 f8 99 c0");
    testOp2(m64, .KTESTD, reg(.K0), reg(.K0), "c4 e1 f9 99 c0");
    testOp2(m64, .KTESTQ, reg(.K0), reg(.K0), "c4 e1 f8 99 c0");
    testOp3(m64, .KUNPCKBW, reg(.K0), reg(.K0), reg(.K0), "c5 fd 4b c0 ");
    testOp3(m64, .KUNPCKWD, reg(.K0), reg(.K0), reg(.K0), "c5 fc 4b c0 ");
    testOp3(m64, .KUNPCKDQ, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fc 4b c0");
    testOp3(m64, .KXNORB, reg(.K0), reg(.K0), reg(.K0), "c5 fd 46 c0 ");
    testOp3(m64, .KXNORW, reg(.K0), reg(.K0), reg(.K0), "c5 fc 46 c0 ");
    testOp3(m64, .KXNORD, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fd 46 c0");
    testOp3(m64, .KXNORQ, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fc 46 c0");
    testOp3(m64, .KXORB, reg(.K0), reg(.K0), reg(.K0), "c5 fd 47 c0 ");
    testOp3(m64, .KXORW, reg(.K0), reg(.K0), reg(.K0), "c5 fc 47 c0 ");
    testOp3(m64, .KXORD, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fd 47 c0");
    testOp3(m64, .KXORQ, reg(.K0), reg(.K0), reg(.K0), "c4 e1 fc 47 c0");
}
src/x86/tests/mask_register.zig
const zt = @import("deps/ZT/src/zt.zig");
const std = @import("std");
const sling = @import("sling.zig");
const ig = @import("deps/ZT/src/pkg/imgui.zig");
const gl = @import("deps/ZT/src/pkg/gl.zig");
const Self = @This();

/// Which coordinate space a render request is submitted in.
pub const Space = enum { screen, world };

/// Nine-slice ("9-patch") description: a source sub-rectangle plus the
/// widths of the four non-stretching borders.
pub const Patch = struct {
    subRect: sling.math.Rect = .{},
    left: f32 = 0,
    top: f32 = 0,
    right: f32 = 0,
    bottom: f32 = 0,
};

const SortingContext = struct {
    // ???
};

/// Draw-order key: either a plain depth value, or a depth combined with a
/// y coordinate for y-sorted (top-down) scenes.
pub const Depth = union(enum) {
    YSorted: YSorted,
    Regular: f32,

    pub fn initY(y: f32, depth: f32) Depth {
        return .{ .YSorted = .{
            .depth = depth,
            .y = y,
        } };
    }
    pub fn init(depth: f32) Depth {
        return .{ .Regular = depth };
    }
    /// Returns a copy of this depth moved `amount` closer to the camera
    /// (lower depth value draws on top, matching `sort` below).
    pub fn higher(self: Depth, amount: f32) Depth {
        switch (self) {
            .YSorted => {
                return initY(self.YSorted.y, self.YSorted.depth - amount);
            },
            .Regular => {
                // FIX: previously returned the bare f32 (`self.Regular - amount`),
                // which cannot coerce to `Depth` — wrap it like the YSorted arm does.
                return init(self.Regular - amount);
            },
        }
    }
    pub fn lower(self: Depth, amount: f32) Depth {
        return self.higher(-amount);
    }
    pub fn getDepth(self: Depth) f32 {
        switch (self) {
            .YSorted => return self.YSorted.depth,
            .Regular => return self.Regular,
        }
    }
    pub const YSorted = struct { depth: f32, y: f32 };
};

/// Payload of a queued draw call, one variant per primitive kind.
pub const RenderRequestData = union(enum) {
    Texture: Texture,
    Quad: Quad,
    Line: Line,
    Circle: Circle,
    Rectangle: Rectangle,

    pub const Texture = struct {
        position: sling.math.Vec2,
        targetTexture: usize,
        tint: sling.math.Vec4,
        source: ?sling.math.Rect,
        size: sling.math.Vec2,
    };
    pub const Quad = struct {
        verts: [4]sling.Vertex,
        targetTexture: usize,
        tint: sling.math.Vec4,
    };
    pub const Line = struct {
        from: sling.math.Vec2,
        to: sling.math.Vec2,
        width: f32,
        colorFrom: sling.math.Vec4,
        colorTo: sling.math.Vec4,
    };
    pub const Circle = struct {
        position: sling.math.Vec2,
        radius: f32,
        color: sling.math.Vec4,
    };
    pub const Rectangle = struct {
        value: sling.math.Rect,
        thickness: f32,
        color: sling.math.Vec4,
    };
};

/// A queued draw call: depth key, optional shader override, and payload.
pub const RenderRequest = struct {
    depth: Depth,
    shader: ?sling.Shader = null,
    data: RenderRequestData,
};

/// Sort comparator: deeper requests first; ties broken by y (for y-sorted
/// pairs) and then by texture id to improve draw-call batching.
fn sort(context: SortingContext, lhs: RenderRequest, rhs: RenderRequest) bool {
    _ = context;
    const ldep = lhs.depth.getDepth();
    const rdep = rhs.depth.getDepth();
    if (ldep != rdep) {
        return ldep > rdep;
    }
    if (lhs.depth == .YSorted and rhs.depth == .YSorted) {
        return lhs.depth.YSorted.y < rhs.depth.YSorted.y;
    }
    if (lhs.data == .Texture and rhs.data == .Texture) {
        return lhs.data.Texture.targetTexture > rhs.data.Texture.targetTexture;
    }
    return ldep > rdep;
}

pub var SortingBehaviour: SortingContext = .{};

// NOTE(review): this default (`sling.types.alloc`) looks like it should be an
// ArrayList value, not an allocator — it is never used because `init` always
// assigns the field, but verify the intended default.
worldRequests: std.ArrayList(RenderRequest) = sling.types.alloc,
screenRequests: std.ArrayList(RenderRequest),
renderer: zt.game.Renderer,
whitePixelTex: sling.asset.Texture,
debugDepth: Depth = Depth.init(-300_000),
camera: sling.Camera = .{},
currentShader: ?sling.Shader = null,

/// Create the renderer, including a 1x1 white fallback texture used for
/// untextured primitives (lines, circles, filled rectangles).
pub fn init() Self {
    var pixel: sling.asset.Texture = .{
        .internal = zt.gl.Texture.initBlank(1, 1),
    };
    pixel.whitePixel = zt.math.rect(0, 0, 1, 1);
    pixel.internal.bind();
    var white = [_]u8{ 255, 255, 255 };
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB, 1, 1, 0, gl.GL_RGB, gl.GL_UNSIGNED_BYTE, &white);
    pixel.internal.unbind();
    return .{
        .worldRequests = std.ArrayList(RenderRequest).init(std.heap.c_allocator),
        .screenRequests = std.ArrayList(RenderRequest).init(std.heap.c_allocator),
        .renderer = zt.game.Renderer.init(),
        .whitePixelTex = pixel,
    };
}

/// Sort and submit all queued world- and screen-space requests, then clear
/// both queues (retaining capacity for the next frame).
pub fn flush(self: *Self) void {
    self.camera.recalc();
    // World
    self.renderer.projectionMatrix = self.camera.projectionMatrix;
    self.renderer.viewMatrix = self.camera.viewMatrix;
    self.flushBuffer(&self.worldRequests);
    // Screen
    if (self.screenRequests.items.len > 0) {
        self.renderer.viewMatrix = sling.math.Mat4.identity;
        self.flushBuffer(&self.screenRequests);
    }
    // Clear
    self.worldRequests.clearRetainingCapacity();
    self.screenRequests.clearRetainingCapacity();
}

/// Sort `target` and issue its requests, flushing the underlying renderer
/// whenever the active shader changes.
fn flushBuffer(self: *Self, target: *std.ArrayList(RenderRequest)) void {
    var lastTexture: ?*sling.asset.Texture = &self.whitePixelTex;
    var lastShader: ?sling.Shader = null;
    std.sort.sort(RenderRequest, target.items, SortingBehaviour, sort);
    for (target.items) |*req| {
        // Sort shader before hand.
        if (req.shader) |shd| {
            if (lastShader) |lshd| {
                if (lshd.id != shd.id) {
                    self.renderer.flush();
                    lastShader = req.shader;
                    self.renderer.updateShader(&(req.shader.?));
                }
            } else {
                self.renderer.flush();
                lastShader = req.shader;
                self.renderer.updateShader(&(req.shader.?));
            }
        } else {
            if (lastShader != null) {
                self.renderer.flush();
                lastShader = req.shader;
                self.renderer.updateShader(null);
            }
        }
        switch (req.data) {
            .Texture => {
                var tex: *sling.asset.Texture = sling.asset.get(sling.asset.Texture, req.data.Texture.targetTexture);
                var src: sling.math.Rect = undefined;
                if (req.data.Texture.source) |sourceValue| {
                    src = sourceValue;
                } else {
                    src = sling.math.rect(0, 0, tex.internal.width, tex.internal.height);
                }
                self.renderer.sprite(tex.*.internal, req.data.Texture.position, 0.5, req.data.Texture.size, req.data.Texture.tint, null, src);
                lastTexture = tex;
            },
            .Quad => {
                var tex: *sling.asset.Texture = sling.asset.get(sling.asset.Texture, req.data.Quad.targetTexture);
                var verts = req.data.Quad.verts;
                // Quad verts carry pixel texcoords; normalize into 0..1 UV space.
                for (verts) |*vertex| {
                    vertex.tex = vertex.tex.div(.{ .x = tex.internal.width, .y = tex.internal.height });
                }
                self.renderer.spriteManual(tex.*.internal, verts[0], verts[1], verts[2], verts[3]);
                lastTexture = tex;
            },
            .Rectangle => {
                // Untextured primitive: reuse the last texture's white pixel if
                // available to avoid a texture switch, else fall back to ours.
                var tex: *sling.asset.Texture = undefined;
                var src: sling.math.Rect = undefined;
                if (lastTexture != null and lastTexture.?.whitePixel != null) {
                    tex = lastTexture.?;
                    src = lastTexture.?.whitePixel.?;
                } else {
                    tex = &self.whitePixelTex;
                    src = sling.math.rect(0, 0, tex.internal.width, tex.internal.height);
                    lastTexture = &self.whitePixelTex;
                }
                if (req.data.Rectangle.thickness > 0) {
                    self.renderer.rectangleHollow(tex.internal, src, req.data.Rectangle.value, 0.5, req.data.Rectangle.thickness, req.data.Rectangle.color);
                } else {
                    self.renderer.sprite(tex.*.internal, req.data.Rectangle.value.position, 0.5, req.data.Rectangle.value.size, req.data.Rectangle.color, null, src);
                }
            },
            .Line => {
                var tex: *sling.asset.Texture = undefined;
                var src: sling.math.Rect = undefined;
                if (lastTexture != null and lastTexture.?.whitePixel != null) {
                    tex = lastTexture.?;
                    src = lastTexture.?.whitePixel.?;
                } else {
                    tex = &self.whitePixelTex;
                    src = sling.math.rect(0, 0, tex.internal.width, tex.internal.height);
                    lastTexture = &self.whitePixelTex;
                }
                self.renderer.line(tex.internal, src, req.data.Line.from, req.data.Line.to, 0.5, req.data.Line.width, req.data.Line.colorFrom, req.data.Line.colorTo);
            },
            .Circle => {
                var tex: *sling.asset.Texture = undefined;
                var src: sling.math.Rect = undefined;
                if (lastTexture != null and lastTexture.?.whitePixel != null) {
                    tex = lastTexture.?;
                    src = lastTexture.?.whitePixel.?;
                } else {
                    tex = &self.whitePixelTex;
                    src = sling.math.rect(0, 0, tex.internal.width, tex.internal.height);
                    lastTexture = &self.whitePixelTex;
                }
                self.renderer.circle(tex.internal, src, req.data.Circle.position, req.data.Circle.radius, 0.5, req.data.Circle.color);
            },
        }
    }
    self.renderer.flush();
}

pub fn createShader(self: *Self, content: [*:0]const u8) sling.Shader {
    _ = self;
    return zt.game.Renderer.createShader(content);
}

/// Sets the shader for the next render requests you submit. Make sure to `popShader`
/// when you're done with the shader.
pub fn pushShader(self: *Self, shader: sling.Shader) void {
    self.currentShader = shader;
}
/// Ends the current shader until you set a new one.
pub fn popShader(self: *Self) void {
    self.currentShader = null;
}

/// Queue a textured sprite. `originNormalized` (0..1) offsets the draw so
/// that point of the sprite lands on `position`; null means top-left.
pub fn texture(self: *Self, space: Space, depth: Depth, textureId: usize, position: sling.math.Vec2, size: sling.math.Vec2, color: sling.math.Vec4, sourceRect: ?sling.math.Rect, originNormalized: ?sling.math.Vec2) void {
    var offset: sling.math.Vec2 = .{};
    if (originNormalized) |origin| {
        offset.x = -(size.x * origin.x);
        offset.y = -(size.y * origin.y);
    }
    var targetBuffer = if (space == .world) &self.worldRequests else &self.screenRequests;
    targetBuffer.append(.{ .depth = depth, .shader = self.currentShader, .data = .{ .Texture = .{
        .position = position.add(offset),
        .targetTexture = textureId,
        .tint = color,
        .source = sourceRect,
        .size = size,
    } } }) catch unreachable;
}

/// Queue a sprite rotated by `radians` around `position`. Falls back to the
/// cheaper `texture` path when no rotation is requested.
pub fn textureRotated(self: *Self, space: Space, depth: Depth, textureId: usize, position: sling.math.Vec2, size: sling.math.Vec2, radians: f32, color: sling.math.Vec4, sourceRect: ?sling.math.Rect, originNormalized: ?sling.math.Vec2) void {
    if (radians == 0) {
        self.texture(space, depth, textureId, position, size, color, sourceRect, originNormalized);
        return;
    }
    var rect = if (sourceRect == null) sling.math.rect(0, 0, 1, 1) else sourceRect.?;
    var offset: sling.math.Vec2 = .{};
    if (originNormalized) |origin| {
        offset.x = -(size.x * origin.x);
        offset.y = -(size.y * origin.y);
    }
    const positions: [4]sling.math.Vec2 = .{
        position.add(offset), // tl
        position.add(offset).add(.{ .x = size.x }), // tr
        position.add(offset).add(.{ .y = size.y }), // bl
        position.add(offset).add(size), // br
    };
    const textures: [4]sling.math.Vec2 = .{
        rect.position, // tl
        rect.position.add(.{ .x = rect.size.x }), // tr
        rect.position.add(.{ .y = rect.size.y }), // bl
        rect.position.add(rect.size), // br
    };
    var vertices: [4]sling.Vertex = undefined;
    for (positions) |target, i| {
        // Rotate each corner around `position`.
        const cache = target.sub(position).rotate(radians).add(position);
        vertices[i] = .{
            .pos = sling.math.vec3(cache.x, cache.y, 0.5),
            .tex = textures[i],
            .col = color,
        };
    }
    self.quad(space, depth, textureId, vertices, color);
}

/// Queue a raw 4-vertex quad against `textureId`.
pub fn quad(self: *Self, space: Space, depth: Depth, textureId: usize, verts: [4]sling.Vertex, color: sling.math.Vec4) void {
    var targetBuffer = if (space == .world) &self.worldRequests else &self.screenRequests;
    targetBuffer.append(.{ .depth = depth, .shader = self.currentShader, .data = .{ .Quad = .{
        .verts = verts,
        .targetTexture = textureId,
        .tint = color,
    } } }) catch unreachable;
}

// todo make it scalable, and make it squish to fit smaller than smallest dimensions.
/// Queue a nine-slice ("9-patch") draw: four fixed corners, four stretched
/// edges, and a stretched center, mapped from `patchInfo.subRect` onto
/// `destination`.
pub fn patch(self: *Self, space: Space, depth: Depth, patchInfo: Patch, textureId: usize, destination: sling.math.Rect, color: sling.math.Vec4) void {
    const dpos = destination.position;
    const dsiz = destination.size;
    const spos = patchInfo.subRect.position;
    const ssiz = patchInfo.subRect.size;
    // Corners
    const tl = sling.math.rect(dpos.x, dpos.y, patchInfo.left, patchInfo.top);
    const tls = sling.math.rect(spos.x, spos.y, patchInfo.left, patchInfo.top);
    const tr = sling.math.rect(dpos.x + dsiz.x - patchInfo.right, dpos.y, patchInfo.right, patchInfo.top);
    // FIX: source x previously omitted `spos.x` (read `ssiz.x - right`),
    // which sampled the wrong region for any subRect not at the atlas origin.
    const trs = sling.math.rect(spos.x + ssiz.x - patchInfo.right, spos.y, patchInfo.right, patchInfo.top);
    const bl = sling.math.rect(dpos.x, dpos.y + dsiz.y - patchInfo.bottom, patchInfo.left, patchInfo.bottom);
    const bls = sling.math.rect(spos.x, spos.y + ssiz.y - patchInfo.bottom, patchInfo.left, patchInfo.bottom);
    const br = sling.math.rect(dpos.x + dsiz.x - patchInfo.right, dpos.y + dsiz.y - patchInfo.bottom, patchInfo.right, patchInfo.bottom);
    const brs = sling.math.rect(spos.x + ssiz.x - patchInfo.right, spos.y + ssiz.y - patchInfo.bottom, patchInfo.right, patchInfo.bottom);
    self.texture(space, depth, textureId, tl.position, tl.size, color, tls, null);
    self.texture(space, depth, textureId, tr.position, tr.size, color, trs, null);
    self.texture(space, depth, textureId, bl.position, bl.size, color, bls, null);
    self.texture(space, depth, textureId, br.position, br.size, color, brs, null);
    // Checks for middle pieces.
    const tm = sling.math.rect(dpos.x + patchInfo.left, dpos.y, dsiz.x - patchInfo.left - patchInfo.right, patchInfo.top);
    if (tm.size.x > 0.0) {
        const tms = sling.math.rect(spos.x + patchInfo.left, spos.y, ssiz.x - patchInfo.left - patchInfo.right, patchInfo.top);
        self.texture(space, depth, textureId, tm.position, tm.size, color, tms, null);
    }
    const bm = sling.math.rect(dpos.x + patchInfo.left, dpos.y + dsiz.y - patchInfo.bottom, dsiz.x - patchInfo.left - patchInfo.right, patchInfo.bottom);
    if (bm.size.x > 0.0) {
        // FIX: source height previously used `patchInfo.top`; the bottom strip
        // must sample a strip `patchInfo.bottom` tall.
        const bms = sling.math.rect(spos.x + patchInfo.left, spos.y + ssiz.y - patchInfo.bottom, ssiz.x - patchInfo.left - patchInfo.right, patchInfo.bottom);
        self.texture(space, depth, textureId, bm.position, bm.size, color, bms, null);
    }
    const ml = sling.math.rect(dpos.x, dpos.y + patchInfo.top, patchInfo.left, dsiz.y - patchInfo.top - patchInfo.bottom);
    if (ml.size.y > 0.0) {
        const mls = sling.math.rect(spos.x, spos.y + patchInfo.top, patchInfo.left, ssiz.y - patchInfo.top - patchInfo.bottom);
        self.texture(space, depth, textureId, ml.position, ml.size, color, mls, null);
    }
    const mr = sling.math.rect(dpos.x + dsiz.x - patchInfo.right, dpos.y + patchInfo.top, patchInfo.right, dsiz.y - patchInfo.top - patchInfo.bottom);
    if (mr.size.y > 0.0) {
        // FIX: source width previously used `patchInfo.left`; the right strip
        // must sample a strip `patchInfo.right` wide.
        const mrs = sling.math.rect(spos.x + ssiz.x - patchInfo.right, spos.y + patchInfo.top, patchInfo.right, ssiz.y - patchInfo.top - patchInfo.bottom);
        self.texture(space, depth, textureId, mr.position, mr.size, color, mrs, null);
    }
    const m = sling.math.rect(dpos.x + patchInfo.left, dpos.y + patchInfo.top, dsiz.x - patchInfo.left - patchInfo.right, dsiz.y - patchInfo.top - patchInfo.bottom);
    if (ml.size.y > 0.0 and tm.size.x > 0.0) {
        const ms = sling.math.rect(spos.x + patchInfo.left, spos.y + patchInfo.top, ssiz.x - patchInfo.left - patchInfo.right, ssiz.y - patchInfo.top - patchInfo.bottom);
        self.texture(space, depth, textureId, m.position, m.size, color, ms, null);
    }
}

/// If thickness is null, this is a filled rectangle, otherwise thickness
/// denotes the size of the outlines.
pub fn rectangle(self: *Self, space: Space, depth: Depth, rect: sling.math.Rect, color: sling.math.Vec4, thickness: ?f32) void {
    var targetBuffer = if (space == .world) &self.worldRequests else &self.screenRequests;
    targetBuffer.append(.{ .depth = depth, .shader = self.currentShader, .data = .{ .Rectangle = .{
        .value = rect,
        // -1 signals "filled" to the flush path (it draws hollow only when > 0).
        .thickness = if (thickness != null) thickness.? else -1,
        .color = color,
    } } }) catch unreachable;
}

/// Queue a text string at `point`, honoring '\n' line breaks.
/// `originNormalized` offsets the whole text block by its measured bounds.
pub fn text(self: *Self, space: Space, depth: Depth, fontId: usize, point: sling.math.Vec2, message: []const u8, color: sling.math.Vec4, originNormalized: ?sling.math.Vec2) void {
    var font = sling.asset.get(sling.asset.Font, fontId);
    var stamp: sling.math.Vec2 = point;
    var dests: []sling.math.Rect = sling.alloc.alloc(sling.math.Rect, message.len) catch unreachable;
    defer sling.alloc.free(dests);
    var fullRect: sling.math.Rect = .{ .position = point, .size = .{ .y = font.information.lineHeight } };
    // First pass: lay out glyph destinations and measure the full bounds.
    for (message) |char, i| {
        if (char == '\n') {
            fullRect.size.y += font.information.lineHeight;
            stamp.x = point.x;
            stamp.y += font.information.lineHeight;
            continue;
        }
        if (font.characters.get(char)) |character| {
            dests[i] = sling.math.Rect{ .size = character.atlasSourceRect.size, .position = .{
                .x = stamp.x + character.offset.x,
                .y = stamp.y + character.offset.y,
            } };
            stamp.x += character.advance;
            fullRect.size.x = std.math.max(fullRect.size.x, stamp.x - point.x);
        }
    }
    var offset = if (originNormalized == null) sling.math.Vec2{} else fullRect.size.mul(originNormalized.?);
    // Second pass: emit one texture request per visible glyph.
    for (message) |char, i| {
        if (char == '\n') {
            continue;
        }
        if (font.characters.get(char)) |character| {
            const sourceRect = character.atlasSourceRect;
            const id = font.pages.items[character.atlas];
            const pos = dests[i].position.sub(offset);
            const siz = dests[i].size;
            self.texture(space, depth, id, pos, siz, color, sourceRect, null);
        }
    }
}

/// Queue a solid-color line segment of the given thickness.
pub fn line(self: *Self, space: Space, depth: Depth, from: sling.math.Vec2, to: sling.math.Vec2, thickness: f32, color: sling.math.Vec4) void {
    var targetBuffer = if (space == .world) &self.worldRequests else &self.screenRequests;
    targetBuffer.append(.{ .depth = depth, .shader = self.currentShader, .data = .{ .Line = .{
        .from = from,
        .to = to,
        .width = thickness,
        .colorFrom = color,
        .colorTo = color,
    } } }) catch unreachable;
}

/// Queue a filled circle.
pub fn circle(self: *Self, space: Space, depth: Depth, point: sling.math.Vec2, radius: f32, color: sling.math.Vec4) void {
    var targetBuffer = if (space == .world) &self.worldRequests else &self.screenRequests;
    targetBuffer.append(.{ .depth = depth, .shader = self.currentShader, .data = .{ .Circle = .{
        .position = point,
        .radius = radius,
        .color = color,
    } } }) catch unreachable;
}
src/renderer.zig
const std = @import("std");
const zap = @import("zap");
const hyperia = @import("hyperia.zig");

const mem = std.mem;
const math = std.math;
const time = std.time;
const mpsc = hyperia.mpsc;
const testing = std.testing;
const assert = std.debug.assert;

const SpinLock = hyperia.sync.SpinLock;

const Timer = @This();

/// A single waitable timer entry. Register it with a `Timer` and `wait` on
/// it; the timer's owner signals it via `set` when it expires.
pub const Handle = struct {
    event: mpsc.AsyncAutoResetEvent(void) = .{},
    expires_at: usize = 0,

    /// Reset this handle's event, stamp its deadline, and register it with
    /// `timer`. Errors only on allocation failure inside the queue.
    pub fn start(self: *Timer.Handle, timer: *Timer, expires_at: usize) !void {
        self.event = .{};
        self.expires_at = expires_at;
        try timer.add(self);
    }

    /// Deregister this handle from `timer`. If it was still pending, wake its
    /// waiter by scheduling the event's runnable.
    /// FIX: signature was `cancel(self: *Timer, Handle, timer: *Timer)` — a
    /// comma typo for `self: *Timer.Handle` (the body passes `self` to
    /// `timer.cancel` and calls `self.set()`, both Handle operations).
    pub fn cancel(self: *Timer.Handle, timer: *Timer) void {
        if (!timer.cancel(self)) return;
        hyperia.pool.schedule(.{}, self.set());
    }

    /// Suspend until this handle is signalled.
    pub fn wait(self: *Timer.Handle) void {
        return self.event.wait();
    }

    /// Signal this handle; returns the runnable to resume, if any.
    pub fn set(self: *Timer.Handle) ?*zap.Pool.Runnable {
        return self.event.set();
    }
};

lock: SpinLock = .{},
entries: std.PriorityQueue(*Timer.Handle),

/// Create a timer whose pending handles are kept in a min-heap ordered by
/// `expires_at` (earliest deadline at the head).
pub fn init(allocator: *mem.Allocator) Timer {
    return Timer{
        .entries = std.PriorityQueue(*Timer.Handle).init(allocator, struct {
            fn lessThan(a: *Timer.Handle, b: *Timer.Handle) math.Order {
                return math.order(a.expires_at, b.expires_at);
            }
        }.lessThan),
    };
}

pub fn deinit(self: *Timer, allocator: *mem.Allocator) void {
    self.entries.deinit();
}

/// Register `handle` under the lock. Errors only on allocation failure.
pub fn add(self: *Timer, handle: *Timer.Handle) !void {
    const held = self.lock.acquire();
    defer held.release();
    try self.entries.add(handle);
}

/// Remove `handle` if still queued; returns true if it was removed.
pub fn cancel(self: *Timer, handle: *Timer.Handle) bool {
    const held = self.lock.acquire();
    defer held.release();
    const i = mem.indexOfScalar(*Timer.Handle, self.entries.items, handle) orelse return false;
    assert(self.entries.removeIndex(i) == handle);
    return true;
}

/// Time until the earliest deadline, or null when no timers are pending.
/// Clamps to 0 when the deadline has already passed.
pub fn delay(self: *Timer, current_time: usize) ?usize {
    const held = self.lock.acquire();
    defer held.release();
    const head = self.entries.peek() orelse return null;
    return math.sub(usize, head.expires_at, current_time) catch 0;
}

/// Pop every handle whose deadline is at or before `current_time`, invoking
/// `callback.call(handle)` for each under the lock.
pub fn update(self: *Timer, current_time: usize, callback: anytype) void {
    const held = self.lock.acquire();
    defer held.release();
    while (true) {
        const head = self.entries.peek() orelse break;
        if (head.expires_at > current_time) break;
        callback.call(self.entries.remove());
    }
}

test {
    testing.refAllDecls(@This());
}

test "timer/async: add timers and execute them" {
    hyperia.init();
    defer hyperia.deinit();

    const allocator = testing.allocator;

    var timer = Timer.init(allocator);
    defer timer.deinit(allocator);

    var a: Timer.Handle = .{ .expires_at = @intCast(usize, time.milliTimestamp()) + 10 };
    var b: Timer.Handle = .{ .expires_at = @intCast(usize, time.milliTimestamp()) + 20 };
    var c: Timer.Handle = .{ .expires_at = @intCast(usize, time.milliTimestamp()) + 30 };

    try timer.add(&a);
    try timer.add(&b);
    try timer.add(&c);

    var fa = async a.wait();
    var fb = async b.wait();
    var fc = async c.wait();

    while (true) {
        time.sleep(timer.delay(@intCast(usize, time.milliTimestamp())) orelse break);

        var batch: zap.Pool.Batch = .{};
        defer while (batch.pop()) |runnable| runnable.run();

        timer.update(@intCast(usize, @intCast(usize, time.milliTimestamp())), struct {
            batch: *zap.Pool.Batch,
            pub fn call(self: @This(), handle: *Timer.Handle) void {
                self.batch.push(handle.set());
            }
        }{ .batch = &batch });
    }

    nosuspend await fa;
    nosuspend await fb;
    nosuspend await fc;
}

test "timer: add timers and update latest time" {
    const allocator = testing.allocator;

    var timer = Timer.init(allocator);
    defer timer.deinit(allocator);

    var a: Timer.Handle = .{ .expires_at = 10 };
    var b: Timer.Handle = .{ .expires_at = 20 };
    var c: Timer.Handle = .{ .expires_at = 30 };

    try timer.add(&a);
    try timer.add(&b);
    try timer.add(&c);

    timer.update(25, struct {
        fn call(handle: *Timer.Handle) void {
            return {};
        }
    });

    testing.expect(timer.delay(25) == c.expires_at - 25);
}
Timer.zig
const std = @import("std");
const builtin = @import("builtin");
const testing = std.testing;

// Configuration

// TODO: pass this option from the build system.
pub const JANET_NO_NANBOX = false;

// C bindings for the Janet VM; JANET_NO_NANBOX forces the non-NaN-boxed
// Janet value representation when enabled above.
pub const c = @cImport({
    if (JANET_NO_NANBOX) {
        @cDefine("JANET_NO_NANBOX", {});
    }
    @cInclude("janet.h");
});

// Bindings

// Status signal returned by the Janet VM after running code. `ok` means
// success; every other tag is surfaced to Zig callers as `Signal.Error`.
pub const Signal = enum(c_int) {
    ok,
    @"error",
    debug,
    yield,
    user0,
    user1,
    user2,
    user3,
    user4,
    user5,
    user6,
    user7,
    user8,
    user9,

    // Error set mirroring every non-ok signal tag.
    pub const Error = error{
        @"error",
        debug,
        yield,
        user0,
        user1,
        user2,
        user3,
        user4,
        user5,
        user6,
        user7,
        user8,
        user9,
    };

    // Converts a non-ok signal into the matching Zig error.
    // Calling this on `.ok` is a programming error (unreachable).
    pub fn toError(signal: Signal) Error {
        return switch (signal) {
            Signal.ok => unreachable,
            Signal.@"error" => Error.@"error",
            Signal.debug => Error.debug,
            Signal.yield => Error.yield,
            Signal.user0 => Error.user0,
            Signal.user1 => Error.user1,
            Signal.user2 => Error.user2,
            Signal.user3 => Error.user3,
            Signal.user4 => Error.user4,
            Signal.user5 => Error.user5,
            Signal.user6 => Error.user6,
            Signal.user7 => Error.user7,
            Signal.user8 => Error.user8,
            Signal.user9 => Error.user9,
        };
    }
};

// Initializes the Janet VM; must be called before any other binding.
pub fn init() !void {
    if (c.janet_init() != 0) return error.InitError;
}

// Tears down the Janet VM.
pub fn deinit() void {
    c.janet_deinit();
}

// Calls the method named `name` on argv[0] with the remaining arguments.
// The returned C signal is reinterpreted as `Signal` via pointer cast.
pub fn mcall(name: [:0]const u8, argv: []Janet) Signal.Error!void {
    const signal = @ptrCast(
        *Signal,
        &c.janet_mcall(name.ptr, @intCast(i32, argv.len), @ptrCast([*c]c.Janet, argv.ptr)),
    ).*;
    switch (signal) {
        .ok => {},
        else => return signal.toError(),
    }
}

// Mirror of C JanetTryState: a setjmp-based recovery point for janet_panic.
pub const TryState = extern struct {
    stackn: i32,
    gc_handle: c_int,
    vm_fiber: *Fiber,
    vm_jmp_buf: *c.jmp_buf,
    vm_return_reg: *Janet,
    buf: c.jmp_buf,
    payload: Janet,

    pub fn toC(self: *TryState) *c.JanetTryState {
        return @ptrCast(*c.JanetTryState, self);
    }

    pub fn tryInit(state: *TryState) void {
        c.janet_try_init(state.toC());
    }

    // Installs a recovery point: returns normally with .ok the first time,
    // and returns the panicking signal as an error if a Janet panic longjmps
    // back here. BSD-family targets expose _setjmp instead of setjmp.
    pub fn @"try"(state: *TryState) Signal.Error!void {
        tryInit(state);
        const signal = blk: {
            if (builtin.target.os.tag.isDarwin() or
                builtin.target.os.tag == .freebsd or
                builtin.target.os.tag == .openbsd or
                builtin.target.os.tag == .netbsd or
                builtin.target.os.tag == .dragonfly)
            {
                break :blk @ptrCast(*Signal, &c._setjmp(&state.buf)).*;
            } else {
                break :blk @ptrCast(*Signal, &c.setjmp(&state.buf)).*;
            }
        };
        switch (signal) {
            .ok => {},
            else => return signal.toError(),
        }
    }

    // Pops the recovery point installed by @"try".
    pub fn restore(state: *TryState) void {
        c.janet_restore(state.toC());
    }
};

// Raises a Janet panic; unwinds to the nearest TryState / fiber boundary.
pub fn panic(message: [:0]const u8) noreturn {
    c.janet_panic(message.ptr);
}

// Prints to the Janet dynamic binding :out (backed by stdout here).
pub fn print(message: [:0]const u8) void {
    c.janet_dynprintf("out", std.io.getStdOut().handle, message.ptr);
}

// Prints to the Janet dynamic binding :err (backed by stderr here).
pub fn eprint(message: [:0]const u8) void {
    c.janet_dynprintf("err", std.io.getStdErr().handle, message.ptr);
}

// Panics unless min <= ary <= max (C function argument-count check).
pub fn arity(ary: i32, min: i32, max: i32) void {
    c.janet_arity(ary, min, max);
}

// Panics unless ary == fix.
pub fn fixArity(ary: i32, fix: i32) void {
    c.janet_fixarity(ary, fix);
}

// Looks up `method` in a NULL-terminated method table; stores the result in
// `out` and reports whether it was found.
pub fn getMethod(method: Keyword, methods: [*]const Method, out: *Janet) bool {
    return c.janet_getmethod(
        method.toC(),
        @ptrCast([*c]const c.JanetMethod, methods),
        @ptrCast(*c.Janet, out),
    ) > 0;
}

// Returns Janet.nil if there's no next method.
/// Iterates a method table; yields nil when there is no next method.
pub fn nextMethod(methods: [*]const Method, key: Janet) Janet {
    return Janet.fromC(c.janet_nextmethod(@ptrCast([*c]const c.JanetMethod, methods), key.toC()));
}

/// Constructs the Janet nil value.
pub fn nil() Janet {
    return Janet.fromC(c.janet_wrap_nil());
}

/// Interns `str` as a Janet keyword value.
pub fn keyword(str: []const u8) Janet {
    return Janet.fromC(c.janet_keywordv(str.ptr, @intCast(i32, str.len)));
}

/// Interns `str` as a Janet symbol value.
pub fn symbol(str: []const u8) Janet {
    return Janet.fromC(c.janet_symbolv(str.ptr, @intCast(i32, str.len)));
}

/// Copies `str` into a new Janet string value.
pub fn string(str: []const u8) Janet {
    return Janet.fromC(c.janet_stringv(str.ptr, @intCast(i32, str.len)));
}

/// Wraps a float, replacing NaNs that would break NaN-boxing (C: janet_wrap_number_safe).
pub fn numberSafe(x: f64) Janet {
    return Janet.fromC(c.janet_wrap_number_safe(x));
}

/// Wraps a Zig value of type `T` into a Janet value. Integers are converted
/// to Janet's f64 number representation; binding types delegate to their own
/// `wrap`. Unsupported types are a compile error.
pub fn wrap(comptime T: type, value: T) Janet {
    return switch (T) {
        f64 => Janet.fromC(c.janet_wrap_number(value)),
        u32 => Janet.fromC(c.janet_wrap_number(@intToFloat(f64, value))),
        i32 => Janet.fromC(c.janet_wrap_number(@intToFloat(f64, value))),
        i64 => Janet.fromC(c.janet_wrap_number(@intToFloat(f64, value))),
        usize => Janet.fromC(c.janet_wrap_number(@intToFloat(f64, value))),
        bool => if (value) Janet.fromC(c.janet_wrap_true()) else Janet.fromC(c.janet_wrap_false()),
        String => value.wrap(),
        Symbol => value.wrap(),
        Keyword => value.wrap(),
        *Array => value.wrap(),
        Tuple => value.wrap(),
        Struct => value.wrap(),
        *Fiber => value.wrap(),
        *Buffer => value.wrap(),
        *Function => value.wrap(),
        CFunction => value.wrap(),
        *Table => value.wrap(),
        *anyopaque => Janet.fromC(c.janet_wrap_pointer(value)),
        []const u8 => string(value),
        else => @compileError("Wrapping is not supported for '" ++ @typeName(T) ++ "'"),
    };
}

/// Extracts argument `n` from a C function's argv as type `T`, panicking into
/// Janet on a type mismatch (the janet_get* helpers validate and panic).
pub fn get(comptime T: type, argv: [*]const Janet, n: i32) T {
    return switch (T) {
        f64 => c.janet_getnumber(Janet.toConstPtr(argv), n),
        u32 => @intCast(u32, c.janet_getnat(Janet.toConstPtr(argv), n)),
        i32 => c.janet_getinteger(Janet.toConstPtr(argv), n),
        i64 => c.janet_getinteger64(Janet.toConstPtr(argv), n),
        usize => c.janet_getsize(Janet.toConstPtr(argv), n),
        bool => c.janet_getboolean(Janet.toConstPtr(argv), n) > 0,
        *Array => Array.fromC(c.janet_getarray(Janet.toConstPtr(argv), n)),
        Tuple => Tuple.fromC(c.janet_gettuple(Janet.toConstPtr(argv), n)),
        *Table => Table.fromC(c.janet_gettable(Janet.toConstPtr(argv), n)),
        Struct => Struct.fromC(c.janet_getstruct(Janet.toConstPtr(argv), n)),
        String => String.fromC(c.janet_getstring(Janet.toConstPtr(argv), n)),
        Symbol => Symbol.fromC(c.janet_getsymbol(Janet.toConstPtr(argv), n)),
        Keyword => Keyword.fromC(c.janet_getkeyword(Janet.toConstPtr(argv), n)),
        *Buffer => Buffer.fromC(c.janet_getbuffer(Janet.toConstPtr(argv), n)),
        *anyopaque => c.janet_getpointer(Janet.toConstPtr(argv), n) orelse unreachable,
        CFunction => CFunction.fromC(c.janet_getcfunction(Janet.toConstPtr(argv), n)),
        *Fiber => Fiber.fromC(c.janet_getfiber(Janet.toConstPtr(argv), n) orelse unreachable),
        *Function => Function.fromC(
            c.janet_getfunction(Janet.toConstPtr(argv), n) orelse unreachable,
        ),
        View => @ptrCast(*View, &c.janet_getindexed(Janet.toConstPtr(argv), n)).*,
        ByteView => @ptrCast(*ByteView, &c.janet_getbytes(Janet.toConstPtr(argv), n)).*,
        DictView => @ptrCast(*DictView, &c.janet_getdictionary(Janet.toConstPtr(argv), n)).*,
        // NOTE(review): janet_getslice's first parameter is argc in the C API;
        // passing `n` here follows the original code — confirm against callers.
        Range => @ptrCast(*Range, &c.janet_getslice(n, Janet.toConstPtr(argv))).*,
        AbstractType => @compileError("Please use 'getAbstract' instead"),
        else => @compileError("Get is not supported for '" ++ @typeName(T) ++ "'"),
    };
}

/// Extracts argument `n` as an abstract value of `ValueType` tagged with `at`.
pub fn getAbstract(
    argv: [*]const Janet,
    n: i32,
    comptime ValueType: type,
    at: *const Abstract(ValueType).Type,
) Abstract(ValueType) {
    return Abstract(ValueType).fromC(
        c.janet_getabstract(Janet.toConstPtr(argv), n, at.toC()) orelse unreachable,
    );
}

/// Extracts argument `n` as an index in [0, length], panicking otherwise.
pub fn getHalfRange(argv: [*]const Janet, n: i32, length: i32, which: [:0]const u8) i32 {
    return c.janet_gethalfrange(Janet.toConstPtr(argv), n, length, which.ptr);
}

/// Extracts argument `n` as an index in [0, length), panicking otherwise.
pub fn getArgIndex(argv: [*]const Janet, n: i32, length: i32, which: [:0]const u8) i32 {
    return c.janet_getargindex(Janet.toConstPtr(argv), n, length, which.ptr);
}

/// Extracts argument `n` as a keyword-named bitflag set described by `flags`.
pub fn getFlags(argv: [*]const Janet, n: i32, flags: [:0]const u8) u64 {
    return c.janet_getflags(Janet.toConstPtr(argv), n, flags.ptr);
}

/// Like `get`, but returns `dflt` when argument `n` is absent (n >= argc) or nil.
pub fn opt(comptime T: type, argv: [*]const Janet, argc: i32, n: i32, dflt: T) T {
    return switch (T) {
        f64 => c.janet_optnumber(Janet.toConstPtr(argv), argc, n, dflt),
        u32 => @intCast(u32, c.janet_optnat(Janet.toConstPtr(argv), argc, n, @intCast(i32, dflt))),
        i32 => c.janet_optinteger(Janet.toConstPtr(argv), argc, n, dflt),
        i64 => c.janet_optinteger64(Janet.toConstPtr(argv), argc, n, dflt),
        usize => c.janet_optsize(Janet.toConstPtr(argv), argc, n, dflt),
        Tuple => Tuple.fromC(c.janet_opttuple(Janet.toConstPtr(argv), argc, n, dflt.toC())),
        Struct => Struct.fromC(c.janet_optstruct(Janet.toConstPtr(argv), argc, n, dflt.toC())),
        String => String.fromC(c.janet_optstring(Janet.toConstPtr(argv), argc, n, dflt.toC())),
        Symbol => Symbol.fromC(c.janet_optsymbol(Janet.toConstPtr(argv), argc, n, dflt.toC())),
        Keyword => Keyword.fromC(c.janet_optkeyword(Janet.toConstPtr(argv), argc, n, dflt.toC())),
        bool => c.janet_optboolean(Janet.toConstPtr(argv), argc, n, if (dflt) 1 else 0) > 0,
        *anyopaque => c.janet_optpointer(Janet.toConstPtr(argv), argc, n, dflt) orelse unreachable,
        CFunction => CFunction.fromC(
            c.janet_optcfunction(Janet.toConstPtr(argv), argc, n, dflt.toC()),
        ),
        *Fiber => Fiber.fromC(
            c.janet_optfiber(Janet.toConstPtr(argv), argc, n, dflt.toC()) orelse unreachable,
        ),
        *Function => Function.fromC(
            c.janet_optfunction(Janet.toConstPtr(argv), argc, n, dflt.toC()) orelse unreachable,
        ),
        AbstractType => @compileError("Please use 'optAbstract' instead"),
        *Array => @compileError("Please use 'optArray' instead"),
        *Table => @compileError("Please use 'optTable' instead"),
        *Buffer => @compileError("Please use 'optBuffer' instead"),
        else => @compileError("Opt is not supported for '" ++ @typeName(T) ++ "'"),
    };
}

/// Like `getAbstract`, but returns `dflt` when argument `n` is absent or nil.
pub fn optAbstract(
    argv: [*]const Janet,
    argc: i32,
    n: i32,
    comptime ValueType: type,
    at: *const Abstract(ValueType).Type,
    dflt: Abstract(ValueType),
) Abstract(ValueType) {
    // FIX: the raw pointer returned by janet_optabstract was previously
    // returned directly even though the declared result type is
    // Abstract(ValueType); wrap it with fromC, mirroring getAbstract above.
    return Abstract(ValueType).fromC(c.janet_optabstract(
        Janet.toConstPtr(argv),
        argc,
        n,
        at.toC(),
        dflt.toC(),
    ) orelse unreachable);
}

/// Optional array argument; allocates a fresh array of `dflt_len` when absent.
pub fn optArray(argv: [*]const Janet, argc: i32, n: i32, dflt_len: i32) *Array {
    return Array.fromC(c.janet_optarray(Janet.toConstPtr(argv), argc, n, dflt_len));
}

/// Optional table argument; allocates a fresh table of `dflt_len` when absent.
pub fn optTable(argv: [*]const Janet, argc: i32, n: i32, dflt_len: i32) *Table {
    return Table.fromC(c.janet_opttable(Janet.toConstPtr(argv), argc, n, dflt_len));
}

/// Optional buffer argument; allocates a fresh buffer of `dflt_len` when absent.
pub fn optBuffer(argv: [*]const Janet, argc: i32, n: i32, dflt_len: i32) *Buffer {
    return Buffer.fromC(c.janet_optbuffer(Janet.toConstPtr(argv), argc, n, dflt_len));
}

// Garbage-collector controls (thin wrappers over the janet_gc* C API).

pub fn mark(x: Janet) void {
    c.janet_mark(x.toC());
}

pub fn sweep() void {
    c.janet_sweep();
}

pub fn collect() void {
    c.janet_collect();
}

pub fn clearMemory() void {
    c.janet_clear_memory();
}

pub fn gcRoot(root: Janet) void {
    c.janet_gcroot(root.toC());
}

pub fn gcUnroot(root: Janet) c_int {
    return c.janet_gcunroot(root.toC());
}

pub fn gcUnrootAll(root: Janet) c_int {
    return c.janet_gcunrootall(root.toC());
}

pub fn gcLock() c_int {
    return c.janet_gclock();
}

pub fn gcUnlock(handle: c_int) void {
    return c.janet_gcunlock(handle);
}

pub fn gcPressure(s: usize) void {
    c.janet_gcpressure(s);
}

// Waiting for "Allocgate", new model of allocators for Zig. After that we can wrap these
// into idiomatic Zig allocators.
/// Janet's malloc; memory is tracked by the Janet GC heap accounting.
pub fn malloc(size: usize) ?*anyopaque {
    return c.janet_malloc(size);
}

pub fn realloc(ptr: *anyopaque, size: usize) ?*anyopaque {
    return c.janet_realloc(ptr, size);
}

pub fn calloc(nmemb: usize, size: usize) ?*anyopaque {
    return c.janet_calloc(nmemb, size);
}

pub fn free(ptr: *anyopaque) void {
    c.janet_free(ptr);
}

/// Finalizer signature exposed to Zig callers (non-optional pointer) ...
pub const ScratchFinalizer = fn (ptr: *anyopaque) callconv(.C) void;
/// ... and the C-facing signature janet_sfinalizer actually expects.
pub const ScratchFinalizerC = fn (ptr: ?*anyopaque) callconv(.C) void;

/// Scratch memory: freed automatically on the next GC unless promoted.
pub fn smalloc(size: usize) ?*anyopaque {
    return c.janet_smalloc(size);
}

pub fn srealloc(ptr: *anyopaque, size: usize) ?*anyopaque {
    return c.janet_srealloc(ptr, size);
}

pub fn scalloc(nmemb: usize, size: usize) ?*anyopaque {
    return c.janet_scalloc(nmemb, size);
}

pub fn sfinalizer(ptr: *anyopaque, finalizer: ScratchFinalizer) void {
    return c.janet_sfinalizer(ptr, @ptrCast(ScratchFinalizerC, finalizer));
}

pub fn sfree(ptr: *anyopaque) void {
    c.janet_sfree(ptr);
}

/// Writes sorted key indices of `dict` into `index_buffer`; returns the count.
pub fn sortedKeys(dict: [*]const KV, cap: i32, index_buffer: *i32) i32 {
    return c.janet_sorted_keys(@ptrCast([*c]const c.JanetKV, dict), cap, index_buffer);
}

/// Looks `key` up in a raw KV table; nil results are mapped to null.
pub fn dictionaryGet(data: [*]const KV, cap: i32, key: Janet) ?Janet {
    const value = Janet.fromC(
        c.janet_dictionary_get(@ptrCast([*c]const c.JanetKV, data), cap, key.toC()),
    );
    if (value.checkType(.nil)) {
        return null;
    } else {
        return value;
    }
}

/// Iterates a raw KV table; returns the KV after `kv`, or null at the end.
pub fn dictionaryNext(kvs: [*]const KV, cap: i32, kv: *const KV) ?*const KV {
    return @ptrCast(?*const KV, c.janet_dictionary_next(
        @ptrCast([*c]const c.JanetKV, kvs),
        cap,
        @ptrCast([*c]const c.JanetKV, kv),
    ));
}

/// Version/configuration info of the Janet library this binary was built against.
pub const BuildConfig = extern struct {
    major: c_uint,
    minor: c_uint,
    patch: c_uint,
    bits: c_uint,
};

pub fn configCurrent() BuildConfig {
    return BuildConfig{
        .major = c.JANET_VERSION_MAJOR,
        .minor = c.JANET_VERSION_MINOR,
        .patch = c.JANET_VERSION_PATCH,
        .bits = c.JANET_CURRENT_CONFIG_BITS,
    };
}

// Tag values mirror the C JanetType enum; order must not be changed, the
// TFLAG_* masks below are derived from these positions.
const JanetType = enum(c_int) {
    number,
    nil,
    boolean,
    fiber,
    string,
    symbol,
    keyword,
    array,
    tuple,
    table,
    @"struct",
    buffer,
    function,
    cfunction,
    abstract,
    pointer,
};

pub const TFLAG_NIL = (1 << @enumToInt(Janet.Type.nil));
pub const TFLAG_BOOLEAN = (1 << @enumToInt(Janet.Type.boolean));
pub const TFLAG_FIBER = (1 << @enumToInt(Janet.Type.fiber));
pub const TFLAG_NUMBER = (1 << @enumToInt(Janet.Type.number));
pub const TFLAG_STRING = (1 << @enumToInt(Janet.Type.string));
pub const TFLAG_SYMBOL = (1 << @enumToInt(Janet.Type.symbol));
pub const TFLAG_KEYWORD = (1 << @enumToInt(Janet.Type.keyword));
pub const TFLAG_ARRAY = (1 << @enumToInt(Janet.Type.array));
pub const TFLAG_TUPLE = (1 << @enumToInt(Janet.Type.tuple));
pub const TFLAG_TABLE = (1 << @enumToInt(Janet.Type.table));
pub const TFLAG_STRUCT = (1 << @enumToInt(Janet.Type.@"struct"));
pub const TFLAG_BUFFER = (1 << @enumToInt(Janet.Type.buffer));
pub const TFLAG_FUNCTION = (1 << @enumToInt(Janet.Type.function));
pub const TFLAG_CFUNCTION = (1 << @enumToInt(Janet.Type.cfunction));
pub const TFLAG_ABSTRACT = (1 << @enumToInt(Janet.Type.abstract));
pub const TFLAG_POINTER = (1 << @enumToInt(Janet.Type.pointer));

// Some abstractions
pub const TFLAG_BYTES = TFLAG_STRING | TFLAG_SYMBOL | TFLAG_BUFFER | TFLAG_KEYWORD;
pub const TFLAG_INDEXED = TFLAG_ARRAY | TFLAG_TUPLE;
pub const TFLAG_DICTIONARY = TFLAG_TABLE | TFLAG_STRUCT;
pub const TFLAG_LENGTHABLE = TFLAG_BYTES | TFLAG_INDEXED | TFLAG_DICTIONARY;
pub const TFLAG_CALLABLE = TFLAG_FUNCTION | TFLAG_CFUNCTION | TFLAG_LENGTHABLE | TFLAG_ABSTRACT;

/// ABI-compatible mirror of the C Janet value. The layout is selected at
/// comptime to match janet.h's representation: boxed struct (no NaN-boxing)
/// on ARM/aarch64 or when JANET_NO_NANBOX is set, NaN-boxed union on x86_64,
/// and an endian-dependent tagged union elsewhere.
pub const Janet = blk: {
    if (JANET_NO_NANBOX or builtin.target.cpu.arch.isARM() or builtin.target.cpu.arch == .aarch64) {
        break :blk extern struct {
            as: extern union {
                @"u64": u64,
                number: f64,
                integer: i32,
                pointer: *anyopaque,
                cpointer: *const anyopaque,
            },
            @"type": Type,

            pub const Type = JanetType;
            pub usingnamespace JanetMixin;
        };
    } else if (builtin.target.cpu.arch == .x86_64) {
        break :blk extern union {
            @"u64": u64,
            @"i64": i64,
            number: f64,
            pointer: *anyopaque,

            pub const Type = JanetType;
            pub usingnamespace JanetMixin;
        };
        // FIX: `endianess()` is not a method of std.Target.Cpu.Arch — the API
        // is `endian()` — so these two branches could never compile when
        // selected. Renamed in both places.
    } else if (builtin.target.cpu.arch.endian() == .Big) {
        break :blk extern union {
            tagged: extern struct {
                @"type": u32,
                payload: extern union {
                    integer: u32,
                    pointer: *anyopaque,
                },
            },
            number: f64,
            @"u64": u64,

            pub const Type = JanetType;
            pub usingnamespace JanetMixin;
        };
    } else if (builtin.target.cpu.arch.endian() == .Little) {
        break :blk extern union {
            tagged: extern struct {
                payload: extern union {
                    integer: u32,
                    pointer: *anyopaque,
                },
                @"type": u32,
            },
            number: f64,
            @"u64": u64,

            pub const Type = JanetType;
            pub usingnamespace JanetMixin;
        };
    }
};

/// Shared methods mixed into every `Janet` layout variant via usingnamespace.
const JanetMixin = struct {
    /// Reinterprets a C Janet as the Zig mirror type (identical layout).
    pub fn fromC(janet: c.Janet) Janet {
        return @ptrCast(*const Janet, &janet).*;
    }

    pub fn toC(janet: *const Janet) c.Janet {
        return @ptrCast(*const c.Janet, janet).*;
    }

    pub fn toConstPtr(janet: [*]const Janet) [*c]const c.Janet {
        return @ptrCast([*c]const c.Janet, janet);
    }

    pub fn checkType(janet: Janet, typ: Janet.Type) bool {
        return c.janet_checktype(janet.toC(), @ptrCast(*const c.JanetType, &typ).*) > 0;
    }

    /// Checks the value's type against a TFLAG_* bitmask.
    pub fn checkTypes(janet: Janet, typeflags: i32) bool {
        return c.janet_checktypes(janet.toC(), typeflags) > 0;
    }

    /// Janet truthiness: everything except nil and false is truthy.
    pub fn truthy(janet: Janet) bool {
        return c.janet_truthy(janet.toC()) > 0;
    }

    pub fn janetType(janet: Janet) Janet.Type {
        return @ptrCast(*Janet.Type, &c.janet_type(janet.toC())).*;
    }

    pub fn getAbstractType(janet: Janet) *const AbstractType {
        return AbstractType.fromC(c.janet_get_abstract_type(janet.toC()));
    }

    /// Data-structure lookup (panics on bad key); nil results map to null.
    pub fn in(ds: Janet, key: Janet) ?Janet {
        const value = Janet.fromC(c.janet_in(ds.toC(), key.toC()));
        if (value.checkType(.nil)) {
            return null;
        } else {
            return value;
        }
    }

    /// Non-panicking data-structure lookup; nil results map to null.
    pub fn get(ds: Janet, key: Janet) ?Janet {
        const value = Janet.fromC(c.janet_get(ds.toC(), key.toC()));
        if (value.checkType(.nil)) {
            return null;
        } else {
            return value;
        }
    }

    /// Iterator step: next key after `key`, or null when exhausted.
    pub fn next(ds: Janet, key: Janet) ?Janet {
        const value = Janet.fromC(c.janet_next(ds.toC(), key.toC()));
        if (value.checkType(.nil)) {
            return null;
        } else {
            return value;
        }
    }

    pub fn getIndex(ds: Janet, index: i32) ?Janet {
        const value = Janet.fromC(c.janet_getindex(ds.toC(), index));
        if (value.checkType(.nil)) {
            return null;
        } else {
            return value;
        }
    }

    pub fn length(x: Janet) i32 {
        return c.janet_length(x.toC());
    }

    pub fn lengthv(x: Janet) Janet {
        return fromC(c.janet_lengthv(x.toC()));
    }

    pub fn put(ds: Janet, key: Janet, value: Janet) void {
        c.janet_put(ds.toC(), key.toC(), value.toC());
    }

    pub fn putIndex(ds: Janet, index: i32, value: Janet) void {
        c.janet_putindex(ds.toC(), index, value.toC());
    }

    /// Read-only indexed view over an array/tuple; errors if `seq` is neither.
    pub fn indexedView(seq: Janet) !View {
        var data: [*]const Janet = undefined;
        var len: i32 = undefined;
        const rs = c.janet_indexed_view(seq.toC(), @ptrCast([*c][*c]const c.Janet, &data), &len);
        if (rs <= 0) return error.CannotConstructView;
        return View{ .items = data, .len = len };
    }

    /// Read-only byte view over string-like values.
    pub fn bytesView(str: Janet) !ByteView {
        var data: [*]const u8 = undefined;
        var len: i32 = undefined;
        const rs = c.janet_bytes_view(str.toC(), @ptrCast([*c][*c]const u8, &data), &len);
        if (rs <= 0) return error.CannotConstructView;
        return ByteView{ .bytes = data, .len = len };
    }

    /// Read-only KV view over table/struct values.
    pub fn dictionaryView(tab: Janet) !DictView {
        var data: [*]const KV = undefined;
        var len: i32 = undefined;
        var cap: i32 = undefined;
        const rs = c.janet_dictionary_view(
            tab.toC(),
            @ptrCast([*c][*c]const c.JanetKV, &data),
            &len,
            &cap,
        );
        if (rs <= 0) return error.CannotConstructView;
        return DictView{ .kvs = data, .len = len, .cap = cap };
    }

    pub fn equals(x: Janet, y: Janet) bool {
        return c.janet_equals(x.toC(), y.toC()) > 0;
    }

    pub fn hash(x: Janet) i32 {
        return c.janet_hash(x.toC());
    }

    pub fn compare(x: Janet, y: Janet) c_int {
        return c.janet_compare(x.toC(), y.toC());
    }

    /// Checked conversion from a Janet value to a Zig type; returns a typed
    /// error (NotNumber, NotString, ...) on a tag mismatch and NegativeValue
    /// when unwrapping a negative number into an unsigned type.
    pub fn unwrap(janet: Janet, comptime T: type) !T {
        switch (T) {
            f64 => {
                if (!janet.checkType(.number)) return error.NotNumber;
                return c.janet_unwrap_number(janet.toC());
            },
            u32 => {
                if (!janet.checkType(.number)) return error.NotNumber;
                const value = c.janet_unwrap_integer(janet.toC());
                if (value < 0) return error.NegativeValue;
                return @intCast(u32, value);
            },
            i32 => {
                if (!janet.checkType(.number)) return error.NotNumber;
                return c.janet_unwrap_integer(janet.toC());
            },
            i64 => {
                if (!janet.checkType(.number)) return error.NotNumber;
                return @intCast(i64, c.janet_unwrap_integer(janet.toC()));
            },
            usize => {
                if (!janet.checkType(.number)) return error.NotNumber;
                const value = c.janet_unwrap_integer(janet.toC());
                if (value < 0) return error.NegativeValue;
                return @intCast(usize, value);
            },
            bool => {
                if (!janet.checkType(.boolean)) return error.NotBoolean;
                return c.janet_unwrap_boolean(janet.toC()) > 0;
            },
            String => {
                if (!janet.checkType(.string)) return error.NotString;
                return String.fromC(c.janet_unwrap_string(janet.toC()));
            },
            Keyword => {
                if (!janet.checkType(.keyword)) return error.NotKeyword;
                return Keyword.fromC(c.janet_unwrap_keyword(janet.toC()));
            },
            Symbol => {
                if (!janet.checkType(.symbol)) return error.NotSymbol;
                return Symbol.fromC(c.janet_unwrap_symbol(janet.toC()));
            },
            Tuple => {
                if (!janet.checkType(.tuple)) return error.NotTuple;
                return Tuple.fromC(c.janet_unwrap_tuple(janet.toC()));
            },
            *Array => {
                if (!janet.checkType(.array)) return error.NotArray;
                return @ptrCast(*Array, c.janet_unwrap_array(janet.toC()));
            },
            *Buffer => {
                if (!janet.checkType(.buffer)) return error.NotBuffer;
                return @ptrCast(*Buffer, c.janet_unwrap_buffer(janet.toC()));
            },
            Struct => {
                if (!janet.checkType(.@"struct")) return error.NotStruct;
                return Struct.fromC(c.janet_unwrap_struct(janet.toC()));
            },
            *Table => {
                if (!janet.checkType(.table)) return error.NotTable;
                return @ptrCast(*Table, c.janet_unwrap_table(janet.toC()));
            },
            *anyopaque => {
                if (!janet.checkType(.pointer)) return error.NotPointer;
                return c.janet_unwrap_pointer(janet.toC()) orelse unreachable;
            },
            CFunction => {
                if (!janet.checkType(.cfunction)) return error.NotCFunction;
                return CFunction.fromC(c.janet_unwrap_cfunction(janet.toC()));
            },
            *Function => {
                if (!janet.checkType(.function)) return error.NotFunction;
                return Function.fromC(c.janet_unwrap_function(janet.toC()) orelse unreachable);
            },
            *Fiber => {
                if (!janet.checkType(.fiber)) return error.NotFiber;
                return Fiber.fromC(c.janet_unwrap_fiber(janet.toC()) orelse unreachable);
            },
            AbstractType => @compileError("Please use 'unwrapAbstract' instead"),
            else => @compileError("Unwrapping is not supported for '" ++ @typeName(T) ++ "'"),
        }
        unreachable;
    }

    pub fn unwrapAbstract(janet: Janet, comptime ValueType: type) !Abstract(ValueType) {
        if (!janet.checkType(.abstract)) return error.NotAbstract;
        return Abstract(ValueType).fromC(c.janet_unwrap_abstract(janet.toC()) orelse unreachable);
    }
};

/// Key-value pair, ABI-compatible with C JanetKV.
pub const KV = extern struct {
    key: Janet,
    value: Janet,

    pub fn toC(self: *KV) [*c]c.JanetKV {
        return @ptrCast([*c]c.JanetKV, self);
    }
};

/// Immutable, interned Janet string. `slice` aliases GC-owned memory; the
/// string header (length, hash) lives just before the data and is reachable
/// via `head`.
pub const String = struct {
    slice: TypeImpl,

    pub const TypeImpl = [:0]const u8;
    pub const TypeC = [*:0]const u8;

    pub const Head = extern struct {
        gc: GCObject,
        length: i32,
        hash: i32,
        data: [*]const u8,
    };

    pub fn toC(self: String) TypeC {
        return self.slice.ptr;
    }

    pub fn fromC(ptr: TypeC) String {
        return String{ .slice = std.mem.span(ptr) };
    }

    pub fn init(buf: []u8) String {
        return fromC(c.janet_string(buf.ptr, @intCast(i32, buf.len)));
    }

    pub fn head(self: String) *Head {
        const h = c.janet_string_head(self.slice.ptr);
        const aligned_head = @alignCast(@alignOf(*Head), h);
        return @ptrCast(*Head, aligned_head);
    }

    pub fn length(self: String) i32 {
        return self.head().length;
    }

    /// Starts building a string of `len` bytes; finish with `end`.
    pub fn begin(len: i32) [*]u8 {
        return @ptrCast([*]u8, c.janet_string_begin(len) orelse unreachable);
    }

    pub fn end(str: [*]u8) String {
        return fromC(c.janet_string_end(str));
    }

    pub fn compare(lhs: String, rhs: String) c_int {
        return c.janet_string_compare(lhs.toC(), rhs.toC());
    }

    pub fn equal(lhs: String, rhs: String) bool {
        return c.janet_string_equal(lhs.toC(), rhs.toC()) > 0;
    }

    pub fn equalConst(lhs: String, rhs: []u8, rhash: i32) bool {
        return c.janet_string_equalconst(lhs.toC(), rhs.ptr, @intCast(i32, rhs.len), rhash) > 0;
    }

    pub fn wrap(self: String) Janet {
        return Janet.fromC(c.janet_wrap_string(self.toC()));
    }
};
/// Immutable, interned Janet symbol backed by GC-owned memory.
pub const Symbol = struct {
    slice: TypeImpl,

    pub const TypeImpl = [:0]const u8;
    pub const TypeC = [*:0]const u8;

    pub fn toC(self: Symbol) TypeC {
        return self.slice.ptr;
    }

    pub fn fromC(ptr: TypeC) Symbol {
        return Symbol{ .slice = std.mem.span(ptr) };
    }

    pub fn init(str: []const u8) Symbol {
        return fromC(c.janet_symbol(str.ptr, @intCast(i32, str.len)));
    }

    pub fn wrap(self: Symbol) Janet {
        return Janet.fromC(c.janet_wrap_symbol(self.toC()));
    }

    /// Generates a fresh unique symbol (C: janet_symbol_gen).
    pub fn gen() Symbol {
        return fromC(c.janet_symbol_gen());
    }
};

/// Immutable, interned Janet keyword backed by GC-owned memory.
pub const Keyword = struct {
    slice: TypeImpl,

    pub const TypeImpl = [:0]const u8;
    pub const TypeC = [*:0]const u8;

    pub fn toC(self: Keyword) TypeC {
        return self.slice.ptr;
    }

    pub fn fromC(ptr: TypeC) Keyword {
        return Keyword{ .slice = std.mem.span(ptr) };
    }

    pub fn init(str: []const u8) Keyword {
        return fromC(c.janet_keyword(str.ptr, @intCast(i32, str.len)));
    }

    pub fn wrap(self: Keyword) Janet {
        return Janet.fromC(c.janet_wrap_keyword(self.toC()));
    }
};

/// Mutable, growable Janet array; ABI-compatible with C JanetArray.
pub const Array = extern struct {
    gc: GCObject,
    count: i32,
    capacity: i32,
    data: [*]Janet,

    pub fn toC(self: *Array) *c.JanetArray {
        return @ptrCast(*c.JanetArray, self);
    }

    pub fn fromC(ptr: *c.JanetArray) *Array {
        return @ptrCast(*Array, ptr);
    }

    pub fn init(capacity: i32) *Array {
        return fromC(c.janet_array(capacity));
    }

    pub fn initN(elements: []const Janet) *Array {
        return fromC(c.janet_array_n(
            @ptrCast([*c]const c.Janet, elements.ptr),
            @intCast(i32, elements.len),
        ));
    }

    pub fn ensure(self: *Array, capacity: i32, growth: i32) void {
        c.janet_array_ensure(self.toC(), capacity, growth);
    }

    pub fn setCount(self: *Array, count: i32) void {
        c.janet_array_setcount(self.toC(), count);
    }

    pub fn push(self: *Array, x: Janet) void {
        c.janet_array_push(self.toC(), x.toC());
    }

    pub fn pop(self: *Array) Janet {
        return Janet.fromC(c.janet_array_pop(self.toC()));
    }

    pub fn peek(self: *Array) Janet {
        return Janet.fromC(c.janet_array_peek(self.toC()));
    }

    pub fn wrap(self: *Array) Janet {
        return Janet.fromC(c.janet_wrap_array(self.toC()));
    }
};

/// Mutable byte buffer; ABI-compatible with C JanetBuffer.
pub const Buffer = extern struct {
    gc: GCObject,
    count: i32,
    capacity: i32,
    data: [*]u8,

    pub fn toC(self: *Buffer) *c.JanetBuffer {
        return @ptrCast(*c.JanetBuffer, self);
    }

    pub fn fromC(ptr: *c.JanetBuffer) *Buffer {
        return @ptrCast(*Buffer, ptr);
    }

    /// C function: janet_buffer_init
    pub fn init(buf: *Buffer, capacity: i32) *Buffer {
        return fromC(c.janet_buffer_init(buf.toC(), capacity));
    }

    /// C function: janet_buffer
    pub fn initDynamic(capacity: i32) *Buffer {
        return fromC(c.janet_buffer(capacity));
    }

    pub fn initN(bytes: []const u8) *Buffer {
        var buf = initDynamic(@intCast(i32, bytes.len));
        buf.pushBytes(bytes);
        return buf;
    }

    pub fn deinit(self: *Buffer) void {
        c.janet_buffer_deinit(self.toC());
    }

    pub fn ensure(self: *Buffer, capacity: i32, growth: i32) void {
        c.janet_buffer_ensure(self.toC(), capacity, growth);
    }

    pub fn setCount(self: *Buffer, count: i32) void {
        c.janet_buffer_setcount(self.toC(), count);
    }

    pub fn extra(self: *Buffer, n: i32) void {
        c.janet_buffer_extra(self.toC(), n);
    }

    pub fn pushBytes(self: *Buffer, bytes: []const u8) void {
        c.janet_buffer_push_bytes(self.toC(), bytes.ptr, @intCast(i32, bytes.len));
    }

    pub fn pushString(self: *Buffer, str: String) void {
        c.janet_buffer_push_string(self.toC(), str.toC());
    }

    pub fn pushU8(self: *Buffer, x: u8) void {
        c.janet_buffer_push_u8(self.toC(), x);
    }

    pub fn pushU16(self: *Buffer, x: u16) void {
        c.janet_buffer_push_u16(self.toC(), x);
    }

    pub fn pushU32(self: *Buffer, x: u32) void {
        c.janet_buffer_push_u32(self.toC(), x);
    }

    pub fn pushU64(self: *Buffer, x: u64) void {
        c.janet_buffer_push_u64(self.toC(), x);
    }

    pub fn wrap(self: *Buffer) Janet {
        return Janet.fromC(c.janet_wrap_buffer(self.toC()));
    }
};

/// Immutable Janet tuple. `slice` aliases GC-owned memory; the header
/// (length, hash, source map) is reachable via `head`.
pub const Tuple = struct {
    slice: TypeImpl,

    pub const TypeImpl = []const Janet;
    pub const TypeC = [*c]const c.Janet;

    pub const Head = extern struct {
        gc: GCObject,
        length: i32,
        hash: i32,
        sm_line: i32,
        sm_column: i32,
        data: [*]const Janet,
    };

    /// Recovers the slice length from the tuple header preceding the data.
    pub fn fromC(ptr: TypeC) Tuple {
        const p = @ptrCast([*]const Janet, ptr);
        const janet_head = c.janet_tuple_head(ptr);
        const h = @ptrCast(*Head, @alignCast(@alignOf(*Head), janet_head));
        const len = @intCast(usize, h.length);
        return Tuple{ .slice = p[0..len] };
    }

    pub fn toC(self: Tuple) TypeC {
        return @ptrCast(TypeC, self.slice.ptr);
    }

    pub fn head(self: Tuple) *Head {
        const h = c.janet_tuple_head(@ptrCast(*const c.Janet, self.slice.ptr));
        const aligned_head = @alignCast(@alignOf(*Head), h);
        return @ptrCast(*Head, aligned_head);
    }

    pub fn length(self: Tuple) i32 {
        return self.head().length;
    }

    /// Starts building a tuple of `len` values; finish with `end`.
    pub fn begin(len: i32) [*]Janet {
        return @ptrCast([*]Janet, c.janet_tuple_begin(len));
    }

    pub fn end(tuple: [*]Janet) Tuple {
        return fromC(c.janet_tuple_end(@ptrCast([*c]c.Janet, tuple)));
    }

    pub fn initN(values: []const Janet) Tuple {
        return fromC(c.janet_tuple_n(
            @ptrCast([*c]const c.Janet, values.ptr),
            @intCast(i32, values.len),
        ));
    }

    pub fn wrap(self: Tuple) Janet {
        return Janet.fromC(c.janet_wrap_tuple(self.toC()));
    }
};

/// Immutable Janet struct (hash map); `ptr` aliases GC-owned KV storage.
pub const Struct = struct {
    ptr: TypeImpl,

    pub const TypeImpl = [*]const KV;
    pub const TypeC = [*c]const c.JanetKV;

    pub const Head = extern struct {
        gc: GCObject,
        length: i32,
        hash: i32,
        capacity: i32,
        proto: ?[*]const KV,
        data: [*]const KV,
    };

    pub fn toC(self: Struct) TypeC {
        return @ptrCast([*]const c.JanetKV, self.ptr);
    }

    pub fn fromC(ptr: TypeC) Struct {
        return Struct{ .ptr = @ptrCast(TypeImpl, ptr) };
    }

    pub fn initN(kvs: []const KV) Struct {
        var st = begin(@intCast(i32, kvs.len));
        for (kvs) |kv| put(st, kv.key, kv.value);
        return end(st);
    }

    pub fn head(self: Struct) *Head {
        const h = c.janet_struct_head(@ptrCast(*const c.JanetKV, self.toC()));
        const aligned_head = @alignCast(@alignOf(*Head), h);
        return @ptrCast(*Head, aligned_head);
    }

    pub fn length(self: Struct) i32 {
        return self.head().length;
    }

    /// Starts building a struct of `count` pairs; fill with `put`, finish with `end`.
    pub fn begin(count: i32) [*]KV {
        return @ptrCast([*]KV, c.janet_struct_begin(count));
    }

    pub fn put(st: [*]KV, key: Janet, value: Janet) void {
        c.janet_struct_put(@ptrCast([*c]c.JanetKV, st), key.toC(), value.toC());
    }

    pub fn end(st: [*]KV) Struct {
        return fromC(c.janet_struct_end(@ptrCast([*c]c.JanetKV, st)));
    }

    pub fn get(self: Struct, key: Janet) ?Janet {
        const value = Janet.fromC(c.janet_struct_get(self.toC(), key.toC()));
        if (value.checkType(.nil)) {
            return null;
        } else {
            return value;
        }
    }

    /// Lookup that also reports which struct in the proto chain held the key.
    pub fn getEx(self: Struct, key: Janet, which: *Struct) ?Janet {
        const value = Janet.fromC(c.janet_struct_get_ex(
            self.toC(),
            key.toC(),
            @ptrCast([*c]c.JanetStruct, &which.ptr),
        ));
        if (value.checkType(.nil)) {
            return null;
        } else {
            return value;
        }
    }

    /// Lookup ignoring the prototype chain.
    pub fn rawGet(self: Struct, key: Janet) ?Janet {
        const value = Janet.fromC(c.janet_struct_rawget(self.toC(), key.toC()));
        if (value.checkType(.nil)) {
            return null;
        } else {
            return value;
        }
    }

    pub fn toTable(self: Struct) *Table {
        return Table.fromC(c.janet_struct_to_table(self.toC()));
    }

    pub fn find(self: Struct, key: Janet) ?*const KV {
        return @ptrCast(?*const KV, c.janet_struct_find(self.toC(), key.toC()));
    }

    /// Consistency fix: previously took `self: *Struct` while every sibling
    /// wrap() here takes the value type, which prevented wrapping a const
    /// Struct value. Zig's automatic ref/deref keeps `ptr.wrap()` call sites
    /// working, so this is backward-compatible.
    pub fn wrap(self: Struct) Janet {
        return Janet.fromC(c.janet_wrap_struct(self.toC()));
    }
};

/// Mutable Janet table; ABI-compatible with C JanetTable.
pub const Table = extern struct {
    gc: GCObject,
    count: i32,
    capacity: i32,
    deleted: i32,
    data: *KV,
    proto: ?*Table,

    pub fn toC(table: *Table) *c.JanetTable {
        return @ptrCast(*c.JanetTable, table);
    }

    pub fn fromC(janet_table: *c.JanetTable) *Table {
        return @ptrCast(*Table, janet_table);
    }

    /// C function: janet_table_init
    pub fn init(table: *Table, capacity: i32) *Table {
        return fromC(c.janet_table_init(table.toC(), capacity));
    }

    /// C function: janet_table
    pub fn initDynamic(capacity: i32) *Table {
        return fromC(c.janet_table(capacity));
    }

    pub fn initRaw(table: *Table, capacity: i32) *Table {
        return fromC(c.janet_table_init_raw(table.toC(), capacity));
    }

    pub fn initN(kvs: []const KV) *Table {
        var table = initDynamic(@intCast(i32, kvs.len));
        for (kvs) |kv| table.put(kv.key, kv.value);
        return table;
    }

    /// FIX: the body was `self.deinit();`, which recurses on itself forever
    /// instead of freeing the table. Call the C API (pairs janet_table_init).
    pub fn deinit(self: *Table) void {
        c.janet_table_deinit(self.toC());
    }

    pub fn get(self: *Table, key: Janet) ?Janet {
        const value = Janet.fromC(c.janet_table_get(self.toC(), key.toC()));
        if (value.checkType(.nil)) {
            return null;
        } else {
            return value;
        }
    }

    /// Lookup that also reports which table in the proto chain held the key.
    pub fn getEx(self: *Table, key: Janet, which: **Table) ?Janet {
        const value = Janet.fromC(c.janet_table_get_ex(
            self.toC(),
            key.toC(),
            @ptrCast([*c][*c]c.JanetTable, which),
        ));
        if (value.checkType(.nil)) {
            return null;
        } else {
            return value;
        }
    }

    /// Lookup ignoring the prototype chain.
    pub fn rawGet(self: *Table, key: Janet) ?Janet {
        const value = Janet.fromC(c.janet_table_rawget(self.toC(), key.toC()));
        if (value.checkType(.nil)) {
            return null;
        } else {
            return value;
        }
    }

    /// Removes `key`, returning the removed value if it was present.
    pub fn remove(self: *Table, key: Janet) ?Janet {
        const value = Janet.fromC(c.janet_table_remove(self.toC(), key.toC()));
        if (value.checkType(.nil)) {
            return null;
        } else {
            return value;
        }
    }

    pub fn put(self: *Table, key: Janet, value: Janet) void {
        c.janet_table_put(self.toC(), key.toC(), value.toC());
    }

    pub fn toStruct(self: *Table) Struct {
        return Struct.fromC(c.janet_table_to_struct(self.toC()));
    }

    pub fn mergeTable(self: *Table, other: *Table) void {
        c.janet_table_merge_table(self.toC(), other.toC());
    }

    pub fn mergeStruct(self: *Table, other: Struct) void {
        c.janet_table_merge_struct(self.toC(), other.toC());
    }

    pub fn find(self: *Table, key: Janet) ?*const KV {
        return @ptrCast(?*const KV, c.janet_table_find(self.toC(), key.toC()));
    }

    pub fn clone(self: *Table) *Table {
        return fromC(c.janet_table_clone(self.toC()));
    }

    pub fn clear(self: *Table) void {
        c.janet_table_clear(self.toC());
    }

    pub fn wrap(self: *Table) Janet {
        return Janet.fromC(c.janet_wrap_table(self.toC()));
    }

    pub fn fromEnvironment(env: *Environment) *Table {
        return @ptrCast(*Table, env);
    }

    pub fn toEnvironment(table: *Table) *Environment {
        return @ptrCast(*Environment, table);
    }
};

/// Inner structure is identical to `Table`. But this is a different concept from Table, the
/// data structure. It allows to run code, define values and other things. Can be converted
/// to or from Table.
pub const Environment = extern struct {
    // Layout mirrors `JanetTable` (and therefore `Table`) exactly — extern,
    // do not reorder fields.
    gc: GCObject,
    count: i32,
    capacity: i32,
    deleted: i32,
    data: *KV,
    proto: ?*Environment,

    pub fn toC(table: *Environment) *c.JanetTable {
        return @ptrCast(*c.JanetTable, table);
    }

    pub fn fromC(janet_table: *c.JanetTable) *Environment {
        return @ptrCast(*Environment, janet_table);
    }

    // Environment and Table share a layout, so conversion is a pointer cast.
    pub fn toTable(env: *Environment) *Table {
        return @ptrCast(*Table, env);
    }

    pub fn fromTable(table: *Table) *Environment {
        return @ptrCast(*Environment, table);
    }

    /// Alias for `coreEnv`.
    pub fn init(replacements: ?*Environment) *Environment {
        return coreEnv(replacements);
    }

    /// Returns the core environment (optionally seeded with `replacements`).
    /// C function: janet_core_env
    pub fn coreEnv(replacements: ?*Environment) *Environment {
        return Environment.fromC(c.janet_core_env(@ptrCast([*c]c.JanetTable, replacements)));
    }

    /// C function: janet_core_lookup_table
    pub fn coreLookupTable(replacements: ?*Environment) *Environment {
        return Environment.fromC(
            c.janet_core_lookup_table(@ptrCast([*c]c.JanetTable, replacements)),
        );
    }

    /// Defines an immutable binding `name` = `val` in this environment,
    /// with an optional docstring. C function: janet_def
    pub fn def(
        env: *Environment,
        name: [:0]const u8,
        val: Janet,
        documentation: ?[:0]const u8,
    ) void {
        if (documentation) |docs| {
            c.janet_def(env.toC(), name.ptr, val.toC(), docs.ptr);
        } else {
            c.janet_def(env.toC(), name.ptr, val.toC(), null);
        }
    }

    /// Defines a mutable binding (a Janet var). C function: janet_var
    pub fn @"var"(
        env: *Environment,
        name: [:0]const u8,
        val: Janet,
        documentation: ?[:0]const u8,
    ) void {
        if (documentation) |docs| {
            c.janet_var(env.toC(), name.ptr, val.toC(), docs.ptr);
        } else {
            c.janet_var(env.toC(), name.ptr, val.toC(), null);
        }
    }

    /// Registers a null-terminated array of C functions (`Reg.empty` sentinel).
    /// C function: janet_cfuns
    pub fn cfuns(env: *Environment, reg_prefix: [:0]const u8, funs: [*]const Reg) void {
        return c.janet_cfuns(env.toC(), reg_prefix.ptr, @ptrCast([*c]const c.JanetReg, funs));
    }

    /// C function: janet_cfuns_ext
    pub fn cfunsExt(env: *Environment, reg_prefix: [:0]const u8, funs: [*]const RegExt) void {
        return c.janet_cfuns_ext(
            env.toC(),
            reg_prefix.ptr,
            @ptrCast([*c]const c.JanetRegExt, funs),
        );
    }

    /// Like `cfuns` but prepends `reg_prefix` to each registered name.
    /// C function: janet_cfuns_prefix
    pub fn cfunsPrefix(env: *Environment, reg_prefix: [:0]const u8, funs: [*]const Reg) void {
        return c.janet_cfuns_prefix(
            env.toC(),
            reg_prefix.ptr,
            @ptrCast([*c]const c.JanetReg, funs),
        );
    }

    /// C function: janet_cfuns_ext_prefix
    pub fn cfunsExtPrefix(env: *Environment, reg_prefix: [:0]const u8, funs: [*]const RegExt) void {
        return c.janet_cfuns_ext_prefix(
            env.toC(),
            reg_prefix.ptr,
            @ptrCast([*c]const c.JanetRegExt, funs),
        );
    }

    /// Evaluates Janet source code in this environment; alias for `doBytes`.
    pub fn doString(env: *Environment, str: []const u8, source_path: [:0]const u8) !Janet {
        return try doBytes(env, str, source_path);
    }

    /// Evaluates `bytes` as Janet code, returning the last value, or an error
    /// decoded from janet_dobytes' error-flag bitmask.
    pub fn doBytes(env: *Environment, bytes: []const u8, source_path: [:0]const u8) !Janet {
        var janet: Janet = undefined;
        const errflags = c.janet_dobytes(
            env.toC(),
            bytes.ptr,
            @intCast(i32, bytes.len),
            source_path.ptr,
            @ptrCast([*c]c.Janet, &janet),
        );
        // Flags are a bitmask; check the most severe bits in order.
        if (errflags == 0) {
            return janet;
        } else if ((errflags & 0x01) == 0x01) {
            return error.RuntimeError;
        } else if ((errflags & 0x02) == 0x02) {
            return error.CompileError;
        } else if ((errflags & 0x04) == 0x04) {
            return error.ParseError;
        } else {
            return error.UnexpectedError;
        }
        // NOTE(review): every branch above returns, so this is dead code;
        // left in place to keep the block byte-identical.
        unreachable;
    }

    /// C function: janet_env_lookup
    pub fn envLookup(env: *Environment) *Environment {
        return Environment.fromC(c.janet_env_lookup(env.toC()));
    }

    /// C function: janet_env_lookup_into
    pub fn envLookupInto(
        renv: *Environment,
        env: *Environment,
        prefix: [:0]const u8,
        recurse: c_int,
    ) void {
        c.janet_env_lookup_into(renv.toC(), env.toC(), prefix.ptr, recurse);
    }
};

/// GC header shared by all heap-allocated Janet objects (mirror of C's JanetGCObject).
pub const GCObject = extern struct {
    flags: i32,
    data: extern union {
        next: *GCObject,
        refcount: i32,
    },
};

/// Mirror of C's `JanetFunction` (extern — field order is ABI).
pub const Function = extern struct {
    gc: GCObject,
    def: *Def,
    envs: [*]*Env,

    /// Mirror of C's `JanetFuncDef`.
    pub const Def = extern struct {
        gc: GCObject,
        environments: [*]i32,
        constants: [*]Janet,
        defs: [*]*Def,
        bytecode: [*]u32,
        closure_bitset: [*]u32,
        sourcemap: *SourceMapping,
        source: String.TypeC,
        name: String.TypeC,
        flags: i32,
        slot_count: i32,
        arity: i32,
        min_arity: i32,
        max_arity: i32,
        constants_length: i32,
        bytecode_length: i32,
        environments_length: i32,
        defs_length: i32,
    };

    /// Mirror of C's `JanetFuncEnv`.
    pub const Env = extern struct {
        gc: GCObject,
        as: extern union {
            fiber: *Fiber,
            values: [*]Janet,
        },
        length: i32,
        offset: i32,
    };

    pub fn toC(self: *Function) *c.JanetFunction {
        return @ptrCast(*c.JanetFunction, self);
    }

    pub fn fromC(ptr: *c.JanetFunction) *Function {
        return @ptrCast(*Function, @alignCast(@alignOf(*Function), ptr));
    }

    pub fn wrap(self: *Function) Janet {
        return Janet.fromC(c.janet_wrap_function(self.toC()));
    }

    /// Protected call: runs `fun` with `argv`, writing the result into `out`.
    /// A non-.ok signal is converted into a Zig error. `fiber`, when non-null,
    /// receives the fiber the call ran in. C function: janet_pcall
    pub fn pcall(
        fun: *Function,
        argv: []const Janet,
        out: *Janet,
        fiber: ?**Fiber,
    ) Signal.Error!void {
        const signal = @ptrCast(*Signal, &c.janet_pcall(
            fun.toC(),
            @intCast(i32, argv.len),
            Janet.toConstPtr(argv.ptr),
            @ptrCast(*c.Janet, out),
            @ptrCast([*c][*c]c.JanetFiber, fiber),
        )).*;
        switch (signal) {
            .ok => {},
            else => return signal.toError(),
        }
    }

    /// Unprotected call (panics propagate). C function: janet_call
    pub fn call(fun: *Function, argv: []const Janet) Signal.Error!void {
        const signal = @ptrCast(
            *Signal,
            &c.janet_call(fun.toC(), @intCast(i32, argv.len), Janet.toConstPtr(argv.ptr)),
        ).*;
        switch (signal) {
            .ok => {},
            else => return signal.toError(),
        }
    }
};

/// Wrapper around a C function usable from Janet.
pub const CFunction = struct {
    ptr: TypeImpl,

    /// Signature with Zig-side types.
    pub const TypeImpl = fn (argc: i32, argv: [*]Janet) callconv(.C) Janet;
    /// Signature with raw C types.
    pub const TypeC = fn (argc: i32, argv: [*c]c.Janet) callconv(.C) c.Janet;

    pub fn toC(self: CFunction) c.JanetCFunction {
        return @ptrCast(c.JanetCFunction, self.ptr);
    }

    pub fn fromC(ptr: c.JanetCFunction) CFunction {
        return CFunction{ .ptr = @ptrCast(TypeImpl, @alignCast(@alignOf(TypeImpl), ptr)) };
    }

    pub fn wrap(self: CFunction) Janet {
        return Janet.fromC(c.janet_wrap_cfunction(self.toC()));
    }
};

/// Mirror of C's `JanetFiber` (extern — field order is ABI).
pub const Fiber = extern struct {
    gc: GCObject,
    flags: i32,
    frame: i32,
    stack_start: i32,
    stack_stop: i32,
    capacity: i32,
    max_stack: i32,
    env: *Table,
    data: [*]Janet,
    child: *Fiber,
    last_value: Janet,
    // #ifdef JANET_EV
    waiting: *ListenerState,
    sched_id: i32,
    supervisor_channel: *anyopaque,

    pub const Status = enum(c_int) {
        dead,
        @"error",
        debug,
        pending,
        user0,
        user1,
        user2,
        user3,
        user4,
        user5,
        user6,
        user7,
        user8,
        user9,
        new,
        alive,
    };

    pub fn toC(self: *Fiber) *c.JanetFiber {
        return @ptrCast(*c.JanetFiber, self);
    }

    pub fn fromC(ptr: *c.JanetFiber) *Fiber {
        return @ptrCast(*Fiber, ptr);
    }

    /// Allocates a new fiber running `callee`. C function: janet_fiber
    pub fn init(callee: *Function, capacity: i32, argv: []const Janet) *Fiber {
        return fromC(c.janet_fiber(
            callee.toC(),
            capacity,
            @intCast(i32, argv.len),
            @ptrCast([*c]const c.Janet, argv.ptr),
        ));
    }

    /// Reuses an existing fiber for a new call. C function: janet_fiber_reset
    pub fn reset(self: *Fiber, callee: *Function, argv: []const Janet) *Fiber {
        return fromC(c.janet_fiber_reset(
            self.toC(),
            callee.toC(),
            @intCast(i32, argv.len),
            @ptrCast([*c]const c.Janet, argv.ptr),
        ));
    }

    pub fn status(self: *Fiber) Status {
        return @intToEnum(Status, c.janet_fiber_status(self.toC()));
    }

    pub fn currentFiber() *Fiber {
        return fromC(c.janet_current_fiber());
    }

    pub fn rootFiber() *Fiber {
        return fromC(c.janet_root_fiber());
    }

    pub fn wrap(self: *Fiber) Janet {
        return Janet.fromC(c.janet_wrap_fiber(self.toC()));
    }

    /// Resumes the fiber with input `in`; result written to `out`.
    /// C function: janet_continue
    pub fn @"continue"(fiber: *Fiber, in: Janet, out: *Janet) Signal.Error!void {
        const signal = @ptrCast(
            *Signal,
            &c.janet_continue(fiber.toC(), in.toC(), @ptrCast(*c.Janet, out)),
        ).*;
        switch (signal) {
            .ok => {},
            else => return signal.toError(),
        }
    }

    /// Resumes the fiber delivering signal `sig`. C function: janet_continue_signal
    pub fn continueSignal(fiber: *Fiber, in: Janet, out: *Janet, sig: Signal) Signal.Error!void {
        const signal = @ptrCast(*Signal, &c.janet_continue_signal(
            fiber.toC(),
            in.toC(),
            @ptrCast(*c.Janet, out),
            @ptrCast(*const c.JanetSignal, &sig).*,
        )).*;
        switch (signal) {
            .ok => {},
            else => return signal.toError(),
        }
    }

    /// Single-steps the fiber (one instruction). C function: janet_step
    pub fn step(fiber: *Fiber, in: Janet, out: *Janet) Signal.Error!void {
        const signal = @ptrCast(
            *Signal,
            &c.janet_step(fiber.toC(), in.toC(), @ptrCast(*c.Janet, out)),
        ).*;
        switch (signal) {
            .ok => {},
            else => return signal.toError(),
        }
    }

    /// Prints a stack trace for `err` to stderr. C function: janet_stacktrace
    /// NOTE(review): the method name is misspelled (`stackstrace`); renaming it
    /// would break existing callers, so it is left as-is.
    pub fn stackstrace(fiber: *Fiber, err: Janet) void {
        c.janet_stacktrace(fiber.toC(), err.toC());
    }

    /// C function: janet_stacktrace_ext
    pub fn stackstraceExt(fiber: *Fiber, err: Janet, prefix: [*:0]const u8) void {
        c.janet_stacktrace_ext(fiber.toC(), err.toC(), prefix.ptr);
    }
};

// Platform-dependent layout of the event-loop listener state (JanetListenerState).
pub const ListenerState = blk: {
    if (builtin.target.os.tag == .windows) {
        break :blk extern struct {
            machine: Listener,
            fiber: *Fiber,
            stream: *Stream,
            event: *anyopaque,
            tag: *anyopaque,
            bytes: c_int,
        };
    } else {
        break :blk extern struct {
            machine: Listener,
            fiber: *Fiber,
            stream: *Stream,
            event: *anyopaque,
            _index: usize,
            _mask: c_int,
            _next: *ListenerState,
        };
    }
};

// OS handle type: HANDLE pointer on Windows, file descriptor elsewhere.
pub const Handle = blk: {
    if (builtin.target.os.tag == .windows) {
        break :blk *anyopaque;
    } else {
        break :blk c_int;
    }
};

// NOTE(review): on Windows this yields `null` for the non-optional `*anyopaque`
// Handle above, which looks like it cannot compile on that target — Handle
// probably needs to be `?*anyopaque` there. Confirm on a Windows build.
pub const HANDLE_NONE: Handle = blk: {
    if (builtin.target.os.tag == .windows) {
        break :blk null;
    } else {
        break :blk -1;
    }
};

/// Mirror of C's `JanetStream`.
pub const Stream = extern struct {
    handle: Handle,
    flags: u32,
    state: *ListenerState,
    methods: *const anyopaque,
    _mask: c_int,
};

/// Event-loop callback signature (JanetListener).
pub const Listener = fn (state: *ListenerState, event: AsyncEvent) callconv(.C) AsyncStatus;

pub const AsyncStatus = enum(c_int) {
    not_done,
    done,
};

pub const AsyncEvent = enum(c_int) {
    init,
    mark,
    deinit,
    close,
    err,
    hup,
    read,
    write,
    cancel,
    complete,
    user,
};

/// Source position (mirror of JanetSourceMapping).
pub const SourceMapping = extern struct {
    line: i32,
    column: i32,
};

pub const MARSHAL_UNSAFE = c.JANET_MARSHAL_UNSAFE;

/// Serializes `x` into `buf`. C function: janet_marshal
pub fn marshal(buf: *Buffer, x: Janet, rreg: *Environment, flags: c_int) void {
    c.janet_marshal(buf.toC(), x.toC(), rreg.toC(), flags);
}

/// Deserializes a value from `bytes`; `next` receives the position after the
/// consumed data. C function: janet_unmarshal
pub fn unmarshal(bytes: []const u8, flags: c_int, reg: *Environment, next: [*]*const u8) Janet {
    return Janet.fromC(c.janet_unmarshal(
        bytes.ptr,
        bytes.len,
        flags,
        reg.toC(),
        @ptrCast([*c][*c]const u8, next),
    ));
}

/// Mirror of C's `JanetMarshalContext`, with typed helpers for the
/// janet_marshal_*/janet_unmarshal_* primitive families.
pub const MarshalContext = extern struct {
    m_state: *anyopaque,
    u_state: *anyopaque,
    flags: c_int,
    data: [*]const u8,
    at: *AbstractType,

    pub fn toC(mc: *MarshalContext) *c.JanetMarshalContext {
        return @ptrCast(*c.JanetMarshalContext, mc);
    }

    pub fn fromC(mc: *c.JanetMarshalContext) *MarshalContext {
        return @ptrCast(*MarshalContext, mc);
    }

    /// Writes one value of comptime-selected type `T` into the marshal stream.
    /// Unsupported types are rejected at compile time.
    pub fn marshal(ctx: *MarshalContext, comptime T: type, value: T) void {
        switch (T) {
            usize => c.janet_marshal_size(ctx.toC(), value),
            i32 => c.janet_marshal_int(ctx.toC(), value),
            i64 => c.janet_marshal_int64(ctx.toC(), value),
            u8 => c.janet_marshal_byte(ctx.toC(), value),
            []const u8 => c.janet_marshal_bytes(ctx.toC(), value.ptr, value.len),
            Janet => c.janet_marshal_janet(ctx.toC(), value.toC()),
            *anyopaque => c.janet_marshal_abstract(ctx.toC(), value),
            else => @compileError("Marshaling is not supported for '" ++ @typeName(T) ++ "'"),
        }
    }

    /// Reads one value of type `T` from the unmarshal stream. Must be called
    /// in the same order/types as the matching `marshal` calls.
    pub fn unmarshal(ctx: *MarshalContext, comptime T: type) T {
        return switch (T) {
            usize => c.janet_unmarshal_size(ctx.toC()),
            i32 => c.janet_unmarshal_int(ctx.toC()),
            i64 => c.janet_unmarshal_int64(ctx.toC()),
            u8 => c.janet_unmarshal_byte(ctx.toC()),
            Janet => Janet.fromC(c.janet_unmarshal_janet(ctx.toC())),
            *anyopaque => @compileError("Please use 'unmarshalAbstract' instead"),
            []u8, []const u8 => @compileError("Please use 'unmarshalBytes' instead"),
            else => @compileError("Unmarshaling is not supported for '" ++ @typeName(T) ++ "'"),
        };
    }

    /// C function: janet_unmarshal_ensure
    pub fn unmarshalEnsure(ctx: *MarshalContext, size: usize) void {
        c.janet_unmarshal_ensure(ctx.toC(), size);
    }

    /// Reads exactly `dest.len` bytes. C function: janet_unmarshal_bytes
    pub fn unmarshalBytes(ctx: *MarshalContext, dest: []u8) void {
        c.janet_unmarshal_bytes(ctx.toC(), dest.ptr, dest.len);
    }

    /// C function: janet_unmarshal_abstract
    pub fn unmarshalAbstract(ctx: *MarshalContext, size: usize) *anyopaque {
        return c.janet_unmarshal_abstract(ctx.toC(), size) orelse unreachable;
    }

    /// C function: janet_unmarshal_abstract_reuse
    pub fn unmarshalAbstractReuse(ctx: *MarshalContext, p: *anyopaque) void {
        c.janet_unmarshal_abstract_reuse(ctx.toC(), p);
    }
};

/// Specialized struct with typed pointers. `ValueType` must be the type of the actual value,
/// not the pointer to the value, for example, for direct C translation of `*c_void`, `ValueType`
/// must be `c_void`.
pub fn Abstract(comptime ValueType: type) type {
    return struct {
        ptr: *ValueType,

        const Self = @This();

        /// Header Janet places before abstract data (mirror of JanetAbstractHead).
        pub const Head = extern struct {
            gc: GCObject,
            @"type": *Type,
            size: usize,
            data: [*]c_longlong,
        };

        /// Typed mirror of C's `JanetAbstractType` vtable; every callback is
        /// optional and defaults to null (not implemented).
        pub const Type = extern struct {
            name: [*:0]const u8,
            gc: ?fn (p: *ValueType, len: usize) callconv(.C) c_int = null,
            gc_mark: ?fn (p: *ValueType, len: usize) callconv(.C) c_int = null,
            get: ?fn (p: *ValueType, key: Janet, out: *Janet) callconv(.C) c_int = null,
            put: ?fn (p: *ValueType, key: Janet, value: Janet) callconv(.C) void = null,
            marshal: ?fn (p: *ValueType, ctx: *MarshalContext) callconv(.C) void = null,
            unmarshal: ?fn (ctx: *MarshalContext) callconv(.C) *ValueType = null,
            to_string: ?fn (p: *ValueType, buffer: *Buffer) callconv(.C) void = null,
            compare: ?fn (lhs: *ValueType, rhs: *ValueType) callconv(.C) c_int = null,
            hash: ?fn (p: *ValueType, len: usize) callconv(.C) i32 = null,
            next: ?fn (p: *ValueType, key: Janet) callconv(.C) Janet = null,
            call: ?fn (p: *ValueType, argc: i32, argv: [*]Janet) callconv(.C) Janet = null,

            pub fn toC(self: *const Type) *const c.JanetAbstractType {
                return @ptrCast(*const c.JanetAbstractType, self);
            }

            pub fn fromC(p: *const c.JanetAbstractType) *const Type {
                return @ptrCast(*const Type, p);
            }

            /// Erases the value type, viewing this vtable as the untyped variant.
            pub fn toVoid(self: *const Type) *const Abstract(anyopaque).Type {
                return @ptrCast(*const Abstract(anyopaque).Type, self);
            }

            /// Needed for unmarshaling to find the vtable by name.
            /// C function: janet_register_abstract_type
            pub fn register(self: *const Type) void {
                c.janet_register_abstract_type(self.toC());
            }
        };

        pub fn toC(self: Self) *anyopaque {
            return @ptrCast(*anyopaque, self.ptr);
        }

        pub fn fromC(p: *anyopaque) Self {
            return Self{ .ptr = @ptrCast(*ValueType, @alignCast(@alignOf(*ValueType), p)) };
        }

        /// Allocates a GC-managed abstract of `@sizeOf(ValueType)` bytes.
        pub fn init(value: *const Type) Self {
            if (ValueType != anyopaque) {
                return fromC(c.janet_abstract(value.toC(), @sizeOf(ValueType)) orelse unreachable);
            } else {
                unreachable; // please use initVoid
            }
        }

        /// Wraps an existing pointer without allocating.
        pub fn initFromPtr(value: *ValueType) Self {
            return Self{ .ptr = value };
        }

        /// Untyped variant of `init`: caller supplies the byte size.
        pub fn initVoid(value: *const AbstractType, size: usize) Self {
            if (ValueType == anyopaque) {
                return fromC(c.janet_abstract(value.toC(), size) orelse unreachable);
            } else {
                unreachable; // please use init
            }
        }

        pub fn wrap(self: Self) Janet {
            return Janet.fromC(c.janet_wrap_abstract(self.toC()));
        }

        /// Writes this abstract into the marshal stream.
        pub fn marshal(self: Self, ctx: *MarshalContext) void {
            c.janet_marshal_abstract(ctx.toC(), self.toC());
        }

        /// Reads an abstract of `@sizeOf(ValueType)` bytes from the stream.
        pub fn unmarshal(ctx: *MarshalContext) Self {
            if (ValueType != anyopaque) {
                return fromC(
                    c.janet_unmarshal_abstract(ctx.toC(), @sizeOf(ValueType)) orelse unreachable,
                );
            } else {
                unreachable; // please use unmarshalAbstract
            }
        }
    };
}

pub const VoidAbstract = Abstract(anyopaque);

/// Pure translation of a C struct with pointers *c_void.
pub const AbstractType = VoidAbstract.Type;

/// Mirror of C's `JanetReg` (c-function registration entry).
pub const Reg = extern struct {
    name: ?[*:0]const u8,
    cfun: ?CFunction.TypeImpl,
    documentation: ?[*:0]const u8 = null,

    /// Null sentinel terminating a registration array.
    pub const empty = Reg{ .name = null, .cfun = null };
};

/// Mirror of C's `JanetRegExt` (registration entry with source info).
pub const RegExt = extern struct {
    name: ?[*:0]const u8,
    cfun: ?CFunction.TypeImpl,
    documentation: ?[*:0]const u8 = null,
    source_file: ?[*:0]const u8 = null,
    source_line: i32 = 0,

    pub const empty = RegExt{ .name = null, .cfun = null };
};

/// Mirror of C's `JanetMethod`.
pub const Method = extern struct {
    name: [*:0]const u8,
    cfun: CFunction.TypeImpl,
};

/// Read-only view over an indexed (array/tuple) value (JanetView).
pub const View = extern struct {
    items: [*]const Janet,
    len: i32,

    pub fn slice(self: View) []const Janet {
        return self.items[0..self.len];
    }
};

/// Read-only view over a byte sequence (JanetByteView).
pub const ByteView = extern struct {
    bytes: [*]const u8,
    len: i32,

    pub fn slice(self: ByteView) []const u8 {
        return self.bytes[0..self.len];
    }
};

/// Read-only view over a dictionary (JanetDictView). Note that `slice`
/// spans the full capacity — entries are scattered, with empty slots.
pub const DictView = extern struct {
    kvs: [*]const KV,
    len: i32,
    cap: i32,

    pub fn slice(self: DictView) []const KV {
        return self.kvs[0..self.cap];
    }
};

/// Mirror of C's `JanetRange`.
pub const Range = extern struct {
    start: i32,
    end: i32,
};

/// Mirror of C's `JanetRNG` (pseudo-random number generator state).
pub const RNG = extern struct {
    a: u32,
    b: u32,
    c: u32,
    d: u32,
    counter: u32,
};

test "refAllDecls" {
    testing.refAllDecls(@This());
    testing.refAllDecls(JanetMixin);
    testing.refAllDecls(String);
    testing.refAllDecls(Symbol);
    testing.refAllDecls(Keyword);
    testing.refAllDecls(Array);
    testing.refAllDecls(Buffer);
    testing.refAllDecls(Table);
    testing.refAllDecls(Struct);
    testing.refAllDecls(Tuple);
    testing.refAllDecls(Abstract(anyopaque));
    testing.refAllDecls(Function);
    testing.refAllDecls(CFunction);
    testing.refAllDecls(Environment);
    testing.refAllDecls(MarshalContext);
}

test "hello world" {
    try init();
    defer deinit();
    const env = Environment.coreEnv(null);
    _ = try env.doString("(print `hello, world!`)", "main");
}

// Exercises `Janet.unwrap` for every representable Zig-side type.
test "unwrap values" {
    try init();
    defer deinit();
    const env = Environment.coreEnv(null);
    {
        const value = try env.doString("1", "main");
        try testing.expectEqual(@as(i32, 1), try value.unwrap(i32));
    }
    {
        const value = try env.doString("1", "main");
        try testing.expectEqual(@as(f64, 1), try value.unwrap(f64));
    }
    {
        const value = try env.doString("true", "main");
        try testing.expectEqual(true, try value.unwrap(bool));
    }
    {
        const value = try env.doString("\"str\"", "main");
        try testing.expectEqualStrings("str", (try value.unwrap(String)).slice);
    }
    {
        const value = try env.doString(":str", "main");
        try testing.expectEqualStrings("str", (try value.unwrap(Keyword)).slice);
    }
    {
        const value = try env.doString("'str", "main");
        try testing.expectEqualStrings("str", (try value.unwrap(Symbol)).slice);
    }
    {
        const value = try env.doString("[58 true 36.0]", "main");
        const tuple = try value.unwrap(Tuple);
        try testing.expectEqual(@as(i32, 3), tuple.head().length);
        try testing.expectEqual(@as(usize, 3), tuple.slice.len);
        try testing.expectEqual(@as(i32, 58), try tuple.slice[0].unwrap(i32));
        try testing.expectEqual(true, try tuple.slice[1].unwrap(bool));
        try testing.expectEqual(@as(f64, 36), try tuple.slice[2].unwrap(f64));
    }
    {
        const value = try env.doString("@[58 true 36.0]", "main");
        const array = try value.unwrap(*Array);
        try testing.expectEqual(@as(i32, 3), array.count);
        try testing.expectEqual(@as(i32, 58), try array.data[0].unwrap(i32));
        try testing.expectEqual(true, try array.data[1].unwrap(bool));
        try testing.expectEqual(@as(f64, 36), try array.data[2].unwrap(f64));
    }
    {
        const value = try env.doString("@\"str\"", "main");
        const buffer = try value.unwrap(*Buffer);
        try testing.expectEqual(@as(i32, 3), buffer.count);
        try testing.expectEqual(@as(u8, 's'), buffer.data[0]);
        try testing.expectEqual(@as(u8, 't'), buffer.data[1]);
        try testing.expectEqual(@as(u8, 'r'), buffer.data[2]);
    }
    {
        const value = try env.doString("{:kw 2 'sym 8 98 56}", "main");
        _ = try value.unwrap(Struct);
    }
    {
        const value = try env.doString("@{:kw 2 'sym 8 98 56}", "main");
        _ = try value.unwrap(*Table);
    }
    {
        const value = try env.doString("marshal", "main");
        _ = try value.unwrap(CFunction);
    }
    {
        const value = try env.doString("+", "main");
        _ = try value.unwrap(*Function);
    }
    {
        const value = try env.doString("(file/temp)", "main");
        _ = try value.unwrapAbstract(anyopaque);
    }
    {
        const value = try env.doString("(fiber/current)", "main");
        _ = try value.unwrap(*Fiber);
    }
}

test "janet_type" {
    try init();
    defer deinit();
    const env = Environment.coreEnv(null);
    const value = try env.doString("1", "main");
    try testing.expectEqual(Janet.Type.number, value.janetType());
}

test "janet_checktypes" {
    try init();
    defer deinit();
    const env = Environment.coreEnv(null);
    {
        const value = try env.doString("1", "main");
        try testing.expectEqual(true, value.checkTypes(TFLAG_NUMBER));
    }
    {
        const value = try env.doString(":str", "main");
        try testing.expectEqual(true, value.checkTypes(TFLAG_BYTES));
    }
}

test "struct" {
    try init();
    defer deinit();
    const env = Environment.coreEnv(null);
    const value = try env.doString("{:kw 2 'sym 8 98 56}", "main");
    const st = try value.unwrap(Struct);
    const first_kv = st.get(keyword("kw")).?;
    const second_kv = st.get(symbol("sym")).?;
    const third_kv = st.get(wrap(i32, 98)).?;
    try testing.expectEqual(@as(i32, 3), st.head().length);
    try testing.expectEqual(@as(i32, 2), try first_kv.unwrap(i32));
    try testing.expectEqual(@as(i32, 8), try second_kv.unwrap(i32));
    try testing.expectEqual(@as(i32, 56), try third_kv.unwrap(i32));
    if (st.get(wrap(i32, 123))) |_| return error.MustBeNull;
}

test "table" {
    try init();
    defer deinit();
    {
        const env = Environment.coreEnv(null);
        const value = try env.doString("@{:kw 2 'sym 8 98 56}", "main");
        const table = try value.unwrap(*Table);
        const first_kv = table.get(keyword("kw")).?;
        const second_kv = table.get(symbol("sym")).?;
        const third_kv = table.get(wrap(i32, 98)).?;
        try testing.expectEqual(@as(i32, 3), table.count);
        try testing.expectEqual(@as(i32, 2), try first_kv.unwrap(i32));
        try testing.expectEqual(@as(i32, 8), try second_kv.unwrap(i32));
        try testing.expectEqual(@as(i32, 56), try third_kv.unwrap(i32));
        if (table.get(wrap(i32, 123))) |_| return error.MustBeNull;
    }
    {
        var table = Table.initDynamic(5);
        table.put(keyword("apples"), wrap(i32, 2));
        table.put(keyword("oranges"), wrap(i32, 8));
        table.put(keyword("peaches"), wrap(i32, 1));
        const apples = table.get(keyword("apples")).?;
        try testing.expectEqual(@as(i32, 2), try apples.unwrap(i32));
        var which_table: *Table = undefined;
        const oranges = table.getEx(keyword("oranges"), &which_table).?;
        try testing.expectEqual(@as(i32, 8), try oranges.unwrap(i32));
        try testing.expectEqual(table, which_table);
        const peaches = table.rawGet(keyword("peaches")).?;
        try testing.expectEqual(@as(i32, 1), try peaches.unwrap(i32));
        _ = table.remove(keyword("peaches"));
        if (table.get(keyword("peaches"))) |_| return error.MustBeNull;
    }
}

// Example native struct exposed to Janet as an abstract type (used by tests).
const ZigStruct = struct {
    counter: u32,

    // Receives pointer to struct and argument
    pub fn inc(self: *ZigStruct, n: u32) void {
        self.counter += n;
    }

    // Can throw error
    pub fn dec(self: *ZigStruct) !void {
        if (self.counter == 0) return error.MustBeAboveZero;
        self.counter -= 1;
    }
};

const ZigStructAbstract = Abstract(ZigStruct);
const zig_struct_abstract_type = ZigStructAbstract.Type{ .name = "zig-struct" };

/// Initializer with an optional default value for the `counter`.
fn cfunZigStruct(argc: i32, argv: [*]const Janet) callconv(.C) Janet {
    arity(argc, 0, 1);
    const st_abstract = ZigStructAbstract.init(&zig_struct_abstract_type);
    // BUGFIX: argument indices are 0-based (cf. `get(u32, argv, 1)` for the
    // SECOND argument in cfunInc below); the optional initial counter is the
    // first argument, so it lives at index 0 — index 1 always fell back to
    // the default, silently ignoring a supplied value.
    const n = opt(u32, argv, argc, 0, 1);
    st_abstract.ptr.counter = n;
    return st_abstract.wrap();
}

/// `inc` wrapper which receives a struct and a number to increase the `counter` to.
fn cfunInc(argc: i32, argv: [*]const Janet) callconv(.C) Janet {
    fixArity(argc, 2);
    var st_abstract = getAbstract(argv, 0, ZigStruct, &zig_struct_abstract_type);
    const n = get(u32, argv, 1);
    st_abstract.ptr.inc(n);
    return nil();
}

/// `dec` wrapper which fails if we try to decrease the `counter` below 0.
fn cfunDec(argc: i32, argv: [*]const Janet) callconv(.C) Janet {
    fixArity(argc, 1);
    var st_abstract = getAbstract(argv, 0, ZigStruct, &zig_struct_abstract_type);
    st_abstract.ptr.dec() catch {
        panic("expected failure, part of the test");
    };
    return nil();
}

/// Simple getter returning an integer value.
fn cfunGetcounter(argc: i32, argv: [*]const Janet) callconv(.C) Janet {
    fixArity(argc, 1);
    const st_abstract = getAbstract(argv, 0, ZigStruct, &zig_struct_abstract_type);
    return wrap(i32, @bitCast(i32, st_abstract.ptr.counter));
}

// Registration table, terminated by the null sentinel `Reg.empty`.
const zig_struct_cfuns = [_]Reg{
    Reg{ .name = "struct-init", .cfun = cfunZigStruct },
    Reg{ .name = "inc", .cfun = cfunInc },
    Reg{ .name = "dec", .cfun = cfunDec },
    Reg{ .name = "get-counter", .cfun = cfunGetcounter },
    Reg.empty,
};

test "abstract initialized inside Janet" {
    try init();
    defer deinit();
    var env = Environment.coreEnv(null);
    env.cfunsPrefix("zig", &zig_struct_cfuns);
    _ = try env.doString("(def st (zig/struct-init))", "main");
    // Default value is 1 if not supplied with the initializer.
    _ = try env.doString("(assert (= (zig/get-counter st) 1))", "main");
    _ = try env.doString("(zig/dec st)", "main");
    _ = try env.doString("(assert (= (zig/get-counter st) 0))", "main");
    // Expected fail of dec function.
    try testing.expectError(error.RuntimeError, env.doString("(zig/dec st)", "main"));
    _ = try env.doString("(assert (= (zig/get-counter st) 0))", "main");
    _ = try env.doString("(zig/inc st 5)", "main");
    _ = try env.doString("(assert (= (zig/get-counter st) 5))", "main");
}

test "abstract injected from Zig" {
    try init();
    defer deinit();
    var env = Environment.coreEnv(null);
    env.cfunsPrefix("zig", &zig_struct_cfuns);
    var st_abstract = ZigStructAbstract.init(&zig_struct_abstract_type);
    st_abstract.ptr.* = ZigStruct{ .counter = 2 };
    env.def("st", st_abstract.wrap(), null);
    _ = try env.doString("(assert (= (zig/get-counter st) 2))", "main");
    st_abstract.ptr.counter = 1;
    _ = try env.doString("(assert (= (zig/get-counter st) 1))", "main");
    _ = try env.doString("(zig/dec st)", "main");
    try testing.expectEqual(@as(u32, 0), st_abstract.ptr.counter);
}

// We need an allocator to pass to the Zig struct.
const AllyAbstractType = Abstract(std.mem.Allocator).Type{ .name = "zig-allocator" };

/// Initializer to make Zig's allocator into existence.
fn cfunInitZigAllocator(argc: i32, argv: [*]const Janet) callconv(.C) Janet {
    _ = argv;
    fixArity(argc, 0);
    const ally_abstract = Abstract(std.mem.Allocator).init(&AllyAbstractType);
    // This will have to be a global definition of an allocator, otherwise it is impossible
    // to get one inside this function. Here we are fine since `testing` has a global allocator.
    ally_abstract.ptr.* = testing.allocator;
    return ally_abstract.wrap();
}

// With all the possible functions.
const ComplexZigStruct = struct {
    counter: i32,
    // Storing native to Janet values in Zig data structure.
    storage: std.StringHashMap(Janet),

    // It needs to be initialized first with the Zig's allocator.
    pub fn init(ally: std.mem.Allocator) ComplexZigStruct {
        return ComplexZigStruct{
            .counter = 0,
            .storage = std.StringHashMap(Janet).init(ally),
        };
    }

    pub fn deinit(self: *ComplexZigStruct) void {
        self.storage.deinit();
    }
};

// Abstract-type `gc` callback: release the hash map when Janet collects us.
fn czsGc(st: *ComplexZigStruct, len: usize) callconv(.C) c_int {
    _ = len;
    st.deinit();
    return 0;
}

// Abstract-type `gc_mark` callback: keep stored Janet values alive across GC.
fn czsGcMark(st: *ComplexZigStruct, len: usize) callconv(.C) c_int {
    _ = len;
    var it = st.storage.valueIterator();
    while (it.next()) |value| {
        mark(value.*);
    }
    return 0;
}

// Abstract-type `get` callback: keyword lookup into `storage`; returns 1 when found.
fn czsGet(st: *ComplexZigStruct, key: Janet, out: *Janet) callconv(.C) c_int {
    const k = key.unwrap(Keyword) catch {
        panic("Not a keyword");
    };
    if (st.storage.get(k.slice)) |value| {
        out.* = value;
        return 1;
    } else {
        return 0;
    }
}

// Abstract-type `put` callback.
fn czsPut(st: *ComplexZigStruct, key: Janet, value: Janet) callconv(.C) void {
    const k = key.unwrap(Keyword) catch {
        panic("Not a keyword");
    };
    // HACK: allocating the key to be stored in our struct's `storage` via Janet's allocation
    // function which is never freed. I'm too lazy to implement allocating and deallocating of
    // strings via Zig's allocator, sorry.
    var allocated_key = @ptrCast(
        [*]u8,
        @alignCast(@alignOf([*]u8), malloc(k.slice.len)),
    )[0..k.slice.len];
    std.mem.copy(u8, allocated_key, k.slice);
    st.storage.put(allocated_key, value) catch {
        panic("Out of memory");
    };
}

// We only marshal the counter, the `storage` is lost, as well as allocator.
fn czsMarshal(st: *ComplexZigStruct, ctx: *MarshalContext) callconv(.C) void {
    Abstract(ComplexZigStruct).initFromPtr(st).marshal(ctx);
    // HACK: we can't marshal more than one abstract type, so we marshal the pointer as integer
    // since the allocator is global and will not change its pointer during program execution.
    // BUGFIX: previously this took the address of a stack-local copy
    // (`var ally = st.storage.allocator; ... &ally`) which is dangling by the
    // time czsUnmarshal dereferences it; point at the field inside `st`
    // instead, which lives as long as the abstract itself.
    ctx.marshal(usize, @ptrToInt(&st.storage.allocator));
    ctx.marshal(i32, st.counter);
}

// Must read back in exactly the order czsMarshal wrote: abstract, usize, i32.
fn czsUnmarshal(ctx: *MarshalContext) callconv(.C) *ComplexZigStruct {
    const st_abstract = Abstract(ComplexZigStruct).unmarshal(ctx);
    const allyp = ctx.unmarshal(usize);
    const ally = @intToPtr(*std.mem.Allocator, allyp);
    const counter = ctx.unmarshal(i32);
    st_abstract.ptr.counter = counter;
    st_abstract.ptr.storage = std.StringHashMap(Janet).init(ally.*);
    return st_abstract.ptr;
}

// Abstract-type `to_string` callback: fixed representation, state-independent.
fn czsToString(st: *ComplexZigStruct, buffer: *Buffer) callconv(.C) void {
    _ = st;
    buffer.pushBytes("complex-zig-struct-printing");
}

// Abstract-type `compare` callback: ordered by `counter` only.
fn czsCompare(lhs: *ComplexZigStruct, rhs: *ComplexZigStruct) callconv(.C) c_int {
    if (lhs.counter > rhs.counter) {
        return 1;
    } else if (lhs.counter < rhs.counter) {
        return -1;
    } else {
        return 0;
    }
}

// Abstract-type `hash` callback: constant hash, fine for a test fixture.
fn czsHash(st: *ComplexZigStruct, len: usize) callconv(.C) i32 {
    _ = st;
    _ = len;
    return 1337;
}

// Abstract-type `next` callback: drives Janet iteration (`eachp` etc.) over
// `storage` keys. nil key means "start"; otherwise return the key after `key`.
fn czsNext(st: *ComplexZigStruct, key: Janet) callconv(.C) Janet {
    if (key.checkType(.nil)) {
        var it = st.storage.keyIterator();
        if (it.next()) |next_key| {
            return keyword(next_key.*);
        } else {
            return nil();
        }
    } else {
        const str_key = key.unwrap(Keyword) catch {
            panic("Not a keyword");
        };
        var it = st.storage.keyIterator();
        while (it.next()) |k| {
            if (std.mem.eql(u8, k.*, str_key.slice)) {
                if (it.next()) |next_key| {
                    return keyword(next_key.*);
                } else {
                    return nil();
                }
            }
        } else {
            return nil();
        }
    }
    unreachable;
}

// We set the counter to the supplied value.
// Abstract-type `call` callback: lets the abstract be invoked like a function;
// sets the counter to the supplied value and returns true.
fn czsCall(st: *ComplexZigStruct, argc: i32, argv: [*]Janet) callconv(.C) Janet {
    fixArity(argc, 1);
    const new_counter = get(i32, argv, 0);
    st.counter = new_counter;
    return wrap(bool, true);
}

// Vtable wiring up every czs* callback for the "complex-zig-struct" abstract type.
const complex_zig_struct_abstract_type = Abstract(ComplexZigStruct).Type{
    .name = "complex-zig-struct",
    .gc = czsGc,
    .gc_mark = czsGcMark,
    .get = czsGet,
    .put = czsPut,
    .marshal = czsMarshal,
    .unmarshal = czsUnmarshal,
    .to_string = czsToString,
    .compare = czsCompare,
    .hash = czsHash,
    .next = czsNext,
    .call = czsCall,
};

/// Initializer; requires a Zig allocator abstract as its single argument.
fn cfunComplexZigStruct(argc: i32, argv: [*]const Janet) callconv(.C) Janet {
    fixArity(argc, 1);
    const st_abstract = Abstract(ComplexZigStruct).init(&complex_zig_struct_abstract_type);
    const ally_abstract = getAbstract(argv, 0, std.mem.Allocator, &AllyAbstractType);
    st_abstract.ptr.* = ComplexZigStruct.init(ally_abstract.ptr.*);
    return st_abstract.wrap();
}

/// Simple getter returning an integer value.
fn cfunGetComplexCounter(argc: i32, argv: [*]const Janet) callconv(.C) Janet {
    fixArity(argc, 1);
    const st_abstract = getAbstract(argv, 0, ComplexZigStruct, &complex_zig_struct_abstract_type);
    return wrap(i32, st_abstract.ptr.counter);
}

// Registration table, terminated by the null sentinel `Reg.empty`.
const complex_zig_struct_cfuns = [_]Reg{
    Reg{ .name = "complex-struct-init", .cfun = cfunComplexZigStruct },
    Reg{ .name = "get-counter", .cfun = cfunGetComplexCounter },
    Reg{ .name = "alloc-init", .cfun = cfunInitZigAllocator },
    Reg.empty,
};

test "complex abstract" {
    try init();
    // Testing `gc` function implementation. The memory for our abstract type is allocated
    // with testing.allocator, so the test will fail if we leak memory. And here Janet is
    // responsible for freeing all the allocated memory on deinit.
    defer deinit();
    complex_zig_struct_abstract_type.register();
    var env = Environment.coreEnv(null);
    env.cfunsPrefix("zig", &complex_zig_struct_cfuns);
    // Init our `testing.allocator` as a Janet abstract type.
    _ = try env.doString("(def ally (zig/alloc-init))", "main");
    // Init our complex struct which requires a Zig allocator.
    _ = try env.doString("(def st (zig/complex-struct-init ally))", "main");
    _ = try env.doString("(assert (= (zig/get-counter st) 0))", "main");
    // Testing `get` implementation.
    _ = try env.doString("(assert (= (get st :me) nil))", "main");
    // Testing `put` implementation.
    _ = try env.doString("(put st :me [1 2 3])", "main");
    // Testing `gcMark` implementation. If we don't implement gcMark, GC will collect [1 2 3]
    // tuple and Janet will panic trying to retrieve it.
    _ = try env.doString("(gccollect)", "main");
    _ = try env.doString("(assert (= (get st :me) [1 2 3]))", "main");
    // Testing `call` implementation.
    _ = try env.doString("(st 5)", "main");
    _ = try env.doString("(assert (= (zig/get-counter st) 5))", "main");
    // Testing marshaling.
    _ = try env.doString("(def marshaled (marshal st))", "main");
    _ = try env.doString("(def unmarshaled (unmarshal marshaled))", "main");
    _ = try env.doString("(assert (= (zig/get-counter unmarshaled) 5))", "main");
    // Testing `compare` implementation.
    _ = try env.doString("(assert (= (zig/get-counter st) 5))", "main");
    _ = try env.doString("(assert (= (zig/get-counter unmarshaled) 5))", "main");
    _ = try env.doString("(assert (compare= unmarshaled st))", "main");
    _ = try env.doString("(st 3)", "main");
    _ = try env.doString("(assert (compare> unmarshaled st))", "main");
    // Testing `hash` implementation.
    _ = try env.doString("(assert (= (hash st) 1337))", "main");
    // Testing `next` implementation.
    _ = try env.doString("(put st :mimi 42)", "main");
    _ = try env.doString(
        \\(eachp [k v] st
        \\ (assert (or (and (= k :me) (= v [1 2 3]))
        \\ (and (= k :mimi) (= v 42)))))
    ,
        "main",
    );
}

test "function call" {
    try init();
    defer deinit();
    const env = Environment.coreEnv(null);
    const value = try env.doString("+", "main");
    const func = try value.unwrap(*Function);
    var sum: Janet = undefined;
    try func.pcall(&[_]Janet{ wrap(i32, 2), wrap(i32, 2) }, &sum, null);
    try testing.expectEqual(@as(i32, 4), try sum.unwrap(i32));
}
src/janet.zig
const std = @import("std");
const backend = @import("backend.zig");
const data = @import("data.zig");
const Allocator = std.mem.Allocator;

/// Per-widget-type vtable: each concrete widget type exposes one `Class`
/// instance and `Widget` dispatches through it.
pub const Class = struct {
    typeName: []const u8,
    showFn: fn (widget: *Widget) anyerror!void,
    deinitFn: fn (widget: *Widget) void,
    preferredSizeFn: fn (widget: *const Widget, available: data.Size) data.Size,
    // offset into a list of updater optional pointers
    //updaters: []const usize,
};

/// A widget is a unique representation and constant size of any view.
pub const Widget = struct {
    // Type-erased pointer to the concrete widget value (see `as`/`cast`).
    data: usize,
    /// The allocator that can be used to free 'data'
    allocator: ?Allocator = null,
    peer: ?backend.PeerType = null,
    container_expanded: bool = false,
    class: *const Class,
    /// A widget can ONLY be parented by a Container
    parent: ?*Widget = null,
    name: *?[]const u8,
    /// If there is more available size than preferred size and the widget is not expanded,
    /// this will determine where will the widget be located horizontally.
    alignX: *data.DataWrapper(f32),
    /// If there is more available size than preferred size and the widget is not expanded,
    /// this will determine where will the widget be located vertically.
    alignY: *data.DataWrapper(f32),

    /// Dispatches to the concrete type's show implementation.
    pub fn show(self: *Widget) anyerror!void {
        try self.class.showFn(self);
    }

    /// Get the preferred size for the given available space.
    /// With this system, minimum size is widget.getPreferredSize(Size { .width = 0, .height = 0 }),
    /// and maximum size is widget.getPreferredSize(Size { .width = std.math.maxInt(u32), .height = std.math.maxInt(u32) })
    pub fn getPreferredSize(self: *const Widget, available: data.Size) data.Size {
        return self.class.preferredSizeFn(self, available);
    }

    /// Asserts widget data is of type T
    pub fn as(self: *const Widget, comptime T: type) *T {
        // Only pay for the class check when runtime safety is on.
        if (std.debug.runtime_safety) {
            if (!self.is(T)) {
                std.debug.panic("tried to cast widget to " ++ @typeName(T) ++ " but type is {s}", .{self.class.typeName});
            }
        }
        return @intToPtr(*T, self.data);
    }

    /// Returns if the class of the widget corresponds to T
    pub fn is(self: *const Widget, comptime T: type) bool {
        // Identity comparison: each widget type has exactly one WidgetClass.
        return self.class == &T.WidgetClass;
    }

    /// If widget is an instance of T, returns widget.as(T), otherwise return null
    pub fn cast(self: *const Widget, comptime T: type) ?*T {
        if (self.is(T)) {
            return self.as(T);
        } else {
            return null;
        }
    }

    /// Dispatches to the concrete type's deinit implementation.
    pub fn deinit(self: *Widget) void {
        self.class.deinitFn(self);
    }
};

const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;

// Minimal concrete widget type used only by the test below.
const TestType = struct {
    randomData: u16 = 0x1234,

    pub const WidgetClass = Class{
        .typeName = "TestType",
        .showFn = undefined,
        .deinitFn = undefined,
        .preferredSizeFn = undefined,
    };
};

test "widget basics" {
    var testWidget: TestType = .{};
    const widget = Widget{
        .data = @ptrToInt(&testWidget),
        .class = &TestType.WidgetClass,
        .name = undefined,
        .alignX = undefined,
        .alignY = undefined,
    };
    try expect(widget.is(TestType));
    const cast = widget.cast(TestType);
    try expect(cast != null);
    if (cast) |value| {
        try expectEqual(&testWidget, value);
        try expectEqual(@as(u16, 0x1234), value.randomData);
    }
}
src/widget.zig
// docgen: generates language-reference HTML from a template, tokenizing
// {#tag|...#} directives, building a table of contents (genToc), then
// rendering nodes and compiling/running embedded code examples (genHtml).
// Written against ~0.6-era std APIs (`warn`, `outStream`, `*mem.Allocator`,
// `args: var`) — TODO confirm the exact targeted Zig version before updating.
const std = @import("std"); const builtin = std.builtin; const io = std.io; const fs = std.fs; const process = std.process; const ChildProcess = std.ChildProcess; const warn = std.debug.warn; const mem = std.mem; const testing = std.testing;
// Hard cap on the input template size and on captured child-process output.
const max_doc_file_size = 10 * 1024 * 1024;
const exe_ext = @as(std.zig.CrossTarget, .{}).exeFileExt(); const obj_ext = @as(std.zig.CrossTarget, .{}).oFileExt(); const tmp_dir_name = "docgen_tmp"; const test_out_path = tmp_dir_name ++ fs.path.sep_str ++ "test" ++ exe_ext;
// Usage: docgen <zig-exe> <input-template> <output-html>.
// All allocations come from one arena freed at exit; the temp dir holding
// extracted examples is deleted (best-effort) on the way out.
pub fn main() !void { var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); const allocator = &arena.allocator; var args_it = process.args(); if (!args_it.skip()) @panic("expected self arg"); const zig_exe = try (args_it.next(allocator) orelse @panic("expected zig exe arg")); defer allocator.free(zig_exe); const in_file_name = try (args_it.next(allocator) orelse @panic("expected input arg")); defer allocator.free(in_file_name); const out_file_name = try (args_it.next(allocator) orelse @panic("expected output arg")); defer allocator.free(out_file_name); var in_file = try fs.cwd().openFile(in_file_name, .{ .read = true }); defer in_file.close(); var out_file = try fs.cwd().createFile(out_file_name, .{}); defer out_file.close(); const input_file_bytes = try in_file.inStream().readAllAlloc(allocator, max_doc_file_size); var buffered_out_stream = io.bufferedOutStream(out_file.outStream()); var tokenizer = Tokenizer.init(in_file_name, input_file_bytes); var toc = try genToc(allocator, &tokenizer); try fs.cwd().makePath(tmp_dir_name); defer fs.cwd().deleteTree(tmp_dir_name) catch {}; try genHtml(allocator, &tokenizer, &toc, buffered_out_stream.outStream(), zig_exe); try buffered_out_stream.flush(); }
// A template token: raw Content, or the pieces of a {#...|...#} tag
// (BracketOpen "{#", TagContent, Separator "|", BracketClose "#}").
const Token = struct { id: Id, start: usize, end: usize, const Id = enum { Invalid, Content, BracketOpen, TagContent, Separator, BracketClose, Eof, }; };
// State machine splitting the template into Content and tag tokens.
// `code_node_count` counts {#code_begin...#} blocks for progress reporting.
const Tokenizer = struct { buffer: []const u8, index: usize, state: State,
source_file_name: []const u8, code_node_count: usize,
const State = enum { Start, LBracket, Hash, TagName, Eof, };
fn init(source_file_name: []const u8, buffer: []const u8) Tokenizer { return Tokenizer{ .buffer = buffer, .index = 0, .state = State.Start, .source_file_name = source_file_name, .code_node_count = 0, }; }
// Returns the next token; a pending token is flushed before a new delimiter
// is consumed (hence the `self.index -= 1` re-scan steps below).
fn next(self: *Tokenizer) Token { var result = Token{ .id = Token.Id.Eof, .start = self.index, .end = undefined, }; while (self.index < self.buffer.len) : (self.index += 1) { const c = self.buffer[self.index]; switch (self.state) { State.Start => switch (c) { '{' => { self.state = State.LBracket; }, else => { result.id = Token.Id.Content; }, }, State.LBracket => switch (c) { '#' => { if (result.id != Token.Id.Eof) { self.index -= 1; self.state = State.Start; break; } else { result.id = Token.Id.BracketOpen; self.index += 1; self.state = State.TagName; break; } }, else => { result.id = Token.Id.Content; self.state = State.Start; }, }, State.TagName => switch (c) { '|' => { if (result.id != Token.Id.Eof) { break; } else { result.id = Token.Id.Separator; self.index += 1; break; } }, '#' => { self.state = State.Hash; }, else => { result.id = Token.Id.TagContent; }, }, State.Hash => switch (c) { '}' => { if (result.id != Token.Id.Eof) { self.index -= 1; self.state = State.TagName; break; } else { result.id = Token.Id.BracketClose; self.index += 1; self.state = State.Start; break; } }, else => { result.id = Token.Id.TagContent; self.state = State.TagName; }, }, State.Eof => unreachable, } } else { switch (self.state) { State.Start, State.LBracket, State.Eof => {}, else => { result.id = Token.Id.Invalid; }, } self.state = State.Eof; }
result.end = self.index; return result; }
// 0-based line/column of a token plus the byte range of its source line,
// used by parseError to echo the offending line.
const Location = struct { line: usize, column: usize, line_start: usize, line_end: usize, };
fn getTokenLocation(self: *Tokenizer, token: Token) Location { var loc = Location{ .line = 0, .column = 0, .line_start = 0, .line_end = 0, }; for (self.buffer) |c, i| { if (i == token.start) {
loc.line_end = i; while (loc.line_end < self.buffer.len and self.buffer[loc.line_end] != '\n') : (loc.line_end += 1) {} return loc; } if (c == '\n') { loc.line += 1; loc.column = 0; loc.line_start = i + 1; } else { loc.column += 1; } } return loc; } };
// Prints a compiler-style "file:line:col: error: ..." message with the source
// line and a ~~~ underline to stderr; always returns error.ParseError.
fn parseError(tokenizer: *Tokenizer, token: Token, comptime fmt: []const u8, args: var) anyerror { const loc = tokenizer.getTokenLocation(token); const args_prefix = .{ tokenizer.source_file_name, loc.line + 1, loc.column + 1 }; warn("{}:{}:{}: error: " ++ fmt ++ "\n", args_prefix ++ args); if (loc.line_start <= loc.line_end) { warn("{}\n", .{tokenizer.buffer[loc.line_start..loc.line_end]}); { var i: usize = 0; while (i < loc.column) : (i += 1) { warn(" ", .{}); } } { const caret_count = token.end - token.start; var i: usize = 0; while (i < caret_count) : (i += 1) { warn("~", .{}); } } warn("\n", .{}); } return error.ParseError; }
// Fails with a ParseError unless `token` has the expected id.
fn assertToken(tokenizer: *Tokenizer, token: Token, id: Token.Id) !void { if (token.id != id) { return parseError(tokenizer, token, "expected {}, found {}", .{ @tagName(id), @tagName(token.id) }); } }
// Consumes and returns the next token, requiring it to have the given id.
fn eatToken(tokenizer: *Tokenizer, id: Token.Id) !Token { const token = tokenizer.next(); try assertToken(tokenizer, token, id); return token; }
// Parsed document model: each Node is emitted by genToc and rendered by genHtml.
const HeaderOpen = struct { name: []const u8, url: []const u8, n: usize, };
const SeeAlsoItem = struct { name: []const u8, token: Token, };
const ExpectedOutcome = enum { Succeed, Fail, BuildFail, };
// One embedded code example; `id` selects how genHtml builds/runs it, and the
// TestError/TestSafety/Obj payloads carry the expected error-message substring.
const Code = struct { id: Id, name: []const u8, source_token: Token, is_inline: bool, mode: builtin.Mode, link_objects: []const []const u8, target_str: ?[]const u8, link_libc: bool, const Id = union(enum) { Test, TestError: []const u8, TestSafety: []const u8, Exe: ExpectedOutcome, Obj: ?[]const u8, Lib, }; };
const Link = struct { url: []const u8, name: []const u8, token: Token, };
const Node = union(enum) { Content: []const u8, Nav, Builtin: Token, HeaderOpen: HeaderOpen, SeeAlso: []const SeeAlsoItem, Code: Code, Link: Link, Syntax: Token, };
const Toc =
struct { nodes: []Node, toc: []u8, urls: std.StringHashMap(Token), };
const Action = enum { Open, Close, };
// Pass 1: parse the token stream into Nodes, build the nested <ul> table of
// contents, and record header urls (rejecting duplicates and unbalanced
// header_open/header_close pairs).
fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc { var urls = std.StringHashMap(Token).init(allocator); errdefer urls.deinit(); var header_stack_size: usize = 0; var last_action = Action.Open; var last_columns: ?u8 = null; var toc_buf = std.ArrayList(u8).init(allocator); defer toc_buf.deinit(); var toc = toc_buf.outStream(); var nodes = std.ArrayList(Node).init(allocator); defer nodes.deinit(); try toc.writeByte('\n'); while (true) { const token = tokenizer.next(); switch (token.id) { Token.Id.Eof => { if (header_stack_size != 0) { return parseError(tokenizer, token, "unbalanced headers", .{}); } try toc.writeAll(" </ul>\n"); break; }, Token.Id.Content => { try nodes.append(Node{ .Content = tokenizer.buffer[token.start..token.end] }); }, Token.Id.BracketOpen => { const tag_token = try eatToken(tokenizer, Token.Id.TagContent); const tag_name = tokenizer.buffer[tag_token.start..tag_token.end]; if (mem.eql(u8, tag_name, "nav")) { _ = try eatToken(tokenizer, Token.Id.BracketClose); try nodes.append(Node.Nav); } else if (mem.eql(u8, tag_name, "builtin")) { _ = try eatToken(tokenizer, Token.Id.BracketClose); try nodes.append(Node{ .Builtin = tag_token }); } else if (mem.eql(u8, tag_name, "header_open")) { _ = try eatToken(tokenizer, Token.Id.Separator); const content_token = try eatToken(tokenizer, Token.Id.TagContent); const content = tokenizer.buffer[content_token.start..content_token.end]; var columns: ?u8 = null; while (true) { const bracket_tok = tokenizer.next(); switch (bracket_tok.id) { .BracketClose => break, .Separator => continue, .TagContent => { const param = tokenizer.buffer[bracket_tok.start..bracket_tok.end]; if (mem.eql(u8, param, "2col")) { columns = 2; } else { return parseError( tokenizer, bracket_tok, "unrecognized header_open param: {}", .{param}, ); } }, else => return parseError(tokenizer, bracket_tok, "invalid header_open token", .{}), } }
header_stack_size += 1; const urlized = try urlize(allocator, content); try nodes.append(Node{ .HeaderOpen = HeaderOpen{ .name = content, .url = urlized, .n = header_stack_size, }, }); if (try urls.put(urlized, tag_token)) |entry| { parseError(tokenizer, tag_token, "duplicate header url: #{}", .{urlized}) catch {}; parseError(tokenizer, entry.value, "other tag here", .{}) catch {}; return error.ParseError; } if (last_action == Action.Open) { try toc.writeByte('\n'); try toc.writeByteNTimes(' ', header_stack_size * 4); if (last_columns) |n| { try toc.print("<ul style=\"columns: {}\">\n", .{n}); } else { try toc.writeAll("<ul>\n"); } } else { last_action = Action.Open; } last_columns = columns; try toc.writeByteNTimes(' ', 4 + header_stack_size * 4); try toc.print("<li><a id=\"toc-{}\" href=\"#{}\">{}</a>", .{ urlized, urlized, content }); } else if (mem.eql(u8, tag_name, "header_close")) { if (header_stack_size == 0) { return parseError(tokenizer, tag_token, "unbalanced close header", .{}); } header_stack_size -= 1; _ = try eatToken(tokenizer, Token.Id.BracketClose); if (last_action == Action.Close) { try toc.writeByteNTimes(' ', 8 + header_stack_size * 4); try toc.writeAll("</ul></li>\n"); } else { try toc.writeAll("</li>\n"); last_action = Action.Close; } } else if (mem.eql(u8, tag_name, "see_also")) { var list = std.ArrayList(SeeAlsoItem).init(allocator); errdefer list.deinit(); while (true) { const see_also_tok = tokenizer.next(); switch (see_also_tok.id) { Token.Id.TagContent => { const content = tokenizer.buffer[see_also_tok.start..see_also_tok.end]; try list.append(SeeAlsoItem{ .name = content, .token = see_also_tok, }); }, Token.Id.Separator => {}, Token.Id.BracketClose => { try nodes.append(Node{ .SeeAlso = list.toOwnedSlice() }); break; }, else => return parseError(tokenizer, see_also_tok, "invalid see_also token", .{}), } } } else if (mem.eql(u8, tag_name, "link")) { _ = try eatToken(tokenizer, Token.Id.Separator); const name_tok = try
eatToken(tokenizer, Token.Id.TagContent); const name = tokenizer.buffer[name_tok.start..name_tok.end]; const url_name = blk: { const tok = tokenizer.next(); switch (tok.id) { Token.Id.BracketClose => break :blk name, Token.Id.Separator => { const explicit_text = try eatToken(tokenizer, Token.Id.TagContent); _ = try eatToken(tokenizer, Token.Id.BracketClose); break :blk tokenizer.buffer[explicit_text.start..explicit_text.end]; }, else => return parseError(tokenizer, tok, "invalid link token", .{}), } }; try nodes.append(Node{ .Link = Link{ .url = try urlize(allocator, url_name), .name = name, .token = name_tok, }, }); } else if (mem.eql(u8, tag_name, "code_begin")) { _ = try eatToken(tokenizer, Token.Id.Separator); const code_kind_tok = try eatToken(tokenizer, Token.Id.TagContent); var name: []const u8 = "test"; const maybe_sep = tokenizer.next(); switch (maybe_sep.id) { Token.Id.Separator => { const name_tok = try eatToken(tokenizer, Token.Id.TagContent); name = tokenizer.buffer[name_tok.start..name_tok.end]; _ = try eatToken(tokenizer, Token.Id.BracketClose); }, Token.Id.BracketClose => {}, else => return parseError(tokenizer, token, "invalid token", .{}), } const code_kind_str = tokenizer.buffer[code_kind_tok.start..code_kind_tok.end]; var code_kind_id: Code.Id = undefined; var is_inline = false; if (mem.eql(u8, code_kind_str, "exe")) { code_kind_id = Code.Id{ .Exe = ExpectedOutcome.Succeed }; } else if (mem.eql(u8, code_kind_str, "exe_err")) { code_kind_id = Code.Id{ .Exe = ExpectedOutcome.Fail }; } else if (mem.eql(u8, code_kind_str, "exe_build_err")) { code_kind_id = Code.Id{ .Exe = ExpectedOutcome.BuildFail }; } else if (mem.eql(u8, code_kind_str, "test")) { code_kind_id = Code.Id.Test; } else if (mem.eql(u8, code_kind_str, "test_err")) { code_kind_id = Code.Id{ .TestError = name }; name = "test"; } else if (mem.eql(u8, code_kind_str, "test_safety")) { code_kind_id = Code.Id{ .TestSafety = name }; name = "test"; } else if (mem.eql(u8, code_kind_str, "obj")) {
code_kind_id = Code.Id{ .Obj = null }; } else if (mem.eql(u8, code_kind_str, "obj_err")) { code_kind_id = Code.Id{ .Obj = name }; name = "test"; } else if (mem.eql(u8, code_kind_str, "lib")) { code_kind_id = Code.Id.Lib; } else if (mem.eql(u8, code_kind_str, "syntax")) { code_kind_id = Code.Id{ .Obj = null }; is_inline = true; } else { return parseError(tokenizer, code_kind_tok, "unrecognized code kind: {}", .{code_kind_str}); } var mode: builtin.Mode = .Debug; var link_objects = std.ArrayList([]const u8).init(allocator); defer link_objects.deinit(); var target_str: ?[]const u8 = null; var link_libc = false; const source_token = while (true) { const content_tok = try eatToken(tokenizer, Token.Id.Content); _ = try eatToken(tokenizer, Token.Id.BracketOpen); const end_code_tag = try eatToken(tokenizer, Token.Id.TagContent); const end_tag_name = tokenizer.buffer[end_code_tag.start..end_code_tag.end]; if (mem.eql(u8, end_tag_name, "code_release_fast")) { mode = .ReleaseFast; } else if (mem.eql(u8, end_tag_name, "code_release_safe")) { mode = .ReleaseSafe; } else if (mem.eql(u8, end_tag_name, "code_link_object")) { _ = try eatToken(tokenizer, Token.Id.Separator); const obj_tok = try eatToken(tokenizer, Token.Id.TagContent); try link_objects.append(tokenizer.buffer[obj_tok.start..obj_tok.end]); } else if (mem.eql(u8, end_tag_name, "target_windows")) { target_str = "x86_64-windows"; } else if (mem.eql(u8, end_tag_name, "target_linux_x86_64")) { target_str = "x86_64-linux"; } else if (mem.eql(u8, end_tag_name, "target_linux_riscv64")) { target_str = "riscv64-linux"; } else if (mem.eql(u8, end_tag_name, "target_wasm")) { target_str = "wasm32-freestanding"; } else if (mem.eql(u8, end_tag_name, "target_wasi")) { target_str = "wasm32-wasi"; } else if (mem.eql(u8, end_tag_name, "link_libc")) { link_libc = true; } else if (mem.eql(u8, end_tag_name, "code_end")) { _ = try eatToken(tokenizer, Token.Id.BracketClose); break content_tok; } else { return parseError( tokenizer,
end_code_tag, "invalid token inside code_begin: {}", .{end_tag_name}, ); } _ = try eatToken(tokenizer, Token.Id.BracketClose); } else unreachable; // TODO issue #707
try nodes.append(Node{ .Code = Code{ .id = code_kind_id, .name = name, .source_token = source_token, .is_inline = is_inline, .mode = mode, .link_objects = link_objects.toOwnedSlice(), .target_str = target_str, .link_libc = link_libc, }, }); tokenizer.code_node_count += 1; } else if (mem.eql(u8, tag_name, "syntax")) { _ = try eatToken(tokenizer, Token.Id.BracketClose); const content_tok = try eatToken(tokenizer, Token.Id.Content); _ = try eatToken(tokenizer, Token.Id.BracketOpen); const end_syntax_tag = try eatToken(tokenizer, Token.Id.TagContent); const end_tag_name = tokenizer.buffer[end_syntax_tag.start..end_syntax_tag.end]; if (!mem.eql(u8, end_tag_name, "endsyntax")) { return parseError( tokenizer, end_syntax_tag, "invalid token inside syntax: {}", .{end_tag_name}, ); } _ = try eatToken(tokenizer, Token.Id.BracketClose); try nodes.append(Node{ .Syntax = content_tok }); } else { return parseError(tokenizer, tag_token, "unrecognized tag name: {}", .{tag_name}); } }, else => return parseError(tokenizer, token, "invalid token", .{}), } } return Toc{ .nodes = nodes.toOwnedSlice(), .toc = toc_buf.toOwnedSlice(), .urls = urls, }; }
// Makes a url/anchor slug: keeps [A-Za-z0-9_-] as-is (case preserved),
// maps spaces to '-', drops everything else. Caller owns the returned slice.
fn urlize(allocator: *mem.Allocator, input: []const u8) ![]u8 { var buf = std.ArrayList(u8).init(allocator); defer buf.deinit(); const out = buf.outStream(); for (input) |c| { switch (c) { 'a'...'z', 'A'...'Z', '_', '-', '0'...'9' => { try out.writeByte(c); }, ' ' => { try out.writeByte('-'); }, else => {}, } } return buf.toOwnedSlice(); }
// Allocating wrapper around writeEscaped. Caller owns the returned slice.
fn escapeHtml(allocator: *mem.Allocator, input: []const u8) ![]u8 { var buf = std.ArrayList(u8).init(allocator); defer buf.deinit(); const out = buf.outStream(); try writeEscaped(out, input); return buf.toOwnedSlice(); }
// Streams `input` with the four HTML metacharacters (& < > ") entity-escaped.
fn writeEscaped(out: var, input: []const u8) !void { for (input) |c| { try switch (c) { '&' => out.writeAll("&amp;"), '<'
=> out.writeAll("&lt;"), '>' => out.writeAll("&gt;"), '"' => out.writeAll("&quot;"), else => out.writeByte(c), }; } }
//#define VT_RED "\x1b[31;1m"
//#define VT_GREEN "\x1b[32;1m"
//#define VT_CYAN "\x1b[36;1m"
//#define VT_WHITE "\x1b[37;1m"
//#define VT_BOLD "\x1b[0;1m"
//#define VT_RESET "\x1b[0m"
const TermState = enum { Start, Escape, LBracket, Number, AfterNumber, Arg, ArgNumber, ExpectEnd, };
test "term color" { const input_bytes = "A\x1b[32;1mgreen\x1b[0mB"; const result = try termColor(std.testing.allocator, input_bytes); defer std.testing.allocator.free(result); testing.expectEqualSlices(u8, "A<span class=\"t32\">green</span>B", result); }
// Translates ANSI "ESC[<n>;<m>m" color escapes into <span class="t<n>_<m>">
// HTML (closing any open spans first; "ESC[0m" just closes). At most two
// numeric parameters are parsed; other escapes yield error.UnsupportedEscape.
fn termColor(allocator: *mem.Allocator, input: []const u8) ![]u8 { var buf = std.ArrayList(u8).init(allocator); defer buf.deinit(); var out = buf.outStream(); var number_start_index: usize = undefined; var first_number: usize = undefined; var second_number: usize = undefined; var i: usize = 0; var state = TermState.Start; var open_span_count: usize = 0; while (i < input.len) : (i += 1) { const c = input[i]; switch (state) { TermState.Start => switch (c) { '\x1b' => state = TermState.Escape, else => try out.writeByte(c), }, TermState.Escape => switch (c) { '[' => state = TermState.LBracket, else => return error.UnsupportedEscape, }, TermState.LBracket => switch (c) { '0'...'9' => { number_start_index = i; state = TermState.Number; }, else => return error.UnsupportedEscape, }, TermState.Number => switch (c) { '0'...'9' => {}, else => { first_number = std.fmt.parseInt(usize, input[number_start_index..i], 10) catch unreachable; second_number = 0; state = TermState.AfterNumber; i -= 1; }, }, TermState.AfterNumber => switch (c) { ';' => state = TermState.Arg, else => { state = TermState.ExpectEnd; i -= 1; }, }, TermState.Arg => switch (c) { '0'...'9' => { number_start_index = i; state = TermState.ArgNumber; }, else => return error.UnsupportedEscape, }, TermState.ArgNumber => switch (c) { '0'...'9' => {}, else => {
second_number = std.fmt.parseInt(usize, input[number_start_index..i], 10) catch unreachable; state = TermState.ExpectEnd; i -= 1; }, }, TermState.ExpectEnd => switch (c) { 'm' => { state = TermState.Start; while (open_span_count != 0) : (open_span_count -= 1) { try out.writeAll("</span>"); } if (first_number != 0 or second_number != 0) { try out.print("<span class=\"t{}_{}\">", .{ first_number, second_number }); open_span_count += 1; } }, else => return error.UnsupportedEscape, }, } } return buf.toOwnedSlice(); }
// Builtin/primitive type names highlighted as types by tokenizeAndPrintRaw.
const builtin_types = [_][]const u8{ "f16", "f32", "f64", "f128", "c_longdouble", "c_short", "c_ushort", "c_int", "c_uint", "c_long", "c_ulong", "c_longlong", "c_ulonglong", "c_char", "c_void", "void", "bool", "isize", "usize", "noreturn", "type", "anyerror", "comptime_int", "comptime_float", };
fn isType(name: []const u8) bool { for (builtin_types) |t| { if (mem.eql(u8, t, name)) return true; } return false; }
// Syntax-highlights Zig source into <code class="zig"> HTML spans using
// std.zig.Tokenizer; identifiers matching iN/uN or builtin_types are styled
// as types, and the identifier following `fn` is styled as a function name.
fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Token, raw_src: []const u8) !void { const src = mem.trim(u8, raw_src, " \n"); try out.writeAll("<code class=\"zig\">"); var tokenizer = std.zig.Tokenizer.init(src); var index: usize = 0; var next_tok_is_fn = false; while (true) { const prev_tok_was_fn = next_tok_is_fn; next_tok_is_fn = false; const token = tokenizer.next(); try writeEscaped(out, src[index..token.start]); switch (token.id) { .Eof => break, .Keyword_align, .Keyword_and, .Keyword_asm, .Keyword_async, .Keyword_await, .Keyword_break, .Keyword_catch, .Keyword_comptime, .Keyword_const, .Keyword_continue, .Keyword_defer, .Keyword_else, .Keyword_enum, .Keyword_errdefer, .Keyword_error, .Keyword_export, .Keyword_extern, .Keyword_for, .Keyword_if, .Keyword_inline, .Keyword_nakedcc, .Keyword_noalias, .Keyword_noinline, .Keyword_nosuspend, .Keyword_or, .Keyword_orelse, .Keyword_packed, .Keyword_anyframe, .Keyword_pub, .Keyword_resume, .Keyword_return, .Keyword_linksection, .Keyword_callconv, .Keyword_stdcallcc,
.Keyword_struct, .Keyword_suspend, .Keyword_switch, .Keyword_test, .Keyword_threadlocal, .Keyword_try, .Keyword_union, .Keyword_unreachable, .Keyword_usingnamespace, .Keyword_var, .Keyword_volatile, .Keyword_allowzero, .Keyword_while, => { try out.writeAll("<span class=\"tok-kw\">"); try writeEscaped(out, src[token.start..token.end]); try out.writeAll("</span>"); }, .Keyword_fn => { try out.writeAll("<span class=\"tok-kw\">"); try writeEscaped(out, src[token.start..token.end]); try out.writeAll("</span>"); next_tok_is_fn = true; }, .Keyword_undefined, .Keyword_null, .Keyword_true, .Keyword_false, => { try out.writeAll("<span class=\"tok-null\">"); try writeEscaped(out, src[token.start..token.end]); try out.writeAll("</span>"); }, .StringLiteral, .MultilineStringLiteralLine, .CharLiteral, => { try out.writeAll("<span class=\"tok-str\">"); try writeEscaped(out, src[token.start..token.end]); try out.writeAll("</span>"); }, .Builtin => { try out.writeAll("<span class=\"tok-builtin\">"); try writeEscaped(out, src[token.start..token.end]); try out.writeAll("</span>"); }, .LineComment, .DocComment, .ContainerDocComment, .ShebangLine, => { try out.writeAll("<span class=\"tok-comment\">"); try writeEscaped(out, src[token.start..token.end]); try out.writeAll("</span>"); }, .Identifier => { if (prev_tok_was_fn) { try out.writeAll("<span class=\"tok-fn\">"); try writeEscaped(out, src[token.start..token.end]); try out.writeAll("</span>"); } else { const is_int = blk: { if (src[token.start] != 'i' and src[token.start] != 'u') break :blk false; var i = token.start + 1; if (i == token.end) break :blk false; while (i != token.end) : (i += 1) { if (src[i] < '0' or src[i] > '9') break :blk false; } break :blk true; }; if (is_int or isType(src[token.start..token.end])) { try out.writeAll("<span class=\"tok-type\">"); try writeEscaped(out, src[token.start..token.end]); try out.writeAll("</span>"); } else { try writeEscaped(out, src[token.start..token.end]); } } }, .IntegerLiteral,
.FloatLiteral, => { try out.writeAll("<span class=\"tok-number\">"); try writeEscaped(out, src[token.start..token.end]); try out.writeAll("</span>"); }, .Bang, .Pipe, .PipePipe, .PipeEqual, .Equal, .EqualEqual, .EqualAngleBracketRight, .BangEqual, .LParen, .RParen, .Semicolon, .Percent, .PercentEqual, .LBrace, .RBrace, .LBracket, .RBracket, .Period, .PeriodAsterisk, .Ellipsis2, .Ellipsis3, .Caret, .CaretEqual, .Plus, .PlusPlus, .PlusEqual, .PlusPercent, .PlusPercentEqual, .Minus, .MinusEqual, .MinusPercent, .MinusPercentEqual, .Asterisk, .AsteriskEqual, .AsteriskAsterisk, .AsteriskPercent, .AsteriskPercentEqual, .Arrow, .Colon, .Slash, .SlashEqual, .Comma, .Ampersand, .AmpersandEqual, .QuestionMark, .AngleBracketLeft, .AngleBracketLeftEqual, .AngleBracketAngleBracketLeft, .AngleBracketAngleBracketLeftEqual, .AngleBracketRight, .AngleBracketRightEqual, .AngleBracketAngleBracketRight, .AngleBracketAngleBracketRightEqual, .Tilde, => try writeEscaped(out, src[token.start..token.end]), .Invalid, .Invalid_ampersands => return parseError( docgen_tokenizer, source_token, "syntax error", .{}, ), } index = token.end; } try out.writeAll("</code>"); }
// Convenience wrapper: highlight the source slice referenced by `source_token`.
fn tokenizeAndPrint(docgen_tokenizer: *Tokenizer, out: var, source_token: Token) !void { const raw_src = docgen_tokenizer.buffer[source_token.start..source_token.end]; return tokenizeAndPrintRaw(docgen_tokenizer, out, source_token, raw_src); }
// Pass 2: render each Node to HTML. For Code nodes this writes the example to
// docgen_tmp, invokes `zig_exe` (build-exe/test/build-obj per Code.Id), checks
// the expected outcome/error text, and embeds the (ANSI-colored) output.
// NOTE(review): this function continues beyond the end of this chunk.
fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: var, zig_exe: []const u8) !void { var code_progress_index: usize = 0; var env_map = try process.getEnvMap(allocator); try env_map.set("ZIG_DEBUG_COLOR", "1"); const builtin_code = try getBuiltinCode(allocator, &env_map, zig_exe); for (toc.nodes) |node| { switch (node) { .Content => |data| { try out.writeAll(data); }, .Link => |info| { if (!toc.urls.contains(info.url)) { return parseError(tokenizer, info.token, "url not found: {}", .{info.url}); } try out.print("<a href=\"#{}\">{}</a>", .{ info.url, info.name }); },
.Nav => { try out.writeAll(toc.toc); }, .Builtin => |tok| { try out.writeAll("<pre>"); try tokenizeAndPrintRaw(tokenizer, out, tok, builtin_code); try out.writeAll("</pre>"); }, .HeaderOpen => |info| { try out.print( "<h{} id=\"{}\"><a href=\"#toc-{}\">{}</a> <a class=\"hdr\" href=\"#{}\">§</a></h{}>\n", .{ info.n, info.url, info.url, info.name, info.url, info.n }, ); }, .SeeAlso => |items| { try out.writeAll("<p>See also:</p><ul>\n"); for (items) |item| { const url = try urlize(allocator, item.name); if (!toc.urls.contains(url)) { return parseError(tokenizer, item.token, "url not found: {}", .{url}); } try out.print("<li><a href=\"#{}\">{}</a></li>\n", .{ url, item.name }); } try out.writeAll("</ul>\n"); }, .Syntax => |content_tok| { try tokenizeAndPrint(tokenizer, out, content_tok); }, .Code => |code| { code_progress_index += 1; warn("docgen example code {}/{}...", .{ code_progress_index, tokenizer.code_node_count }); const raw_source = tokenizer.buffer[code.source_token.start..code.source_token.end]; const trimmed_raw_source = mem.trim(u8, raw_source, " \n"); if (!code.is_inline) { try out.print("<p class=\"file\">{}.zig</p>", .{code.name}); } try out.writeAll("<pre>"); try tokenizeAndPrint(tokenizer, out, code.source_token); try out.writeAll("</pre>"); const name_plus_ext = try std.fmt.allocPrint(allocator, "{}.zig", .{code.name}); const tmp_source_file_name = try fs.path.join( allocator, &[_][]const u8{ tmp_dir_name, name_plus_ext }, ); try fs.cwd().writeFile(tmp_source_file_name, trimmed_raw_source); switch (code.id) { Code.Id.Exe => |expected_outcome| code_block: { const name_plus_bin_ext = try std.fmt.allocPrint(allocator, "{}{}", .{ code.name, exe_ext }); var build_args = std.ArrayList([]const u8).init(allocator); defer build_args.deinit(); try build_args.appendSlice(&[_][]const u8{ zig_exe, "build-exe", tmp_source_file_name, "--name", code.name, "--color", "on", "--cache", "on", }); try out.print("<pre><code class=\"shell\">$ zig build-exe {}.zig",
.{code.name}); switch (code.mode) { .Debug => {}, .ReleaseSafe => { try build_args.append("--release-safe"); try out.print(" --release-safe", .{}); }, .ReleaseFast => { try build_args.append("--release-fast"); try out.print(" --release-fast", .{}); }, .ReleaseSmall => { try build_args.append("--release-small"); try out.print(" --release-small", .{}); }, } for (code.link_objects) |link_object| { const name_with_ext = try std.fmt.allocPrint(allocator, "{}{}", .{ link_object, obj_ext }); const full_path_object = try fs.path.join( allocator, &[_][]const u8{ tmp_dir_name, name_with_ext }, ); try build_args.append("--object"); try build_args.append(full_path_object); try out.print(" --object {}", .{name_with_ext}); } if (code.link_libc) { try build_args.append("-lc"); try out.print(" -lc", .{}); } const target = try std.zig.CrossTarget.parse(.{ .arch_os_abi = code.target_str orelse "native", }); if (code.target_str) |triple| { try build_args.appendSlice(&[_][]const u8{ "-target", triple }); if (!code.is_inline) { try out.print(" -target {}", .{triple}); } } if (expected_outcome == .BuildFail) { const result = try ChildProcess.exec(.{ .allocator = allocator, .argv = build_args.items, .env_map = &env_map, .max_output_bytes = max_doc_file_size, }); switch (result.term) { .Exited => |exit_code| { if (exit_code == 0) { warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr}); for (build_args.items) |arg| warn("{} ", .{arg}) else warn("\n", .{}); return parseError(tokenizer, code.source_token, "example incorrectly compiled", .{}); } }, else => { warn("{}\nThe following command crashed:\n", .{result.stderr}); for (build_args.items) |arg| warn("{} ", .{arg}) else warn("\n", .{}); return parseError(tokenizer, code.source_token, "example compile crashed", .{}); }, } const escaped_stderr = try escapeHtml(allocator, result.stderr); const colored_stderr = try termColor(allocator, escaped_stderr); try out.print("\n{}</code></pre>\n", .{colored_stderr}); break
:code_block; } const exec_result = exec(allocator, &env_map, build_args.items) catch return parseError(tokenizer, code.source_token, "example failed to compile", .{}); if (code.target_str) |triple| { if (mem.startsWith(u8, triple, "wasm32") or mem.startsWith(u8, triple, "riscv64-linux") or (mem.startsWith(u8, triple, "x86_64-linux") and std.Target.current.os.tag != .linux or std.Target.current.cpu.arch != .x86_64)) { // skip execution
try out.print("</code></pre>\n", .{}); break :code_block; } } const path_to_exe_dir = mem.trim(u8, exec_result.stdout, " \r\n"); const path_to_exe_basename = try std.fmt.allocPrint(allocator, "{}{}", .{ code.name, target.exeFileExt(), }); const path_to_exe = try fs.path.join(allocator, &[_][]const u8{ path_to_exe_dir, path_to_exe_basename, }); const run_args = &[_][]const u8{path_to_exe}; var exited_with_signal = false; const result = if (expected_outcome == ExpectedOutcome.Fail) blk: { const result = try ChildProcess.exec(.{ .allocator = allocator, .argv = run_args, .env_map = &env_map, .max_output_bytes = max_doc_file_size, }); switch (result.term) { .Exited => |exit_code| { if (exit_code == 0) { warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr}); for (run_args) |arg| warn("{} ", .{arg}) else warn("\n", .{}); return parseError(tokenizer, code.source_token, "example incorrectly compiled", .{}); } }, .Signal => exited_with_signal = true, else => {}, } break :blk result; } else blk: { break :blk exec(allocator, &env_map, run_args) catch return parseError(tokenizer, code.source_token, "example crashed", .{}); }; const escaped_stderr = try escapeHtml(allocator, result.stderr); const escaped_stdout = try escapeHtml(allocator, result.stdout); const colored_stderr = try termColor(allocator, escaped_stderr); const colored_stdout = try termColor(allocator, escaped_stdout); try out.print("\n$ ./{}\n{}{}", .{ code.name, colored_stdout, colored_stderr }); if (exited_with_signal) { try out.print("(process terminated by signal)", .{}); } try out.print("</code></pre>\n", .{}); },
Code.Id.Test => { var test_args = std.ArrayList([]const u8).init(allocator); defer test_args.deinit(); try test_args.appendSlice(&[_][]const u8{ zig_exe, "test", tmp_source_file_name, "--cache", "on", }); try out.print("<pre><code class=\"shell\">$ zig test {}.zig", .{code.name}); switch (code.mode) { .Debug => {}, .ReleaseSafe => { try test_args.append("--release-safe"); try out.print(" --release-safe", .{}); }, .ReleaseFast => { try test_args.append("--release-fast"); try out.print(" --release-fast", .{}); }, .ReleaseSmall => { try test_args.append("--release-small"); try out.print(" --release-small", .{}); }, } if (code.link_libc) { try test_args.append("-lc"); try out.print(" -lc", .{}); } if (code.target_str) |triple| { try test_args.appendSlice(&[_][]const u8{ "-target", triple }); try out.print(" -target {}", .{triple}); } const result = exec(allocator, &env_map, test_args.items) catch return parseError(tokenizer, code.source_token, "test failed", .{}); const escaped_stderr = try escapeHtml(allocator, result.stderr); const escaped_stdout = try escapeHtml(allocator, result.stdout); try out.print("\n{}{}</code></pre>\n", .{ escaped_stderr, escaped_stdout }); }, Code.Id.TestError => |error_match| { var test_args = std.ArrayList([]const u8).init(allocator); defer test_args.deinit(); try test_args.appendSlice(&[_][]const u8{ zig_exe, "test", "--color", "on", tmp_source_file_name, "--output-dir", tmp_dir_name, }); try out.print("<pre><code class=\"shell\">$ zig test {}.zig", .{code.name}); switch (code.mode) { .Debug => {}, .ReleaseSafe => { try test_args.append("--release-safe"); try out.print(" --release-safe", .{}); }, .ReleaseFast => { try test_args.append("--release-fast"); try out.print(" --release-fast", .{}); }, .ReleaseSmall => { try test_args.append("--release-small"); try out.print(" --release-small", .{}); }, } const result = try ChildProcess.exec(.{ .allocator = allocator, .argv =
test_args.items, .env_map = &env_map, .max_output_bytes = max_doc_file_size, }); switch (result.term) { .Exited => |exit_code| { if (exit_code == 0) { warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr}); for (test_args.items) |arg| warn("{} ", .{arg}) else warn("\n", .{}); return parseError(tokenizer, code.source_token, "example incorrectly compiled", .{}); } }, else => { warn("{}\nThe following command crashed:\n", .{result.stderr}); for (test_args.items) |arg| warn("{} ", .{arg}) else warn("\n", .{}); return parseError(tokenizer, code.source_token, "example compile crashed", .{}); }, } if (mem.indexOf(u8, result.stderr, error_match) == null) { warn("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match }); return parseError(tokenizer, code.source_token, "example did not have expected compile error", .{}); } const escaped_stderr = try escapeHtml(allocator, result.stderr); const colored_stderr = try termColor(allocator, escaped_stderr); try out.print("\n{}</code></pre>\n", .{colored_stderr}); }, Code.Id.TestSafety => |error_match| { var test_args = std.ArrayList([]const u8).init(allocator); defer test_args.deinit(); try test_args.appendSlice(&[_][]const u8{ zig_exe, "test", tmp_source_file_name, "--output-dir", tmp_dir_name, }); var mode_arg: []const u8 = ""; switch (code.mode) { .Debug => {}, .ReleaseSafe => { try test_args.append("--release-safe"); mode_arg = " --release-safe"; }, .ReleaseFast => { try test_args.append("--release-fast"); mode_arg = " --release-fast"; }, .ReleaseSmall => { try test_args.append("--release-small"); mode_arg = " --release-small"; }, } const result = try ChildProcess.exec(.{ .allocator = allocator, .argv = test_args.items, .env_map = &env_map, .max_output_bytes = max_doc_file_size, }); switch (result.term) { .Exited => |exit_code| { if (exit_code == 0) { warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr}); for (test_args.items) |arg| warn("{} ", .{arg}) else warn("\n",
.{}); return parseError(tokenizer, code.source_token, "example test incorrectly succeeded", .{}); } }, else => { warn("{}\nThe following command crashed:\n", .{result.stderr}); for (test_args.items) |arg| warn("{} ", .{arg}) else warn("\n", .{}); return parseError(tokenizer, code.source_token, "example compile crashed", .{}); }, } if (mem.indexOf(u8, result.stderr, error_match) == null) { warn("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match }); return parseError(tokenizer, code.source_token, "example did not have expected runtime safety error message", .{}); } const escaped_stderr = try escapeHtml(allocator, result.stderr); const colored_stderr = try termColor(allocator, escaped_stderr); try out.print("<pre><code class=\"shell\">$ zig test {}.zig{}\n{}</code></pre>\n", .{ code.name, mode_arg, colored_stderr, }); }, Code.Id.Obj => |maybe_error_match| { const name_plus_obj_ext = try std.fmt.allocPrint(allocator, "{}{}", .{ code.name, obj_ext }); const tmp_obj_file_name = try fs.path.join( allocator, &[_][]const u8{ tmp_dir_name, name_plus_obj_ext }, ); var build_args = std.ArrayList([]const u8).init(allocator); defer build_args.deinit(); const name_plus_h_ext = try std.fmt.allocPrint(allocator, "{}.h", .{code.name}); const output_h_file_name = try fs.path.join( allocator, &[_][]const u8{ tmp_dir_name, name_plus_h_ext }, ); try build_args.appendSlice(&[_][]const u8{ zig_exe, "build-obj", tmp_source_file_name, "--color", "on", "--name", code.name, "--output-dir", tmp_dir_name, }); if (!code.is_inline) { try out.print("<pre><code class=\"shell\">$ zig build-obj {}.zig", .{code.name}); } switch (code.mode) { .Debug => {}, .ReleaseSafe => { try build_args.append("--release-safe"); if (!code.is_inline) { try out.print(" --release-safe", .{}); } }, .ReleaseFast => { try build_args.append("--release-fast"); if (!code.is_inline) { try out.print(" --release-fast", .{}); } }, .ReleaseSmall => { try build_args.append("--release-small"); if (!code.is_inline)
{ try out.print(" --release-small", .{}); } }, } if (code.target_str) |triple| { try build_args.appendSlice(&[_][]const u8{ "-target", triple }); try out.print(" -target {}", .{triple}); } if (maybe_error_match) |error_match| { const result = try ChildProcess.exec(.{ .allocator = allocator, .argv = build_args.items, .env_map = &env_map, .max_output_bytes = max_doc_file_size, }); switch (result.term) { .Exited => |exit_code| { if (exit_code == 0) { warn("{}\nThe following command incorrectly succeeded:\n", .{result.stderr}); for (build_args.items) |arg| warn("{} ", .{arg}) else warn("\n", .{}); return parseError(tokenizer, code.source_token, "example build incorrectly succeeded", .{}); } }, else => { warn("{}\nThe following command crashed:\n", .{result.stderr}); for (build_args.items) |arg| warn("{} ", .{arg}) else warn("\n", .{}); return parseError(tokenizer, code.source_token, "example compile crashed", .{}); }, } if (mem.indexOf(u8, result.stderr, error_match) == null) { warn("{}\nExpected to find '{}' in stderr", .{ result.stderr, error_match }); return parseError(tokenizer, code.source_token, "example did not have expected compile error message", .{}); } const escaped_stderr = try escapeHtml(allocator, result.stderr); const colored_stderr = try termColor(allocator, escaped_stderr); try out.print("\n{}", .{colored_stderr}); } else { _ = exec(allocator, &env_map, build_args.items) catch return parseError(tokenizer, code.source_token, "example failed to compile", .{}); } if (!code.is_inline) { try out.print("</code></pre>\n", .{}); } }, Code.Id.Lib => { var test_args = std.ArrayList([]const u8).init(allocator); defer test_args.deinit(); try test_args.appendSlice(&[_][]const u8{ zig_exe, "build-lib", tmp_source_file_name, "--output-dir", tmp_dir_name, }); try out.print("<pre><code class=\"shell\">$ zig build-lib {}.zig", .{code.name}); switch (code.mode) { .Debug => {}, .ReleaseSafe => { try test_args.append("--release-safe"); try out.print(" --release-safe", 
.{}); },
// (continuation of the Code.Id.Lib build-mode switch begun on an earlier line)
.ReleaseFast => {
    try test_args.append("--release-fast");
    try out.print(" --release-fast", .{});
},
.ReleaseSmall => {
    try test_args.append("--release-small");
    try out.print(" --release-small", .{});
},
}
// Forward an explicit -target triple to the compiler and echo it in the
// rendered shell command line.
if (code.target_str) |triple| {
    try test_args.appendSlice(&[_][]const u8{ "-target", triple });
    try out.print(" -target {}", .{triple});
}
const result = exec(allocator, &env_map, test_args.items) catch return parseError(tokenizer, code.source_token, "test failed", .{});
// Escape compiler output so it can be embedded verbatim in the generated HTML.
const escaped_stderr = try escapeHtml(allocator, result.stderr);
const escaped_stdout = try escapeHtml(allocator, result.stdout);
try out.print("\n{}{}</code></pre>\n", .{ escaped_stderr, escaped_stdout });
},
}
warn("OK\n", .{});
},
}
}
}

/// Runs a child process with the given argument vector and environment,
/// capturing up to max_doc_file_size bytes of output.
/// Returns the captured ExecResult when the process exits with code 0.
/// On a nonzero exit code, logs stderr plus the full command line and returns
/// error.ChildExitError; on any non-.Exited termination (e.g. killed by a
/// signal), logs the same and returns error.ChildCrashed.
fn exec(allocator: *mem.Allocator, env_map: *std.BufMap, args: []const []const u8) !ChildProcess.ExecResult {
    const result = try ChildProcess.exec(.{
        .allocator = allocator,
        .argv = args,
        .env_map = env_map,
        .max_output_bytes = max_doc_file_size,
    });
    switch (result.term) {
        .Exited => |exit_code| {
            if (exit_code != 0) {
                warn("{}\nThe following command exited with code {}:\n", .{ result.stderr, exit_code });
                // The trailing `else` of a for loop runs after normal completion:
                // prints the final newline once all args have been echoed.
                for (args) |arg| warn("{} ", .{arg}) else warn("\n", .{});
                return error.ChildExitError;
            }
        },
        else => {
            // Anything other than a clean .Exited term is treated as a crash.
            warn("{}\nThe following command crashed:\n", .{result.stderr});
            for (args) |arg| warn("{} ", .{arg}) else warn("\n", .{});
            return error.ChildCrashed;
        },
    }
    return result;
}

/// Invokes `zig builtin` and returns its stdout, i.e. the source text of the
/// compiler's @import("builtin") module. Memory is allocated with `allocator`;
/// with the arena used by this tool it is freed wholesale at shutdown.
fn getBuiltinCode(allocator: *mem.Allocator, env_map: *std.BufMap, zig_exe: []const u8) ![]const u8 {
    const result = try exec(allocator, env_map, &[_][]const u8{ zig_exe, "builtin", });
    return result.stdout;
}
doc/docgen.zig
const std = @import("std");
const ArrayListAligned = std.ArrayListAligned;
const fmt = std.fmt;
const fs = std.fs;
const heap = std.heap;
const io = std.io;
const math = std.math;
const mem = std.mem;
const os = std.os;
const process = std.process;
const Target = std.Target;
const unicode = std.unicode;

const clap = @import("thirdparty/clap");

const algorithm = @import("algorithm.zig");
const global = @import("global.zig");
const log = @import("log.zig");

// Defaults and limits for the tunable parameters (counter, word count) and for
// the interactively entered master password (length measured in codepoints).
const default_counter = 0;
const default_word_count = 6;
const max_password_length = 64;
const min_password_length = 4;
const max_word_count = 10;
const min_word_count = 5;

// UTF-8 requires at least four bytes to be able to display any code blocks in the farthest, most
// usable unicode plane.
const max_utf8_bytes = 4;

// Variables that must be cleaned up when the application is terminated (including by SIGINT).
var arena: ?heap.ArenaAllocator = null;
var generated_password = [_]u8{0} ** 128;
var user_password = [_]u8{0} ** (max_password_length * max_utf8_bytes);

/// Entry point. Reads DICE_COUNTER / DICE_WORD_COUNT environment variables and
/// command-line options, obtains the master password (hidden terminal input on
/// Linux, plain stdin elsewhere or with --stdin), derives a diceware-style
/// password via algorithm.generatePassword, and prints it to stdout.
/// Returns a process exit code drawn from the MainError enum below.
pub fn main() u8 {
    // From the official documentation:
    //
    // "It is generally recommended to avoid using @errorToInt, as the integer representation of an
    // error is not stable across source code changes."
    //
    // Exit codes are therefore modeled as a plain u8-backed enum instead.
    const MainError = enum(u8) {
        OK,
        EnvironmentVarsNotFound,
        InvalidArguments,
        TerminalUnobtainable,
        InvalidPassword,
        DicewareNotGenerated,
    };

    arena = heap.ArenaAllocator.init(heap.page_allocator);
    defer arena.?.deinit();
    const allocator = &arena.?.allocator;

    // os.getpid, which is used inside the deinitOnSigint function, has only
    // been implemented in linux as of Feb 11, 2020.
    if (Target.current.os.tag == .linux) {
        os.sigaction(
            os.SIGINT,
            &os.Sigaction{
                .sigaction = struct {
                    // Scrub password material from memory before dying on Ctrl-C.
                    fn deinitOnSigint(signo: i32, info: *os.siginfo_t, context: ?*c_void) callconv(.C) void {
                        mem.secureZero(u8, &generated_password);
                        mem.secureZero(u8, &user_password);
                        if (arena) |alloc| alloc.deinit();
                        // After cleaning up, proceed to kill this very program with a SIGINT
                        // signal. We do this by resetting the SIGINT sigaction handler to SIG_DFL.
                        //
                        // Big thank-you to:
                        // https://www.cons.org/cracauer/sigint.html for the protip.
                        os.sigaction(
                            os.SIGINT,
                            &os.Sigaction{
                                .sigaction = os.SIG_DFL,
                                .mask = os.empty_sigset,
                                .flags = os.SA_SIGINFO,
                            },
                            null,
                        );
                        os.kill(os.linux.getpid(), os.SIGINT) catch unreachable;
                    }
                }.deinitOnSigint,
                .mask = os.empty_sigset,
                .flags = os.SA_SIGINFO,
            },
            null,
        );
    }

    var site_name: []const u8 = undefined;
    var user_name: []const u8 = undefined;

    // As of the current version of Zig (Feb 7, 2020), only linux termios (for hiding password
    // input) is supported.
    var is_password_read_from_stdin = if (Target.current.os.tag == .linux) false else true;

    const stderr = &io.getStdErr().outStream();
    const stdout = &io.getStdOut().outStream();

    // Acquire environment variables. If they don't exist, assign default
    // values.
    var counter: u8 = block: {
        const counter_env_var = process.getEnvVarOwned(allocator, "DICE_COUNTER") catch |err| {
            switch (err) {
                error.OutOfMemory => log.fail("Failed acquiring environment variable; heap ran out of memory\n"),
                error.InvalidUtf8 => {
                    if (Target.current.os.tag == .windows) log.fail("String contains invalid utf-8 codepoints\n") else unreachable;
                },
                error.EnvironmentVariableNotFound => break :block default_counter,
            }
            return @enumToInt(MainError.EnvironmentVarsNotFound);
        };
        break :block fmt.parseUnsigned(u8, counter_env_var, 10) catch |err| {
            const overflow_str = "DICE_COUNTER environment variable must range from {} to {}; " ++ "reverting to default value\n";
            const invalid_char_str = "DICE_COUNTER environment variable contains invalid " ++ "characters; reverting to default value\n";
            switch (err) {
                error.Overflow => log.warnf(overflow_str, .{ math.minInt(u8), math.maxInt(u8) }),
                error.InvalidCharacter => log.warn(invalid_char_str),
            }
            break :block default_counter;
        };
    };
    var word_count: u4 = block: {
        const word_count_env_var = process.getEnvVarOwned(allocator, "DICE_WORD_COUNT") catch |err| {
            switch (err) {
                error.OutOfMemory => log.fail("Failed acquiring environment variable; heap ran out of memory\n"),
                error.InvalidUtf8 => {
                    if (Target.current.os.tag == .windows) log.fail("String contains invalid utf-8 codepoints\n") else unreachable;
                },
                error.EnvironmentVariableNotFound => break :block default_word_count,
            }
            return @enumToInt(MainError.EnvironmentVarsNotFound);
        };
        const temp_var = fmt.parseUnsigned(u4, word_count_env_var, 10) catch |err| {
            const overflow_str = "DICE_WORD_COUNT environment variable must range from {} to " ++ "{}; reverting to default value\n";
            const invalid_char_str = "DICE_WORD_COUNT environment variable contains invalid " ++ "characters; reverting to default value\n";
            switch (err) {
                error.Overflow => log.warnf(overflow_str, .{ min_word_count, max_word_count }),
                error.InvalidCharacter => log.warn(invalid_char_str),
            }
            break :block default_word_count;
        };
        // parseUnsigned only enforces the u4 range; additionally clamp to the
        // documented [min_word_count, max_word_count] range.
        if (temp_var < min_word_count or temp_var > max_word_count) {
            const warn_msg = "DICE_WORD_COUNT environment variable must range from {} to {}; " ++ "reverting to default value\n";
            log.warnf(warn_msg, .{ min_word_count, max_word_count });
            break :block default_word_count;
        }
        break :block temp_var;
    };

    // Argument parsing.
    {
        const S = struct {
            // Writes the usage/help text to the given stream, ignoring write errors.
            fn printHelp(exe: []const u8, outstream: *fs.File.OutStream) void {
                const usage_header_str = "Usage: {} [options] <username> <sitename>\n" ++ "\n" ++ "Options:\n";
                const str0 = " -c, --counter=VALUE Set the counter value [default: {}]\n";
                const str1 = " --stdin Read password from standard input\n" ++ " For OSes other than Linux, this is the default\n";
                const str2 = " -w, --words=VALUE Set the number of words for the resulting generated password\n" ++ " The value must range from {} to {} [default: {}]\n";
                const str3 = " -v, --verbose Print extra information\n";
                const str4 = " -h, --help Display this help and exit immediately\n";
                const str5 = " --version Display version information and exit immediately\n";
                outstream.print(usage_header_str, .{exe}) catch {};
                outstream.print(str0, .{default_counter}) catch {};
                _ = outstream.write(str1) catch {};
                outstream.print(str2, .{ min_word_count, max_word_count, default_word_count }) catch {};
                _ = outstream.write(str3) catch {};
                _ = outstream.write(str4) catch {};
                _ = outstream.write(str5) catch {};
            }
        };

        // Parameter ids are single bytes; printable mnemonics ('|', '#', '@')
        // stand in for the long-only and positional parameters.
        const params = comptime [_]clap.Param(u8){
            clap.Param(u8){
                .id = 'c',
                .names = clap.Names{ .short = 'c', .long = "counter" },
                .takes_value = true,
            },
            clap.Param(u8){
                .id = 'w',
                .names = clap.Names{ .short = 'w', .long = "words" },
                .takes_value = true,
            },
            clap.Param(u8){
                .id = '|',
                .names = clap.Names{ .long = "stdin" },
            },
            clap.Param(u8){
                .id = 'v',
                .names = clap.Names{ .short = 'v', .long = "verbose" },
            },
            // Help and version
            clap.Param(u8){
                .id = 'h',
                .names = clap.Names{ .short = 'h', .long = "help" },
            },
            clap.Param(u8){
                .id = '#',
                .names = clap.Names{ .long = "version" },
            },
            // Positional arguments
            clap.Param(u8){
                .id = '@',
                .takes_value = true,
            },
        };

        var iter = clap.args.OsIterator.init(allocator) catch |err| {
            log.failf("Iteration failed ({})\n", .{err});
            return @enumToInt(MainError.InvalidArguments);
        };
        var parser = clap.StreamingClap(u8, clap.args.OsIterator){
            .params = &params,
            .iter = &iter,
        };
        var positional = ArrayListAligned([]const u8, null).init(allocator);
        while (parser.next() catch |err| {
            log.failf("Parse iteration failed ({})\n", .{err});
            S.printHelp(if (iter.exe_arg) |exe_arg| exe_arg else global.name, stderr);
            return @enumToInt(MainError.InvalidArguments);
        }) |arg| {
            switch (arg.param.id) {
                // Command-line -c/-w override the environment-derived values.
                'c' => counter = fmt.parseUnsigned(@TypeOf(counter), arg.value.?, 10) catch |err| {
                    const min_counter = math.minInt(@TypeOf(counter));
                    const max_counter = math.maxInt(@TypeOf(counter));
                    switch (err) {
                        error.Overflow => log.failf("Counter value must range from {} to {}\n", .{ min_counter, max_counter }),
                        error.InvalidCharacter => log.fail("Counter value contains invalid characters\n"),
                    }
                    return @enumToInt(MainError.InvalidArguments);
                },
                'w' => {
                    word_count = block: {
                        const temp_var = fmt.parseUnsigned(@TypeOf(word_count), arg.value.?, 10) catch |err| {
                            switch (err) {
                                error.Overflow => log.failf("Word count value must range from {} to {}\n", .{ min_word_count, max_word_count }),
                                error.InvalidCharacter => log.fail("Word count value contains invalid characters\n"),
                            }
                            return @enumToInt(MainError.InvalidArguments);
                        };
                        if (temp_var < min_word_count or temp_var > max_word_count) {
                            log.failf("Word count value must range from {} to {}\n", .{ min_word_count, max_word_count });
                            return @enumToInt(MainError.InvalidArguments);
                        }
                        break :block temp_var;
                    };
                },
                '|' => is_password_read_from_stdin = true,
                'v' => log.setVerbose(true),
                'h' => {
                    S.printHelp(if (iter.exe_arg) |exe_arg| exe_arg else global.name, stdout);
                    return @enumToInt(MainError.OK);
                },
                '#' => {
                    stdout.print("{} {}\n", .{ global.name, global.version_string }) catch {};
                    _ = stdout.write("Copyright (c) 2020 nofmal\n") catch {};
                    _ = stdout.write("Licensed under the zlib license\n") catch {};
                    return @enumToInt(MainError.OK);
                },
                '@' => positional.append(arg.value.?) catch {
                    log.fail("Failed appending positional arguments; heap ran out of memory\n");
                    return @enumToInt(MainError.InvalidArguments);
                },
                else => unreachable,
            }
        }

        if (positional.items.len < 2) {
            log.fail("Username and sitename are required\n");
            S.printHelp(if (iter.exe_arg) |exe_arg| exe_arg else global.name, stderr);
            return @enumToInt(MainError.InvalidArguments);
        }

        const pos = positional.toOwnedSlice();
        if (pos[0].len == 0) {
            log.fail("User name must not be empty\n");
            return @enumToInt(MainError.InvalidArguments);
        }
        if (pos[1].len == 0) {
            log.fail("Site name must not be empty\n");
            return @enumToInt(MainError.InvalidArguments);
        }
        if (!unicode.utf8ValidateSlice(pos[0])) {
            log.fail("User name contains illegal codepoints\n");
            return @enumToInt(MainError.InvalidArguments);
        }
        if (!unicode.utf8ValidateSlice(pos[1])) {
            log.fail("Site name contains illegal codepoints\n");
            return @enumToInt(MainError.InvalidArguments);
        }
        user_name = pos[0];
        site_name = pos[1];
    }

    log.verbose("\"user_parameter\": {\n");
    log.verbosef(" \"user_name\": {},\n", .{user_name});
    log.verbosef(" \"site_name\": {},\n", .{site_name});
    log.verbosef(" \"counter\": {},\n", .{counter});
    log.verbosef(" \"word_count\": {},\n", .{word_count});
    log.verbose("},\n");

    var bytes_read: usize = 0;
    defer bytes_read = 0;

    // Input and process password
    {
        const P = struct {
            // Validates the password's codepoint count (not byte count) against
            // the min/max length limits.
            fn validatePasswordLength(string: []const u8) error{
                InvalidCodepoints,
                PasswordTooShort,
                PasswordTooLong,
            }!void {
                const password_length = global.utf8Length(string) catch return error.InvalidCodepoints;
                if (password_length < min_password_length) return error.PasswordTooShort else if (password_length > max_password_length) return error.PasswordTooLong;
            }
        };

        const stdin_file = io.getStdIn();
        const stdin_stream = &stdin_file.inStream();

        if (is_password_read_from_stdin) {
            bytes_read = stdin_stream.readAll(&user_password) catch |err| {
                log.failf("Failed to read from standard input ({})\n", .{err});
                return @enumToInt(MainError.InvalidPassword);
            };
            // NOTE(review): main returns u8 rather than an error union, so the
            // validation failure below exits via `return`, not an error — this
            // errdefer never fires on that path; verify the intended scrubbing.
            errdefer mem.secureZero(u8, &user_password);
            P.validatePasswordLength(user_password[0..bytes_read]) catch |err| {
                switch (err) {
                    error.InvalidCodepoints => log.fail("Password contains illegal codepoints\n"),
                    error.PasswordTooShort => log.failf("Password must be longer than {} letters\n", .{min_password_length}),
                    error.PasswordTooLong => log.failf("Password must be shorter than {} letters\n", .{max_password_length}),
                }
                return @enumToInt(MainError.InvalidPassword);
            };
        } else {
            const old_terminal = os.tcgetattr(stdin_file.handle) catch |err| {
                log.failf("Failed getting terminal attributes ({})\n", .{err});
                return @enumToInt(MainError.TerminalUnobtainable);
            };
            var new_terminal = old_terminal;
            // Disable echo so the typed password stays hidden; keep canonical
            // (line-buffered) input and signal generation enabled.
            new_terminal.lflag &= ~@as(u32, os.ECHO);
            new_terminal.lflag |= os.ICANON | os.ISIG;
            new_terminal.iflag |= os.ICRNL;
            os.tcsetattr(stdin_file.handle, os.TCSA.FLUSH, new_terminal) catch unreachable;
            // Restore the original terminal settings on scope exit.
            defer os.tcsetattr(stdin_file.handle, os.TCSA.FLUSH, old_terminal) catch unreachable;
            while (true) {
                stderr.print("[{}] password for {}: ", .{ global.name, user_name }) catch {};
                // Read one byte per iteration until end-of-line.
                while (true) : (bytes_read += 1) {
                    const byte = stdin_stream.readByte() catch |err| {
                        _ = stderr.write("\n") catch {};
                        log.failf("Failed to read from input ({})\n", .{err});
                        return @enumToInt(MainError.InvalidPassword);
                    };
                    switch (byte) {
                        '\r', '\n' => break,
                        else => {
                            if (bytes_read < user_password.len) {
                                user_password[bytes_read] = byte;
                            } else {
                                _ = stderr.write("\n") catch {};
                                log.fail("Password buffer overflowed\n");
                                return @enumToInt(MainError.InvalidPassword);
                            }
                        },
                    }
                }
                _ = stderr.write("\n") catch {};
                P.validatePasswordLength(user_password[0..bytes_read]) catch |err| {
                    switch (err) {
                        error.InvalidCodepoints => log.fail("Password contains illegal codepoints; please try again\n"),
                        error.PasswordTooShort => log.failf("Password must be longer than {} letters; please try again\n", .{min_password_length}),
                        error.PasswordTooLong => log.failf("Password must be shorter than {} letters; please try again\n", .{max_password_length}),
                    }
                    // NOTE(review): bytes_read is not reset to 0 before retrying,
                    // so the next attempt keeps counting from the previous one —
                    // confirm whether a reset is intended here.
                    mem.set(u8, user_password[0..bytes_read], 0);
                    continue;
                };
                break;
            }
        }
        // End of password processing
    }
    defer mem.secureZero(u8, &user_password);

    algorithm.generatePassword(
        allocator,
        algorithm.Param{
            .user_name = user_name,
            .site_name = site_name,
            .counter = counter,
            .word_count = word_count,
        },
        user_password[0..bytes_read],
        &generated_password,
    ) catch |err| {
        switch (err) {
            error.SaltNotAllocated => log.fail("Heap ran out of memory while generating salt\n"),
            error.KeyNotAllocated => log.fail("Heap ran out of memory while generating key\n"),
            error.SeedNotAllocated => log.fail("Heap ran out of memory while generating seed\n"),
            error.OutOfMemory => log.fail("Failed allocating memory during diceware phase\n"),
        }
        return @enumToInt(MainError.DicewareNotGenerated);
    };
    defer mem.secureZero(u8, &generated_password);

    {
        // Find the first NUL so only the generated text, not the zero-filled
        // remainder of the buffer, is printed. The label goes to stderr and the
        // password itself to stdout, so stdout stays pipe-friendly.
        var i: usize = 0;
        for (generated_password) |byte, index| {
            if (byte == 0) {
                i = index;
                break;
            }
        }
        _ = stderr.write("your password is: ") catch {};
        stdout.print("{}", .{generated_password[0..i]}) catch {};
        _ = stderr.write("\n") catch {};
    }

    return @enumToInt(MainError.OK);
}
src/main.zig
const std = @import("std");
const zua = @import("zua");
const parse = zua.parse;

// Tests for comparing the output of Zua's parser with Lua's.
// Expects @import("build_options").fuzzed_parse_inputs_dir to be a path to
// a directory containing a corpus of inputs to test and
// @import("build_options").fuzzed_parse_outputs_dir to be a path to a
// directory containing the bytecode obtained by running the input
// through the Lua parser and dumper (or an error string).
//
// A usable corpus/outputs pair can be obtained from
// https://github.com/squeek502/fuzzing-lua

// Toggles for extra diagnostic output while iterating the corpus.
const verboseTestPrinting = false;
const printErrorContextDifferences = false;

const build_options = @import("build_options");
const inputs_dir_path = build_options.fuzzed_parse_inputs_dir;
const outputs_dir_path = build_options.fuzzed_parse_outputs_dir;

// For every input file, parse it with Zua and check that the outcome class
// (bytecode vs. error) and, for errors, the rendered message agree with the
// reference output produced by Lua's own parser.
test "fuzzed_parse input/output pairs" {
    const allocator = std.testing.allocator;
    var inputs_dir = try std.fs.cwd().openDir(inputs_dir_path, .{ .iterate = true });
    defer inputs_dir.close();
    var outputs_dir = try std.fs.cwd().openDir(outputs_dir_path, .{});
    defer outputs_dir.close();
    var n: usize = 0;
    var nskipped: usize = 0;
    var nlexerrors: usize = 0;
    var nbytecode: usize = 0;
    var inputs_iterator = inputs_dir.iterate();
    while (try inputs_iterator.next()) |entry| {
        if (entry.kind != .File) continue;
        if (verboseTestPrinting) {
            std.debug.print("\n{s}\n", .{entry.name});
        }
        const contents = try inputs_dir.readFileAlloc(allocator, entry.name, std.math.maxInt(usize));
        defer allocator.free(contents);
        const expectedContents = try outputs_dir.readFileAlloc(allocator, entry.name, std.math.maxInt(usize));
        defer allocator.free(expectedContents);
        // apparently luac can miscompile certain inputs which gives a blank output file
        if (expectedContents.len == 0) {
            if (verboseTestPrinting) {
                std.debug.print("luac miscompilation: {s}\n", .{entry.name});
            }
            continue;
        }
        // A reference file starting with the Lua bytecode signature means the
        // input is expected to parse successfully; otherwise it holds an error string.
        const is_bytecode_expected = std.mem.startsWith(u8, expectedContents, zua.dump.signature);
        const is_error_expected = !is_bytecode_expected;
        var lexer = zua.lex.Lexer.init(contents, "fuzz");
        var parser = zua.parse.Parser.init(&lexer);
        if (parser.parse(allocator)) |tree| {
            defer tree.deinit();
            if (is_error_expected) {
                std.debug.print("{s}:\n```\n{s}\n```\n\nexpected error:\n{s}\n\ngot tree:\n", .{ entry.name, std.fmt.fmtSliceEscapeLower(contents), expectedContents });
                try tree.dump(std.io.getStdErr().writer());
                std.debug.print("\n", .{});
                unreachable;
            } else {
                // Bytecode comparison is not performed here; just count it.
                nbytecode += 1;
                continue;
            }
        } else |err| {
            if (is_bytecode_expected) {
                std.debug.print("{s}:\n```\n{s}\n```\n\nexpected no error, got:\n{}\n", .{ entry.name, std.fmt.fmtSliceEscapeLower(contents), err });
                unreachable;
            } else {
                if (isInErrorSet(err, zua.lex.LexError)) {
                    nlexerrors += 1;
                }
                // ignore this error for now, it's in lcode.c
                if (std.mem.indexOf(u8, expectedContents, "function or expression too complex") != null) {
                    nskipped += 1;
                    continue;
                }
                // ignore this error for now, not sure if it'll be a ParseError
                if (std.mem.indexOf(u8, expectedContents, "has more than 200 local variables") != null) {
                    nskipped += 1;
                    continue;
                }
                // ignore this error for now, not sure if it'll be a ParseError
                if (std.mem.indexOf(u8, expectedContents, "variables in assignment") != null) {
                    nskipped += 1;
                    continue;
                }
                // ignore this error for now, see long_str_nesting_compat TODO
                if (std.mem.indexOf(u8, expectedContents, "nesting of [[...]]") != null) {
                    nskipped += 1;
                    continue;
                }
                const err_msg = try parser.renderErrorAlloc(allocator);
                defer allocator.free(err_msg);
                // If the reference message carries a " near '...'" context suffix,
                // only the part before it is compared strictly.
                var nearIndex = std.mem.lastIndexOf(u8, expectedContents, " near '");
                if (nearIndex) |i| {
                    try std.testing.expectEqualStrings(expectedContents[0..i], err_msg[0..std.math.min(i, err_msg.len)]);
                    if (printErrorContextDifferences) {
                        if (!std.mem.eql(u8, expectedContents, err_msg)) {
                            std.debug.print("\n{s}\nexpected: {s}\nactual: {s}\n", .{ entry.name, std.fmt.fmtSliceEscapeLower(expectedContents), std.fmt.fmtSliceEscapeLower(err_msg) });
                        }
                    }
                } else {
                    try std.testing.expectEqualStrings(expectedContents, err_msg);
                }
            }
        }
        n += 1;
    }
    std.debug.print(
        "\n{} input/output pairs tested: {} passed, {} skipped (skipped {} bytecode outputs, {} unimplemented errors)\n",
        .{ n + nskipped + nbytecode, n, nskipped + nbytecode, nbytecode, nskipped },
    );
    if (nlexerrors > 0) {
        std.debug.print("note: {d} lexer-specific errors found, can prune them with fuzzed_parse_prune\n", .{nlexerrors});
    }
}

/// Returns true if `err`'s name matches the name of any error in the error set
/// type `T`. Comparison is by name string, so same-named errors from different
/// sets are considered equal.
pub fn isInErrorSet(err: anyerror, comptime T: type) bool {
    for (std.meta.fields(T)) |field| {
        if (std.mem.eql(u8, std.meta.tagName(err), field.name)) {
            return true;
        }
    }
    return false;
}
test/fuzzed_parse.zig
const std = @import("std"); const testing = std.testing; const c = @import("c.zig").c; const key = @import("key.zig"); /// Possible value for various window hints, etc. pub const dont_care = c.GLFW_DONT_CARE; pub const Error = @import("errors.zig").Error; const getError = @import("errors.zig").getError; pub const Action = @import("action.zig").Action; pub const GamepadAxis = @import("gamepad_axis.zig").GamepadAxis; pub const GamepadButton = @import("gamepad_button.zig").GamepadButton; pub const gamepad_axis = @import("gamepad_axis.zig"); pub const gamepad_button = @import("gamepad_button.zig"); pub const GammaRamp = @import("GammaRamp.zig"); pub const Image = @import("Image.zig"); pub const Joystick = @import("Joystick.zig"); pub const Monitor = @import("Monitor.zig"); pub const mouse_button = @import("mouse_button.zig"); pub const version = @import("version.zig"); pub const VideoMode = @import("VideoMode.zig"); pub const Window = @import("Window.zig"); pub const Cursor = @import("Cursor.zig"); pub const Key = key.Key; pub usingnamespace @import("clipboard.zig"); pub usingnamespace @import("opengl.zig"); pub usingnamespace @import("vulkan.zig"); pub usingnamespace @import("time.zig"); pub usingnamespace @import("hat.zig"); pub usingnamespace @import("mod.zig"); const internal_debug = @import("internal_debug.zig"); /// Initializes the GLFW library. /// /// This function initializes the GLFW library. Before most GLFW functions can be used, GLFW must /// be initialized, and before an application terminates GLFW should be terminated in order to free /// any resources allocated during or after initialization. /// /// If this function fails, it calls glfw.Terminate before returning. If it succeeds, you should /// call glfw.Terminate before the application exits. /// /// Additional calls to this function after successful initialization but before termination will /// return immediately with no error. 
/// /// macos: This function will change the current directory of the application to the /// `Contents/Resources` subdirectory of the application's bundle, if present. This can be disabled /// with the glfw.COCOA_CHDIR_RESOURCES init hint. /// /// x11: This function will set the `LC_CTYPE` category of the application locale according to the /// current environment if that category is still "C". This is because the "C" locale breaks /// Unicode text input. /// /// @thread_safety This function must only be called from the main thread. pub inline fn init(hints: InitHints) Error!void { internal_debug.toggleInitialized(); internal_debug.assertInitialized(); errdefer internal_debug.toggleInitialized(); inline for (comptime std.meta.fieldNames(InitHints)) |field_name| { const init_hint = @field(InitHint, field_name); const init_value = @field(hints, field_name); initHint(init_hint, init_value) catch |err| switch (err) { Error.InvalidValue => unreachable, else => unreachable, }; } _ = c.glfwInit(); getError() catch |err| return switch (err) { Error.PlatformError => err, else => unreachable, }; } /// Terminates the GLFW library. /// /// This function destroys all remaining windows and cursors, restores any modified gamma ramps /// and frees any other allocated resources. Once this function is called, you must again call /// glfw.init successfully before you will be able to use most GLFW functions. /// /// If GLFW has been successfully initialized, this function should be called before the /// application exits. If initialization fails, there is no need to call this function, as it is /// called by glfw.init before it returns failure. /// /// This function has no effect if GLFW is not initialized. /// /// Possible errors include glfw.Error.PlatformError. /// /// remark: This function may be called before glfw.init. /// /// warning: The contexts of any remaining windows must not be current on any other thread when /// this function is called. 
/// reentrancy: This function must not be called from a callback.
///
/// thread_safety: This function must only be called from the main thread.
pub inline fn terminate() void {
    internal_debug.assertInitialized();
    internal_debug.toggleInitialized();
    c.glfwTerminate();
}

/// Initialization hints for passing into glfw.init
pub const InitHints = struct {
    /// Whether to also expose joystick hats as buttons, for compatibility with
    /// earlier versions of GLFW that did not have glfwGetJoystickHats.
    joystick_hat_buttons: bool = true,

    /// macOS specific init hint. Ignored on other platforms.
    ///
    /// Whether to set the current directory of the application to the
    /// Contents/Resources subdirectory of the application's bundle, if present.
    cocoa_chdir_resources: bool = true,

    /// macOS specific init hint. Ignored on other platforms.
    ///
    /// Whether to create a basic menu bar, either from a nib or manually, when
    /// the first window is created, which is when AppKit is initialized.
    cocoa_menubar: bool = true,
};

/// Initialization hints for passing into glfw.initHint
const InitHint = enum(c_int) {
    /// Whether to also expose joystick hats as buttons, for compatibility with
    /// earlier versions of GLFW that did not have glfwGetJoystickHats.
    ///
    /// Possible values are `true` and `false`.
    joystick_hat_buttons = c.GLFW_JOYSTICK_HAT_BUTTONS,

    /// macOS specific init hint. Ignored on other platforms.
    ///
    /// Whether to set the current directory of the application to the
    /// Contents/Resources subdirectory of the application's bundle, if present.
    ///
    /// Possible values are `true` and `false`.
    cocoa_chdir_resources = c.GLFW_COCOA_CHDIR_RESOURCES,

    /// macOS specific init hint. Ignored on other platforms.
    ///
    /// Whether to create a basic menu bar, either from a nib or manually, when
    /// the first window is created, which is when AppKit is initialized.
    ///
    /// Possible values are `true` and `false`.
    cocoa_menubar = c.GLFW_COCOA_MENUBAR,
};

/// Sets the specified init hint to the desired value.
///
/// Hints set here apply to the next initialization of GLFW. They are never
/// reset by GLFW itself, but only take effect during initialization; once GLFW
/// has been initialized, new values are ignored until the library is
/// terminated and initialized again.
///
/// Some hints are platform specific; they may be set on any platform but only
/// affect their own platform. Other platforms ignore them.
///
/// @param hint: The init hint to set.
/// @param value: The new value of the init hint (integer or bool).
///
/// Possible errors include glfw.Error.InvalidEnum and glfw.Error.InvalidValue.
///
/// @remarks This function may be called before glfw.init.
///
/// @thread_safety This function must only be called from the main thread.
fn initHint(hint: InitHint, value: anytype) Error!void {
    // Normalize the comptime-checked value into the c_int GLFW expects.
    const raw: c_int = switch (@typeInfo(@TypeOf(value))) {
        .Int, .ComptimeInt => @intCast(c_int, value),
        .Bool => @intCast(c_int, @boolToInt(value)),
        else => @compileError("expected a int or bool, got " ++ @typeName(@TypeOf(value))),
    };
    c.glfwInitHint(@enumToInt(hint), raw);
    getError() catch |err| switch (err) {
        Error.InvalidValue => return err,
        else => unreachable,
    };
}

/// Returns a string describing the compile-time configuration.
///
/// This is the compile-time generated version string of the GLFW library
/// binary: version, platform, compiler and platform-specific compile-time
/// options. Not to be confused with the OpenGL or OpenGL ES version string
/// queried with `glGetString`.
///
/// __Do not use the version string__ to parse the GLFW library version. Use
/// the glfw.version constants instead.
///
/// @return The ASCII encoded GLFW version string.
/// @remark This function may be called before @ref glfwInit.
///
/// @pointer_lifetime The returned string is static and compile-time generated.
///
/// @thread_safety This function may be called from any thread.
pub inline fn getVersionString() [:0]const u8 {
    return std.mem.span(c.glfwGetVersionString());
}

/// Processes all pending events.
///
/// Processes only the events already in the event queue and returns
/// immediately. Processing events causes the window and input callbacks
/// associated with those events to be called.
///
/// On some platforms a window move, resize or menu operation blocks event
/// processing; use the window refresh callback (see window_refresh) to redraw
/// the window contents during such operations.
///
/// Do not assume that your callbacks will _only_ be called by event processing
/// functions like this one: window systems that require GLFW to register
/// callbacks of its own can pass events to GLFW in response to many window
/// system function calls, and GLFW forwards them to your callbacks before
/// returning.
///
/// Event processing is not required for joystick input to work.
///
/// Possible errors include glfw.Error.NotInitialized and glfw.Error.PlatformError.
///
/// @reentrancy This function must not be called from a callback.
///
/// @thread_safety This function must only be called from the main thread.
///
/// see also: events, glfw.waitEvents, glfw.waitEventsTimeout
pub inline fn pollEvents() Error!void {
    internal_debug.assertInitialized();
    c.glfwPollEvents();
    getError() catch |err| switch (err) {
        Error.PlatformError => return err,
        else => unreachable,
    };
}

/// Waits until events are queued and processes them.
/// Puts the calling thread to sleep until at least one event is available in
/// the event queue, then behaves exactly like glfw.pollEvents: the queued
/// events are processed and the function returns immediately. Processing
/// events causes the window and input callbacks associated with those events
/// to be called.
///
/// Since not all events are associated with callbacks, this function may
/// return without a callback having been called even if you are monitoring
/// all callbacks.
///
/// On some platforms a window move, resize or menu operation blocks event
/// processing; use the window refresh callback (see window_refresh) to redraw
/// the window contents during such operations.
///
/// Do not assume that your callbacks will _only_ be called by event processing
/// functions like this one: window systems that require GLFW to register
/// callbacks of its own can pass events to GLFW in response to many window
/// system function calls, and GLFW forwards them to your callbacks before
/// returning.
///
/// Event processing is not required for joystick input to work.
///
/// Possible errors include glfw.Error.NotInitialized and glfw.Error.PlatformError.
///
/// @reentrancy This function must not be called from a callback.
///
/// @thread_safety This function must only be called from the main thread.
///
/// see also: events, glfw.pollEvents, glfw.waitEventsTimeout
pub inline fn waitEvents() Error!void {
    internal_debug.assertInitialized();
    c.glfwWaitEvents();
    getError() catch |err| switch (err) {
        Error.PlatformError => return err,
        else => unreachable,
    };
}

/// Waits with timeout until events are queued and processes them.
/// /// This function puts the calling thread to sleep until at least one event is available in the /// event queue, or until the specified timeout is reached. If one or more events are available, it /// behaves exactly like glfw.pollEvents, i.e. the events in the queue are processed and the /// function then returns immediately. Processing events will cause the window and input callbacks /// associated with those events to be called. /// /// The timeout value must be a positive finite number. /// /// Since not all events are associated with callbacks, this function may return without a callback /// having been called even if you are monitoring all callbacks. /// /// On some platforms, a window move, resize or menu operation will cause event processing to /// block. This is due to how event processing is designed on those platforms. You can use the /// window refresh callback (see window_refresh) to redraw the contents of your window when /// necessary during such operations. /// /// Do not assume that callbacks you set will _only_ be called in response to event processing /// functions like this one. While it is necessary to poll for events, window systems that require /// GLFW to register callbacks of its own can pass events to GLFW in response to many window system /// function calls. GLFW will pass those events on to the application callbacks before returning. /// /// Event processing is not required for joystick input to work. /// /// @param[in] timeout The maximum amount of time, in seconds, to wait. /// /// Possible errors include glfw.Error.NotInitialized, glfw.Error.InvalidValue and glfw.Error.PlatformError. /// /// @reentrancy This function must not be called from a callback. /// /// @thread_safety This function must only be called from the main thread. 
/// see also: events, glfw.pollEvents, glfw.waitEvents
pub inline fn waitEventsTimeout(timeout: f64) Error!void {
    internal_debug.assertInitialized();
    c.glfwWaitEventsTimeout(timeout);
    getError() catch |err| switch (err) {
        // TODO: Consider whether to catch 'GLFW_INVALID_VALUE' from GLFW, or assert that 'timeout' is positive here, in the same manner as GLFW,
        // and make its branch unreachable.
        Error.InvalidValue,
        Error.PlatformError,
        => return err,
        else => unreachable,
    };
}

/// Posts an empty event to the event queue.
///
/// Posts an empty event from the current thread to the event queue, causing
/// glfw.waitEvents or glfw.waitEventsTimeout to return.
///
/// Possible errors include glfw.Error.NotInitialized and glfw.Error.PlatformError.
///
/// @thread_safety This function may be called from any thread.
///
/// see also: events, glfw.waitEvents, glfw.waitEventsTimeout
pub inline fn postEmptyEvent() Error!void {
    internal_debug.assertInitialized();
    c.glfwPostEmptyEvent();
    getError() catch |err| switch (err) {
        Error.PlatformError => return err,
        else => unreachable,
    };
}

/// Returns whether raw mouse motion is supported.
///
/// Returns whether raw mouse motion is supported on the current system. This
/// status does not change after GLFW has been initialized, so it only needs to
/// be checked once. Attempting to enable raw motion on a system that does not
/// support it emits glfw.Error.PlatformError.
///
/// Raw mouse motion is closer to the actual motion of the mouse across a
/// surface: it is not affected by the scaling and acceleration applied to the
/// desktop cursor. That processing suits a cursor, while raw motion is better
/// for controlling, for example, a 3D camera. For this reason raw mouse motion
/// is only provided when the cursor is disabled.
///
/// @return `true` if raw mouse motion is supported on the current machine, or `false` otherwise.
/// @thread_safety This function must only be called from the main thread.
///
/// see also: raw_mouse_motion, glfw.setInputMode
pub inline fn rawMouseMotionSupported() bool {
    internal_debug.assertInitialized();
    const supported = c.glfwRawMouseMotionSupported();
    // The only documented error is GLFW_NOT_INITIALIZED, which
    // assertInitialized rules out above.
    getError() catch unreachable;
    return supported == c.GLFW_TRUE;
}

/// Smoke test helper: initializes GLFW, tries to open a window and pumps
/// events for one second. Window creation failure is tolerated because most CI
/// environments are headless.
pub fn basicTest() !void {
    try init(.{});
    defer terminate();

    const window = Window.create(640, 480, "GLFW example", null, null, .{}) catch |err| {
        // return without fail, because most of our CI environments are headless / we cannot open
        // windows on them.
        std.debug.print("note: failed to create window: {}\n", .{err});
        return;
    };
    defer window.destroy();

    // `start` is never mutated, so it is a const.
    const start = std.time.milliTimestamp();
    while (std.time.milliTimestamp() < start + 1000 and !window.shouldClose()) {
        // Raw C call: deliberately ignores errors during the smoke loop.
        c.glfwPollEvents();
    }
}

test "getVersionString" {
    // Reference these so the tests in these files get pulled in / ran.
    _ = Monitor;
    _ = GammaRamp;
    _ = Image;
    _ = key;
    _ = Joystick;
    _ = VideoMode;
    _ = Window;
    _ = Cursor;
    std.debug.print("\nGLFW version v{}.{}.{}\n", .{ version.major, version.minor, version.revision });
    std.debug.print("\nstring: {s}\n", .{getVersionString()});
}

// BUG FIX: this test was previously also named "pollEvents", colliding with the
// test below. It actually exercises init with a non-default hint, so name it that.
test "init with hints" {
    try init(.{ .cocoa_chdir_resources = true });
    defer terminate();
}

test "pollEvents" {
    try init(.{});
    defer terminate();

    try pollEvents();
}

test "waitEventsTimeout" {
    try init(.{});
    defer terminate();

    try waitEventsTimeout(0.25);
}

test "postEmptyEvent_and_waitEvents" {
    try init(.{});
    defer terminate();

    try postEmptyEvent();
    try waitEvents();
}

test "rawMouseMotionSupported" {
    try init(.{});
    defer terminate();

    _ = rawMouseMotionSupported();
}

test "basic" {
    try basicTest();
}
glfw/src/main.zig
const std = @import("std");
const Sdk = @import("Sdk.zig");

const Builder = std.build.Builder;

/// Build graph for the SDL2 bindings: optional library test suite, a wrapper
/// demo and a native demo, plus `run-wrapper` / `run-native` convenience steps.
pub fn build(b: *Builder) !void {
    const sdk = Sdk.init(b);
    const mode = b.standardReleaseOptions();
    const target = b.standardTargetOptions(.{});

    const sdl_linkage = b.option(std.build.LibExeObjStep.Linkage, "link", "Defines how to link SDL2 when building with mingw32") orelse .dynamic;
    const skip_tests = b.option(bool, "skip-test", "When set, skips the test suite to be run. This is required for cross-builds") orelse false;

    if (!skip_tests) {
        const lib_test = b.addTest("src/wrapper/sdl.zig");
        lib_test.setTarget(.{
            // copy over the abi so we compile the test with -msvc or -gnu for windows
            .abi = if (target.isWindows()) target.abi else null,
        });
        lib_test.addPackage(sdk.getNativePackage("sdl-native"));
        lib_test.linkSystemLibrary("sdl2_image");
        sdk.link(lib_test, .dynamic);

        const test_lib_step = b.step("test", "Runs the library tests.");
        test_lib_step.dependOn(&lib_test.step);
    }

    const demo_wrapper = b.addExecutable("demo-wrapper", "examples/wrapper.zig");
    demo_wrapper.setBuildMode(mode);
    demo_wrapper.setTarget(target);
    sdk.link(demo_wrapper, sdl_linkage);
    demo_wrapper.addPackage(sdk.getWrapperPackage("sdl2"));
    demo_wrapper.install();

    const demo_native = b.addExecutable("demo-native", "examples/native.zig");
    demo_native.setBuildMode(mode);
    demo_native.setTarget(target);
    sdk.link(demo_native, sdl_linkage);
    demo_native.addPackage(sdk.getNativePackage("sdl2"));
    demo_native.install();

    const run_demo_wrapper = demo_wrapper.run();
    run_demo_wrapper.step.dependOn(b.getInstallStep());

    // BUG FIX: this previously ran demo_wrapper, so the `run-native` step
    // launched the wrapper demo instead of the native one.
    const run_demo_native = demo_native.run();
    run_demo_native.step.dependOn(b.getInstallStep());

    const run_demo_wrapper_step = b.step("run-wrapper", "Runs the demo for the SDL2 wrapper library");
    run_demo_wrapper_step.dependOn(&run_demo_wrapper.step);

    const run_demo_native_step = b.step("run-native", "Runs the demo for the SDL2 native library");
    run_demo_native_step.dependOn(&run_demo_native.step);
}
build.zig
//! Discord gateway heartbeat worker: a dedicated thread driven by a mailbox of
//! start/ack/stop/terminate messages that periodically sends heartbeats and
//! forces a reconnect when an ack is missed or a send fails.
const std = @import("std");

const Client = @import("../Client.zig");
const util = @import("../util.zig");

const log = std.log.scoped(.zCord);

const Heartbeat = @This();

// Control channel for the worker thread; putOverwrite keeps only the latest message.
mailbox: util.Mailbox(enum { start, ack, stop, terminate }),
// Worker thread handle, reaped in deinit.
handler: *std.Thread,

/// Spawns the heartbeat worker thread for `client`.
/// The thread idles until a `.start` message arrives in the mailbox.
pub fn init(client: *Client) !Heartbeat {
    return Heartbeat{
        .mailbox = .{},
        .handler = try std.Thread.spawn(threadHandler, client),
    };
}

/// Signals the worker thread to exit and joins it. Must not fail.
pub fn deinit(self: *Heartbeat) void {
    self.mailbox.putOverwrite(.terminate);
    // Reap the thread
    self.handler.wait();
}

/// Worker loop. `heartbeat_interval_ms == 0` means "not started / stopped":
/// in that state the thread blocks on the mailbox. Once started, it wakes a
/// little before each interval elapses, sends a heartbeat if the previous one
/// was acked, and otherwise tears down the connection to force a reconnect.
fn threadHandler(client: *Client) void {
    var heartbeat_interval_ms: u64 = 0;
    // True when the server has acked the last heartbeat (or we just started).
    var ack = false;
    while (true) {
        if (heartbeat_interval_ms == 0) {
            // Idle state: block until someone tells us what to do.
            switch (client.heartbeat.mailbox.get()) {
                .start => {
                    heartbeat_interval_ms = client.connect_info.?.heartbeat_interval_ms;
                    ack = true;
                },
                .ack, .stop => {},
                .terminate => return,
            }
        } else {
            // Force fire the heartbeat earlier
            const timeout_ms = heartbeat_interval_ms - 1000;
            if (client.heartbeat.mailbox.getWithTimeout(timeout_ms * std.time.ns_per_ms)) |msg| {
                // A message arrived before the timeout: handle it and re-wait.
                switch (msg) {
                    .start => {},
                    .ack => {
                        log.info("<< ♥", .{});
                        ack = true;
                    },
                    .stop => heartbeat_interval_ms = 0,
                    .terminate => return,
                }
                continue;
            }

            // Timeout elapsed: time to send the next heartbeat.
            if (ack) {
                ack = false;
                if (client.sendCommand(.heartbeat, client.connect_info.?.seq)) |_| {
                    log.info(">> ♡", .{});
                    continue;
                } else |_| {
                    log.info("Heartbeat send failed. Reconnecting...", .{});
                }
            } else {
                // The previous heartbeat was never acked.
                log.info("Missed heartbeat. Reconnecting...", .{});
            }

            // Shut the TCP connection down so the reader notices and reconnects.
            std.os.shutdown(client.ssl_tunnel.?.tcp_conn.handle, .both) catch |err| {
                log.warn("Shutdown failed: {}", .{err});
            };
            heartbeat_interval_ms = 0;
        }
    }
}
src/Client/Heartbeat.zig
//! Thin typed wrappers over Linux syscalls not (yet) exposed by the standard
//! library, mapping errno values to explicit error sets.
const std = @import("std");
const os = std.os;
const linux = os.linux;

const errno = linux.getErrno;

const pid_t = linux.pid_t;
const fd_t = linux.fd_t;
const off_t = linux.off_t;
const rusage = linux.rusage;
const itimerspec = linux.itimerspec;

const UnexpectedError = os.UnexpectedError;
const unexpectedErrno = os.unexpectedErrno;

/// Argument block for clone3(2); mirrors the kernel's `struct clone_args`.
/// All fields default to zero so callers only set what they need.
pub const CloneArgs = extern struct {
    flags: u64 = 0,
    pidfd: u64 = 0,
    child_tid: u64 = 0,
    parent_tid: u64 = 0,
    exit_signal: u64 = 0,
    stack: u64 = 0,
    stack_size: u64 = 0,
    tls: u64 = 0,
    set_tid: u64 = 0,
    set_tid_size: u64 = 0,
    cgroup: u64 = 0,
};

pub const Clone3Error = error{
    SystemResources,
    DomainControllerEnabled,
    DomainInvalid,
    PidAlreadyExists,
    PermissionDenied,
} || UnexpectedError;

/// clone3(2) wrapper. Returns the child's pid in the parent.
/// Invalid `args` (EINVAL) are treated as a caller bug (`unreachable`).
pub fn clone3(args: CloneArgs) Clone3Error!pid_t {
    const rc = linux.syscall2(.clone3, @ptrToInt(&args), @sizeOf(CloneArgs));
    switch (errno(rc)) {
        .SUCCESS => return @intCast(pid_t, rc),
        .AGAIN => return error.SystemResources,
        .BUSY => return error.DomainControllerEnabled,
        .EXIST => return error.PidAlreadyExists,
        .INVAL => unreachable,
        .NOMEM => return error.SystemResources,
        .NOSPC => return error.SystemResources,
        .OPNOTSUPP => return error.DomainInvalid,
        .PERM => return error.PermissionDenied,
        .USERS => return error.SystemResources,
        else => |err| return unexpectedErrno(err),
    }
}

/// Flag constants for splice(2).
pub const SPLICE = struct {
    pub const MOVE = 1;
    pub const NONBLOCK = 2;
    pub const MORE = 4;
    pub const GIFT = 8;
};

pub const SpliceError = error{
    WouldBlock,
    SystemResources,
} || UnexpectedError;

/// splice(2) wrapper: moves up to `len` bytes between two file descriptors
/// (one of which must be a pipe). Returns the number of bytes moved.
/// EBADF/EINVAL/ESPIPE are treated as caller bugs (`unreachable`).
pub fn splice(fd_in: fd_t, off_in: ?*off_t, fd_out: fd_t, off_out: ?*off_t, len: usize, flags: u32) SpliceError!usize {
    const rc = linux.syscall6(.splice, @bitCast(usize, @as(isize, fd_in)), @ptrToInt(off_in), @bitCast(usize, @as(isize, fd_out)), @ptrToInt(off_out), len, flags);
    switch (errno(rc)) {
        .SUCCESS => return rc,
        .AGAIN => return error.WouldBlock,
        .BADF => unreachable,
        .INVAL => unreachable,
        .NOMEM => return error.SystemResources,
        .SPIPE => unreachable,
        else => |err| return unexpectedErrno(err),
    }
}

pub const Wait4Error = error{
    WouldBlock,
    NoChild,
} || UnexpectedError;

/// Result of wait4(2): the reaped pid, its raw wait status, and resource usage.
pub const Wait4Result = struct {
    pid: pid_t,
    status: u32,
    rusage: rusage,
};

/// wait4(2) wrapper. Retries automatically on EINTR.
/// WouldBlock is returned when WNOHANG is set and no child has changed state.
pub fn wait4(pid: pid_t, flags: u32) Wait4Error!Wait4Result {
    var status: u32 = undefined;
    var usage: rusage = undefined;
    while (true) {
        const rc = linux.syscall4(.wait4, @bitCast(usize, @as(isize, pid)), @ptrToInt(&status), flags, @ptrToInt(&usage));
        switch (errno(rc)) {
            .SUCCESS => return Wait4Result{
                .pid = @intCast(pid_t, rc),
                .status = status,
                .rusage = usage,
            },
            .INTR => continue,
            .AGAIN => return error.WouldBlock,
            .CHILD => return error.NoChild,
            .FAULT => unreachable,
            .INVAL => unreachable,
            else => |err| return unexpectedErrno(err),
        }
    }
}

pub const MountError = error{
    AccessDenied,
    FileBusy,
    MountLoop,
    SystemResources,
    NameTooLong,
    NoDevice,
    FileNotFound,
    NotBlock,
    NotDir,
    PermissionDenied,
    ReadOnly,
} || UnexpectedError;

/// mount(2) wrapper. `data` is filesystem-specific and may be null.
pub fn mount(source: ?[*:0]const u8, target: ?[*:0]const u8, fstype: ?[*:0]const u8, flags: u32, data: ?*anyopaque) MountError!void {
    const rc = linux.syscall5(.mount, @ptrToInt(source), @ptrToInt(target), @ptrToInt(fstype), flags, @ptrToInt(data));
    switch (errno(rc)) {
        .SUCCESS => return,
        .ACCES => return error.AccessDenied,
        .BUSY => return error.FileBusy,
        .FAULT => unreachable,
        .INVAL => unreachable,
        .LOOP => return error.MountLoop,
        .MFILE => return error.SystemResources,
        .NAMETOOLONG => return error.NameTooLong,
        .NODEV => return error.NoDevice,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .NOTBLK => return error.NotBlock,
        .NOTDIR => return error.NotDir,
        .NXIO => unreachable,
        .PERM => return error.PermissionDenied,
        .ROFS => return error.ReadOnly,
        else => |err| return unexpectedErrno(err),
    }
}

const UmountError = error{
    Marked,
    FileBusy,
    NameTooLong,
    FileNotFound,
    SystemResources,
    PermissionDenied,
} || UnexpectedError;

/// umount2(2) wrapper. `Marked` (EAGAIN) means the filesystem was marked for
/// expiry by a previous MNT_EXPIRE call rather than unmounted now.
pub fn umount2(target: ?[*:0]const u8, flags: u32) UmountError!void {
    const rc = linux.syscall2(.umount2, @ptrToInt(target), flags);
    switch (errno(rc)) {
        .SUCCESS => return,
        .AGAIN => return error.Marked,
        .BUSY => return error.FileBusy,
        .INVAL => unreachable,
        .NAMETOOLONG => return error.NameTooLong,
        .NOENT => return error.FileNotFound,
        .NOMEM => return error.SystemResources,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

pub const PivotRootError = error{
    DeviceBusy,
    NotDir,
    PermissionDenied,
} || UnexpectedError;

/// pivot_root(2) wrapper: moves the root filesystem to `put_old` and makes
/// `new_root` the new root of the calling process's mount namespace.
pub fn pivot_root(new_root: [*:0]const u8, put_old: [*:0]const u8) PivotRootError!void {
    const rc = linux.syscall2(.pivot_root, @ptrToInt(new_root), @ptrToInt(put_old));
    switch (errno(rc)) {
        .SUCCESS => return,
        .BUSY => return error.DeviceBusy,
        .INVAL => unreachable,
        .NOTDIR => return error.NotDir,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

pub const TimerfdCreateError = error{
    SystemResources,
    PermissionDenied,
} || UnexpectedError;

/// timerfd_create(2) wrapper. Returns the new timer file descriptor.
pub fn timerfd_create(clockid: i32, flags: u32) TimerfdCreateError!fd_t {
    const rc = linux.timerfd_create(clockid, flags);
    switch (errno(rc)) {
        .SUCCESS => return @intCast(fd_t, rc),
        .INVAL => unreachable,
        .MFILE => return error.SystemResources,
        .NODEV => return error.SystemResources,
        .NOMEM => return error.SystemResources,
        .PERM => return error.PermissionDenied,
        else => |err| return unexpectedErrno(err),
    }
}

pub const TimerfdSetError = error{
    Canceled,
} || UnexpectedError;

/// timerfd_settime(2) wrapper. `Canceled` is reported for TFD_TIMER_CANCEL_ON_SET
/// timers whose clock was discontinuously changed.
pub fn timerfd_settime(fd: fd_t, flags: u32, new_value: *const itimerspec, old_value: ?*itimerspec) TimerfdSetError!void {
    const rc = linux.timerfd_settime(fd, flags, new_value, old_value);
    switch (errno(rc)) {
        .SUCCESS => return,
        .BADF => unreachable,
        .CANCELED => return error.Canceled,
        .FAULT => unreachable,
        .INVAL => unreachable,
        else => |err| return unexpectedErrno(err),
    }
}
src/sys.zig
//! Zig bindings for wlroots' implementation of the wlr-layer-shell-unstable-v1
//! protocol. The extern structs mirror the C ABI layout; do not reorder fields.
const wlr = @import("../wlroots.zig");

const wayland = @import("wayland");
const wl = wayland.server.wl;
const wlr_proto = wayland.server.wlr;

/// The global layer-shell object advertised to clients.
pub const LayerShellV1 = extern struct {
    global: *wl.Global,

    server_destroy: wl.Listener(*wl.Server),

    events: extern struct {
        new_surface: wl.Signal(*LayerSurfaceV1),
        destroy: wl.Signal(*LayerShellV1),
    },

    data: usize,

    extern fn wlr_layer_shell_v1_create(server: *wl.Server) ?*LayerShellV1;
    pub const create = wlr_layer_shell_v1_create;
};

/// A client surface managed by the layer shell (panels, docks, wallpapers, ...).
pub const LayerSurfaceV1 = extern struct {
    /// Double-buffered protocol state (client-pending / server-pending / current).
    pub const State = extern struct {
        anchor: wlr_proto.LayerSurfaceV1.Anchor,
        exclusive_zone: i32,
        margin: extern struct {
            top: u32,
            right: u32,
            bottom: u32,
            left: u32,
        },
        keyboard_interactive: bool,
        desired_width: u32,
        desired_height: u32,
        actual_width: u32,
        actual_height: u32,
        layer: wlr_proto.LayerShellV1.Layer,
    };

    /// A configure event waiting for the client's ack.
    pub const Configure = extern struct {
        /// LayerSurfaceV1.configure_list
        link: wl.list.Link,
        serial: u32,
        state: State,
    };

    surface: *wlr.Surface,
    output: ?*wlr.Output,
    resource: *wl.Resource,
    shell: *LayerShellV1,
    /// wlr.XdgPopup.link
    popups: wl.list.Head(wlr.XdgPopup, "link"),
    namespace: [*:0]u8,
    added: bool,
    configured: bool,
    mapped: bool,
    closed: bool,
    configure_serial: u32,
    configure_next_serial: u32,
    configure_list: wl.list.Head(LayerSurfaceV1.Configure, "link"),
    acked_configure: ?*Configure,
    client_pending: State,
    server_pending: State,
    current: State,
    surface_destroy: wl.Listener(*wlr.Surface),
    events: extern struct {
        destroy: wl.Signal(*LayerSurfaceV1),
        map: wl.Signal(*LayerSurfaceV1),
        unmap: wl.Signal(*LayerSurfaceV1),
        new_popup: wl.Signal(*wlr.XdgPopup),
    },
    data: usize,

    extern fn wlr_layer_surface_v1_configure(surface: *LayerSurfaceV1, width: u32, height: u32) void;
    pub const configure = wlr_layer_surface_v1_configure;

    extern fn wlr_layer_surface_v1_close(surface: *LayerSurfaceV1) void;
    pub const close = wlr_layer_surface_v1_close;

    extern fn wlr_layer_surface_v1_from_wlr_surface(surface: *wlr.Surface) *LayerSurfaceV1;
    pub const fromSurface = wlr_layer_surface_v1_from_wlr_surface;

    extern fn wlr_layer_surface_v1_for_each_surface(
        surface: *LayerSurfaceV1,
        iterator: fn (*wlr.Surface, c_int, c_int, ?*c_void) callconv(.C) void,
        user_data: ?*c_void,
    ) void;
    /// Typed wrapper: calls `iterator` for the surface and all of its subsurfaces.
    /// NOTE(review): `iterator` and `data` are passed straight to the extern fn
    /// whose callback takes `?*c_void` — verify this coerces on the targeted
    /// Zig version (upstream variants use an explicit cast here).
    pub fn forEachSurface(
        surface: *LayerSurfaceV1,
        comptime T: type,
        iterator: fn (surface: *wlr.Surface, sx: c_int, sy: c_int, data: T) callconv(.C) void,
        data: T,
    ) void {
        wlr_layer_surface_v1_for_each_surface(surface, iterator, data);
    }

    extern fn wlr_layer_surface_v1_for_each_popup(
        surface: *LayerSurfaceV1,
        iterator: fn (*wlr.Surface, c_int, c_int, ?*c_void) callconv(.C) void,
        user_data: ?*c_void,
    ) void;
    /// Typed wrapper: calls `iterator` for every popup of this layer surface.
    /// NOTE(review): same `?*c_void` coercion caveat as forEachSurface.
    pub fn forEachPopup(
        surface: *LayerSurfaceV1,
        comptime T: type,
        iterator: fn (surface: *wlr.Surface, sx: c_int, sy: c_int, data: T) callconv(.C) void,
        data: T,
    ) void {
        wlr_layer_surface_v1_for_each_popup(surface, iterator, data);
    }

    extern fn wlr_layer_surface_v1_surface_at(surface: *LayerSurfaceV1, sx: f64, sy: f64, sub_x: *f64, sub_y: *f64) ?*wlr.Surface;
    pub const surfaceAt = wlr_layer_surface_v1_surface_at;
};
src/types/layer_shell_v1.zig
const std = @import("std");
const fs = std.fs;

/// Packs a 3-letter mnemonic into an i32 (first byte in the low bits) so
/// instructions can be compared and switched on as plain integers.
fn instToCode(inst: []const u8) i32 {
    std.debug.assert(inst.len == 3);
    return @as(i32, inst[0]) | @as(i32, inst[1]) << 8 | @as(i32, inst[2]) << 16;
}

const NOP = comptime instToCode("nop");
const ACC = comptime instToCode("acc");
const JMP = comptime instToCode("jmp");

/// Number of u64 words needed for a bitmap holding one bit per instruction.
/// BUG FIX: this was previously `count + 63 & ~63`, which rounds the *bit*
/// count up to a multiple of 64 but then allocates that many whole u64 words —
/// a 64x over-allocation. One word holds 64 bits, so divide by 64.
fn bitmapWords(instruction_count: usize) usize {
    return (instruction_count + 63) / 64;
}

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = &gpa.allocator;

    const input = try fs.cwd().readFileAlloc(allocator, "data/input_08_1.txt", std.math.maxInt(usize));
    defer allocator.free(input);
    var lines = std.mem.tokenize(input, "\n");

    // `code` stores pairs: [opcode, argument, opcode, argument, ...].
    var code = std.ArrayList(i32).init(allocator);
    defer code.deinit();
    while (lines.next()) |raw_line| {
        var line = std.mem.trim(u8, raw_line, " \r\n");
        if (line.len == 0) break;
        var op_arg = std.mem.tokenize(line, " ");
        try code.append(instToCode(op_arg.next().?));
        try code.append(try std.fmt.parseInt(i32, op_arg.next().?, 10));
    }

    const instruction_count = code.items.len / 2;

    { // Solution 1: run until the first repeated instruction, report acc.
        const bitmap_visited = try allocator.alloc(u64, bitmapWords(instruction_count));
        defer allocator.free(bitmap_visited);
        std.mem.set(u64, bitmap_visited, 0);

        var pid: i32 = 0;
        var acc: i32 = 0;
        while (true) {
            const upid = @intCast(usize, pid * 2);
            if (bitmapGet(bitmap_visited, @intCast(usize, pid))) break;
            bitmapSet(bitmap_visited, @intCast(usize, pid));
            const instr = code.items[upid];
            const value = code.items[upid + 1];
            pid += switch (instr) {
                NOP => 1,
                ACC => blk: {
                    acc += value;
                    break :blk 1;
                },
                JMP => value,
                else => {
                    std.debug.print("Got a bad instruction: {} at position: {}", .{ instr, pid });
                    unreachable;
                },
            };
        }
        std.debug.print("Day 08 - Solution 1: {}\n", .{acc});
    }

    { // Solution 2: flip one NOP<->JMP so the program terminates; try flips
        // speculatively and roll back when a loop is detected.
        const bitmap_len = bitmapWords(instruction_count);
        const bitmap_visited = try allocator.alloc(u64, bitmap_len);
        const bitmap_visited_at_change = try allocator.alloc(u64, bitmap_len);
        defer allocator.free(bitmap_visited);
        defer allocator.free(bitmap_visited_at_change);
        std.mem.set(u64, bitmap_visited, 0);
        std.mem.set(u64, bitmap_visited_at_change, 0);

        var pid: i32 = 0;
        var acc: i32 = 0;
        var changed: i32 = -1; // pid of the currently flipped instruction, or -1
        var acc_at_change: i32 = 0; // snapshot of acc taken at the flip point
        var rollback = false; // true right after restoring a snapshot
        while (true) {
            const upid = @intCast(usize, pid * 2);
            if (upid == code.items.len) {
                // We got to the end
                break;
            }
            var instr = code.items[upid];
            const value = code.items[upid + 1];
            // Speculatively flip the first eligible instruction since the last rollback.
            if (changed == -1 and (instr == NOP or instr == JMP) and !rollback) {
                changed = pid;
                acc_at_change = acc;
                std.mem.copy(u64, bitmap_visited_at_change, bitmap_visited);
                instr = if (instr == NOP) JMP else NOP;
            }
            rollback = false;
            bitmapSet(bitmap_visited, @intCast(usize, pid));
            pid += switch (instr) {
                NOP => 1,
                ACC => blk: {
                    acc += value;
                    break :blk 1;
                },
                JMP => value,
                else => unreachable,
            };
            if (bitmapGet(bitmap_visited, @intCast(usize, pid))) {
                // We ended up in a loop: restore the state saved at the flip
                // point so the next iteration tries the next candidate.
                std.debug.assert(changed != -1);
                pid = changed;
                acc = acc_at_change;
                std.mem.copy(u64, bitmap_visited, bitmap_visited_at_change);
                changed = -1;
                rollback = true;
            }
        }
        std.debug.print("Day 08 - Solution 2: {}\n", .{acc});
    }
}

/// Returns whether bit `index` is set in `bitmap`.
fn bitmapGet(bitmap: []u64, index: usize) bool {
    const bitmap_index = index / 64;
    const bitmap_shift = @intCast(u6, index - (bitmap_index * 64));
    return (bitmap[bitmap_index] & (@as(u64, 1) << bitmap_shift)) != 0;
}

/// Sets bit `index` in `bitmap`.
fn bitmapSet(bitmap: []u64, index: usize) void {
    const bitmap_index = @divFloor(index, 64);
    const bitmap_shift = @intCast(u6, index - (bitmap_index * 64));
    bitmap[bitmap_index] |= @as(u64, 1) << bitmap_shift;
}
2020/src/day_08.zig
const std = @import("std");
const renderkit = @import("renderkit");
const gk = @import("../gamekit.zig");
const math = gk.math;
const IndexBuffer = renderkit.IndexBuffer;
const VertexBuffer = renderkit.VertexBuffer;
const Vertex = gk.gfx.Vertex;
const Texture = gk.gfx.Texture;

/// Sprite batcher: accumulates textured quads into a dynamic mesh and issues
/// one draw call per contiguous run of quads sharing the same texture.
/// Usage per frame: begin() .. drawTex()/draw() .. end(); flush() may be
/// called mid-frame when graphics state must change.
pub const Batcher = struct {
    mesh: gk.gfx.DynamicMesh(u16, Vertex),
    draw_calls: std.ArrayList(DrawCall),
    begin_called: bool = false,
    frame: u32 = 0, // tracks when a batch is started in a new frame so that state can be reset
    vert_index: usize = 0, // current index into the vertex array
    quad_count: usize = 0, // total quads that we have not yet rendered
    buffer_offset: i32 = 0, // offset into the vertex buffer of the first non-rendered vert

    // One pending draw: a texture and how many queued quads use it.
    const DrawCall = struct {
        image: renderkit.Image,
        quad_count: i32,
    };

    /// Builds the quad index buffer (6 indices / 4 verts per quad) and the
    /// dynamic mesh sized for `max_sprites` quads. The index buffer is a
    /// temporary: DynamicMesh.init is expected to copy it (it is freed here).
    fn createDynamicMesh(allocator: std.mem.Allocator, max_sprites: u16) !gk.gfx.DynamicMesh(u16, Vertex) {
        // Widen to usize before multiplying: `max_sprites * 6` would otherwise be
        // computed in u16 and could overflow before coercion to the alloc length.
        var indices = allocator.alloc(u16, @as(usize, max_sprites) * 6) catch unreachable;
        defer allocator.free(indices);
        var i: usize = 0;
        while (i < max_sprites) : (i += 1) {
            // two triangles per quad: (0,1,2) and (0,2,3)
            indices[i * 3 * 2 + 0] = @intCast(u16, i) * 4 + 0;
            indices[i * 3 * 2 + 1] = @intCast(u16, i) * 4 + 1;
            indices[i * 3 * 2 + 2] = @intCast(u16, i) * 4 + 2;
            indices[i * 3 * 2 + 3] = @intCast(u16, i) * 4 + 0;
            indices[i * 3 * 2 + 4] = @intCast(u16, i) * 4 + 2;
            indices[i * 3 * 2 + 5] = @intCast(u16, i) * 4 + 3;
        }
        return try gk.gfx.DynamicMesh(u16, Vertex).init(allocator, max_sprites * 4, indices);
    }

    /// Creates a Batcher able to hold `max_sprites` quads between flushes.
    /// Panics if `max_sprites` needs more indices than a u16 index buffer can address.
    pub fn init(allocator: std.mem.Allocator, max_sprites: u16) Batcher {
        // BUGFIX: compare in u32. The old `max_sprites * 6` was computed in u16,
        // so for max_sprites >= 10923 the multiplication itself overflowed
        // (safe-mode panic / release UB) before the guard could reject the value.
        if (@as(u32, max_sprites) * 6 > std.math.maxInt(u16)) @panic("max_sprites exceeds u16 index buffer size");

        return .{
            .mesh = createDynamicMesh(allocator, max_sprites) catch unreachable,
            .draw_calls = std.ArrayList(DrawCall).initCapacity(allocator, 10) catch unreachable,
        };
    }

    pub fn deinit(self: *Batcher) void {
        self.mesh.deinit();
        self.draw_calls.deinit();
    }

    /// Starts a batch. Must be balanced with end(); asserts on double-begin.
    pub fn begin(self: *Batcher) void {
        std.debug.assert(!self.begin_called);

        // reset all state for new frame
        if (self.frame != gk.time.frames()) {
            self.frame = gk.time.frames();
            self.vert_index = 0;
            self.buffer_offset = 0;
        }

        self.begin_called = true;
    }

    /// Ends the batch started by begin() and flushes all pending quads.
    pub fn end(self: *Batcher) void {
        std.debug.assert(self.begin_called);
        self.flush();
        self.begin_called = false;
    }

    /// should be called when any graphics state change will occur such as setting a new shader or RenderState
    pub fn flush(self: *Batcher) void {
        if (self.quad_count == 0) return;

        // upload the not-yet-rendered verts [buffer_offset .. buffer_offset + quad_count*4)
        self.mesh.appendVertSlice(@intCast(usize, self.buffer_offset), @intCast(usize, self.quad_count * 4));

        // run through all our accumulated draw calls
        var base_element: i32 = 0;
        for (self.draw_calls.items) |*draw_call| {
            self.mesh.bindImage(draw_call.image, 0);
            self.mesh.draw(base_element, draw_call.quad_count * 6);

            self.buffer_offset += draw_call.quad_count * 4;
            draw_call.image = renderkit.invalid_resource_id;
            base_element += draw_call.quad_count * 6;
        }

        self.quad_count = 0;
        self.draw_calls.items.len = 0;
    }

    /// ensures the vert buffer has enough space and manages the draw call command buffer when textures change
    fn ensureCapacity(self: *Batcher, texture: Texture) !void {
        // if we run out of buffer we have to flush the batch and possibly discard and resize the whole buffer
        // NOTE(review): `> verts.len - 1` flushes one quad early (the exactly-full
        // case also flushes); conservative but kept as-is — confirm before changing.
        if (self.vert_index + 4 > self.mesh.verts.len - 1) {
            self.flush();

            self.vert_index = 0;
            self.buffer_offset = 0;

            // with GL we can just orphan the buffer
            self.mesh.updateAllVerts();
            self.mesh.bindings.vertex_buffer_offsets[0] = 0;
        }

        // start a new draw call if we dont already have one going or whenever the texture changes
        if (self.draw_calls.items.len == 0 or self.draw_calls.items[self.draw_calls.items.len - 1].image != texture.img) {
            try self.draw_calls.append(.{ .image = texture.img, .quad_count = 0 });
        }
    }

    /// Queues an axis-aligned, untransformed quad covering the full texture at `pos`.
    pub fn drawTex(self: *Batcher, pos: math.Vec2, col: u32, texture: Texture) void {
        self.ensureCapacity(texture) catch |err| {
            std.log.warn("Batcher.draw failed to append a draw call with error: {}\n", .{err});
            return;
        };

        var verts = self.mesh.verts[self.vert_index .. self.vert_index + 4];
        verts[0].pos = pos; // tl
        verts[0].uv = .{ .x = 0, .y = 0 };
        verts[0].col = col;

        verts[1].pos = .{ .x = pos.x + texture.width, .y = pos.y }; // tr
        verts[1].uv = .{ .x = 1, .y = 0 };
        verts[1].col = col;

        verts[2].pos = .{ .x = pos.x + texture.width, .y = pos.y + texture.height }; // br
        verts[2].uv = .{ .x = 1, .y = 1 };
        verts[2].col = col;

        verts[3].pos = .{ .x = pos.x, .y = pos.y + texture.height }; // bl
        verts[3].uv = .{ .x = 0, .y = 1 };
        verts[3].col = col;

        self.draw_calls.items[self.draw_calls.items.len - 1].quad_count += 1;
        self.quad_count += 1;
        self.vert_index += 4;
    }

    /// Queues `quad` transformed by `mat`, tinted with `color`.
    pub fn draw(self: *Batcher, texture: Texture, quad: math.Quad, mat: math.Mat32, color: math.Color) void {
        self.ensureCapacity(texture) catch |err| {
            std.log.warn("Batcher.draw failed to append a draw call with error: {}\n", .{err});
            return;
        };

        // copy the quad positions, uvs and color into vertex array transforming them with the matrix as we do it
        mat.transformQuad(self.mesh.verts[self.vert_index .. self.vert_index + 4], quad, color);

        self.draw_calls.items[self.draw_calls.items.len - 1].quad_count += 1;
        self.quad_count += 1;
        self.vert_index += 4;
    }
};
gamekit/graphics/batcher.zig
//! A manually-resettable event: threads block in wait()/timedWait() until some
//! other thread calls set(); reset() returns the event to the unset state.
const std = @import("../std.zig");
const builtin = @import("builtin");
const ResetEvent = @This();

const os = std.os;
const assert = std.debug.assert;
const testing = std.testing;
const Atomic = std.atomic.Atomic;
const Futex = std.Thread.Futex;

impl: Impl = .{},

/// Returns if the ResetEvent was set().
/// Once reset() is called, this returns false until the next set().
/// The memory accesses before the set() can be said to happen before isSet() returns true.
pub fn isSet(self: *const ResetEvent) bool {
    return self.impl.isSet();
}

/// Blocks the caller's thread until the ResetEvent is set().
/// This is effectively a more efficient version of `while (!isSet()) {}`.
/// The memory accesses before the set() can be said to happen before wait() returns.
pub fn wait(self: *ResetEvent) void {
    self.impl.wait(null) catch |err| switch (err) {
        error.Timeout => unreachable, // no timeout provided so we shouldn't have timed-out
    };
}

/// Blocks the caller's thread until the ResetEvent is set(), or until the corresponding timeout expires.
/// If the timeout expires before the ResetEvent is set, `error.Timeout` is returned.
/// This is effectively a more efficient version of `while (!isSet()) {}`.
/// The memory accesses before the set() can be said to happen before timedWait() returns without error.
pub fn timedWait(self: *ResetEvent, timeout_ns: u64) error{Timeout}!void {
    return self.impl.wait(timeout_ns);
}

/// Marks the ResetEvent as "set" and unblocks any threads in `wait()` or `timedWait()` to observe the new state.
/// The ResetEvent stays "set" until reset() is called, making future set() calls do nothing semantically.
/// The memory accesses before set() can be said to happen before isSet() returns true or wait()/timedWait() return successfully.
pub fn set(self: *ResetEvent) void {
    self.impl.set();
}

/// Unmarks the ResetEvent from its "set" state if set() was called previously.
/// It is undefined behavior if reset() is called while threads are blocked in wait() or timedWait().
/// Concurrent calls to set(), isSet() and reset() are allowed.
pub fn reset(self: *ResetEvent) void {
    self.impl.reset();
}

// Single-threaded builds cannot have waiters on other threads, so the futex
// machinery is replaced by a plain bool.
const Impl = if (builtin.single_threaded)
    SingleThreadedImpl
else
    FutexImpl;

const SingleThreadedImpl = struct {
    is_set: bool = false,

    fn isSet(self: *const Impl) bool {
        return self.is_set;
    }

    fn wait(self: *Impl, timeout: ?u64) error{Timeout}!void {
        if (self.isSet()) {
            return;
        }

        // There are no other threads to wake us up.
        // So if we wait without a timeout we would never wake up.
        const timeout_ns = timeout orelse {
            unreachable; // deadlock detected
        };

        std.time.sleep(timeout_ns);
        return error.Timeout;
    }

    fn set(self: *Impl) void {
        self.is_set = true;
    }

    fn reset(self: *Impl) void {
        self.is_set = false;
    }
};

// Three-state futex protocol: unset -> (waiting) -> is_set.
// `waiting` records that at least one thread is blocked, so set() only does a
// Futex.wake when there can actually be waiters.
const FutexImpl = struct {
    state: Atomic(u32) = Atomic(u32).init(unset),

    const unset = 0;
    const waiting = 1;
    const is_set = 2;

    fn isSet(self: *const Impl) bool {
        // Acquire barrier ensures memory accesses before set() happen before we return true.
        return self.state.load(.Acquire) == is_set;
    }

    fn wait(self: *Impl, timeout: ?u64) error{Timeout}!void {
        // Outline the slow path to allow isSet() to be inlined
        if (!self.isSet()) {
            return self.waitUntilSet(timeout);
        }
    }

    fn waitUntilSet(self: *Impl, timeout: ?u64) error{Timeout}!void {
        @setCold(true);

        // Try to set the state from `unset` to `waiting` to indicate
        // to the set() thread that others are blocked on the ResetEvent.
        // We avoid using any strict barriers until the end when we know the ResetEvent is set.
        var state = self.state.load(.Monotonic);
        if (state == unset) {
            // CAS failure returns the actual value (may already be waiting/is_set);
            // success means we installed `waiting` ourselves.
            state = self.state.compareAndSwap(state, waiting, .Monotonic, .Monotonic) orelse waiting;
        }

        // Wait until the ResetEvent is set since the state is waiting.
        if (state == waiting) {
            var futex_deadline = Futex.Deadline.init(timeout);
            while (true) {
                const wait_result = futex_deadline.wait(&self.state, waiting);

                // Check if the ResetEvent was set before possibly reporting error.Timeout below.
                state = self.state.load(.Monotonic);
                if (state != waiting) {
                    break;
                }

                try wait_result;
            }
        }

        // Acquire barrier ensures memory accesses before set() happen before we return.
        assert(state == is_set);
        self.state.fence(.Acquire);
    }

    fn set(self: *Impl) void {
        // Quick check if the ResetEvent is already set before doing the atomic swap below.
        // set() could be getting called quite often and multiple threads calling swap() increases contention unnecessarily.
        if (self.state.load(.Monotonic) == is_set) {
            return;
        }

        // Mark the ResetEvent as set and unblock all waiters waiting on it if any.
        // Release barrier ensures memory accesses before set() happen before the ResetEvent is observed to be "set".
        if (self.state.swap(is_set, .Release) == waiting) {
            Futex.wake(&self.state, std.math.maxInt(u32));
        }
    }

    fn reset(self: *Impl) void {
        self.state.store(unset, .Monotonic);
    }
};

test "ResetEvent - smoke test" {
    // make sure the event is unset
    var event = ResetEvent{};
    try testing.expectEqual(false, event.isSet());

    // make sure the event gets set
    event.set();
    try testing.expectEqual(true, event.isSet());

    // make sure the event gets unset again
    event.reset();
    try testing.expectEqual(false, event.isSet());

    // waits should timeout as there's no other thread to set the event
    try testing.expectError(error.Timeout, event.timedWait(0));
    try testing.expectError(error.Timeout, event.timedWait(std.time.ns_per_ms));

    // set the event again and make sure waits complete
    event.set();
    event.wait();
    try event.timedWait(std.time.ns_per_ms);
    try testing.expectEqual(true, event.isSet());
}

test "ResetEvent - signaling" {
    // This test requires spawning threads
    if (builtin.single_threaded) {
        return error.SkipZigTest;
    }

    // Two events ping-pong a counter between the test thread (input) and a
    // spawned thread (output), checking each observed value in turn.
    const Context = struct {
        in: ResetEvent = .{},
        out: ResetEvent = .{},
        value: usize = 0,

        fn input(self: *@This()) !void {
            // wait for the value to become 1
            self.in.wait();
            self.in.reset();
            try testing.expectEqual(self.value, 1);

            // bump the value and wake up output()
            self.value = 2;
            self.out.set();

            // wait for output to receive 2, bump the value and wake us up with 3
            self.in.wait();
            self.in.reset();
            try testing.expectEqual(self.value, 3);

            // bump the value and wake up output() for it to see 4
            self.value = 4;
            self.out.set();
        }

        fn output(self: *@This()) !void {
            // start with 0 and bump the value for input to see 1
            try testing.expectEqual(self.value, 0);
            self.value = 1;
            self.in.set();

            // wait for input to receive 1, bump the value to 2 and wake us up
            self.out.wait();
            self.out.reset();
            try testing.expectEqual(self.value, 2);

            // bump the value to 3 for input to see (rhymes)
            self.value = 3;
            self.in.set();

            // wait for input to bump the value to 4 and receive no more (rhymes)
            self.out.wait();
            self.out.reset();
            try testing.expectEqual(self.value, 4);
        }
    };

    var ctx = Context{};
    const thread = try std.Thread.spawn(.{}, Context.output, .{&ctx});
    defer thread.join();

    try ctx.input();
}

test "ResetEvent - broadcast" {
    // This test requires spawning threads
    if (builtin.single_threaded) {
        return error.SkipZigTest;
    }

    const num_threads = 10;

    // Counting barrier: the last of num_threads arrivals sets the event.
    const Barrier = struct {
        event: ResetEvent = .{},
        counter: Atomic(usize) = Atomic(usize).init(num_threads),

        fn wait(self: *@This()) void {
            if (self.counter.fetchSub(1, .AcqRel) == 1) {
                self.event.set();
            }
        }
    };

    const Context = struct {
        start_barrier: Barrier = .{},
        finish_barrier: Barrier = .{},

        fn run(self: *@This()) void {
            self.start_barrier.wait();
            self.finish_barrier.wait();
        }
    };

    var ctx = Context{};
    var threads: [num_threads - 1]std.Thread = undefined;
    for (threads) |*t| t.* = try std.Thread.spawn(.{}, Context.run, .{&ctx});
    defer for (threads) |t| t.join();

    ctx.run();
}
lib/std/Thread/ResetEvent.zig
const std = @import("std"); const debug = std.debug; const testing = std.testing; const expect = testing.expect; const expectError = testing.expectError; const warn = debug.warn; const mem = std.mem; const math = std.math; const Allocator = mem.Allocator; const builtin = @import("builtin"); const TypeId = builtin.TypeId; const DBG = false; const DBG1 = false; /// `bytes` is a utf-8 encoded string pub fn parseFloat(comptime T: type, bytes: []const u8) anyerror!T { switch (TypeId(@typeInfo(T))) { TypeId.Float => return ParseNumber(T).parse(bytes), else => @compileError("Expecting Float"), } } test "ParseNumber.parseFloat" { expect((try parseFloat(f64, "-1.000_001e10")) == -1.000001e10); expect((try parseFloat(f32, "0.1")) == 0.1); // Couple of examples using ParseNumber directly expect((try ParseNumber(i32).parse("0x1234_ABCD")) == 0x1234ABCD); const parseF32 = ParseNumber(f32).parse; expect((try parseF32("-1.0")) == -1.0); } /// The returned struct has a parse member /// that takes a slice and returns a T. 
pub fn ParseNumber(comptime T: type) type {
    return struct {
        const Self = @This();

        /// Parses the entire string (modulo surrounding whitespace) into a T.
        /// Returns error.NoValue if nothing parsed or trailing junk remains.
        pub fn parse(str: []const u8) anyerror!T {
            if (DBG) warn("ParseNumber:+ str={}\n", str);
            var it: U8Iter() = undefined;
            it.set(str, 0);
            var result = try switch (TypeId(@typeInfo(T))) {
                TypeId.Int => parseIntegerNumber(T, &it),
                TypeId.Float => parseFloatNumber(T, &it),
                else => @compileError("Expecting Int or Float"),
            };
            // Skip any trailing WS and if we didn't consume the entire string it's an error
            _ = it.skipWs();
            if (it.idx < str.len) return error.NoValue;
            if (DBG) warn("ParseNumber:- str={} result={}\n", str, result);
            return result;
        }
    };
}

/// ASCII-only lower-casing; non-uppercase bytes pass through unchanged.
fn toLower(ch: u8) u8 {
    return if ((ch >= 'A') and (ch <= 'Z')) ch + ('a' - 'A') else ch;
}

/// A cursor over a byte slice with current/next/prev access, lower-casing
/// helpers and whitespace skipping. 0 is the end-of-string sentinel.
fn U8Iter() type {
    return struct {
        const Self = @This();

        initial_idx: usize, // lowest index prevCh() will back up to
        idx: usize, // current position in `str`
        str: []const u8,

        pub fn init(str: []const u8, initial_idx: usize) Self {
            return Self{
                .initial_idx = initial_idx,
                .idx = initial_idx,
                .str = str,
            };
        }

        // Re-point an existing iterator at a new string/position.
        pub fn set(pSelf: *Self, str: []const u8, initial_idx: usize) void {
            pSelf.initial_idx = initial_idx;
            pSelf.idx = initial_idx;
            pSelf.str = str;
        }

        // True when the cursor is at or past the end of the string.
        pub fn done(pSelf: *Self) bool {
            return pSelf.idx >= pSelf.str.len;
        }

        // Advance one position, clamped at end of string.
        pub fn next(pSelf: *Self) void {
            if (!pSelf.done()) pSelf.idx += 1;
            if (DBG) warn("I: next {}\n", pSelf);
        }

        pub fn curIdx(pSelf: *Self) usize {
            return pSelf.idx;
        }

        // Current character or 0 if end of string
        pub fn curCh(pSelf: *Self) u8 {
            if (pSelf.done()) return 0;
            return pSelf.str[pSelf.idx];
        }

        // Current character lower-cased, or 0 if end of string
        pub fn curChLc(pSelf: *Self) u8 {
            return toLower(pSelf.curCh());
        }

        // Peek next character, if end of string
        // NOTE(review): despite its name this returns the *current* character,
        // identically to curCh() — confirm intent.
        pub fn peekNextCh(pSelf: *Self) u8 {
            if (pSelf.done()) return 0;
            return pSelf.curCh();
        }

        // Peek Prev character or first character or 0 if end of string
        pub fn peekPrevCh(pSelf: *Self) u8 {
            var idx = if (pSelf.idx > pSelf.initial_idx) pSelf.idx - 1 else pSelf.idx;
            if (pSelf.done()) return 0;
            return pSelf.str[idx];
        }

        // Next character or 0 if end of string
        pub fn nextCh(pSelf: *Self) u8 {
            if (pSelf.done()) return 0;
            var ch = pSelf.str[pSelf.idx];
            pSelf.idx += 1;
            return ch;
        }

        // Prev character or first character or if string is empty 0
        pub fn prevCh(pSelf: *Self) u8 {
            if (pSelf.idx > pSelf.initial_idx) pSelf.idx -= 1;
            return pSelf.peekPrevCh();
        }

        // Next character skipping white space characters or 0 if end of string
        // Ignore ' ':0x20, HT:0x9
        // What about LF:0xA, VT:0xB, FF:0xC, CR:0xD, NEL:0x85, NBS:0xA0?
        // Or other White Space chars: https://en.wikipedia.org/wiki/Whitespace_character
        pub fn skipWs(pSelf: *Self) u8 {
            var ch = pSelf.curCh();
            while ((ch == ' ') or (ch == '\t')) {
                pSelf.next();
                ch = pSelf.curCh();
            }
            if (DBG) warn("SkipWs:- ch='{c}':0x{x} {}\n", ch, ch, pSelf);
            return ch;
        }

        // Next character converted to lower case or 0 if end of string
        pub fn nextChLc(pSelf: *Self) u8 {
            return toLower(pSelf.nextCh());
        }

        // Prev character converted to lower case or 0 the string is empty
        pub fn prevChLc(pSelf: *Self) u8 {
            return toLower(pSelf.prevCh());
        }

        // Next character converted to lower case skipping leading white space character
        pub fn nextChLcSkipWs(pSelf: *Self) u8 {
            return toLower(pSelf.skipWs());
        }
    };
}

/// Result record for the low-level parsers: the parsed value (if any), how
/// many digit characters were consumed, and the index after the last one.
fn ParseResult(comptime T: type) type {
    return struct {
        const Self = @This();

        last_idx: usize,
        value: T,
        value_set: bool, // true once set() has recorded a valid value
        digits: usize, // number of digit characters consumed

        pub fn init() Self {
            //warn("PR: init\n");
            return Self{
                .last_idx = 0,
                .value = 0,
                .value_set = false,
                .digits = 0,
            };
        }

        pub fn reinit(pSelf: *Self) void {
            //warn("PR: reinit\n");
            pSelf.last_idx = 0;
            pSelf.value = 0;
            pSelf.value_set = false;
            pSelf.digits = 0;
        }

        pub fn set(pSelf: *Self, v: T, last_idx: usize, digits: usize) void {
            //warn("PR: set v={} last_idx={}\n", v, last_idx);
            pSelf.last_idx = last_idx;
            pSelf.value = v;
            pSelf.value_set = true;
            pSelf.digits = digits;
        }
    };
}

// Core integer scanner: optional sign, optional 0b/0o/0d/0x radix prefix when
// radix_val == 0, `_` separators, range-checked against T. On an invalid
// character the cursor is backed up so the caller can continue from it.
// Return last character with 0 if end of string
fn parseNumber(comptime T: type, pIter: *U8Iter(), radix_val: usize) ParseResult(T) {
    var result = ParseResult(T).init();
    pIter.initial_idx = pIter.idx;
    var ch = pIter.nextChLc();
    if (DBG) warn("PN:+ pr={}, it={} ch='{c}':0x{x}\n", result, pIter, ch, ch);
    defer if (DBG) warn("PN:- pr={} it={} ch='{c}':0x{x}\n", result, pIter, ch, ch);
    var radix = radix_val;
    var value: u128 = 0;
    var negative: bool = false;

    // Handle leading +, -
    if (ch == '-') {
        ch = pIter.nextChLc();
        if (DBG1) warn("PN: neg ch='{c}':0x{x}\n", ch, ch);
        negative = true;
    } else if (ch == '+') {
        ch = pIter.nextChLc();
        if (DBG1) warn("PN: plus ch='{c}':0x{x}\n", ch, ch);
        negative = false;
    }

    // Handle radix if not passed
    if (radix == 0) {
        if ((ch == '0') and !pIter.done()) {
            switch (pIter.nextChLc()) {
                'b' => {
                    radix = 2;
                    ch = pIter.nextChLc();
                },
                'o' => {
                    radix = 8;
                    ch = pIter.nextChLc();
                },
                'd' => {
                    radix = 10;
                    ch = pIter.nextChLc();
                },
                'x' => {
                    radix = 16;
                    ch = pIter.nextChLc();
                },
                else => {
                    // not a prefix; back up so the '0' still counts as a digit
                    radix = 10;
                    ch = pIter.prevChLc();
                },
            }
            if (DBG1) warn("PN: radix={} ch='{c}':0x{x}\n", radix, ch, ch);
        } else {
            radix = 10;
            if (DBG1) warn("PN: default radix={} ch='{c}':0x{x}\n", radix, ch, ch);
        }
    }

    // Handle remaining digits until end of string or an invalid character
    var digits: usize = 0;
    while (ch != 0) : (ch = pIter.nextChLc()) {
        if (DBG1) warn("PN: TOL value={} it={} digits={} ch='{c}':0x{x}\n", value, pIter, digits, ch, ch);
        if (ch == '_') {
            continue;
        }
        var v: u8 = undefined;
        if ((ch >= '0') and (ch <= '9')) {
            v = ch - '0';
        } else if ((ch >= 'a') and (ch <= 'f')) {
            v = 10 + (ch - 'a');
        } else {
            // An invalid character, done
            if (DBG1) warn("PN: bad ch='{c}':0x{x}\n", ch, ch);
            _ = pIter.prevCh();
            break;
        }
        // An invalid character for current radix, done
        if (v >= radix) {
            if (DBG1) warn("PN: v:{} >= radix:{} ch='{c}':0x{x}\n", v, radix, ch, ch);
            _ = pIter.prevCh();
            break;
        }
        value *= radix;
        value += v;
        digits += 1;
    }
    if (DBG1) warn("PN: AL value={} it={} digits={}\n", value, pIter, digits);

    // Only continue if there were digits
    if (digits > 0) {
        if (DBG1) warn("PN: digits > 0 value={}\n", value);
        if (negative) {
            if (DBG1) warn("PN: negative\n");
            // two's-complement negate within the u128 accumulator
            value = @bitCast(u128, -1 *% @intCast(i128, value));
        }
        if (DBG1) warn("PN: before T.is_signed value={}\n", value);
        if (T.is_signed) {
            var svalue: i128 = @intCast(i128, value);
            var smax: T = math.maxInt(T);
            var smin: T = math.minInt(T);
            if (DBG1) warn("PN: signed svalue={} smin={} smax={}\n", svalue, smin, smax);
            // range-check before narrowing; out-of-range leaves value_set false
            if (svalue >= math.minInt(T) and svalue <= math.maxInt(T)) {
                result.set(@intCast(T, @intCast(i128, value) & @intCast(T, -1)), pIter.curIdx(), digits);
            }
        } else {
            if (DBG1) warn("PN: after T.is_signed was false value={}\n", value);
            var umax: T = math.maxInt(T);
            if (DBG1) warn("PN:umax={}\n", umax);
            if (value <= math.maxInt(T)) {
                result.set(@intCast(T, value & math.maxInt(T)), pIter.curIdx(), digits);
            }
        }
    }
    return result;
}

/// Skips leading whitespace, then parses an integer with auto-detected radix.
/// Returns error.NoValue if no in-range value was parsed.
fn parseIntegerNumber(comptime T: type, pIter: *U8Iter()) anyerror!T {
    var result = ParseResult(T).init();
    var ch = pIter.skipWs();
    if (DBG) warn("PIN:+ pr={} it={} ch='{c}':0x{x}\n", result, pIter, ch, ch);
    defer if (DBG) warn("PIN:- pr={} it={} ch='{c}':0x{x}\n", result, pIter, ch, ch);
    result = parseNumber(T, pIter, 0);
    if (!result.value_set) {
        if (DBG) warn("PIN: error no value\n");
        return error.NoValue;
    }
    return result.value;
}

/// Skips leading whitespace, then parses [-]tens[.fraction][e exponent] as a
/// float, assembling the pieces from three base-10 i128 integer parses.
fn parseFloatNumber(comptime T: type, pIter: *U8Iter()) anyerror!T {
    var ch = pIter.skipWs();
    var pr = ParseResult(T).init();
    var negative: T = 1;
    if (DBG) warn("PFN:+ pr={} it={} ch='{c}':0x{x}\n", pr, pIter, ch, ch);
    defer if (DBG) warn("PFN:- pr={} it={} ch='{c}':0x{x}\n", pr, pIter, ch, ch);
    if (ch == '-') {
        negative = -1;
        ch = pIter.nextChLc();
    }

    // Get Tens
    var pr_tens = parseNumber(i128, pIter, 10);
    if (pr_tens.value_set) {
        if (DBG1) warn("PFN: pr_tens={} it={} ch='{c}':0x{x}\n", pr_tens, pIter, pIter.curCh(), pIter.curCh());
        var pr_fraction = ParseResult(i128).init();
        var pr_exponent = ParseResult(i128).init();
        if (pIter.curCh() == '.') {
            // Get fraction
            pIter.next();
            pr_fraction = parseNumber(i128, pIter, 10);
            if (!pr_fraction.value_set) {
                // a bare trailing '.' is allowed; treat the fraction as 0
                if (DBG1) warn("PFN: no fraction\n");
                pr_fraction.set(0, pIter.idx, 0);
            }
        }
        if (DBG1) warn("PFN: pr_fraction={} it={} ch='{c}':0x{x}\n", pr_fraction, pIter, pIter.curCh(), pIter.curCh());
        if (pIter.curCh() == 'e') {
            // Get Exponent
            pIter.next(); // skip e
            pr_exponent = parseNumber(i128, pIter, 10);
            if (!pr_exponent.value_set) {
                if (DBG1) warn("PFN: no exponent\n");
                pr_exponent.set(0, pIter.idx, 0);
            }
        }
        if (DBG1) warn("PFN: pr_exponent={} it={} ch='{c}':0x{x}\n", pr_exponent, pIter, pIter.curCh(), pIter.curCh());
        var tens = @intToFloat(T, pr_tens.value);
        // scale the fraction digits down by 10^(digit count)
        var fraction = @intToFloat(T, pr_fraction.value) / std.math.pow(T, 10, @intToFloat(T, pr_fraction.digits));
        var significand: T = if (pr_tens.value >= 0) tens + fraction else tens - fraction;
        var value = negative * significand * std.math.pow(T, @intToFloat(T, 10), @intToFloat(T, pr_exponent.value));
        pr.set(value, pIter.idx, pr_tens.digits + pr_fraction.digits);
        if (DBG1) warn("PFN: pr.value={}\n", pr.value);
        return pr.value;
    }
    if (DBG) warn("PFN: error no value\n");
    return error.NoValue;
}

test "ParseNumber.parseIntegerNumber" {
    var ch: u8 = undefined;
    var it: U8Iter() = undefined;

    it.set("", 0);
    expectError(error.NoValue, parseIntegerNumber(u8, &it));

    it.set("0", 0);
    var vU8 = try parseIntegerNumber(u8, &it);
    expect(vU8 == 0);
    expect(it.idx == 1);

    it.set("1 2", 0);
    vU8 = try parseIntegerNumber(u8, &it);
    if (DBG) warn("vU8={} it={}\n", vU8, it);
    expect(vU8 == 1);
    expect(it.idx == 1);
    vU8 = try parseIntegerNumber(u8, &it);
    if (DBG) warn("vU8={} it={}\n", vU8, it);
    expect(vU8 == 2);
    expect(it.idx == 3);

    it.set("\t0", 0);
    vU8 = try parseIntegerNumber(u8, &it);
    expect(vU8 == 0);
    expect(it.idx == 2);

    it.set(" \t0", 0);
    vU8 = try parseIntegerNumber(u8, &it);
    expect(vU8 == 0);
    expect(it.idx == 3);

    it.set(" \t 0", 0);
    vU8 = try parseIntegerNumber(u8, &it);
    expect(vU8 == 0);
    expect(it.idx == 4);

    it.set("1.", 0);
    vU8 = try parseIntegerNumber(u8, &it);
    expect(vU8 == 1);
    expect(it.idx == 1);
}

test "ParseNumber.parseFloatNumber" {
    if (DBG) warn("\n");
    var ch: u8 = undefined;
    var it: U8Iter() = undefined;
    var vF32: f32 = undefined;

    it.set("", 0);
    expectError(error.NoValue, parseFloatNumber(f32, &it));

    it.set("0", 0);
    vF32 = try parseFloatNumber(f32, &it);
    expect(vF32 == 0);
    expect(it.idx == 1);

    it.set("1", 0);
    vF32 = try parseFloatNumber(f32, &it);
    expect(vF32 == 1);
    expect(it.idx == 1);

    it.set("+1", 0);
    vF32 = try parseFloatNumber(f32, &it);
    expect(vF32 == 1);
    expect(it.idx == 2);

    it.set("-1", 0);
    vF32 = try parseFloatNumber(f32, &it);
    expect(vF32 == -1);
    expect(it.idx == 2);

    it.set("1.2", 0);
    vF32 = try parseFloatNumber(f32, &it);
    expect(vF32 == 1.2);
    expect(it.idx == 3);

    it.set("1e1", 0);
    vF32 = try parseFloatNumber(f32, &it);
    expect(vF32 == 10);
    expect(it.idx == 3);

    it.set("1.2 3.4", 0);
    vF32 = try parseFloatNumber(f32, &it);
    if (DBG) warn("vF32={} it={}\n", vF32, it);
    expect(vF32 == 1.2);
    expect(it.idx == 3);
    vF32 = try parseFloatNumber(f32, &it);
    if (DBG) warn("vF32={} it={}\n", vF32, it);
    expect(vF32 == 3.4);
    expect(it.idx == 7);
}

test "ParseNumber" {
    expectError(error.NoValue, ParseNumber(u8).parse(""));
    expect((try ParseNumber(u8).parse("0")) == 0);
    expect((try ParseNumber(u8).parse("00")) == 0);
    expect((try ParseNumber(u8).parse(" 1")) == 1);
    expect((try ParseNumber(u8).parse(" 2 ")) == 2);
    expectError(error.NoValue, ParseNumber(u8).parse(" 2d"));
    const s = " \t 123\t";
    var slice = s[0..];
    expect((try ParseNumber(u8).parse(slice)) == 123);
    expect((try ParseNumber(i8).parse("-1")) == -1);
    expect((try ParseNumber(i8).parse("1")) == 1);
    expect((try ParseNumber(i8).parse("+1")) == 1);
    expect((try ParseNumber(i8).parse("01")) == 1);
    expect((try ParseNumber(i8).parse("001")) == 1);
    expect((try ParseNumber(i8).parse("-01")) == -1);
    expect((try ParseNumber(i8).parse("-001")) == -1);
    expect((try ParseNumber(u8).parse("0b0")) == 0);
    expect((try ParseNumber(u8).parse("0b1")) == 1);
    expect((try ParseNumber(u8).parse("0b1010_0101")) == 0xA5);
    expectError(error.NoValue, ParseNumber(u8).parse("0b2"));
    expect((try ParseNumber(u8).parse("0o0")) == 0);
    expect((try ParseNumber(u8).parse("0o1")) == 1);
    expect((try ParseNumber(u8).parse("0o7")) == 7);
    expect((try ParseNumber(u8).parse("0o77")) == 0x3f);
    expect((try ParseNumber(u32).parse("0o111_777")) == 0b1001001111111111);
    expectError(error.NoValue, ParseNumber(u8).parse("0b8"));
    expect((try ParseNumber(u8).parse("0d0")) == 0);
    expect((try ParseNumber(u8).parse("0d1")) == 1);
    expect((try ParseNumber(i8).parse("-0d1")) == -1);
    expect((try ParseNumber(i8).parse("+0d1")) == 1);
    expect((try ParseNumber(u8).parse("0d9")) == 9);
    expect((try ParseNumber(u8).parse("0")) == 0);
    expect((try ParseNumber(u8).parse("9")) == 9);
    expect((try ParseNumber(u8).parse("127")) == 0x7F);
    expect((try ParseNumber(u8).parse("255")) == 255);
    expect((try ParseNumber(u64).parse("123_456_789")) == 123456789);
    expect((try ParseNumber(u8).parse("0x0")) == 0x0);
    expect((try ParseNumber(u8).parse("0x1")) == 0x1);
    expect((try ParseNumber(u8).parse("0x9")) == 0x9);
    expect((try ParseNumber(u8).parse("0xa")) == 0xa);
    expect((try ParseNumber(u8).parse("0xf")) == 0xf);
    expect((try ParseNumber(i128).parse("-170141183460469231731687303715884105728")) == @bitCast(i128, @intCast(u128, 0x80000000000000000000000000000000)));
    expect((try ParseNumber(i128).parse("-170141183460469231731687303715884105727")) == @bitCast(i128, @intCast(u128, 0x80000000000000000000000000000001)));
    expect((try ParseNumber(i128).parse("-1")) == @bitCast(i128, @intCast(u128, 0xffffffffffffffffffffffffffffffff)));
    expect((try ParseNumber(i128).parse("0")) == @bitCast(i128, @intCast(u128, 0x00000000000000000000000000000000)));
    expect((try ParseNumber(i128).parse("170141183460469231731687303715884105726")) == @bitCast(i128, @intCast(u128, 0x7ffffffffffffffffffffffffffffffe)));
    expect((try ParseNumber(i128).parse("170141183460469231731687303715884105727")) == @bitCast(i128, @intCast(u128, 0x7fffffffffffffffffffffffffffffff)));
    expect((try ParseNumber(u128).parse("0")) == 0);
    expect((try ParseNumber(u128).parse("1")) == 1);
    expect((try ParseNumber(u128).parse("340282366920938463463374607431768211454")) == 0xfffffffffffffffffffffffffffffffe);
    expect((try ParseNumber(u128).parse("340282366920938463463374607431768211455")) == 0xffffffffffffffffffffffffffffffff);
    expect((try ParseNumber(u128).parse("0x1234_5678_9ABc_Def0_0FEd_Cba9_8765_4321")) == 0x123456789ABcDef00FEdCba987654321);
    expectError(error.NoValue, ParseNumber(u8).parse("0xg"));
    expect((try ParseNumber(f32).parse("0")) == 0);
    expect((try ParseNumber(f32).parse("-1")) == -1);
    expect((try ParseNumber(f32).parse("1.")) == 1.0);
    expect((try ParseNumber(f32).parse("1e0")) == 1);
    expect((try ParseNumber(f32).parse("1e1")) == 10);
    expect((try ParseNumber(f32).parse("1e-1")) == 0.1);
    expect((try ParseNumber(f32).parse("-0.75780")) == -0.75780);
    expect((try ParseNumber(f64).parse("0.1")) == 0.1);
    expect((try ParseNumber(f64).parse("-1.")) == -1.0);
    expect((try ParseNumber(f64).parse("-2.1")) == -2.1);
    expect((try ParseNumber(f64).parse("-1.2")) == -1.2);
    expect((try ParseNumber(f64).parse("-00.75780")) == -0.75780);
    expect(floatFuzzyEql(f64, try ParseNumber(f64).parse("1.2e2"), 1.2e2, 0.00001));
    expect(floatFuzzyEql(f64, try ParseNumber(f64).parse("-1.2e-2"), -1.2e-2, 0.00001));
}

/// Approximate float equality: true when lhs and rhs differ by at most ~2*fuz.
fn floatFuzzyEql(comptime T: type, lhs: T, rhs: T, fuz: T) bool {
    // Determine which is larger and smaller
    // then add the fuz to smaller and subtract from larger
    // If smaller >= larger then they are equal
    var smaller: T = undefined;
    var larger: T = undefined;
    if (lhs > rhs) {
        larger = lhs - fuz;
        smaller = rhs + fuz;
    } else {
        larger = rhs - fuz;
        smaller = lhs + fuz;
    }
    if (DBG1) warn("smaller={} larger={}\n", smaller, larger);
    return smaller >= larger;
}

test "ParseNumber.errors" {
    expectError(error.NoValue, ParseNumber(u8).parse("-0d1"));
    expectError(error.NoValue, ParseNumber(u8).parse("-1"));
    expectError(error.NoValue, ParseNumber(u8).parse("-127"));
    expectError(error.NoValue, ParseNumber(u8).parse("-128"));
    expectError(error.NoValue, ParseNumber(u8).parse("256"));
    if (!DBG and !DBG1) {
        // Only test if DBG and DBG1 are both false as u0/i1 can't be printed
        expectError(error.NoValue, ParseNumber(u0).parse("1"));
        // Causes error:
        // /home/wink/opt/lib/zig/std/fmt.zig:715:52: error: integer value 1 cannot be implicitly casted to type 'i1'
        //     const new_value = @intCast(uint, -(value + 1)) + 1;
        //expectError(error.NoValue, ParseNumber(i1).parse("1"));
    }
    expectError(error.NoValue, ParseNumber(u1).parse("2"));
    expectError(error.NoValue, ParseNumber(u2).parse("4"));
    expectError(error.NoValue, ParseNumber(u8).parse("256"));
}

test "ParseNumber.non-u8-sizes" {
    if (!DBG and !DBG1) {
        // Only test if DBG and DBG1 are both false as u0/u1 can't be printed
        expect((try ParseNumber(u0).parse("0")) == 0);
        const parseI1 = ParseNumber(i1).parse;
        expect((try parseI1("0")) == 0);
        expect((try parseI1("-1")) == -1);
    }
    const parseU1 = ParseNumber(u1).parse;
    expect((try parseU1("0")) == 0);
    expect((try parseU1("1")) == 1);
    const parseU2 = ParseNumber(u2).parse;
    expect((try parseU2("0")) == 0);
    expect((try parseU2("1")) == 1);
    expect((try parseU2("2")) == 2);
    expect((try parseU2("3")) == 3);
    expect((try ParseNumber(u127).parse("12345678901234567890")) == u127(12345678901234567890));
    expect((try ParseNumber(i127).parse("-12345678901234567890")) == i127(-12345678901234567890));
}

test "ParseNumber.parseF32" {
    const parseF32 = ParseNumber(f32).parse;
    var vf32 = try parseF32("123.e4");
    expect(vf32 == f32(123e4));
}

test "ParseNumber.leading.zeros" {
    const parseF32 = ParseNumber(f32).parse;
    expect((try parseF32("01")) == 1);
    expect((try parseF32("001.001e001")) == 1.001e001);
    expect((try parseF32("-01")) == -1);
    expect((try parseF32("-001.001e-001")) == -1.001e-001);
}

test "ParseNumber.no.leading.digit" {
    // TODO: Should we allow this
    const parseF32 = ParseNumber(f32).parse;
    // expect((try parseF32("0.1")) == f32(.1)); // Zig compiler error
    expectError(error.NoValue, parseF32(".1"));
    expectError(error.NoValue, parseF32("-.1"));
}

test "ParseNumber.trailing.decimal" {
    const parseF32 = ParseNumber(f32).parse;
    expect((try parseF32("0.")) == f32(0.));
    expect((try parseF32("-0.")) == f32(-0.));
    expect((try parseF32("1.")) == f32(1.));
    expect((try parseF32("-1.")) == f32(-1.));
}
parse_number.zig
// NOTE(review): this file targets an older Zig dialect (std.Buffer,
// io.OutStream with .stream fields, `extern enum`); keep edits consistent
// with the project's pinned compiler version.
const std = @import("std");
const io = std.io;
const mem = std.mem;
const fs = std.fs;
const process = std.process;
const Allocator = mem.Allocator;
const ArrayList = std.ArrayList;
const Buffer = std.Buffer;
const Target = std.Target;
const self_hosted_main = @import("main.zig");
const errmsg = @import("errmsg.zig");
const DepTokenizer = @import("dep_tokenizer.zig").Tokenizer;
const assert = std.debug.assert;

// Streams shared by the fmt entry points below; initialized in fmtMain.
var stderr_file: fs.File = undefined;
var stderr: *io.OutStream(fs.File.WriteError) = undefined;
var stdout: *io.OutStream(fs.File.WriteError) = undefined;

comptime {
    _ = @import("dep_tokenizer.zig");
}

// ABI warning
// Exposes the zen text to the C side as pointer + length.
export fn stage2_zen(ptr: *[*]const u8, len: *usize) void {
    const info_zen = @import("main.zig").info_zen;
    ptr.* = info_zen;
    len.* = info_zen.len;
}

// ABI warning
export fn stage2_panic(ptr: [*]const u8, len: usize) void {
    @panic(ptr[0..len]);
}

// ABI warning
// Error codes returned across the C ABI boundary; values/order must match stage1.
const Error = extern enum {
    None,
    OutOfMemory,
    InvalidFormat,
    SemanticAnalyzeFail,
    AccessDenied,
    Interrupted,
    SystemResources,
    FileNotFound,
    FileSystem,
    FileTooBig,
    DivByZero,
    Overflow,
    PathAlreadyExists,
    Unexpected,
    ExactDivRemainder,
    NegativeDenominator,
    ShiftedOutOneBits,
    CCompileErrors,
    EndOfFile,
    IsDir,
    NotDir,
    UnsupportedOperatingSystem,
    SharingViolation,
    PipeBusy,
    PrimitiveTypeNotFound,
    CacheUnavailable,
    PathTooLong,
    CCompilerCannotFindFile,
    NoCCompilerInstalled,
    ReadingDepFile,
    InvalidDepFile,
    MissingArchitecture,
    MissingOperatingSystem,
    UnknownArchitecture,
    UnknownOperatingSystem,
    UnknownABI,
    InvalidFilename,
    DiskQuota,
    DiskSpace,
    UnexpectedWriteFailure,
    UnexpectedSeekFailure,
    UnexpectedFileTruncationFailure,
    Unimplemented,
    OperationAborted,
    BrokenPipe,
    NoSpaceLeft,
    NotLazy,
    IsAsync,
    ImportOutsidePkgPath,
    UnknownCpu,
    UnknownSubArchitecture,
    UnknownCpuFeature,
    InvalidCpuFeatures,
    InvalidLlvmCpuFeaturesFormat,
    UnknownApplicationBinaryInterface,
};

const FILE = std.c.FILE;
const ast = std.zig.ast;
const translate_c = @import("translate_c.zig");

/// Args should have a null terminating last arg.
/// Runs translate-c; on semantic failure the clang error list is handed
/// back to the caller via out_errors_ptr/out_errors_len.
export fn stage2_translate_c(
    out_ast: **ast.Tree,
    out_errors_ptr: *[*]translate_c.ClangErrMsg,
    out_errors_len: *usize,
    args_begin: [*]?[*]const u8,
    args_end: [*]?[*]const u8,
    resources_path: [*:0]const u8,
) Error {
    var errors = @as([*]translate_c.ClangErrMsg, undefined)[0..0];
    out_ast.* = translate_c.translate(std.heap.c_allocator, args_begin, args_end, &errors, resources_path) catch |err| switch (err) {
        error.SemanticAnalyzeFail => {
            out_errors_ptr.* = errors.ptr;
            out_errors_len.* = errors.len;
            return Error.CCompileErrors;
        },
        error.OutOfMemory => return Error.OutOfMemory,
    };
    return Error.None;
}

export fn stage2_free_clang_errors(errors_ptr: [*]translate_c.ClangErrMsg, errors_len: usize) void {
    translate_c.freeErrors(errors_ptr[0..errors_len]);
}

// Renders a parsed tree to a C FILE*, translating render errors to ABI codes.
export fn stage2_render_ast(tree: *ast.Tree, output_file: *FILE) Error {
    const c_out_stream = &std.io.COutStream.init(output_file).stream;
    _ = std.zig.render(std.heap.c_allocator, c_out_stream, tree) catch |e| switch (e) {
        error.WouldBlock => unreachable, // stage1 opens stuff in exclusively blocking mode
        error.SystemResources => return Error.SystemResources,
        error.OperationAborted => return Error.OperationAborted,
        error.BrokenPipe => return Error.BrokenPipe,
        error.DiskQuota => return Error.DiskQuota,
        error.FileTooBig => return Error.FileTooBig,
        error.NoSpaceLeft => return Error.NoSpaceLeft,
        error.AccessDenied => return Error.AccessDenied,
        error.OutOfMemory => return Error.OutOfMemory,
        error.Unexpected => return Error.Unexpected,
        error.InputOutput => return Error.FileSystem,
    };
    return Error.None;
}

// TODO: just use the actual self-hosted zig fmt. Until https://github.com/ziglang/zig/issues/2377,
// we use a blocking implementation.
// C entry point for `zig fmt`; returns 0 on success, -1 on failure.
export fn stage2_fmt(argc: c_int, argv: [*]const [*:0]const u8) c_int {
    if (std.debug.runtime_safety) {
        // In safe builds, crash with a trace instead of hiding the error.
        fmtMain(argc, argv) catch unreachable;
    } else {
        fmtMain(argc, argv) catch |e| {
            std.debug.warn("{}\n", .{@errorName(e)});
            return -1;
        };
    }
    return 0;
}

fn fmtMain(argc: c_int, argv: [*]const [*:0]const u8) !void {
    const allocator = std.heap.c_allocator;
    var args_list = std.ArrayList([]const u8).init(allocator);
    const argc_usize = @intCast(usize, argc);
    var arg_i: usize = 0;
    while (arg_i < argc_usize) : (arg_i += 1) {
        try args_list.append(mem.toSliceConst(u8, argv[arg_i]));
    }

    // Initialize the file-level stream globals used by the helpers below.
    stdout = &std.io.getStdOut().outStream().stream;
    stderr_file = std.io.getStdErr();
    stderr = &stderr_file.outStream().stream;

    // Drop the first two args (binary name and the subcommand).
    const args = args_list.toSliceConst()[2..];

    var color: errmsg.Color = .Auto;
    var stdin_flag: bool = false;
    var check_flag: bool = false;
    var input_files = ArrayList([]const u8).init(allocator);

    {
        var i: usize = 0;
        while (i < args.len) : (i += 1) {
            const arg = args[i];
            if (mem.startsWith(u8, arg, "-")) {
                if (mem.eql(u8, arg, "--help")) {
                    try stdout.write(self_hosted_main.usage_fmt);
                    process.exit(0);
                } else if (mem.eql(u8, arg, "--color")) {
                    if (i + 1 >= args.len) {
                        try stderr.write("expected [auto|on|off] after --color\n");
                        process.exit(1);
                    }
                    i += 1;
                    const next_arg = args[i];
                    if (mem.eql(u8, next_arg, "auto")) {
                        color = .Auto;
                    } else if (mem.eql(u8, next_arg, "on")) {
                        color = .On;
                    } else if (mem.eql(u8, next_arg, "off")) {
                        color = .Off;
                    } else {
                        try stderr.print("expected [auto|on|off] after --color, found '{}'\n", .{next_arg});
                        process.exit(1);
                    }
                } else if (mem.eql(u8, arg, "--stdin")) {
                    stdin_flag = true;
                } else if (mem.eql(u8, arg, "--check")) {
                    check_flag = true;
                } else {
                    try stderr.print("unrecognized parameter: '{}'", .{arg});
                    process.exit(1);
                }
            } else {
                try input_files.append(arg);
            }
        }
    }

    if (stdin_flag) {
        if (input_files.len != 0) {
            try stderr.write("cannot use --stdin with positional arguments\n");
            process.exit(1);
        }

        const stdin_file = io.getStdIn();
        var stdin = stdin_file.inStream();
        const source_code = try stdin.stream.readAllAlloc(allocator, self_hosted_main.max_src_size);
        defer allocator.free(source_code);

        const tree = std.zig.parse(allocator, source_code) catch |err| {
            try stderr.print("error parsing stdin: {}\n", .{err});
            process.exit(1);
        };
        defer tree.deinit();

        var error_it = tree.errors.iterator(0);
        while (error_it.next()) |parse_error| {
            try printErrMsgToFile(allocator, parse_error, tree, "<stdin>", stderr_file, color);
        }
        if (tree.errors.len != 0) {
            process.exit(1);
        }
        if (check_flag) {
            // --check: render to a null stream and exit 1 iff formatting would change anything.
            const anything_changed = try std.zig.render(allocator, io.null_out_stream, tree);
            const code = if (anything_changed) @as(u8, 1) else @as(u8, 0);
            process.exit(code);
        }

        _ = try std.zig.render(allocator, stdout, tree);
        return;
    }

    if (input_files.len == 0) {
        try stderr.write("expected at least one source file argument\n");
        process.exit(1);
    }

    var fmt = Fmt{
        .seen = Fmt.SeenMap.init(allocator),
        .any_error = false,
        .color = color,
        .allocator = allocator,
    };

    for (input_files.toSliceConst()) |file_path| {
        try fmtPath(&fmt, file_path, check_flag);
    }
    if (fmt.any_error) {
        process.exit(1);
    }
}

// Errors fmtPath can surface in addition to file-open errors.
const FmtError = error{
    SystemResources,
    OperationAborted,
    IoPending,
    BrokenPipe,
    Unexpected,
    WouldBlock,
    FileClosed,
    DestinationAddressRequired,
    DiskQuota,
    FileTooBig,
    InputOutput,
    NoSpaceLeft,
    AccessDenied,
    OutOfMemory,
    RenameAcrossMountPoints,
    ReadOnlyFileSystem,
    LinkQuotaExceeded,
    FileBusy,
} || fs.File.OpenError;

// Formats one path; directories are walked recursively for .zig files.
fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool) FmtError!void {
    // Skip paths we have already visited (guards against duplicates).
    if (fmt.seen.exists(file_path)) return;
    try fmt.seen.put(file_path);

    const source_code = io.readFileAlloc(fmt.allocator, file_path) catch |err| switch (err) {
        error.IsDir, error.AccessDenied => {
            // TODO make event based (and dir.next())
            var dir = try fs.cwd().openDirList(file_path);
            defer dir.close();

            var dir_it = dir.iterate();

            while (try dir_it.next()) |entry| {
                if (entry.kind == .Directory or mem.endsWith(u8, entry.name, ".zig")) {
                    const full_path = try
fs.path.join(fmt.allocator, &[_][]const u8{ file_path, entry.name });
                    try fmtPath(fmt, full_path, check_mode);
                }
            }
            return;
        },
        else => {
            // TODO lock stderr printing
            try stderr.print("unable to open '{}': {}\n", .{ file_path, err });
            fmt.any_error = true;
            return;
        },
    };
    defer fmt.allocator.free(source_code);

    const tree = std.zig.parse(fmt.allocator, source_code) catch |err| {
        try stderr.print("error parsing file '{}': {}\n", .{ file_path, err });
        fmt.any_error = true;
        return;
    };
    defer tree.deinit();

    var error_it = tree.errors.iterator(0);
    while (error_it.next()) |parse_error| {
        try printErrMsgToFile(fmt.allocator, parse_error, tree, file_path, stderr_file, fmt.color);
    }
    if (tree.errors.len != 0) {
        fmt.any_error = true;
        return;
    }

    if (check_mode) {
        // --check: report the path and fail, but do not rewrite the file.
        const anything_changed = try std.zig.render(fmt.allocator, io.null_out_stream, tree);
        if (anything_changed) {
            try stderr.print("{}\n", .{file_path});
            fmt.any_error = true;
        }
    } else {
        // Atomic write: the file is only replaced when finish() is called.
        const baf = try io.BufferedAtomicFile.create(fmt.allocator, file_path);
        defer baf.destroy();

        const anything_changed = try std.zig.render(fmt.allocator, baf.stream(), tree);
        if (anything_changed) {
            try stderr.print("{}\n", .{file_path});
            try baf.finish();
        }
    }
}

// Shared state for a single `zig fmt` invocation.
const Fmt = struct {
    // Paths already formatted (avoids duplicates).
    seen: SeenMap,
    // Set when any file failed; drives the process exit code.
    any_error: bool,
    color: errmsg.Color,
    allocator: *mem.Allocator,

    const SeenMap = std.BufSet;
};

// Prints one parse error as "path:line:col: error: ..."; when color is on,
// also echoes the offending source line with a caret/tilde underline.
fn printErrMsgToFile(
    allocator: *mem.Allocator,
    parse_error: *const ast.Error,
    tree: *ast.Tree,
    path: []const u8,
    file: fs.File,
    color: errmsg.Color,
) !void {
    const color_on = switch (color) {
        .Auto => file.isTty(),
        .On => true,
        .Off => false,
    };
    const lok_token = parse_error.loc();
    const span = errmsg.Span{
        .first = lok_token,
        .last = lok_token,
    };

    const first_token = tree.tokens.at(span.first);
    const last_token = tree.tokens.at(span.last);
    const start_loc = tree.tokenLocationPtr(0, first_token);
    const end_loc = tree.tokenLocationPtr(first_token.end, last_token);

    // Render the message into a temporary buffer first.
    var text_buf = try std.Buffer.initSize(allocator, 0);
    var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
    try parse_error.render(&tree.tokens, out_stream);
    const text = text_buf.toOwnedSlice();

    const stream = &file.outStream().stream;
    try stream.print("{}:{}:{}: error: {}\n", .{ path, start_loc.line + 1, start_loc.column + 1, text });

    if (!color_on) return;

    // Print \r and \t as one space each so that column counts line up
    for (tree.source[start_loc.line_start..start_loc.line_end]) |byte| {
        try stream.writeByte(switch (byte) {
            '\r', '\t' => ' ',
            else => byte,
        });
    }
    try stream.writeByte('\n');
    try stream.writeByteNTimes(' ', start_loc.column);
    try stream.writeByteNTimes('~', last_token.end - first_token.start);
    try stream.writeByte('\n');
}

// Heap-allocates a .d-file tokenizer and hands an opaque handle to C.
export fn stage2_DepTokenizer_init(input: [*]const u8, len: usize) stage2_DepTokenizer {
    const t = std.heap.c_allocator.create(DepTokenizer) catch @panic("failed to create .d tokenizer");
    t.* = DepTokenizer.init(std.heap.c_allocator, input[0..len]);
    return stage2_DepTokenizer{
        .handle = t,
    };
}

export fn stage2_DepTokenizer_deinit(self: *stage2_DepTokenizer) void {
    self.handle.deinit();
}

// Returns the next token; .error_ carries the tokenizer's error text,
// .null_ signals end of input. Token text lives in the tokenizer's arena.
export fn stage2_DepTokenizer_next(self: *stage2_DepTokenizer) stage2_DepNextResult {
    const otoken = self.handle.next() catch {
        const textz = std.Buffer.init(&self.handle.arena.allocator, self.handle.error_text) catch @panic("failed to create .d tokenizer error text");
        return stage2_DepNextResult{
            .type_id = .error_,
            .textz = textz.toSlice().ptr,
        };
    };
    const token = otoken orelse {
        return stage2_DepNextResult{
            .type_id = .null_,
            .textz = undefined,
        };
    };
    const textz = std.Buffer.init(&self.handle.arena.allocator, token.bytes) catch @panic("failed to create .d tokenizer token text");
    return stage2_DepNextResult{
        .type_id = switch (token.id) {
            .target => .target,
            .prereq => .prereq,
        },
        .textz = textz.toSlice().ptr,
    };
}

const stage2_DepTokenizer = extern struct {
    handle: *DepTokenizer,
};

const stage2_DepNextResult = extern struct {
    type_id: TypeId,

    // when type_id == error --> error text
    // when type_id
// == null --> undefined
    // when type_id == target --> target pathname
    // when type_id == prereq --> prereq pathname
    textz: [*]const u8,

    const TypeId = extern enum {
        error_,
        null_,
        target,
        prereq,
    };
};

// ABI warning
export fn stage2_attach_segfault_handler() void {
    if (std.debug.runtime_safety and std.debug.have_segfault_handling_support) {
        std.debug.attachSegfaultHandler();
    }
}

// ABI warning
export fn stage2_progress_create() *std.Progress {
    const ptr = std.heap.c_allocator.create(std.Progress) catch @panic("out of memory");
    ptr.* = std.Progress{};
    return ptr;
}

// ABI warning
export fn stage2_progress_destroy(progress: *std.Progress) void {
    std.heap.c_allocator.destroy(progress);
}

// ABI warning
// estimated_total_items == 0 is treated as "unknown" (null).
export fn stage2_progress_start_root(
    progress: *std.Progress,
    name_ptr: [*]const u8,
    name_len: usize,
    estimated_total_items: usize,
) *std.Progress.Node {
    return progress.start(
        name_ptr[0..name_len],
        if (estimated_total_items == 0) null else estimated_total_items,
    ) catch @panic("timer unsupported");
}

// ABI warning
export fn stage2_progress_disable_tty(progress: *std.Progress) void {
    progress.terminal = null;
}

// ABI warning
// Child nodes are heap-allocated; freed in stage2_progress_end.
export fn stage2_progress_start(
    node: *std.Progress.Node,
    name_ptr: [*]const u8,
    name_len: usize,
    estimated_total_items: usize,
) *std.Progress.Node {
    const child_node = std.heap.c_allocator.create(std.Progress.Node) catch @panic("out of memory");
    child_node.* = node.start(
        name_ptr[0..name_len],
        if (estimated_total_items == 0) null else estimated_total_items,
    );
    child_node.activate();
    return child_node;
}

// ABI warning
export fn stage2_progress_end(node: *std.Progress.Node) void {
    node.end();
    // The root node lives inside the Progress itself; only heap-allocated
    // child nodes are destroyed here.
    if (&node.context.root != node) {
        std.heap.c_allocator.destroy(node);
    }
}

// ABI warning
export fn stage2_progress_complete_one(node: *std.Progress.Node) void {
    node.completeOne();
}

// ABI warning
export fn stage2_progress_update_node(node: *std.Progress.Node, done_count: usize, total_count: usize) void {
    node.completed_items = done_count;
    node.estimated_total_items = total_count;
    node.activate();
    node.context.maybeRefresh();
}

// Builds a CpuFeatures set from LLVM's CPU name and "+feat,-feat" feature string,
// starting from the arch baseline when the CPU name is unknown.
fn cpuFeaturesFromLLVM(
    arch: Target.Arch,
    llvm_cpu_name_z: ?[*:0]const u8,
    llvm_cpu_features_opt: ?[*:0]const u8,
) !Target.CpuFeatures {
    var result = arch.getBaselineCpuFeatures();

    if (llvm_cpu_name_z) |cpu_name_z| {
        const llvm_cpu_name = mem.toSliceConst(u8, cpu_name_z);

        for (arch.allCpus()) |cpu| {
            const this_llvm_name = cpu.llvm_name orelse continue;
            if (mem.eql(u8, this_llvm_name, llvm_cpu_name)) {
                // Here we use the non-dependencies-populated set,
                // so that subtracting features later in this function
                // affect the prepopulated set.
                result = Target.CpuFeatures{
                    .cpu = cpu,
                    .features = cpu.features,
                };
                break;
            }
        }
    }

    const all_features = arch.allFeaturesList();

    if (llvm_cpu_features_opt) |llvm_cpu_features| {
        var it = mem.tokenize(mem.toSliceConst(u8, llvm_cpu_features), ",");
        while (it.next()) |decorated_llvm_feat| {
            var op: enum {
                add,
                sub,
            } = undefined;
            var llvm_feat: []const u8 = undefined;
            if (mem.startsWith(u8, decorated_llvm_feat, "+")) {
                op = .add;
                llvm_feat = decorated_llvm_feat[1..];
            } else if (mem.startsWith(u8, decorated_llvm_feat, "-")) {
                op = .sub;
                llvm_feat = decorated_llvm_feat[1..];
            } else {
                return error.InvalidLlvmCpuFeaturesFormat;
            }
            for (all_features) |feature, index_usize| {
                const this_llvm_name = feature.llvm_name orelse continue;
                if (mem.eql(u8, llvm_feat, this_llvm_name)) {
                    const index = @intCast(Target.Cpu.Feature.Set.Index, index_usize);
                    switch (op) {
                        .add => result.features.addFeature(index),
                        .sub => result.features.removeFeature(index),
                    }
                    break;
                }
            }
        }
    }

    result.features.populateDependencies(all_features);
    return result;
}

// ABI warning
export fn stage2_cmd_targets(zig_triple: [*:0]const u8) c_int {
    cmdTargets(zig_triple) catch |err| {
        std.debug.warn("unable to list targets: {}\n", .{@errorName(err)});
        return -1;
    };
    return 0;
}

fn cmdTargets(zig_triple: [*:0]const u8) !void {
    var target = try Target.parse(mem.toSliceConst(u8, zig_triple));
target.Cross.cpu_features = blk: {
        const llvm = @import("llvm.zig");
        const llvm_cpu_name = llvm.GetHostCPUName();
        const llvm_cpu_features = llvm.GetNativeFeatures();
        break :blk try cpuFeaturesFromLLVM(target.Cross.arch, llvm_cpu_name, llvm_cpu_features);
    };
    return @import("print_targets.zig").cmdTargets(
        std.heap.c_allocator,
        &[0][]u8{},
        &std.io.getStdOut().outStream().stream,
        target,
    );
}

// C-ABI-facing bundle of CPU feature data: the parsed feature set plus
// pre-rendered strings (LLVM feature string, builtin.zig snippet, cache hash).
const Stage2CpuFeatures = struct {
    allocator: *mem.Allocator,
    cpu_features: Target.CpuFeatures,

    // "+feat,-feat,..." string in LLVM's format; null when unavailable.
    llvm_features_str: ?[*:0]const u8,

    // Zig source snippet describing the feature set (for builtin.zig).
    builtin_str: [:0]const u8,
    // Key material for the compiler cache: cpu name + raw feature bits.
    cache_hash: [:0]const u8,

    const Self = @This();

    // Detects the host CPU via LLVM and builds the bundle from it.
    fn createFromNative(allocator: *mem.Allocator) !*Self {
        const arch = Target.current.getArch();
        const llvm = @import("llvm.zig");
        const llvm_cpu_name = llvm.GetHostCPUName();
        const llvm_cpu_features = llvm.GetNativeFeatures();
        const cpu_features = try cpuFeaturesFromLLVM(arch, llvm_cpu_name, llvm_cpu_features);
        return createFromCpuFeatures(allocator, arch, cpu_features);
    }

    fn createFromCpuFeatures(
        allocator: *mem.Allocator,
        arch: Target.Arch,
        cpu_features: Target.CpuFeatures,
    ) !*Self {
        const self = try allocator.create(Self);
        errdefer allocator.destroy(self);

        const cache_hash = try std.fmt.allocPrint0(allocator, "{}\n{}", .{
            cpu_features.cpu.name,
            cpu_features.features.asBytes(),
        });
        errdefer allocator.free(cache_hash);

        const generic_arch_name = arch.genericName();
        var builtin_str_buffer = try std.Buffer.allocPrint(allocator,
            \\CpuFeatures{{
            \\ .cpu = &Target.{}.cpu.{},
            \\ .features = Target.{}.featureSet(&[_]Target.{}.Feature{{
            \\
        , .{
            generic_arch_name,
            cpu_features.cpu.name,
            generic_arch_name,
            generic_arch_name,
        });
        defer builtin_str_buffer.deinit();

        var llvm_features_buffer = try std.Buffer.initSize(allocator, 0);
        defer llvm_features_buffer.deinit();

        // Emit every feature into the LLVM string ("+" enabled / "-" disabled),
        // and only the enabled ones into the builtin.zig snippet.
        for (arch.allFeaturesList()) |feature, index_usize| {
            const index = @intCast(Target.Cpu.Feature.Set.Index, index_usize);
            const is_enabled = cpu_features.features.isEnabled(index);

            if (feature.llvm_name) |llvm_name| {
                const plus_or_minus = "-+"[@boolToInt(is_enabled)];
                try llvm_features_buffer.appendByte(plus_or_minus);
                try llvm_features_buffer.append(llvm_name);
                try llvm_features_buffer.append(",");
            }

            if (is_enabled) {
                // TODO some kind of "zig identifier escape" function rather than
                // unconditionally using @"" syntax
                try builtin_str_buffer.append(" .@\"");
                try builtin_str_buffer.append(feature.name);
                try builtin_str_buffer.append("\",\n");
            }
        }

        try builtin_str_buffer.append(
            \\ }),
            \\};
            \\
        );

        // Drop the trailing comma appended after the last feature.
        // NOTE(review): assumes at least one feature had an llvm_name; an empty
        // buffer would trip this assert — confirm against supported arches.
        assert(mem.endsWith(u8, llvm_features_buffer.toSliceConst(), ","));
        llvm_features_buffer.shrink(llvm_features_buffer.len() - 1);

        self.* = Self{
            .allocator = allocator,
            .cpu_features = cpu_features,
            .llvm_features_str = llvm_features_buffer.toOwnedSlice().ptr,
            .builtin_str = builtin_str_buffer.toOwnedSlice(),
            .cache_hash = cache_hash,
        };
        return self;
    }

    fn destroy(self: *Self) void {
        self.allocator.free(self.cache_hash);
        self.allocator.free(self.builtin_str);
        // TODO if (self.llvm_features_str) |llvm_features_str| self.allocator.free(llvm_features_str);
        self.allocator.destroy(self);
    }
};

// ABI warning
// Parses triple/cpu/features strings from C; maps every parse failure
// onto the extern Error enum.
export fn stage2_cpu_features_parse(
    result: **Stage2CpuFeatures,
    zig_triple: ?[*:0]const u8,
    cpu_name: ?[*:0]const u8,
    cpu_features: ?[*:0]const u8,
) Error {
    result.* = stage2ParseCpuFeatures(zig_triple, cpu_name, cpu_features) catch |err| switch (err) {
        error.OutOfMemory => return .OutOfMemory,
        error.UnknownArchitecture => return .UnknownArchitecture,
        error.UnknownSubArchitecture => return .UnknownSubArchitecture,
        error.UnknownOperatingSystem => return .UnknownOperatingSystem,
        error.UnknownApplicationBinaryInterface => return .UnknownApplicationBinaryInterface,
        error.MissingOperatingSystem => return .MissingOperatingSystem,
        error.MissingArchitecture => return .MissingArchitecture,
        error.InvalidLlvmCpuFeaturesFormat => return .InvalidLlvmCpuFeaturesFormat,
        error.InvalidCpuFeatures => return .InvalidCpuFeatures,
    };
    return .None;
}

fn stage2ParseCpuFeatures(
    zig_triple_oz: ?[*:0]const u8,
cpu_name_oz: ?[*:0]const u8,
    cpu_features_oz: ?[*:0]const u8,
) !*Stage2CpuFeatures {
    // No triple means native: detect everything from the host via LLVM.
    const zig_triple_z = zig_triple_oz orelse return Stage2CpuFeatures.createFromNative(std.heap.c_allocator);
    const target = try Target.parse(mem.toSliceConst(u8, zig_triple_z));
    const arch = target.Cross.arch;

    // Unknown CPU names print the available list and exit (CLI behavior).
    const cpu = if (cpu_name_oz) |cpu_name_z| blk: {
        const cpu_name = mem.toSliceConst(u8, cpu_name_z);

        break :blk arch.parseCpu(cpu_name) catch |err| switch (err) {
            error.UnknownCpu => {
                std.debug.warn("Unknown CPU: '{}'\nAvailable CPUs for architecture '{}':\n", .{
                    cpu_name,
                    @tagName(arch),
                });
                for (arch.allCpus()) |cpu| {
                    std.debug.warn(" {}\n", .{cpu.name});
                }
                process.exit(1);
            },
            else => |e| return e,
        };
    } else target.Cross.cpu_features.cpu;

    // Same pattern for an explicit feature list.
    var set = if (cpu_features_oz) |cpu_features_z| blk: {
        const cpu_features = mem.toSliceConst(u8, cpu_features_z);
        break :blk arch.parseCpuFeatureSet(cpu, cpu_features) catch |err| switch (err) {
            error.UnknownCpuFeature => {
                std.debug.warn(
                    \\Unknown CPU features specified.
                    \\Available CPU features for architecture '{}':
                    \\
                , .{@tagName(arch)});
                for (arch.allFeaturesList()) |feature| {
                    std.debug.warn(" {}\n", .{feature.name});
                }
                process.exit(1);
            },
            else => |e| return e,
        };
    } else cpu.features;

    if (arch.subArchFeature()) |index| {
        set.addFeature(index);
    }
    set.populateDependencies(arch.allFeaturesList());

    return Stage2CpuFeatures.createFromCpuFeatures(std.heap.c_allocator, arch, .{
        .cpu = cpu,
        .features = set,
    });
}

// ABI warning
export fn stage2_cpu_features_get_cache_hash(
    cpu_features: *const Stage2CpuFeatures,
    ptr: *[*:0]const u8,
    len: *usize,
) void {
    ptr.* = cpu_features.cache_hash.ptr;
    len.* = cpu_features.cache_hash.len;
}

// ABI warning
export fn stage2_cpu_features_get_builtin_str(
    cpu_features: *const Stage2CpuFeatures,
    ptr: *[*:0]const u8,
    len: *usize,
) void {
    ptr.* = cpu_features.builtin_str.ptr;
    len.* = cpu_features.builtin_str.len;
}

// ABI warning
export fn stage2_cpu_features_get_llvm_cpu(cpu_features: *const Stage2CpuFeatures) ?[*:0]const u8 {
    return if (cpu_features.cpu_features.cpu.llvm_name) |s| s.ptr else null;
}

// ABI warning
export fn stage2_cpu_features_get_llvm_features(cpu_features: *const Stage2CpuFeatures) ?[*:0]const u8 {
    return cpu_features.llvm_features_str;
}
// src-self-hosted/stage1.zig
const assert = @import("std").debug.assert;

// Zig bindings for the cgltf C library (glTF 2.0 parser).
// The extern structs/enums below mirror cgltf's C layout; do not reorder fields.
pub const CString = [*:0]const u8;
pub const MutCString = [*:0]u8;
// cgltf_bool: nonzero means true.
pub const Bool32 = i32;

pub const FileType = extern enum {
    invalid,
    gltf,
    glb,
};

pub const Result = extern enum {
    success,
    data_too_short,
    unknown_format,
    invalid_json,
    invalid_gltf,
    invalid_options,
    file_not_found,
    io_error,
    out_of_memory,
    legacy_gltf,
};

// Optional custom allocator callbacks; null fields fall back to cgltf defaults.
pub const MemoryOptions = extern struct {
    alloc: ?fn (?*c_void, usize) callconv(.C) ?*c_void = null,
    free: ?fn (?*c_void, ?*c_void) callconv(.C) void = null,
    user_data: ?*c_void = null,
};

// Optional custom file I/O callbacks; null fields fall back to cgltf defaults.
pub const FileOptions = extern struct {
    read: ?fn (*const MemoryOptions, *const FileOptions, CString, *usize, *(?*c_void)) callconv(.C) Result = null,
    release: ?fn (*const MemoryOptions, *const FileOptions, ?*c_void) callconv(.C) void = null,
    user_data: ?*c_void = null,
};

pub const Options = extern struct {
    // .invalid means auto-detect from the data.
    type: FileType = .invalid,
    json_token_count: usize = 0,
    memory: MemoryOptions = MemoryOptions{},
    file: FileOptions = FileOptions{},
};

pub const BufferViewType = extern enum {
    invalid,
    indices,
    vertices,
};

pub const AttributeType = extern enum {
    invalid,
    position,
    normal,
    tangent,
    texcoord,
    color,
    joints,
    weights,
};

pub const ComponentType = extern enum {
    invalid,
    r_8,
    r_8u,
    r_16,
    r_16u,
    r_32u,
    r_32f,
};

pub const Type = extern enum {
    invalid,
    scalar,
    vec2,
    vec3,
    vec4,
    mat2,
    mat3,
    mat4,
};

pub const PrimitiveType = extern enum {
    points,
    lines,
    line_loop,
    line_strip,
    triangles,
    triangle_strip,
    triangle_fan,
};

pub const AlphaMode = extern enum {
    opaqueMode,
    mask,
    blend,
};

pub const AnimationPathType = extern enum {
    invalid,
    translation,
    rotation,
    scale,
    weights,
};

pub const InterpolationType = extern enum {
    linear,
    step,
    cubic_spline,
};

pub const CameraType = extern enum {
    invalid,
    perspective,
    orthographic,
};

pub const LightType = extern enum {
    invalid,
    directional,
    point,
    spot,
};

// Byte range of a raw "extras" JSON blob within the document's json buffer.
pub const Extras = extern struct {
    start_offset: usize,
    end_offset: usize,
};

pub const Buffer = extern struct {
    size: usize,
    uri: ?MutCString,
    data: ?*c_void,
    extras: Extras,
};

pub const BufferView = extern struct {
    buffer: *Buffer,
    offset: usize,
    size: usize,
    stride: usize,
    type: BufferViewType,
    extras: Extras,
};

pub const AccessorSparse = extern struct {
    count: usize,
    indices_buffer_view: ?*BufferView,
    indices_byte_offset: usize,
    indices_component_type: ComponentType,
    values_buffer_view: ?*BufferView,
    values_byte_offset: usize,
    extras: Extras,
    indices_extras: Extras,
    values_extras: Extras,
};

pub const Accessor = extern struct {
    component_type: ComponentType,
    normalized: Bool32,
    type: Type,
    offset: usize,
    count: usize,
    stride: usize,
    buffer_view: ?*BufferView,
    has_min: Bool32,
    min: [16]f32,
    has_max: Bool32,
    max: [16]f32,
    is_sparse: Bool32,
    sparse: AccessorSparse, // valid only if is_sparse != 0
    extras: Extras,

    // Number of floats needed to unpack the whole accessor.
    pub inline fn unpackFloatsCount(self: *const Accessor) usize {
        return cgltf_accessor_unpack_floats(self, null, 0);
    }
    // Unpacks into outBuffer; returns the sub-slice actually written.
    pub inline fn unpackFloats(self: *const Accessor, outBuffer: []f32) []f32 {
        const actualCount = cgltf_accessor_unpack_floats(self, outBuffer.ptr, outBuffer.len);
        return outBuffer[0..actualCount];
    }
    // Reads element `index`; outFloats.len must equal numComponents(self.type).
    pub inline fn readFloat(self: *const Accessor, index: usize, outFloats: []f32) bool {
        assert(outFloats.len == numComponents(self.type));
        const result = cgltf_accessor_read_float(self, index, outFloats.ptr, outFloats.len);
        return result != 0;
    }
    // Reads element `index`; outInts.len must equal numComponents(self.type).
    pub inline fn readUint(self: *const Accessor, index: usize, outInts: []u32) bool {
        assert(outInts.len == numComponents(self.type));
        const result = cgltf_accessor_read_uint(self, index, outInts.ptr, outInts.len);
        return result != 0;
    }
    pub inline fn readIndex(self: *const Accessor, index: usize) usize {
        return cgltf_accessor_read_index(self, index);
    }
};

pub const Attribute = extern struct {
    name: ?MutCString,
    type: AttributeType,
    index: i32,
    data: *Accessor,
};

pub const Image = extern struct {
    name: ?MutCString,
    uri: ?MutCString,
    buffer_view: ?*BufferView,
    mime_type: ?MutCString,
    extras: Extras,
};

pub const Sampler = extern
struct {
    mag_filter: i32,
    min_filter: i32,
    wrap_s: i32,
    wrap_t: i32,
    extras: Extras,
};

pub const Texture = extern struct {
    name: ?MutCString,
    image: ?*Image,
    sampler: ?*Sampler,
    extras: Extras,
};

pub const TextureTransform = extern struct {
    offset: [2]f32,
    rotation: f32,
    scale: [2]f32,
    texcoord: i32,
};

pub const TextureView = extern struct {
    texture: ?*Texture,
    texcoord: i32,
    scale: f32,
    has_transform: Bool32,
    transform: TextureTransform,
    extras: Extras,
};

pub const PbrMetallicRoughness = extern struct {
    base_color_texture: TextureView,
    metallic_roughness_texture: TextureView,
    base_color_factor: [4]f32,
    metallic_factor: f32,
    roughness_factor: f32,
    extras: Extras,
};

pub const PbrSpecularGlossiness = extern struct {
    diffuse_texture: TextureView,
    specular_glossiness_texture: TextureView,
    diffuse_factor: [4]f32,
    specular_factor: [3]f32,
    glossiness_factor: f32,
};

pub const Material = extern struct {
    name: ?MutCString,
    // has_* flags tell which of the two PBR models below is populated.
    has_pbr_metallic_roughness: Bool32,
    has_pbr_specular_glossiness: Bool32,
    pbr_metallic_roughness: PbrMetallicRoughness,
    pbr_specular_glossiness: PbrSpecularGlossiness,
    normal_texture: TextureView,
    occlusion_texture: TextureView,
    emissive_texture: TextureView,
    emissive_factor: [3]f32,
    alpha_mode: AlphaMode,
    alpha_cutoff: f32,
    double_sided: Bool32,
    unlit: Bool32,
    extras: Extras,
};

pub const MorphTarget = extern struct {
    attributes: [*]Attribute,
    attributes_count: usize,
};

pub const Primitive = extern struct {
    type: PrimitiveType,
    indices: ?*Accessor,
    material: ?*Material,
    attributes: [*]Attribute,
    attributes_count: usize,
    targets: [*]MorphTarget,
    targets_count: usize,
    extras: Extras,
};

pub const Mesh = extern struct {
    name: ?MutCString,
    primitives: [*]Primitive,
    primitives_count: usize,
    weights: [*]f32,
    weights_count: usize,
    target_names: [*]MutCString,
    target_names_count: usize,
    extras: Extras,
};

pub const Skin = extern struct {
    name: ?MutCString,
    joints: [*]*Node,
    joints_count: usize,
    skeleton: ?*Node,
    inverse_bind_matrices: ?*Accessor,
    extras: Extras,
};

pub const CameraPerspective = extern struct {
    aspect_ratio: f32,
    yfov: f32,
    zfar: f32,
    znear: f32,
    extras: Extras,
};

pub const CameraOrthographic = extern struct {
    xmag: f32,
    ymag: f32,
    zfar: f32,
    znear: f32,
    extras: Extras,
};

pub const Camera = extern struct {
    name: ?MutCString,
    // `type` selects which union member is valid.
    type: CameraType,
    data: extern union {
        perspective: CameraPerspective,
        orthographic: CameraOrthographic,
    },
    extras: Extras,
};

pub const Light = extern struct {
    name: ?MutCString,
    color: [3]f32,
    intensity: f32,
    type: LightType,
    range: f32,
    spot_inner_cone_angle: f32,
    spot_outer_cone_angle: f32,
};

pub const Node = extern struct {
    name: ?MutCString,
    parent: ?*Node,
    children: [*]*Node,
    children_count: usize,
    skin: ?*Skin,
    mesh: ?*Mesh,
    camera: ?*Camera,
    light: ?*Light,
    weights: [*]f32,
    weights_count: usize,
    // has_* flags tell which of the TRS/matrix fields below were present.
    has_translation: Bool32,
    has_rotation: Bool32,
    has_scale: Bool32,
    has_matrix: Bool32,
    translation: [3]f32,
    rotation: [4]f32,
    scale: [3]f32,
    matrix: [16]f32,
    extras: Extras,

    // 4x4 transform relative to the parent node.
    pub inline fn transformLocal(self: *const Node) [16]f32 {
        var transform: [16]f32 = undefined;
        cgltf_node_transform_local(self, &transform);
        return transform;
    }
    // 4x4 transform composed through all ancestors.
    pub inline fn transformWorld(self: *const Node) [16]f32 {
        var transform: [16]f32 = undefined;
        cgltf_node_transform_world(self, &transform);
        return transform;
    }
};

pub const Scene = extern struct {
    name: ?MutCString,
    nodes: [*]*Node,
    nodes_count: usize,
    extras: Extras,
};

pub const AnimationSampler = extern struct {
    input: *Accessor,
    output: *Accessor,
    interpolation: InterpolationType,
    extras: Extras,
};

pub const AnimationChannel = extern struct {
    sampler: *AnimationSampler,
    target_node: ?*Node,
    target_path: AnimationPathType,
    extras: Extras,
};

pub const Animation = extern struct {
    name: ?MutCString,
    samplers: [*]AnimationSampler,
    samplers_count: usize,
    channels: [*]AnimationChannel,
    channels_count: usize,
    extras: Extras,
};

pub const Asset = extern struct {
    copyright: ?MutCString,
    generator: ?MutCString,
    version: ?MutCString,
    min_version:
?MutCString,
    extras: Extras,
};

// Root of a parsed glTF document; mirrors cgltf_data exactly.
pub const Data = extern struct {
    file_type: FileType,
    file_data: ?*c_void,

    asset: Asset,

    meshes: [*]Mesh,
    meshes_count: usize,

    materials: [*]Material,
    materials_count: usize,

    accessors: [*]Accessor,
    accessors_count: usize,

    buffer_views: [*]BufferView,
    buffer_views_count: usize,

    buffers: [*]Buffer,
    buffers_count: usize,

    images: [*]Image,
    images_count: usize,

    textures: [*]Texture,
    textures_count: usize,

    samplers: [*]Sampler,
    samplers_count: usize,

    skins: [*]Skin,
    skins_count: usize,

    cameras: [*]Camera,
    cameras_count: usize,

    lights: [*]Light,
    lights_count: usize,

    nodes: [*]Node,
    nodes_count: usize,

    scenes: [*]Scene,
    scenes_count: usize,

    scene: ?*Scene,

    animations: [*]Animation,
    animations_count: usize,

    extras: Extras,

    extensions_used: [*]MutCString,
    extensions_used_count: usize,

    extensions_required: [*]MutCString,
    extensions_required_count: usize,

    json: [*]const u8,
    json_size: usize,

    bin: ?*const c_void,
    bin_size: usize,

    memory: MemoryOptions,
    file: FileOptions,
};

/// Maps a cgltf Result to the corresponding Zig error; `.success` is not an
/// error. Shared by parse/parseFile/loadBuffers so the three call sites cannot
/// drift apart (previously this switch was duplicated verbatim in each).
fn resultToError(result: Result) !void {
    switch (result) {
        .success => return,
        .data_too_short => return error.CgltfDataTooShort,
        .unknown_format => return error.CgltfUnknownFormat,
        .invalid_json => return error.CgltfInvalidJson,
        .invalid_gltf => return error.CgltfInvalidGltf,
        .invalid_options => return error.CgltfInvalidOptions,
        .file_not_found => return error.CgltfFileNotFound,
        .io_error => return error.CgltfIOError,
        .out_of_memory => return error.OutOfMemory,
        .legacy_gltf => return error.CgltfLegacyGltf,
        else => unreachable,
    }
}

/// Parses an in-memory glTF/GLB document. Caller must release it with free().
pub inline fn parse(options: *const Options, data: []const u8) !*Data {
    var out_data: ?*Data = undefined;
    const result = cgltf_parse(options, data.ptr, data.len, &out_data);
    try resultToError(result);
    return out_data.?;
}

/// Parses a glTF/GLB file from disk. Caller must release it with free().
pub inline fn parseFile(options: Options, path: CString) !*Data {
    var out_data: ?*Data = undefined;
    const result = cgltf_parse_file(&options, path, &out_data);
    try resultToError(result);
    return out_data.?;
}

/// Loads all external/embedded buffers referenced by `data`, resolving URIs
/// relative to `gltf_path`.
pub inline fn loadBuffers(options: Options, data: *Data, gltf_path: CString) !void {
    const result = cgltf_load_buffers(&options, data, gltf_path);
    try resultToError(result);
}

/// Decodes a base64 payload into `size` bytes allocated via `options.memory`.
/// Only io_error/out_of_memory can occur here, hence the narrower mapping.
pub inline fn loadBufferBase64(options: Options, size: usize, base64: []const u8) ![]u8 {
    assert(base64.len >= (size * 4 + 2) / 3);
    var out: ?*c_void = null;
    const result = cgltf_load_buffer_base64(&options, size, base64.ptr, &out);
    if (result == .success) return @ptrCast([*]u8, out.?)[0..size];
    switch (result) {
        .io_error => return error.CgltfIOError,
        .out_of_memory => return error.OutOfMemory,
        else => unreachable,
    }
}

pub inline fn validate(data: *Data) Result {
    return cgltf_validate(data);
}

pub inline fn free(data: *Data) void {
    cgltf_free(data);
}

/// Number of scalar components in one element of the given Type
/// (scalar/invalid fall through to 1).
pub fn numComponents(inType: Type) usize {
    // translated because we should at least try to inline this.
    return switch (inType) {
        .vec2 => 2,
        .vec3 => 3,
        .vec4 => 4,
        .mat2 => 4,
        .mat3 => 9,
        .mat4 => 16,
        else => 1,
    };
}

/// Size in bytes needed by copyExtrasJson for this extras blob.
pub inline fn copyExtrasJsonCount(data: *const Data, extras: *const Extras) usize {
    var size: usize = 0;
    var result = cgltf_copy_extras_json(data, extras, null, &size);
    assert(result == .success); // can only fail on invalid size ptr
    return size;
}

/// Copies the raw extras JSON into outBuffer; returns the written sub-slice.
pub inline fn copyExtrasJson(data: *const Data, extras: *const Extras, outBuffer: []u8) []u8 {
    var size: usize = outBuffer.len;
    var result = cgltf_copy_extras_json(data, extras, outBuffer.ptr, &size);
    assert(result == .success); // can only fail on invalid size ptr
    return outBuffer[0..size];
}

// Raw C entry points from cgltf.
pub extern fn cgltf_parse(options: [*c]const Options, data: ?*const c_void, size: usize, out_data: [*c]([*c]Data)) Result;
pub extern fn cgltf_parse_file(options: [*c]const Options, path: [*c]const u8, out_data: [*c]([*c]Data)) Result;
pub extern fn cgltf_load_buffers(options: [*c]const Options, data: [*c]Data, gltf_path: [*c]const u8) Result;
pub extern fn cgltf_load_buffer_base64(options: [*c]const Options, size: usize, base64: [*c]const u8, out_data: [*c](?*c_void)) Result;
pub extern fn cgltf_validate(data: [*c]Data) Result;
pub extern fn cgltf_free(data: [*c]Data) void;
pub extern fn cgltf_node_transform_local(node: [*c]const Node, out_matrix: [*c]f32) void;
pub extern fn cgltf_node_transform_world(node: [*c]const Node, out_matrix: [*c]f32) void;
pub extern fn cgltf_accessor_read_float(accessor: [*c]const Accessor, index: usize, out: [*c]f32, element_size: usize) Bool32;
pub extern fn cgltf_accessor_read_uint(accessor: [*c]const Accessor, index: usize, out: [*c]u32, element_size: usize) Bool32;
pub extern fn cgltf_accessor_read_index(accessor: [*c]const Accessor, index: usize) usize;
pub extern fn cgltf_num_components(type_0: Type) usize;
pub extern fn cgltf_accessor_unpack_floats(accessor: [*c]const Accessor, out: [*c]f32, float_count: usize) usize;
pub extern fn cgltf_copy_extras_json(data: [*c]const Data, extras: [*c]const Extras, dest: [*c]u8, dest_size: [*c]usize) Result;
// include/cgltf.zig
const std = @import("std");
const Tokenizer = @import("tokenize.zig").Tokenizer;
const Token = @import("tokenize.zig").Token;
const mem = std.mem;
const fs = std.fs;
const process = std.process;
const math = std.math;
const data = @import("data.zig");
const parseStringLiteral = std.zig.parseStringLiteral;
const elf = std.elf;
const assert = std.debug.assert;

/// Subcommands accepted on the command line; see dumpUsage for descriptions.
const Cmd = enum {
    exe,
    obj,
    dis,
    targets,
    tokenize,
};

/// State for one assembler run: the inputs, all collected diagnostics, and
/// the output image being laid out (sections, file offsets, map addresses).
const Assembly = struct {
    allocator: *mem.Allocator,
    input_files: []const []const u8,
    asm_files: []AsmFile,
    target: std.Target,
    errors: std.ArrayList(Error),
    /// Virtual address of `_start`; filled in while writing sections.
    entry_addr: ?u64 = null,
    /// null means: derive the name from the first input file's basename.
    output_file: ?[]const u8,
    file_offset: u64,
    /// Next virtual address at which to map a section.
    next_map_addr: u64 = 0x10000,
    sections: SectionTable,

    const SectionTable = std.StringHashMap(*Section);

    /// Points a diagnostic at a token within a specific source file.
    pub const SourceInfo = struct {
        token: Token,
        source: []const u8,
        file_name: []const u8,
    };

    /// One diagnostic. The parser appends these and returns
    /// error.ParseFailure; `render` turns them into messages later.
    const Error = union(enum) {
        unexpected_token: struct {
            source_info: SourceInfo,
        },
        unrecognized_directive: struct {
            source_info: SourceInfo,
        },
        unrecognized_instruction: struct {
            source_info: SourceInfo,
        },
        symbol_outside_section: struct {
            source_info: SourceInfo,
        },
        duplicate_symbol: struct {
            source_info: SourceInfo,
            other_symbol: Token,
        },
        bad_integer_literal: struct {
            source_info: SourceInfo,
        },
        instr_outside_symbol: struct {
            source_info: SourceInfo,
        },
        bad_string_literal: struct {
            source_info: SourceInfo,
            bad_index: usize,
        },
        bad_section_flag: struct {
            source_info: SourceInfo,
            bad_index: usize,
            bad_byte: u8,
        },
        too_many_args: struct {
            source_info: SourceInfo,
        },
        unknown_symbol: struct {
            source_info: SourceInfo,
        },

        /// Prints "file:line:col: " followed by `message` formatted with `args`.
        /// (pre-0.6 Zig: `var` parameter type and `...` varargs)
        fn printToStream(stream: var, comptime message: []const u8, source_info: SourceInfo, args: ...)
!void {
            const loc = tokenLocation(source_info.source, source_info.token);
            try stream.print(
                "{}:{}:{}: " ++ message,
                source_info.file_name,
                loc.line + 1,
                loc.column + 1,
                args,
            );
        }

        /// Renders this diagnostic to `stream` as "file:line:col: error: ...".
        fn render(self: Error, stream: var) !void {
            switch (self) {
                .unexpected_token => |info| {
                    try printToStream(
                        stream,
                        "error: unexpected token: {}\n",
                        info.source_info,
                        @tagName(info.source_info.token.id),
                    );
                },
                .unrecognized_directive => |info| {
                    const si = info.source_info;
                    try printToStream(
                        stream,
                        "error: unrecognized directive: {}\n",
                        si,
                        si.source[si.token.start..si.token.end],
                    );
                },
                .unrecognized_instruction => |info| {
                    const si = info.source_info;
                    try printToStream(
                        stream,
                        "error: instruction name or parameters do not match asm database: {}\n",
                        si,
                        si.source[si.token.start..si.token.end],
                    );
                },
                .symbol_outside_section => |info| {
                    const si = info.source_info;
                    try printToStream(
                        stream,
                        "error: symbol outside section: {}\n",
                        si,
                        si.source[si.token.start..si.token.end],
                    );
                },
                .bad_integer_literal => |info| {
                    const si = info.source_info;
                    try printToStream(
                        stream,
                        "error: invalid integer literal: {}\n",
                        si,
                        si.source[si.token.start..si.token.end],
                    );
                },
                .bad_string_literal => |info| {
                    // Column is shifted by bad_index to point at the offending byte.
                    const loc = tokenLocation(info.source_info.source, info.source_info.token);
                    try stream.print(
                        "{}:{}:{}: error: invalid byte in string literal\n",
                        info.source_info.file_name,
                        loc.line + 1,
                        loc.column + 1 + info.bad_index,
                    );
                },
                .bad_section_flag => |info| {
                    const loc = tokenLocation(info.source_info.source, info.source_info.token);
                    try stream.print(
                        "{}:{}:{}: error: invalid section flag: '{c}'\n",
                        info.source_info.file_name,
                        loc.line + 1,
                        loc.column + 1 + info.bad_index,
                        info.bad_byte,
                    );
                },
                .instr_outside_symbol => |info| {
                    const si = info.source_info;
                    try printToStream(
                        stream,
                        "error: instruction outside symbol\n",
                        si,
                    );
                },
                .duplicate_symbol => |info| {
                    const si = info.source_info;
                    const other_loc = tokenLocation(si.source, info.other_symbol);
                    try printToStream(
                        stream,
                        "error: duplicate symbol: {}\n" ++ "{}:{}:{}: note: original definition. \n",
                        si,
                        si.source[si.token.start..si.token.end],
                        si.file_name,
                        other_loc.line + 1,
                        other_loc.column + 1,
                    );
                },
                .too_many_args => |info| {
                    const si = info.source_info;
                    try printToStream(
                        stream,
                        "error: too many args\n",
                        si,
                    );
                },
                .unknown_symbol => |info| {
                    const si = info.source_info;
                    try printToStream(
                        stream,
                        "error: unknown symbol\n",
                        si,
                    );
                },
            }
        }
    };
};

/// An output section (e.g. "text", "rodata") and its computed layout.
const Section = struct {
    name: []const u8,
    /// Symbols in emission order.
    layout: std.ArrayList(*Symbol),
    alignment: u32,
    file_offset: u64,
    file_size: u64,
    virt_addr: u64,
    mem_size: u64,
    /// ELF program header flags (PF_R | PF_W | PF_X).
    flags: u32,
};

const Symbol = struct {
    /// `undefined` until a second pass when addresses are calculated.
    addr: u64,
    /// Starts at 0. Increments with instructions being added.
    size: u64,
    source_token: Token,
    name: []const u8,
    section: *Section,
    ops: std.ArrayList(PseudoOp),
    source_file: *AsmFile,
};

/// A matched entry from the asm database plus its parsed operands.
const Instruction = struct {
    props: *const data.Instruction,
    args: []Arg,
};

/// What a symbol's body is made of: encoded instructions or raw bytes (.ascii).
const PseudoOp = union(enum) {
    instruction: Instruction,
    data: []const u8,
};

const Location = struct {
    line: usize,
    column: usize,
    line_start: usize,
    line_end: usize,
};

/// Computes the 0-based line/column of `token` by scanning `source` from the
/// start; also records the bounds of the token's line for context display.
fn tokenLocation(source: []const u8, token: Token) Location {
    const start_index = 0;
    var loc = Location{
        .line = 0,
        .column = 0,
        .line_start = start_index,
        .line_end = source.len,
    };
    const token_start = token.start;
    for (source[start_index..]) |c, i| {
        if (i + start_index == token_start) {
            // Extend line_end to the end of the token's own line.
            loc.line_end = i + start_index;
            while (loc.line_end < source.len and source[loc.line_end] != '\n') : (loc.line_end += 1) {}
            return loc;
        }
        if (c == '\n') {
            loc.line += 1;
            loc.column = 0;
            loc.line_start = i + 1;
        } else {
            loc.column += 1;
        }
    }
    return loc;
}

/// Entry point: parses the command line, then dispatches to the subcommand.
pub fn main() anyerror!void {
    var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
    defer arena.deinit();
    const allocator = &arena.allocator;
    var assembly = Assembly{
        .allocator = allocator,
        .target = .Native,
        .input_files = undefined,
        .errors = std.ArrayList(Assembly.Error).init(allocator),
        .output_file = null,
        .sections =
Assembly.SectionTable.init(allocator),
        .asm_files = undefined,
        .file_offset = 0,
    };
    var input_files = std.ArrayList([]const u8).init(allocator);
    var maybe_cmd: ?Cmd = null;
    var debug_errors = false;
    const args = try process.argsAlloc(allocator);
    // args[0] is the program name; start at 1.
    var arg_i: usize = 1;
    while (arg_i < args.len) : (arg_i += 1) {
        const full_arg = args[arg_i];
        if (mem.startsWith(u8, full_arg, "-")) {
            const arg = full_arg[1..];
            if (mem.eql(u8, arg, "help")) {
                try dumpUsage(std.io.getStdOut());
                return;
            } else if (mem.eql(u8, arg, "debug-errors")) {
                debug_errors = true;
            } else {
                // The remaining options all take a value in the next slot.
                arg_i += 1;
                if (arg_i >= args.len) {
                    std.debug.warn("Expected another parameter after '{}'\n", full_arg);
                    dumpStdErrUsageAndExit();
                } else if (mem.eql(u8, arg, "target")) {
                    assembly.target = try std.Target.parse(args[arg_i]);
                } else if (mem.eql(u8, arg, "o")) {
                    assembly.output_file = args[arg_i];
                } else {
                    std.debug.warn("Invalid parameter: {}\n", full_arg);
                    dumpStdErrUsageAndExit();
                }
            }
        } else if (maybe_cmd == null) {
            // The first bare argument selects the subcommand by name.
            inline for (std.meta.fields(Cmd)) |field| {
                if (mem.eql(u8, full_arg, field.name)) {
                    maybe_cmd = @field(Cmd, field.name);
                    break;
                }
            } else {
                std.debug.warn("Invalid command: {}\n", full_arg);
                dumpStdErrUsageAndExit();
            }
        } else {
            // Every later bare argument is an input file.
            try input_files.append(full_arg);
        }
    }
    const cmd = maybe_cmd orelse {
        std.debug.warn("Expected a command parameter\n");
        dumpStdErrUsageAndExit();
    };
    switch (cmd) {
        .targets => {
            try std.io.getStdOut().write(
                \\x86_64-linux
                \\
            );
            return;
        },
        .exe => {
            assembly.input_files = input_files.toSliceConst();
            assembleExecutable(&assembly) catch |err| switch (err) {
                error.ParseFailure => {
                    // Render all collected diagnostics, then fail.
                    const stream = &std.io.getStdErr().outStream().stream;
                    for (assembly.errors.toSliceConst()) |asm_err| {
                        try asm_err.render(stream);
                    }
                    if (debug_errors) {
                        return err;
                    } else {
                        process.exit(1);
                    }
                },
                else => |e| return e,
            };
        },
        .obj => {
            std.debug.warn("object files not yet implemented\n");
            process.exit(1);
        },
        .dis => {
            std.debug.warn("disassembly not yet implemented\n");
            process.exit(1);
        },
        .tokenize => {
const stdout = &std.io.getStdOut().outStream().stream;
            const cwd = fs.cwd();
            // Debug aid: dump every token of every input file to stdout.
            for (input_files.toSliceConst()) |input_file| {
                const source = try cwd.readFileAlloc(allocator, input_file, math.maxInt(usize));
                var tokenizer = Tokenizer.init(source);
                while (true) {
                    const token = tokenizer.next();
                    if (token.id == .eof) break;
                    try stdout.print("{}: {}\n", @tagName(token.id), source[token.start..token.end]);
                }
            }
        },
    }
}

/// A parsed instruction operand.
const Arg = union(enum) {
    register: data.Register,
    immediate: u64,
    symbol_ref: Token,
};

/// Per-input-file parse state: the token stream, the currently active
/// section/symbol, and this file's symbol table.
const AsmFile = struct {
    source: []const u8,
    file_name: []const u8,
    tokenizer: Tokenizer,
    assembly: *Assembly,
    current_section: ?*Section = null,
    current_symbol: ?*Symbol = null,
    globals: GlobalSet,
    /// One-token pushback buffer used by eatToken/putBackToken.
    put_back_buffer: [1]Token,
    put_back_count: u1,
    symbols: SymbolTable,

    const SymbolTable = std.StringHashMap(*Symbol);
    const GlobalSet = std.StringHashMap(void);

    /// Returns the source text of `token`.
    fn tokenSlice(asm_file: AsmFile, token: Token) []const u8 {
        return asm_file.source[token.start..token.end];
    }

    /// Looks `name` up in the assembly-wide section table, creating a fresh
    /// page-aligned, empty section on first use.
    fn findOrCreateSection(self: *AsmFile, name: []const u8, flags: u32) !*Section {
        const gop = try self.assembly.sections.getOrPut(name);
        if (gop.found_existing) {
            // TODO deal with flags conflicts
            return gop.kv.value;
        }
        const section = try self.assembly.sections.allocator.create(Section);
        section.* = Section{
            .name = name,
            .layout = std.ArrayList(*Symbol).init(self.assembly.sections.allocator),
            .alignment = 0x1000,
            .file_offset = undefined,
            .file_size = 0,
            .virt_addr = undefined,
            .mem_size = 0,
            .flags = flags,
        };
        gop.kv.value = section;
        return section;
    }

    fn setCurrentSection(self: *AsmFile, name: []const u8, flags: u32) !void {
        const section = try self.findOrCreateSection(name, flags);
        self.current_section = section;
    }

    /// Like setCurrentSection, but derives the flags from a quoted
    /// string-literal token (e.g. the "ax" in `.section .foo, "ax"`).
    fn setCurrentSectionFlagsTok(self: *AsmFile, name: []const u8, flags_tok: Token) !void {
        var flags: u32 = 0;
        const flags_str = blk: {
            const tok_slice = self.tokenSlice(flags_tok);
            // skip over the double quotes
            break :blk tok_slice[1 ..
tok_slice.len - 1];
        };
        for (flags_str) |b, offset| switch (b) {
            // NOTE(review): both 'a' (alloc) and 'r' map to PF_R here — confirm intended.
            'a', 'r' => flags |= elf.PF_R,
            'w' => flags |= elf.PF_W,
            'x' => flags |= elf.PF_X,
            else => {
                try self.assembly.errors.append(.{
                    .bad_section_flag = .{
                        .source_info = newSourceInfo(self, flags_tok),
                        .bad_index = offset,
                        .bad_byte = b,
                    },
                });
                return error.ParseFailure;
            },
        };
        return self.setCurrentSection(name, flags);
    }

    /// Starts a new symbol at the current section's tail. Errors if no
    /// section is active or if the name is already defined in this file.
    fn beginSymbol(self: *AsmFile, source_token: Token, name: []const u8) !void {
        const current_section = self.current_section orelse {
            try self.assembly.errors.append(.{
                .symbol_outside_section = .{ .source_info = newSourceInfo(self, source_token) },
            });
            return error.ParseFailure;
        };
        const symbol = try self.symbols.allocator.create(Symbol);
        symbol.* = Symbol{
            .addr = undefined,
            .size = 0,
            .source_token = source_token,
            .name = name,
            .section = current_section,
            .ops = std.ArrayList(PseudoOp).init(self.assembly.allocator),
            .source_file = self,
        };
        // put returns the previous entry when the key already existed.
        if (try self.symbols.put(name, symbol)) |existing_entry| {
            try self.assembly.errors.append(.{
                .duplicate_symbol = .{
                    .source_info = newSourceInfo(self, source_token),
                    .other_symbol = existing_entry.value.source_token,
                },
            });
            return error.ParseFailure;
        }
        try current_section.layout.append(symbol);
        self.current_symbol = symbol;
    }

    /// Marks `name` as .globl (set semantics; duplicates are fine).
    fn addGlobal(self: *AsmFile, name: []const u8) !void {
        _ = try self.globals.put(name, {});
    }

    /// Returns the next token, honoring the one-token pushback buffer.
    fn nextToken(self: *AsmFile) Token {
        if (self.put_back_count == 0) {
            return self.tokenizer.next();
        } else {
            self.put_back_count -= 1;
            return self.put_back_buffer[self.put_back_count];
        }
    }

    /// Consumes and returns the next token only when it has id `id`;
    /// otherwise pushes it back and returns null.
    fn eatToken(self: *AsmFile, id: Token.Id) ?Token {
        const token = self.nextToken();
        if (token.id == id) return token;
        self.putBackToken(token);
        return null;
    }

    fn putBackToken(self: *AsmFile, token: Token) void {
        self.put_back_buffer[self.put_back_count] = token;
        self.put_back_count += 1;
    }

    /// Like eatToken, but records an unexpected_token diagnostic and fails
    /// when the next token does not match.
    fn expectToken(asm_file: *AsmFile, id: Token.Id) !Token {
        const token = asm_file.nextToken();
        if (token.id != id) {
            try asm_file.assembly.errors.append(.{
.unexpected_token = .{ .source_info = newSourceInfo(asm_file, token) },
            });
            return error.ParseFailure;
        }
        return token;
    }

    /// Returns the active symbol, or records an instr_outside_symbol
    /// diagnostic when no symbol has been started yet.
    fn getCurrentSymbol(asm_file: *AsmFile, source_token: Token) !*Symbol {
        return asm_file.current_symbol orelse {
            try asm_file.assembly.errors.append(.{
                .instr_outside_symbol = .{ .source_info = newSourceInfo(asm_file, source_token) },
            });
            return error.ParseFailure;
        };
    }
};

/// Bundles a token with its file's source text for a diagnostic.
fn newSourceInfo(asm_file: *AsmFile, tok: Token) Assembly.SourceInfo {
    return .{
        .token = tok,
        .source = asm_file.source,
        .file_name = asm_file.file_name,
    };
}

/// Parses every input file into sections/symbols/ops, then lays out and
/// writes the ELF executable.
fn assembleExecutable(assembly: *Assembly) !void {
    const cwd = fs.cwd();
    assembly.asm_files = try assembly.allocator.alloc(AsmFile, assembly.input_files.len);
    for (assembly.input_files) |input_file, input_file_index| {
        const asm_file = &assembly.asm_files[input_file_index];
        asm_file.* = .{
            .assembly = assembly,
            .file_name = input_file,
            .globals = AsmFile.GlobalSet.init(assembly.allocator),
            .symbols = AsmFile.SymbolTable.init(assembly.allocator),
            .source = try cwd.readFileAlloc(assembly.allocator, input_file, math.maxInt(usize)),
            .tokenizer = undefined,
            .put_back_buffer = undefined,
            .put_back_count = 0,
        };
        asm_file.tokenizer = Tokenizer.init(asm_file.source);
        while (true) {
            const token = asm_file.nextToken();
            switch (token.id) {
                .line_break => continue,
                .eof => break,
                .period => {
                    // Directive: .text, .globl, .section, .ascii, ...
                    const dir_ident = try asm_file.expectToken(.identifier);
                    const dir_name = asm_file.tokenSlice(dir_ident);
                    if (mem.eql(u8, dir_name, "text")) {
                        try asm_file.setCurrentSection("text", elf.PF_R | elf.PF_X);
                        _ = try asm_file.expectToken(.line_break);
                    } else if (mem.eql(u8, dir_name, "globl")) {
                        // Comma-separated list of names to export.
                        while (true) {
                            const ident = try asm_file.expectToken(.identifier);
                            try asm_file.addGlobal(asm_file.tokenSlice(ident));
                            if (asm_file.eatToken(.comma)) |_| continue else break;
                        }
                        _ = try asm_file.expectToken(.line_break);
                    } else if (mem.eql(u8, dir_name, "section")) {
                        _ = try asm_file.expectToken(.period);
                        const sect_name_token = try
asm_file.expectToken(.identifier);
                        const sect_name = asm_file.tokenSlice(sect_name_token);
                        _ = try asm_file.expectToken(.comma);
                        const flags_tok = try asm_file.expectToken(.string_literal);
                        try asm_file.setCurrentSectionFlagsTok(sect_name, flags_tok);
                        _ = try asm_file.expectToken(.line_break);
                    } else if (mem.eql(u8, dir_name, "ascii")) {
                        // .ascii appends the decoded string bytes to the current symbol.
                        const current_symbol = try asm_file.getCurrentSymbol(dir_ident);
                        const str_lit_tok = try asm_file.expectToken(.string_literal);
                        const str_lit = asm_file.tokenSlice(str_lit_tok);
                        var bad_index: usize = undefined;
                        const bytes = parseStringLiteral(
                            assembly.allocator,
                            str_lit,
                            &bad_index,
                        ) catch |err| switch (err) {
                            error.InvalidCharacter => {
                                try assembly.errors.append(.{
                                    .bad_string_literal = .{
                                        .source_info = newSourceInfo(asm_file, str_lit_tok),
                                        .bad_index = bad_index,
                                    },
                                });
                                return error.ParseFailure;
                            },
                            error.OutOfMemory => |e| return e,
                        };
                        try current_symbol.ops.append(.{ .data = bytes });
                        current_symbol.size += bytes.len;
                        _ = try asm_file.expectToken(.line_break);
                    } else {
                        try assembly.errors.append(.{
                            .unrecognized_directive = .{ .source_info = newSourceInfo(asm_file, dir_ident) },
                        });
                        return error.ParseFailure;
                    }
                },
                .identifier => {
                    if (asm_file.eatToken(.colon)) |_| {
                        // "name:" defines a new symbol.
                        const symbol_name = asm_file.tokenSlice(token);
                        try asm_file.beginSymbol(token, symbol_name);
                    } else {
                        // Otherwise an instruction mnemonic; collect up to 2
                        // comma-separated operand tokens.
                        var arg_toks: [2]Token = undefined;
                        var arg_count: usize = 0;
                        var last_comma_tok: Token = undefined;
                        while (arg_count < arg_toks.len) {
                            const tok = asm_file.nextToken();
                            if (tok.id == .line_break) break;
                            arg_toks[arg_count] = tok;
                            arg_count += 1;
                            if (asm_file.eatToken(.comma)) |comma_tok| {
                                last_comma_tok = comma_tok;
                                continue;
                            } else {
                                break;
                            }
                        } else {
                            // while-else: ran out of operand slots with more pending.
                            try assembly.errors.append(.{
                                .too_many_args = .{ .source_info = newSourceInfo(asm_file, last_comma_tok) },
                            });
                            return error.ParseFailure;
                        }
                        const wanted_instr_name = asm_file.tokenSlice(token);
                        // Find a database entry matching mnemonic and operand shapes.
                        const inst = outer: for (data.instructions) |*inst| {
                            if (!mem.eql(u8, inst.name, wanted_instr_name)) continue;
                            if
(inst.args.len != arg_count) continue;
                            for (inst.args) |inst_arg, i| switch (inst_arg) {
                                .register => |reg| {
                                    if (arg_toks[i].id != .identifier) continue :outer;
                                    const src_reg_name = asm_file.tokenSlice(arg_toks[i]);
                                    // TODO validate register name
                                    if (!mem.eql(u8, src_reg_name, @tagName(reg))) continue :outer;
                                },
                                .immediate => switch (arg_toks[i].id) {
                                    .integer_literal, .char_literal, .identifier => {},
                                    else => continue :outer,
                                },
                            };
                            break :outer inst;
                        } else {
                            try assembly.errors.append(.{
                                .unrecognized_instruction = .{ .source_info = newSourceInfo(asm_file, token) },
                            });
                            return error.ParseFailure;
                        };
                        const current_symbol = try asm_file.getCurrentSymbol(token);
                        var args = std.ArrayList(Arg).init(assembly.allocator);
                        for (arg_toks[0..arg_count]) |arg_token| {
                            const arg_text = asm_file.tokenSlice(arg_token);
                            var arg: Arg = undefined;
                            switch (arg_token.id) {
                                .integer_literal => {
                                    // Base is chosen from a 0x/0b/0o prefix; default decimal.
                                    var text: []const u8 = undefined;
                                    var base: u8 = undefined;
                                    if (mem.startsWith(u8, arg_text, "0x")) {
                                        base = 16;
                                        text = arg_text[2..];
                                    } else if (mem.startsWith(u8, arg_text, "0b")) {
                                        base = 2;
                                        text = arg_text[2..];
                                    } else if (mem.startsWith(u8, arg_text, "0o")) {
                                        base = 8;
                                        text = arg_text[2..];
                                    } else {
                                        base = 10;
                                        text = arg_text;
                                    }
                                    const imm = std.fmt.parseUnsigned(u64, text, base) catch |err| {
                                        try asm_file.assembly.errors.append(.{
                                            .bad_integer_literal = .{ .source_info = newSourceInfo(asm_file, arg_token) },
                                        });
                                        return error.ParseFailure;
                                    };
                                    arg = Arg{ .immediate = imm };
                                },
                                .identifier => {
                                    // A known register name; otherwise a symbol reference.
                                    inline for (std.meta.fields(data.Register)) |field| {
                                        if (mem.eql(u8, arg_text, field.name)) {
                                            const reg = @field(data.Register, field.name);
                                            arg = Arg{ .register = reg };
                                            break;
                                        }
                                    } else {
                                        arg = Arg{ .symbol_ref = arg_token };
                                    }
                                },
                                else => {
                                    try asm_file.assembly.errors.append(.{
                                        .unexpected_token = .{ .source_info = newSourceInfo(asm_file, arg_token) },
                                    });
                                    return error.ParseFailure;
                                },
                            }
                            try args.append(arg);
                        }
                        try current_symbol.ops.append(.{
                            .instruction = .{
                                .props = inst,
                                .args =
args.toSliceConst(),
                            },
                        });
                        current_symbol.size += inst.size;
                    }
                },
                else => {
                    try assembly.errors.append(.{
                        .unexpected_token = .{ .source_info = newSourceInfo(asm_file, token) },
                    });
                    return error.ParseFailure;
                },
            }
        }
    }
    // Default output name: the first input's basename without its extension.
    const output_file = assembly.output_file orelse blk: {
        const basename = fs.path.basename(assembly.input_files[0]);
        const dot_index = mem.lastIndexOfScalar(u8, basename, '.') orelse basename.len;
        break :blk basename[0..dot_index];
    };
    // NOTE(review): truncate = false keeps stale trailing bytes when
    // overwriting a longer pre-existing file — confirm intended.
    const file = try std.fs.cwd().createFile(output_file, .{
        .mode = 0o755,
        .truncate = false,
    });
    defer file.close();
    const ptr_width: PtrWidth = switch (assembly.target.getArchPtrBitWidth()) {
        32 => ._32,
        64 => ._64,
        else => return error.UnsupportedArchitecture,
    };
    const ehdr_size: u64 = switch (ptr_width) {
        ._32 => @sizeOf(elf.Elf32_Ehdr),
        ._64 => @sizeOf(elf.Elf64_Ehdr),
    };
    const phdr_size: u64 = switch (ptr_width) {
        ._32 => @sizeOf(elf.Elf32_Phdr),
        ._64 => @sizeOf(elf.Elf64_Phdr),
    };
    // Sections are emitted in this fixed order, right after the headers.
    const section_names = [_][]const u8{ "rodata", "text" };
    assembly.file_offset = ehdr_size + phdr_size * section_names.len;
    for (section_names) |section_name| {
        // NOTE(review): `orelse break` stops at the first absent section, which
        // also skips later present ones — confirm this shouldn't be `continue`.
        const section = (assembly.sections.get(section_name) orelse break).value;
        assembly.file_offset = mem.alignForward(assembly.file_offset, section.alignment);
        assembly.next_map_addr = mem.alignForward(assembly.next_map_addr, section.alignment);
        section.file_offset = assembly.file_offset;
        section.virt_addr = assembly.next_map_addr;
        try file.seekTo(assembly.file_offset);
        const prev_file_offset = assembly.file_offset;
        const prev_map_addr = assembly.next_map_addr;
        try writeSection(assembly, section, file, ptr_width);
        section.file_size = assembly.file_offset - prev_file_offset;
        section.mem_size = assembly.next_map_addr - prev_map_addr;
    }
    // Headers go in last, once the entry point and layout are known.
    try file.seekTo(0);
    try writeElfHeader(assembly, file, ptr_width, &section_names);
}

/// Encodes every symbol of `section` into `file`, assigning symbol addresses
/// and advancing the assembly's file/virtual-address cursors.
fn writeSection(assembly: *Assembly, section: *Section, file: fs.File, ptr_width: PtrWidth) !void {
    const endian = assembly.target.getArch().endian();
    for
(section.layout.toSliceConst()) |symbol| {
        symbol.addr = assembly.next_map_addr;
        for (symbol.ops.toSliceConst()) |pseudo_op| {
            switch (pseudo_op) {
                .instruction => |inst| {
                    // Encoding buffer: optional prefix byte + opcode + at most
                    // one 32-bit immediate/address + optional suffix byte.
                    var buf: [8]u8 = undefined;
                    var index: usize = 0;
                    if (inst.props.prefix) |prefix| {
                        buf[index] = prefix;
                        index += 1;
                    }
                    buf[index] = inst.props.po;
                    index += 1;
                    for (inst.args) |arg| switch (arg) {
                        // Register choice contributes no operand bytes here.
                        .register => {},
                        .immediate => |x| {
                            mem.writeInt(u32, @ptrCast(*[4]u8, &buf[index]), @intCast(u32, x), endian);
                            index += 4;
                        },
                        .symbol_ref => |other_sym_tok| {
                            // Resolved against the defining file's own symbol table.
                            const other_sym_name = symbol.source_file.tokenSlice(other_sym_tok);
                            const other_symbol = symbol.source_file.symbols.getValue(other_sym_name) orelse {
                                try assembly.errors.append(.{
                                    .unknown_symbol = .{
                                        .source_info = newSourceInfo(
                                            symbol.source_file,
                                            other_sym_tok,
                                        ),
                                    },
                                });
                                return error.ParseFailure;
                            };
                            // NOTE(review): this reads the referenced symbol's addr,
                            // which is only set once that symbol has been written —
                            // forward references may read `undefined`; confirm.
                            mem.writeInt(u32, @ptrCast(*[4]u8, &buf[index]), @intCast(u32, other_symbol.addr), endian);
                            index += 4;
                        },
                    };
                    if (inst.props.suffix) |suffix| {
                        buf[index] = suffix;
                        index += 1;
                    }
                    try file.write(buf[0..index]);
                    assembly.next_map_addr += index;
                    assembly.file_offset += index;
                },
                .data => |slice| {
                    // Raw bytes (.ascii payloads) are written verbatim.
                    try file.write(slice);
                    assembly.next_map_addr += slice.len;
                    assembly.file_offset += slice.len;
                },
            }
        }
        if (mem.eql(u8, symbol.name, "_start")) {
            if (assembly.entry_addr) |prev_addr| {
                @panic("TODO emit error for _start already defined");
            } else {
                assembly.entry_addr = symbol.addr;
            }
        }
    }
}

const PtrWidth = enum {
    _32,
    _64,
};

/// Writes the ELF header followed by one PT_LOAD program header per section.
fn writeElfHeader(
    assembly: *Assembly,
    /// Expected to be already seeked to position 0 in the file.
file: fs.File,
    ptr_width: PtrWidth,
    section_names: []const []const u8,
) !void {
    const endian = assembly.target.getArch().endian();
    // Scratch buffer big enough for the largest single header we emit.
    var hdr_buf: [math.max(@sizeOf(elf.Elf64_Ehdr), @sizeOf(elf.Elf64_Phdr))]u8 = undefined;
    var index: usize = 0;
    // e_ident magic
    mem.copy(u8, hdr_buf[index..], "\x7fELF");
    index += 4;
    // EI_CLASS: 1 = 32-bit, 2 = 64-bit
    hdr_buf[index] = switch (ptr_width) {
        ._32 => 1,
        ._64 => 2,
    };
    index += 1;
    // EI_DATA: 1 = little endian, 2 = big endian
    hdr_buf[index] = switch (endian) {
        .Little => 1,
        .Big => 2,
    };
    index += 1;
    hdr_buf[index] = 1; // ELF version
    index += 1;
    // OS ABI, often set to 0 regardless of target platform
    // ABI Version, possibly used by glibc but not by static executables
    // padding
    mem.set(u8, hdr_buf[index..][0..9], 0);
    index += 9;
    assert(index == 16);
    // TODO: https://github.com/ziglang/zig/issues/863 makes this (and all following) @ptrCast unnecessary
    mem.writeInt(u16, @ptrCast(*[2]u8, &hdr_buf[index]), @enumToInt(elf.ET.EXEC), endian);
    index += 2;
    const machine = assembly.target.getArch().toElfMachine();
    mem.writeInt(u16, @ptrCast(*[2]u8, &hdr_buf[index]), @enumToInt(machine), endian);
    index += 2;
    // ELF Version, again
    mem.writeInt(u32, @ptrCast(*[4]u8, &hdr_buf[index]), 1, endian);
    index += 4;
    switch (ptr_width) {
        ._32 => {
            // e_entry
            mem.writeInt(u32, @ptrCast(*[4]u8, &hdr_buf[index]), @intCast(u32, assembly.entry_addr.?), endian);
            index += 4;
            // e_phoff
            mem.writeInt(u32, @ptrCast(*[4]u8, &hdr_buf[index]), @sizeOf(elf.Elf32_Ehdr), endian);
            index += 4;
            // e_shoff
            mem.writeInt(u32, @ptrCast(*[4]u8, &hdr_buf[index]), 0, endian);
            index += 4;
        },
        ._64 => {
            // e_entry
            mem.writeInt(u64, @ptrCast(*[8]u8, &hdr_buf[index]), assembly.entry_addr.?, endian);
            index += 8;
            // e_phoff
            mem.writeInt(u64, @ptrCast(*[8]u8, &hdr_buf[index]), @sizeOf(elf.Elf64_Ehdr), endian);
            index += 8;
            // e_shoff
            mem.writeInt(u64, @ptrCast(*[8]u8, &hdr_buf[index]), 0, endian);
            index += 8;
        },
    }
    const e_flags = 0;
    mem.writeInt(u32, @ptrCast(*[4]u8, &hdr_buf[index]), e_flags, endian);
    index += 4;
    const e_ehsize: u16 = switch (ptr_width) {
        ._32 => @sizeOf(elf.Elf32_Ehdr),
        ._64 => @sizeOf(elf.Elf64_Ehdr),
    };
    mem.writeInt(u16, @ptrCast(*[2]u8, &hdr_buf[index]), e_ehsize, endian);
    index += 2;
    const e_phentsize: u16 = switch (ptr_width) {
        ._32 => @sizeOf(elf.Elf32_Phdr),
        ._64 => @sizeOf(elf.Elf64_Phdr),
    };
    mem.writeInt(u16, @ptrCast(*[2]u8, &hdr_buf[index]), e_phentsize, endian);
    index += 2;
    const e_phnum = @intCast(u16, section_names.len);
    mem.writeInt(u16, @ptrCast(*[2]u8, &hdr_buf[index]), e_phnum, endian);
    index += 2;
    const e_shentsize: u16 = switch (ptr_width) {
        ._32 => @sizeOf(elf.Elf32_Shdr),
        ._64 => @sizeOf(elf.Elf64_Shdr),
    };
    mem.writeInt(u16, @ptrCast(*[2]u8, &hdr_buf[index]), e_shentsize, endian);
    index += 2;
    // No section header table is emitted (e_shnum/e_shstrndx = 0).
    const e_shnum = 0;
    mem.writeInt(u16, @ptrCast(*[2]u8, &hdr_buf[index]), e_shnum, endian);
    index += 2;
    const e_shstrndx = 0;
    mem.writeInt(u16, @ptrCast(*[2]u8, &hdr_buf[index]), e_shstrndx, endian);
    index += 2;
    assert(index == e_ehsize);
    try file.write(hdr_buf[0..index]);
    // Program headers
    for (section_names) |section_name| {
        const section = (assembly.sections.get(section_name) orelse break).value;
        index = 0;
        const p_type = elf.PT_LOAD;
        mem.writeInt(u32, @ptrCast(*[4]u8, &hdr_buf[index]), p_type, endian);
        index += 4;
        switch (ptr_width) {
            ._32 => @panic("TODO"),
            ._64 => {
                // p_flags
                mem.writeInt(u32, @ptrCast(*[4]u8, &hdr_buf[index]), section.flags, endian);
                index += 4;
                // p_offset, p_vaddr, p_paddr, p_filesz, p_memsz, p_align
                mem.writeInt(u64, @ptrCast(*[8]u8, &hdr_buf[index]), section.file_offset, endian);
                index += 8;
                mem.writeInt(u64, @ptrCast(*[8]u8, &hdr_buf[index]), section.virt_addr, endian);
                index += 8;
                mem.writeInt(u64, @ptrCast(*[8]u8, &hdr_buf[index]), section.virt_addr, endian);
                index += 8;
                mem.writeInt(u64, @ptrCast(*[8]u8, &hdr_buf[index]), section.file_size, endian);
                index += 8;
                mem.writeInt(u64, @ptrCast(*[8]u8, &hdr_buf[index]), section.mem_size, endian);
                index += 8;
                mem.writeInt(u64, @ptrCast(*[8]u8, &hdr_buf[index]), section.alignment, endian);
                index += 8;
            },
            // NOTE(review): an `else` prong after an exhaustive enum switch is
            // rejected by newer compilers — confirm against the pinned Zig version.
            else => unreachable,
        }
        assert(index == e_phentsize);
        try file.write(hdr_buf[0..index]);
}
}

/// Prints the usage text to stderr and exits with a failure status.
fn dumpStdErrUsageAndExit() noreturn {
    dumpUsage(std.io.getStdErr()) catch {};
    process.exit(1);
}

/// Writes the command-line help text to `file`.
fn dumpUsage(file: fs.File) !void {
    try file.write(
        \\Usage: zasm [command] [options]
        \\
        \\Commands:
        \\ exe [files] create an executable file
        \\ obj [files] create an object file
        \\ dis [file] disassemble a binary into source
        \\ targets list the supported targets to stdout
        \\ tokenize [file] (debug) tokenize the input files
        \\
        \\Options:
        \\ -o [file] override output file name
        \\ -help dump this help text to stdout
        \\ -target [arch]-[os] specify the target for positional arguments
        \\ -debug-errors (debug) show stack trace on error
        \\
    );
}

// Reference Token/Tokenizer so their tests are compiled and run too.
test "" {
    _ = Token;
    _ = Tokenizer;
}
src/main.zig
const std = @import("std");
const util = @import("util");
const common = @import("../common.zig");
const gen3 = @import("../gen3.zig");
const rom = @import("../rom.zig");

const debug = std.debug;
const mem = std.mem;

const lu16 = rom.int.lu16;
const lu32 = rom.int.lu32;
const lu64 = rom.int.lu64;

/// The location of a single `T` within a ROM image, as a byte offset.
pub fn Offset(comptime _T: type) type {
    return struct {
        pub const T = _T;

        offset: usize,

        /// Builds an Offset from a pointer `p` that must point inside `data_slice`.
        pub fn init(data_slice: []const u8, p: *const T) @This() {
            const data_ptr = @ptrToInt(data_slice.ptr);
            const item_ptr = @ptrToInt(p);
            debug.assert(data_ptr <= item_ptr);
            debug.assert(item_ptr + @sizeOf(T) <= data_ptr + data_slice.len);
            return @This(){ .offset = item_ptr - data_ptr };
        }

        /// One past the last byte of the item.
        pub fn end(offset: @This()) usize {
            return offset.offset + @sizeOf(T);
        }

        /// Reinterprets the bytes at this offset within `data` as a `*T`.
        pub fn ptr(offset: @This(), data: []u8) *T {
            return &mem.bytesAsSlice(T, data[offset.offset..][0..@sizeOf(T)])[0];
        }
    };
}

/// A contiguous array of `Item`s within a ROM image (start offset + length).
pub fn Section(comptime _Item: type) type {
    return struct {
        pub const Item = _Item;

        start: usize,
        len: usize,

        /// Builds a Section from a slice `items` that must lie inside `data_slice`.
        pub fn init(data_slice: []const u8, items: []const Item) @This() {
            const data_ptr = @ptrToInt(data_slice.ptr);
            const item_ptr = @ptrToInt(items.ptr);
            debug.assert(data_ptr <= item_ptr);
            debug.assert(item_ptr + items.len * @sizeOf(Item) <= data_ptr + data_slice.len);
            return @This(){
                .start = item_ptr - data_ptr,
                .len = items.len,
            };
        }

        /// One past the last byte of the section.
        pub fn end(sec: @This()) usize {
            return sec.start + @sizeOf(Item) * sec.len;
        }

        /// Reinterprets this section's bytes within `data` as a slice of Items.
        pub fn slice(sec: @This(), data: []u8) []Item {
            return mem.bytesAsSlice(Item, data[sec.start..sec.end()]);
        }
    };
}

// Named section/offset types for each table addressed in a gen3 ROM.
pub const AbilityNameSection = Section([13]u8);
pub const BaseStatsSection = Section(gen3.BasePokemon);
pub const EmeraldPokedexSection = Section(gen3.EmeraldPokedexEntry);
pub const EvolutionSection = Section([5]gen3.Evolution);
pub const HmSection = Section(lu16);
pub const ItemSection = Section(gen3.Item);
pub const LevelUpLearnsetPointerSection = Section(gen3.Ptr([*]gen3.LevelUpMove));
pub const MachineLearnsetSection = Section(lu64);
pub const MapHeaderSection = Section(gen3.MapHeader);
pub const MoveNameSection = Section([13]u8);
pub const MoveSection = Section(gen3.Move);
pub const PokemonNameSection = Section([11]u8);
pub const RSFrLgPokedexSection = Section(gen3.RSFrLgPokedexEntry);
pub const SpeciesToNationalDexSection = Section(lu16);
pub const StarterOffset = Offset(lu16);
pub const TextDelaySection = Section(u8);
pub const TmSection = Section(lu16);
pub const TrainerSection = Section(gen3.Trainer);
pub const TypeEffectivenessSection = Section(common.TypeEffectiveness);
pub const TypeNameSection = Section([7]u8);
pub const WildPokemonHeaderSection = Section(gen3.WildPokemonHeader);

/// Per-game-version table locations; one constant below per supported ROM.
pub const Info = struct {
    game_title: util.TerminatedArray(12, u8, 0),
    gamecode: [4]u8,
    version: common.Version,
    software_version: u8,
    text_delays: TextDelaySection,
    starters: [3]StarterOffset,
    // In some games, the starters are repeated in multiple places.
    // For games where this isn't true, we just repeat the same offsets
    // twice
    starters_repeat: [3]StarterOffset,
    trainers: TrainerSection,
    moves: MoveSection,
    machine_learnsets: MachineLearnsetSection,
    pokemons: BaseStatsSection,
    evolutions: EvolutionSection,
    level_up_learnset_pointers: LevelUpLearnsetPointerSection,
    type_effectiveness: TypeEffectivenessSection,
    hms: HmSection,
    tms: TmSection,
    pokedex: union {
        rsfrlg: RSFrLgPokedexSection,
        emerald: EmeraldPokedexSection,
    },
    species_to_national_dex: SpeciesToNationalDexSection,
    items: ItemSection,
    wild_pokemon_headers: WildPokemonHeaderSection,
    map_headers: MapHeaderSection,
    pokemon_names: PokemonNameSection,
    ability_names: AbilityNameSection,
    // NOTE(review): typed AbilityNameSection rather than MoveNameSection — both
    // are Section([13]u8) so behavior is identical, but likely a copy/paste slip.
    move_names: AbilityNameSection,
    type_names: TypeNameSection,
};

pub const infos = [_]Info{
    emerald_us_info,
    ruby_us_info,
    sapphire_us_info,
    fire_us_info,
    leaf_us_info,
};

// All .start values below are decimal byte offsets into the US ROM images —
// presumably recovered by scanning; TODO confirm against the extraction tool.
const emerald_us_info = Info{
    .game_title = .{ .data = "POKEMON EMER".* },
    .gamecode = "BPEE".*,
    .version = .emerald,
    .software_version = 0,
    .text_delays = .{ .start = 6353044, .len = 3 },
    .trainers = .{ .start = 3211312, .len =
855 },
    .moves = .{ .start = 3262616, .len = 355 },
    .machine_learnsets = .{ .start = 3270808, .len = 412 },
    .pokemons = .{ .start = 3277772, .len = 412 },
    .evolutions = .{ .start = 3298076, .len = 412 },
    .level_up_learnset_pointers = .{ .start = 3314556, .len = 412 },
    .type_effectiveness = .{ .start = 3255528, .len = 111 },
    .hms = .{ .start = 3317482, .len = 8 },
    .tms = .{ .start = 6380436, .len = 50 },
    .pokedex = .{ .emerald = .{ .start = 5682608, .len = 387 } },
    .species_to_national_dex = .{ .start = 3267714, .len = 411 },
    .items = .{ .start = 5781920, .len = 377 },
    .wild_pokemon_headers = .{ .start = 5582152, .len = 124 },
    .map_headers = .{ .start = 4727992, .len = 518 },
    .pokemon_names = .{ .start = 3245512, .len = 412 },
    .ability_names = .{ .start = 3258075, .len = 78 },
    .move_names = .{ .start = 3250044, .len = 355 },
    .type_names = .{ .start = 3255864, .len = 18 },
    .starters = .{
        .{ .offset = 0x005B1DF8 },
        .{ .offset = 0x005B1DFA },
        .{ .offset = 0x005B1DFC },
    },
    // Same offsets repeated; see the comment on Info.starters_repeat.
    .starters_repeat = .{
        .{ .offset = 0x005B1DF8 },
        .{ .offset = 0x005B1DFA },
        .{ .offset = 0x005B1DFC },
    },
};

/// Table locations for Pokemon FireRed (US), software revision 1.
pub const fire_us_info = Info{
    .game_title = .{ .data = "POKEMON FIRE".* },
    .gamecode = "BPRE".*,
    .version = .fire_red,
    .software_version = 1,
    .text_delays = .{ .start = 4322456, .len = 3 },
    .trainers = .{ .start = 2353976, .len = 743 },
    .moves = .{ .start = 2428020, .len = 355 },
    .machine_learnsets = .{ .start = 2436152, .len = 412 },
    .pokemons = .{ .start = 2443252, .len = 412 },
    .evolutions = .{ .start = 2463684, .len = 412 },
    .level_up_learnset_pointers = .{ .start = 2480164, .len = 412 },
    .type_effectiveness = .{ .start = 2420928, .len = 111 },
    .hms = .{ .start = 2482308, .len = 8 },
    .tms = .{ .start = 4564484, .len = 50 },
    .pokedex = .{ .rsfrlg = .{ .start = 4516016, .len = 387 } },
    .species_to_national_dex = .{ .start = 2433118, .len = 411 },
    .items = .{ .start = 4042904, .len = 374 },
    .wild_pokemon_headers = .{ .start = 3972392, .len = 132 },
    .map_headers = .{ .start = 3469816, .len =
425 },
    .pokemon_names = .{ .start = 2383696, .len = 412 },
    .ability_names = .{ .start = 2423984, .len = 78 },
    .move_names = .{ .start = 2388228, .len = 355 },
    .type_names = .{ .start = 2421264, .len = 18 },
    // Starter slots are spread out relative to a base offset here.
    .starters = .{
        .{ .offset = 0x00169C2D },
        .{ .offset = 0x00169C2D + 515 },
        .{ .offset = 0x00169C2D + 461 },
    },
    .starters_repeat = .{
        .{ .offset = 0x00169C2D + 5 + 461 },
        .{ .offset = 0x00169C2D + 5 },
        .{ .offset = 0x00169C2D + 5 + 515 },
    },
};

/// Table locations for Pokemon LeafGreen (US), software revision 1.
pub const leaf_us_info = Info{
    .game_title = .{ .data = "POKEMON LEAF".* },
    .gamecode = "BPGE".*,
    .version = .leaf_green,
    .software_version = 1,
    .text_delays = .{ .start = 4322004, .len = 3 },
    .trainers = .{ .start = 2353940, .len = 743 },
    .moves = .{ .start = 2427984, .len = 355 },
    .machine_learnsets = .{ .start = 2436116, .len = 412 },
    .pokemons = .{ .start = 2443216, .len = 412 },
    .evolutions = .{ .start = 2463652, .len = 412 },
    .level_up_learnset_pointers = .{ .start = 2480132, .len = 412 },
    .type_effectiveness = .{ .start = 2420892, .len = 111 },
    .hms = .{ .start = 2482276, .len = 8 },
    .tms = .{ .start = 4562996, .len = 50 },
    .pokedex = .{ .rsfrlg = .{ .start = 4514528, .len = 387 } },
    .species_to_national_dex = .{ .start = 2433082, .len = 411 },
    .items = .{ .start = 4042452, .len = 374 },
    .wild_pokemon_headers = .{ .start = 3971940, .len = 132 },
    .map_headers = .{ .start = 3469784, .len = 425 },
    .pokemon_names = .{ .start = 2383660, .len = 412 },
    .ability_names = .{ .start = 2423948, .len = 78 },
    .move_names = .{ .start = 2388192, .len = 355 },
    .type_names = .{ .start = 2421228, .len = 18 },
    .starters = .{
        .{ .offset = 0x00169C09 },
        .{ .offset = 0x00169C09 + 515 },
        .{ .offset = 0x00169C09 + 461 },
    },
    .starters_repeat = .{
        .{ .offset = 0x00169C09 + 5 + 461 },
        .{ .offset = 0x00169C09 + 5 },
        .{ .offset = 0x00169C09 + 5 + 515 },
    },
};

/// Table locations for Pokemon Ruby (US), software revision 1.
pub const ruby_us_info = Info{
    .game_title = .{ .data = "POKEMON RUBY".* },
    .gamecode = "AXVE".*,
    .version = .ruby,
    .software_version = 1,
    .text_delays = .{ .start = 1993712, .len = 3 },
.trainers = .{ .start = 2032916, .len = 337 }, .moves = .{ .start = 2076996, .len = 355 }, .machine_learnsets = .{ .start = 2085128, .len = 412 }, .pokemons = .{ .start = 2092080, .len = 412 }, .evolutions = .{ .start = 2112384, .len = 412 }, .level_up_learnset_pointers = .{ .start = 2128864, .len = 412 }, .type_effectiveness = .{ .start = 2070328, .len = 111 }, .hms = .{ .start = 2130738, .len = 8 }, .tms = .{ .start = 3630364, .len = 50 }, .pokedex = .{ .rsfrlg = .{ .start = 3872884, .len = 387 } }, .species_to_national_dex = .{ .start = 2082094, .len = 411 }, .items = .{ .start = 3954048, .len = 349 }, .wild_pokemon_headers = .{ .start = 3789932, .len = 97 }, .map_headers = .{ .start = 3167328, .len = 394 }, .pokemon_names = .{ .start = 2060676, .len = 412 }, .ability_names = .{ .start = 2073184, .len = 78 }, .move_names = .{ .start = 2065208, .len = 355 }, .type_names = .{ .start = 2070664, .len = 18 }, .starters = .{ .{ .offset = 0x003F76E0 }, .{ .offset = 0x003F76E2 }, .{ .offset = 0x003F76E4 }, }, .starters_repeat = .{ .{ .offset = 0x003F76E0 }, .{ .offset = 0x003F76E2 }, .{ .offset = 0x003F76E4 }, }, }; pub const sapphire_us_info = Info{ .game_title = .{ .data = "POKEMON SAPP".* }, .gamecode = "AXPE".*, .version = .sapphire, .software_version = 1, .text_delays = .{ .start = 1993600, .len = 3 }, .trainers = .{ .start = 2032804, .len = 337 }, .moves = .{ .start = 2076884, .len = 355 }, .machine_learnsets = .{ .start = 2085016, .len = 412 }, .pokemons = .{ .start = 2091968, .len = 412 }, .evolutions = .{ .start = 2112272, .len = 412 }, .level_up_learnset_pointers = .{ .start = 2128752, .len = 412 }, .type_effectiveness = .{ .start = 2070216, .len = 111 }, .hms = .{ .start = 2130626, .len = 8 }, .tms = .{ .start = 3630252, .len = 50 }, .pokedex = .{ .rsfrlg = .{ .start = 3872976, .len = 387 } }, .species_to_national_dex = .{ .start = 2081982, .len = 411 }, .items = .{ .start = 3954140, .len = 349 }, .wild_pokemon_headers = .{ .start = 3789492, .len = 97 }, 
.map_headers = .{ .start = 3167216, .len = 394 }, .pokemon_names = .{ .start = 2060564, .len = 412 }, .ability_names = .{ .start = 2073072, .len = 78 }, .move_names = .{ .start = 2065096, .len = 355 }, .type_names = .{ .start = 2070552, .len = 18 }, .starters = .{ .{ .offset = 0x003F773C }, .{ .offset = 0x003F773E }, .{ .offset = 0x003F7740 }, }, .starters_repeat = .{ .{ .offset = 0x003F773C }, .{ .offset = 0x003F773E }, .{ .offset = 0x003F7740 }, }, };
src/core/gen3/offsets.zig
const std = @import("std");
const subcommands = @import("../subcommands.zig");
const shared = @import("../shared.zig");
const zsw = @import("zsw");

const log = std.log.scoped(.groups);

pub const name = "groups";

pub const usage =
    \\Usage: {0s} [user]
    \\   or: {0s} OPTION
    \\
    \\Display the current group names.
    \\The optional [user] parameter will display the groups for the named user.
    \\
    \\  -h, --help     display this help and exit
    \\  --version      output version information and exit
    \\
;

// io
// .{
//     .stderr: std.io.Writer,
//     .stdin: std.io.Reader,
//     .stdout: std.io.Writer,
// },

// args
// struct {
//     fn next(self: *Self) ?shared.Arg,
//
//     // intended to only be called for the first argument
//     fn nextWithHelpOrVersion(self: *Self) !?shared.Arg,
//
//     fn nextRaw(self: *Self) ?[]const u8,
// }

/// Entry point for the `groups` subcommand.
///
/// With no argument, prints the groups of the current effective user; with a
/// single [user] argument, prints the groups of that named user. `/etc/passwd`
/// is opened once here and handed to the chosen lookup routine.
pub fn execute(
    allocator: std.mem.Allocator,
    io: anytype,
    args: anytype,
    system: zsw.System,
    exe_path: []const u8,
) subcommands.Error!u8 {
    const z = shared.tracy.traceNamed(@src(), name);
    defer z.end();

    _ = exe_path;

    // Must be the first argument fetch so --help/--version are handled.
    const opt_arg = try args.nextWithHelpOrVersion();

    const passwd_file = system.cwd().openFile("/etc/passwd", .{}) catch {
        return shared.printError(@This(), io, "unable to read '/etc/passwd'");
    };
    defer if (shared.free_on_close) passwd_file.close();

    return if (opt_arg) |arg|
        otherUser(allocator, io, arg, passwd_file, system)
    else
        currentUser(allocator, io, passwd_file, system);
}

/// Scans `/etc/passwd` for the entry whose uid matches the current effective
/// uid, then prints that user's groups via `printGroups`.
/// Errors with a diagnostic on malformed passwd lines or if no entry matches.
fn currentUser(
    allocator: std.mem.Allocator,
    io: anytype,
    passwd_file: zsw.File,
    system: zsw.System,
) subcommands.Error!u8 {
    const z = shared.tracy.traceNamed(@src(), "current user");
    defer z.end();

    log.info("currentUser called", .{});

    const euid = system.geteuid();

    var passwd_buffered_reader = std.io.bufferedReader(passwd_file.reader());
    const passwd_reader = passwd_buffered_reader.reader();

    // Reused line buffer; readUntilDelimiterArrayList overwrites it per line.
    var line_buffer = std.ArrayList(u8).init(allocator);
    defer if (shared.free_on_close) line_buffer.deinit();

    while (true) {
        passwd_reader.readUntilDelimiterArrayList(&line_buffer, '\n', std.math.maxInt(usize)) catch |err| switch (err) {
            error.EndOfStream => break,
            else => return shared.printError(@This(), io, "unable to read '/etc/passwd'"),
        };

        // passwd format: name:passwd:uid:gid:gecos:home:shell
        var column_iter = std.mem.tokenize(u8, line_buffer.items, ":");

        const user_name = column_iter.next() orelse
            return shared.printError(@This(), io, "format of '/etc/passwd' is invalid");

        // skip password stand-in
        _ = column_iter.next() orelse
            return shared.printError(@This(), io, "format of '/etc/passwd' is invalid");

        const user_id_slice = column_iter.next() orelse
            return shared.printError(@This(), io, "format of '/etc/passwd' is invalid");

        if (std.fmt.parseUnsigned(std.os.uid_t, user_id_slice, 10)) |user_id| {
            if (user_id == euid) {
                log.debug("found matching user id: {}", .{user_id});

                const primary_group_id_slice = column_iter.next() orelse
                    return shared.printError(@This(), io, "format of '/etc/passwd' is invalid");

                if (std.fmt.parseUnsigned(std.os.uid_t, primary_group_id_slice, 10)) |primary_group_id| {
                    return printGroups(allocator, user_name, primary_group_id, io, system);
                } else |_| {
                    return shared.printError(@This(), io, "format of '/etc/passwd' is invalid");
                }
            } else {
                log.debug("found non-matching user id: {}", .{user_id});
            }
        } else |_| {
            return shared.printError(@This(), io, "format of '/etc/passwd' is invalid");
        }
    }

    return shared.printError(@This(), io, "'/etc/passwd' does not contain the current effective uid");
}

/// Scans `/etc/passwd` for the entry whose user name matches `arg`, then
/// prints that user's groups via `printGroups`.
/// Errors with a diagnostic on malformed passwd lines or if the user is unknown.
fn otherUser(
    allocator: std.mem.Allocator,
    io: anytype,
    arg: shared.Arg,
    passwd_file: zsw.File,
    system: zsw.System,
) subcommands.Error!u8 {
    const z = shared.tracy.traceNamed(@src(), "other user");
    defer z.end();
    z.addText(arg.raw);

    log.info("otherUser called, arg='{s}'", .{arg.raw});

    var passwd_buffered_reader = std.io.bufferedReader(passwd_file.reader());
    const passwd_reader = passwd_buffered_reader.reader();

    var line_buffer = std.ArrayList(u8).init(allocator);
    defer if (shared.free_on_close) line_buffer.deinit();

    while (true) {
        passwd_reader.readUntilDelimiterArrayList(&line_buffer, '\n', std.math.maxInt(usize)) catch |err| switch (err) {
            error.EndOfStream => break,
            else => return shared.printError(@This(), io, "unable to read '/etc/passwd'"),
        };

        var column_iter = std.mem.tokenize(u8, line_buffer.items, ":");

        const user_name = column_iter.next() orelse
            return shared.printError(@This(), io, "format of '/etc/passwd' is invalid");

        if (!std.mem.eql(u8, user_name, arg.raw)) {
            log.debug("found non-matching user: {s}", .{user_name});
            continue;
        }

        log.debug("found matching user: {s}", .{user_name});

        // skip password stand-in
        _ = column_iter.next() orelse
            return shared.printError(@This(), io, "format of '/etc/passwd' is invalid");

        // skip user id
        _ = column_iter.next() orelse
            return shared.printError(@This(), io, "format of '/etc/passwd' is invalid");

        const primary_group_id_slice = column_iter.next() orelse
            return shared.printError(@This(), io, "format of '/etc/passwd' is invalid");

        if (std.fmt.parseUnsigned(std.os.uid_t, primary_group_id_slice, 10)) |primary_group_id| {
            return printGroups(allocator, user_name, primary_group_id, io, system);
        } else |_| {
            return shared.printError(@This(), io, "format of '/etc/passwd' is invalid");
        }
    }

    // BUG FIX: this path looks up a *named* user, but the original reused the
    // currentUser message about the "current effective uid", which was
    // misleading when the requested user simply does not exist.
    return shared.printError(@This(), io, "'/etc/passwd' does not contain the requested user");
}

/// Prints, space separated on one line, every group in `/etc/group` whose gid
/// equals `primary_group_id` or whose member list contains `user_name`.
/// A group matching by gid is printed once and its member list is not checked,
/// preventing a duplicate when the user is also listed as a member.
fn printGroups(
    allocator: std.mem.Allocator,
    user_name: []const u8,
    primary_group_id: std.os.uid_t,
    io: anytype,
    system: zsw.System,
) !u8 {
    const z = shared.tracy.traceNamed(@src(), "print groups");
    defer z.end();
    z.addText(user_name);

    log.info("printGroups called, user_name='{s}', primary_group_id={}", .{ user_name, primary_group_id });

    var group_file = system.cwd().openFile("/etc/group", .{}) catch {
        return shared.printError(@This(), io, "unable to read '/etc/group'");
    };
    defer if (shared.free_on_close) group_file.close();

    var group_buffered_reader = std.io.bufferedReader(group_file.reader());
    const group_reader = group_buffered_reader.reader();

    var line_buffer = std.ArrayList(u8).init(allocator);
    defer if (shared.free_on_close) line_buffer.deinit();

    // Tracks whether a group name has already been printed, so separators are
    // emitted only *between* names.
    var first = true;

    while (true) {
        group_reader.readUntilDelimiterArrayList(&line_buffer, '\n', std.math.maxInt(usize)) catch |err| switch (err) {
            error.EndOfStream => break,
            else => return shared.printError(@This(), io, "unable to read '/etc/group'"),
        };

        // group format: name:passwd:gid:member,member,...
        var column_iter = std.mem.tokenize(u8, line_buffer.items, ":");

        const group_name = column_iter.next() orelse
            return shared.printError(@This(), io, "format of '/etc/group' is invalid");

        // skip password stand-in
        _ = column_iter.next() orelse
            return shared.printError(@This(), io, "format of '/etc/group' is invalid");

        const group_id_slice = column_iter.next() orelse
            return shared.printError(@This(), io, "format of '/etc/group' is invalid");

        if (std.fmt.parseUnsigned(std.os.uid_t, group_id_slice, 10)) |group_id| {
            if (group_id == primary_group_id) {
                if (!first) {
                    io.stdout.writeByte(' ') catch |err| shared.unableToWriteTo("stdout", io, err);
                }
                io.stdout.writeAll(group_name) catch |err| shared.unableToWriteTo("stdout", io, err);
                first = false;
                continue;
            }
        } else |_| {
            return shared.printError(@This(), io, "format of '/etc/group' is invalid");
        }

        const members = column_iter.next() orelse continue;
        if (members.len == 0) continue;

        var member_iter = std.mem.tokenize(u8, members, ",");
        while (member_iter.next()) |member| {
            if (std.mem.eql(u8, member, user_name)) {
                if (!first) {
                    io.stdout.writeByte(' ') catch |err| shared.unableToWriteTo("stdout", io, err);
                }
                io.stdout.writeAll(group_name) catch |err| shared.unableToWriteTo("stdout", io, err);
                first = false;
                break;
            }
        }
    }

    io.stdout.writeByte('\n') catch |err| shared.unableToWriteTo("stdout", io, err);

    return 0;
}

test "groups no args" {
    var test_system = try TestSystem.init();
    defer test_system.deinit();

    try std.testing.expectEqual(
        @as(u8, 0),
        try subcommands.testExecute(
            @This(),
            &.{},
            .{
                .system = test_system.backend.system(),
            },
        ),
    );
}

test "groups help" {
    try subcommands.testHelp(@This());
}

test "groups version" {
    try subcommands.testVersion(@This());
}

/// In-memory zsw backend providing fake /etc/passwd and /etc/group files and a
/// fixed effective uid (1000), so the subcommand can be tested hermetically.
const TestSystem = struct {
    backend: BackendType,

    const BackendType = zsw.Backend(.{
        .fallback_to_host = true,
        .file_system = true,
        .linux_user_group = true,
    });

    pub fn init() !TestSystem {
        var file_system = blk: {
            var file_system = try zsw.FileSystemDescription.init(std.testing.allocator);
            errdefer file_system.deinit();

            const etc = try file_system.root.addDirectory("etc");

            try etc.addFile(
                "passwd",
                \\root:x:0:0::/root:/bin/bash
                \\bin:x:1:1::/:/usr/bin/nologin
                \\daemon:x:2:2::/:/usr/bin/nologin
                \\mail:x:8:12::/var/spool/mail:/usr/bin/nologin
                \\ftp:x:14:11::/srv/ftp:/usr/bin/nologin
                \\http:x:33:33::/srv/http:/usr/bin/nologin
                \\nobody:x:65534:65534:Nobody:/:/usr/bin/nologin
                \\user:x:1000:1001:User:/home/user:/bin/bash
                \\
                ,
            );

            try etc.addFile(
                "group",
                \\root:x:0:root
                \\sys:x:3:bin,user
                \\mem:x:8:
                \\ftp:x:11:
                \\mail:x:12:
                \\log:x:19:
                \\smmsp:x:25:
                \\proc:x:26:
                \\games:x:50:
                \\lock:x:54:
                \\network:x:90:
                \\floppy:x:94:
                \\scanner:x:96:
                \\power:x:98:
                \\adm:x:999:daemon
                \\wheel:x:998:user
                \\utmp:x:997:
                \\audio:x:996:
                \\disk:x:995:
                \\input:x:994:
                \\kmem:x:993:
                \\kvm:x:992:
                \\lp:x:991:
                \\optical:x:990:
                \\render:x:989:
                \\sgx:x:988:
                \\storage:x:987:
                \\tty:x:5:
                \\uucp:x:986:
                \\video:x:985:
                \\users:x:984:user
                \\rfkill:x:982:user
                \\bin:x:1:daemon
                \\daemon:x:2:bin
                \\http:x:33:
                \\nobody:x:65534:
                \\user:x:1001:
                ,
            );

            break :blk file_system;
        };
        defer file_system.deinit();

        // Never mutated, so declare it const.
        const linux_user_group: zsw.LinuxUserGroupDescription = .{
            .initial_euid = 1000,
        };

        var backend = try BackendType.init(std.testing.allocator, .{
            .file_system = file_system,
            .linux_user_group = linux_user_group,
        });
        errdefer backend.deinit();

        return TestSystem{
            .backend = backend,
        };
    }

    pub fn deinit(self: *TestSystem) void {
        self.backend.deinit();
    }
};

comptime {
    std.testing.refAllDecls(@This());
}
src/subcommands/groups.zig
const std = @import("std");
const generate = @import("generator.zig").generate;
const path = std.fs.path;
const Builder = std.build.Builder;
const Step = std.build.Step;

/// build.zig integration for Vulkan binding generation. This step can be used to generate
/// Vulkan bindings at compiletime from vk.xml, by providing the path to vk.xml and the output
/// path relative to zig-cache. The final file can then be added to the project using
/// `std.build.Builder.addPackagePath`.
pub const GenerateStep = struct {
    step: Step,
    builder: *Builder,

    /// The path to vk.xml
    spec_path: []const u8,

    /// The full path where the final generated binding will be placed. When using this step,
    /// this path should be passed to addPackagePath.
    full_out_path: []const u8,

    /// Initialize a Vulkan generation step, for `builder`. `spec_path` is the path to
    /// vk.xml, relative to the project root. The generated bindings will be placed at
    /// `out_path`, which is relative to the root directory.
    /// Allocation failure here aborts the build (`catch unreachable`), which is the
    /// conventional behavior for build-script steps.
    pub fn init(builder: *Builder, spec_path: []const u8, out_path: []const u8) *GenerateStep {
        const self = builder.allocator.create(GenerateStep) catch unreachable;
        self.* = .{
            .step = Step.init(.Custom, "vulkan-generate", builder.allocator, make),
            .builder = builder,
            .spec_path = spec_path,
            .full_out_path = path.join(builder.allocator, &[_][]const u8{
                self.builder.build_root,
                out_path,
            }) catch unreachable,
        };
        return self;
    }

    /// Internal build function. This reads `vk.xml`, and passes it to `generate`, which then generates
    /// the final bindings. The resulting generated bindings are not formatted, which is why an ArrayList
    /// writer is passed instead of a file writer. This is then formatted into standard formatting
    /// by parsing it and rendering with `std.zig.parse` and `std.zig.render` respectively.
    fn make(step: *Step) !void {
        const self = @fieldParentPtr(GenerateStep, "step", step);
        const cwd = std.fs.cwd();

        var out_buffer = std.ArrayList(u8).init(self.builder.allocator);
        const spec = try cwd.readFileAlloc(self.builder.allocator, self.spec_path, std.math.maxInt(usize));
        try generate(self.builder.allocator, spec, out_buffer.writer());

        const tree = try std.zig.parse(self.builder.allocator, out_buffer.items);

        const dir = path.dirname(self.full_out_path).?;
        try cwd.makePath(dir);

        // FIX: the original used `catch unreachable` here, turning ordinary I/O
        // failures (permissions, disk full, ...) into illegal behavior, and it
        // never closed the created file, leaking the handle for the rest of the
        // build. Propagate the error (make already returns !void) and close on
        // scope exit.
        const output_file = try cwd.createFile(self.full_out_path, .{});
        defer output_file.close();
        _ = try std.zig.render(self.builder.allocator, output_file.outStream(), tree);
    }
};
render/lib/vulkan-zig/generator/vulkan/build-integration.zig
const std = @import("std");
const assert = std.debug.assert;
const fmt = std.fmt;
const io = std.io;
const math = std.math;
const mem = std.mem;

const Allocator = std.mem.Allocator;

const deflate_const = @import("deflate_const.zig");
const fast = @import("deflate_fast.zig");
const hm_bw = @import("huffman_bit_writer.zig");
const mu = @import("mem_utils.zig");
const token = @import("token.zig");

/// Compression level selector, mirroring zlib's -2..9 range (hence the
/// signed 5-bit tag type).
pub const Compression = enum(i5) {
    /// huffman_only disables Lempel-Ziv match searching and only performs Huffman
    /// entropy encoding. This mode is useful in compressing data that has
    /// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
    /// that lacks an entropy encoder. Compression gains are achieved when
    /// certain bytes in the input stream occur more frequently than others.
    ///
    /// Note that huffman_only produces a compressed output that is
    /// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
    /// continue to be able to decompress this output.
    huffman_only = -2,
    /// Same as level_6
    default_compression = -1,
    /// Does not attempt any compression; only adds the necessary DEFLATE framing.
    no_compression = 0,
    /// Prioritizes speed over output size, based on Snappy's LZ77-style encoder
    best_speed = 1,
    level_2 = 2,
    level_3 = 3,
    level_4 = 4,
    level_5 = 5,
    level_6 = 6,
    level_7 = 7,
    level_8 = 8,
    /// Prioritizes smaller output size over speed
    best_compression = 9,
};

// LZ77 sliding-window geometry: 2^15 = 32 KiB window, with window_mask used
// to wrap indices into the hash-chain tables.
const log_window_size = 15;
const window_size = 1 << log_window_size;
const window_mask = window_size - 1;

// The LZ77 step produces a sequence of literal tokens and <length, offset>
// pair tokens. The offset is also known as distance. The underlying wire
// format limits the range of lengths and offsets. For example, there are
// 256 legitimate lengths: those in the range [3, 258]. This package's
// compressor uses a higher minimum match length, enabling optimizations
// such as finding matches via 32-bit loads and compares.
// Length/offset limits for <length, offset> tokens (see the LZ77 note above).
const base_match_length = deflate_const.base_match_length; // The smallest match length per the RFC section 3.2.5
const min_match_length = 4; // The smallest match length that the compressor actually emits
const max_match_length = deflate_const.max_match_length;
const base_match_offset = deflate_const.base_match_offset; // The smallest match offset
const max_match_offset = deflate_const.max_match_offset; // The largest match offset

// Cap on the number of tokens collected into one flate block, purely to bound
// block size.
const max_flate_block_tokens = 1 << 14;
const max_store_block_size = deflate_const.max_store_block_size;

// Hash-table geometry for the match finder.
const hash_bits = 17; // After 17 performance degrades
const hash_size = 1 << hash_bits;
const hash_mask = (1 << hash_bits) - 1;
const max_hash_offset = 1 << 24;

// Sentinel for fast_skip_hashshing meaning "never skip hashing".
const skip_never = math.maxInt(u32);

// Tuning knobs for one compression level.
// NOTE(review): "fast_skip_hashshing" looks like a typo for "fast_skip_hashing",
// but the field name is used consistently and may be referenced elsewhere in
// this file, so it is kept as-is.
const CompressionLevel = struct {
    good: u16,
    lazy: u16,
    nice: u16,
    chain: u16,
    fast_skip_hashshing: u32,
};

/// Maps a `Compression` selector to its match-finder tuning parameters.
fn levels(compression: Compression) CompressionLevel {
    return switch (compression) {
        // These modes bypass the lazy match finder entirely.
        .no_compression,
        .best_speed, // best_speed uses a custom algorithm; see deflate_fast.zig
        .huffman_only,
        => CompressionLevel{
            .good = 0,
            .lazy = 0,
            .nice = 0,
            .chain = 0,
            .fast_skip_hashshing = 0,
        },
        // For levels 2-3 we don't bother trying with lazy matches.
        .level_2 => CompressionLevel{
            .good = 4,
            .lazy = 0,
            .nice = 16,
            .chain = 8,
            .fast_skip_hashshing = 5,
        },
        .level_3 => CompressionLevel{
            .good = 4,
            .lazy = 0,
            .nice = 32,
            .chain = 32,
            .fast_skip_hashshing = 6,
        },
        // Levels 4-9 use increasingly more lazy matching and increasingly
        // stringent conditions for "good enough".
        .level_4 => CompressionLevel{
            .good = 4,
            .lazy = 4,
            .nice = 16,
            .chain = 16,
            .fast_skip_hashshing = skip_never,
        },
        .level_5 => CompressionLevel{
            .good = 8,
            .lazy = 16,
            .nice = 32,
            .chain = 32,
            .fast_skip_hashshing = skip_never,
        },
        .default_compression,
        .level_6,
        => CompressionLevel{
            .good = 8,
            .lazy = 16,
            .nice = 128,
            .chain = 128,
            .fast_skip_hashshing = skip_never,
        },
        .level_7 => CompressionLevel{
            .good = 8,
            .lazy = 32,
            .nice = 128,
            .chain = 256,
            .fast_skip_hashshing = skip_never,
        },
        .level_8 => CompressionLevel{
            .good = 32,
            .lazy = 128,
            .nice = 258,
            .chain = 1024,
            .fast_skip_hashshing = skip_never,
        },
        .best_compression => CompressionLevel{
            .good = 32,
            .lazy = 258,
            .nice = 258,
            .chain = 4096,
            .fast_skip_hashshing = skip_never,
        },
    };
}

// Returns the length of the common prefix of `a` and `b`, capped at `max`.
// Both slices must be at least `max` bytes long.
fn matchLen(a: []u8, b: []u8, max: u32) u32 {
    var n: u32 = 0;
    while (n < max) : (n += 1) {
        if (a[n] != b[n]) return n;
    }
    return max;
}

const hash_mul = 0x1e35a7bd;

// Hashes the first 4 bytes of `b` into hash_bits bits.
// The caller must ensure that b.len >= 4.
fn hash4(b: []u8) u32 {
    const word = @as(u32, b[3]) |
        @as(u32, b[2]) << 8 |
        @as(u32, b[1]) << 16 |
        @as(u32, b[0]) << 24;
    return (word *% hash_mul) >> (32 - hash_bits);
}

// Computes hash4-style hashes for every 4-byte window of `b`, writing them
// into `dst`, and returns the last rolling word (0 if `b` is too short).
fn bulkHash4(b: []u8, dst: []u32) u32 {
    if (b.len < min_match_length) return 0;

    var word = @as(u32, b[3]) |
        @as(u32, b[2]) << 8 |
        @as(u32, b[1]) << 16 |
        @as(u32, b[0]) << 24;
    dst[0] = (word *% hash_mul) >> (32 - hash_bits);

    const last = b.len - min_match_length + 1;
    var pos: u32 = 1;
    while (pos < last) : (pos += 1) {
        // Roll the 32-bit window forward by one byte.
        word = (word << 8) | @as(u32, b[pos + 3]);
        dst[pos] = (word *% hash_mul) >> (32 - hash_bits);
    }
    return word;
}

const CompressorOptions = struct {
    level: Compression = .default_compression,
    dictionary: ?[]const u8 = null,
};

/// Returns a new Compressor compressing data at the given level.
/// Following zlib, levels range from 1 (best_speed) to 9 (best_compression); /// higher levels typically run slower but compress more. Level 0 /// (no_compression) does not attempt any compression; it only adds the /// necessary DEFLATE framing. /// Level -1 (default_compression) uses the default compression level. /// Level -2 (huffman_only) will use Huffman compression only, giving /// a very fast compression for all types of input, but sacrificing considerable /// compression efficiency. /// /// `dictionary` is optional and initializes the new `Compressor` with a preset dictionary. /// The returned Compressor behaves as if the dictionary had been written to it without producing /// any compressed output. The compressed data written to hm_bw can only be decompressed by a /// Decompressor initialized with the same dictionary. /// /// The compressed data will be passed to the provided `writer`, see `writer()` and `write()`. pub fn compressor( allocator: Allocator, writer: anytype, options: CompressorOptions, ) !Compressor(@TypeOf(writer)) { return Compressor(@TypeOf(writer)).init(allocator, writer, options); } pub fn Compressor(comptime WriterType: anytype) type { return struct { const Self = @This(); /// A Writer takes data written to it and writes the compressed /// form of that data to an underlying writer. pub const Writer = io.Writer(*Self, Error, write); /// Returns a Writer that takes data written to it and writes the compressed /// form of that data to an underlying writer. 
pub fn writer(self: *Self) Writer { return .{ .context = self }; } pub const Error = WriterType.Error; allocator: Allocator, compression: Compression, compression_level: CompressionLevel, // Inner writer wrapped in a HuffmanBitWriter hm_bw: hm_bw.HuffmanBitWriter(WriterType) = undefined, bulk_hasher: fn ([]u8, []u32) u32, sync: bool, // requesting flush best_speed_enc: *fast.DeflateFast, // Encoder for best_speed // Input hash chains // hash_head[hashValue] contains the largest inputIndex with the specified hash value // If hash_head[hashValue] is within the current window, then // hash_prev[hash_head[hashValue] & window_mask] contains the previous index // with the same hash value. chain_head: u32, hash_head: []u32, // [hash_size]u32, hash_prev: []u32, // [window_size]u32, hash_offset: u32, // input window: unprocessed data is window[index..window_end] index: u32, window: []u8, window_end: usize, block_start: usize, // window index where current tokens start byte_available: bool, // if true, still need to process window[index-1]. // queued output tokens tokens: []token.Token, tokens_count: u16, // deflate state length: u32, offset: u32, hash: u32, max_insert_index: usize, err: bool, // hash_match must be able to contain hashes for the maximum match length. hash_match: []u32, // [max_match_length - 1]u32, // dictionary dictionary: ?[]const u8, fn fillDeflate(self: *Self, b: []const u8) u32 { if (self.index >= 2 * window_size - (min_match_length + max_match_length)) { // shift the window by window_size mem.copy(u8, self.window, self.window[window_size .. 
// NOTE(review): the lines below are the tail of a function that begins before
// this chunk. From what is visible it is the window-slide branch: the second
// half of the double-sized window has just been copied down, and every stored
// position is now rebased by window_size.
2 * window_size]);
self.index -= window_size;
self.window_end -= window_size;
// Rebase the pending-block start as well; if it pointed into the discarded
// first half, park it at maxInt(u32) to mark "no pending uncompressed data".
if (self.block_start >= window_size) {
    self.block_start -= window_size;
} else {
    self.block_start = math.maxInt(u32);
}
self.hash_offset += window_size;
if (self.hash_offset > max_hash_offset) {
    // hash_offset grew too large: pull it back to 1 and shift every stored
    // chain link by the same delta so absolute positions stay consistent.
    var delta = self.hash_offset - 1;
    self.hash_offset -= delta;
    self.chain_head -|= delta;
    // Iterate over slices instead of arrays to avoid copying
    // the entire table onto the stack (https://golang.org/issue/18625).
    for (self.hash_prev) |v, i| {
        if (v > delta) {
            self.hash_prev[i] = @intCast(u32, v - delta);
        } else {
            // Entries at or below delta fell out of the window; zero them.
            self.hash_prev[i] = 0;
        }
    }
    for (self.hash_head) |v, i| {
        if (v > delta) {
            self.hash_head[i] = @intCast(u32, v - delta);
        } else {
            self.hash_head[i] = 0;
        }
    }
}
}
// Copy as much of b as fits behind window_end and report how much was taken.
var n = mu.copy(self.window[self.window_end..], b);
self.window_end += n;
return @intCast(u32, n);
}

// Emits the accumulated tokens as one (non-final) deflate block ending at
// `index`. The uncompressed window slice is passed along when available so
// the bit writer can fall back to a stored block if that encoding is smaller.
// Advances block_start to `index`; does nothing when index == 0.
fn writeBlock(self: *Self, tokens: []token.Token, index: usize) !void {
    if (index > 0) {
        var window: ?[]u8 = null;
        if (self.block_start <= index) {
            window = self.window[self.block_start..index];
        }
        self.block_start = index;
        try self.hm_bw.writeBlock(tokens, false, window);
        return;
    }
    return;
}

// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a reset.
fn fillWindow(self: *Self, in_b: []const u8) void {
    var b = in_b;

    // Do not fill window if we are in store-only mode (look at the fill() function to see
    // Compressions which use fillStore() instead of fillDeflate()).
    if (self.compression == .no_compression or
        self.compression == .huffman_only or
        self.compression == .best_speed)
    {
        return;
    }

    // fillWindow() must not be called with stale data
    assert(self.index == 0 and self.window_end == 0);

    // If we are given too much, cut it: only the last window_size bytes of a
    // dictionary can ever be referenced by a match.
    if (b.len > window_size) {
        b = b[b.len - window_size ..];
    }
    // Add all to window.
    mem.copy(u8, self.window, b);

    var n = b.len;

    // Calculate 256 hashes at the time (more L1 cache hits)
    var loops = (n + 256 - min_match_length) / 256;
    var j: usize = 0;
    while (j < loops) : (j += 1) {
        var index = j * 256;
        // Overlap each chunk by min_match_length - 1 bytes so hashes that
        // straddle the 256-byte boundary are still computed.
        var end = index + 256 + min_match_length - 1;
        if (end > n) {
            end = n;
        }
        var to_check = self.window[index..end];
        var dst_size = to_check.len - min_match_length + 1;

        if (dst_size <= 0) {
            continue;
        }

        var dst = self.hash_match[0..dst_size];
        _ = self.bulk_hasher(to_check, dst);
        var new_h: u32 = 0;
        for (dst) |val, i| {
            var di = i + index;
            new_h = val;
            var hh = &self.hash_head[new_h & hash_mask];
            // Get previous value with the same hash.
            // Our chain should point to the previous value.
            self.hash_prev[di & window_mask] = hh.*;
            // Set the head of the hash chain to us.
            hh.* = @intCast(u32, di + self.hash_offset);
        }
        // Remember the last computed hash so deflate() can continue from it.
        self.hash = new_h;
    }
    // Update window information.
    self.window_end = n;
    self.index = @intCast(u32, n);
}

// Result of a hash-chain search: match length/offset plus a validity flag.
const Match = struct {
    length: u32,
    offset: u32,
    ok: bool,
};

// Try to find a match starting at pos whose length is greater than prev_length.
// We only look at self.compression_level.chain possibilities before giving up.
fn findMatch(
    self: *Self,
    pos: u32,
    prev_head: u32,
    prev_length: u32,
    lookahead: u32,
) Match {
    var length: u32 = 0;
    var offset: u32 = 0;
    var ok: bool = false;

    // A match can never be longer than the remaining lookahead.
    var min_match_look: u32 = max_match_length;
    if (lookahead < min_match_look) {
        min_match_look = lookahead;
    }

    var win = self.window[0 .. pos + min_match_look];

    // We quit when we get a match that's at least nice long
    var nice = win.len - pos;
    if (self.compression_level.nice < nice) {
        nice = self.compression_level.nice;
    }

    // If we've got a match that's good enough, only look in 1/4 the chain.
    var tries = self.compression_level.chain;
    length = prev_length;
    if (length >= self.compression_level.good) {
        tries >>= 2;
    }

    // w_end is the byte that a candidate must match at position pos + length
    // for it to possibly beat the current best; a cheap pre-filter.
    var w_end = win[pos + length];
    var w_pos = win[pos..];
    var min_index = pos -| window_size;

    var i = prev_head;
    while (tries > 0) : (tries -= 1) {
        if (w_end == win[i + length]) {
            var n = matchLen(win[i..], w_pos, min_match_look);
            // Prefer longer matches; very short matches are only kept when
            // the offset is small (<= 4096) since far/short matches encode poorly.
            if (n > length and (n > min_match_length or pos - i <= 4096)) {
                length = n;
                offset = pos - i;
                ok = true;
                if (n >= nice) {
                    // The match is good enough that we don't try to find a better one.
                    break;
                }
                w_end = win[pos + n];
            }
        }
        if (i == min_index) {
            // hash_prev[i & window_mask] has already been overwritten, so stop now.
            break;
        }

        if (@intCast(u32, self.hash_prev[i & window_mask]) < self.hash_offset) {
            break;
        }
        i = @intCast(u32, self.hash_prev[i & window_mask]) - self.hash_offset;
        if (i < min_index) {
            break;
        }
    }

    return Match{ .length = length, .offset = offset, .ok = ok };
}

// Writes buf as a raw (uncompressed) stored block, header included.
fn writeStoredBlock(self: *Self, buf: []u8) !void {
    try self.hm_bw.writeStoredHeader(buf.len, false);
    try self.hm_bw.writeBytes(buf);
}

// encSpeed will compress and store the currently added data,
// if enough has been accumulated or we at the end of the stream.
fn encSpeed(self: *Self) !void {
    // We only compress if we have max_store_block_size.
    if (self.window_end < max_store_block_size) {
        if (!self.sync) {
            return;
        }

        // Handle small sizes.
        if (self.window_end < 128) {
            switch (self.window_end) {
                0 => return,
                // Tiny inputs are cheaper stored verbatim than Huffman coded.
                1...16 => {
                    try self.writeStoredBlock(self.window[0..self.window_end]);
                },
                else => {
                    try self.hm_bw.writeBlockHuff(false, self.window[0..self.window_end]);
                    self.err = self.hm_bw.err;
                },
            }
            self.window_end = 0;
            self.best_speed_enc.reset();
            return;
        }
    }
    // Encode the block.
    self.tokens_count = 0;
    self.best_speed_enc.encode(
        self.tokens,
        &self.tokens_count,
        self.window[0..self.window_end],
    );

    // If we removed less than 1/16th, Huffman compress the block.
    if (self.tokens_count > self.window_end - (self.window_end >> 4)) {
        try self.hm_bw.writeBlockHuff(false, self.window[0..self.window_end]);
    } else {
        try self.hm_bw.writeBlockDynamic(
            self.tokens[0..self.tokens_count],
            false,
            self.window[0..self.window_end],
        );
    }
    self.err = self.hm_bw.err;
    self.window_end = 0;
}

// Allocates and resets the state used by the hash-chain (deflate) matcher.
// Called from init() for all levels that use deflate() as their step.
fn initDeflate(self: *Self) !void {
    self.window = try self.allocator.alloc(u8, 2 * window_size);
    self.hash_offset = 1;
    self.tokens = try self.allocator.alloc(token.Token, max_flate_block_tokens);
    self.tokens_count = 0;
    mem.set(token.Token, self.tokens, 0);
    self.length = min_match_length - 1;
    self.offset = 0;
    self.byte_available = false;
    self.index = 0;
    self.hash = 0;
    self.chain_head = 0;
    self.bulk_hasher = bulkHash4;
}

// Core lazy-matching deflate loop: scans the window from self.index, finds
// matches via the hash chains, and emits literal/match tokens, flushing a
// block whenever the token buffer fills or (on sync) the lookahead empties.
fn deflate(self: *Self) !void {
    // Without sync we need a full lookahead margin before doing any work.
    if (self.window_end - self.index < min_match_length + max_match_length and !self.sync) {
        return;
    }

    self.max_insert_index = self.window_end -| (min_match_length - 1);
    if (self.index < self.max_insert_index) {
        self.hash = hash4(self.window[self.index .. self.index + min_match_length]);
    }

    while (true) {
        assert(self.index <= self.window_end);

        var lookahead = self.window_end -| self.index;
        if (lookahead < min_match_length + max_match_length) {
            if (!self.sync) {
                break;
            }
            assert(self.index <= self.window_end);

            if (lookahead == 0) {
                // Flush current output block if any.
                if (self.byte_available) {
                    // There is still one pending token that needs to be flushed
                    self.tokens[self.tokens_count] = token.literalToken(@intCast(u32, self.window[self.index - 1]));
                    self.tokens_count += 1;
                    self.byte_available = false;
                }
                if (self.tokens.len > 0) {
                    try self.writeBlock(self.tokens[0..self.tokens_count], self.index);
                    self.tokens_count = 0;
                }
                break;
            }
        }
        if (self.index < self.max_insert_index) {
            // Update the hash and link the current position into its chain.
            self.hash = hash4(self.window[self.index .. self.index + min_match_length]);
            var hh = &self.hash_head[self.hash & hash_mask];
            self.chain_head = @intCast(u32, hh.*);
            self.hash_prev[self.index & window_mask] = @intCast(u32, self.chain_head);
            hh.* = @intCast(u32, self.index + self.hash_offset);
        }
        var prev_length = self.length;
        var prev_offset = self.offset;
        self.length = min_match_length - 1;
        self.offset = 0;
        var min_index = self.index -| window_size;

        // Search the chain either greedily (fast levels) or lazily (levels
        // with fast_skip_hashshing == skip_never keep the previous match as
        // a candidate and only search while it can still be beaten).
        if (self.hash_offset <= self.chain_head and
            self.chain_head - self.hash_offset >= min_index and
            (self.compression_level.fast_skip_hashshing != skip_never and
            lookahead > min_match_length - 1 or
            self.compression_level.fast_skip_hashshing == skip_never and
            lookahead > prev_length and
            prev_length < self.compression_level.lazy))
        {
            {
                var fmatch = self.findMatch(
                    self.index,
                    self.chain_head -| self.hash_offset,
                    min_match_length - 1,
                    @intCast(u32, lookahead),
                );
                if (fmatch.ok) {
                    self.length = fmatch.length;
                    self.offset = fmatch.offset;
                }
            }
        }
        if (self.compression_level.fast_skip_hashshing != skip_never and
            self.length >= min_match_length or
            self.compression_level.fast_skip_hashshing == skip_never and
            prev_length >= min_match_length and
            self.length <= prev_length)
        {
            // There was a match at the previous step, and the current match is
            // not better. Output the previous match.
            if (self.compression_level.fast_skip_hashshing != skip_never) {
                self.tokens[self.tokens_count] = token.matchToken(@intCast(u32, self.length - base_match_length), @intCast(u32, self.offset - base_match_offset));
                self.tokens_count += 1;
            } else {
                self.tokens[self.tokens_count] = token.matchToken(
                    @intCast(u32, prev_length - base_match_length),
                    @intCast(u32, prev_offset -| base_match_offset),
                );
                self.tokens_count += 1;
            }
            // Insert in the hash table all strings up to the end of the match.
            // index and index-1 are already inserted. If there is not enough
            // lookahead, the last two strings are not inserted into the hash
            // table.
            if (self.length <= self.compression_level.fast_skip_hashshing) {
                var newIndex: u32 = 0;
                if (self.compression_level.fast_skip_hashshing != skip_never) {
                    newIndex = self.index + self.length;
                } else {
                    newIndex = self.index + prev_length - 1;
                }
                var index = self.index;
                index += 1;
                while (index < newIndex) : (index += 1) {
                    if (index < self.max_insert_index) {
                        self.hash = hash4(self.window[index .. index + min_match_length]);
                        // Get previous value with the same hash.
                        // Our chain should point to the previous value.
                        var hh = &self.hash_head[self.hash & hash_mask];
                        self.hash_prev[index & window_mask] = hh.*;
                        // Set the head of the hash chain to us.
                        hh.* = @intCast(u32, index + self.hash_offset);
                    }
                }
                self.index = index;
                if (self.compression_level.fast_skip_hashshing == skip_never) {
                    self.byte_available = false;
                    self.length = min_match_length - 1;
                }
            } else {
                // For matches this long, we don't bother inserting each individual
                // item into the table.
                self.index += self.length;
                if (self.index < self.max_insert_index) {
                    self.hash = hash4(self.window[self.index .. self.index + min_match_length]);
                }
            }
            if (self.tokens_count == max_flate_block_tokens) {
                // The block includes the current character
                try self.writeBlock(self.tokens[0..self.tokens_count], self.index);
                self.tokens_count = 0;
            }
        } else {
            // No (better) match: emit a literal. In lazy mode the literal for
            // the previous position is emitted one step late (byte_available).
            if (self.compression_level.fast_skip_hashshing != skip_never or self.byte_available) {
                var i = self.index -| 1;
                if (self.compression_level.fast_skip_hashshing != skip_never) {
                    i = self.index;
                }
                self.tokens[self.tokens_count] = token.literalToken(@intCast(u32, self.window[i]));
                self.tokens_count += 1;
                if (self.tokens_count == max_flate_block_tokens) {
                    try self.writeBlock(self.tokens[0..self.tokens_count], i + 1);
                    self.tokens_count = 0;
                }
            }
            self.index += 1;
            if (self.compression_level.fast_skip_hashshing == skip_never) {
                self.byte_available = true;
            }
        }
    }
}

// Store-mode fill: append input bytes directly to the staging window.
fn fillStore(self: *Self, b: []const u8) u32 {
    var n = mu.copy(self.window[self.window_end..], b);
    self.window_end += n;
    return @intCast(u32, n);
}

// Store-mode step: flush the window as a stored block when it is full or
// when a sync/flush was requested.
fn store(self: *Self) !void {
    if (self.window_end > 0 and (self.window_end == max_store_block_size or self.sync)) {
        try self.writeStoredBlock(self.window[0..self.window_end]);
        self.window_end = 0;
    }
}

// storeHuff compresses and stores the currently added data
// when the self.window is full or we are at the end of the stream.
fn storeHuff(self: *Self) !void {
    if (self.window_end < self.window.len and !self.sync or self.window_end == 0) {
        return;
    }
    try self.hm_bw.writeBlockHuff(false, self.window[0..self.window_end]);
    self.err = self.hm_bw.err;
    self.window_end = 0;
}

// Total number of bytes the underlying bit writer has emitted so far.
pub fn bytesWritten(self: *Self) usize {
    return self.hm_bw.bytes_written;
}

/// Writes the compressed form of `input` to the underlying writer.
pub fn write(self: *Self, input: []const u8) !usize {
    var buf = input;

    // writes data to hm_bw, which will eventually write the
    // compressed form of data to its underlying writer.
    while (buf.len > 0) {
        // Compress whatever is already buffered, then take more input.
        try self.step();
        var filled = self.fill(buf);
        buf = buf[filled..];
    }

    return input.len;
}

/// Flushes any pending data to the underlying writer.
/// It is useful mainly in compressed network protocols, to ensure that
/// a remote reader has enough data to reconstruct a packet.
/// Flush does not return until the data has been written.
/// Calling `flush()` when there is no pending data still causes the Writer
/// to emit a sync marker of at least 4 bytes.
/// If the underlying writer returns an error, `flush()` returns that error.
///
/// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
pub fn flush(self: *Self) !void {
    self.sync = true;
    try self.step();
    try self.hm_bw.writeStoredHeader(0, false);
    try self.hm_bw.flush();
    self.sync = false;
    return;
}

// Dispatches one compression step to the strategy for the current level.
fn step(self: *Self) !void {
    switch (self.compression) {
        .no_compression => return self.store(),
        .huffman_only => return self.storeHuff(),
        .best_speed => return self.encSpeed(),
        .default_compression,
        .level_2,
        .level_3,
        .level_4,
        .level_5,
        .level_6,
        .level_7,
        .level_8,
        .best_compression,
        => return self.deflate(),
    }
}

// Dispatches input buffering to the strategy for the current level.
fn fill(self: *Self, b: []const u8) u32 {
    switch (self.compression) {
        .no_compression => return self.fillStore(b),
        .huffman_only => return self.fillStore(b),
        .best_speed => return self.fillStore(b),
        .default_compression,
        .level_2,
        .level_3,
        .level_4,
        .level_5,
        .level_6,
        .level_7,
        .level_8,
        .best_compression,
        => return self.fillDeflate(b),
    }
}

// Builds a compressor for the requested level, allocating the hash tables,
// window, and token buffer, and priming the window with the preset
// dictionary when one was supplied.
fn init(
    allocator: Allocator,
    in_writer: WriterType,
    options: CompressorOptions,
) !Self {
    var s = Self{
        .allocator = undefined,
        .compression = undefined,
        .compression_level = undefined,
        .hm_bw = undefined, // HuffmanBitWriter
        .bulk_hasher = undefined,
        .sync = false,
        .best_speed_enc = undefined, // Best speed encoder
        .chain_head = 0,
        .hash_head = undefined,
        .hash_prev = undefined, // previous hash
        .hash_offset = 0,
        .index = 0,
        .window = undefined,
        .window_end = 0,
        .block_start = 0,
        .byte_available = false,
        .tokens = undefined,
        .tokens_count = 0,
        .length = 0,
        .offset = 0,
        .hash = 0,
        .max_insert_index = 0,
        .err = false, // Error
        .hash_match = undefined,
        .dictionary = options.dictionary,
    };

    s.hm_bw = try hm_bw.huffmanBitWriter(allocator, in_writer);
    s.allocator = allocator;
    s.hash_head = try allocator.alloc(u32, hash_size);
    s.hash_prev = try allocator.alloc(u32, window_size);
    s.hash_match = try allocator.alloc(u32, max_match_length - 1);
    mem.set(u32, s.hash_head, 0);
    mem.set(u32, s.hash_prev, 0);
    mem.set(u32, s.hash_match, 0);

    switch (options.level) {
        .no_compression => {
            s.compression = options.level;
            s.compression_level = levels(options.level);
            s.window = try allocator.alloc(u8, max_store_block_size);
            s.tokens = try allocator.alloc(token.Token, 0);
        },
        .huffman_only => {
            s.compression = options.level;
            s.compression_level = levels(options.level);
            s.window = try allocator.alloc(u8, max_store_block_size);
            s.tokens = try allocator.alloc(token.Token, 0);
        },
        .best_speed => {
            s.compression = options.level;
            s.compression_level = levels(options.level);
            s.window = try allocator.alloc(u8, max_store_block_size);
            s.tokens = try allocator.alloc(token.Token, max_store_block_size);
            s.best_speed_enc = try allocator.create(fast.DeflateFast);
            s.best_speed_enc.* = fast.deflateFast();
            try s.best_speed_enc.init(allocator);
        },
        .default_compression => {
            // Default maps to level 6, matching zlib's default.
            s.compression = .level_6;
            s.compression_level = levels(.level_6);
            try s.initDeflate();
            if (options.dictionary != null) {
                s.fillWindow(options.dictionary.?);
            }
        },
        .level_2,
        .level_3,
        .level_4,
        .level_5,
        .level_6,
        .level_7,
        .level_8,
        .best_compression,
        => {
            s.compression = options.level;
            s.compression_level = levels(options.level);
            try s.initDeflate();
            if (options.dictionary != null) {
                s.fillWindow(options.dictionary.?);
            }
        },
    }
    return s;
}

/// Release all allocated memory.
/// Frees every buffer owned by this compressor and tears down the underlying
/// Huffman bit writer. The best-speed encoder exists only for `.best_speed`,
/// so it is destroyed only in that mode. The compressor must not be used
/// after this call.
pub fn deinit(self: *Self) void {
    self.hm_bw.deinit();
    if (self.compression == .best_speed) {
        self.best_speed_enc.deinit();
        self.allocator.destroy(self.best_speed_enc);
    }
    self.allocator.free(self.hash_match);
    self.allocator.free(self.hash_prev);
    self.allocator.free(self.hash_head);
    self.allocator.free(self.tokens);
    self.allocator.free(self.window);
}

/// Reset discards the inner writer's state and replace the inner writer with new_writer.
/// new_writer must be of the same type as the previous writer.
pub fn reset(self: *Self, new_writer: WriterType) void {
    self.hm_bw.reset(new_writer);
    self.sync = false;
    switch (self.compression) {
        // Store-only mode keeps nothing but the staging window.
        .no_compression => self.window_end = 0,
        // Best speed owns a dedicated encoder; clear it along with the
        // window and any staged tokens.
        .best_speed => {
            self.window_end = 0;
            self.tokens_count = 0;
            self.best_speed_enc.reset();
        },
        // All remaining modes rebuild the full hash-chain state and, when a
        // preset dictionary was supplied, prime the window with it again.
        .huffman_only,
        .default_compression,
        .level_2,
        .level_3,
        .level_4,
        .level_5,
        .level_6,
        .level_7,
        .level_8,
        .best_compression,
        => {
            mem.set(u32, self.hash_head, 0);
            mem.set(u32, self.hash_prev, 0);
            self.chain_head = 0;
            self.hash_offset = 1;
            self.hash = 0;
            self.index = 0;
            self.max_insert_index = 0;
            self.window_end = 0;
            self.block_start = 0;
            self.byte_available = false;
            self.tokens_count = 0;
            self.length = min_match_length - 1;
            self.offset = 0;
            if (self.dictionary != null) {
                self.fillWindow(self.dictionary.?);
            }
        },
    }
}

/// Writes any pending data to the underlying writer.
pub fn close(self: *Self) !void {
    // Close emits one final step with sync set, then the final (empty)
    // stored block with the "last block" bit set.
    self.sync = true;
    try self.step();
    try self.hm_bw.writeStoredHeader(0, true);
    try self.hm_bw.flush();
    return;
}
};
}

// tests
const expect = std.testing.expect;
const testing = std.testing;
const ArrayList = std.ArrayList;

// One golden-output case: `in` compressed at `level` must produce exactly `out`.
const DeflateTest = struct {
    in: []const u8,
    level: Compression,
    out: []const u8,
};

var deflate_tests = [_]DeflateTest{
    // Level 0
    .{
        .in = &[_]u8{},
        .level = .no_compression,
        .out = &[_]u8{ 1, 0, 0, 255, 255 },
    },
    // Level -1
    .{
        .in = &[_]u8{0x11},
        .level = .default_compression,
        .out = &[_]u8{ 18, 4, 4, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{0x11},
        .level = .level_6,
        .out = &[_]u8{ 18, 4, 4, 0, 0, 255, 255 },
    },
    // Level 4
    .{
        .in = &[_]u8{0x11},
        .level = .level_4,
        .out = &[_]u8{ 18, 4, 4, 0, 0, 255, 255 },
    },
    // Level 0
    .{
        .in = &[_]u8{0x11},
        .level = .no_compression,
        .out = &[_]u8{ 0, 1, 0, 254, 255, 17, 1, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{ 0x11, 0x12 },
        .level = .no_compression,
        .out = &[_]u8{ 0, 2, 0, 253, 255, 17, 18, 1, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11 },
        .level = .no_compression,
        .out = &[_]u8{ 0, 8, 0, 247, 255, 17, 17, 17, 17, 17, 17, 17, 17, 1, 0, 0, 255, 255 },
    },
    // Level 2
    .{
        .in = &[_]u8{},
        .level = .level_2,
        .out = &[_]u8{ 1, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{0x11},
        .level = .level_2,
        .out = &[_]u8{ 18, 4, 4, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{ 0x11, 0x12 },
        .level = .level_2,
        .out = &[_]u8{ 18, 20, 2, 4, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11 },
        .level = .level_2,
        .out = &[_]u8{ 18, 132, 2, 64, 0, 0, 0, 255, 255 },
    },
    // Level 9
    .{
        .in = &[_]u8{},
        .level = .best_compression,
        .out = &[_]u8{ 1, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{0x11},
        .level = .best_compression,
        .out = &[_]u8{ 18, 4, 4, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{ 0x11, 0x12 },
        .level = .best_compression,
        .out = &[_]u8{ 18, 20, 2, 4, 0, 0, 255, 255 },
    },
    .{
        .in = &[_]u8{ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11 },
        .level = .best_compression,
        .out = &[_]u8{ 18, 132, 2, 64, 0, 0, 0, 255, 255 },
    },
};

test "deflate" {
    // Compress each fixture end-to-end and compare against the golden bytes.
    for (deflate_tests) |dt| {
        var output = ArrayList(u8).init(testing.allocator);
        defer output.deinit();

        var comp = try compressor(testing.allocator, output.writer(), .{ .level = dt.level });
        _ = try comp.write(dt.in);
        try comp.close();
        comp.deinit();

        try expect(mem.eql(u8, output.items, dt.out));
    }
}

test "bulkHash4" {
    // Cross-check bulkHash4 against the scalar hash4 on every window position.
    for (deflate_tests) |x| {
        if (x.out.len < min_match_length) {
            continue;
        }
        // double the test data
        var out = try testing.allocator.alloc(u8, x.out.len * 2);
        defer testing.allocator.free(out);
        mem.copy(u8, out[0..x.out.len], x.out);
        mem.copy(u8, out[x.out.len..], x.out);

        var j: usize = 4;
        while (j < out.len) : (j += 1) {
            var y = out[0..j];

            var dst = try testing.allocator.alloc(u32, y.len - min_match_length + 1);
            defer testing.allocator.free(dst);

            _ = bulkHash4(y, dst);
            for (dst) |got, i| {
                var want = hash4(y[i..]);
                try expect(got == want);
            }
        }
    }
}
lib/std/compress/deflate/compressor.zig
// Size of one page in bytes. The alignment helpers below rely on this being
// a power of two (the mask trick `~PAGE_SIZE +% 1` == -PAGE_SIZE only clears
// the low bits for power-of-two sizes).
pub const PAGE_SIZE: usize = 4096;

////
// Return address as either usize or a pointer of type T.
//
// Arguments:
//     T: The desired output type.
//     address: Address to be returned.
//
// Returns:
//     The given address as type T (usize or a pointer).
//
fn intOrPtr(comptime T: type, address: usize) T {
    return if (T == usize) address else @intToPtr(T, address);
}

////
// Return address as an usize.
//
// Arguments:
//     address: Address to be returned.
//
// Returns:
//     The given address as type usize.
//
fn int(address: var) usize {
    return if (@typeOf(address) == usize) address else @ptrToInt(address);
}

////
// Page-align an address downward.
//
// Arguments:
//     address: Address to align (usize or any pointer type).
//
// Returns:
//     The aligned address, with the same type as the input.
//
pub fn pageBase(address: var) @typeOf(address) {
    // (~PAGE_SIZE +% 1) is the two's-complement negation of PAGE_SIZE,
    // i.e. a mask with the low log2(PAGE_SIZE) bits cleared.
    const result = int(address) & (~PAGE_SIZE +% 1);
    return intOrPtr(@typeOf(address), result);
}

////
// Page-align an address upward.
//
// Arguments:
//     address: Address to align (usize or any pointer type).
//
// Returns:
//     The aligned address, with the same type as the input.
//
pub fn pageAlign(address: var) @typeOf(address) {
    // Round up by adding PAGE_SIZE - 1 before masking down.
    const result = (int(address) + PAGE_SIZE - 1) & (~PAGE_SIZE +% 1);
    return intOrPtr(@typeOf(address), result);
}

////
// Halt the CPU.
//
// The loop is required: hlt returns on the next interrupt, so we re-halt
// forever to satisfy the noreturn contract.
//
pub inline fn hlt() noreturn {
    while (true) {
        asm volatile ("hlt");
    }
}

////
// Disable interrupts.
//
pub inline fn cli() void {
    asm volatile ("cli");
}

////
// Enable interrupts.
//
pub inline fn sti() void {
    asm volatile ("sti");
}

////
// Completely stop the computer.
//
// Interrupts are disabled first so the subsequent hlt never wakes up.
//
pub inline fn hang() noreturn {
    cli();
    hlt();
}

////
// Load a new Interrupt Descriptor Table.
//
// Arguments:
//     idtr: Address of the IDTR register.
//
pub inline fn lidt(idtr: usize) void {
    asm volatile ("lidt (%[idtr])"
        :
        : [idtr] "r" (idtr)
    );
}

////
// Load a new Task Register.
//
// Arguments:
//     desc: Segment selector of the TSS.
//
pub inline fn ltr(desc: u16) void {
    asm volatile ("ltr %[desc]"
        :
        : [desc] "r" (desc)
    );
}

////
// Invalidate the TLB entry associated with the given virtual address.
//
// Arguments:
//     v_addr: Virtual address to invalidate.
//
pub inline fn invlpg(v_addr: usize) void {
    // "memory" clobber keeps the compiler from reordering accesses across
    // the TLB invalidation.
    asm volatile ("invlpg (%[v_addr])"
        :
        : [v_addr] "r" (v_addr)
        : "memory"
    );
}

////
// Read the CR2 control register.
//
// CR2 holds the faulting linear address after a page fault.
//
pub inline fn readCR2() usize {
    return asm volatile ("mov %%cr2, %[result]"
        : [result] "=r" (-> usize)
    );
}

////
// Write the CR3 control register.
//
// Arguments:
//     pd: Physical address of the page directory to activate.
//
pub inline fn writeCR3(pd: usize) void {
    asm volatile ("mov %[pd], %%cr3"
        :
        : [pd] "r" (pd)
    );
}

////
// Read a byte from a port.
//
// Arguments:
//     port: Port from where to read.
//
// Returns:
//     The read byte.
//
pub inline fn inb(port: u16) u8 {
    return asm volatile ("inb %[port], %[result]"
        : [result] "={al}" (-> u8)
        : [port] "N{dx}" (port)
    );
}

////
// Write a byte on a port.
//
// Arguments:
//     port: Port where to write the value.
//     value: Value to be written.
//
pub inline fn outb(port: u16, value: u8) void {
    asm volatile ("outb %[value], %[port]"
        :
        : [value] "{al}" (value),
          [port] "N{dx}" (port)
    );
}
kernel/x86.zig
//! Represents a single Mach-O object file (MH_OBJECT) parsed from disk:
//! its load commands, symbol/string tables, and data-in-code entries.
const Object = @This();

const std = @import("std");
const assert = std.debug.assert;
const fs = std.fs;
const io = std.io;
const log = std.log.scoped(.object);
const macho = std.macho;
const mem = std.mem;

const Allocator = mem.Allocator;
const parseName = @import("Zld.zig").parseName;

usingnamespace @import("commands.zig");

allocator: *Allocator,
file: fs.File,
// Owned copy of the object's name; freed in deinit.
name: []u8,
// Optional archive member name when the object came from a static archive.
ar_name: ?[]u8 = null,

header: macho.mach_header_64,
load_commands: std.ArrayListUnmanaged(LoadCommand) = .{},

// Indices into load_commands for the commands we care about; null until seen.
segment_cmd_index: ?u16 = null,
symtab_cmd_index: ?u16 = null,
dysymtab_cmd_index: ?u16 = null,
build_version_cmd_index: ?u16 = null,
data_in_code_cmd_index: ?u16 = null,
// Section index of __TEXT,__text within the segment command.
text_section_index: ?u16 = null,

// __DWARF segment sections
dwarf_debug_info_index: ?u16 = null,
dwarf_debug_abbrev_index: ?u16 = null,
dwarf_debug_str_index: ?u16 = null,
dwarf_debug_line_index: ?u16 = null,
dwarf_debug_ranges_index: ?u16 = null,

symtab: std.ArrayListUnmanaged(macho.nlist_64) = .{},
strtab: std.ArrayListUnmanaged(u8) = .{},
data_in_code_entries: std.ArrayListUnmanaged(macho.data_in_code_entry) = .{},

/// Frees all owned memory and closes the underlying file handle.
pub fn deinit(self: *Object) void {
    for (self.load_commands.items) |*lc| {
        lc.deinit(self.allocator);
    }
    self.load_commands.deinit(self.allocator);
    self.symtab.deinit(self.allocator);
    self.strtab.deinit(self.allocator);
    self.data_in_code_entries.deinit(self.allocator);
    self.allocator.free(self.name);
    if (self.ar_name) |v| {
        self.allocator.free(v);
    }
    self.file.close();
}

/// Caller owns the returned Object instance and is responsible for calling
/// `deinit` to free allocated memory.
pub fn initFromFile(allocator: *Allocator, arch: std.Target.Cpu.Arch, name: []const u8, file: fs.File) !Object {
    var reader = file.reader();
    const header = try reader.readStruct(macho.mach_header_64);

    if (header.filetype != macho.MH_OBJECT) {
        // Reset file cursor so the caller can re-parse the file as another kind.
        try file.seekTo(0);
        return error.NotObject;
    }

    const this_arch: std.Target.Cpu.Arch = switch (header.cputype) {
        macho.CPU_TYPE_ARM64 => .aarch64,
        macho.CPU_TYPE_X86_64 => .x86_64,
        else => |value| {
            log.err("unsupported cpu architecture 0x{x}", .{value});
            return error.UnsupportedCpuArchitecture;
        },
    };
    if (this_arch != arch) {
        log.err("mismatched cpu architecture: found {s}, expected {s}", .{ this_arch, arch });
        return error.MismatchedCpuArchitecture;
    }

    var self = Object{
        .allocator = allocator,
        .name = try allocator.dupe(u8, name),
        .file = file,
        .header = header,
    };

    try self.readLoadCommands(reader, .{});

    // Symtab and strtab are read together: nlist entries index into strtab.
    if (self.symtab_cmd_index != null) {
        try self.readSymtab();
        try self.readStrtab();
    }
    if (self.data_in_code_cmd_index != null) try self.readDataInCode();

    log.debug("\n\n", .{});
    log.debug("{s} defines symbols", .{self.name});
    for (self.symtab.items) |sym| {
        const symname = self.getString(sym.n_strx);
        log.debug("'{s}': {}", .{ symname, sym });
    }

    return self;
}

/// Optional file-offset rebase applied to every parsed load command; used
/// when the object is embedded at a nonzero offset (e.g. inside an archive).
pub const ReadOffset = struct {
    offset: ?u32 = null,
};

/// Parses header.ncmds load commands from `reader`, recording the indices of
/// the commands of interest and rebasing all file offsets by `offset`.
pub fn readLoadCommands(self: *Object, reader: anytype, offset: ReadOffset) !void {
    const offset_mod = offset.offset orelse 0;

    try self.load_commands.ensureCapacity(self.allocator, self.header.ncmds);

    var i: u16 = 0;
    while (i < self.header.ncmds) : (i += 1) {
        var cmd = try LoadCommand.read(self.allocator, reader);
        switch (cmd.cmd()) {
            macho.LC_SEGMENT_64 => {
                self.segment_cmd_index = i;
                var seg = cmd.Segment;
                for (seg.sections.items) |*sect, j| {
                    const index = @intCast(u16, j);
                    const segname = parseName(&sect.segname);
                    const sectname = parseName(&sect.sectname);
                    if (mem.eql(u8, segname, "__DWARF")) {
                        if (mem.eql(u8, sectname, "__debug_info")) {
                            self.dwarf_debug_info_index = index;
                        } else if (mem.eql(u8, sectname, "__debug_abbrev")) {
                            self.dwarf_debug_abbrev_index = index;
                        } else if (mem.eql(u8, sectname, "__debug_str")) {
                            self.dwarf_debug_str_index = index;
                        } else if (mem.eql(u8, sectname, "__debug_line")) {
                            self.dwarf_debug_line_index = index;
                        } else if (mem.eql(u8, sectname, "__debug_ranges")) {
                            self.dwarf_debug_ranges_index = index;
                        }
                    } else if (mem.eql(u8, segname, "__TEXT")) {
                        if (mem.eql(u8, sectname, "__text")) {
                            self.text_section_index = index;
                        }
                    }

                    // Rebase section data and relocation offsets into the
                    // containing file's coordinate space.
                    sect.offset += offset_mod;
                    if (sect.reloff > 0) sect.reloff += offset_mod;
                }

                seg.inner.fileoff += offset_mod;
            },
            macho.LC_SYMTAB => {
                self.symtab_cmd_index = i;
                cmd.Symtab.symoff += offset_mod;
                cmd.Symtab.stroff += offset_mod;
            },
            macho.LC_DYSYMTAB => {
                self.dysymtab_cmd_index = i;
            },
            macho.LC_BUILD_VERSION => {
                self.build_version_cmd_index = i;
            },
            macho.LC_DATA_IN_CODE => {
                self.data_in_code_cmd_index = i;
            },
            else => {
                log.debug("Unknown load command detected: 0x{x}.", .{cmd.cmd()});
            },
        }
        self.load_commands.appendAssumeCapacity(cmd);
    }
}

/// Reads the nlist_64 symbol table described by the LC_SYMTAB command.
/// Requires symtab_cmd_index to be set.
pub fn readSymtab(self: *Object) !void {
    const symtab_cmd = self.load_commands.items[self.symtab_cmd_index.?].Symtab;
    var buffer = try self.allocator.alloc(u8, @sizeOf(macho.nlist_64) * symtab_cmd.nsyms);
    defer self.allocator.free(buffer);
    _ = try self.file.preadAll(buffer, symtab_cmd.symoff);
    try self.symtab.ensureCapacity(self.allocator, symtab_cmd.nsyms);
    // TODO this align case should not be needed.
    // Probably a bug in stage1.
    const slice = @alignCast(@alignOf(macho.nlist_64), mem.bytesAsSlice(macho.nlist_64, buffer));
    self.symtab.appendSliceAssumeCapacity(slice);
}

/// Reads the raw string table described by the LC_SYMTAB command.
/// Requires symtab_cmd_index to be set.
pub fn readStrtab(self: *Object) !void {
    const symtab_cmd = self.load_commands.items[self.symtab_cmd_index.?].Symtab;
    var buffer = try self.allocator.alloc(u8, symtab_cmd.strsize);
    defer self.allocator.free(buffer);
    _ = try self.file.preadAll(buffer, symtab_cmd.stroff);
    try self.strtab.ensureCapacity(self.allocator, symtab_cmd.strsize);
    self.strtab.appendSliceAssumeCapacity(buffer);
}

/// Returns the NUL-terminated string at `str_off` in the string table.
/// The slice aliases strtab's storage; valid until strtab is mutated/freed.
pub fn getString(self: *const Object, str_off: u32) []const u8 {
    assert(str_off < self.strtab.items.len);
    return mem.spanZ(@ptrCast([*:0]const u8, self.strtab.items.ptr + str_off));
}

/// Reads the raw contents of section `index` from the segment command.
/// Caller owns the returned buffer.
pub fn readSection(self: Object, allocator: *Allocator, index: u16) ![]u8 {
    const seg = self.load_commands.items[self.segment_cmd_index.?].Segment;
    const sect = seg.sections.items[index];
    var buffer = try allocator.alloc(u8, sect.size);
    _ = try self.file.preadAll(buffer, sect.offset);
    return buffer;
}

/// Parses LC_DATA_IN_CODE entries into data_in_code_entries.
/// No-op when the command is absent.
pub fn readDataInCode(self: *Object) !void {
    const index = self.data_in_code_cmd_index orelse return;
    const data_in_code = self.load_commands.items[index].LinkeditData;

    var buffer = try self.allocator.alloc(u8, data_in_code.datasize);
    defer self.allocator.free(buffer);

    _ = try self.file.preadAll(buffer, data_in_code.dataoff);

    var stream = io.fixedBufferStream(buffer);
    var reader = stream.reader();
    while (true) {
        // Consume fixed-size entries until the buffer is exhausted.
        const dice = reader.readStruct(macho.data_in_code_entry) catch |err| switch (err) {
            error.EndOfStream => break,
            else => |e| return e,
        };
        try self.data_in_code_entries.append(self.allocator, dice);
    }
}
src/link/MachO/Object.zig
//! zigmod-generated dependency manifest. Keys (`_ids`) and paths are emitted
//! by the generator; the declarations below wire the vendored packages, C
//! sources, include dirs, and system libraries into a build step.
const std = @import("std");
const build = std.build;

pub const cache = ".zigmod/deps";

/// Adds every managed Zig package, C include directory, C source file, and
/// system library to `exe`. Links libc whenever any C inputs are present.
pub fn addAllTo(exe: *build.LibExeObjStep) void {
    @setEvalBranchQuota(1_000_000);
    for (packages) |pkg| {
        exe.addPackage(pkg);
    }
    if (c_include_dirs.len > 0 or c_source_files.len > 0) {
        exe.linkLibC();
    }
    for (c_include_dirs) |dir| {
        exe.addIncludeDir(dir);
    }
    inline for (c_source_files) |fpath| {
        exe.addCSourceFile(fpath[1], @field(c_source_flags, fpath[0]));
    }
    for (system_libs) |lib| {
        exe.linkSystemLibrary(lib);
    }
}

// NOTE(review): this helper looks up `c_source_flags` by *path*, but the
// fields of `c_source_flags` are keyed by dependency id, and the flag values
// are `[]const []const u8`, not `[]const u8`. It is never referenced, so it
// never gets analyzed; confirm intent before using it.
fn get_flags(comptime index: usize) []const u8 {
    return @field(c_source_flags, _paths[index]);
}

pub const _ids = .{
    "<KEY>",
    "<KEY>",
    "<KEY>",
    "<KEY>",
    "<KEY>",
    "<KEY>",
    "0npcrzfdlrvkf44mzjo8bduj9gmqyefo0j3rstt6b0pm2r6r",
};

pub const _paths = .{
    "",
    "/v/git/github.com/yaml/libyaml/tag-0.2.5/",
    "/v/git/github.com/nektro/zig-ansi/commit-25039ca/",
    "/v/git/github.com/ziglibs/known-folders/commit-f0f4188/",
    "/v/git/github.com/Vexu/zuri/commit-41bcd78/",
    "/v/git/github.com/alexnask/iguanaTLS/commit-1767e48/",
    "/v/git/github.com/nektro/zig-licenses/commit-1a19e4b/",
};

pub const package_data = struct {
    pub const _s84v9o48ucb0xq0cmzq0cn433hgw0iaqztugja16h8bzxu3h = build.Pkg{ .name = "ansi", .path = cache ++ "/v/git/github.com/nektro/zig-ansi/commit-25039ca/src/lib.zig", .dependencies = &[_]build.Pkg{} };
    pub const _2ta738wrqbaqzl3iwzoo8nj35k9ynwz5p5iyz80ryrpp4ttf = build.Pkg{ .name = "known-folders", .path = cache ++ "/v/git/github.com/ziglibs/known-folders/commit-f0f4188/known-folders.zig", .dependencies = &[_]build.Pkg{} };
    pub const _2b7mq571jmq31ktmpigopu29480iw245heueajgxzxn7ab8o = build.Pkg{ .name = "zuri", .path = cache ++ "/v/git/github.com/Vexu/zuri/commit-41bcd78/src/zuri.zig", .dependencies = &[_]build.Pkg{} };
    pub const _csbnipaad8n77buaszsnjvlmn6j173fl7pkprsctelswjywe = build.Pkg{ .name = "iguanatls", .path = cache ++ "/v/git/github.com/alexnask/iguanaTLS/commit-1767e48/src/main.zig", .dependencies = &[_]build.Pkg{} };
    pub const _0npcrzfdlrvkf44mzjo8bduj9gmqyefo0j3rstt6b0pm2r6r = build.Pkg{ .name = "licenses", .path = cache ++ "/v/git/github.com/nektro/zig-licenses/commit-1a19e4b/src/lib.zig", .dependencies = &[_]build.Pkg{} };
};

pub const packages = &[_]build.Pkg{
    package_data._s84v9o48ucb0xq0cmzq0cn433hgw0iaqztugja16h8bzxu3h,
    package_data._2ta738wrqbaqzl3iwzoo8nj35k9ynwz5p5iyz80ryrpp4ttf,
    package_data._2b7mq571jmq31ktmpigopu29480iw245heueajgxzxn7ab8o,
    package_data._csbnipaad8n77buaszsnjvlmn6j173fl7pkprsctelswjywe,
    package_data._0npcrzfdlrvkf44mzjo8bduj9gmqyefo0j3rstt6b0pm2r6r,
};

// FIX: these aliases previously used `packages[1]`..`packages[5]`, i.e. each
// name pointed at the *next* package (`ansi` aliased known-folders) and
// `licenses = packages[5]` indexed past the end of the 5-element `packages`
// array. Alias the package_data declarations directly so each friendly name
// is unambiguous and index-proof.
pub const pkgs = struct {
    pub const ansi = package_data._s84v9o48ucb0xq0cmzq0cn433hgw0iaqztugja16h8bzxu3h;
    pub const known_folders = package_data._2ta738wrqbaqzl3iwzoo8nj35k9ynwz5p5iyz80ryrpp4ttf;
    pub const zuri = package_data._2b7mq571jmq31ktmpigopu29480iw245heueajgxzxn7ab8o;
    pub const iguanatls = package_data._csbnipaad8n77buaszsnjvlmn6j173fl7pkprsctelswjywe;
    pub const licenses = package_data._0npcrzfdlrvkf44mzjo8bduj9gmqyefo0j3rstt6b0pm2r6r;
};

pub const c_include_dirs = &[_][]const u8{
    cache ++ _paths[1] ++ "include",
};

pub const c_source_flags = struct {
    pub const @"8mdbh0zuneb0i3hs5jby5je0heem1i6yxusl7c8y8qx68hqc" = &.{
        "-DYAML_VERSION_MAJOR=0",
        "-DYAML_VERSION_MINOR=2",
        "-DYAML_VERSION_PATCH=5",
        "-DYAML_VERSION_STRING=\"0.2.5\"",
        "-DYAML_DECLARE_STATIC=1",
    };
};

pub const c_source_files = &[_][2][]const u8{
    [_][]const u8{ _ids[1], cache ++ _paths[1] ++ "src/api.c" },
    [_][]const u8{ _ids[1], cache ++ _paths[1] ++ "src/dumper.c" },
    [_][]const u8{ _ids[1], cache ++ _paths[1] ++ "src/emitter.c" },
    [_][]const u8{ _ids[1], cache ++ _paths[1] ++ "src/loader.c" },
    [_][]const u8{ _ids[1], cache ++ _paths[1] ++ "src/parser.c" },
    [_][]const u8{ _ids[1], cache ++ _paths[1] ++ "src/reader.c" },
    [_][]const u8{ _ids[1], cache ++ _paths[1] ++ "src/scanner.c" },
    [_][]const u8{ _ids[1], cache ++ _paths[1] ++ "src/writer.c" },
};

pub const system_libs = &[_][]const u8{};
deps.zig
const std = @import("std");
const assert = std.debug.assert;

/// Advent of Code 2018 day 2: box-ID checksum and common-letter search.
/// Words are stored inline in a fixed [250][26]u8 table (answering the
/// old TODO: a heap alternative would be std.ArrayList of slices), so
/// input is limited to 250 words of at most 26 lowercase ASCII letters.
pub const Checksum = struct {
    words: [250][26]u8,
    count: usize, // number of words stored
    maxlen: usize, // length of the longest word seen
    count2: usize, // words containing some letter exactly twice
    count3: usize, // words containing some letter exactly three times

    pub fn init() Checksum {
        // FIX: the original fetched std.heap.direct_allocator here and
        // never used it; all storage is inline in the struct.
        return Checksum{
            .words = undefined,
            .count = 0,
            .maxlen = 0,
            .count2 = 0,
            .count3 = 0,
        };
    }

    // Nothing to release: storage is inline. Kept for call-site symmetry.
    pub fn deinit(self: *Checksum) void {}

    /// Record `word` (lowercase a-z, len <= 26) and update the
    /// two-repeat / three-repeat counters used by compute_checksum().
    pub fn add_word(self: *Checksum, word: []const u8) void {
        std.mem.copy(u8, self.words[self.count][0..], word);
        self.count += 1;
        if (self.maxlen < word.len) self.maxlen = word.len;

        // Letter-frequency histogram for this word.
        var chars: [26]usize = undefined;
        std.mem.set(usize, chars[0..], 0);
        for (word) |c| {
            const p: usize = c - 'a';
            chars[p] += 1;
        }

        // Each word contributes at most once to each counter, no matter
        // how many letters repeat (behavior identical to the original
        // found2/found3 continue-dance, just stated directly).
        var found2: bool = false;
        var found3: bool = false;
        for (chars) |c| {
            if (c == 2) found2 = true;
            if (c == 3) found3 = true;
        }
        if (found2) self.count2 += 1;
        if (found3) self.count3 += 1;
    }

    /// Part 1 answer: (#words with a double) * (#words with a triple).
    pub fn compute_checksum(self: *Checksum) usize {
        return self.count2 * self.count3;
    }

    /// Part 2: find the pair of words differing in exactly one position
    /// and write their common letters into `buf`. Returns the number of
    /// letters written, or 0 when no such pair exists.
    /// Assumes all words have length `maxlen` (true for the puzzle input);
    /// shorter words would compare undefined trailing bytes.
    pub fn find_common_letters(self: *Checksum, buf: *[26]u8) usize {
        var j: usize = 0;
        while (j < self.count) : (j += 1) {
            var k: usize = j + 1;
            while (k < self.count) : (k += 1) {
                var dtot: usize = 0; // differing positions found
                var dpos: usize = 0; // last differing position
                var l: usize = 0;
                while (l < self.maxlen) : (l += 1) {
                    if (self.words[j][l] == self.words[k][l]) continue;
                    dpos = l;
                    dtot += 1;
                }
                if (dtot != 1) continue;
                // Splice out the single differing column.
                std.mem.copy(u8, buf[0..], self.words[j][0..dpos]);
                std.mem.copy(u8, buf[dpos..], self.words[k][dpos + 1 ..]);
                return self.maxlen - 1;
            }
        }
        return 0;
    }
};

test "simple checksum" {
    // FIX: dropped the unused `Data` helper struct the original declared.
    const data =
        \\abcdef
        \\bababc
        \\abbcde
        \\abcccd
        \\aabcdd
        \\abcdee
        \\ababab
    ;

    var checksum = Checksum.init();
    defer checksum.deinit();

    var it = std.mem.separate(data, "\n");
    while (it.next()) |line| {
        checksum.add_word(line);
    }
    assert(checksum.compute_checksum() == 12);
}

test "simple common letters" {
    const data =
        \\abcde
        \\fghij
        \\klmno
        \\pqrst
        \\fguij
        \\axcye
        \\wvxyz
    ;

    var checksum = Checksum.init();
    defer checksum.deinit();

    var it = std.mem.separate(data, "\n");
    while (it.next()) |line| {
        checksum.add_word(line);
    }

    var buf: [26]u8 = undefined;
    const len = checksum.find_common_letters(&buf);
    assert(std.mem.compare(u8, buf[0..len], "fgij") == std.mem.Compare.Equal);
}
2018/p02/checksum.zig
const std = @import("std");

/// A CLI (sub)command. `commands` is an optional allocator-owned slice
/// of nested subcommands, freed recursively by deinit().
pub const Command = struct {
    long_name: []const u8,
    commands: ?[]Command = null,

    pub fn deinit(self: Command, allocator: std.mem.Allocator) void {
        if (self.commands) |cmds| {
            for (cmds) |cmd| {
                cmd.deinit(allocator);
            }
            allocator.free(cmds);
        }
    }
};

/// Minimal argument parser used to reproduce a memory-corruption bug
/// (see the @1/@2/@3 notes in parse()). Root cause: the original main()
/// took `.allocator()` from a *temporary* ArenaAllocator, so every
/// allocation went through a dangling arena on dead stack memory.
pub const Parser = struct {
    iter: Iterator,
    root_command: *Command,
    cur_command: *Command,

    const Iterator = struct {
        args: [][:0]u8,
        i: u32 = 0,

        pub fn init(args: [][:0]u8) Iterator {
            return Iterator{ .args = args, .i = 0 };
        }

        // Consume and return the next command line argument.
        pub fn next(self: *Iterator) ?[:0]u8 {
            if (self.i < self.args.len) {
                const a = self.args[self.i];
                self.i += 1;
                return a;
            }
            return null;
        }

        // Read ahead without consuming.
        pub fn skip(self: *Iterator) ?[:0]u8 {
            if (self.i < self.args.len) {
                return self.args[self.i];
            }
            return null;
        }
    };

    /// Build a parser over `args`. `commands` is copied into an
    /// allocator-owned slice hung off a heap-allocated root command
    /// named "hello"; release with deinit().
    pub fn init(
        args: [][:0]u8,
        commands: []const Command,
        allocator: std.mem.Allocator,
    ) Parser {
        const cmds = allocator.alloc(Command, commands.len) catch unreachable;
        for (commands) |c, i| {
            cmds[i] = c;
        }
        const c = allocator.create(Command) catch unreachable;
        c.* = Command{ .long_name = "hello", .commands = cmds };
        const iter = Iterator.init(args);
        return Parser{
            .iter = iter,
            .root_command = c,
            .cur_command = c,
        };
    }

    pub fn deinit(self: Parser, allocator: std.mem.Allocator) void {
        self.root_command.deinit(allocator);
        allocator.destroy(self.root_command);
    }

    /// Debug walk over the first two arguments. Panics (`.?`) when fewer
    /// than two arguments are supplied. The prints below were used to
    /// observe cur_command getting clobbered under the dangling-arena bug.
    pub fn parse(self: *Parser) !void {
        std.debug.print("cur_command: {s}\n", .{self.cur_command.long_name});
        const a = self.iter.next();
        std.debug.print("a: {s}\n", .{a.?});
        std.debug.print("cur_command: {s}\n", .{self.cur_command.long_name});
        // @1 running the next line will print broken cur_command
        std.debug.print("cur_command: {s}\n", .{self.cur_command.long_name});
        // @2 running the next two lines will panic (running with @1 won't panic but will print broken cur_command)
        const b = self.iter.next();
        std.debug.print("b: {s}\n", .{b.?});
        // @3 running the next two lines with @1 and @2 will panic with very long text
        std.debug.print("cur_command: {s}\n", .{self.cur_command.long_name});
        std.debug.print("cur_command: {s}\n", .{self.cur_command.long_name});
    }
};

pub fn main() anyerror!void {
    // BUG FIX: the arena must outlive its allocator() interface. The
    // original called `ArenaAllocator.init(...).allocator()` on a
    // temporary, leaving the interface pointing at dead stack memory —
    // the source of the corrupted cur_command output noted in parse().
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const alloc = arena.allocator();

    const args = try std.process.argsAlloc(alloc);
    defer std.process.argsFree(alloc, args);

    // BUG FIX: init takes (args, commands, allocator); the original
    // single-argument call `Parser.init(args)` did not compile.
    var parser = Parser.init(args, &[_]Command{}, alloc);
    defer parser.deinit(alloc);
    try parser.parse();
}

// Mocking std.process.args for the test below.
const TestArgs = struct {
    args: [][:0]u8,

    fn init(args: []const []const u8) !TestArgs {
        var arr = try std.testing.allocator.alloc([:0]u8, args.len);
        for (args) |arg, i| {
            arr[i] = try std.testing.allocator.dupeZ(u8, arg);
        }
        return TestArgs{ .args = arr };
    }

    // Frees the duplicated strings; the slice itself is freed by the
    // caller (see the separate `free(args.args)` defer in the test).
    fn deinit(self: TestArgs) void {
        for (self.args) |a| {
            std.testing.allocator.free(a);
        }
    }
};

test "test" {
    var args = try TestArgs.init(&[_][]const u8{ "test", "--verbose", "hello", "--user", "root", "foo" });
    defer std.testing.allocator.free(args.args);
    defer args.deinit();
    var parser = Parser.init(args.args, &[_]Command{ .{ .long_name = "test" }, .{ .long_name = "test2" } }, std.testing.allocator);
    defer parser.deinit(std.testing.allocator);
    try parser.parse();
}
experiments/src/main.zig
const std = @import("std");

const stdout = std.io.getStdOut().writer();

var gpallocator = std.heap.GeneralPurposeAllocator(.{}){};
var allocator = &gpallocator.allocator;

// This code was written AFTER the Go iterations.

const asc_u64 = std.sort.asc(u64);

/// Advent of Code 2020 day 9 part 2: find the first number that is not
/// the sum of two of the previous 25, then find a contiguous run summing
/// to it and print min+max of that run.
pub fn main() !void {
    const f = try std.fs.cwd().openFile("input", .{ .read = true });
    const b = try f.reader().readAllAlloc(allocator, 1024000);

    var line_iterator = std.mem.split(b, "\n");
    var numbers_list = std.ArrayList(u64).init(allocator);
    while (true) {
        var line = line_iterator.next() orelse break;
        var lnum = std.fmt.parseInt(u64, line, 10) catch continue;
        try numbers_list.append(lnum);
    }
    var numbers = numbers_list.toOwnedSlice();

    const preamble_len = 25;
    var preamble: [preamble_len]u64 = [1]u64{0} ** preamble_len;
    var illegal_n: ?u64 = null;
    for (numbers) |n, i| {
        if (i < preamble_len) {
            preamble[i] = n;
            continue;
        }
        if (!is_valid_sum(&preamble, n)) {
            illegal_n = n;
            break;
        }
        // Shift the sliding window left and append the newest number.
        std.mem.copy(u64, preamble[0..], preamble[1..]);
        preamble[preamble_len - 1] = n;
    }

    var slice = array_sum(numbers, illegal_n.?).?;
    var conti = numbers[slice[0]..slice[1]];
    std.sort.sort(u64, conti, {}, asc_u64);
    try stdout.print("{}\n", .{conti[0] + conti[conti.len - 1]});
}

/// Find a contiguous run of at least two numbers summing to `sum`.
/// Returns {start, end} suitable for slicing as numbers[start..end]
/// (end exclusive), or null when no run exists.
/// BUG FIX: the original returned {i, j - 1} while its accumulator
/// covered indices i..j-1 inclusive, so the returned slice dropped the
/// last summed element; it also never tested the sum after consuming
/// the final array element. Accumulate-then-check fixes both.
fn array_sum(numbers: []u64, sum: u64) ?[2]usize {
    for (numbers) |_, i| {
        var current_sum: u64 = 0;
        var j = i;
        while (j < numbers.len) : (j += 1) {
            current_sum += numbers[j];
            // Require at least two elements, per the puzzle statement.
            if (j > i and current_sum == sum) {
                return [2]usize{ i, j + 1 };
            }
            if (current_sum > sum) {
                break;
            }
        }
    }
    return null;
}

/// True when `sum` equals the sum of two DIFFERENT entries of `preamble`.
/// BUG FIX: the original also accepted an element paired with itself
/// (n + n), which the puzzle forbids; distinct indices are now required.
fn is_valid_sum(preamble: []u64, sum: u64) bool {
    for (preamble) |n, i| {
        for (preamble) |m, j| {
            if (i == j) continue;
            if (n + m == sum) {
                return true;
            }
        }
    }
    return false;
}
09/part2.zig
const std = @import("std");
const builtin = @import("builtin");

const common = @import("common.zig");
const slimy = @import("slimy.zig");

// Decide whether chunk (x, z) of `world_seed` is a slime chunk.
// Reimplements Minecraft's Java check: a per-chunk seed is derived from
// the world seed, fed into java.util.Random's 48-bit LCG, and the chunk
// is slimy when nextInt(10) == 0. The mix of wrapping 32-bit (`*%` on
// i32) and widened 64-bit (`@as(i64, ...) *`) arithmetic mirrors Java's
// int/long overflow semantics exactly — do not "clean it up".
fn isSlime(world_seed: i64, x: i32, z: i32) bool {
    // Overflow here is intentional (Java wrapping semantics).
    @setRuntimeSafety(false);

    // Init slime seed
    var seed = world_seed +% @as(i64, x * x *% 4987142) + @as(i64, x *% 5947611) + @as(i64, z * z) * 4392871 + @as(i64, z *% 389711);
    seed ^= 987234911;

    // Init LCG seed (java.util.Random's scramble + 48-bit mask)
    const magic = 0x5DEECE66D;
    const mask = (1 << 48) - 1;
    seed = (seed ^ magic) & mask;

    // Calculate random result: one LCG step, take the top 31 bits,
    // then nextInt(10). (`48 - 31` binds tighter than `>>`, i.e. >> 17.)
    seed = (seed *% magic +% 0xB) & mask;
    const bits = @intCast(i32, seed >> 48 - 31);
    const val = @mod(bits, 10);
    // Java's nextInt rejection-sampling condition; always true here.
    std.debug.assert(bits >= val - 9);
    return val == 0;
}

test "isSlime" {
    // Ground truth for seed 1, chunks (0,0)..(9,9), sampled from the game.
    const expected = [10][10]u1{
        .{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
        .{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
        .{ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 },
        .{ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
        .{ 0, 0, 0, 1, 0, 0, 0, 0, 0, 1 },
        .{ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 },
        .{ 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
        .{ 1, 0, 0, 0, 0, 1, 0, 0, 0, 0 },
        .{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
        .{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 },
    };
    for (expected) |row, y| {
        for (row) |e, x| {
            try std.testing.expectEqual(
                e != 0,
                isSlime(1, @intCast(i32, x), @intCast(i32, y)),
            );
        }
    }
}

test "isSlime with Z 23" {
    try std.testing.expect(!isSlime(1, -1, 23));
}

// Count slime chunks inside the mask (common.mask) centered on (cx, cz).
fn checkLocation(world_seed: i64, cx: i32, cz: i32) u32 {
    @setRuntimeSafety(false);
    var count: u32 = 0;
    for (common.mask) |row, mz| {
        for (row) |bit, mx| {
            // Translate mask coordinates so the mask is centered on (cx, cz).
            const x = @intCast(i32, mx) + cx - row.len / 2;
            const z = @intCast(i32, mz) + cz - common.mask.len / 2;
            count += @boolToInt(bit and isSlime(world_seed, x, z));
        }
    }
    return count;
}

// Convenience wrapper: adapts plain callback functions into the Context
// interface expected by Searcher, then runs the search.
pub fn search(
    params: slimy.SearchParams,
    context: anytype,
    comptime resultCallback: fn (@TypeOf(context), slimy.Result) void,
    comptime progressCallback: ?fn (@TypeOf(context), completed: u64, total: u64) void,
) !void {
    try Searcher(struct {
        ctx: @TypeOf(context),
        const Self = @This();
        pub fn reportResult(self: Self, result: slimy.Result) void {
            resultCallback(self.ctx, result);
        }
        pub fn reportProgress(self: Self, completed: u64, total: u64) void {
            if (progressCallback) |callback| {
                callback(self.ctx, completed, total);
            }
        }
    }).init(params, .{ .ctx = context }).search();
}

// Context should have the following functions:
//  pub fn reportResult(self: Context, result: slimy.Result) void;
//  pub fn reportProgress(self: Context, completed: u64, total: u64) void;
pub fn Searcher(comptime Context: type) type {
    return struct {
        world_seed: i64,
        threshold: i32, // minimum slime-chunk count worth reporting

        // Search rectangle, [x0, x1) x [z0, z1) in chunk coordinates.
        x0: i32,
        z0: i32,
        x1: i32,
        z1: i32,

        threads: u8,
        ctx: Context,

        const Self = @This();

        pub fn init(params: slimy.SearchParams, context: Context) Self {
            std.debug.assert(params.method.cpu > 0);
            return .{
                .world_seed = params.world_seed,
                .threshold = params.threshold,
                .x0 = params.x0,
                .x1 = params.x1,
                .z0 = params.z0,
                .z1 = params.z1,
                .threads = params.method.cpu,
                .ctx = context,
            };
        }

        pub fn search(self: Self) !void {
            if (self.threads == 1) {
                self.searchSinglethread();
            } else if (builtin.single_threaded) {
                // init() asserts threads > 0; multi-threaded search is
                // unreachable in a single-threaded build.
                unreachable;
            } else {
                try self.searchMultithread();
            }
        }

        pub fn searchSinglethread(self: Self) void {
            const total_chunks = @intCast(u64, self.x1 - self.x0) * @intCast(u64, self.z1 - self.z0);
            var completed_chunks: u64 = 0;

            // Scan in 100x100 tiles so progress can be reported between tiles.
            const step = 100;
            var z0 = self.z0;
            while (z0 < self.z1) : (z0 += step) {
                const z1 = std.math.min(z0 + step, self.z1);

                var x0 = self.x0;
                while (x0 < self.x1) : (x0 += step) {
                    const x1 = std.math.min(x0 + step, self.x1);
                    self.searchArea(x0, x1, z0, z1);

                    completed_chunks += @intCast(u64, x1 - x0) * @intCast(u64, z1 - z0);
                    self.ctx.reportProgress(completed_chunks, total_chunks);
                }
            }
        }

        // Splits the Z range into one horizontal band per thread.
        pub fn searchMultithread(
            self: Self,
        ) !void {
            var i: u8 = 0;
            var thr: ?std.Thread = null;
            while (i < self.threads) : (i += 1) {
                // Each worker receives the previously spawned thread and
                // joins it, forming a join chain ended by thr.?.join().
                thr = try std.Thread.spawn(.{}, searchWorker, .{ self, i, thr });
            }
            thr.?.join();
        }

        fn searchWorker(self: Self, thread_idx: u8, prev_thread: ?std.Thread) void {
            // TODO: work stealing
            const thread_width = @intCast(u31, self.z1 - self.z0) / self.threads;
            const z0 = self.z0 + thread_idx * thread_width;
            const z1 = if (thread_idx == self.threads - 1)
                self.z1 // Last thread, consume all remaining area
            else
                z0 + thread_width;

            // TODO: progress reporting
            self.searchArea(self.x0, self.x1, z0, z1);

            // This creates a linked list of threads, so we can just join the last one from the main thread
            if (prev_thread) |thr| thr.join();
        }

        // TODO: cache isSlime results
        // Brute-force scan of [x0, x1) x [z0, z1), reporting every
        // position whose mask count reaches the threshold.
        fn searchArea(
            self: Self,
            x0: i32,
            x1: i32,
            z0: i32,
            z1: i32,
        ) void {
            var z = z0;
            while (z < z1) : (z += 1) {
                var x = x0;
                while (x < x1) : (x += 1) {
                    const count = checkLocation(self.world_seed, x, z);
                    if (count >= self.threshold) {
                        self.ctx.reportResult(.{
                            .x = x,
                            .z = z,
                            .count = count,
                        });
                    }
                }
            }
        }
    };
}
src/cpu.zig
const std = @import("std");
const rk = @import("renderkit");
const gfx = @import("../gamekit.zig").gfx;

/// An offscreen render pass: a render target with up to four color
/// attachments and an optional depth/stencil attachment. Owns all of
/// its textures; release with deinit().
pub const OffscreenPass = struct {
    pass: rk.Pass,
    color_texture: gfx.Texture,
    color_texture2: ?gfx.Texture = null,
    color_texture3: ?gfx.Texture = null,
    color_texture4: ?gfx.Texture = null,
    depth_stencil_texture: ?gfx.Texture = null,

    /// Single color attachment with nearest filtering and clamped wrap.
    pub fn init(width: i32, height: i32) OffscreenPass {
        return initWithOptions(width, height, .nearest, .clamp);
    }

    /// Multiple render targets: `tex_cnt` color attachments (values
    /// above 4 are silently capped; values below 1 still create the
    /// primary attachment). All use nearest/clamp.
    pub fn initMrt(width: i32, height: i32, tex_cnt: usize) OffscreenPass {
        var pass = OffscreenPass{
            .pass = undefined,
            .color_texture = undefined,
        };
        pass.color_texture = gfx.Texture.initOffscreen(width, height, .nearest, .clamp);
        pass.color_texture2 = if (tex_cnt > 1) gfx.Texture.initOffscreen(width, height, .nearest, .clamp) else null;
        pass.color_texture3 = if (tex_cnt > 2) gfx.Texture.initOffscreen(width, height, .nearest, .clamp) else null;
        pass.color_texture4 = if (tex_cnt > 3) gfx.Texture.initOffscreen(width, height, .nearest, .clamp) else null;

        // FIX: `desc` is never mutated after construction, so it is
        // declared const (current compilers reject the original `var`).
        const desc = rk.PassDesc{
            .color_img = pass.color_texture.img,
            .color_img2 = if (pass.color_texture2) |t| t.img else null,
            .color_img3 = if (pass.color_texture3) |t| t.img else null,
            .color_img4 = if (pass.color_texture4) |t| t.img else null,
        };
        pass.pass = rk.createPass(desc);
        return pass;
    }

    /// Single color attachment with caller-chosen filter/wrap modes.
    pub fn initWithOptions(width: i32, height: i32, filter: rk.TextureFilter, wrap: rk.TextureWrap) OffscreenPass {
        const color_tex = gfx.Texture.initOffscreen(width, height, filter, wrap);
        const pass = rk.createPass(.{
            .color_img = color_tex.img,
        });
        return .{ .pass = pass, .color_texture = color_tex };
    }

    /// Single color attachment plus a depth/stencil attachment.
    pub fn initWithStencil(width: i32, height: i32, filter: rk.TextureFilter, wrap: rk.TextureWrap) OffscreenPass {
        const color_tex = gfx.Texture.initOffscreen(width, height, filter, wrap);
        const depth_stencil_img = gfx.Texture.initStencil(width, height, filter, wrap);
        const pass = rk.createPass(.{
            .color_img = color_tex.img,
            .depth_stencil_img = depth_stencil_img.img,
        });
        return .{ .pass = pass, .color_texture = color_tex, .depth_stencil_texture = depth_stencil_img };
    }

    pub fn deinit(self: *const OffscreenPass) void {
        // Pass MUST be destroyed first! It relies on the Textures being present.
        rk.destroyPass(self.pass);
        self.color_texture.deinit();
        if (self.color_texture2) |t| t.deinit();
        if (self.color_texture3) |t| t.deinit();
        if (self.color_texture4) |t| t.deinit();
        if (self.depth_stencil_texture) |depth_stencil| depth_stencil.deinit();
    }
};
gamekit/graphics/offscreen_pass.zig
// astrolabe.pm package-registry HTTP server: apple_pie routes backed by
// redis (okredis) for metadata and a tar archive directory for payloads.
const std = @import("std");
const builtin = @import("builtin");
const http = @import("apple_pie");
const redis = @import("okredis");
const tar = @import("tar");
const version = @import("version");
const zzz = @import("zzz");

// Provides zFindString, createPkg and friends used below.
usingnamespace @import("common.zig");

const FV = redis.commands.hashes.utils.FV;
const FixBuf = redis.types.FixBuf;
const GET = redis.commands.strings.GET;
const HMGET = redis.commands.hashes.HMGET;
const HSET = redis.commands.hashes.HSET;
const OrErr = redis.types.OrErr;
const SADD = redis.commands.sets.SADD;
const SET = redis.commands.strings.SET;
const SISMEMBER = redis.commands.sets.SISMEMBER;
const freeReply = redis.freeReply;

const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const fs = http.FileServer;
const os = std.os;
const router = http.router;

// Directory that holds published package tarballs: <user>/<pkg>/<version>.
const archive_path = "/var/www/archive";

// Run the whole server on evented (async) I/O.
pub const io_mode = .evented;

var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{
    .stack_trace_frames = 20,
}){};
const gpa = &general_purpose_allocator.allocator;

var server = http.Server.init();
// SIGINT handler installation data; SA_RESETHAND makes a second Ctrl-C kill hard.
const interrupt = os.Sigaction{
    .handler = .{ .sigaction = interruptFn },
    .mask = os.empty_sigset,
    .flags = os.SA_SIGINFO | os.SA_RESETHAND,
};

// Graceful shutdown on Ctrl-C: stop the server, flush leak checks, exit.
fn interruptFn(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const c_void) callconv(.C) void {
    if (sig == std.c.SIGINT) {
        server.shutdown();
        _ = general_purpose_allocator.deinit();
        std.process.exit(0);
    }
}

// Optional metadata fields a package manifest may carry.
const optional_fields = [_][]const u8{
    "description",
    "homepage_url",
    "source_url",
    "license",
};

pub fn main() !void {
    defer _ = general_purpose_allocator.deinit();
    try fs.init(gpa, .{ .dir_path = archive_path, .base_path = "archive" });
    defer fs.deinit();

    os.sigaction(std.c.SIGINT, &interrupt, null);

    @setEvalBranchQuota(2000);
    try server.run(
        gpa,
        try std.net.Address.parseIp("127.0.0.1", 42069),
        comptime router.router(&[_]router.Route{
            router.get("/pkgs", index), // return all latest packages
            router.get("/pkgs/:user", userPkgs), // return all latest packages for a user
            router.get("/pkgs/:user/:pkg", versions),
            router.get("/pkgs/:user/:pkg/latest", latest),
            router.get("/pkgs/:user/:pkg/:version", pkgInfo),
            router.get("/tags/:tag", tagPkgs), // return all latest packages for a tag
            router.get("/trees/:user/:pkg/:version", trees),
            router.get("/archive/:user/:pkg/:version", archive),
            router.post("/publish", publish),
        }),
    );
}

// Read the latest published semver of user/pkg from redis.
fn getLatest(client: *redis.Client, user: []const u8, pkg: []const u8) !version.Semver {
    const key = try std.fmt.allocPrint(gpa, "user:{s}:pkg:{s}:latest", .{ user, pkg });
    defer gpa.free(key);

    const version_buf = try client.send(FixBuf(80), .{ "GET", key });
    return try version.Semver.parse(version_buf.toSlice());
}

// Stream one package's latest metadata as a JSON object into `json`
// (a std.json.writeStream). All allocations come from `arena`, freed by
// the caller's arena.deinit().
fn streamPkgToJson(
    client: *redis.Client,
    arena: *ArenaAllocator,
    json: anytype,
    user: []const u8,
    pkg: []const u8,
) !void {
    const allocator = &arena.allocator;
    const semver = try getLatest(client, user, pkg);
    const ver_str = try std.fmt.allocPrint(allocator, "{}", .{semver});
    const key = try std.fmt.allocPrint(allocator, "user:{s}:pkg:{s}:{}", .{
        user,
        pkg,
        semver,
    });
    const tags_key = try std.fmt.allocPrint(allocator, "{s}:tags", .{key});
    const deps_key = try std.fmt.allocPrint(allocator, "{s}:deps", .{key});
    const build_deps_key = try std.fmt.allocPrint(allocator, "{s}:build_deps", .{key});
    const metadata = try client.sendAlloc(PkgMetadata, allocator, HMGET.forStruct(PkgMetadata).init(key));

    try json.beginObject();
    try json.objectField("user");
    try json.emitString(user);
    try json.objectField("name");
    try json.emitString(pkg);
    try json.objectField("version");
    try json.emitString(ver_str);
    // Emit each PkgMetadata field; optional strings are skipped when null.
    inline for (std.meta.fields(PkgMetadata)) |field| {
        switch (field.field_type) {
            u64 => {
                try json.objectField(field.name);
                try json.emitNumber(@field(metadata, field.name));
            },
            ?[]const u8 => {
                if (@field(metadata, field.name)) |value| {
                    try json.objectField(field.name);
                    try json.emitString(value);
                }
            },
            else => |T| @compileError("didn't account for this type: " ++ @typeName(T)),
        }
    }

    const tags = try client.sendAlloc([][]const u8, allocator, .{ "SMEMBERS", tags_key });
    const deps = try client.sendAlloc([][]const u8, allocator, .{ "SMEMBERS", deps_key });
    const build_deps = try client.sendAlloc([][]const u8, allocator, .{ "SMEMBERS", build_deps_key });

    try json.objectField("tags");
    try json.beginArray();
    for (tags) |tag| {
        try json.arrayElem();
        try json.emitString(tag);
    }
    try json.endArray();

    // deps/build_deps are stored as JSON strings: re-parse and re-emit.
    var parser = std.json.Parser.init(allocator, false);
    defer parser.deinit();

    try json.objectField("deps");
    try json.beginArray();
    for (deps) |dep| {
        const value_tree = try parser.parse(dep);
        try json.arrayElem();
        try json.emitJson(value_tree.root);
        parser.reset();
    }
    try json.endArray();

    try json.objectField("build_deps");
    try json.beginArray();
    for (build_deps) |build_dep| {
        const value_tree = try parser.parse(build_dep);
        try json.arrayElem();
        try json.emitJson(value_tree.root);
        parser.reset();
    }
    try json.endArray();
    try json.endObject();
}

// Rebuild the "pkgs_json" cache: JSON array of every package's latest
// metadata, stored in redis with a 1-hour TTL.
fn cachePkgs(client: *redis.Client, arena: *ArenaAllocator) !void {
    const allocator = &arena.allocator;
    const pkgs = try client.sendAlloc([][]const u8, allocator, .{ "SMEMBERS", "pkgs" });

    var fifo = std.fifo.LinearFifo(u8, .{ .Dynamic = {} }).init(allocator);
    defer fifo.deinit();

    var json = std.json.writeStream(fifo.writer(), 5);
    try json.beginArray();
    for (pkgs) |pkg| {
        try json.arrayElem();
        // Set members are "<user>/<pkg>".
        var it = std.mem.tokenize(pkg, "/");
        try streamPkgToJson(
            client,
            arena,
            &json,
            it.next() orelse return error.NoUser,
            it.next() orelse return error.NoPkg,
        );
    }
    try json.endArray();

    try client.send(void, SET.init("pkgs_json", fifo.readableSlice(0), .{ .Seconds = 3600 }, .NoConditions));
}

// GET /pkgs — serve the cached all-packages JSON, rebuilding it on miss.
fn index(resp: *http.Response, req: http.Request) !void {
    var arena = ArenaAllocator.init(gpa);
    defer arena.deinit();
    const allocator = &arena.allocator;

    var client: redis.Client = undefined;
    try client.init(try std.net.connectUnixSocket("/var/run/redis/redis.sock"));
    defer client.close();

    // first try to get cached value
    const reply: ?[]const u8 = switch (try client.sendAlloc(OrErr([]const u8), allocator, .{ "GET", "pkgs_json" })) {
        .Ok => |val| val,
        .Nil => blk: {
            // Cache miss: rebuild, then re-read below.
            try cachePkgs(&client, &arena);
            break :blk null;
        },
        .Err => {
            try resp.notFound();
            return error.Redis;
        },
    };

    const json = reply orelse client.sendAlloc([]const u8, allocator, .{ "GET", "pkgs_json" }) catch {
        std.log.err("failed to get cached json", .{});
        try resp.notFound();
        return;
    };

    try resp.headers.put("Content-Type", "application/json");
    try resp.headers.put("Access-Control-Allow-Origin", "*");
    try resp.writer().writeAll(json);
}

// GET /pkgs/:user/:pkg — JSON array of all published version strings.
fn versions(resp: *http.Response, req: http.Request, args: struct {
    user: []const u8,
    pkg: []const u8,
}) !void {
    var arena = ArenaAllocator.init(gpa);
    defer arena.deinit();
    const allocator = &arena.allocator;

    var client: redis.Client = undefined;
    try client.init(try std.net.connectUnixSocket("/var/run/redis/redis.sock"));
    defer client.close();

    const key = try std.fmt.allocPrint(allocator, "user:{s}:pkg:{s}", .{
        args.user,
        args.pkg,
    });
    const vers = try client.sendAlloc([][]const u8, allocator, .{ "SMEMBERS", key });
    if (vers.len == 0) {
        try resp.notFound();
        return;
    }

    var json = std.json.writeStream(resp.writer(), 4);
    try json.beginArray();
    for (vers) |ver| {
        try json.arrayElem();
        try json.emitString(ver);
    }
    try json.endArray();

    try resp.headers.put("Content-Type", "application/json");
    try resp.headers.put("Access-Control-Allow-Origin", "*");
}

// GET /pkgs/:user — latest metadata for every package owned by `user`.
fn userPkgs(resp: *http.Response, req: http.Request, user: []const u8) !void {
    var arena = ArenaAllocator.init(gpa);
    defer arena.deinit();
    const allocator = &arena.allocator;

    const key = try std.fmt.allocPrint(allocator, "user:{s}:pkgs", .{user});
    var client: redis.Client = undefined;
    try client.init(try std.net.connectUnixSocket("/var/run/redis/redis.sock"));
    defer client.close();

    // key minus the ":pkgs" suffix (5 chars) is the "user:<name>" key.
    const exists = try client.send(u1, .{ "EXISTS", key[0 .. key.len - 5] });
    if (exists == 0) {
        try resp.notFound();
        return;
    }

    const pkgs = try client.sendAlloc([][]const u8, allocator, .{ "SMEMBERS", key });
    try resp.headers.put("Content-Type", "application/json");
    try resp.headers.put("Access-Control-Allow-Origin", "*");

    var json = std.json.writeStream(resp.writer(), 5);
    try json.beginArray();
    for (pkgs) |pkg| {
        try json.arrayElem();
        try streamPkgToJson(&client, &arena, &json, user, pkg);
    }
    try json.endArray();
}

// GET /tags/:tag — latest metadata for every package carrying `tag`.
fn tagPkgs(resp: *http.Response, req: http.Request, tag: []const u8) !void {
    var arena = ArenaAllocator.init(gpa);
    defer arena.deinit();
    const allocator = &arena.allocator;

    const key = try std.fmt.allocPrint(allocator, "tag:{s}", .{tag});
    var client: redis.Client = undefined;
    try client.init(try std.net.connectUnixSocket("/var/run/redis/redis.sock"));
    defer client.close();

    const exists = try client.send(u1, .{ "EXISTS", key });
    if (exists == 0) {
        try resp.notFound();
        return;
    }

    const pkgs = try client.sendAlloc([][]const u8, allocator, .{ "SMEMBERS", key });
    try resp.headers.put("Content-Type", "application/json");
    try resp.headers.put("Access-Control-Allow-Origin", "*");

    var json = std.json.writeStream(resp.writer(), 5);
    try json.beginArray();
    for (pkgs) |pkg| {
        try json.arrayElem();
        // Set members are "<user>/<pkg>".
        var it = std.mem.tokenize(pkg, "/");
        try streamPkgToJson(
            &client,
            &arena,
            &json,
            it.next() orelse return error.NoUser,
            it.next() orelse return error.NoPkg,
        );
    }
    try json.endArray();
}

// get the latest version of a package
// GET /pkgs/:user/:pkg/latest — optionally constrained by ?v=<range>.
fn latest(resp: *http.Response, req: http.Request, args: struct {
    user: []const u8,
    pkg: []const u8,
}) !void {
    var client: redis.Client = undefined;
    try client.init(try std.net.connectUnixSocket("/var/run/redis/redis.sock"));
    defer client.close();

    var query = try req.url.queryParameters(gpa);
    defer query.deinit();

    if (query.get("v")) |range_str| {
        // Range query: scan all versions and keep the greatest in range.
        const range = try version.Range.parse(range_str);
        const key = try std.fmt.allocPrint(gpa, "user:{s}:pkg:{s}", .{
            args.user,
            args.pkg,
        });
        defer gpa.free(key);

        const vers = try client.sendAlloc([][]const u8, gpa, .{ "SMEMBERS", key });
        defer freeReply(vers, gpa);

        var semver: ?version.Semver = null;
        for (vers) |ver_str| {
            // Unparseable versions are skipped rather than failing the request.
            const new_semver = version.Semver.parse(ver_str) catch |err| {
                continue;
            };
            if (!range.contains(new_semver)) continue;
            semver = if (semver) |ver|
                if (new_semver.cmp(ver) == .gt) new_semver else semver
            else
                new_semver;
        }

        if (semver) |ver| {
            try resp.writer().print("{}", .{ver});
        } else {
            try resp.notFound();
        }
    } else {
        try resp.writer().print("{}", .{try getLatest(&client, args.user, args.pkg)});
    }
    try resp.headers.put("Access-Control-Allow-Origin", "*");
}

// Redis hash layout for a published package version.
const PkgMetadata = struct {
    description: ?[]const u8,
    license: ?[]const u8,
    source_url: ?[]const u8,
    homepage_url: ?[]const u8,
    downloads: u64,
};

// get all the info of a package
// GET /pkgs/:user/:pkg/:version — full JSON metadata for one version.
fn pkgInfo(resp: *http.Response, req: http.Request, args: struct {
    user: []const u8,
    pkg: []const u8,
    version: []const u8,
}) !void {
    var arena = ArenaAllocator.init(gpa);
    defer arena.deinit();
    const allocator = &arena.allocator;

    // validate version string
    _ = version.Semver.parse(args.version) catch {
        try resp.notFound();
        return;
    };

    var client: redis.Client = undefined;
    try client.init(try std.net.connectUnixSocket("/var/run/redis/redis.sock"));
    defer client.close();

    const key = try std.fmt.allocPrint(allocator, "user:{s}:pkg:{s}:{s}", .{
        args.user,
        args.pkg,
        args.version,
    });
    const tags_key = try std.fmt.allocPrint(allocator, "{s}:tags", .{key});
    const deps_key = try std.fmt.allocPrint(allocator, "{s}:deps", .{key});
    const build_deps_key = try std.fmt.allocPrint(allocator, "{s}:build_deps", .{key});

    const reply = client.sendAlloc(OrErr(PkgMetadata), allocator, HMGET.forStruct(PkgMetadata).init(key)) catch {
        try resp.notFound();
        return;
    };
    const metadata = switch (reply) {
        .Err, .Nil => {
            try resp.notFound();
            return;
        },
        .Ok => |val| val,
    };

    var json = std.json.writeStream(resp.writer(), 4);
    try json.beginObject();
    try json.objectField("user");
    try json.emitString(args.user);
    try json.objectField("name");
    try json.emitString(args.pkg);
    try json.objectField("version");
    try json.emitString(args.version);
    // Same field-emission scheme as streamPkgToJson.
    inline for (std.meta.fields(PkgMetadata)) |field| {
        switch (field.field_type) {
            u64 => {
                try json.objectField(field.name);
                try json.emitNumber(@field(metadata, field.name));
            },
            ?[]const u8 => {
                if (@field(metadata, field.name)) |value| {
                    try json.objectField(field.name);
                    try json.emitString(value);
                }
            },
            else => |T| @compileError("didn't account for this type: " ++ @typeName(T)),
        }
    }

    const tags = try client.sendAlloc([][]const u8, allocator, .{ "SMEMBERS", tags_key });
    const deps = try client.sendAlloc([][]const u8, allocator, .{ "SMEMBERS", deps_key });
    const build_deps = try client.sendAlloc([][]const u8, allocator, .{ "SMEMBERS", build_deps_key });

    try json.objectField("tags");
    try json.beginArray();
    for (tags) |tag| {
        try json.arrayElem();
        try json.emitString(tag);
    }
    try json.endArray();

    var parser = std.json.Parser.init(allocator, false);
    defer parser.deinit();

    try json.objectField("deps");
    try json.beginArray();
    for (deps) |dep| {
        var value_tree = try parser.parse(dep);
        defer value_tree.deinit();
        try json.arrayElem();
        try json.emitJson(value_tree.root);
        parser.reset();
    }
    try json.endArray();

    try json.objectField("build_deps");
    try json.beginArray();
    for (build_deps) |build_dep| {
        var value_tree = try parser.parse(build_dep);
        defer value_tree.deinit();
        try json.arrayElem();
        try json.emitJson(value_tree.root);
        parser.reset();
    }
    try json.endArray();
    try json.endObject();

    try resp.headers.put("Content-Type", "application/json");
    try resp.headers.put("Access-Control-Allow-Origin", "*");
}

// GET /trees/:user/:pkg/:version?path=... — stream one file out of a
// package's gzipped tarball without unpacking it to disk.
fn trees(resp: *http.Response, req: http.Request, args: struct {
    user: []const u8,
    pkg: []const u8,
    version: []const u8,
}) !void {
    var arena = ArenaAllocator.init(gpa);
    defer arena.deinit();
    const allocator = &arena.allocator;

    var query = try req.url.queryParameters(allocator);
    defer query.deinit();

    const subpath = try std.fs.path.join(allocator, &.{
        "pkg",
        query.get("path") orelse {
            try resp.notFound();
            return;
        },
    });

    // Parsed only to validate args.version; result intentionally unused.
    const semver = try version.Semver.parse(args.version);
    const path = try std.fs.path.join(
        allocator,
        &.{ archive_path, args.user, args.pkg, args.version },
    );

    const file = std.fs.openFileAbsolute(path, .{ .read = true }) catch |err| {
        if (err == error.FileNotFound) {
            try resp.notFound();
            return;
        } else return err;
    };
    defer file.close();

    var gzip = try std.compress.gzip.gzipStream(allocator, file.reader());
    defer gzip.deinit();

    var extractor = tar.fileExtractor(subpath, gzip.reader());
    var fifo = std.fifo.LinearFifo(u8, .{ .Static = std.mem.page_size }).init();

    // set headers
    try resp.headers.put("Access-Control-Allow-Origin", "*");
    fifo.pump(extractor.reader(), resp.writer()) catch |err| {
        if (err == error.FileNotFound) try resp.notFound() else return err;
    };
}

// GET /archive/:user/:pkg/:version — serve the tarball via the static
// file server and bump the download counter.
fn archive(resp: *http.Response, req: http.Request, args: struct {
    user: []const u8,
    pkg: []const u8,
    version: []const u8,
}) !void {
    // validate version string
    _ = version.Semver.parse(args.version) catch {
        try resp.notFound();
        return;
    };

    var client: redis.Client = undefined;
    try client.init(try std.net.connectUnixSocket("/var/run/redis/redis.sock"));
    defer client.close();

    const key = try std.fmt.allocPrint(gpa, "user:{s}:pkg:{s}:{s}", .{ args.user, args.pkg, args.version });
    defer gpa.free(key);

    // make sure package exists
    // NOTE(review): slicing off 6 chars assumes the ":<version>" suffix
    // is always exactly 6 bytes (e.g. ":0.1.0"); a multi-digit version
    // like "0.10.0" would break this — verify against publish().
    const versions_key = key[0 .. key.len - 6];
    if ((try client.send(usize, SISMEMBER.init(versions_key, args.version))) == 0) {
        try resp.notFound();
        return;
    }

    try fs.serve(resp, req);
    try client.send(void, .{ "HINCRBY", key, "downloads", 1 });
}

// POST /publish — authenticated upload of a package tarball. Reads the
// identity headers set by the auth proxy, extracts manifest.zzz from the
// request body, stores the tarball on disk and the metadata in redis.
fn publish(resp: *http.Response, req: http.Request) !void {
    var arena = ArenaAllocator.init(gpa);
    defer arena.deinit();
    const allocator = &arena.allocator;

    var headers = try req.headers(allocator);
    const authorization = headers.get("Authorization") orelse return error.NoAuth;
    const id = headers.get("X-User-Id") orelse return error.NoId;
    const provider = headers.get("X-User-Provider") orelse return error.NoProvider;
    const username = headers.get("X-User-Username") orelse return error.NoUsername;
    const email = headers.get("X-User-Email") orelse return error.NoEmail;
    const length = headers.get("Content-Length") orelse return error.NoContentLength;

    if (!std.mem.eql(u8, provider, "github")) {
        resp.status_code = .service_unavailable;
        try resp.body.print("Unsupported provider: {s}", .{provider});
        return;
    }

    var client: redis.Client = undefined;
    try client.init(try std.net.connectUnixSocket("/var/run/redis/redis.sock"));
    defer client.close();

    createUser(&client, &arena, id, username, email) catch |err| switch (err) {
        error.Banned => {
            resp.status_code = .forbidden;
            try resp.body.writeAll("You are banned");
            return;
        },
        error.Taken => {
            resp.status_code = .forbidden;
            try resp.body.print("username '{s}' used to be used by a different github account", .{username});
            return;
        },
        else => return err,
    };

    // TODO: rate limiting
    if (1_000_000 < try std.fmt.parseInt(usize, length, 10)) {
        resp.status_code = .forbidden;
        try resp.body.writeAll("uncompressed package size is greater that 1 million bytes");
        return;
    }

    var archive_dir = try std.fs.openDirAbsolute(archive_path, .{});
    defer archive_dir.close();

    var user_dir = try archive_dir.makeOpenPath(username, .{});
    defer user_dir.close();

    // Pull manifest.zzz straight out of the uploaded tar in memory.
    const text = blk: {
        var fixed_buffer = std.io.fixedBufferStream(req.body);
        var extractor = tar.fileExtractor("manifest.zzz", fixed_buffer.reader());
        break :blk try extractor.reader().readAllAlloc(allocator, std.math.maxInt(usize));
    };

    var tree = zzz.ZTree(1, 1000){};
    var root = try tree.appendText(text);
    // zFindString comes from common.zig via usingnamespace.
    const ver_str = (try zFindString(root, "version")) orelse return error.NoVersion;
    const name = (try zFindString(root, "name")) orelse return error.NoName;

    _ = version.Semver.parse(ver_str) catch {
        resp.status_code = .bad_request;
        try resp.body.print("Invalid version string: {s}", .{ver_str});
        return;
    };

    var dst_dir = try user_dir.makeOpenPath(name, .{});
    defer dst_dir.close();

    {
        // TODO: change to proper gzip compression
        const file = try dst_dir.createFile(try std.mem.join(allocator, "", &.{ ver_str, ".tar" }), .{});
        defer file.close();
        try file.writer().writeAll(req.body);
    }

    createPkg(&client, &arena, root, username, name, ver_str) catch |err| if (err == error.AlreadyPublished) {
        resp.status_code = .bad_request;
        try resp.body.print("Package already published: '{s}/{s}' {s}", .{ username, name, ver_str });
        return;
    } else return err;

    // Invalidate the /pkgs cache so the new package shows up immediately.
    try client.send(void, .{ "DEL", "pkgs_json" });
    try resp.body.print("Package successfully published to https://astrolabe.pm/#/package/{s}/{s}/{s}", .{ username, name, ver_str });
}

// Ensure a redis user record exists for this github identity.
// (Definition continues beyond this chunk of the file.)
fn createUser(
    client: *redis.Client,
    arena: *ArenaAllocator,
    id: []const u8,
    username: []const u8,
    email: []const u8,
) anyerror!void {
    const allocator = &arena.allocator;
    const keys = .{
        .id = try std.fmt.allocPrint(allocator, "github:{s}", .{id}),
        .user = try std.fmt.allocPrint(allocator, "user:{s}", .{username}),
    };

    // id value is the username
    const id_user: ?[]const u8 = client.sendAlloc([]const u8, allocator, GET.init(keys.id)) catch |err| if (err == error.GotNilReply) null else return err;

    // user id should be equal to id
    const user_id: ?[]const u8 = client.sendAlloc([]const u8, allocator, .{ "HGET", keys.user, "id" }) catch |err| if (err == error.GotNilReply) null else return err;

    // check if the user is banned, or check if the user is not
registered to a different id if (user_id) |ui| { if (std.mem.eql(u8, ui, "banned")) return error.Banned; if (!std.mem.eql(u8, id, ui)) return error.Taken; if (id_user != null) return; } try client.send(void, .{ "SET", keys.id, username }); try client.send(void, HSET.init(keys.user, &[_]FV{ .{ .field = "id", .value = id }, .{ .field = "email", .value = email }, })); const path = try std.fs.path.join(allocator, &.{ "/var/www/archive/", username }); std.fs.makeDirAbsolute(path) catch |err| { if (err != error.PathAlreadyExists) return err; }; std.log.info("added user '{s}'", .{username}); } pub fn serializeDepsToJson( arena: *ArenaAllocator, root: *zzz.ZNode, tag: []const u8, ) !std.ArrayList([]const u8) { const allocator = &arena.allocator; var ret = std.ArrayList([]const u8).init(allocator); if (zFindChild(root, tag)) |deps_node| { var it = ZChildIterator.init(deps_node); const stdout = std.io.getStdOut().writer(); while (it.next()) |child_node| { const src_node = zFindChild(child_node, "src") orelse return error.NoSrcTag; const type_node = src_node.*.child orelse return error.NoSrcType; const type_str = try zGetString(type_node); var fifo = std.fifo.LinearFifo(u8, .{ .Dynamic = {} }).init(allocator); var json = std.json.writeStream(fifo.writer(), 4); json.whitespace = .{ .indent = .{ .Space = 0 }, .separator = false, }; try json.beginObject(); try json.objectField(type_str); if (std.mem.eql(u8, "pkg", type_str)) { try json.beginObject(); try json.objectField("repository"); try json.emitString((try zFindString(type_node, "repository")) orelse return error.NoRepository); try json.objectField("user"); try json.emitString((try zFindString(type_node, "user")) orelse return error.NoUser); try json.objectField("name"); try json.emitString((try zFindString(type_node, "name")) orelse return error.NoName); try json.objectField("version"); try json.emitString((try zFindString(type_node, "version")) orelse return error.NoVersion); try json.endObject(); } else if 
(std.mem.eql(u8, "github", type_str)) { try json.beginObject(); try json.objectField("repo"); try json.emitString((try zFindString(type_node, "repo")) orelse return error.NoRepo); try json.objectField("user"); try json.emitString((try zFindString(type_node, "user")) orelse return error.NoUser); try json.objectField("ref"); try json.emitString((try zFindString(type_node, "ref")) orelse return error.NoRef); try json.endObject(); } else if (std.mem.eql(u8, "url", type_str)) { try json.emitString(try zGetString(type_node.*.child orelse return error.NoUrl)); } else return error.InvalidTag; try json.endObject(); // have to do replacement because the stream writer doesn't minify correctly try ret.append(try std.mem.replaceOwned(u8, allocator, fifo.readableSlice(0), "\n", "")); } } return ret; } pub fn createPkg( client: *redis.Client, arena: *ArenaAllocator, manifest: *zzz.ZNode, username: []const u8, name: []const u8, semver_str: []const u8, ) !void { const allocator = &arena.allocator; const deps = try serializeDepsToJson(arena, manifest, "deps"); const build_deps = try serializeDepsToJson(arena, manifest, "build_deps"); var tags = std.ArrayList([]const u8).init(allocator); if (zFindChild(manifest, "tags")) |tags_node| { var it = ZChildIterator.init(tags_node); while (it.next()) |tag_node| try tags.append(try zGetString(tag_node)); } // required fields const semver = try version.Semver.parse(semver_str); // check if it exists const versions_key = try std.fmt.allocPrint(allocator, "user:{s}:pkg:{s}", .{ username, name, }); if ((try client.send(usize, SISMEMBER.init(versions_key, semver_str))) > 0) { std.log.err("'{s}/{s}' {} is already published", .{ username, name, semver }); return error.AlreadyPublished; } // is it the latest? 
const latest_key = try std.fmt.allocPrint(allocator, "{s}:latest", .{versions_key}); const latest_str: ?[]const u8 = switch (try client.sendAlloc(OrErr([]const u8), allocator, GET.init(latest_key))) { .Ok => |val| val, .Nil => null, .Err => return error.Redis, }; const publishing_latest = if (latest_str) |str| switch (semver.cmp(try version.Semver.parse(str))) { .lt => false, .gt => true, .eq => { std.log.err("'{s}/{s}' {} is already published", .{ username, name, semver }); return error.AlreadyPublished; }, } else true; var fields = std.ArrayList(FV).init(allocator); inline for (optional_fields) |field| { if (try zFindString(manifest, field)) |value| try fields.append(.{ .field = field, .value = value }); } try fields.append(.{ .field = "downloads", .value = "0" }); const current_key = try std.fmt.allocPrint(allocator, "{s}:{}", .{ versions_key, semver, }); const keys = .{ .pkg = current_key, .versions = versions_key, .tags = try std.fmt.allocPrint(allocator, "{s}:tags", .{current_key}), .deps = try std.fmt.allocPrint(allocator, "{s}:deps", .{current_key}), .build_deps = try std.fmt.allocPrint(allocator, "{s}:build_deps", .{ current_key, }), }; // TODO: do proper cleanup on error const reply = try client.trans(struct { pkg: OrErr(void), versions: OrErr(void), }, .{ HSET.init(keys.pkg, fields.items), SADD.init(keys.versions, &.{semver_str}), }); errdefer { inline for (std.meta.fields(@TypeOf(reply))) |field| { if (@field(reply, field.name) == .Ok) { std.log.debug("removing {s}", .{field.name}); client.send(void, .{ "DEL", @field(keys, field.name) }) catch {}; } } } inline for (std.meta.fields(@TypeOf(reply))) |field| { if (@field(reply, field.name) != .Ok) { std.log.err("issue with {s}", .{field.name}); return error.Redis; } } if (tags.items.len > 0) try client.send(void, SADD.init(keys.tags, tags.items)); errdefer client.send(void, .{ "DEL", keys.tags }) catch {}; if (deps.items.len > 0) try client.send(void, SADD.init(keys.deps, deps.items)); errdefer 
client.send(void, .{ "DEL", keys.deps }) catch {}; if (build_deps.items.len > 0) try client.send(void, SADD.init(keys.build_deps, build_deps.items)); errdefer client.send(void, .{ "DEL", keys.build_deps }) catch {}; var old_version: ?[]const u8 = if (publishing_latest) blk: { const ret: ?[]const u8 = switch (try client.sendAlloc(OrErr([]const u8), allocator, GET.init(latest_key))) { .Ok => |val| val, .Nil => null, .Err => return error.OldLatest, }; try client.send(void, SET.init(latest_key, semver_str, .NoExpire, .NoConditions)); break :blk ret; } else null; errdefer if (publishing_latest) { if (old_version) |ver| client.send(void, SET.init(latest_key, ver, .NoExpire, .NoConditions)) catch {} else client.send(void, .{ "DEL", latest_key }) catch {}; }; const package_id = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ username, name }); for (tags.items) |tag| { const tag_key = try std.fmt.allocPrint(allocator, "tag:{s}", .{tag}); try client.send(void, SADD.init(tag_key, &.{package_id})); } // TODO: rollback try client.send(void, SADD.init("pkgs", &.{package_id})); const user_pkgs_key = try std.fmt.allocPrint(allocator, "user:{s}:pkgs", .{username}); try client.send(void, SADD.init(user_pkgs_key, &.{name})); }
src/main.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
const fmtUtf8SliceEscapeUpper = @import("util.zig").fmtUtf8SliceEscapeUpper;

/// Like MetadataMap, but uses description/language as the keys
/// so it works for ID3v2 comments/unsynchronized lyrics
pub const FullTextMap = struct {
    allocator: Allocator,
    // Flat list of all entries; both index maps below point into it.
    entries: EntryList,
    // language -> indexes into `entries`; keys are owned (duped) strings.
    language_to_indexes: LanguageToIndexesMap,
    // description -> indexes into `entries`; keys are owned (duped) strings.
    description_to_indexes: DescriptionToIndexesMap,

    pub const Entry = struct {
        // `language` and `description` alias the owned map keys; only `value`
        // is freed per-entry in deinit.
        language: []const u8,
        description: []const u8,
        value: []const u8,
    };
    const EntryList = std.ArrayListUnmanaged(Entry);
    const IndexList = std.ArrayListUnmanaged(usize);
    const LanguageToIndexesMap = std.StringHashMapUnmanaged(IndexList);
    const DescriptionToIndexesMap = std.StringHashMapUnmanaged(IndexList);

    pub fn init(allocator: Allocator) FullTextMap {
        return .{
            .allocator = allocator,
            .entries = .{},
            .language_to_indexes = .{},
            .description_to_indexes = .{},
        };
    }

    /// Frees entry values, both index maps (including their owned keys), and
    /// the entry list itself.
    pub fn deinit(self: *FullTextMap) void {
        for (self.entries.items) |item| {
            self.allocator.free(item.value);
        }
        self.entries.deinit(self.allocator);
        var lang_it = self.language_to_indexes.iterator();
        while (lang_it.next()) |entry| {
            entry.value_ptr.deinit(self.allocator);
            self.allocator.free(entry.key_ptr.*);
        }
        self.language_to_indexes.deinit(self.allocator);
        var desc_it = self.description_to_indexes.iterator();
        while (desc_it.next()) |entry| {
            entry.value_ptr.deinit(self.allocator);
            self.allocator.free(entry.key_ptr.*);
        }
        self.description_to_indexes.deinit(self.allocator);
    }

    pub const LangAndDescEntries = struct {
        lang: LanguageToIndexesMap.Entry,
        desc: DescriptionToIndexesMap.Entry,
    };

    /// Looks up (or inserts, duping the key) the index-list entries for
    /// `language` and `description`. The returned entries hold pointers into
    /// the maps — they are invalidated by any later map growth.
    pub fn getOrPutIndexesEntries(self: *FullTextMap, language: []const u8, description: []const u8) !LangAndDescEntries {
        var lang_entry = lang_entry: {
            if (self.language_to_indexes.getEntry(language)) |entry| {
                break :lang_entry entry;
            } else {
                // Key not present: the map takes ownership of a duped copy.
                var lang_dup = try self.allocator.dupe(u8, language);
                errdefer self.allocator.free(lang_dup);
                const entry = try self.language_to_indexes.getOrPutValue(self.allocator, lang_dup, IndexList{});
                break :lang_entry entry;
            }
        };
        var desc_entry = desc_entry: {
            if (self.description_to_indexes.getEntry(description)) |entry| {
                break :desc_entry entry;
            } else {
                var desc_dup = try self.allocator.dupe(u8, description);
                errdefer self.allocator.free(desc_dup);
                const entry = try self.description_to_indexes.getOrPutValue(self.allocator, desc_dup, IndexList{});
                break :desc_entry entry;
            }
        };
        return LangAndDescEntries{
            .lang = lang_entry,
            .desc = desc_entry,
        };
    }

    /// Appends a new entry (duping `value`) and records its index in both
    /// index lists obtained from `getOrPutIndexesEntries`.
    pub fn appendToEntries(self: *FullTextMap, entries: LangAndDescEntries, value: []const u8) !void {
        const entry_index = entry_index: {
            const value_dup = try self.allocator.dupe(u8, value);
            errdefer self.allocator.free(value_dup);
            const entry_index = self.entries.items.len;
            try self.entries.append(self.allocator, Entry{
                // Alias the map-owned key strings rather than duping again.
                .language = entries.lang.key_ptr.*,
                .description = entries.desc.key_ptr.*,
                .value = value_dup,
            });
            break :entry_index entry_index;
        };
        try entries.lang.value_ptr.append(self.allocator, entry_index);
        try entries.desc.value_ptr.append(self.allocator, entry_index);
    }

    /// Convenience: getOrPutIndexesEntries + appendToEntries in one call.
    pub fn put(self: *FullTextMap, language: []const u8, description: []const u8, value: []const u8) !void {
        const indexes_entries = try self.getOrPutIndexesEntries(language, description);
        try self.appendToEntries(indexes_entries, value);
    }

    /// Debug helper: prints every entry as "language,description=value" with
    /// non-printable bytes escaped.
    pub fn dump(self: *const FullTextMap) void {
        for (self.entries.items) |entry| {
            std.debug.print("{s},{s}={s}\n", .{ std.fmt.fmtSliceEscapeUpper(entry.language), fmtUtf8SliceEscapeUpper(entry.description), fmtUtf8SliceEscapeUpper(entry.value) });
        }
    }
};
src/id3v2_data.zig
const std = @import("std");

// Identity transformer: field names are emitted as-is and nothing is
// allocated, so callers of the default never need to free.
fn defaultTransformer(field_name: []const u8, _: EncodingOptions) anyerror![]const u8 {
    return field_name;
}

pub const FieldNameTransformer = fn ([]const u8, EncodingOptions) anyerror![]const u8;

pub const EncodingOptions = struct {
    // Required only when a non-default transformer allocates, or when nested
    // structs force building dotted key prefixes at runtime.
    allocator: ?std.mem.Allocator = null,
    field_name_transformer: *const FieldNameTransformer = &defaultTransformer,
};

/// Serializes `obj` (a struct, possibly nested) as application/x-www-form-urlencoded
/// key=value pairs joined by '&', writing to `writer`. Nested struct fields
/// become dotted keys ("parent.child=..."). Null optionals are skipped.
/// NOTE(review): values are not percent-escaped — TODO confirm callers only
/// pass URL-safe data.
pub fn encode(obj: anytype, writer: anytype, options: EncodingOptions) !void {
    _ = try encodeInternal("", "", true, obj, writer, options);
}

// Encodes every field of a struct. `first` tracks whether any pair has been
// written yet (controls the '&' separator); the updated flag is returned.
fn encodeStruct(parent: []const u8, first: bool, obj: anytype, writer: anytype, options: EncodingOptions) !bool {
    var rc = first;
    inline for (@typeInfo(@TypeOf(obj)).Struct.fields) |field| {
        const field_name = try options.field_name_transformer.*(field.name, options);
        // Only free when a custom transformer (which may allocate) was used.
        defer if (options.field_name_transformer.* != defaultTransformer)
            if (options.allocator) |a| a.free(field_name);
        // @compileLog(@typeInfo(field.field_type).Pointer);
        rc = try encodeInternal(parent, field_name, rc, @field(obj, field.name), writer, options);
    }
    return rc;
}

/// Comptime-recursive worker behind `encode`. Returns the updated `first`
/// flag (false once any pair has been written).
pub fn encodeInternal(parent: []const u8, field_name: []const u8, first: bool, obj: anytype, writer: anytype, options: EncodingOptions) !bool {
    // @compileLog(@typeInfo(@TypeOf(obj)));
    var rc = first;
    switch (@typeInfo(@TypeOf(obj))) {
        // Null optionals emit nothing; present payloads recurse.
        .Optional => if (obj) |o| {
            rc = try encodeInternal(parent, field_name, first, o, writer, options);
        },
        .Pointer => |ti| if (ti.size == .One) {
            // Single-item pointer: encode the pointee.
            rc = try encodeInternal(parent, field_name, first, obj.*, writer, options);
        } else {
            // Slice/many pointer: formatted with {s}, i.e. assumed to be a
            // string — TODO confirm non-u8 slices are never passed here.
            if (!first) _ = try writer.write("&");
            try writer.print("{s}{s}={s}", .{ parent, field_name, obj });
            rc = false;
        },
        .Struct => if (std.mem.eql(u8, "", field_name)) {
            // Top-level struct: no prefix to add.
            rc = try encodeStruct(parent, first, obj, writer, options);
        } else {
            // TODO: It would be lovely if we could concat at compile time or allocPrint at runtime
            // XOR have compile time allocator support. Alas, neither are possible:
            // https://github.com/ziglang/zig/issues/868: Comptime detection (feels like foot gun)
            // https://github.com/ziglang/zig/issues/1291: Comptime allocator
            const allocator = options.allocator orelse return error.AllocatorRequired;
            const new_parent = try std.fmt.allocPrint(allocator, "{s}{s}.", .{ parent, field_name });
            defer allocator.free(new_parent);
            rc = try encodeStruct(new_parent, first, obj, writer, options);
            // try encodeStruct(parent ++ field_name ++ ".", first, obj, writer, options);
        },
        .Array => {
            if (!first) _ = try writer.write("&");
            try writer.print("{s}{s}={s}", .{ parent, field_name, obj });
            rc = false;
        },
        .Int, .ComptimeInt, .Float, .ComptimeFloat => {
            if (!first) _ = try writer.write("&");
            try writer.print("{s}{s}={d}", .{ parent, field_name, obj });
            rc = false;
        },
        // BUGS! any doesn't work - a lot. Check this out:
        // https://github.com/ziglang/zig/blob/master/lib/std/fmt.zig#L424
        else => {
            if (!first) _ = try writer.write("&");
            try writer.print("{s}{s}={any}", .{ parent, field_name, obj });
            rc = false;
        },
    }
    return rc;
}

// Test helper: encodes `value` into a writer that compares the output byte
// stream against `expected` as it is written.
fn testencode(expected: []const u8, value: anytype, options: EncodingOptions) !void {
    const ValidationWriter = struct {
        const Self = @This();
        pub const Writer = std.io.Writer(*Self, Error, write);
        pub const Error = error{
            TooMuchData,
            DifferentData,
        };

        expected_remaining: []const u8,

        fn init(exp: []const u8) Self {
            return .{ .expected_remaining = exp };
        }

        pub fn writer(self: *Self) Writer {
            return .{ .context = self };
        }

        fn write(self: *Self, bytes: []const u8) Error!usize {
            // std.debug.print("{s}\n", .{bytes});
            if (self.expected_remaining.len < bytes.len) {
                std.log.warn(
                    \\====== expected this output: =========
                    \\{s}
                    \\======== instead found this: =========
                    \\{s}
                    \\======================================
                , .{
                    self.expected_remaining,
                    bytes,
                });
                return error.TooMuchData;
            }
            if (!std.mem.eql(u8, self.expected_remaining[0..bytes.len], bytes)) {
                std.log.warn(
                    \\====== expected this output: =========
                    \\{s}
                    \\======== instead found this: =========
                    \\{s}
                    \\======================================
                , .{
                    self.expected_remaining[0..bytes.len],
                    bytes,
                });
                return error.DifferentData;
            }
            self.expected_remaining = self.expected_remaining[bytes.len..];
            return bytes.len;
        }
    };
    var vos = ValidationWriter.init(expected);
    try encode(value, vos.writer(), options);
    if (vos.expected_remaining.len > 0) return error.NotEnoughData;
}

test "can urlencode an object" {
    try testencode(
        "Action=GetCallerIdentity&Version=2021-01-01",
        .{ .Action = "GetCallerIdentity", .Version = "2021-01-01" },
        .{},
    );
}

test "can urlencode an object with integer" {
    try testencode(
        "Action=GetCallerIdentity&Duration=32",
        .{ .Action = "GetCallerIdentity", .Duration = 32 },
        .{},
    );
}

const UnsetValues = struct {
    action: ?[]const u8 = null,
    duration: ?i64 = null,
    val1: ?i64 = null,
    val2: ?[]const u8 = null,
};

test "can urlencode an object with unset values" {
    // var buffer = std.ArrayList(u8).init(std.testing.allocator);
    // defer buffer.deinit();
    // const writer = buffer.writer();
    // try encode(
    //     UnsetValues{ .action = "GetCallerIdentity", .duration = 32 },
    //     writer,
    //     .{ .allocator = std.testing.allocator },
    // );
    // std.debug.print("{s}", .{buffer.items});
    try testencode(
        "action=GetCallerIdentity&duration=32",
        UnsetValues{ .action = "GetCallerIdentity", .duration = 32 },
        .{},
    );
}

test "can urlencode a complex object" {
    try testencode(
        "Action=GetCallerIdentity&Version=2021-01-01&complex.innermember=foo",
        .{ .Action = "GetCallerIdentity", .Version = "2021-01-01", .complex = .{ .innermember = "foo" } },
        .{ .allocator = std.testing.allocator },
    );
}
src/url.zig
// Physical Memory Map (from n64.readthedocs.io)
// Address Range        | Name      | Description
// 00000000h-003FFFFFh  | RDRAM     | Built-in
// 00400000h-007FFFFFh  | RDRAM     | Expansion Pak
// 03F00000h-03FFFFFFh  | RDRAM IO  | RDRAM MMIO
// 04000000h-04000FFFh  | SP DMEM   | RSP Data Memory
// 04001000h-04001FFFh  | SP IMEM   | RSP Instruction Memory
// 04040000h-040FFFFFh  | SP IO     | RSP DMA, PC, ...
// 04100000h-041FFFFFh  | DP COM    | RDP Command
// 04200000h-042FFFFFh  | DP SPAN   | ???
// 04300000h-043FFFFFh  | MI        | MIPS Interface
// 04400000h-044FFFFFh  | VI        | Video Interface
// 04500000h-045FFFFFh  | AI        | Audio Interface
// 04600000h-046FFFFFh  | PI        | Peripheral Interface
// 04700000h-047FFFFFh  | RI        | RDRAM Interface
// 04800000h-048FFFFFh  | SI        | Serial Interface
// 05000000h-05FFFFFFh  | CART2 A1  | N64DD Control
// 06000000h-07FFFFFFh  | CART1 A1  | N64DD IPL
// 08000000h-0FFFFFFFh  | CART2 A2  | SRAM
// 10000000h-1FBFFFFFh  | CART1 A2  | ROM
// 1FC00000h-1FC007BFh  | PIF ROM   |
// 1FC007C0h-1FC007FFh  | PIF RAM   |
// 1FD00000h-7FFFFFFFh  | CART1 A3  |

const std = @import("std");

const ai = @import("ai.zig");
const mi = @import("mi.zig");
const si = @import("si.zig");
const vi = @import("vi.zig");
const pif = @import("pif.zig");
const rsp = @import("rsp.zig");

// Region selectors: values are `physical address >> 20` (1 MB granularity),
// matching the switch prongs in the read/write functions below.
const MemoryRegion = enum(u64) {
    RDRAM0 = 0x000,
    RDRAM1 = 0x001,
    RDRAM2 = 0x002,
    RDRAM3 = 0x003,
    RDRAM4 = 0x004,
    RDRAM5 = 0x005,
    RDRAM6 = 0x006,
    RDRAM7 = 0x007,
    RDRAMIO = 0x03F,
    RSP = 0x040,
    MI = 0x043,
    VI = 0x044,
    AI = 0x045,
    PI = 0x046,
    RI = 0x047,
    SI = 0x048,
    CartS = 0x100,
    CartE = 0x1FB,
    PIF = 0x1FC,
};

// TODO: write own module for PI stuff
// PI register offsets within the PI region.
const PIReg = enum(u64) {
    DRAMAddr = 0x00,
    CartAddr = 0x04,
    ReadLen = 0x08,
    WriteLen = 0x0C,
    PIStatus = 0x10,
};

// PI status register bit layout (32 bits total).
const PIStatus = packed struct {
    dmaBusy : bool = false,
    ioBusy  : bool = false,
    dmaError: bool = false,
    dmaIRQ  : bool = false,
    _pad0   : u12 = 0,
    _pad1   : u16 = 0,
};

const PIRegs = struct {
    dramAddr: u32 = undefined,
    cartAddr: u32 = undefined,
    writeLen: u32 = undefined,
    piStatus: PIStatus = PIStatus{},
};

var piRegs = PIRegs{};

pub var ram: []u8 = undefined; // RDRAM
var rom: []u8 = undefined; // Cartridge ROM

/// Initialize bus module: allocates 8 MB of RDRAM, loads the ROM (up to 8 MB),
/// and on fast boot copies the first 1000h bytes of ROM into SP DMEM,
/// skipping the PIF boot sequence.
pub fn init(alloc: std.mem.Allocator, romPath: []const u8, isFastBoot: bool) anyerror!void {
    ram = try alloc.alloc(u8, 0x80_0000);
    // Load ROM file
    const romFile = try std.fs.cwd().openFile(
        romPath,
        .{.read = true},
    );
    defer romFile.close();
    rom = try romFile.reader().readAllAlloc(alloc, 0x80_0000);
    if (isFastBoot) {
        // Copy first 1000h bytes of ROM to SP DMEM
        @memcpy(@ptrCast([*]u8, &rsp.spDMEM), @ptrCast([*]u8, rom), 0x1000);
    }
}

/// Deinitialize bus module
pub fn deinit(alloc: std.mem.Allocator) void {
    alloc.free(ram);
    alloc.free(rom);
}

/// Byte read; only RDRAM and RSP regions are handled, anything else aborts.
pub fn read8(pAddr: u64) u8 {
    var data: u8 = undefined;
    switch (pAddr >> 20) {
        @enumToInt(MemoryRegion.RDRAM0) ... @enumToInt(MemoryRegion.RDRAM7) => {
            data = ram[pAddr & 0x7F_FFFF];
        },
        @enumToInt(MemoryRegion.RSP) => {
            data = rsp.read8(pAddr);
        },
        else => {
            std.log.warn("[Bus] Unhandled read8 @ pAddr {X}h.", .{pAddr});
            unreachable;
        }
    }
    return data;
}

/// Half-word read; RDRAM bytes are byte-swapped to host order.
pub fn read16(pAddr: u64) u16 {
    if ((pAddr & 1) != 0) @panic("unaligned read16");
    var data: u16 = undefined;
    switch (pAddr >> 20) {
        @enumToInt(MemoryRegion.RDRAM0) ... @enumToInt(MemoryRegion.RDRAM7) => {
            @memcpy(@ptrCast([*]u8, &data), @ptrCast([*]u8, &ram[pAddr & 0x7F_FFFF]), 2);
            data = @byteSwap(u16, data);
        },
        @enumToInt(MemoryRegion.RSP) => {
            data = rsp.read16(pAddr);
        },
        else => {
            std.log.warn("[Bus] Unhandled read16 @ pAddr {X}h.", .{pAddr});
            unreachable;
        }
    }
    return data;
}

/// Word read: dispatches to RDRAM, MMIO modules, cartridge ROM, or PIF RAM by
/// region. RDRAM/ROM/PIF data is byte-swapped to host order.
pub fn read32(pAddr: u64) u32 {
    if ((pAddr & 3) != 0) @panic("unaligned read32");
    var data: u32 = undefined;
    switch (pAddr >> 20) {
        @enumToInt(MemoryRegion.RDRAM0) ... @enumToInt(MemoryRegion.RDRAM7) => {
            @memcpy(@ptrCast([*]u8, &data), @ptrCast([*]u8, &ram[pAddr & 0x7F_FFFF]), 4);
            data = @byteSwap(u32, data);
        },
        @enumToInt(MemoryRegion.RDRAMIO) => {
            switch (pAddr & 0xFF_FFFF) {
                else => {
                    std.log.warn("[Bus] Unhandled read32 @ pAddr {X}h (RDRAM I/O).", .{pAddr});
                    data = 0;
                }
            }
        },
        @enumToInt(MemoryRegion.RSP) => {
            data = rsp.read32(pAddr);
        },
        @enumToInt(MemoryRegion.MI) => {
            data = mi.read32(pAddr);
        },
        @enumToInt(MemoryRegion.VI) => {
            data = vi.read32(pAddr);
        },
        @enumToInt(MemoryRegion.AI) => {
            data = ai.read32(pAddr);
        },
        @enumToInt(MemoryRegion.PI) => {
            switch (pAddr & 0xF_FFFF) {
                @enumToInt(PIReg.PIStatus) => {
                    std.log.info("[Bus] Read32 @ pAddr {X}h (PI Status).", .{pAddr});
                    data = @bitCast(u32, piRegs.piStatus);
                },
                else => {
                    // NOTE(review): `data` is left undefined on this path.
                    std.log.warn("[Bus] Unhandled read32 @ pAddr {X}h (Peripheral Interface).", .{pAddr});
                }
            }
        },
        @enumToInt(MemoryRegion.RI) => {
            switch (pAddr & 0xF_FFFF) {
                else => {
                    std.log.warn("[Bus] Unhandled read32 @ pAddr {X}h (RDRAM Interface).", .{pAddr});
                    data = 0;
                }
            }
        },
        @enumToInt(MemoryRegion.SI) => {
            data = si.read32(pAddr);
        },
        @enumToInt(MemoryRegion.CartS) ... @enumToInt(MemoryRegion.CartE) => {
            @memcpy(@ptrCast([*]u8, &data), @ptrCast([*]u8, &rom[pAddr - 0x1000_0000]), 4);
            data = @byteSwap(u32, data);
        },
        @enumToInt(MemoryRegion.PIF) => {
            if (pAddr >= 0x1FC0_07C0 and pAddr < 0x1FC0_0800) {
                std.log.info("[Bus] Read32 @ pAddr {X}h (PIF RAM).", .{pAddr});
                @memcpy(@ptrCast([*]u8, &data), @ptrCast([*]u8, &pif.pifRAM[pAddr - 0x1FC0_07C0]), 4);
                data = @byteSwap(u32, data);
            } else {
                // NOTE(review): message says "write32" but this is read32, and
                // `data` is still undefined when logged here.
                std.log.warn("[Bus] Unhandled write32 @ pAddr {X}h (PIF), data: {X}h.", .{pAddr, data});
                unreachable;
            }
        },
        else => {
            std.log.warn("[Bus] Unhandled read32 @ pAddr {X}h.", .{pAddr});
            unreachable;
        }
    }
    return data;
}

/// Double-word read; RDRAM only, plus one special-cased MI address.
pub fn read64(pAddr: u64) u64 {
    // NOTE(review): panic message says "read164" — typo for "read64".
    if ((pAddr & 7) != 0) @panic("unaligned read164");
    var data: u64 = undefined;
    switch (pAddr >> 20) {
        @enumToInt(MemoryRegion.RDRAM0) ... @enumToInt(MemoryRegion.RDRAM7) => {
            @memcpy(@ptrCast([*]u8, &data), @ptrCast([*]u8, &ram[pAddr & 0x7F_FFFF]), 8);
            data = @byteSwap(u64, data);
        },
        @enumToInt(MemoryRegion.MI) => {
            // NOTE(review): returns `data` while it is still undefined.
            if (pAddr == 0x4300028) return data;
            std.log.warn("[MI] Unhandled read64 @ pAddr {X}h.", .{pAddr});
            @panic("mi: unhandled read64");
        },
        else => {
            std.log.warn("[Bus] Unhandled read64 @ pAddr {X}h.", .{pAddr});
            unreachable;
        }
    }
    return data;
}

/// Byte write; RDRAM and RSP regions only.
pub fn write8(pAddr: u64, data: u8) void {
    switch (pAddr >> 20) {
        @enumToInt(MemoryRegion.RDRAM0) ... @enumToInt(MemoryRegion.RDRAM7) => {
            ram[pAddr & 0x7F_FFFF] = data;
        },
        @enumToInt(MemoryRegion.RSP) => {
            rsp.write8(pAddr, data);
        },
        else => {
            std.log.warn("[Bus] Unhandled write8 @ pAddr {X}h, data: {X}h.", .{pAddr, data});
            unreachable;
        }
    }
}

/// Half-word write; RDRAM only, stored byte-swapped (big-endian layout).
pub fn write16(pAddr: u64, data: u16) void {
    if ((pAddr & 1) != 0) @panic("unaligned write16");
    switch (pAddr >> 20) {
        @enumToInt(MemoryRegion.RDRAM0) ... @enumToInt(MemoryRegion.RDRAM7) => {
            const data_ = @byteSwap(u16, data);
            @memcpy(@ptrCast([*]u8, &ram[pAddr & 0x7F_FFFF]), @ptrCast([*]const u8, &data_), 2);
        },
        else => {
            std.log.warn("[Bus] Unhandled write16 @ pAddr {X}h, data: {X}h.", .{pAddr, data});
            unreachable;
        }
    }
}

// NOTE(review): declared but not referenced in this chunk — TODO confirm it
// is used elsewhere or remove.
var siDRAMAddr: u32 = 0;

/// Word write: dispatches to RDRAM, MMIO modules, or PIF RAM. The PI WriteLen
/// path performs the cartridge->RDRAM DMA inline and raises the PI interrupt.
pub fn write32(pAddr: u64, data: u32) void {
    if ((pAddr & 3) != 0) @panic("unaligned write32");
    switch (pAddr >> 20) {
        @enumToInt(MemoryRegion.RDRAM0) ... @enumToInt(MemoryRegion.RDRAM7) => {
            const data_ = @byteSwap(u32, data);
            @memcpy(@ptrCast([*]u8, &ram[pAddr & 0x7F_FFFF]), @ptrCast([*]const u8, &data_), 4);
        },
        @enumToInt(MemoryRegion.RDRAMIO) => {
            switch (pAddr & 0xFF_FFFF) {
                else => {
                    std.log.warn("[Bus] Unhandled write32 @ pAddr {X}h (RDRAM I/O), data: {X}h.", .{pAddr, data});
                }
            }
        },
        @enumToInt(MemoryRegion.RSP) => {
            rsp.write32(pAddr, data);
        },
        @enumToInt(MemoryRegion.MI) => {
            mi.write32(pAddr, data);
        },
        @enumToInt(MemoryRegion.VI) => {
            vi.write32(pAddr, data);
        },
        @enumToInt(MemoryRegion.AI) => {
            ai.write32(pAddr, data);
        },
        @enumToInt(MemoryRegion.PI) => {
            switch (pAddr & 0xF_FFFF) {
                @enumToInt(PIReg.DRAMAddr) => {
                    std.log.info("[Bus] Write32 @ pAddr {X}h (PI DRAM Address), data: {X}h.", .{pAddr, data});
                    piRegs.dramAddr = data & 0xFF_FFFF;
                },
                @enumToInt(PIReg.CartAddr) => {
                    std.log.info("[Bus] Write32 @ pAddr {X}h (PI Cart Address), data: {X}h.", .{pAddr, data});
                    piRegs.cartAddr = data;
                },
                @enumToInt(PIReg.ReadLen) => {
                    // RDRAM -> cartridge DMA is not implemented yet.
                    std.log.info("[Bus] Write32 @ pAddr {X}h (PI Read Length), data: {X}h.", .{pAddr, data});
                    piRegs.piStatus.dmaIRQ = true;
                    mi.setPending(mi.InterruptSource.PI);
                    @panic("blah");
                },
                @enumToInt(PIReg.WriteLen) => {
                    std.log.info("[Bus] Write32 @ pAddr {X}h (PI Write Length), data: {X}h.", .{pAddr, data});
                    piRegs.writeLen = data & 0xFF_FFFF;
                    // Start PI DMA (Cartridge => RDRAM)
                    // Hardware transfers length+1 bytes, hence the +% 1.
                    @memcpy(@ptrCast([*]u8, &ram[piRegs.dramAddr]), @ptrCast([*]u8, &rom[piRegs.cartAddr & 0xFFF_FFFF]), piRegs.writeLen +% 1);
                    piRegs.piStatus.dmaIRQ = true;
                    mi.setPending(mi.InterruptSource.PI);
                },
                @enumToInt(PIReg.PIStatus) => {
                    std.log.info("[Bus] Write32 @ pAddr {X}h (PI Status), data: {X}h.", .{pAddr, data});
                    // Bit 1: clear the PI interrupt.
                    if ((data & 2) != 0) {
                        piRegs.piStatus.dmaIRQ = false;
                        mi.clearPending(mi.InterruptSource.PI);
                    }
                },
                else => {
                    std.log.warn("[Bus] Unhandled write32 @ pAddr {X}h (Peripheral Interface), data: {X}h.", .{pAddr, data});
                }
            }
        },
        @enumToInt(MemoryRegion.RI) => {
            switch (pAddr & 0xF_FFFF) {
                else => {
                    std.log.warn("[Bus] Unhandled write32 @ pAddr {X}h (RDRAM Interface), data: {X}h.", .{pAddr, data});
                }
            }
        },
        @enumToInt(MemoryRegion.SI) => {
            si.write32(pAddr, data);
        },
        @enumToInt(MemoryRegion.PIF) => {
            if (pAddr >= 0x1FC0_07C0 and pAddr < 0x1FC0_0800) {
                std.log.info("[Bus] Write32 @ pAddr {X}h (PIF RAM), data: {X}h.", .{pAddr, data});
                const data_ = @byteSwap(u32, data);
                @memcpy(@ptrCast([*]u8, &pif.pifRAM[pAddr - 0x1FC0_07C0]), @ptrCast([*]const u8, &data_), 4);
            } else {
                std.log.warn("[Bus] Unhandled write32 @ pAddr {X}h (PIF), data: {X}h.", .{pAddr, data});
                unreachable;
            }
        },
        else => {
            std.log.warn("[Bus] Unhandled write32 @ pAddr {X}h, data: {X}h.", .{pAddr, data});
            unreachable;
        }
    }
}

/// Double-word write; RDRAM only, stored byte-swapped (big-endian layout).
pub fn write64(pAddr: u64, data: u64) void {
    if ((pAddr & 7) != 0) @panic("unaligned write64");
    switch (pAddr >> 20) {
        @enumToInt(MemoryRegion.RDRAM0) ... @enumToInt(MemoryRegion.RDRAM7) => {
            const data_ = @byteSwap(u64, data);
            @memcpy(@ptrCast([*]u8, &ram[pAddr & 0x7F_FFFF]), @ptrCast([*]const u8, &data_), 8);
        },
        else => {
            std.log.warn("[Bus] Unhandled write64 @ pAddr {X}h, data: {X}h.", .{pAddr, data});
            unreachable;
        }
    }
}
src/core/bus.zig
const Data = @import("../events/events.zig").Data;
const Event = @import("../events/events.zig").Event;
const std = @import("std");

/// State machine that decodes an HTTP/1.1 chunked transfer-encoding body.
/// `read` accumulates decoded chunk payload into the caller's buffer and
/// emits a `Data` event when the buffer fills or the final (size 0) chunk is
/// seen, then `EndOfMessage`. Requires a reader with `putBack` (a peek stream).
pub const ChunkedReader = struct {
    const Self = @This();

    // Remaining payload bytes of the chunk currently being read.
    chunk_size: usize = 0,
    // Decoded bytes accumulated so far in the caller's buffer.
    bytes_read: usize = 0,
    state: State = .ReadChunkSize,

    // Maximum length of a chunk-size line, CRLF included: 6 hex digits + "\r\n",
    // which allows a chunk to be at most 16 777 215 bytes long.
    const MaxChunkSizeLength = 8;

    const State = enum {
        ReadChunkSize,
        ReadChunk,
        ReadChunkEnd,
        Done,
    };

    /// Drives the state machine until an event is produced (Data or
    /// EndOfMessage) or an error occurs.
    pub fn read(self: *Self, reader: anytype, buffer: []u8) !Event {
        while (true) {
            var event = try switch (self.state) {
                .ReadChunkSize => self.readChunkSize(reader, buffer),
                .ReadChunk => self.readChunk(reader, buffer),
                .ReadChunkEnd => self.readChunkEnd(reader),
                .Done => return .EndOfMessage,
            };
            if (event != null) {
                return event.?;
            }
        }
    }

    // Reads the "SIZE\r\n" line; bytes read past the CRLF are pushed back.
    // A zero size terminates the message and flushes any buffered payload.
    // NOTE(review): assumes `reader.read` fills all MaxChunkSizeLength bytes
    // in one call when available — TODO confirm for short reads.
    fn readChunkSize(self: *Self, reader: anytype, buffer: []u8) !?Event {
        var line: [MaxChunkSizeLength]u8 = undefined;
        _ = try reader.read(&line);
        const line_end = std.mem.indexOfPosLinear(u8, &line, 0, "\r\n") orelse return error.RemoteProtocolError;
        try reader.putBack(line[line_end + 2 ..]);
        self.chunk_size = try parseChunkSize(line[0..line_end]);
        if (self.chunk_size > 0) {
            self.state = .ReadChunk;
            return null;
        }
        self.state = .Done;
        if (self.bytes_read == 0) {
            return null;
        }
        // Final chunk: emit whatever payload is still buffered.
        return Event{ .Data = Data{ .bytes = buffer[0..self.bytes_read] } };
    }

    // Copies chunk payload into the user buffer; emits Data only once the
    // buffer is completely full (partial data is emitted by readChunkSize at
    // end of message instead).
    fn readChunk(self: *Self, reader: anytype, buffer: []u8) !?Event {
        const remaining_space_left = buffer.len - self.bytes_read;
        const bytes_to_read = std.math.min(self.chunk_size, remaining_space_left);
        const count = try reader.read(buffer[self.bytes_read .. self.bytes_read + bytes_to_read]);
        if (count == 0) {
            return error.EndOfStream;
        }
        self.chunk_size -= count;
        self.bytes_read += count;
        if (self.chunk_size == 0) {
            self.state = .ReadChunkEnd;
        }
        if (self.bytes_read < buffer.len) {
            return null;
        }
        self.bytes_read = 0;
        return Event{ .Data = Data{ .bytes = buffer } };
    }

    // Consumes the mandatory CRLF that terminates each chunk's payload.
    fn readChunkEnd(self: *Self, reader: anytype) !?Event {
        var chunk_end: [2]u8 = undefined;
        _ = try reader.read(&chunk_end);
        if (!std.mem.eql(u8, &chunk_end, "\r\n")) {
            return error.RemoteProtocolError;
        }
        self.state = .ReadChunkSize;
        return null;
    }

    // Parses a hex chunk-size; any non-hex digit is a protocol error.
    fn parseChunkSize(hex_length: []const u8) !usize {
        var length: usize = 0;
        for (hex_length) |char| {
            const digit = std.fmt.charToDigit(char, 16) catch return error.RemoteProtocolError;
            length = (length << 4) | @as(usize, (digit & 0xF));
        }
        return length;
    }
};

const expect = std.testing.expect;
const expectEqualStrings = std.testing.expectEqualStrings;
const expectError = std.testing.expectError;

test "ChunkedReader - Read a chunk" {
    const content = "E\r\nGotta go fast!\r\n0\r\n\r\n";
    var reader = std.io.peekStream(1024, std.io.fixedBufferStream(content).reader());
    var body_reader = ChunkedReader{};
    var buffer: [32]u8 = undefined;
    var event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "Gotta go fast!");
    event = try body_reader.read(&reader, &buffer);
    try expect(event == .EndOfMessage);
}

test "ChunkedReader - Read multiple chunks" {
    const content = "E\r\nGotta go fast!\r\n7\r\nZiguana\r\n0\r\n\r\n";
    var reader = std.io.peekStream(1024, std.io.fixedBufferStream(content).reader());
    var body_reader = ChunkedReader{};
    var buffer: [14]u8 = undefined;
    var event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "Gotta go fast!");
    event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "Ziguana");
    event = try body_reader.read(&reader, &buffer);
    try expect(event == .EndOfMessage);
}

test "ChunkedReader - Read a chunk with a smaller buffer" {
    const content = "E\r\nGotta go fast!\r\n0\r\n\r\n";
    var reader = std.io.peekStream(1024, std.io.fixedBufferStream(content).reader());
    var body_reader = ChunkedReader{};
    var buffer: [7]u8 = undefined;
    var event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "Gotta g");
    event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "o fast!");
    event = try body_reader.read(&reader, &buffer);
    try expect(event == .EndOfMessage);
}

test "ChunkedReader - Read multiple chunks with a smaller buffer" {
    const content = "E\r\nGotta go fast!\r\n7\r\nZiguana\r\n0\r\n\r\n";
    var reader = std.io.peekStream(1024, std.io.fixedBufferStream(content).reader());
    var body_reader = ChunkedReader{};
    var buffer: [7]u8 = undefined;
    var event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "Gotta g");
    event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "o fast!");
    event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "Ziguana");
    event = try body_reader.read(&reader, &buffer);
    try expect(event == .EndOfMessage);
}

test "ChunkedReader - Read multiple chunks in the same user buffer" {
    const content = "E\r\nGotta go fast!\r\n7\r\nZiguana\r\n6\r\nStonks\r\n0\r\n\r\n";
    var reader = std.io.peekStream(1024, std.io.fixedBufferStream(content).reader());
    var body_reader = ChunkedReader{};
    var buffer: [32]u8 = undefined;
    var event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "Gotta go fast!ZiguanaStonks");
    event = try body_reader.read(&reader, &buffer);
    try expect(event == .EndOfMessage);
}

test "ChunkedReader - When the inner buffer is smaller than the user buffer" {
    const content = ("3E8\r\n" ++ "a" ** 200 ++ "b" ** 200 ++ "c" ** 200 ++ "d" ** 200 ++ "e" ** 200 ++ "\r\n0\r\n\r\n");
    var reader = std.io.peekStream(1024, std.io.fixedBufferStream(content).reader());
    var body_reader = ChunkedReader{};
    var buffer: [200]u8 = undefined;
    var event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "a" ** 200);
    event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "b" ** 200);
    event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "c" ** 200);
    event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "d" ** 200);
    event = try body_reader.read(&reader, &buffer);
    try expectEqualStrings(event.Data.bytes, "e" ** 200);
    event = try body_reader.read(&reader, &buffer);
    try expect(event == .EndOfMessage);
}

test "ChunkedReader - Fail to read a chunk size which is not hexadecimal" {
    const content = "XXX\r\nGotta go fast!\r\n0\r\n\r\n";
    var reader = std.io.peekStream(1024, std.io.fixedBufferStream(content).reader());
    var body_reader = ChunkedReader{};
    var buffer: [32]u8 = undefined;
    var failure = body_reader.read(&reader, &buffer);
    try expectError(error.RemoteProtocolError, failure);
}

test "ChunkedReader - Fail to read too large chunk" {
    const content = "1000000\r\nGotta go fast!\r\n0\r\n\r\n";
    var reader = std.io.peekStream(1024, std.io.fixedBufferStream(content).reader());
    var body_reader = ChunkedReader{};
    var buffer: [32]u8 = undefined;
    var failure = body_reader.read(&reader, &buffer);
    try expectError(error.RemoteProtocolError, failure);
}

test "ChunkedReader - Fail when not enough data can be read" {
    const content = "E\r\nGotta go fast!\r\n7\r\nZi";
    var reader = std.io.peekStream(1024, std.io.fixedBufferStream(content).reader());
    var body_reader = ChunkedReader{};
    var buffer: [50]u8 = undefined;
    var failure = body_reader.read(&reader, &buffer);
    try expectError(error.EndOfStream, failure);
}
src/readers/chunked_reader.zig
const std = @import("std");
const assert = std.debug.assert;
const is_darwin = std.Target.current.isDarwin();

const config = @import("./config.zig");

pub const Time = struct {
    const Self = @This();

    /// Hardware and/or software bugs can mean that the monotonic clock may regress.
    /// One example (of many): https://bugzilla.redhat.com/show_bug.cgi?id=448449
    /// We crash the process for safety if this ever happens, to protect against infinite loops.
    /// It's better to crash and come back with a valid monotonic clock than get stuck forever.
    monotonic_guard: u64 = 0,

    /// A timestamp to measure elapsed time, meaningful only on the same system, not across reboots.
    /// Always use a monotonic timestamp if the goal is to measure elapsed time.
    /// This clock is not affected by discontinuous jumps in the system time, for example if the
    /// system administrator manually changes the clock.
    pub fn monotonic(self: *Self) u64 {
        const m = blk: {
            // Uses mach_continuous_time() instead of mach_absolute_time() as it counts while suspended.
            // https://developer.apple.com/documentation/kernel/1646199-mach_continuous_time
            // https://opensource.apple.com/source/Libc/Libc-1158.1.2/gen/clock_gettime.c.auto.html
            if (is_darwin) {
                const darwin = struct {
                    const mach_timebase_info_t = std.os.darwin.mach_timebase_info_data;
                    extern "c" fn mach_timebase_info(info: *mach_timebase_info_t) std.os.darwin.kern_return_t;
                    extern "c" fn mach_continuous_time() u64;
                };
                const now = darwin.mach_continuous_time();
                var info: darwin.mach_timebase_info_t = undefined;
                if (darwin.mach_timebase_info(&info) != 0) @panic("mach_timebase_info() failed");
                // FIX: previously this `return`ed directly, which skipped the
                // monotonic_guard regression check below on Darwin. Break out of
                // the block instead so the guard protects every platform.
                break :blk (now * info.numer) / info.denom;
            }
            // The true monotonic clock on Linux is not in fact CLOCK_MONOTONIC:
            // CLOCK_MONOTONIC excludes elapsed time while the system is suspended (e.g. VM migration).
            // CLOCK_BOOTTIME is the same as CLOCK_MONOTONIC but includes elapsed time during a suspend.
            // For more detail and why CLOCK_MONOTONIC_RAW is even worse than CLOCK_MONOTONIC,
            // see https://github.com/ziglang/zig/pull/933#discussion_r656021295.
            var ts: std.os.timespec = undefined;
            std.os.clock_gettime(std.os.CLOCK_BOOTTIME, &ts) catch @panic("CLOCK_BOOTTIME required");
            break :blk @intCast(u64, ts.tv_sec) * std.time.ns_per_s + @intCast(u64, ts.tv_nsec);
        };
        // "Oops!...I Did It Again"
        // Crash rather than hand out a timestamp earlier than one we already returned.
        if (m < self.monotonic_guard) @panic("a hardware/kernel bug regressed the monotonic clock");
        self.monotonic_guard = m;
        return m;
    }

    /// A timestamp to measure real (i.e. wall clock) time, meaningful across systems, and reboots.
    /// This clock is affected by discontinuous jumps in the system time.
    pub fn realtime(self: *Self) i64 {
        // macos has supported clock_gettime() since 10.12:
        // https://opensource.apple.com/source/Libc/Libc-1158.1.2/gen/clock_gettime.3.auto.html
        var ts: std.os.timespec = undefined;
        // CLOCK_REALTIME is always available; failure here is unrecoverable.
        std.os.clock_gettime(std.os.CLOCK_REALTIME, &ts) catch unreachable;
        return @as(i64, ts.tv_sec) * std.time.ns_per_s + ts.tv_nsec;
    }

    /// Intentionally a no-op here; kept so callers can tick any Time-like type.
    pub fn tick(self: *Self) void {}
};
src/time.zig
// FreeBSD x86_64 syscall glue. Written against a legacy Zig version
// (`nakedcc`, `Type(x)` cast syntax).
const freebsd = @import("index.zig");
const socklen_t = freebsd.socklen_t;
const iovec = freebsd.iovec;

pub const SYS_sbrk = 69;

/// Raw syscall with no arguments. The syscall number goes in rax and the
/// result comes back in rax; the `syscall` instruction clobbers rcx and r11
/// (used by the CPU to save rip/rflags), hence the clobber list.
pub fn syscall0(number: usize) usize {
    return asm volatile ("syscall"
        : [ret] "={rax}" (-> usize)
        : [number] "{rax}" (number)
        : "rcx", "r11"
    );
}

/// Raw syscall with one argument (rdi).
pub fn syscall1(number: usize, arg1: usize) usize {
    return asm volatile ("syscall"
        : [ret] "={rax}" (-> usize)
        : [number] "{rax}" (number),
          [arg1] "{rdi}" (arg1)
        : "rcx", "r11"
    );
}

/// Raw syscall with two arguments (rdi, rsi).
pub fn syscall2(number: usize, arg1: usize, arg2: usize) usize {
    return asm volatile ("syscall"
        : [ret] "={rax}" (-> usize)
        : [number] "{rax}" (number),
          [arg1] "{rdi}" (arg1),
          [arg2] "{rsi}" (arg2)
        : "rcx", "r11"
    );
}

/// Raw syscall with three arguments (rdi, rsi, rdx).
pub fn syscall3(number: usize, arg1: usize, arg2: usize, arg3: usize) usize {
    return asm volatile ("syscall"
        : [ret] "={rax}" (-> usize)
        : [number] "{rax}" (number),
          [arg1] "{rdi}" (arg1),
          [arg2] "{rsi}" (arg2),
          [arg3] "{rdx}" (arg3)
        : "rcx", "r11"
    );
}

/// Raw syscall with four arguments. Note the fourth argument uses r10, not
/// rcx as in the function-call ABI, because `syscall` itself clobbers rcx.
pub fn syscall4(number: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize) usize {
    return asm volatile ("syscall"
        : [ret] "={rax}" (-> usize)
        : [number] "{rax}" (number),
          [arg1] "{rdi}" (arg1),
          [arg2] "{rsi}" (arg2),
          [arg3] "{rdx}" (arg3),
          [arg4] "{r10}" (arg4)
        : "rcx", "r11"
    );
}

/// Raw syscall with five arguments (rdi, rsi, rdx, r10, r8).
pub fn syscall5(number: usize, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
    return asm volatile ("syscall"
        : [ret] "={rax}" (-> usize)
        : [number] "{rax}" (number),
          [arg1] "{rdi}" (arg1),
          [arg2] "{rsi}" (arg2),
          [arg3] "{rdx}" (arg3),
          [arg4] "{r10}" (arg4),
          [arg5] "{r8}" (arg5)
        : "rcx", "r11"
    );
}

/// Raw syscall with six arguments (rdi, rsi, rdx, r10, r8, r9).
pub fn syscall6(
    number: usize,
    arg1: usize,
    arg2: usize,
    arg3: usize,
    arg4: usize,
    arg5: usize,
    arg6: usize,
) usize {
    return asm volatile ("syscall"
        : [ret] "={rax}" (-> usize)
        : [number] "{rax}" (number),
          [arg1] "{rdi}" (arg1),
          [arg2] "{rsi}" (arg2),
          [arg3] "{rdx}" (arg3),
          [arg4] "{r10}" (arg4),
          [arg5] "{r8}" (arg5),
          [arg6] "{r9}" (arg6)
        : "rcx", "r11"
    );
}

/// Trampoline used to return from a signal handler.
/// NOTE(review): SYS_rt_sigreturn is neither defined nor imported in this
/// file (only SYS_sbrk is) — verify it resolves from the surrounding namespace.
pub nakedcc fn restore_rt() void {
    asm volatile ("syscall"
        :
        : [number] "{rax}" (usize(SYS_rt_sigreturn))
        : "rcx", "r11"
    );
}

/// C-ABI layout of struct msghdr for sendmsg/recvmsg.
pub const msghdr = extern struct {
    msg_name: *u8,
    msg_namelen: socklen_t,
    msg_iov: *iovec,
    msg_iovlen: i32,
    __pad1: i32,
    msg_control: *u8,
    msg_controllen: socklen_t,
    __pad2: socklen_t,
    msg_flags: i32,
};

/// Renamed to Stat to not conflict with the stat function.
pub const Stat = extern struct {
    dev: u64,
    ino: u64,
    nlink: usize,
    mode: u32,
    uid: u32,
    gid: u32,
    __pad0: u32,
    rdev: u64,
    size: i64,
    blksize: isize,
    blocks: i64,
    atim: timespec,
    mtim: timespec,
    ctim: timespec,
    __unused: [3]isize,
};

/// Seconds + nanoseconds timestamp, C-ABI layout.
pub const timespec = extern struct {
    tv_sec: isize,
    tv_nsec: isize,
};
std/os/freebsd/x86_64.zig
const std = @import("std");
const utils = @import("utils");
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const print = utils.print;

/// Flattened risk grid: risk[y * width + x].
const Map = struct {
    risk: []u32,
    width: usize,
    height: usize,
};

/// Parses the digit grid from the line iterator into a Map.
/// All memory is owned by the arena; nothing is freed individually.
fn readInput(arena: *ArenaAllocator, lines_it: *utils.FileLineIterator) anyerror!Map {
    var risk = try std.ArrayList(u32).initCapacity(&arena.allocator, 128 * 128);
    var width: usize = 0;
    while (lines_it.next()) |line| {
        for (line) |l| {
            // Each character is a single decimal digit.
            try risk.append(l - '0');
        }
        width = line.len;
    }
    const height = risk.items.len / width;
    print("File ok :) Number of inputs: {d}", .{risk.items.len});
    return Map{
        .risk = risk.items,
        .width = width,
        .height = height,
    };
}

/// Dijkstra vertex; `dist` is the best-known total risk from the origin.
const Vertex = struct {
    index: usize,
    risk: u32,
    dist: u32,
    prev: ?usize,
    neighbors: []usize,
};

// Priority-queue entries pack (vertex index << 32) | dist into a u64;
// ordering compares only the low 32 bits, i.e. the distance.
fn lessThan(a: u64, b: u64) std.math.Order {
    return std.math.order(a & ((1 << 32) - 1), b & ((1 << 32) - 1));
}

/// Dijkstra shortest path from the top-left to the bottom-right cell.
/// Returns the summed risk of the path, excluding the starting cell.
fn part1(arena: *ArenaAllocator, map: Map) anyerror!u32 {
    // Build the vertex list with 4-neighborhood adjacency.
    var vertices_list = try std.ArrayList(Vertex).initCapacity(&arena.allocator, 128 * 128);
    for (map.risk) |risk, offset| {
        var neighbors = try std.ArrayList(usize).initCapacity(&arena.allocator, 4);
        var x = offset % map.width;
        var y = offset / map.width;
        if (x > 0) try neighbors.append(offset - 1);
        if (x < map.width - 1) try neighbors.append(offset + 1);
        if (y > 0) try neighbors.append(offset - map.width);
        if (y < map.height - 1) try neighbors.append(offset + map.width);
        try vertices_list.append(.{
            .index = offset,
            .risk = risk,
            // Sentinel "infinity"; larger than any reachable path cost here.
            .dist = 9999999,
            .prev = null,
            .neighbors = neighbors.items,
        });
    }
    const vertices = vertices_list.items;
    vertices[0].dist = 0;

    // Seed the queue with every vertex (packed index|dist entries).
    var pg = std.PriorityQueue(u64, lessThan).init(&arena.allocator);
    try pg.ensureTotalCapacity(128 * 128);
    var i: usize = 0;
    while (i < vertices.len) : (i += 1) {
        const offset = vertices.len - i - 1;
        try pg.add(@intCast(u64, offset << 32) + @intCast(u64, vertices[offset].dist & ((1 << 32) - 1)));
    }

    // Standard Dijkstra relaxation; `update` rekeys the packed entry in place.
    while (pg.count() > 0) {
        const entry = pg.remove();
        const entry_index = @intCast(usize, entry >> 32);
        const u = vertices[entry_index];
        for (u.neighbors) |n| {
            var v = &vertices[n];
            const alt = u.dist + v.risk;
            if (alt < v.dist) {
                const old_entry = @intCast(u64, n << 32) + @intCast(u64, v.dist & ((1 << 32) - 1));
                v.dist = alt;
                v.prev = u.index;
                try pg.update(old_entry, @intCast(u64, n << 32) + @intCast(u64, v.dist & ((1 << 32) - 1)));
            }
        }
    }

    // Walk prev-links back from the target, summing risk. The target's risk is
    // counted twice (once before the loop, once inside) and corrected below.
    var path = try std.ArrayList(usize).initCapacity(&arena.allocator, map.risk.len);
    try path.append(0);
    var target = vertices.len - 1;
    var sum = vertices[target].risk;
    while (target != 0) {
        const v = &vertices[target];
        target = if (v.prev) |prev| prev else unreachable;
        try path.append(v.index);
        sum += v.risk;
    }
    sum -= map.risk[map.risk.len - 1];

    // NOTE(review): leftover debug rendering — builds a '.'-marked picture of the
    // path but never prints or returns it; dead code that could be removed.
    var row = try std.ArrayList(u8).initCapacity(&arena.allocator, 128);
    for (map.risk) |n, p_offset| {
        for (path.items) |p| {
            if (p == p_offset) {
                try row.append('.');
                break;
            }
        } else {
            try row.append('0' + @intCast(u8, n));
        }
    }
    return sum;
}

/// Tiles the map 5x5; each tile adds (tile_x + tile_y) to every risk value,
/// wrapping 9 back to 1, then solves the expanded grid with part1.
fn part2(arena: *ArenaAllocator, map: Map) anyerror!u32 {
    var risk = try std.ArrayList(u32).initCapacity(&arena.allocator, map.risk.len * 25);
    const new_width = map.width * 5;
    const new_height = map.height * 5;
    var y: usize = 0;
    while (y < new_height) : (y += 1) {
        const y_inc = y / map.height;
        const old_y = y % map.height;
        var x: usize = 0;
        while (x < new_width) : (x += 1) {
            const x_inc = x / map.width;
            const old_x = x % map.width;
            const old_offset = old_y * map.width + old_x;
            // Map 1..9 to 0..8, add the tile increment, wrap mod 9, map back.
            const new_risk = (map.risk[old_offset] - 1 + @intCast(u32, y_inc + x_inc)) % 9 + 1;
            try risk.append(new_risk);
        }
    }
    const expanded_map = Map{
        .risk = risk.items,
        .width = new_width,
        .height = new_height,
    };
    return part1(arena, expanded_map);
}

pub fn main() anyerror!void {
    // One arena for the whole run; freed wholesale at exit.
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    var lines_it = try utils.iterateLinesInFile(&arena.allocator, "input.txt");
    defer lines_it.deinit();
    const input = try readInput(&arena, &lines_it);
    const part1_result = try part1(&arena, input);
    print("Part 1: {d}", .{part1_result});
    const part2_result = try part2(&arena, input);
    print("Part 2: {d}", .{part2_result});
}
day15/src/main.zig
const testing = @import("std").testing;
const math = @import("std").math;

/// A two-component single-precision vector.
pub const v2 = struct {
    x: f32,
    y: f32,

    /// initializes a v2 with x and y
    pub fn init(x: f32, y: f32) v2 {
        return .{ .x = x, .y = y };
    }

    /// returns the component from an index
    pub fn at(v: v2, i: u16) f32 {
        switch (i) {
            0 => return v.x,
            1 => return v.y,
            else => @panic("invalid index value provided when accessing a v2 index"),
        }
    }

    /// sets the component from an index
    pub fn set(v: *v2, i: u16, val: f32) void {
        switch (i) {
            0 => v.x = val,
            1 => v.y = val,
            else => @panic("invalid index value provided when accessing a v2 index"),
        }
    }

    /// returns the x and y of the v2 as a [2]f32 array.
    /// to be used to pass to other stuff like opengl.
    pub fn arr(v: v2) [2]f32 {
        return [2]f32{ v.x, v.y };
    }

    /// returns the length/magnitude of the vector
    pub fn len(v: v2) f32 {
        return math.sqrt(v.x * v.x + v.y * v.y);
    }

    /// returns the normalized vector; the zero vector stays (0, 0)
    pub fn normalized(v: v2) v2 {
        const magnitude = v.len();
        if (magnitude == 0) return v2.init(0, 0);
        return v.divs(magnitude);
    }

    /// returns the dot product of two v2
    pub fn dot(a: v2, b: v2) f32 {
        return a.x * b.x + a.y * b.y;
    }

    /// returns the negated vector
    pub fn neg(v: v2) v2 {
        return .{ .x = -v.x, .y = -v.y };
    }

    /// divides the v2 with a scalar
    pub fn divs(v: v2, s: f32) v2 {
        return .{ .x = v.x / s, .y = v.y / s };
    }

    /// multiplies the v2 with a scalar
    pub fn muls(v: v2, s: f32) v2 {
        return .{ .x = v.x * s, .y = v.y * s };
    }

    /// sums the v2 with a scalar
    pub fn sums(v: v2, s: f32) v2 {
        return .{ .x = v.x + s, .y = v.y + s };
    }

    /// subtracts the v2 from a scalar
    pub fn subs(v: v2, s: f32) v2 {
        return .{ .x = v.x - s, .y = v.y - s };
    }

    /// sums the v2 with another v2
    pub fn sumv(a: v2, b: v2) v2 {
        return .{ .x = a.x + b.x, .y = a.y + b.y };
    }

    /// subtracts the v2 from another v2
    pub fn subv(a: v2, b: v2) v2 {
        return .{ .x = a.x - b.x, .y = a.y - b.y };
    }
};

test "creating a v2" {
    const vec = v2.init(4, 8);
    testing.expect(vec.x == 4);
    testing.expect(vec.y == 8);
    testing.expect(vec.at(0) == 4);
    testing.expect(vec.at(1) == 8);
    testing.expect(vec.arr()[0] == 4);
    testing.expect(vec.arr()[1] == 8);
}

test "v2 length" {
    const vec = v2.init(4, 8);
    testing.expect(math.approxEq(f32, vec.len(), 8.94427190999916, math.f32_epsilon));
}

test "v2 operation" {
    const lhs = v2.init(4, 8);
    const rhs = v2.init(1, 3);

    var got = v2.divs(lhs, 2);
    testing.expect(got.x == 2);
    testing.expect(got.y == 4);

    got = v2.muls(lhs, 2);
    testing.expect(got.x == 8);
    testing.expect(got.y == 16);

    got = v2.neg(lhs);
    testing.expect(got.x == -4);
    testing.expect(got.y == -8);

    got = v2.sums(lhs, 2);
    testing.expect(got.x == 6);
    testing.expect(got.y == 10);

    got = v2.subs(lhs, 2);
    testing.expect(got.x == 2);
    testing.expect(got.y == 6);

    got = v2.sumv(lhs, rhs);
    testing.expect(got.x == 5);
    testing.expect(got.y == 11);

    got = v2.subv(lhs, rhs);
    testing.expect(got.x == 3);
    testing.expect(got.y == 5);
}

test "v2 normalized" {
    const vec = v2.init(4, 8);
    const unit = v2.normalized(vec);
    testing.expect(math.approxEq(f32, unit.x, 0.4472135954999579, math.f32_epsilon));
    testing.expect(math.approxEq(f32, unit.y, 0.8944271909999159, math.f32_epsilon));
}

test "v2 dot" {
    const a = v2.init(4, 8);
    const b = v2.init(1, 3);
    testing.expect(v2.dot(a, b) == 28);
}
src/vector2.zig
usingnamespace @import("../core.zig");

const Device = @import("../vulkan/device.zig").Device;
const Mesh = @import("mesh.zig").Mesh;
const TransferQueue = @import("../transfer_queue.zig").TransferQueue;
const obj = @import("../utils/obj_loader.zig");

//TODO: Replace
const ColorVertex = struct {
    pos: Vector3,
    color: Vector3,
};

/// Owns meshes keyed by a monotonically increasing u16 id.
/// Frees are deferred: `free` parks the mesh on `delete_list` and `flush`
/// destroys the parked meshes on a later call.
pub const MeshManager = struct {
    const Self = @This();

    allocator: *Allocator,
    device: Device,
    // Next id handed out by `load`; the `+= 1` will trap on overflow past 65535.
    id_next: u16 = 0,
    list: std.AutoHashMap(u16, Mesh),
    transfers: TransferQueue,
    // Meshes removed from `list` but not yet destroyed (see `flush`).
    delete_list: std.ArrayList(Mesh),

    pub fn init(allocator: *Allocator, device: Device) Self {
        return Self{
            .allocator = allocator,
            .device = device,
            .list = std.AutoHashMap(u16, Mesh).init(allocator),
            .transfers = TransferQueue.init(allocator, device),
            .delete_list = std.ArrayList(Mesh).init(allocator),
        };
    }

    /// Destroys every live and pending-delete mesh plus all containers.
    pub fn deinit(self: *Self) void {
        var iterator = self.list.iterator();
        while (iterator.next()) |entry| {
            entry.value_ptr.deinit();
        }
        self.list.deinit();
        self.transfers.deinit();
        for (self.delete_list.items) |mesh| {
            mesh.deinit();
        }
        self.delete_list.deinit();
    }

    /// Cleanup pass: destroys deferred meshes and the transfer queue's resources.
    pub fn flush(self: *Self) void {
        for (self.delete_list.items) |mesh| {
            mesh.deinit();
        }
        self.delete_list.clearRetainingCapacity();
        self.transfers.clearResources();
    }

    /// Loads an OBJ file, queues vertex/index uploads, and returns the new id.
    /// Vertex colors are derived from the absolute value of the normals.
    pub fn load(self: *Self, file_path: []const u8) !u16 {
        var obj_file = try std.fs.cwd().openFile(file_path, std.fs.File.OpenFlags{ .read = true });
        defer obj_file.close();
        var obj_reader = obj_file.reader();
        var obj_mesh = try obj.parseObjFile(self.allocator, obj_reader, .{});
        defer obj_mesh.deinit();

        var vertices = try self.allocator.alloc(ColorVertex, obj_mesh.positions.len);
        defer self.allocator.free(vertices);
        var i: usize = 0;
        while (i < vertices.len) : (i += 1) {
            vertices[i].pos = .{
                .data = obj_mesh.positions[i],
            };
            var normal = obj_mesh.normals[i];
            vertices[i].color = Vector3.new(normal[0], normal[1], normal[2]).abs();
        }

        var mesh = try Mesh.init(ColorVertex, u32, self.device, @intCast(u32, vertices.len), @intCast(u32, obj_mesh.indices.len));
        self.transfers.copyToBuffer(mesh.vertex_buffer, ColorVertex, vertices);
        self.transfers.copyToBuffer(mesh.index_buffer, u32, obj_mesh.indices);

        var id = self.id_next;
        self.id_next += 1;
        try self.list.put(id, mesh);
        return id;
    }

    /// Removes the mesh from the live table and schedules it for deferred deletion.
    pub fn free(self: *Self, id: u16) void {
        if (self.list.fetchRemove(id)) |entry| {
            // FIX: `append` returns an error union that was previously discarded,
            // which is a compile error. On allocation failure, destroy the mesh
            // immediately rather than leaking it.
            self.delete_list.append(entry.value) catch entry.value.deinit();
        }
    }

    /// Returns the mesh for `id`, or null if it was never loaded or was freed.
    pub fn get(self: Self, id: u16) ?Mesh {
        return self.list.get(id);
    }
};
src/renderer/mesh_manager.zig
const std = @import("std");
const win32 = @import("./win32.zig");

const GeneralPurposeAllocator = std.heap.GeneralPurposeAllocator(.{});
const StringHashMap = std.StringHashMap(void);
const Allocator = std.mem.Allocator;

/// Exit code 0 (FOUND) when every process name given on the command line is
/// currently running; 1 (NOT_FOUND) otherwise. Matching is case-insensitive.
pub fn main() u8 {
    const FOUND = 0;
    const NOT_FOUND = 1;

    var gpa = GeneralPurposeAllocator{};
    defer _ = gpa.deinit();
    var allocator = gpa.allocator();

    // Set of uppercased process names we are looking for.
    var args = getArgs(allocator) orelse return NOT_FOUND;
    defer deinitArgs(&args);

    var processes = getProcesses(allocator) orelse return NOT_FOUND;
    defer allocator.free(processes);

    for (processes) |process_id| {
        var name = getProcessName(allocator, process_id) orelse continue;
        defer allocator.free(name);
        // Remove each target name as it is found; success once the set is empty.
        if (args.fetchRemove(toUpper(name))) |entry| {
            allocator.free(entry.key);
            if (args.count() == 0) return FOUND;
        }
    }
    return NOT_FOUND;
}

/// Builds a set of uppercased command-line arguments (excluding argv[0]).
/// Returns null when there are no arguments. Free with deinitArgs.
fn getArgs(allocator: Allocator) ?StringHashMap {
    var iterator = std.process.ArgIterator.init();
    defer iterator.deinit();
    if (!iterator.skip()) return null;

    var args = StringHashMap.init(allocator);
    while (iterator.next(allocator) catch unreachable) |key| {
        var entry = args.getOrPut(toUpper(key)) catch unreachable;
        // Duplicate argument: keep the first allocation, free the new one.
        if (entry.found_existing) {
            allocator.free(key);
        }
    }
    if (args.count() == 0) {
        args.deinit();
        return null;
    } else {
        return args;
    }
}

/// Frees the remaining keys (those not consumed by fetchRemove) and the map.
fn deinitArgs(args: *StringHashMap) void {
    var allocator = args.allocator;
    var keys = args.keyIterator();
    while (keys.next()) |key| {
        allocator.free(key.*);
    }
    args.deinit();
}

/// Uppercases `str` in place (ASCII) and returns the same slice.
fn toUpper(str: []u8) []u8 {
    for (str) |*char| {
        char.* = std.ascii.toUpper(char.*);
    }
    return str;
}

/// Enumerates all process ids, growing the buffer until it fits.
/// Returns null on API failure; caller frees the returned slice.
/// NOTE(review): `errdefer` only fires on error returns — the `return null`
/// paths below leak `processes_buffer`.
fn getProcesses(allocator: Allocator) ?[]win32.DWORD {
    // It's likely to have few processes inside a container
    // Starting with a small buffer and grow as needed
    var max_process: usize = 16;
    var processes_buffer = allocator.alloc(win32.DWORD, max_process) catch unreachable;
    errdefer allocator.free(processes_buffer);

    var len: win32.DWORD = undefined;
    while (true) {
        var ret = win32.EnumProcesses(processes_buffer.ptr, @intCast(win32.DWORD, processes_buffer.len * @sizeOf(win32.DWORD)), &len);
        // EnumProcesses reports bytes written; convert to element count.
        len /= @sizeOf(win32.DWORD);
        if (ret == win32.FALSE or len == 0) {
            return null;
        } else if (len == max_process) {
            // A completely full buffer may mean truncation: retry with more room.
            // Double the buffer size
            max_process *= 2;
            processes_buffer = allocator.realloc(processes_buffer, max_process) catch unreachable;
            continue;
        } else {
            return allocator.shrink(processes_buffer, len);
        }
    }
}

/// Returns the executable base name of the given process id, or null on failure.
/// Caller frees the returned slice.
/// NOTE(review): the handle from OpenProcess is never released (no CloseHandle
/// on any path), leaking one handle per inspected process — verify against the
/// win32 bindings and close it.
fn getProcessName(allocator: Allocator, process_id: win32.DWORD) ?[]u8 {
    const DESIRED_ACCESS = win32.PROCESS_ACCESS_RIGHTS.QUERY_INFORMATION | win32.PROCESS_ACCESS_RIGHTS.VM_READ;
    var process_handle = win32.OpenProcess(DESIRED_ACCESS, win32.FALSE, process_id) orelse return null;

    var module: [1]win32.HMODULE = undefined;
    var len: win32.DWORD = undefined;
    var ret = win32.EnumProcessModules(process_handle, &module, @intCast(win32.DWORD, @sizeOf(win32.HMODULE)), &len);
    len /= @sizeOf(win32.HMODULE);
    if (ret == win32.FALSE or len == 0) {
        return null;
    } else {
        // Starting with a small buffer and grow as needed
        var max_process_name: usize = 64;
        var name_buffer = allocator.allocSentinel(win32.CHAR, max_process_name, 0) catch unreachable;
        errdefer allocator.free(name_buffer);
        while (true) {
            len = win32.GetModuleBaseName(process_handle, module[0], name_buffer.ptr, @intCast(win32.DWORD, max_process_name));
            // A result that fills the buffer may be truncated: grow and retry.
            if (len >= max_process_name) {
                // Double the buffer size
                max_process_name *= 2;
                name_buffer = std.meta.assumeSentinel(allocator.realloc(name_buffer, max_process_name) catch unreachable, 0);
            } else {
                return allocator.shrink(name_buffer, len);
            }
        }
    }
}
src/main.zig
const std = @import("std");
const bigint = std.math.big.int;

var gpa = std.heap.GeneralPurposeAllocator(.{}){};
var global_allocator = &gpa.allocator;

/// Streams digits of pi with an unbounded spigot algorithm, printing 10 digits
/// per line followed by a tab and the running digit count.
pub fn main() !void {
    const stdout = std.io.getStdOut().writer();
    const n = try get_n();

    // Small constants used throughout. Their backing Managed storage is never
    // deinited: they live for the whole program.
    const one = (try bigint.Managed.initSet(global_allocator, 1)).toConst();
    const two = (try bigint.Managed.initSet(global_allocator, 2)).toConst();
    const ten = (try bigint.Managed.initSet(global_allocator, 10)).toConst();

    // Spigot state.
    var k = try bigint.Managed.initSet(global_allocator, 1);
    var n1 = try bigint.Managed.initSet(global_allocator, 4);
    var n2 = try bigint.Managed.initSet(global_allocator, 3);
    var d = try bigint.Managed.initSet(global_allocator, 1);
    var u = try bigint.Managed.init(global_allocator);
    var v = try bigint.Managed.init(global_allocator);
    var w = try bigint.Managed.init(global_allocator);

    var digits_printed: usize = 0;
    // 10-character line buffer, space-padded on the final partial line.
    var sb = [10:0]u8{ ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ' };
    while (true) {
        // u = &n1 / &d;
        {
            var tmp = try bigint.Managed.init(global_allocator);
            defer tmp.deinit();
            try bigint.Managed.divFloor(&u, &tmp, n1.toConst(), d.toConst());
        }
        // v = &n2 / &d;
        {
            var tmp = try bigint.Managed.init(global_allocator);
            defer tmp.deinit();
            try bigint.Managed.divFloor(&v, &tmp, n2.toConst(), d.toConst());
        }
        // if u == v: both bounds agree, so the next digit is settled.
        if (bigint.Managed.eq(u, v)) {
            var digitStr = try u.toString(global_allocator, 10, std.fmt.Case.lower);
            // FIX: toString allocates; previously this string leaked once per digit.
            defer global_allocator.free(digitStr);
            const rem = @rem(digits_printed, 10);
            sb[rem] = digitStr[0];
            digits_printed += 1;
            if (rem == 9) {
                try stdout.print("{s}\t:{d}\n", .{ sb, digits_printed });
            }
            if (digits_printed >= n) {
                // Pad the final partial line with spaces before printing.
                if (rem != 9) {
                    var i = rem + 1;
                    while (i < 10) {
                        sb[i] = ' ';
                        i += 1;
                    }
                    try stdout.print("{s}\t:{d}\n", .{ sb, digits_printed });
                }
                break;
            }
            // let to_minus = &u * &ten * &d;
            var to_minus_managed = try bigint.Managed.init(global_allocator);
            defer to_minus_managed.deinit();
            {
                var tmp = try bigint.Managed.init(global_allocator);
                defer tmp.deinit();
                try bigint.Managed.mul(&tmp, u.toConst(), d.toConst());
                try bigint.Managed.mul(&to_minus_managed, tmp.toConst(), ten);
            }
            const to_minus = to_minus_managed.toConst();
            // n1 = &n1 * &ten - &to_minus;
            {
                var tmp = try bigint.Managed.init(global_allocator);
                defer tmp.deinit();
                try bigint.Managed.mul(&tmp, n1.toConst(), ten);
                try bigint.Managed.sub(&n1, tmp.toConst(), to_minus);
            }
            // n2 = &n2 * &ten - &to_minus;
            {
                var tmp = try bigint.Managed.init(global_allocator);
                defer tmp.deinit();
                try bigint.Managed.mul(&tmp, n2.toConst(), ten);
                try bigint.Managed.sub(&n2, tmp.toConst(), to_minus);
            }
        } else {
            // Bounds disagree: advance the linear fractional transformation.
            // let k2 = &k * &two;
            var k2 = try bigint.Managed.init(global_allocator);
            defer k2.deinit();
            try bigint.Managed.mul(&k2, k.toConst(), two);
            // u = &n1 * (&k2 - &one);
            {
                var tmp = try bigint.Managed.init(global_allocator);
                defer tmp.deinit();
                try bigint.Managed.sub(&tmp, k2.toConst(), one);
                var tmpu = try bigint.Managed.init(global_allocator);
                try bigint.Managed.mul(&tmpu, tmp.toConst(), n1.toConst());
                u.deinit();
                u = tmpu;
            }
            // v = &n2 * &two;
            {
                var tmpv = try bigint.Managed.init(global_allocator);
                try bigint.Managed.mul(&tmpv, n2.toConst(), two);
                v.deinit();
                v = tmpv;
            }
            // w = &n1 * (&k - &one);
            {
                var tmp = try bigint.Managed.init(global_allocator);
                defer tmp.deinit();
                try bigint.Managed.sub(&tmp, k.toConst(), one);
                var tmpw = try bigint.Managed.init(global_allocator);
                try bigint.Managed.mul(&tmpw, tmp.toConst(), n1.toConst());
                w.deinit();
                w = tmpw;
            }
            // n1 = &u + &v;
            try bigint.Managed.add(&n1, u.toConst(), v.toConst());
            // u = &n2 * (&k + &two);
            {
                var tmp = try bigint.Managed.init(global_allocator);
                defer tmp.deinit();
                try bigint.Managed.add(&tmp, k.toConst(), two);
                var tmpu = try bigint.Managed.init(global_allocator);
                try bigint.Managed.mul(&tmpu, tmp.toConst(), n2.toConst());
                u.deinit();
                u = tmpu;
            }
            // n2 = &w + &u;
            try bigint.Managed.add(&n2, w.toConst(), u.toConst());
            // d = &d * (&k2 + &one);
            {
                var tmp1 = try bigint.Managed.init(global_allocator);
                defer tmp1.deinit();
                try bigint.Managed.add(&tmp1, k2.toConst(), one);
                var tmpd = try bigint.Managed.init(global_allocator);
                try bigint.Managed.mul(&tmpd, tmp1.toConst(), d.toConst());
                d.deinit();
                d = tmpd;
            }
            // k = &k + &one;
            try bigint.Managed.add(&k, k.toConst(), one);
        }
    }
}

/// Parses the requested digit count from argv[1]; defaults to 1.
fn get_n() !i32 {
    const args = try std.process.argsAlloc(global_allocator);
    defer std.process.argsFree(global_allocator, args);
    if (args.len > 1) {
        return try std.fmt.parseInt(i32, args[1], 10);
    } else {
        return 1;
    }
}
bench/algorithm/pidigits/1.zig
const std = @import("std");

// Use zig translate-c to generate raylib.zig and swap imports to get better completion in IDEs
// const c = @import("raylib.zig");
const c = @cImport(
    @cInclude("raylib.h"),
);

// Wrappers / Convenience Functions --------------------------------------------

fn rect(x: f32, y: f32, width: f32, height: f32) c.Rectangle {
    return .{ .x = x, .y = y, .width = width, .height = height };
}

fn vec2(x: f32, y: f32) c.Vector2 {
    return c.Vector2{ .x = x, .y = y };
}

// Constants -------------------------------------------------------------------

const screen_width = 800;
const screen_height = 600;

/// Limits how wide of an angle the ball can bounce off a paddle with
/// Must be between 0 and 1
const bounce_angle_limit = 0.7;

// Keys
const player1_up_key = c.KEY_Q;
const player1_down_key = c.KEY_A;
const player2_up_key = c.KEY_P;
const player2_down_key = c.KEY_L;
const start_key = c.KEY_SPACE;
const pause_key = c.KEY_TAB;
const reset_key = c.KEY_R;

// Text
const title_text = "PONG";
const window_title = "PONG";
const start_message = "Press Space to Start.";
const paused_text = "Paused.";

// Types -----------------------------------------------------------------------

const GameState = enum {
    /// Right after launch or reset
    title,
    playing,
    pause,
    /// in-game after a player has scored
    start,
};

const Player = struct {
    rect: c.Rectangle,
    score: i32,

    /// Pixels
    const height = 40;
    /// Pixels
    const width = 5;
    /// Pixels per second
    const speed = 300;

    /// Move up or down clamped to screen size
    fn move(self: *Player, amt: f32) void {
        const new_y = self.rect.y + amt;
        if (new_y < 0 or new_y + Player.height > screen_height) return;
        self.rect.y = new_y;
    }
};

const Ball = struct {
    rect: c.Rectangle,
    velocity: c.Vector2,

    /// Pixels per second
    const speed = 500;
    /// Side length of square in pixels
    const size = 10;
};

const BallEvent = enum {
    bounce_top,
    bounce_bot,
    /// Bounced off player 1's paddle
    bounce1,
    /// Bounced off player 2's paddle
    bounce2,
    /// Scored a point for player 1
    score1,
    /// Scored a point for player 2
    score2,
};

// State -----------------------------------------------------------------------

var state: GameState = undefined;
/// Left Paddle
var player1: Player = undefined;
/// Right Paddle
var player2: Player = undefined;
var ball: Ball = undefined;
/// Shared buffer for String representations of each score
/// Raylib converts the string immediately and does not store the pointer
var score_str_buf: [32]u8 = undefined;

// Functions -------------------------------------------------------------------

pub fn main() !void {
    c.SetWindowState(c.FLAG_VSYNC_HINT);
    c.InitWindow(screen_width, screen_height, window_title);
    c.HideCursor();
    reset();
    while (!c.WindowShouldClose()) {
        update();
        draw();
    }
}

/// Center both paddles vertically at their fixed x positions.
fn resetPlayerPos() void {
    const start_y = screen_height / 2 - Player.height / 2;
    player1.rect = rect(20, start_y, Player.width, Player.height);
    player2.rect = rect(screen_width - 20, start_y, Player.width, Player.height);
}

/// Center the ball and serve it horizontally toward player 1.
fn resetBall() void {
    const ball_y = screen_height / 2 - Ball.size / 2;
    ball = .{
        .rect = rect(screen_width / 2, ball_y, Ball.size, Ball.size),
        .velocity = vec2(-Ball.speed, 0),
    };
}

/// Full game reset: scores, positions, and back to the title screen.
fn reset() void {
    state = .title;
    player1.score = 0;
    player2.score = 0;
    resetPlayerPos();
    resetBall();
}

fn update() void {
    // Reset
    if (c.IsKeyPressed(reset_key)) {
        reset();
        return;
    }
    switch (state) {
        .playing => {
            const delta = c.GetFrameTime();
            // Pause
            if (c.IsKeyPressed(pause_key)) {
                state = .pause;
                return;
            }
            // Player movement
            const move_amt = Player.speed * delta;
            if (c.IsKeyDown(player1_up_key)) player1.move(-move_amt);
            if (c.IsKeyDown(player1_down_key)) player1.move(move_amt);
            if (c.IsKeyDown(player2_up_key)) player2.move(-move_amt);
            if (c.IsKeyDown(player2_down_key)) player2.move(move_amt);
            // Update ball
            // Multiple events can happen in a single frame
            var time_left = delta;
            while (updateBall(&time_left)) |event| {
                switch (event) {
                    .score1 => {
                        player1.score += 1;
                        state = .start;
                        resetPlayerPos();
                        break;
                    },
                    .score2 => {
                        player2.score += 1;
                        state = .start;
                        resetPlayerPos();
                        break;
                    },
                    else => {},
                }
            }
        },
        .title, .start => {
            if (c.IsKeyPressed(start_key)) {
                resetPlayerPos();
                resetBall();
                state = .playing;
            }
        },
        .pause => {
            if (c.IsKeyPressed(pause_key)) {
                state = .playing;
            }
        },
    }
}

/// Check for all events that the ball could reach
/// and update the ball's state to after that event
/// subtract the time consumed from `time`
/// Last, return the event that occurred or null if no events occurred
fn updateBall(time: *f32) ?BallEvent {
    const player_time = playerTime();
    const wall_time = wallTime();
    const score_time = scoreTime();
    // Process the first event to occur
    const min_time = std.math.min3(player_time, wall_time, score_time);
    // No events occur in time
    // Just advance the ball
    if (min_time > time.*) {
        ball.rect.x += ball.velocity.x * time.*;
        ball.rect.y += ball.velocity.y * time.*;
        time.* = 0;
        return null;
    }
    // Subtract time and process event
    time.* -= min_time;
    if (min_time == score_time) {
        // Ball left through the right edge -> player 1 scores, and vice versa.
        if (ball.velocity.x > 0) {
            return .score1;
        } else {
            return .score2;
        }
    } else if (min_time == wall_time) {
        return bounceWall(wall_time);
    } else if (min_time == player_time) {
        return bouncePaddle(player_time);
    } else {
        unreachable;
    }
}

/// Time before the ball reaches a player's paddle
/// or infinity if the ball will not reach a paddle
fn playerTime() f32 {
    const inf = std.math.inf(f32);
    const xv = ball.velocity.x;
    std.debug.assert(xv != 0);
    if (xv > 0) {
        // Handle player 2 case
        const xdist = player2.rect.x - Ball.size - ball.rect.x;
        if (xdist < 0) return inf;
        const ptime = xdist / xv;
        // Ensure that the ball does not miss the paddle
        const ypos = ball.rect.y + ball.velocity.y * ptime;
        if (ypos + Ball.size < player2.rect.y or ypos > player2.rect.y + Player.height) return inf;
        return ptime;
    } else {
        // Handle player 1 case
        const xdist = ball.rect.x - player1.rect.x - Player.width;
        if (xdist < 0) return inf;
        const ptime = xdist / -xv;
        // Ensure that the ball does not miss the paddle
        const ypos = ball.rect.y + ball.velocity.y * ptime;
        if (ypos + Ball.size < player1.rect.y or ypos > player1.rect.y + Player.height) return inf;
        return ptime;
    }
}

/// Time before the ball reaches the top or bottom edge
/// or infinity if the ball's y-velocity is zero
fn wallTime() f32 {
    const inf = std.math.inf(f32);
    const yv = ball.velocity.y;
    if (yv > 0) {
        const ydist = screen_height - Ball.size - ball.rect.y;
        return ydist / yv;
    } else if (yv < 0) {
        return ball.rect.y / -yv;
    } else {
        return inf;
    }
}

/// Time before the ball reaches the left or right edge
fn scoreTime() f32 {
    const xv = ball.velocity.x;
    if (xv > 0) {
        const xdist = screen_width - Ball.size - ball.rect.x;
        return xdist / xv;
    } else if (xv < 0) {
        const xdist = ball.rect.x;
        return xdist / -xv;
    } else {
        unreachable;
    }
}

/// Update the ball's state from bouncing off the top or bottom edge
/// and return the event (which edge)
fn bounceWall(time: f32) BallEvent {
    const xv = ball.velocity.x;
    const yv = ball.velocity.y;
    ball.rect.x += xv * time;
    ball.rect.y += yv * time;
    if (yv > 0) {
        ball.velocity.y *= -1;
        // FIX: screen y grows downward, so positive y-velocity hits the BOTTOM
        // edge; the two labels were previously swapped. Harmless to callers
        // (update() ignores both), but the events were misleading.
        return .bounce_bot;
    } else {
        ball.velocity.y *= -1;
        return .bounce_top;
    }
}

/// Update the ball's state from bouncing off a paddle
/// and return the event (which player's paddle it bounced off)
fn bouncePaddle(time: f32) BallEvent {
    const y = ball.rect.y + ball.velocity.y * time;
    const xv = ball.velocity.x;
    std.debug.assert(xv != 0);
    var paddle_y: f32 = undefined;
    var ret: BallEvent = undefined;
    if (xv > 0) {
        paddle_y = player2.rect.y;
        ret = .bounce2;
    } else {
        paddle_y = player1.rect.y;
        ret = .bounce1;
    }
    const vel = paddleHitVelocity(y - paddle_y);
    // paddleHitVelocity always returns positive x; flip it away from the paddle.
    ball.velocity.x = if (ret == .bounce1) vel.x else -vel.x;
    ball.velocity.y = vel.y;
    return ret;
}

/// New velocity of the ball
/// where `diff` is the ball's y-position minus the paddle's y-position
/// Always returns positive x velocity
fn paddleHitVelocity(diff: f32) c.Vector2 {
    const math = std.math;
    // offset from the middle of the paddle
    const off = diff + Ball.size / 2 - Player.height / 2;
    // normalize it to a value within -1 and 1
    const norm_off = off / (Ball.size / 2 + Player.height / 2);
    const angle = norm_off * std.math.pi * 0.5 * bounce_angle_limit;
    return vec2(Ball.speed * math.cos(angle), Ball.speed * math.sin(angle));
}

fn draw() void {
    c.BeginDrawing();
    c.ClearBackground(c.BLACK);
    defer c.EndDrawing();
    // c.DrawFPS(80, 20);

    // Draw Scores
    c.DrawText(scoreStr(player1.score), 30, 30, 20, c.WHITE);
    const score2_text = scoreStr(player2.score);
    const score2_width = c.MeasureText(score2_text, 20);
    const score2_x = screen_width - 30 - score2_width;
    c.DrawText(score2_text, score2_x, 30, 20, c.WHITE);

    // Draw Players
    c.DrawRectangleRec(player1.rect, c.WHITE);
    c.DrawRectangleRec(player2.rect, c.WHITE);

    // Draw Other
    switch (state) {
        .playing => c.DrawRectangleRec(ball.rect, c.WHITE),
        .pause => {
            c.DrawRectangleRec(ball.rect, c.WHITE);
            drawCenterText(paused_text);
        },
        .start => {
            drawCenterText(start_message);
        },
        .title => {
            drawCenterText(start_message);
            const width = c.MeasureText(title_text, 100);
            const x = @divTrunc(screen_width, 2) - @divTrunc(width, 2);
            c.DrawText(title_text, x, screen_height / 4 - 50, 100, c.WHITE);
        },
    }
}

/// Draw `text` horizontally centered at mid-screen height.
fn drawCenterText(text: [*c]const u8) void {
    const width = c.MeasureText(text, 20);
    const x = @divTrunc(screen_width, 2) - @divTrunc(width, 2);
    c.DrawText(text, x, screen_height / 2 - 10, 20, c.WHITE);
}

/// Get a C-style string from a score
fn scoreStr(score: i32) [*c]const u8 {
    // Just in case the buffer would _somehow_ overflow
    const buf = score_str_buf[0 .. score_str_buf.len - 1];
    const slice = std.fmt.bufPrint(buf, "{}", .{score}) catch {
        const text: [*c]const u8 = "error";
        return text;
    };
    score_str_buf[slice.len] = 0;
    return @ptrCast([*c]const u8, slice);
}
src/main.zig
const std = @import("std");
const c = @import("c.zig");
const util = @import("util.zig");
const log = std.log.scoped(.messages);

// libuv callback signatures re-exported with Zig optional-pointer types.
pub const WriteCallback = fn (handle: ?*c.uv_write_t, status: i32, buffer: ?*c.uv_buf_t) callconv(.C) void;
pub const AllocCallback = fn (handle: ?*c.uv_handle_t, size: usize, buf: ?*c.uv_buf_t) callconv(.C) void;
pub const ReadCallback = fn (handle: ?*c.uv_stream_t, nread: isize, buf: ?*const c.uv_buf_t) callconv(.C) void;

// Heap-allocated state for one in-flight uv_write; `req` must stay the first-
// declared field context recovered via @fieldParentPtr in write_finish.
const WriteReq = struct {
    req: c.uv_write_t,
    buf: c.uv_buf_t,
    alloc: *std.mem.Allocator,
    callback: ?WriteCallback,
};

/// Write data to a given stream
/// Does _not_ take ownership of the data slice
/// The data slice must be freed in the callback
pub fn write(
    allocator: *std.mem.Allocator,
    data: []u8,
    stream: anytype,
    callback: ?WriteCallback,
) !void {
    // Enforce at compile time that `stream` is a pointer (uv_write needs one).
    comptime {
        if (!util.is_pointer(@TypeOf(stream))) {
            @compileLog(@TypeOf(stream));
            @compileError("Stream must be a pointer type");
        }
    }
    var req = try allocator.create(WriteReq);
    errdefer allocator.destroy(req);
    req.buf = c.uv_buf_init(data.ptr, @intCast(u32, data.len));
    req.alloc = allocator;
    req.callback = callback;
    const result = c.uv_write(
        &req.req,
        @ptrCast(?*c.uv_stream_t, stream),
        &req.buf,
        1,
        write_finish,
    );
    if (result < 0) {
        return error.write_error;
    }
}

/// A wrapper function to free the `WriteReq` and make it unnecessary
/// to leak that data structure
export fn write_finish(req: ?*c.uv_write_t, status: i32) void {
    // `req` is embedded in a WriteReq; recover the owning struct.
    const write_req = @fieldParentPtr(WriteReq, "req", req.?);
    if (write_req.callback) |callback| {
        callback(req, status, &write_req.buf);
    }
    write_req.alloc.destroy(write_req);
}

/// Begin reading from `stream`; libuv invokes `alloc_cb` to obtain buffers
/// and `read_cb` when data (or EOF/error via negative nread) arrives.
pub fn read_start(stream: anytype, alloc_cb: AllocCallback, read_cb: ReadCallback) !void {
    comptime {
        if (!util.is_pointer(@TypeOf(stream))) {
            @compileLog(@TypeOf(stream));
            @compileError("Stream must be a pointer type");
        }
    }
    const result = c.uv_read_start(@ptrCast(?*c.uv_stream_t, stream), alloc_cb, read_cb);
    if (result < 0) {
        return error.stream_read_error;
    }
}

// One compact-format peer entry: 4-byte IPv4 address + 2-byte port.
pub const COMPACT_PEER_LEN: usize = 6;

// Four-byte zero length prefix: the BitTorrent keep-alive message.
pub const KEEP_ALIVE = "\x00\x00\x00\x00";
// NOTE(review): "{:4}" is used as the placeholder for the 4-byte big-endian
// length prefix (passed as a byte slice) — confirm this format-spec behavior
// against the targeted Zig version's std.fmt.
const MSG_LEN = "{:4}";
const UNCHOKE = MSG_LEN ++ "\x01";
const INTERESTED = MSG_LEN ++ "\x02";
const REQUEST = MSG_LEN ++ "\x06{:4}{:4}{:4}";

// HTTP GET template for the tracker announce request (compact peer format).
pub const TRACKER_GET_REQUEST_FORMAT = "GET {}?peer_id={}&" ++
    "info_hash={}&" ++
    "port={}&uploaded={}&downloaded={}&" ++
    "compact=1&left={} HTTP/1.1\r\nHost: {}\r\n\r\n";

/// Consists of
/// - Magic: 0x13
/// - "BitTorrent protocol"
/// - 8 bytes of 0s
/// - 20 byte sha1 hash of the info data
/// - 20 byte peer id reported by the tracker
const HANDSHAKE_REQUEST_FORMAT = "\x13BitTorrent protocol" ++ ("\x00" ** 8) ++ "{:20}{:20}";
pub const HANDSHAKE_LENGTH = 68;
pub const HEARTBEAT_FORMAT = "";

// Message Type + Optional payload
pub const MESSAGE_FORMAT = "{}" ++ "{}";

// Wire message IDs in protocol order; keep_alive is a synthetic extra tag
// (on the wire it is a zero-length message with no ID byte).
pub const MessageType = enum(u8) {
    choke,
    unchoke,
    interested,
    not_interested,
    have,
    bitfield,
    request,
    piece,
    cancel,
    keep_alive,
};

/// Create the handshake message for a particular torrent
/// User must free the memory at some point
/// Use this rather than trying to format directly to not get
/// the ordering wrong of peer_id and info_hash
pub fn make_handshake(alloc: *std.mem.Allocator, peer_id: []const u8, info_hash: []const u8) ![]u8 {
    return std.fmt.allocPrint(alloc, HANDSHAKE_REQUEST_FORMAT, .{ info_hash, peer_id });
}

/// Build an `unchoke` message. Caller owns the returned slice.
pub fn make_unchoke(alloc: *std.mem.Allocator) ![]u8 {
    const msg_len = std.mem.nativeToBig(u32, 1);
    return std.fmt.allocPrint(alloc, UNCHOKE, .{std.mem.asBytes(&msg_len)});
}

/// Build an `interested` message. Caller owns the returned slice.
pub fn make_interested(alloc: *std.mem.Allocator) ![]u8 {
    const msg_len = std.mem.nativeToBig(u32, 1);
    return std.fmt.allocPrint(alloc, INTERESTED, .{std.mem.asBytes(&msg_len)});
}

/// Build a block `request` message (piece index, byte offset, length),
/// all fields big-endian on the wire. Caller owns the returned slice.
pub fn make_request(alloc: *std.mem.Allocator, index: usize, offset: usize, length: usize) ![]u8 {
    // 1 ID byte plus three u32 payload fields.
    const msg_len = (1 + @sizeOf(u32) * 3);
    const msg_len_big = std.mem.nativeToBig(u32, msg_len);
    const idx_big = std.mem.nativeToBig(u32, @intCast(u32, index));
    const offset_big = std.mem.nativeToBig(u32, @intCast(u32, offset));
    const length_big = std.mem.nativeToBig(u32, @intCast(u32, length));
    return std.fmt.allocPrint(alloc, REQUEST, .{
        std.mem.asBytes(&msg_len_big),
        std.mem.asBytes(&idx_big),
        std.mem.asBytes(&offset_big),
        std.mem.asBytes(&length_big),
    });
}

// NOTE(review): only checks the length; `info_hash` is accepted but not yet
// compared against the handshake payload — confirm whether hash validation
// is intended here.
fn validate_handshake(msg: []const u8, info_hash: []const u8) bool {
    return msg.len >= HANDSHAKE_LENGTH;
}

/// Validate and strip a peer handshake from the front of `msg`,
/// returning the handshake bytes.
pub fn consume_handshake(msg: []const u8, info_hash: []const u8) ![]const u8 {
    if (!validate_handshake(msg, info_hash)) {
        return error.invalid_handshake;
    }
    return msg[0..HANDSHAKE_LENGTH];
}

// A parsed wire message. `data` borrows from the caller's buffer;
// `total_size` counts the 4-byte length prefix.
pub const Message = struct {
    total_size: u32,
    message_type: MessageType,
    data: []const u8,

    // Read a big-endian integer of type T from `data` at `offset`.
    pub fn number(self: Message, comptime T: type, offset: usize) T {
        var num = [_]u8{undefined} ** @sizeOf(T);
        std.mem.copy(u8, num[0..], self.data[offset .. offset + @sizeOf(T)]);
        return std.mem.bigToNative(T, std.mem.bytesToValue(T, num[0..]));
    }
};

/// Consume a message
/// throws away the size field if
/// a complete message exists
/// Errors on invalid data
pub fn consume_message(data: []const u8) !?Message {
    var d = data[0..];
    if (d.len < 4) {
        return null;
    }
    const size = std.mem.bigToNative(u32, std.mem.bytesToValue(u32, d[0..4]));
    d = d[4..];
    // Keep alive message - only consume size
    if (size == 0) {
        return Message{
            .total_size = 4,
            .message_type = .keep_alive,
            .data = d[0..size],
        };
    }
    // we don't have enough from the stream
    if (d.len < size) {
        return null;
    }
    const msg_type = message_type(d[0]);
    if (msg_type) |t| {
        return Message{
            .total_size = size + 4,
            .message_type = t,
            .data = d[1..size],
        };
    } else {
        return error.unknown_msg_type;
    }
}

/// Map a wire message-ID byte onto MessageType, or null if out of range.
pub fn message_type(char: u8) ?MessageType {
    if (char > std.meta.fields(MessageType).len - 1) {
        return null;
    }
    return @intToEnum(MessageType, char);
}
// ---- file: src/messages.zig ----
// Auto-generated win32 NetBIOS bindings: constants, NCB structures, and the
// Netbios() entry point from NETAPI32.

// Name sizing and flag masks.
pub const NCBNAMSZ = @as(u32, 16);
pub const MAX_LANA = @as(u32, 254);
pub const NAME_FLAGS_MASK = @as(u32, 135);
pub const GROUP_NAME = @as(u32, 128);
pub const UNIQUE_NAME = @as(u32, 0);

// Name registration states.
pub const REGISTERING = @as(u32, 0);
pub const REGISTERED = @as(u32, 4);
pub const DEREGISTERED = @as(u32, 5);
pub const DUPLICATE = @as(u32, 6);
pub const DUPLICATE_DEREG = @as(u32, 7);

// Session states.
pub const LISTEN_OUTSTANDING = @as(u32, 1);
pub const CALL_PENDING = @as(u32, 2);
pub const SESSION_ESTABLISHED = @as(u32, 3);
pub const HANGUP_PENDING = @as(u32, 4);
pub const HANGUP_COMPLETE = @as(u32, 5);
pub const SESSION_ABORTED = @as(u32, 6);

// NCB command codes.
pub const NCBCALL = @as(u32, 16);
pub const NCBLISTEN = @as(u32, 17);
pub const NCBHANGUP = @as(u32, 18);
pub const NCBSEND = @as(u32, 20);
pub const NCBRECV = @as(u32, 21);
pub const NCBRECVANY = @as(u32, 22);
pub const NCBCHAINSEND = @as(u32, 23);
pub const NCBDGSEND = @as(u32, 32);
pub const NCBDGRECV = @as(u32, 33);
pub const NCBDGSENDBC = @as(u32, 34);
pub const NCBDGRECVBC = @as(u32, 35);
pub const NCBADDNAME = @as(u32, 48);
pub const NCBDELNAME = @as(u32, 49);
pub const NCBRESET = @as(u32, 50);
pub const NCBASTAT = @as(u32, 51);
pub const NCBSSTAT = @as(u32, 52);
pub const NCBCANCEL = @as(u32, 53);
pub const NCBADDGRNAME = @as(u32, 54);
pub const NCBENUM = @as(u32, 55);
pub const NCBUNLINK = @as(u32, 112);
pub const NCBSENDNA = @as(u32, 113);
pub const NCBCHAINSENDNA = @as(u32, 114);
pub const NCBLANSTALERT = @as(u32, 115);
pub const NCBACTION = @as(u32, 119);
pub const NCBFINDNAME = @as(u32, 120);
pub const NCBTRACE = @as(u32, 121);
// OR'ed into a command code to request asynchronous execution.
pub const ASYNCH = @as(u32, 128);

// NRC_* return codes.
pub const NRC_GOODRET = @as(u32, 0);
pub const NRC_BUFLEN = @as(u32, 1);
pub const NRC_ILLCMD = @as(u32, 3);
pub const NRC_CMDTMO = @as(u32, 5);
pub const NRC_INCOMP = @as(u32, 6);
pub const NRC_BADDR = @as(u32, 7);
pub const NRC_SNUMOUT = @as(u32, 8);
pub const NRC_NORES = @as(u32, 9);
pub const NRC_SCLOSED = @as(u32, 10);
pub const NRC_CMDCAN = @as(u32, 11);
pub const NRC_DUPNAME = @as(u32, 13);
pub const NRC_NAMTFUL = @as(u32, 14);
pub const NRC_ACTSES = @as(u32, 15);
pub const NRC_LOCTFUL = @as(u32, 17);
pub const NRC_REMTFUL = @as(u32, 18);
pub const NRC_ILLNN = @as(u32, 19);
pub const NRC_NOCALL = @as(u32, 20);
pub const NRC_NOWILD = @as(u32, 21);
pub const NRC_INUSE = @as(u32, 22);
pub const NRC_NAMERR = @as(u32, 23);
pub const NRC_SABORT = @as(u32, 24);
pub const NRC_NAMCONF = @as(u32, 25);
pub const NRC_IFBUSY = @as(u32, 33);
pub const NRC_TOOMANY = @as(u32, 34);
pub const NRC_BRIDGE = @as(u32, 35);
pub const NRC_CANOCCR = @as(u32, 36);
pub const NRC_CANCEL = @as(u32, 38);
pub const NRC_DUPENV = @as(u32, 48);
pub const NRC_ENVNOTDEF = @as(u32, 52);
pub const NRC_OSRESNOTAV = @as(u32, 53);
pub const NRC_MAXAPPS = @as(u32, 54);
pub const NRC_NOSAPS = @as(u32, 55);
pub const NRC_NORESOURCES = @as(u32, 56);
pub const NRC_INVADDRESS = @as(u32, 57);
pub const NRC_INVDDID = @as(u32, 59);
pub const NRC_LOCKFAIL = @as(u32, 60);
pub const NRC_OPENERR = @as(u32, 63);
pub const NRC_SYSTEM = @as(u32, 64);
pub const NRC_PENDING = @as(u32, 255);

//--------------------------------------------------------------------------------
// Section: Types (10)
//--------------------------------------------------------------------------------
pub const ADAPTER_STATUS = extern struct {
    adapter_address: [6]u8,
    rev_major: u8,
    reserved0: u8,
    adapter_type: u8,
    rev_minor: u8,
    duration: u16,
    frmr_recv: u16,
    frmr_xmit: u16,
    iframe_recv_err: u16,
    xmit_aborts: u16,
    xmit_success: u32,
    recv_success: u32,
    iframe_xmit_err: u16,
    recv_buff_unavail: u16,
    t1_timeouts: u16,
    ti_timeouts: u16,
    reserved1: u32,
    free_ncbs: u16,
    max_cfg_ncbs: u16,
    max_ncbs: u16,
    xmit_buf_unavail: u16,
    max_dgram_size: u16,
    pending_sess: u16,
    max_cfg_sess: u16,
    max_sess: u16,
    max_sess_pkt_size: u16,
    name_count: u16,
};

pub const NAME_BUFFER = extern struct {
    name: [16]u8,
    name_num: u8,
    name_flags: u8,
};

pub const SESSION_HEADER = extern struct {
    sess_name: u8,
    num_sess: u8,
    rcv_dg_outstanding: u8,
    rcv_any_outstanding: u8,
};

pub const SESSION_BUFFER = extern struct {
    lsn: u8,
    state: u8,
    local_name: [16]u8,
    remote_name: [16]u8,
    rcvs_outstanding: u8,
    sends_outstanding: u8,
};

pub const LANA_ENUM = extern struct {
    length: u8,
    lana: [255]u8,
};

pub const FIND_NAME_HEADER = extern struct {
    node_count: u16,
    reserved: u8,
    unique_group: u8,
};

pub const FIND_NAME_BUFFER = extern struct {
    length: u8,
    access_control: u8,
    frame_control: u8,
    destination_addr: [6]u8,
    source_addr: [6]u8,
    routing_info: [18]u8,
};

pub const ACTION_HEADER = extern struct {
    transport_id: u32,
    action_code: u16,
    reserved: u16,
};

// The NCB layout differs per architecture (reserve-area size).
pub const NCB = switch (@import("../zig.zig").arch) {
    .X64, .Arm64 => extern struct {
        ncb_command: u8,
        ncb_retcode: u8,
        ncb_lsn: u8,
        ncb_num: u8,
        ncb_buffer: ?*u8,
        ncb_length: u16,
        ncb_callname: [16]u8,
        ncb_name: [16]u8,
        ncb_rto: u8,
        ncb_sto: u8,
        ncb_post: isize,
        ncb_lana_num: u8,
        ncb_cmd_cplt: u8,
        ncb_reserve: [18]u8,
        ncb_event: ?HANDLE,
    },
    .X86 => extern struct {
        ncb_command: u8,
        ncb_retcode: u8,
        ncb_lsn: u8,
        ncb_num: u8,
        ncb_buffer: ?*u8,
        ncb_length: u16,
        ncb_callname: [16]u8,
        ncb_name: [16]u8,
        ncb_rto: u8,
        ncb_sto: u8,
        ncb_post: isize,
        ncb_lana_num: u8,
        ncb_cmd_cplt: u8,
        ncb_reserve: [10]u8,
        ncb_event: ?HANDLE,
    },
};

//--------------------------------------------------------------------------------
// Section: Functions (1)
//--------------------------------------------------------------------------------
// TODO: this type is limited to platform 'windows5.0'
pub extern "NETAPI32" fn Netbios(
    pncb: ?*NCB,
) callconv(@import("std").os.windows.WINAPI) u8;

//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
    .ansi => struct {},
    .wide => struct {},
    .unspecified => if (@import("builtin").is_test) struct {} else struct {},
};

//--------------------------------------------------------------------------------
// Section: Imports (1)
//--------------------------------------------------------------------------------
const HANDLE = @import("../foundation.zig").HANDLE;

test {
    // Grow the comptime budget to cover iterating every declaration.
    @setEvalBranchQuota(
        @import("std").meta.declarations(@This()).len * 3,
    );

    // reference all the pub declarations
    if (!@import("builtin").is_test) return;
    inline for (@import("std").meta.declarations(@This())) |decl| {
        if (decl.is_pub) {
            _ = decl;
        }
    }
}
// ---- file: win32/network_management/net_bios.zig ----
const std = @import("std");

// Fixed-width wire type for lengths so serialized data is portable across
// platforms with different usize widths.
const serde_usize = u64;

const SerdeError = error{
    OutOfMemory,
    AllocatorRequired,
};

/// Serialize `value` into a newly allocated byte buffer. Caller owns the
/// returned slice. Types may override by declaring their own `serialize`.
pub fn serialize(allocator: std.mem.Allocator, value: anytype) ![]u8 {
    const Type = @TypeOf(value);
    if (@hasDecl(Type, "serialize")) {
        return try Type.serialize(allocator, value);
    } else {
        const bytes = allocator.alloc(u8, serializeSize(value)) catch return error.OutOfMemory;
        _ = serializeBytes(value, bytes);
        return bytes;
    }
}

// Write `value` into the front of `bytes` and return the remaining
// (unwritten) tail of the buffer.
fn serializeBytes(value: anytype, bytes: []u8) []u8 {
    var out_bytes = bytes;
    switch (@typeInfo(@TypeOf(value))) {
        .Void => {},
        .Bool, .Int, .Float, .Enum => {
            // Scalars are copied in native byte order.
            const value_bytes = std.mem.toBytes(value);
            std.mem.copy(u8, out_bytes, &value_bytes);
            out_bytes = out_bytes[value_bytes.len..];
        },
        .Optional => {
            // A bool "present" flag precedes the payload.
            if (value) |v| {
                out_bytes = serializeBytes(true, out_bytes);
                out_bytes = serializeBytes(v, out_bytes);
            } else {
                out_bytes = serializeBytes(false, out_bytes);
            }
        },
        .Pointer => |P| {
            switch (P.size) {
                .One => out_bytes = serializeBytes(value.*, out_bytes),
                .Slice => {
                    // Length prefix, then elements (bulk-copied when scalar).
                    out_bytes = serializeBytes(@intCast(serde_usize, value.len), out_bytes);
                    switch (@typeInfo(P.child)) {
                        .Bool, .Int, .Float, .Enum => {
                            const value_bytes = std.mem.sliceAsBytes(value);
                            std.mem.copy(u8, out_bytes, value_bytes);
                            out_bytes = out_bytes[value_bytes.len..];
                        },
                        else => {
                            for (value) |v| {
                                out_bytes = serializeBytes(v, out_bytes);
                            }
                        },
                    }
                    if (P.sentinel) |s| {
                        out_bytes = serializeBytes(s, out_bytes);
                    }
                },
                else => |E| {
                    @compileError("Cannot serialize pointer size " ++ @tagName(E) ++ "!");
                },
            }
        },
        .Array => |A| {
            switch (@typeInfo(A.child)) {
                .Bool, .Int, .Float, .Enum => {
                    const value_bytes = std.mem.sliceAsBytes(value[0..]);
                    std.mem.copy(u8, out_bytes, value_bytes);
                    out_bytes = out_bytes[value_bytes.len..];
                },
                else => {
                    for (value) |v| {
                        out_bytes = serializeBytes(v, out_bytes);
                    }
                },
            }
            if (A.sentinel) |s| {
                out_bytes = serializeBytes(s, out_bytes);
            }
        },
        .Struct => |S| {
            // Fields are written in declaration order.
            inline for (S.fields) |field| {
                out_bytes = serializeBytes(@field(value, field.name), out_bytes);
            }
        },
        .Union => |U| {
            const UnionTagType = U.tag_type orelse @compileError("Cannot serialize a union without a tag type!");
            // Tag first, then the active payload only.
            const tag = std.meta.activeTag(value);
            out_bytes = serializeBytes(tag, out_bytes);
            inline for (U.fields) |field| {
                if (@field(UnionTagType, field.name) == tag) {
                    out_bytes = serializeBytes(@field(value, field.name), out_bytes);
                    break;
                }
            }
        },
        else => |E| @compileError("Cannot serialize type " ++ @tagName(E) ++ "!"),
    }
    return out_bytes;
}

/// Number of bytes `serializeBytes` would write for `value`.
/// Must stay in lock-step with serializeBytes' encoding.
pub fn serializeSize(value: anytype) usize {
    var size: usize = 0;
    switch (@typeInfo(@TypeOf(value))) {
        .Void => {},
        .Bool, .Int, .Float, .Enum => size += @sizeOf(@TypeOf(value)),
        .Optional => {
            size += @sizeOf(bool); // null flag
            if (value) |v| {
                size += serializeSize(v);
            }
        },
        .Pointer => |P| {
            switch (P.size) {
                .One => size += serializeSize(value.*),
                .Slice => {
                    size += @sizeOf(serde_usize); // len
                    switch (@typeInfo(P.child)) {
                        .Bool, .Int, .Float, .Enum => size += @sizeOf(P.child) * value.len,
                        else => {
                            for (value) |v| {
                                size += serializeSize(v);
                            }
                        },
                    }
                    if (P.sentinel) |s| {
                        size += serializeSize(s);
                    }
                },
                else => |E| {
                    @compileError("Cannot serialize pointer size " ++ @tagName(E) ++ "!");
                },
            }
        },
        .Array => |A| {
            switch (@typeInfo(A.child)) {
                .Bool, .Int, .Float, .Enum => size += @sizeOf(A.child) * value.len,
                else => {
                    for (value) |v| {
                        size += serializeSize(v);
                    }
                },
            }
            if (A.sentinel) |s| {
                size += serializeSize(s);
            }
        },
        .Struct => |S| {
            inline for (S.fields) |field| {
                size += serializeSize(@field(value, field.name));
            }
        },
        .Union => |U| {
            const UnionTagType = U.tag_type orelse @compileError("Cannot serialize a union without a tag type!");
            const tag = std.meta.activeTag(value);
            size += @sizeOf(UnionTagType);
            inline for (U.fields) |field| {
                if (@field(UnionTagType, field.name) == tag) {
                    size += serializeSize(@field(value, field.name));
                    break;
                }
            }
        },
        else => |E| @compileError("Cannot serialize type " ++ @tagName(E) ++ "!"),
    }
    return size;
}

// Options for deserialization.
pub const DeserializeDesc = struct {
    // Required whenever the target type contains pointers/slices that must
    // be heap-allocated.
    allocator: ?std.mem.Allocator = null,
    // When true, const scalar slices may alias the input bytes instead of
    // being copied.
    bytes_are_embedded: bool = false,
};

/// Deserialize a `Type` from `bytes`. Types may override by declaring their
/// own `deserialize`.
pub fn deserialize(desc: DeserializeDesc, comptime Type: type, bytes: []const u8) !Type {
    if (@hasDecl(Type, "deserialize")) {
        return try Type.deserialize(desc, bytes);
    } else {
        var value: Type = undefined;
        _ = try deserializeBytes(desc, &value, bytes);
        return value;
    }
}

/// Read one value into `value` (a pointer) from the front of `bytes` and
/// return the unread tail. Inverse of serializeBytes.
pub fn deserializeBytes(
    desc: DeserializeDesc,
    value: anytype,
    bytes: []const u8,
) SerdeError![]const u8 {
    var in_bytes = bytes;
    const Type = @typeInfo(@TypeOf(value)).Pointer.child;
    switch (@typeInfo(Type)) {
        .Void => {},
        .Bool, .Int, .Float, .Enum => {
            value.* = std.mem.bytesAsSlice(Type, in_bytes[0..@sizeOf(Type)])[0];
            in_bytes = in_bytes[@sizeOf(Type)..];
        },
        .Optional => {
            var exists: bool = undefined;
            in_bytes = try deserializeBytes(desc, &exists, in_bytes);
            if (exists) {
                in_bytes = try deserializeBytes(desc, &value.*.?, in_bytes);
            } else {
                value.* = null;
            }
        },
        .Pointer => |P| {
            switch (P.size) {
                .One => {
                    if (desc.allocator) |a| {
                        var ptr = a.create(P.child) catch return error.OutOfMemory;
                        in_bytes = try deserializeBytes(desc, ptr, in_bytes);
                        value.* = ptr;
                    } else {
                        return error.AllocatorRequired;
                    }
                },
                .Slice => {
                    var serde_len: serde_usize = undefined;
                    in_bytes = try deserializeBytes(desc, &serde_len, in_bytes);
                    const len = @intCast(usize, serde_len);
                    if (desc.allocator) |a| {
                        var slice = a.alloc(P.child, len) catch return error.OutOfMemory;
                        switch (@typeInfo(P.child)) {
                            .Bool, .Int, .Float, .Enum => {
                                std.mem.copy(
                                    P.child,
                                    slice,
                                    std.mem.bytesAsSlice(
                                        P.child,
                                        in_bytes[0 .. len * @sizeOf(P.child)],
                                    ),
                                );
                                in_bytes = in_bytes[len * @sizeOf(P.child) ..];
                            },
                            else => {
                                for (slice) |*s| {
                                    in_bytes = try deserializeBytes(desc, s, in_bytes);
                                }
                            },
                        }
                        if (P.sentinel) |_| {
                            // FIXME(review): slice pointer type info has no `len`
                            // field, so `slice[P.len]` cannot compile if this
                            // branch is ever instantiated; the sentinel likely
                            // needs a sentinel-aware allocation and `slice[len]`.
                            in_bytes = try deserializeBytes(desc, &slice[P.len], in_bytes);
                        }
                        value.* = slice;
                    } else if (P.is_const and desc.bytes_are_embedded) {
                        switch (@typeInfo(P.child)) {
                            .Bool, .Int, .Float, .Enum => {
                                // Zero-copy: the result aliases the input buffer.
                                value.* = std.mem.bytesAsSlice(
                                    P.child,
                                    in_bytes[0 .. len * @sizeOf(P.child)],
                                );
                                in_bytes = in_bytes[len * @sizeOf(P.child) ..];
                            },
                            else => return error.AllocatorRequired,
                        }
                    } else {
                        return error.AllocatorRequired;
                    }
                },
                else => |E| {
                    @compileError("Cannot deserialize pointer size " ++ @tagName(E));
                },
            }
        },
        .Array => |A| {
            switch (@typeInfo(A.child)) {
                .Bool, .Int, .Float, .Enum => {
                    // NOTE(review): reads from `bytes` rather than `in_bytes`;
                    // equivalent here only because nothing has been consumed
                    // yet in this branch — confirm and prefer `in_bytes`.
                    std.mem.copy(
                        A.child,
                        value.*[0..],
                        std.mem.bytesAsSlice(
                            A.child,
                            bytes[0 .. A.len * @sizeOf(A.child)],
                        ),
                    );
                    in_bytes = in_bytes[A.len * @sizeOf(A.child) ..];
                },
                else => {
                    for (value.*) |*v| {
                        in_bytes = try deserializeBytes(desc, v, in_bytes);
                    }
                },
            }
            if (A.sentinel) |_| {
                in_bytes = try deserializeBytes(desc, &value.*[A.len], in_bytes);
            }
        },
        .Struct => |S| {
            inline for (S.fields) |field| {
                in_bytes = try deserializeBytes(desc, &@field(value.*, field.name), in_bytes);
            }
        },
        .Union => |U| {
            const UnionTagType = U.tag_type orelse @compileError("Cannot deserialize a union without a tag!");
            var tag: UnionTagType = undefined;
            in_bytes = try deserializeBytes(desc, &tag, in_bytes);
            inline for (U.fields) |field| {
                if (@field(UnionTagType, field.name) == tag) {
                    const UnionType = @TypeOf(@field(value.*, field.name));
                    var u: UnionType = undefined;
                    in_bytes = try deserializeBytes(desc, &u, in_bytes);
                    value.* = @unionInit(Type, field.name, u);
                    break;
                }
            }
        },
        else => |E| @compileError("Cannot deserializeBytes desc, type " ++ @tagName(E)),
    }
    return in_bytes;
}

/// Recursively free memory allocated by `deserialize` for `value`.
/// Must be called with the same allocator that was used to deserialize.
pub fn deserializeFree(allocator: std.mem.Allocator, value: anytype) void {
    switch (@typeInfo(@TypeOf(value))) {
        .Void, .Bool, .Int, .Float, .Enum => {},
        .Optional => {
            if (value) |v| {
                deserializeFree(allocator, v);
            }
        },
        .Pointer => |P| {
            switch (P.size) {
                .One => {
                    deserializeFree(allocator, value.*);
                    allocator.destroy(value);
                },
                .Slice => {
                    switch (@typeInfo(P.child)) {
                        .Bool, .Int, .Float, .Enum => {},
                        else => {
                            for (value) |v| {
                                deserializeFree(allocator, v);
                            }
                        },
                    }
                    allocator.free(value);
                },
                else => |E| {
                    @compileError("Cannot deserialize pointer size " ++ @tagName(E) ++ "!");
                },
            }
        },
        .Array => |A| {
            switch (@typeInfo(A.child)) {
                .Bool, .Int, .Float, .Enum => {},
                else => {
                    for (value) |v| {
                        deserializeFree(allocator, v);
                    }
                },
            }
        },
        .Struct => |S| {
            inline for (S.fields) |field| {
                deserializeFree(allocator, @field(value, field.name));
            }
        },
        .Union => |U| {
            // Free only the active field, matched by tag name.
            inline for (U.fields) |field| {
                if (std.mem.eql(u8, field.name, @tagName(value))) {
                    deserializeFree(allocator, @field(value, field.name));
                    break;
                }
            }
        },
        else => |E| @compileError("Cannot deserialize type " ++ @tagName(E) ++ "!"),
    }
}

// Round-trip test covering scalars, optionals, pointers, slices, arrays,
// nested structs, and tagged unions.
test "serde" {
    const TestEnum = enum {
        a_field,
        b_field,
        c_field,
    };

    const TestInnerStruct = struct {
        i32_field: i32 = -12,
        u64_field: u64 = 16,
    };

    const TestInnerUnion = union(enum) {
        f32_field: f32,
        void_field,
    };

    const TestStruct = struct {
        bool_field: bool = false,
        u32_field: u32 = 0,
        u64_field: u64 = 8,
        f32_field: f32 = 1.0,
        enum_field: TestEnum = .b_field,
        optional_field: ?f32 = null,
        ptr_field: *const f32,
        slice_field: []const u8,
        array_field: [2]u8 = [_]u8{ 1, 2 },
        struct_field: TestInnerStruct = .{},
        union_field: TestInnerUnion = .void_field,
    };

    const test_ptr = try std.testing.allocator.create(f32);
    defer std.testing.allocator.destroy(test_ptr);
    const test_slice = try std.testing.allocator.alloc(u8, 5);
    defer std.testing.allocator.free(test_slice);
    std.mem.copy(u8, test_slice, "hello");

    const value: TestStruct = .{
        .ptr_field = test_ptr,
        .slice_field = test_slice,
    };
    const bytes = try serialize(std.testing.allocator, value);
    defer std.testing.allocator.free(bytes);

    var deserialized_value = try deserialize(
        .{ .allocator = std.testing.allocator },
        TestStruct,
        bytes,
    );
    defer deserializeFree(std.testing.allocator, deserialized_value);

    try std.testing.expectEqual(value.bool_field, deserialized_value.bool_field);
    try std.testing.expectEqual(value.u32_field, deserialized_value.u32_field);
    try std.testing.expectEqual(value.u64_field, deserialized_value.u64_field);
    try std.testing.expectEqual(value.f32_field, deserialized_value.f32_field);
    try std.testing.expectEqual(value.enum_field, deserialized_value.enum_field);
    try std.testing.expectEqual(value.optional_field, deserialized_value.optional_field);
    try std.testing.expectEqual(value.ptr_field.*, deserialized_value.ptr_field.*);
    try std.testing.expectEqualSlices(u8, value.slice_field, deserialized_value.slice_field);
    try std.testing.expectEqualSlices(u8, &value.array_field, &deserialized_value.array_field);
    try std.testing.expectEqual(value.struct_field, deserialized_value.struct_field);
    try std.testing.expectEqual(value.union_field, deserialized_value.union_field);
}
// ---- file: src/serde.zig ----
const std = @import("../std.zig");
const elf = std.elf;
const mem = std.mem;
const fs = std.fs;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const assert = std.debug.assert;
const process = std.process;
const Target = std.Target;
const CrossTarget = std.zig.CrossTarget;
const macos = @import("system/macos.zig");

const is_windows = Target.current.os.tag == .windows;

// Native system search paths (include dirs, lib dirs, rpaths) discovered
// from the environment; every stored string is owned by this struct.
pub const NativePaths = struct {
    include_dirs: ArrayList([:0]u8),
    lib_dirs: ArrayList([:0]u8),
    rpaths: ArrayList([:0]u8),
    warnings: ArrayList([:0]u8),

    // Detect native search paths: Nix environment variables take priority;
    // otherwise, on non-Windows, a set of conventional Unix directories.
    pub fn detect(allocator: *Allocator) !NativePaths {
        var self: NativePaths = .{
            .include_dirs = ArrayList([:0]u8).init(allocator),
            .lib_dirs = ArrayList([:0]u8).init(allocator),
            .rpaths = ArrayList([:0]u8).init(allocator),
            .warnings = ArrayList([:0]u8).init(allocator),
        };
        errdefer self.deinit();

        var is_nix = false;
        if (process.getEnvVarOwned(allocator, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| {
            defer allocator.free(nix_cflags_compile);

            is_nix = true;
            // Pull `-isystem <path>` pairs out of the Nix compile flags.
            var it = mem.tokenize(nix_cflags_compile, " ");
            while (true) {
                const word = it.next() orelse break;
                if (mem.eql(u8, word, "-isystem")) {
                    const include_path = it.next() orelse {
                        try self.addWarning("Expected argument after -isystem in NIX_CFLAGS_COMPILE");
                        break;
                    };
                    try self.addIncludeDir(include_path);
                } else {
                    try self.addWarningFmt("Unrecognized C flag from NIX_CFLAGS_COMPILE: {}", .{word});
                    break;
                }
            }
        } else |err| switch (err) {
            error.InvalidUtf8 => {},
            error.EnvironmentVariableNotFound => {},
            error.OutOfMemory => |e| return e,
        }
        if (process.getEnvVarOwned(allocator, "NIX_LDFLAGS")) |nix_ldflags| {
            defer allocator.free(nix_ldflags);

            is_nix = true;
            // Pull `-rpath <path>` pairs and `-L<dir>` flags out of the Nix
            // linker flags.
            var it = mem.tokenize(nix_ldflags, " ");
            while (true) {
                const word = it.next() orelse break;
                if (mem.eql(u8, word, "-rpath")) {
                    const rpath = it.next() orelse {
                        try self.addWarning("Expected argument after -rpath in NIX_LDFLAGS");
                        break;
                    };
                    try self.addRPath(rpath);
                } else if (word.len > 2 and word[0] == '-' and word[1] == 'L') {
                    const lib_path = word[2..];
                    try self.addLibDir(lib_path);
                } else {
                    try self.addWarningFmt("Unrecognized C flag from NIX_LDFLAGS: {}", .{word});
                    break;
                }
            }
        } else |err| switch (err) {
            error.InvalidUtf8 => {},
            error.EnvironmentVariableNotFound => {},
            error.OutOfMemory => |e| return e,
        }
        // A Nix environment fully determines the paths; skip the defaults.
        if (is_nix) {
            return self;
        }

        if (!is_windows) {
            const triple = try Target.current.linuxTriple(allocator);
            // Pointer width qualifier ("32"/"64") for lib{32,64} directories.
            const qual = Target.current.cpu.arch.ptrBitWidth();

            // TODO: $ ld --verbose | grep SEARCH_DIR
            // the output contains some paths that end with lib64, maybe include them too?
            // TODO: what is the best possible order of things?
            // TODO: some of these are suspect and should only be added on some systems. audit needed.

            try self.addIncludeDir("/usr/local/include");
            try self.addLibDirFmt("/usr/local/lib{}", .{qual});
            try self.addLibDir("/usr/local/lib");

            try self.addIncludeDirFmt("/usr/include/{}", .{triple});
            try self.addLibDirFmt("/usr/lib/{}", .{triple});

            try self.addIncludeDir("/usr/include");
            try self.addLibDirFmt("/lib{}", .{qual});
            try self.addLibDir("/lib");
            try self.addLibDirFmt("/usr/lib{}", .{qual});
            try self.addLibDir("/usr/lib");

            // example: on a 64-bit debian-based linux distro, with zlib installed from apt:
            // zlib.h is in /usr/include (added above)
            // libz.so.1 is in /lib/x86_64-linux-gnu (added here)
            try self.addLibDirFmt("/lib/{}", .{triple});
        }

        return self;
    }

    // Free every stored string and the lists themselves.
    pub fn deinit(self: *NativePaths) void {
        deinitArray(&self.include_dirs);
        deinitArray(&self.lib_dirs);
        deinitArray(&self.rpaths);
        deinitArray(&self.warnings);
        self.* = undefined;
    }

    fn deinitArray(array: *ArrayList([:0]u8)) void {
        for (array.span()) |item| {
            array.allocator.free(item);
        }
        array.deinit();
    }

    pub fn addIncludeDir(self: *NativePaths, s: []const u8) !void {
        return self.appendArray(&self.include_dirs, s);
    }

    pub fn addIncludeDirFmt(self: *NativePaths, comptime fmt: []const u8, args: anytype) !void {
        const item = try std.fmt.allocPrint0(self.include_dirs.allocator, fmt, args);
        errdefer self.include_dirs.allocator.free(item);
        try self.include_dirs.append(item);
    }

    pub fn addLibDir(self: *NativePaths, s: []const u8) !void {
        return self.appendArray(&self.lib_dirs, s);
    }

    pub fn addLibDirFmt(self: *NativePaths, comptime fmt: []const u8, args: anytype) !void {
        const item = try std.fmt.allocPrint0(self.lib_dirs.allocator, fmt, args);
        errdefer self.lib_dirs.allocator.free(item);
        try self.lib_dirs.append(item);
    }

    pub fn addWarning(self: *NativePaths, s: []const u8) !void {
        return self.appendArray(&self.warnings, s);
    }

    pub fn addWarningFmt(self: *NativePaths, comptime fmt: []const u8, args: anytype) !void {
        const item = try std.fmt.allocPrint0(self.warnings.allocator, fmt, args);
        errdefer self.warnings.allocator.free(item);
        try self.warnings.append(item);
    }

    pub fn addRPath(self: *NativePaths, s: []const u8) !void {
        return self.appendArray(&self.rpaths, s);
    }

    // Duplicate `s` with a NUL terminator and append it to `array`.
    fn appendArray(self: *NativePaths, array: *ArrayList([:0]u8), s: []const u8) !void {
        const item = try array.allocator.dupeZ(u8, s);
        errdefer array.allocator.free(item);
        try array.append(item);
    }
};

// Result of native target detection (continues past this chunk).
pub const NativeTargetInfo = struct {
    target: Target,

    dynamic_linker: DynamicLinker = DynamicLinker{},

    /// Only some architectures have CPU detection implemented. This field reveals whether
    /// CPU detection actually occurred. When this is `true` it means that the reported
    /// CPU is baseline only because of a missing implementation for that architecture.
cpu_detection_unimplemented: bool = false,

    pub const DynamicLinker = Target.DynamicLinker;

    pub const DetectError = error{
        OutOfMemory,
        FileSystem,
        SystemResources,
        SymLinkLoop,
        ProcessFdQuotaExceeded,
        SystemFdQuotaExceeded,
        DeviceBusy,
    };

    /// Given a `CrossTarget`, which specifies in detail which parts of the target should be detected
    /// natively, which should be standard or default, and which are provided explicitly, this function
    /// resolves the native components by detecting the native system, and then resolves standard/default parts
    /// relative to that.
    /// Any resources this function allocates are released before returning, and so there is no
    /// deinitialization method.
    /// TODO Remove the Allocator requirement from this function.
    pub fn detect(allocator: *Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
        var os = cross_target.getOsTag().defaultVersionRange();
        if (cross_target.os_tag == null) {
            switch (Target.current.os.tag) {
                .linux => {
                    const uts = std.os.uname();
                    const release = mem.spanZ(&uts.release);
                    // The release field may have several other fields after the
                    // kernel version
                    const kernel_version = if (mem.indexOfScalar(u8, release, '-')) |pos|
                        release[0..pos]
                    else if (mem.indexOfScalar(u8, release, '_')) |pos|
                        release[0..pos]
                    else
                        release;
                    if (std.builtin.Version.parse(kernel_version)) |ver| {
                        os.version_range.linux.range.min = ver;
                        os.version_range.linux.range.max = ver;
                    } else |err| switch (err) {
                        // Unparseable release strings fall back to the default range.
                        error.Overflow => {},
                        error.InvalidCharacter => {},
                        error.InvalidVersion => {},
                    }
                },
                .windows => {
                    var version_info: std.os.windows.RTL_OSVERSIONINFOW = undefined;
                    version_info.dwOSVersionInfoSize = @sizeOf(@TypeOf(version_info));

                    switch (std.os.windows.ntdll.RtlGetVersion(&version_info)) {
                        .SUCCESS => {},
                        else => unreachable,
                    }

                    // Starting from the system infos build a NTDDI-like version
                    // constant whose format is:
                    //   B0 B1 B2 B3
                    //   `---' `' `'--> Sub-version (Starting from Windows 10 onwards)
                    //     \    `-----> Service pack (Always zero in the constants defined)
                    //      `---------> OS version (Major & minor)
                    const os_ver: u16 = //
                        @intCast(u16, version_info.dwMajorVersion & 0xff) << 8 |
                        @intCast(u16, version_info.dwMinorVersion & 0xff);
                    const sp_ver: u8 = 0;
                    const sub_ver: u8 = if (os_ver >= 0x0A00) subver: {
                        // There's no other way to obtain this info beside
                        // checking the build number against a known set of
                        // values
                        const known_build_numbers = [_]u32{
                            10240, 10586, 14393, 15063, 16299, 17134, 17763,
                            18362, 19041,
                        };
                        var last_idx: usize = 0;
                        for (known_build_numbers) |build, i| {
                            if (version_info.dwBuildNumber >= build)
                                last_idx = i;
                        }
                        break :subver @truncate(u8, last_idx);
                    } else 0;

                    const version: u32 = @as(u32, os_ver) << 16 | @as(u32, sp_ver) << 8 | sub_ver;

                    os.version_range.windows.max = @intToEnum(Target.Os.WindowsVersion, version);
                    os.version_range.windows.min = @intToEnum(Target.Os.WindowsVersion, version);
                },
                .macosx => {
                    var scbuf: [32]u8 = undefined;
                    var size: usize = undefined;

                    // The osproductversion sysctl was introduced first with 10.13.4 High Sierra.
                    const key_osproductversion = "kern.osproductversion"; // eg. "10.15.4"
                    size = scbuf.len;
                    if (std.os.sysctlbynameZ(key_osproductversion, &scbuf, &size, null, 0)) |_| {
                        // Trim the trailing NUL byte the sysctl returns.
                        const string_version = scbuf[0 .. size - 1];
                        if (std.builtin.Version.parse(string_version)) |ver| {
                            os.version_range.semver.min = ver;
                            os.version_range.semver.max = ver;
                        } else |err| switch (err) {
                            error.Overflow => {},
                            error.InvalidCharacter => {},
                            error.InvalidVersion => {},
                        }
                    } else |err| switch (err) {
                        error.UnknownName => {
                            // Pre-10.13.4 fallback: map the build string to a version.
                            const key_osversion = "kern.osversion"; // eg. "19E287"
                            size = scbuf.len;
                            std.os.sysctlbynameZ(key_osversion, &scbuf, &size, null, 0) catch {
                                @panic("unable to detect macOS version: " ++ key_osversion);
                            };
                            if (macos.version_from_build(scbuf[0 .. size - 1])) |ver| {
                                os.version_range.semver.min = ver;
                                os.version_range.semver.max = ver;
                            } else |_| {}
                        },
                        else => @panic("unable to detect macOS version: " ++ key_osproductversion),
                    }
                },
                .freebsd => {
                    var osreldate: u32 = undefined;
                    var len: usize = undefined;

                    std.os.sysctlbynameZ("kern.osreldate", &osreldate, &len, null, 0) catch |err| switch (err) {
                        error.NameTooLong => unreachable, // constant, known good value
                        error.PermissionDenied => unreachable, // only when setting values,
                        error.SystemResources => unreachable, // memory already on the stack
                        error.UnknownName => unreachable, // constant, known good value
                        error.Unexpected => unreachable, // EFAULT: stack should be safe, EISDIR/ENOTDIR: constant, known good value
                    };

                    // https://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/versions.html
                    // Major * 100,000 has been convention since FreeBSD 2.2 (1997)
                    // Minor * 1(0),000 summed has been convention since FreeBSD 2.2 (1997)
                    // e.g. 492101 = 4.11-STABLE = 4.(9+2)
                    const major = osreldate / 100_000;
                    const minor1 = osreldate % 100_000 / 10_000; // usually 0 since 5.1
                    const minor2 = osreldate % 10_000 / 1_000; // 0 before 5.1, minor version since
                    const patch = osreldate % 1_000;
                    os.version_range.semver.min = .{ .major = major, .minor = minor1 + minor2, .patch = patch };
                    os.version_range.semver.max = .{ .major = major, .minor = minor1 + minor2, .patch = patch };
                },
                else => {
                    // Unimplemented, fall back to default version range.
                },
            }
        }

        // Explicit version bounds from the CrossTarget override detection.
        if (cross_target.os_version_min) |min| switch (min) {
            .none => {},
            .semver => |semver| switch (cross_target.getOsTag()) {
                .linux => os.version_range.linux.range.min = semver,
                else => os.version_range.semver.min = semver,
            },
            .windows => |win_ver| os.version_range.windows.min = win_ver,
        };

        if (cross_target.os_version_max) |max| switch (max) {
            .none => {},
            .semver => |semver| switch (cross_target.getOsTag()) {
                .linux => os.version_range.linux.range.max = semver,
                else => os.version_range.semver.max = semver,
            },
            .windows => |win_ver| os.version_range.windows.max = win_ver,
        };

        if (cross_target.glibc_version) |glibc| {
            assert(cross_target.isGnuLibC());
            os.version_range.linux.glibc = glibc;
        }

        var cpu_detection_unimplemented = false;

        // Until https://github.com/ziglang/zig/issues/4592 is implemented (support detecting the
        // native CPU architecture as being different than the current target), we use this:
        const cpu_arch = cross_target.getCpuArch();

        var cpu = switch (cross_target.cpu_model) {
            .native => detectNativeCpuAndFeatures(cpu_arch, os, cross_target),
            .baseline => Target.Cpu.baseline(cpu_arch),
            .determined_by_cpu_arch => if (cross_target.cpu_arch == null)
                detectNativeCpuAndFeatures(cpu_arch, os, cross_target)
            else
                Target.Cpu.baseline(cpu_arch),
            .explicit => |model| model.toCpu(cpu_arch),
        } orelse backup_cpu_detection: {
            // Detection not implemented for this architecture: report baseline.
            cpu_detection_unimplemented = true;
            break :backup_cpu_detection Target.Cpu.baseline(cpu_arch);
        };
        cross_target.updateCpuFeatures(&cpu.features);

        var target = try detectAbiAndDynamicLinker(allocator, cpu, os, cross_target);
        target.cpu_detection_unimplemented = cpu_detection_unimplemented;
        return target;
    }

    /// First we attempt to use the executable's own binary. If it is dynamically
    /// linked, then it should answer both the C ABI question and the dynamic linker question.
    /// If it is statically linked, then we try /usr/bin/env. If that does not provide the answer, then
    /// we fall back to the defaults.
/// TODO Remove the Allocator requirement from this function. fn detectAbiAndDynamicLinker( allocator: *Allocator, cpu: Target.Cpu, os: Target.Os, cross_target: CrossTarget, ) DetectError!NativeTargetInfo { const native_target_has_ld = comptime Target.current.hasDynamicLinker(); const is_linux = Target.current.os.tag == .linux; const have_all_info = cross_target.dynamic_linker.get() != null and cross_target.abi != null and (!is_linux or cross_target.abi.?.isGnu()); const os_is_non_native = cross_target.os_tag != null; if (!native_target_has_ld or have_all_info or os_is_non_native) { return defaultAbiAndDynamicLinker(cpu, os, cross_target); } if (cross_target.abi) |abi| { if (abi.isMusl()) { // musl implies static linking. return defaultAbiAndDynamicLinker(cpu, os, cross_target); } } // The current target's ABI cannot be relied on for this. For example, we may build the zig // compiler for target riscv64-linux-musl and provide a tarball for users to download. // A user could then run that zig compiler on riscv64-linux-gnu. This use case is well-defined // and supported by Zig. But that means that we must detect the system ABI here rather than // relying on `Target.current`. const all_abis = comptime blk: { assert(@enumToInt(Target.Abi.none) == 0); const fields = std.meta.fields(Target.Abi)[1..]; var array: [fields.len]Target.Abi = undefined; inline for (fields) |field, i| { array[i] = @field(Target.Abi, field.name); } break :blk array; }; var ld_info_list_buffer: [all_abis.len]LdInfo = undefined; var ld_info_list_len: usize = 0; for (all_abis) |abi| { // This may be a nonsensical parameter. We detect this with error.UnknownDynamicLinkerPath and // skip adding it to `ld_info_list`. 
const target: Target = .{ .cpu = cpu, .os = os, .abi = abi, }; const ld = target.standardDynamicLinkerPath(); if (ld.get() == null) continue; ld_info_list_buffer[ld_info_list_len] = .{ .ld = ld, .abi = abi, }; ld_info_list_len += 1; } const ld_info_list = ld_info_list_buffer[0..ld_info_list_len]; if (cross_target.dynamic_linker.get()) |explicit_ld| { const explicit_ld_basename = fs.path.basename(explicit_ld); for (ld_info_list) |ld_info| { const standard_ld_basename = fs.path.basename(ld_info.ld.get().?); } } // Best case scenario: the executable is dynamically linked, and we can iterate // over our own shared objects and find a dynamic linker. self_exe: { const lib_paths = try std.process.getSelfExeSharedLibPaths(allocator); defer { for (lib_paths) |lib_path| { allocator.free(lib_path); } allocator.free(lib_paths); } var found_ld_info: LdInfo = undefined; var found_ld_path: [:0]const u8 = undefined; // Look for dynamic linker. // This is O(N^M) but typical case here is N=2 and M=10. find_ld: for (lib_paths) |lib_path| { for (ld_info_list) |ld_info| { const standard_ld_basename = fs.path.basename(ld_info.ld.get().?); if (std.mem.endsWith(u8, lib_path, standard_ld_basename)) { found_ld_info = ld_info; found_ld_path = lib_path; break :find_ld; } } } else break :self_exe; // Look for glibc version. 
var os_adjusted = os; if (Target.current.os.tag == .linux and found_ld_info.abi.isGnu() and cross_target.glibc_version == null) { for (lib_paths) |lib_path| { if (std.mem.endsWith(u8, lib_path, glibc_so_basename)) { os_adjusted.version_range.linux.glibc = glibcVerFromSO(lib_path) catch |err| switch (err) { error.UnrecognizedGnuLibCFileName => continue, error.InvalidGnuLibCVersion => continue, error.GnuLibCVersionUnavailable => continue, else => |e| return e, }; break; } } } var result: NativeTargetInfo = .{ .target = .{ .cpu = cpu, .os = os_adjusted, .abi = cross_target.abi orelse found_ld_info.abi, }, .dynamic_linker = if (cross_target.dynamic_linker.get() == null) DynamicLinker.init(found_ld_path) else cross_target.dynamic_linker, }; return result; } const env_file = std.fs.openFileAbsoluteZ("/usr/bin/env", .{}) catch |err| switch (err) { error.NoSpaceLeft => unreachable, error.NameTooLong => unreachable, error.PathAlreadyExists => unreachable, error.SharingViolation => unreachable, error.InvalidUtf8 => unreachable, error.BadPathName => unreachable, error.PipeBusy => unreachable, error.FileLocksNotSupported => unreachable, error.WouldBlock => unreachable, error.IsDir, error.NotDir, error.AccessDenied, error.NoDevice, error.FileNotFound, error.FileTooBig, error.Unexpected, => return defaultAbiAndDynamicLinker(cpu, os, cross_target), else => |e| return e, }; defer env_file.close(); // If Zig is statically linked, such as via distributed binary static builds, the above // trick won't work. The next thing we fall back to is the same thing, but for /usr/bin/env. // Since that path is hard-coded into the shebang line of many portable scripts, it's a // reasonably reliable path to check for. 
return abiAndDynamicLinkerFromFile(env_file, cpu, os, ld_info_list, cross_target) catch |err| switch (err) { error.FileSystem, error.SystemResources, error.SymLinkLoop, error.ProcessFdQuotaExceeded, error.SystemFdQuotaExceeded, => |e| return e, error.UnableToReadElfFile, error.InvalidElfClass, error.InvalidElfVersion, error.InvalidElfEndian, error.InvalidElfFile, error.InvalidElfMagic, error.Unexpected, error.UnexpectedEndOfFile, error.NameTooLong, // Finally, we fall back on the standard path. => defaultAbiAndDynamicLinker(cpu, os, cross_target), }; } const glibc_so_basename = "libc.so.6"; fn glibcVerFromSO(so_path: [:0]const u8) !std.builtin.Version { var link_buf: [std.os.PATH_MAX]u8 = undefined; const link_name = std.os.readlinkZ(so_path.ptr, &link_buf) catch |err| switch (err) { error.AccessDenied => return error.GnuLibCVersionUnavailable, error.FileSystem => return error.FileSystem, error.SymLinkLoop => return error.SymLinkLoop, error.NameTooLong => unreachable, error.FileNotFound => return error.GnuLibCVersionUnavailable, error.SystemResources => return error.SystemResources, error.NotDir => return error.GnuLibCVersionUnavailable, error.Unexpected => return error.GnuLibCVersionUnavailable, error.InvalidUtf8 => unreachable, // Windows only error.BadPathName => unreachable, // Windows only error.UnsupportedReparsePointType => unreachable, // Windows only }; return glibcVerFromLinkName(link_name); } fn glibcVerFromLinkName(link_name: []const u8) !std.builtin.Version { // example: "libc-2.3.4.so" // example: "libc-2.27.so" const prefix = "libc-"; const suffix = ".so"; if (!mem.startsWith(u8, link_name, prefix) or !mem.endsWith(u8, link_name, suffix)) { return error.UnrecognizedGnuLibCFileName; } // chop off "libc-" and ".so" const link_name_chopped = link_name[prefix.len .. 
link_name.len - suffix.len]; return std.builtin.Version.parse(link_name_chopped) catch |err| switch (err) { error.Overflow => return error.InvalidGnuLibCVersion, error.InvalidCharacter => return error.InvalidGnuLibCVersion, error.InvalidVersion => return error.InvalidGnuLibCVersion, }; } pub const AbiAndDynamicLinkerFromFileError = error{ FileSystem, SystemResources, SymLinkLoop, ProcessFdQuotaExceeded, SystemFdQuotaExceeded, UnableToReadElfFile, InvalidElfClass, InvalidElfVersion, InvalidElfEndian, InvalidElfFile, InvalidElfMagic, Unexpected, UnexpectedEndOfFile, NameTooLong, }; pub fn abiAndDynamicLinkerFromFile( file: fs.File, cpu: Target.Cpu, os: Target.Os, ld_info_list: []const LdInfo, cross_target: CrossTarget, ) AbiAndDynamicLinkerFromFileError!NativeTargetInfo { var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 align(@alignOf(elf.Elf64_Ehdr)) = undefined; _ = try preadMin(file, &hdr_buf, 0, hdr_buf.len); const hdr32 = @ptrCast(*elf.Elf32_Ehdr, &hdr_buf); const hdr64 = @ptrCast(*elf.Elf64_Ehdr, &hdr_buf); if (!mem.eql(u8, hdr32.e_ident[0..4], "\x7fELF")) return error.InvalidElfMagic; const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI_DATA]) { elf.ELFDATA2LSB => .Little, elf.ELFDATA2MSB => .Big, else => return error.InvalidElfEndian, }; const need_bswap = elf_endian != std.builtin.endian; if (hdr32.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion; const is_64 = switch (hdr32.e_ident[elf.EI_CLASS]) { elf.ELFCLASS32 => false, elf.ELFCLASS64 => true, else => return error.InvalidElfClass, }; var phoff = elfInt(is_64, need_bswap, hdr32.e_phoff, hdr64.e_phoff); const phentsize = elfInt(is_64, need_bswap, hdr32.e_phentsize, hdr64.e_phentsize); const phnum = elfInt(is_64, need_bswap, hdr32.e_phnum, hdr64.e_phnum); var result: NativeTargetInfo = .{ .target = .{ .cpu = cpu, .os = os, .abi = cross_target.abi orelse Target.Abi.default(cpu.arch, os), }, .dynamic_linker = cross_target.dynamic_linker, }; var rpath_offset: ?u64 = null; // Found inside 
PT_DYNAMIC const look_for_ld = cross_target.dynamic_linker.get() == null; var ph_buf: [16 * @sizeOf(elf.Elf64_Phdr)]u8 align(@alignOf(elf.Elf64_Phdr)) = undefined; if (phentsize > @sizeOf(elf.Elf64_Phdr)) return error.InvalidElfFile; var ph_i: u16 = 0; while (ph_i < phnum) { // Reserve some bytes so that we can deref the 64-bit struct fields // even when the ELF file is 32-bits. const ph_reserve: usize = @sizeOf(elf.Elf64_Phdr) - @sizeOf(elf.Elf32_Phdr); const ph_read_byte_len = try preadMin(file, ph_buf[0 .. ph_buf.len - ph_reserve], phoff, phentsize); var ph_buf_i: usize = 0; while (ph_buf_i < ph_read_byte_len and ph_i < phnum) : ({ ph_i += 1; phoff += phentsize; ph_buf_i += phentsize; }) { const ph32 = @ptrCast(*elf.Elf32_Phdr, @alignCast(@alignOf(elf.Elf32_Phdr), &ph_buf[ph_buf_i])); const ph64 = @ptrCast(*elf.Elf64_Phdr, @alignCast(@alignOf(elf.Elf64_Phdr), &ph_buf[ph_buf_i])); const p_type = elfInt(is_64, need_bswap, ph32.p_type, ph64.p_type); switch (p_type) { elf.PT_INTERP => if (look_for_ld) { const p_offset = elfInt(is_64, need_bswap, ph32.p_offset, ph64.p_offset); const p_filesz = elfInt(is_64, need_bswap, ph32.p_filesz, ph64.p_filesz); if (p_filesz > result.dynamic_linker.buffer.len) return error.NameTooLong; const filesz = @intCast(usize, p_filesz); _ = try preadMin(file, result.dynamic_linker.buffer[0..filesz], p_offset, filesz); // PT_INTERP includes a null byte in filesz. const len = filesz - 1; // dynamic_linker.max_byte is "max", not "len". // We know it will fit in u8 because we check against dynamic_linker.buffer.len above. result.dynamic_linker.max_byte = @intCast(u8, len - 1); // Use it to determine ABI. const full_ld_path = result.dynamic_linker.buffer[0..len]; for (ld_info_list) |ld_info| { const standard_ld_basename = fs.path.basename(ld_info.ld.get().?); if (std.mem.endsWith(u8, full_ld_path, standard_ld_basename)) { result.target.abi = ld_info.abi; break; } } }, // We only need this for detecting glibc version. 
elf.PT_DYNAMIC => if (Target.current.os.tag == .linux and result.target.isGnuLibC() and cross_target.glibc_version == null) { var dyn_off = elfInt(is_64, need_bswap, ph32.p_offset, ph64.p_offset); const p_filesz = elfInt(is_64, need_bswap, ph32.p_filesz, ph64.p_filesz); const dyn_size: usize = if (is_64) @sizeOf(elf.Elf64_Dyn) else @sizeOf(elf.Elf32_Dyn); const dyn_num = p_filesz / dyn_size; var dyn_buf: [16 * @sizeOf(elf.Elf64_Dyn)]u8 align(@alignOf(elf.Elf64_Dyn)) = undefined; var dyn_i: usize = 0; dyn: while (dyn_i < dyn_num) { // Reserve some bytes so that we can deref the 64-bit struct fields // even when the ELF file is 32-bits. const dyn_reserve: usize = @sizeOf(elf.Elf64_Dyn) - @sizeOf(elf.Elf32_Dyn); const dyn_read_byte_len = try preadMin( file, dyn_buf[0 .. dyn_buf.len - dyn_reserve], dyn_off, dyn_size, ); var dyn_buf_i: usize = 0; while (dyn_buf_i < dyn_read_byte_len and dyn_i < dyn_num) : ({ dyn_i += 1; dyn_off += dyn_size; dyn_buf_i += dyn_size; }) { const dyn32 = @ptrCast( *elf.Elf32_Dyn, @alignCast(@alignOf(elf.Elf32_Dyn), &dyn_buf[dyn_buf_i]), ); const dyn64 = @ptrCast( *elf.Elf64_Dyn, @alignCast(@alignOf(elf.Elf64_Dyn), &dyn_buf[dyn_buf_i]), ); const tag = elfInt(is_64, need_bswap, dyn32.d_tag, dyn64.d_tag); const val = elfInt(is_64, need_bswap, dyn32.d_val, dyn64.d_val); if (tag == elf.DT_RUNPATH) { rpath_offset = val; break :dyn; } } } }, else => continue, } } } if (Target.current.os.tag == .linux and result.target.isGnuLibC() and cross_target.glibc_version == null) { if (rpath_offset) |rpoff| { const shstrndx = elfInt(is_64, need_bswap, hdr32.e_shstrndx, hdr64.e_shstrndx); var shoff = elfInt(is_64, need_bswap, hdr32.e_shoff, hdr64.e_shoff); const shentsize = elfInt(is_64, need_bswap, hdr32.e_shentsize, hdr64.e_shentsize); const str_section_off = shoff + @as(u64, shentsize) * @as(u64, shstrndx); var sh_buf: [16 * @sizeOf(elf.Elf64_Shdr)]u8 align(@alignOf(elf.Elf64_Shdr)) = undefined; if (sh_buf.len < shentsize) return error.InvalidElfFile; _ = 
try preadMin(file, &sh_buf, str_section_off, shentsize); const shstr32 = @ptrCast(*elf.Elf32_Shdr, @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf)); const shstr64 = @ptrCast(*elf.Elf64_Shdr, @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf)); const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset); const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size); var strtab_buf: [4096:0]u8 = undefined; const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len); const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len); const shstrtab = strtab_buf[0..shstrtab_read_len]; const shnum = elfInt(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum); var sh_i: u16 = 0; const dynstr: ?struct { offset: u64, size: u64 } = find_dyn_str: while (sh_i < shnum) { // Reserve some bytes so that we can deref the 64-bit struct fields // even when the ELF file is 32-bits. const sh_reserve: usize = @sizeOf(elf.Elf64_Shdr) - @sizeOf(elf.Elf32_Shdr); const sh_read_byte_len = try preadMin( file, sh_buf[0 .. 
sh_buf.len - sh_reserve], shoff, shentsize, ); var sh_buf_i: usize = 0; while (sh_buf_i < sh_read_byte_len and sh_i < shnum) : ({ sh_i += 1; shoff += shentsize; sh_buf_i += shentsize; }) { const sh32 = @ptrCast( *elf.Elf32_Shdr, @alignCast(@alignOf(elf.Elf32_Shdr), &sh_buf[sh_buf_i]), ); const sh64 = @ptrCast( *elf.Elf64_Shdr, @alignCast(@alignOf(elf.Elf64_Shdr), &sh_buf[sh_buf_i]), ); const sh_name_off = elfInt(is_64, need_bswap, sh32.sh_name, sh64.sh_name); // TODO this pointer cast should not be necessary const sh_name = mem.spanZ(@ptrCast([*:0]u8, shstrtab[sh_name_off..].ptr)); if (mem.eql(u8, sh_name, ".dynstr")) { break :find_dyn_str .{ .offset = elfInt(is_64, need_bswap, sh32.sh_offset, sh64.sh_offset), .size = elfInt(is_64, need_bswap, sh32.sh_size, sh64.sh_size), }; } } } else null; if (dynstr) |ds| { const strtab_len = std.math.min(ds.size, strtab_buf.len); const strtab_read_len = try preadMin(file, &strtab_buf, ds.offset, shstrtab_len); const strtab = strtab_buf[0..strtab_read_len]; // TODO this pointer cast should not be necessary const rpoff_usize = std.math.cast(usize, rpoff) catch |err| switch (err) { error.Overflow => return error.InvalidElfFile, }; const rpath_list = mem.spanZ(@ptrCast([*:0]u8, strtab[rpoff_usize..].ptr)); var it = mem.tokenize(rpath_list, ":"); while (it.next()) |rpath| { var dir = fs.cwd().openDir(rpath, .{}) catch |err| switch (err) { error.NameTooLong => unreachable, error.InvalidUtf8 => unreachable, error.BadPathName => unreachable, error.DeviceBusy => unreachable, error.FileNotFound, error.NotDir, error.AccessDenied, error.NoDevice, => continue, error.ProcessFdQuotaExceeded, error.SystemFdQuotaExceeded, error.SystemResources, error.SymLinkLoop, error.Unexpected, => |e| return e, }; defer dir.close(); var link_buf: [std.os.PATH_MAX]u8 = undefined; const link_name = std.os.readlinkatZ( dir.fd, glibc_so_basename, &link_buf, ) catch |err| switch (err) { error.NameTooLong => unreachable, error.InvalidUtf8 => unreachable, // 
Windows only error.BadPathName => unreachable, // Windows only error.UnsupportedReparsePointType => unreachable, // Windows only error.AccessDenied, error.FileNotFound, error.NotDir, => continue, error.SystemResources, error.FileSystem, error.SymLinkLoop, error.Unexpected, => |e| return e, }; result.target.os.version_range.linux.glibc = glibcVerFromLinkName( link_name, ) catch |err| switch (err) { error.UnrecognizedGnuLibCFileName, error.InvalidGnuLibCVersion, => continue, }; break; } } } } return result; } fn preadMin(file: fs.File, buf: []u8, offset: u64, min_read_len: usize) !usize { var i: usize = 0; while (i < min_read_len) { const len = file.pread(buf[i .. buf.len - i], offset + i) catch |err| switch (err) { error.OperationAborted => unreachable, // Windows-only error.WouldBlock => unreachable, // Did not request blocking mode error.NotOpenForReading => unreachable, error.SystemResources => return error.SystemResources, error.IsDir => return error.UnableToReadElfFile, error.BrokenPipe => return error.UnableToReadElfFile, error.Unseekable => return error.UnableToReadElfFile, error.ConnectionResetByPeer => return error.UnableToReadElfFile, error.ConnectionTimedOut => return error.UnableToReadElfFile, error.Unexpected => return error.Unexpected, error.InputOutput => return error.FileSystem, error.AccessDenied => return error.Unexpected, }; if (len == 0) return error.UnexpectedEndOfFile; i += len; } return i; } fn defaultAbiAndDynamicLinker(cpu: Target.Cpu, os: Target.Os, cross_target: CrossTarget) !NativeTargetInfo { const target: Target = .{ .cpu = cpu, .os = os, .abi = cross_target.abi orelse Target.Abi.default(cpu.arch, os), }; return NativeTargetInfo{ .target = target, .dynamic_linker = if (cross_target.dynamic_linker.get() == null) target.standardDynamicLinkerPath() else cross_target.dynamic_linker, }; } pub const LdInfo = struct { ld: DynamicLinker, abi: Target.Abi, }; pub fn elfInt(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) 
@TypeOf(int_64) { if (is_64) { if (need_bswap) { return @byteSwap(@TypeOf(int_64), int_64); } else { return int_64; } } else { if (need_bswap) { return @byteSwap(@TypeOf(int_32), int_32); } else { return int_32; } } } fn detectNativeCpuAndFeatures(cpu_arch: Target.Cpu.Arch, os: Target.Os, cross_target: CrossTarget) ?Target.Cpu { // Here we switch on a comptime value rather than `cpu_arch`. This is valid because `cpu_arch`, // although it is a runtime value, is guaranteed to be one of the architectures in the set // of the respective switch prong. switch (std.Target.current.cpu.arch) { .x86_64, .i386 => { return @import("system/x86.zig").detectNativeCpuAndFeatures(cpu_arch, os, cross_target); }, else => { // This architecture does not have CPU model & feature detection yet. // See https://github.com/ziglang/zig/issues/4591 return null; }, } } }; test "" { _ = @import("system/macos.zig"); }
lib/std/zig/system.zig
// Time utilities: sleeping, wall-clock timestamps, and the unit-conversion
// constants used throughout this file. (Pre-0.4-era Zig syntax.)
const std = @import("../index.zig");
const builtin = @import("builtin");
const Os = builtin.Os;
const debug = std.debug;
const windows = std.os.windows;
const linux = std.os.linux;
const darwin = std.os.darwin;
const posix = std.os.posix;

pub const epoch = @import("epoch.zig");

/// Sleep for the specified duration.
/// `seconds` and `nanoseconds` are combined; on Windows the total is rounded
/// down to millisecond granularity, the finest unit `Sleep` accepts.
pub fn sleep(seconds: usize, nanoseconds: usize) void {
    switch (builtin.os) {
        Os.linux, Os.macosx, Os.ios => {
            posixSleep(@intCast(u63, seconds), @intCast(u63, nanoseconds));
        },
        Os.windows => {
            const ns_per_ms = ns_per_s / ms_per_s;
            const milliseconds = seconds * ms_per_s + nanoseconds / ns_per_ms;
            windows.Sleep(@intCast(windows.DWORD, milliseconds));
        },
        else => @compileError("Unsupported OS"),
    }
}

// 63-bit unsigned integer: guarantees values also fit the signed 64-bit
// fields of `posix.timespec` without a range check at the call site.
const u63 = @IntType(false, 63);

/// Sleep via repeated `nanosleep`, resuming with the kernel-reported
/// remainder after each signal interruption so the full duration elapses.
pub fn posixSleep(seconds: u63, nanoseconds: u63) void {
    var req = posix.timespec{
        .tv_sec = seconds,
        .tv_nsec = nanoseconds,
    };
    var rem: posix.timespec = undefined;
    while (true) {
        const ret_val = posix.nanosleep(&req, &rem);
        const err = posix.getErrno(ret_val);
        if (err == 0) return;
        switch (err) {
            // Both pointers are valid stack addresses, so EFAULT is impossible.
            posix.EFAULT => unreachable,
            posix.EINVAL => {
                // Sometimes Darwin returns EINVAL for no reason.
                // We treat it as a spurious wakeup.
                return;
            },
            posix.EINTR => {
                // Interrupted by a signal; sleep for the remaining time.
                req = rem;
                continue;
            },
            // Best effort: any other error simply ends the sleep early.
            else => return,
        }
    }
}

/// Get the posix timestamp, UTC, in seconds.
pub fn timestamp() u64 {
    return @divFloor(milliTimestamp(), ms_per_s);
}

/// Get the posix timestamp, UTC, in milliseconds.
/// Resolved at compile time to the platform-specific implementation below.
pub const milliTimestamp = switch (builtin.os) {
    Os.windows => milliTimestampWindows,
    Os.linux => milliTimestampPosix,
    Os.macosx, Os.ios => milliTimestampDarwin,
    else => @compileError("Unsupported OS"),
};

fn milliTimestampWindows() u64 {
    // FILETIME has a granularity of 100 nanoseconds
    // and uses the NTFS/Windows epoch (not the posix epoch).
    var ft: windows.FILETIME = undefined;
    windows.GetSystemTimeAsFileTime(&ft);
    const hns_per_ms = (ns_per_s / 100) / ms_per_s;
    // NOTE(review): presumably `epoch.windows` is the (negative) posix
    // timestamp of the Windows epoch, hence the `- -epoch_adj` double
    // negation adding the offset back — confirm against epoch.zig.
    const epoch_adj = epoch.windows * ms_per_s;
    const ft64 = (u64(ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
    return @divFloor(ft64, hns_per_ms) - -epoch_adj;
}

fn milliTimestampDarwin() u64 {
    // Sources suggest MacOS 10.12 has support for
    // posix clock_gettime; gettimeofday works on older releases too.
    var tv: darwin.timeval = undefined;
    var err = darwin.gettimeofday(&tv, null);
    debug.assert(err == 0);
    const sec_ms = @intCast(u64, tv.tv_sec) * ms_per_s;
    const usec_ms = @divFloor(@intCast(u64, tv.tv_usec), us_per_s / ms_per_s);
    return u64(sec_ms) + u64(usec_ms);
}

fn milliTimestampPosix() u64 {
    // From what I can tell there's no reason clock_gettime
    // should ever fail for us with CLOCK_REALTIME,
    // seccomp aside.
    var ts: posix.timespec = undefined;
    const err = posix.clock_gettime(posix.CLOCK_REALTIME, &ts);
    debug.assert(err == 0);
    const sec_ms = @intCast(u64, ts.tv_sec) * ms_per_s;
    const nsec_ms = @divFloor(@intCast(u64, ts.tv_nsec), ns_per_s / ms_per_s);
    return sec_ms + nsec_ms;
}

/// Divisions of a second.
pub const ns_per_s = 1000000000;
pub const us_per_s = 1000000;
pub const ms_per_s = 1000;
pub const cs_per_s = 100;

/// Common time divisions.
pub const s_per_min = 60;
pub const s_per_hour = s_per_min * 60;
pub const s_per_day = s_per_hour * 24;
pub const s_per_week = s_per_day * 7;

/// A monotonic high-performance timer.
/// Timer.start() must be called to initialize the struct, which captures
///   the counter frequency on windows and darwin, records the resolution,
///   and gives the user an opportunity to check for the existence of
///   monotonic clocks without forcing them to check for error on each read.
/// .resolution is in nanoseconds on all platforms but .start_time's meaning
///   depends on the OS. On Windows and Darwin it is a hardware counter
///   value that requires calculation to convert to a meaningful unit.
pub const Timer = struct {
    // If we used resolution's value when performing the
    // performance counter calc on windows/darwin, it would
    // be less precise.
    frequency: switch (builtin.os) {
        Os.windows => u64,
        Os.macosx, Os.ios => darwin.mach_timebase_info_data,
        else => void,
    },
    resolution: u64,
    start_time: u64,

    // At some point we may change our minds on RAW, but for now we're
    // sticking with posix standard MONOTONIC. For more information, see:
    // https://github.com/ziglang/zig/pull/933
    //
    //const monotonic_clock_id = switch(builtin.os) {
    //    Os.linux => linux.CLOCK_MONOTONIC_RAW,
    //    else => posix.CLOCK_MONOTONIC,
    //};
    const monotonic_clock_id = posix.CLOCK_MONOTONIC;

    const TimerError = error{
        TimerUnsupported,
        Unexpected,
    };

    /// Initialize the timer structure.
    // This gives us an opportunity to grab the counter frequency in windows.
    // On Windows: QueryPerformanceCounter will succeed on anything >= XP/2000.
    // On Posix: CLOCK_MONOTONIC will only fail if the monotonic counter is not
    //   supported, or if the timespec pointer is out of bounds, which should be
    //   impossible here barring cosmic rays or other such occurrences of
    //   incredibly bad luck.
    // On Darwin: This cannot fail, as far as I am able to tell.
    pub fn start() TimerError!Timer {
        var self: Timer = undefined;

        switch (builtin.os) {
            Os.windows => {
                var freq: i64 = undefined;
                var err = windows.QueryPerformanceFrequency(&freq);
                if (err == windows.FALSE) return error.TimerUnsupported;
                self.frequency = @intCast(u64, freq);
                // Resolution derived from the counter frequency (ticks/sec).
                self.resolution = @divFloor(ns_per_s, self.frequency);

                var start_time: i64 = undefined;
                err = windows.QueryPerformanceCounter(&start_time);
                debug.assert(err != windows.FALSE);
                self.start_time = @intCast(u64, start_time);
            },
            Os.linux => {
                // On Linux, seccomp can do arbitrary things to our ability to call
                // syscalls, including return any errno value it wants and
                // inconsistently throwing errors. Since we can't account for
                // abuses of seccomp in a reasonable way, we'll assume that if
                // seccomp is going to block us it will at least do so consistently.
                var ts: posix.timespec = undefined;
                var result = posix.clock_getres(monotonic_clock_id, &ts);
                var errno = posix.getErrno(result);
                switch (errno) {
                    0 => {},
                    posix.EINVAL => return error.TimerUnsupported,
                    else => return std.os.unexpectedErrorPosix(errno),
                }
                self.resolution = @intCast(u64, ts.tv_sec) * u64(ns_per_s) + @intCast(u64, ts.tv_nsec);

                result = posix.clock_gettime(monotonic_clock_id, &ts);
                errno = posix.getErrno(result);
                if (errno != 0) return std.os.unexpectedErrorPosix(errno);
                self.start_time = @intCast(u64, ts.tv_sec) * u64(ns_per_s) + @intCast(u64, ts.tv_nsec);
            },
            Os.macosx, Os.ios => {
                // mach_timebase_info yields the numer/denom pair used to
                // convert mach_absolute_time ticks to nanoseconds.
                darwin.mach_timebase_info(&self.frequency);
                self.resolution = @divFloor(self.frequency.numer, self.frequency.denom);
                self.start_time = darwin.mach_absolute_time();
            },
            else => @compileError("Unsupported OS"),
        }
        return self;
    }

    /// Reads the timer value since start or the last reset in nanoseconds.
    pub fn read(self: *Timer) u64 {
        var clock = clockNative() - self.start_time;
        return switch (builtin.os) {
            Os.windows => @divFloor(clock * ns_per_s, self.frequency),
            // Linux counter is already in nanoseconds.
            Os.linux => clock,
            Os.macosx, Os.ios => @divFloor(clock * self.frequency.numer, self.frequency.denom),
            else => @compileError("Unsupported OS"),
        };
    }

    /// Resets the timer value to 0/now.
    pub fn reset(self: *Timer) void {
        self.start_time = clockNative();
    }

    /// Returns the current value of the timer in nanoseconds, then resets it.
    pub fn lap(self: *Timer) u64 {
        // Capture "now" first so the lap boundary excludes read()'s own cost.
        var now = clockNative();
        var lap_time = self.read();
        self.start_time = now;
        return lap_time;
    }

    // Compile-time selection of the raw counter read for this OS.
    const clockNative = switch (builtin.os) {
        Os.windows => clockWindows,
        Os.linux => clockLinux,
        Os.macosx, Os.ios => clockDarwin,
        else => @compileError("Unsupported OS"),
    };

    fn clockWindows() u64 {
        var result: i64 = undefined;
        var err = windows.QueryPerformanceCounter(&result);
        debug.assert(err != windows.FALSE);
        return @intCast(u64, result);
    }

    fn clockDarwin() u64 {
        return darwin.mach_absolute_time();
    }

    fn clockLinux() u64 {
        var ts: posix.timespec = undefined;
        var result = posix.clock_gettime(monotonic_clock_id, &ts);
        debug.assert(posix.getErrno(result) == 0);
        return @intCast(u64, ts.tv_sec) * u64(ns_per_s) + @intCast(u64, ts.tv_nsec);
    }
};

test "os.time.sleep" {
    sleep(0, 1);
}

test "os.time.timestamp" {
    const ns_per_ms = (ns_per_s / ms_per_s);
    // NOTE(review): margin-based timing assertions can flake on loaded machines.
    const margin = 50;

    const time_0 = milliTimestamp();
    sleep(0, ns_per_ms);
    const time_1 = milliTimestamp();
    const interval = time_1 - time_0;
    debug.assert(interval > 0 and interval < margin);
}

test "os.time.Timer" {
    const ns_per_ms = (ns_per_s / ms_per_s);
    const margin = ns_per_ms * 150;

    var timer = try Timer.start();
    sleep(0, 10 * ns_per_ms);
    const time_0 = timer.read();
    debug.assert(time_0 > 0 and time_0 < margin);

    const time_1 = timer.lap();
    debug.assert(time_1 >= time_0);

    timer.reset();
    debug.assert(timer.read() < time_1);
}
std/os/time.zig
// Complex square root for f32 and f64.
// NOTE(review): the special-case handling mirrors the musl/FreeBSD
// csqrt/csqrtf implementations (C99 Annex G semantics) — confirm provenance.
const std = @import("../../std.zig");
const testing = std.testing;
const math = std.math;
const cmath = math.complex;
const Complex = cmath.Complex;

/// Returns the square root of z.
/// The imaginary part of the result has the same sign as the imaginary part
/// of z; the real part of the result is non-negative (or nan).
pub fn sqrt(z: var) @typeOf(z) {
    const T = @typeOf(z.re);

    return switch (T) {
        f32 => sqrt32(z),
        f64 => sqrt64(z),
        else => @compileError("sqrt not implemented for " ++ @typeName(T)),
    };
}

fn sqrt32(z: Complex(f32)) Complex(f32) {
    const x = z.re;
    const y = z.im;

    if (x == 0 and y == 0) {
        // Preserve the sign of the zero imaginary part.
        return Complex(f32).new(0, y);
    }
    if (math.isInf(y)) {
        // sqrt(x +- inf i) = inf +- inf i for any x (including nan).
        return Complex(f32).new(math.inf(f32), y);
    }
    if (math.isNan(x)) {
        // raise invalid if y is not nan
        const t = (y - y) / (y - y);
        return Complex(f32).new(x, t);
    }
    if (math.isInf(x)) {
        // sqrt(inf + i nan)  = inf + nan i
        // sqrt(inf + iy)     = inf + i0
        // sqrt(-inf + i nan) = nan +- inf i
        // sqrt(-inf + iy)    = 0 + inf i
        if (math.signbit(x)) {
            return Complex(f32).new(math.fabs(x - y), math.copysign(f32, x, y));
        } else {
            return Complex(f32).new(x, math.copysign(f32, y - y, y));
        }
    }

    // y = nan special case is handled fine below

    // Widen to double precision: avoids intermediate overflow and gives
    // correct rounding for the f32 result.
    const dx = @as(f64, x);
    const dy = @as(f64, y);

    if (dx >= 0) {
        // Principal branch: re = sqrt((|z| + re) / 2), im = im / (2 * re).
        const t = math.sqrt((dx + math.hypot(f64, dx, dy)) * 0.5);
        return Complex(f32).new(
            @floatCast(f32, t),
            @floatCast(f32, dy / (2.0 * t)),
        );
    } else {
        // Negative real part: compute via the imaginary component to avoid
        // cancellation; im carries the sign of y.
        const t = math.sqrt((-dx + math.hypot(f64, dx, dy)) * 0.5);
        return Complex(f32).new(
            @floatCast(f32, math.fabs(y) / (2.0 * t)),
            @floatCast(f32, math.copysign(f64, t, y)),
        );
    }
}

fn sqrt64(z: Complex(f64)) Complex(f64) {
    // may encounter overflow for im,re >= DBL_MAX / (1 + sqrt(2))
    const threshold = 0x1.a827999fcef32p+1022;

    var x = z.re;
    var y = z.im;

    if (x == 0 and y == 0) {
        // Preserve the sign of the zero imaginary part.
        return Complex(f64).new(0, y);
    }
    if (math.isInf(y)) {
        return Complex(f64).new(math.inf(f64), y);
    }
    if (math.isNan(x)) {
        // raise invalid if y is not nan
        const t = (y - y) / (y - y);
        return Complex(f64).new(x, t);
    }
    if (math.isInf(x)) {
        // sqrt(inf + i nan)  = inf + nan i
        // sqrt(inf + iy)     = inf + i0
        // sqrt(-inf + i nan) = nan +- inf i
        // sqrt(-inf + iy)    = 0 + inf i
        if (math.signbit(x)) {
            return Complex(f64).new(math.fabs(x - y), math.copysign(f64, x, y));
        } else {
            return Complex(f64).new(x, math.copysign(f64, y - y, y));
        }
    }

    // y = nan special case is handled fine below

    // scale to avoid overflow
    var scale = false;
    if (math.fabs(x) >= threshold or math.fabs(y) >= threshold) {
        x *= 0.25;
        y *= 0.25;
        scale = true;
    }

    var result: Complex(f64) = undefined;
    if (x >= 0) {
        const t = math.sqrt((x + math.hypot(f64, x, y)) * 0.5);
        result = Complex(f64).new(t, y / (2.0 * t));
    } else {
        const t = math.sqrt((-x + math.hypot(f64, x, y)) * 0.5);
        result = Complex(f64).new(math.fabs(y) / (2.0 * t), math.copysign(f64, t, y));
    }

    if (scale) {
        // Input was scaled by 1/4, so the root is scaled by 1/2; undo it.
        result.re *= 2;
        result.im *= 2;
    }

    return result;
}

const epsilon = 0.0001;

test "complex.csqrt32" {
    const a = Complex(f32).new(5, 3);
    const c = sqrt(a);

    testing.expect(math.approxEq(f32, c.re, 2.327117, epsilon));
    testing.expect(math.approxEq(f32, c.im, 0.644574, epsilon));
}

test "complex.csqrt64" {
    const a = Complex(f64).new(5, 3);
    const c = sqrt(a);

    testing.expect(math.approxEq(f64, c.re, 2.3271175190399496, epsilon));
    testing.expect(math.approxEq(f64, c.im, 0.6445742373246469, epsilon));
}
lib/std/math/complex/sqrt.zig
const std = @import("../std.zig");
const testing = std.testing;
const builtin = std.builtin;

// Comptime detection of hardware AES support on the compilation target.
const has_aesni = comptime std.Target.x86.featureSetHas(std.Target.current.cpu.features, .aes);
const has_avx = comptime std.Target.x86.featureSetHas(std.Target.current.cpu.features, .avx);
const has_armaes = comptime std.Target.aarch64.featureSetHas(std.Target.current.cpu.features, .aes);

// Implementation selection: AES-NI (with AVX) on x86_64, the ARMv8 crypto
// extensions on aarch64, and a portable software fallback everywhere else.
// All three implementations expose the same declarations re-exported below.
const impl = if (std.Target.current.cpu.arch == .x86_64 and has_aesni and has_avx) impl: {
    break :impl @import("aes/aesni.zig");
} else if (std.Target.current.cpu.arch == .aarch64 and has_armaes) impl: {
    break :impl @import("aes/armcrypto.zig");
} else impl: {
    break :impl @import("aes/soft.zig");
};

pub const Block = impl.Block;
pub const AesEncryptCtx = impl.AesEncryptCtx;
pub const AesDecryptCtx = impl.AesDecryptCtx;
pub const Aes128 = impl.Aes128;
pub const Aes256 = impl.Aes256;

test "ctr" {
    // NIST SP 800-38A pp 55-58
    const ctr = @import("modes.zig").ctr;
    const key = [_]u8{ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
    const iv = [_]u8{ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff };
    const in = [_]u8{
        0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
        0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c, 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
        0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11, 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
        0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17, 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10,
    };
    const exp_out = [_]u8{
        0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26, 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
        0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff, 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff, 0xfd, 0xff,
        0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5, 0xd3, 0x5e, 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0, 0x3e, 0xab,
        0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe, 0x03, 0xd1, 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00, 0x9c, 0xee,
    };
    var out: [exp_out.len]u8 = undefined;
    var ctx = Aes128.initEnc(key);
    ctr(AesEncryptCtx(Aes128), ctx, out[0..], in[0..], iv, builtin.Endian.Big);
    try testing.expectEqualSlices(u8, exp_out[0..], out[0..]);
}

test "encrypt" {
    // Appendix B
    {
        const key = [_]u8{ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
        const in = [_]u8{ 0x32, 0x43, 0xf6, 0xa8, 0x88, 0x5a, 0x30, 0x8d, 0x31, 0x31, 0x98, 0xa2, 0xe0, 0x37, 0x07, 0x34 };
        const exp_out = [_]u8{ 0x39, 0x25, 0x84, 0x1d, 0x02, 0xdc, 0x09, 0xfb, 0xdc, 0x11, 0x85, 0x97, 0x19, 0x6a, 0x0b, 0x32 };
        var out: [exp_out.len]u8 = undefined;
        var ctx = Aes128.initEnc(key);
        ctx.encrypt(out[0..], in[0..]);
        try testing.expectEqualSlices(u8, exp_out[0..], out[0..]);
    }

    // Appendix C.3
    {
        const key = [_]u8{
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
            0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
        };
        const in = [_]u8{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
        const exp_out = [_]u8{ 0x8e, 0xa2, 0xb7, 0xca, 0x51, 0x67, 0x45, 0xbf, 0xea, 0xfc, 0x49, 0x90, 0x4b, 0x49, 0x60, 0x89 };
        var out: [exp_out.len]u8 = undefined;
        var ctx = Aes256.initEnc(key);
        ctx.encrypt(out[0..], in[0..]);
        try testing.expectEqualSlices(u8, exp_out[0..], out[0..]);
    }
}

test "decrypt" {
    // Appendix B
    {
        const key = [_]u8{ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
        const in = [_]u8{ 0x39, 0x25, 0x84, 0x1d, 0x02, 0xdc, 0x09, 0xfb, 0xdc, 0x11, 0x85, 0x97, 0x19, 0x6a, 0x0b, 0x32 };
        const exp_out = [_]u8{ 0x32, 0x43, 0xf6, 0xa8, 0x88, 0x5a, 0x30, 0x8d, 0x31, 0x31, 0x98, 0xa2, 0xe0, 0x37, 0x07, 0x34 };
        var out: [exp_out.len]u8 = undefined;
        var ctx = Aes128.initDec(key);
        ctx.decrypt(out[0..], in[0..]);
        try testing.expectEqualSlices(u8, exp_out[0..], out[0..]);
    }

    // Appendix C.3
    {
        const key = [_]u8{
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
            0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
        };
        const in = [_]u8{ 0x8e, 0xa2, 0xb7, 0xca, 0x51, 0x67, 0x45, 0xbf, 0xea, 0xfc, 0x49, 0x90, 0x4b, 0x49, 0x60, 0x89 };
        const exp_out = [_]u8{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
        var out: [exp_out.len]u8 = undefined;
        var ctx = Aes256.initDec(key);
        ctx.decrypt(out[0..], in[0..]);
        try testing.expectEqualSlices(u8, exp_out[0..], out[0..]);
    }
}

test "expand 128-bit key" {
    const key = [_]u8{ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
    const exp_enc = [_]*const [32:0]u8{
        "<KEY>", "a0fafe1788542cb123a339392a6c7605", "f2c295f27a96b9435935807a7359f67f", "3d80477d4716fe3e1e237e446d7a883b", "<KEY>", "<KEY>", "<KEY>", "4e54f70e5f5fc9f384a64fb24ea6dc4f", "ead27321b58dbad2312bf5607f8d292f", "ac7766f319fadc2128d12941575c006e", "d014f9a8c9ee2589e13f0cc8b6630ca6",
    };
    const exp_dec = [_]*const [32:0]u8{
        "<KEY>", "a0fafe1788542cb123a339392a6c7605", "f2c295f27a96b9435935807a7359f67f", "3d80477d4716fe3e1e237e446d7a883b", "<KEY>", "<KEY>", "<KEY>", "4e54f70e5f5fc9f384a64fb24ea6dc4f", "ead27321b58dbad2312bf5607f8d292f", "ac7766f319fadc2128d12941575c006e", "d014f9a8c9ee2589e13f0cc8b6630ca6",
    };
    const enc = Aes128.initEnc(key);
    const dec = Aes128.initDec(key);
    var exp: [16]u8 = undefined;
    for (enc.key_schedule.round_keys) |round_key, i| {
        _ = try std.fmt.hexToBytes(&exp, exp_enc[i]);
        try testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
    }
    // Fixed: the second loop previously iterated `enc.key_schedule.round_keys`
    // again, so the decryption schedule was never verified (and `dec` was
    // unused). Mirror the 256-bit test below and check `dec` against exp_dec.
    for (dec.key_schedule.round_keys) |round_key, i| {
        _ = try std.fmt.hexToBytes(&exp, exp_dec[i]);
        try testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
    }
}

test "expand 256-bit key" {
    const key = [_]u8{
        0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe, 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
        0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7, 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4,
    };
    const exp_enc = [_]*const [32:0]u8{
        "603deb1015ca71be2b73aef0857d7781", "<KEY>", "<KEY>", "a8b09c1a93d194cdbe49846eb75d5b9a", "<KEY>", "<KEY>", "812c81addadf48ba24360af2fab8b464", "<KEY>", "<KEY>", "c814e20476a9fb8a5025c02d59c58239", "de1369676ccc5a71fa2563959674ee15", "5886ca5d2e2f31d77e0af1fa27cf73c3", "749c47ab18501ddae2757e4f7401905a", "cafaaae3e4d59b349adf6acebd10190d", "fe4890d1e6188d0b046df344706c631e",
    };
    const exp_dec = [_]*const [32:0]u8{
        "fe4890d1e6188d0b046df344706c631e", "ada23f4963e23b2455427c8a5c709104", "<KEY>", "b668b621ce40046d36a047ae0932ed8e", "34ad1e4450866b367725bcc763152946", "<KEY>", "<KEY>", "d669a7334a7ade7a80c8f18fc772e9e3", "25ba3c22a06bc7fb4388a28333934270", "54fb808b9c137949cab22ff547ba186c", "6c3d632985d1fbd9e3e36578701be0f3", "<KEY>", "42107758e9ec98f066329ea193f8858b", "8ec6bff6829ca03b9e49af7edba96125", "603deb1015ca71be2b73aef0857d7781",
    };
    const enc = Aes256.initEnc(key);
    const dec = Aes256.initDec(key);
    var exp: [16]u8 = undefined;
    for (enc.key_schedule.round_keys) |round_key, i| {
        _ = try std.fmt.hexToBytes(&exp, exp_enc[i]);
        try testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
    }
    for (dec.key_schedule.round_keys) |round_key, i| {
        _ = try std.fmt.hexToBytes(&exp, exp_dec[i]);
        try testing.expectEqualSlices(u8, &exp, &round_key.toBytes());
    }
}
lib/std/crypto/aes.zig
const std = @import("std");
const fs = std.fs;
const io = std.io;
const mem = std.mem;
const meta = std.meta;
const macho = std.macho;
const testing = std.testing;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;

const MachO = @import("../MachO.zig");
const makeStaticString = MachO.makeStaticString;
const satMul = MachO.satMul;
const alloc_num = MachO.alloc_num;
const alloc_den = MachO.alloc_den;

/// In-memory representation of a single Mach-O load command.
/// Fixed-size commands map directly onto the corresponding `std.macho`
/// structs; variable-size commands carry their trailing payload via
/// `SegmentCommand` or `GenericCommandWithData`.
pub const LoadCommand = union(enum) {
    Segment: SegmentCommand,
    DyldInfoOnly: macho.dyld_info_command,
    Symtab: macho.symtab_command,
    Dysymtab: macho.dysymtab_command,
    Dylinker: GenericCommandWithData(macho.dylinker_command),
    Dylib: GenericCommandWithData(macho.dylib_command),
    Main: macho.entry_point_command,
    VersionMin: macho.version_min_command,
    SourceVersion: macho.source_version_command,
    Uuid: macho.uuid_command,
    LinkeditData: macho.linkedit_data_command,
    Unknown: GenericCommandWithData(macho.load_command),

    /// Reads one load command from `reader`, dispatching on the `cmd` tag.
    /// The full `cmdsize` bytes are buffered first so each variant parser
    /// sees the complete command (header included) from offset 0.
    pub fn read(allocator: *Allocator, reader: anytype) !LoadCommand {
        const header = try reader.readStruct(macho.load_command);
        // NOTE(review): assumes header.cmdsize >= @sizeOf(macho.load_command);
        // a malformed file with a smaller cmdsize would make the slice below
        // out of bounds — TODO confirm callers validate the header.
        var buffer = try allocator.alloc(u8, header.cmdsize);
        defer allocator.free(buffer);
        // Prepend the already-consumed header, then read the remaining payload.
        mem.copy(u8, buffer, mem.asBytes(&header));
        try reader.readNoEof(buffer[@sizeOf(macho.load_command)..]);
        var stream = io.fixedBufferStream(buffer);
        return switch (header.cmd) {
            macho.LC_SEGMENT_64 => LoadCommand{
                .Segment = try SegmentCommand.read(allocator, stream.reader()),
            },
            macho.LC_DYLD_INFO, macho.LC_DYLD_INFO_ONLY => LoadCommand{
                .DyldInfoOnly = try stream.reader().readStruct(macho.dyld_info_command),
            },
            macho.LC_SYMTAB => LoadCommand{
                .Symtab = try stream.reader().readStruct(macho.symtab_command),
            },
            macho.LC_DYSYMTAB => LoadCommand{
                .Dysymtab = try stream.reader().readStruct(macho.dysymtab_command),
            },
            macho.LC_ID_DYLINKER, macho.LC_LOAD_DYLINKER, macho.LC_DYLD_ENVIRONMENT => LoadCommand{
                .Dylinker = try GenericCommandWithData(macho.dylinker_command).read(allocator, stream.reader()),
            },
            macho.LC_ID_DYLIB, macho.LC_LOAD_WEAK_DYLIB, macho.LC_LOAD_DYLIB, macho.LC_REEXPORT_DYLIB => LoadCommand{
                .Dylib = try GenericCommandWithData(macho.dylib_command).read(allocator, stream.reader()),
            },
            macho.LC_MAIN => LoadCommand{
                .Main = try stream.reader().readStruct(macho.entry_point_command),
            },
            macho.LC_VERSION_MIN_MACOSX, macho.LC_VERSION_MIN_IPHONEOS, macho.LC_VERSION_MIN_WATCHOS, macho.LC_VERSION_MIN_TVOS => LoadCommand{
                .VersionMin = try stream.reader().readStruct(macho.version_min_command),
            },
            macho.LC_SOURCE_VERSION => LoadCommand{
                .SourceVersion = try stream.reader().readStruct(macho.source_version_command),
            },
            macho.LC_UUID => LoadCommand{
                .Uuid = try stream.reader().readStruct(macho.uuid_command),
            },
            macho.LC_FUNCTION_STARTS, macho.LC_DATA_IN_CODE, macho.LC_CODE_SIGNATURE => LoadCommand{
                .LinkeditData = try stream.reader().readStruct(macho.linkedit_data_command),
            },
            // Unrecognized commands are preserved verbatim (header + payload).
            else => LoadCommand{
                .Unknown = try GenericCommandWithData(macho.load_command).read(allocator, stream.reader()),
            },
        };
    }

    /// Serializes the command back to its on-disk representation.
    pub fn write(self: LoadCommand, writer: anytype) !void {
        return switch (self) {
            .DyldInfoOnly => |x| writeStruct(x, writer),
            .Symtab => |x| writeStruct(x, writer),
            .Dysymtab => |x| writeStruct(x, writer),
            .Main => |x| writeStruct(x, writer),
            .VersionMin => |x| writeStruct(x, writer),
            .SourceVersion => |x| writeStruct(x, writer),
            .Uuid => |x| writeStruct(x, writer),
            .LinkeditData => |x| writeStruct(x, writer),
            .Segment => |x| x.write(writer),
            .Dylinker => |x| x.write(writer),
            .Dylib => |x| x.write(writer),
            .Unknown => |x| x.write(writer),
        };
    }

    /// Returns the LC_* identifier of this command.
    pub fn cmd(self: LoadCommand) u32 {
        return switch (self) {
            .DyldInfoOnly => |x| x.cmd,
            .Symtab => |x| x.cmd,
            .Dysymtab => |x| x.cmd,
            .Main => |x| x.cmd,
            .VersionMin => |x| x.cmd,
            .SourceVersion => |x| x.cmd,
            .Uuid => |x| x.cmd,
            .LinkeditData => |x| x.cmd,
            .Segment => |x| x.inner.cmd,
            .Dylinker => |x| x.inner.cmd,
            .Dylib => |x| x.inner.cmd,
            .Unknown => |x| x.inner.cmd,
        };
    }

    /// Returns the total on-disk size of this command in bytes.
    pub fn cmdsize(self: LoadCommand) u32 {
        return switch (self) {
            .DyldInfoOnly => |x| x.cmdsize,
            .Symtab => |x| x.cmdsize,
            .Dysymtab => |x| x.cmdsize,
            .Main => |x| x.cmdsize,
            .VersionMin => |x| x.cmdsize,
            .SourceVersion => |x| x.cmdsize,
            .LinkeditData => |x| x.cmdsize,
            .Uuid => |x| x.cmdsize,
            .Segment => |x| x.inner.cmdsize,
            .Dylinker => |x| x.inner.cmdsize,
            .Dylib => |x| x.inner.cmdsize,
            .Unknown => |x| x.inner.cmdsize,
        };
    }

    /// Frees any heap payload; fixed-size variants own no memory.
    pub fn deinit(self: *LoadCommand, allocator: *Allocator) void {
        return switch (self.*) {
            .Segment => |*x| x.deinit(allocator),
            .Dylinker => |*x| x.deinit(allocator),
            .Dylib => |*x| x.deinit(allocator),
            .Unknown => |*x| x.deinit(allocator),
            else => {},
        };
    }

    // Writes a plain extern struct as raw bytes.
    fn writeStruct(command: anytype, writer: anytype) !void {
        return writer.writeAll(mem.asBytes(&command));
    }

    // Deep equality; tags must match first. (`@TagType` dates this file to
    // Zig <= 0.7; later versions use std.meta.Tag.)
    fn eql(self: LoadCommand, other: LoadCommand) bool {
        if (@as(@TagType(LoadCommand), self) != @as(@TagType(LoadCommand), other)) return false;
        return switch (self) {
            .DyldInfoOnly => |x| meta.eql(x, other.DyldInfoOnly),
            .Symtab => |x| meta.eql(x, other.Symtab),
            .Dysymtab => |x| meta.eql(x, other.Dysymtab),
            .Main => |x| meta.eql(x, other.Main),
            .VersionMin => |x| meta.eql(x, other.VersionMin),
            .SourceVersion => |x| meta.eql(x, other.SourceVersion),
            .Uuid => |x| meta.eql(x, other.Uuid),
            .LinkeditData => |x| meta.eql(x, other.LinkeditData),
            .Segment => |x| x.eql(other.Segment),
            .Dylinker => |x| x.eql(other.Dylinker),
            .Dylib => |x| x.eql(other.Dylib),
            .Unknown => |x| x.eql(other.Unknown),
        };
    }
};

/// LC_SEGMENT_64 command: fixed header plus `nsects` trailing section_64
/// records.
pub const SegmentCommand = struct {
    inner: macho.segment_command_64,
    sections: std.ArrayListUnmanaged(macho.section_64) = .{},

    /// Creates a segment with no sections.
    pub fn empty(inner: macho.segment_command_64) SegmentCommand {
        return .{ .inner = inner };
    }

    /// Appends a section, keeping cmdsize/nsects in the header consistent.
    pub fn addSection(self: *SegmentCommand, alloc: *Allocator, section: macho.section_64) !void {
        try self.sections.append(alloc, section);
        self.inner.cmdsize += @sizeOf(macho.section_64);
        self.inner.nsects += 1;
    }

    /// Parses the header and all trailing section records from `reader`.
    pub fn read(alloc: *Allocator, reader: anytype) !SegmentCommand {
        const inner = try reader.readStruct(macho.segment_command_64);
        var segment = SegmentCommand{
            .inner = inner,
        };
        try segment.sections.ensureCapacity(alloc, inner.nsects);
        var i: usize = 0;
        while (i < inner.nsects) : (i += 1) {
            const section = try reader.readStruct(macho.section_64);
            segment.sections.appendAssumeCapacity(section);
        }
        return segment;
    }

    /// Writes the header followed by every section record.
    pub fn write(self: SegmentCommand, writer: anytype) !void {
        try writer.writeAll(mem.asBytes(&self.inner));
        for (self.sections.items) |sect| {
            try writer.writeAll(mem.asBytes(&sect));
        }
    }

    pub fn deinit(self: *SegmentCommand, alloc: *Allocator) void {
        self.sections.deinit(alloc);
    }

    /// Bytes available from file offset `start` up to the next section that
    /// begins after it; 0 when `start` is the segment's own file offset.
    pub fn allocatedSize(self: SegmentCommand, start: u64) u64 {
        assert(start > 0);
        if (start == self.inner.fileoff) return 0;
        var min_pos: u64 = std.math.maxInt(u64);
        for (self.sections.items) |section| {
            if (section.offset <= start) continue;
            if (section.offset < min_pos) min_pos = section.offset;
        }
        return min_pos - start;
    }

    // Returns the end of the first section overlapping [start, start+size)
    // (both ranges padded by alloc_num/alloc_den), or null if none overlaps.
    fn detectAllocCollision(self: SegmentCommand, start: u64, size: u64) ?u64 {
        const end = start + satMul(size, alloc_num) / alloc_den;
        for (self.sections.items) |section| {
            const increased_size = satMul(section.size, alloc_num) / alloc_den;
            const test_end = section.offset + increased_size;
            if (end > section.offset and start < test_end) {
                return test_end;
            }
        }
        return null;
    }

    /// First-fit search for `object_size` free bytes in this segment's file
    /// range, advancing past each collision aligned to `min_alignment`.
    pub fn findFreeSpace(self: SegmentCommand, object_size: u64, min_alignment: u16, start: ?u64) u64 {
        var st: u64 = if (start) |v| v else self.inner.fileoff;
        while (self.detectAllocCollision(st, object_size)) |item_end| {
            st = mem.alignForwardGeneric(u64, item_end, min_alignment);
        }
        return st;
    }

    // NOTE(review): compares `nsects` entries of both section lists; assumes
    // both lists actually hold at least `nsects` items (true when both came
    // from read/addSection) — TODO confirm.
    fn eql(self: SegmentCommand, other: SegmentCommand) bool {
        if (!meta.eql(self.inner, other.inner)) return false;
        const lhs = self.sections.items;
        const rhs = other.sections.items;
        var i: usize = 0;
        while (i < self.inner.nsects) : (i += 1) {
            if (!meta.eql(lhs[i], rhs[i])) return false;
        }
        return true;
    }
};

/// Convenience constructor: wraps `cmd` in a GenericCommandWithData with no
/// payload read yet.
pub fn emptyGenericCommandWithData(cmd: anytype) GenericCommandWithData(@TypeOf(cmd)) {
    return .{ .inner = cmd };
}

/// A fixed-size command struct `Cmd` plus its variable-length trailing
/// payload (e.g. the path string of a dylinker/dylib command).
pub fn GenericCommandWithData(comptime Cmd: type) type {
    return struct {
        inner: Cmd,
        /// This field remains undefined until `read` is called.
        data: []u8 = undefined,

        const Self = @This();

        /// Reads the fixed struct, then the remaining cmdsize bytes as payload.
        pub fn read(allocator: *Allocator, reader: anytype) !Self {
            const inner = try reader.readStruct(Cmd);
            var data = try allocator.alloc(u8, inner.cmdsize - @sizeOf(Cmd));
            errdefer allocator.free(data);
            try reader.readNoEof(data);
            return Self{
                .inner = inner,
                .data = data,
            };
        }

        pub fn write(self: Self, writer: anytype) !void {
            try writer.writeAll(mem.asBytes(&self.inner));
            try writer.writeAll(self.data);
        }

        pub fn deinit(self: *Self, allocator: *Allocator) void {
            allocator.free(self.data);
        }

        fn eql(self: Self, other: Self) bool {
            if (!meta.eql(self.inner, other.inner)) return false;
            return mem.eql(u8, self.data, other.data);
        }
    };
}

// Test helper: parse `buffer` and compare against `expected`.
fn testRead(allocator: *Allocator, buffer: []const u8, expected: anytype) !void {
    var stream = io.fixedBufferStream(buffer);
    var given = try LoadCommand.read(allocator, stream.reader());
    defer given.deinit(allocator);
    testing.expect(expected.eql(given));
}

// Test helper: serialize `cmd` and compare the leading bytes with `expected`.
fn testWrite(buffer: []u8, cmd: LoadCommand, expected: []const u8) !void {
    var stream = io.fixedBufferStream(buffer);
    try cmd.write(stream.writer());
    testing.expect(mem.eql(u8, expected, buffer[0..expected.len]));
}

test "read-write segment command" {
    var gpa = testing.allocator;
    const in_buffer = &[_]u8{
        0x19, 0x00, 0x00, 0x00, // cmd
        0x98, 0x00, 0x00, 0x00, // cmdsize
        0x5f, 0x5f, 0x54, 0x45, 0x58, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // segname
        0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // vmaddr
        0x00, 0x80, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // vmsize
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // fileoff
        0x00, 0x80, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, // filesize
        0x07, 0x00, 0x00, 0x00, // maxprot
        0x05, 0x00, 0x00, 0x00, // initprot
        0x01, 0x00, 0x00, 0x00, // nsects
        0x00, 0x00, 0x00, 0x00, // flags
        0x5f, 0x5f, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // sectname
        0x5f, 0x5f, 0x54, 0x45, 0x58, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // segname
        0x00, 0x40, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // address
        0xc0, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // size
        0x00, 0x40, 0x00, 0x00, // offset
        0x02, 0x00, 0x00, 0x00, // alignment
        0x00, 0x00, 0x00, 0x00, // reloff
        0x00, 0x00, 0x00, 0x00, // nreloc
        0x00, 0x04, 0x00, 0x80, // flags
        0x00, 0x00, 0x00, 0x00, // reserved1
        0x00, 0x00, 0x00, 0x00, // reserved2
        0x00, 0x00, 0x00, 0x00, // reserved3
    };
    var cmd = SegmentCommand{
        .inner = .{
            .cmd = macho.LC_SEGMENT_64,
            .cmdsize = 152,
            .segname = makeStaticString("__TEXT"),
            .vmaddr = 4294967296,
            .vmsize = 294912,
            .fileoff = 0,
            .filesize = 294912,
            .maxprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE | macho.VM_PROT_EXECUTE,
            .initprot = macho.VM_PROT_EXECUTE | macho.VM_PROT_READ,
            .nsects = 1,
            .flags = 0,
        },
    };
    try cmd.sections.append(gpa, .{
        .sectname = makeStaticString("__text"),
        .segname = makeStaticString("__TEXT"),
        .addr = 4294983680,
        .size = 448,
        .offset = 16384,
        .@"align" = 2,
        .reloff = 0,
        .nreloc = 0,
        .flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
        .reserved1 = 0,
        .reserved2 = 0,
        .reserved3 = 0,
    });
    defer cmd.deinit(gpa);
    try testRead(gpa, in_buffer, LoadCommand{ .Segment = cmd });
    var out_buffer: [in_buffer.len]u8 = undefined;
    try testWrite(&out_buffer, LoadCommand{ .Segment = cmd }, in_buffer);
}

test "read-write generic command with data" {
    var gpa = testing.allocator;
    const in_buffer = &[_]u8{
        0x0c, 0x00, 0x00, 0x00, // cmd
        0x20, 0x00, 0x00, 0x00, // cmdsize
        0x18, 0x00, 0x00, 0x00, // name
        0x02, 0x00, 0x00, 0x00, // timestamp
        0x00, 0x00, 0x00, 0x00, // current_version
        0x00, 0x00, 0x00, 0x00, // compatibility_version
        0x2f, 0x75, 0x73, 0x72, 0x00, 0x00, 0x00, 0x00, // data
    };
    var cmd = GenericCommandWithData(macho.dylib_command){
        .inner = .{
            .cmd = macho.LC_LOAD_DYLIB,
            .cmdsize = 32,
            .dylib = .{
                .name = 24,
                .timestamp = 2,
                .current_version = 0,
                .compatibility_version = 0,
            },
        },
    };
    cmd.data = try gpa.alloc(u8, 8);
    defer gpa.free(cmd.data);
    cmd.data[0] = 0x2f;
    cmd.data[1] = 0x75;
    cmd.data[2] = 0x73;
    cmd.data[3] = 0x72;
    cmd.data[4] = 0x0;
    cmd.data[5] = 0x0;
    cmd.data[6] = 0x0;
    cmd.data[7] = 0x0;
    try testRead(gpa, in_buffer, LoadCommand{ .Dylib = cmd });
    var out_buffer: [in_buffer.len]u8 = undefined;
    try testWrite(&out_buffer, LoadCommand{ .Dylib = cmd }, in_buffer);
}

test "read-write C struct command" {
    var gpa = testing.allocator;
    const in_buffer = &[_]u8{
        0x28, 0x00, 0x00, 0x80, // cmd
        0x18, 0x00, 0x00, 0x00, // cmdsize
        0x04, 0x41, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // entryoff
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // stacksize
    };
    const cmd = .{
        .cmd = macho.LC_MAIN,
        .cmdsize = 24,
        .entryoff = 16644,
        .stacksize = 0,
    };
    try testRead(gpa, in_buffer, LoadCommand{ .Main = cmd });
    var out_buffer: [in_buffer.len]u8 = undefined;
    try testWrite(&out_buffer, LoadCommand{ .Main = cmd }, in_buffer);
}
src/link/MachO/commands.zig
const std = @import("../std.zig");
const io = std.io;
const assert = std.debug.assert;
const testing = std.testing;

/// A reader that wraps `ReaderType` and serves reads from an internal
/// `buffer_size`-byte FIFO, refilling from the underlying reader only when
/// the FIFO runs dry.
pub fn BufferedReader(comptime buffer_size: usize, comptime ReaderType: type) type {
    return struct {
        unbuffered_reader: ReaderType,
        fifo: FifoType = FifoType.init(),

        pub const Error = ReaderType.Error;
        pub const Reader = io.Reader(*Self, Error, read);

        /// Deprecated: use `Reader`
        pub const InStream = Reader;

        const Self = @This();
        const FifoType = std.fifo.LinearFifo(u8, std.fifo.LinearFifoBufferType{ .Static = buffer_size });

        /// Fills `dest` from the FIFO, pulling from the underlying reader as
        /// needed. Returns the number of bytes written to `dest`; a short
        /// count means the underlying reader reported end of stream.
        pub fn read(self: *Self, dest: []u8) Error!usize {
            var filled: usize = 0;
            while (filled < dest.len) {
                const copied = self.fifo.read(dest[filled..]);
                filled += copied;
                if (copied != 0) continue;

                // FIFO drained — try to pull another chunk from the source.
                const free_space = self.fifo.writableSlice(0);
                assert(free_space.len > 0);
                const fetched = try self.unbuffered_reader.read(free_space);
                if (fetched == 0) {
                    // Source is exhausted; report whatever we managed to copy.
                    return filled;
                }
                self.fifo.update(fetched);
            }
            return dest.len;
        }

        pub fn reader(self: *Self) Reader {
            return .{ .context = self };
        }

        /// Deprecated: use `reader`
        pub fn inStream(self: *Self) InStream {
            return .{ .context = self };
        }
    };
}

/// Convenience constructor with a 4 KiB buffer.
pub fn bufferedReader(underlying_stream: anytype) BufferedReader(4096, @TypeOf(underlying_stream)) {
    return .{ .unbuffered_reader = underlying_stream };
}

test "io.BufferedReader" {
    // A deliberately pessimal source that yields one byte per read call,
    // forcing the buffered reader through many refill cycles.
    const OneByteReadReader = struct {
        str: []const u8,
        curr: usize,

        const Error = error{NoError};
        const Self = @This();
        const Reader = io.Reader(*Self, Error, read);

        fn init(str: []const u8) Self {
            return Self{
                .str = str,
                .curr = 0,
            };
        }

        fn read(self: *Self, dest: []u8) Error!usize {
            if (self.str.len <= self.curr or dest.len == 0) return 0;
            dest[0] = self.str[self.curr];
            self.curr += 1;
            return 1;
        }

        fn reader(self: *Self) Reader {
            return .{ .context = self };
        }
    };

    const str = "This is a test";
    var src = OneByteReadReader.init(str);
    var buf_reader = bufferedReader(src.reader());
    const stream = buf_reader.reader();

    const res = try stream.readAllAlloc(testing.allocator, str.len + 1);
    defer testing.allocator.free(res);
    testing.expectEqualSlices(u8, str, res);
}
lib/std/io/buffered_reader.zig
const std = @import("std");
const backend = @import("backend.zig");
const Size = @import("data.zig").Size;
const Colorspace = @import("color.zig").Colorspace;

/// As of now, zgt only supports RGBA images
pub const ImageData = struct {
    width: usize,
    stride: usize,
    height: usize,
    /// Value pointing to the image data
    peer: backend.ImageData,

    /// Wraps raw pixel bytes in an ImageData. `stride` is the byte distance
    /// between successive rows, so `bytes` must hold at least stride * height
    /// bytes (asserted below). The backend peer takes its own view of `bytes`.
    pub fn fromBytes(width: usize, height: usize, stride: usize, cs: Colorspace, bytes: []const u8) !ImageData {
        std.debug.assert(bytes.len >= stride * height);
        return ImageData{
            .width = width,
            .height = height,
            .stride = stride,
            .peer = try backend.ImageData.from(width, height, stride, cs, bytes),
        };
    }

    /// Loads an image from `path` (relative to the current working
    /// directory). Only PNG decoding is wired up here.
    pub fn fromFile(allocator: std.mem.Allocator, path: []const u8) !ImageData {
        const file = try std.fs.cwd().openFile(path, .{ .mode = .read_only });
        defer file.close();

        const reader = file.reader();
        return @import("png.zig").read(allocator, reader);
    }
};

/// Widget that displays an ImageData. The backend peer is created lazily in
/// `show`.
pub const Image_Impl = struct {
    pub usingnamespace @import("internal.zig").All(Image_Impl);

    peer: ?backend.Image = null,
    handlers: Image_Impl.Handlers = undefined,
    dataWrappers: Image_Impl.DataWrappers = .{},
    data: ImageData,

    // NOTE(review): `.peer = 0` initializes a field declared as
    // `backend.ImageData` with an integer literal — this only compiles if
    // that backend type is an integer/handle. TODO confirm against
    // backend.zig; `undefined` may have been intended.
    pub fn init() Image_Impl {
        return initWithData(.{ .width = 0, .height = 0, .stride = 0, .peer = 0 });
    }

    pub fn initWithData(data: ImageData) Image_Impl {
        return Image_Impl.init_events(Image_Impl{ .data = data });
    }

    /// Creates the backend peer (if not already created) and hands it the
    /// pixel data.
    pub fn show(self: *Image_Impl) !void {
        if (self.peer == null) {
            self.peer = try backend.Image.create();
            self.peer.?.setData(self.data.peer);
            try self.show_events();
        }
    }

    /// The preferred size is simply the image's pixel dimensions; the
    /// available space is ignored.
    pub fn getPreferredSize(self: *Image_Impl, available: Size) Size {
        _ = available;
        return Size{ .width = @intCast(u32, self.data.width), .height = @intCast(u32, self.data.height) };
    }
};

/// Declarative constructor used by zgt's widget-tree DSL.
pub fn Image(config: struct { data: ImageData }) Image_Impl {
    var image = Image_Impl.initWithData(config.data);
    return image;
}
src/image.zig
const std = @import("std");
const warn = std.debug.warn;

// D: Die, S: Stay the same, G: Grow a new cell
const Rule = enum {
    D, // Die
    S, // Stay the same
    G, // Grow a new cell
};

/// Game-of-Life state. `rules` is indexed by live-neighbor count (0..8);
/// `grid` and `next_grid` are indexed as grid[x][y].
const Life = struct {
    generation: i32,
    rules: []const Rule,
    width: usize,
    height: usize,
    grid: [][]bool,
    next_grid: [][]bool,
};

// Bounds-checked cell lookup; any coordinate outside the grid reads as dead.
fn get(self: *Life, x: i32, y: i32) bool {
    if (x >= 0 and y >= 0) {
        const x_usize = @intCast(usize, x);
        const y_usize = @intCast(usize, y);
        if (x_usize < self.width and y_usize < self.height) {
            return self.grid[x_usize][y_usize];
        }
    }
    return false;
}

// Counts the live cells among the 8 neighbors of (x, y).
fn getNeighbors(self: *Life, x: usize, y: usize) usize {
    const x_i32 = @intCast(i32, x);
    const y_i32 = @intCast(i32, y);
    var neighbors: usize = 0;
    for ([_]i32{-1, 0, 1}) |x_offset| {
        for ([_]i32{-1, 0, 1}) |y_offset| {
            // Don't count the offset 0, 0
            if (x_offset != 0 or y_offset != 0) {
                if (get(self, x_i32 + x_offset, y_i32 + y_offset)) {
                    neighbors += 1;
                }
            }
        }
    }
    return neighbors;
}

// Renders the grid to stderr inside a +---+ border.
// NOTE(review): the outer loop walks the first index (labeled y) while the
// body reads self.grid[x][y] with x from the inner loop — this prints the
// transpose of the grid[x][y] layout used by get/addFigure. Harmless on a
// square grid but worth confirming the intended orientation.
fn printGrid(self: *Life) void {
    warn("Generation {}\n", self.generation);
    warn("+");
    var i: usize = 0;
    while (i < self.width) {
        warn("-");
        i += 1;
    }
    warn("+\n");
    for (self.grid) |row, y| {
        warn("|");
        for (row) |cell, x| {
            if (self.grid[x][y]) {
                warn("0");
            } else {
                warn(" ");
            }
        }
        warn("|\n");
    }
    warn("+");
    i = 0;
    while (i < self.width) {
        warn("-");
        i += 1;
    }
    warn("+\n");
}

// Applies `rules` to every cell (indexed by its neighbor count) into
// next_grid, then copies next_grid back into grid. Rule.S leaves the cell
// unchanged, which works because grid == next_grid at loop entry.
fn nextGeneration(self: *Life) void {
    for (self.grid) |row, y| {
        for (row) |cell, x| {
            const neighbors = getNeighbors(self, x, y);
            const next_state = self.rules[neighbors];
            if (next_state == Rule.G) {
                self.next_grid[x][y] = true;
            } else if (next_state == Rule.D) {
                self.next_grid[x][y] = false;
            }
        }
    }
    self.generation += 1;
    for (self.next_grid) |row, y| {
        for (row) |cell, x1| {
            self.grid[x1][y] = self.next_grid[x1][y];
        }
    }
}

// Stamps a figure (list of [x, y] offsets) onto both grids at (x, y).
fn addFigure(self: *Life, x: usize, y: usize, points: []const [2]usize) void {
    for (points) |point| {
        self.grid[x+point[0]][y+point[1]] = true;
        self.next_grid[x+point[0]][y+point[1]] = true;
    }
}

// Well-known starting patterns, as [x, y] offsets.
const rpentomino = [_][2]usize{
    [2]usize{1, 0},
    [2]usize{2, 0},
    [2]usize{0, 1},
    [2]usize{1, 1},
    [2]usize{1, 2},
};

const block = [_][2]usize{
    [2]usize{0, 0},
    [2]usize{1, 0},
    [2]usize{0, 1},
    [2]usize{1, 1},
};

const blinker = [_][2]usize{
    [2]usize{1, 0},
    [2]usize{1, 1},
    [2]usize{1, 2},
};

const beacon = [_][2]usize{
    [2]usize{0, 0},
    [2]usize{0, 1},
    [2]usize{1, 0},
    [2]usize{2, 3},
    [2]usize{3, 2},
    [2]usize{3, 3},
};

pub fn main() void {
    // Conway's rules: a cell with 3 neighbors is born (G), with 2 it
    // survives (S), anything else dies (D). Index = neighbor count.
    const rules = [_]Rule{ Rule.D, Rule.D, Rule.S, Rule.G, Rule.D, Rule.D, Rule.D, Rule.D, Rule.D };
    const width = 10;
    const height = 10;

    // Create grids of slices for current and next generation
    // NOTE(review): original_grid is declared inside the labeled block but
    // the slices taken from it are used after the block exits — that is
    // use-after-scope in Zig. TODO hoist the backing arrays out of the
    // blocks into main's scope.
    var grid = init: {
        var original_grid: [width*height]bool = [_]bool{false} ** (width*height);
        var sliced_grid: [width][]bool = undefined;
        for (sliced_grid) |*pt, i| {
            pt.* = original_grid[i*height..(i+1)*height];
        }
        break :init sliced_grid[0..sliced_grid.len];
    };
    var next_grid = init: {
        var original_grid: [width*height]bool = [_]bool{false} ** (width*height);
        var sliced_grid: [width][]bool = undefined;
        for (sliced_grid) |*pt, i| {
            pt.* = original_grid[i*height..(i+1)*height];
        }
        break :init sliced_grid[0..sliced_grid.len];
    };

    var game = Life {
        .generation = 0,
        .rules = rules,
        .width = width,
        .height = height,
        .grid = grid,
        .next_grid = next_grid
    };

    // addFigure(&game, 0, 0, beacon[0..beacon.len]);
    addFigure(&game, 4, 4, rpentomino[0..rpentomino.len]);

    // Run forever, one generation per second.
    while (true) {
        printGrid(&game);
        nextGeneration(&game);
        std.time.sleep(1000000000);
    }
}
life.zig
const std = @import("std");
const c = @import("c.zig");
const AllShaders = @import("all_shaders.zig").AllShaders;
const Mat4x4 = @import("math3d.zig").Mat4x4;
const PngImage = @import("png.zig").PngImage;

/// A texture atlas cut into fixed-size cells, each drawable as a textured
/// quad. Cells are numbered row-major starting from the top-left of the
/// image.
pub const Spritesheet = struct {
    img: PngImage,
    count: usize,
    texture_id: c.GLuint,
    vertex_buffer: c.GLuint,
    tex_coord_buffers: []c.GLuint,

    /// Draws cell `index` with the given MVP matrix using the texture shader.
    pub fn draw(s: *Spritesheet, as: AllShaders, index: usize, mvp: Mat4x4) void {
        as.texture.bind();
        as.texture.setUniformMat4x4(as.texture_uniform_mvp, mvp);
        as.texture.setUniformInt(as.texture_uniform_tex, 0);

        c.glBindBuffer(c.GL_ARRAY_BUFFER, s.vertex_buffer);
        c.glEnableVertexAttribArray(@intCast(c.GLuint, as.texture_attrib_position));
        c.glVertexAttribPointer(@intCast(c.GLuint, as.texture_attrib_position), 3, c.GL_FLOAT, c.GL_FALSE, 0, null);

        c.glBindBuffer(c.GL_ARRAY_BUFFER, s.tex_coord_buffers[index]);
        c.glEnableVertexAttribArray(@intCast(c.GLuint, as.texture_attrib_tex_coord));
        c.glVertexAttribPointer(@intCast(c.GLuint, as.texture_attrib_tex_coord), 2, c.GL_FLOAT, c.GL_FALSE, 0, null);

        c.glActiveTexture(c.GL_TEXTURE0);
        c.glBindTexture(c.GL_TEXTURE_2D, s.texture_id);

        c.glDrawArrays(c.GL_TRIANGLE_STRIP, 0, 4);
    }

    /// Decodes `compressed_bytes` as PNG, uploads it as one GL texture, and
    /// builds one texture-coordinate buffer per `w`x`h` cell.
    pub fn init(s: *Spritesheet, compressed_bytes: []const u8, w: usize, h: usize) !void {
        s.img = try PngImage.create(compressed_bytes);
        const col_count = s.img.width / w;
        const row_count = s.img.height / h;
        s.count = col_count * row_count;

        c.glGenTextures(1, &s.texture_id);
        errdefer c.glDeleteTextures(1, &s.texture_id);

        c.glBindTexture(c.GL_TEXTURE_2D, s.texture_id);
        c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_MAG_FILTER, c.GL_NEAREST);
        c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_MIN_FILTER, c.GL_NEAREST);
        c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_WRAP_S, c.GL_CLAMP_TO_EDGE);
        c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_WRAP_T, c.GL_CLAMP_TO_EDGE);

        // Fixed: glTexImage2D reads client memory, which is governed by
        // GL_UNPACK_ALIGNMENT; GL_PACK_ALIGNMENT (set previously) only
        // affects readback such as glReadPixels.
        c.glPixelStorei(c.GL_UNPACK_ALIGNMENT, 4);
        c.glTexImage2D(
            c.GL_TEXTURE_2D,
            0,
            c.GL_RGBA,
            @intCast(c_int, s.img.width),
            @intCast(c_int, s.img.height),
            0,
            c.GL_RGBA,
            c.GL_UNSIGNED_BYTE,
            @ptrCast(*c_void, &s.img.raw[0]),
        );

        c.glGenBuffers(1, &s.vertex_buffer);
        errdefer c.glDeleteBuffers(1, &s.vertex_buffer);

        // One quad in pixel units; the MVP supplied to draw() does placement.
        const vertexes = [_][3]c.GLfloat{
            [_]c.GLfloat{ 0.0, 0.0, 0.0 },
            [_]c.GLfloat{ 0.0, @intToFloat(c.GLfloat, h), 0.0 },
            [_]c.GLfloat{ @intToFloat(c.GLfloat, w), 0.0, 0.0 },
            [_]c.GLfloat{ @intToFloat(c.GLfloat, w), @intToFloat(c.GLfloat, h), 0.0 },
        };

        c.glBindBuffer(c.GL_ARRAY_BUFFER, s.vertex_buffer);
        c.glBufferData(c.GL_ARRAY_BUFFER, 4 * 3 * @sizeOf(c.GLfloat), @ptrCast(*const c_void, &vertexes[0][0]), c.GL_STATIC_DRAW);

        // TODO: this slice comes from c.malloc (see alloc below) and is never
        // freed in deinit — the GL buffer ids are deleted but the id array
        // itself leaks.
        s.tex_coord_buffers = try alloc(c.GLuint, s.count);
        c.glGenBuffers(@intCast(c.GLint, s.tex_coord_buffers.len), s.tex_coord_buffers.ptr);
        errdefer c.glDeleteBuffers(@intCast(c.GLint, s.tex_coord_buffers.len), s.tex_coord_buffers.ptr);

        for (s.tex_coord_buffers) |tex_coord_buffer, i| {
            // GL's texture origin is bottom-left while cell 0 is the image's
            // top-left, so flip the row before computing UVs.
            const upside_down_row = i / col_count;
            const col = i % col_count;
            const row = row_count - upside_down_row - 1;

            const x = @intToFloat(f32, col * w);
            const y = @intToFloat(f32, row * h);

            const img_w = @intToFloat(f32, s.img.width);
            const img_h = @intToFloat(f32, s.img.height);
            const tex_coords = [_][2]c.GLfloat{
                [_]c.GLfloat{
                    x / img_w,
                    (y + @intToFloat(f32, h)) / img_h,
                },
                [_]c.GLfloat{
                    x / img_w,
                    y / img_h,
                },
                [_]c.GLfloat{
                    (x + @intToFloat(f32, w)) / img_w,
                    (y + @intToFloat(f32, h)) / img_h,
                },
                [_]c.GLfloat{
                    (x + @intToFloat(f32, w)) / img_w,
                    y / img_h,
                },
            };

            c.glBindBuffer(c.GL_ARRAY_BUFFER, tex_coord_buffer);
            c.glBufferData(c.GL_ARRAY_BUFFER, 4 * 2 * @sizeOf(c.GLfloat), @ptrCast(*const c_void, &tex_coords[0][0]), c.GL_STATIC_DRAW);
        }
    }

    /// Releases the GL objects and the decoded image. (The
    /// tex_coord_buffers backing array leaks — see init.)
    pub fn deinit(s: *Spritesheet) void {
        c.glDeleteBuffers(@intCast(c.GLint, s.tex_coord_buffers.len), s.tex_coord_buffers.ptr);
        c.glDeleteBuffers(1, &s.vertex_buffer);
        c.glDeleteTextures(1, &s.texture_id);
        s.img.destroy();
    }
};

// Minimal typed wrapper over c.malloc; returns error.OutOfMemory on NULL.
// Caller is responsible for freeing via c.free.
fn alloc(comptime T: type, n: usize) ![]T {
    const ptr = c.malloc(@sizeOf(T) * n) orelse return error.OutOfMemory;
    return @ptrCast([*]T, @alignCast(@alignOf(T), ptr))[0..n];
}
src/spritesheet.zig
const std = @import("std"); const Allocator = std.mem.Allocator; const c = @import("../c.zig"); const content = @import("../content.zig"); const gfx = @import("../gfx.zig"); const Texture = gfx.Texture; const Image = gfx.Image; const BoundInstancer = gfx.BoundInstancer; const CoordinateStack = @import("../coordinates.zig").CoordinateStack; const math = @import("../math.zig"); usingnamespace math; usingnamespace @import("util.zig"); //; // TODO // assert spritebatch isnt bound twice from drawer2d // floor_positions boolean uniform in default shader? pub const Vertex2d = extern struct { const Self = @This(); position: Vec2, uv: Vec2, pub fn init(position: Vec2, uv: Vec2) Self { return Self{ .position = position, .uv = uv, }; } pub fn genQuad(verts: *[4]Self, centered: bool) void { verts[0] = Self.init(Vec2.init(1., 1.), Vec2.init(1., 1.)); verts[1] = Self.init(Vec2.init(1., 0.), Vec2.init(1., 0.)); verts[2] = Self.init(Vec2.init(0., 1.), Vec2.init(0., 1.)); verts[3] = Self.init(Vec2.init(0., 0.), Vec2.init(0., 0.)); if (centered) { for (verts) |*v| { v.position.x -= 0.5; v.position.y -= 0.5; } } } pub fn genCircle(verts: []Self) void { const angle_step = std.math.tau / @intToFloat(f32, verts.len); for (verts) |*v, i| { const at = @intToFloat(f32, i) * angle_step; const x = std.math.cos(at) / 2.; const y = std.math.sin(at) / 2.; v.position = Vec2.init(x, y); v.uv = Vec2.init(x + 0.5, y + 0.5); } } pub fn setAttributes(vao: *gfx.VertexArray) void { var temp = gfx.VertexAttribute{ .size = 2, .ty = c.GL_FLOAT, .is_normalized = false, .stride = @sizeOf(Self), .offset = @byteOffsetOf(Self, "position"), .divisor = 0, }; vao.enableAttribute(0, temp); temp.offset = @byteOffsetOf(Self, "uv"); vao.enableAttribute(1, temp); } }; pub const Mesh2d = gfx.Mesh(Vertex2d); //; pub const Spritebatch = struct { pub const Sprite = extern struct { uv: UV_Region = UV_Region.init(0., 0., 1., 1.), transform: Transform2d = Transform2d.identity(), color: Color = Color.white(), pub fn 
// NOTE(review): this chunk begins mid-file; the function below is the tail of a
// declaration whose `pub fn` header lies before this view.
setAttributes(vao: gfx.VertexArray) void {
    // Per-instance vertex layout of one `Sprite`:
    //   attribute 2 = uv (4 floats), 3 = position (2), 4 = rotation (1),
    //   5 = scale (2), 6 = color (4).
    // `.divisor = 1` advances each attribute once per instance (instancing).
    var temp = gfx.VertexAttribute{
        .size = 4,
        .ty = c.GL_FLOAT,
        .is_normalized = false,
        .stride = @sizeOf(Sprite),
        .offset = @byteOffsetOf(Sprite, "uv"),
        .divisor = 1,
    };
    vao.enableAttribute(2, temp);
    temp.size = 2;
    temp.offset = @byteOffsetOf(Sprite, "transform") + @byteOffsetOf(Transform2d, "position");
    vao.enableAttribute(3, temp);
    temp.size = 1;
    temp.offset = @byteOffsetOf(Sprite, "transform") + @byteOffsetOf(Transform2d, "rotation");
    vao.enableAttribute(4, temp);
    temp.size = 2;
    temp.offset = @byteOffsetOf(Sprite, "transform") + @byteOffsetOf(Transform2d, "scale");
    vao.enableAttribute(5, temp);
    temp.size = 4;
    temp.offset = @byteOffsetOf(Sprite, "color");
    vao.enableAttribute(6, temp);
}
};

const Self = @This();

// One instancer shared by two unit quads: `quad` has its origin at a corner,
// `quad_centered` at its center.
instancer: gfx.Instancer(Sprite),
quad_verts: [4]Vertex2d,
quad: Mesh2d,
quad_centered_verts: [4]Vertex2d,
quad_centered: Mesh2d,

/// Builds the instancer over caller-owned `data` and both quad meshes,
/// making each quad's VAO compatible with the per-instance attributes.
pub fn init(data: []Sprite) Self {
    var ret: Self = undefined;
    ret.instancer = gfx.Instancer(Sprite).init(data);
    Vertex2d.genQuad(&ret.quad_verts, false);
    ret.quad = Mesh2d.init(
        &ret.quad_verts,
        &[_]u32{},
        c.GL_STATIC_DRAW,
        c.GL_TRIANGLE_STRIP,
    );
    ret.instancer.makeVertexArrayCompatible(ret.quad.vao);
    Vertex2d.genQuad(&ret.quad_centered_verts, true);
    ret.quad_centered = Mesh2d.init(
        &ret.quad_centered_verts,
        &[_]u32{},
        c.GL_STATIC_DRAW,
        c.GL_TRIANGLE_STRIP,
    );
    ret.instancer.makeVertexArrayCompatible(ret.quad_centered.vao);
    return ret;
}

pub fn deinit(self: *Self) void {
    // Release in reverse order of creation.
    self.quad_centered.deinit();
    self.quad.deinit();
    self.instancer.deinit();
}

/// Binds either the centered or the corner-origin quad for instanced drawing.
pub fn bind(
    self: *Self,
    centered: bool,
) gfx.BoundInstancer(Sprite, Vertex2d) {
    if (centered) {
        return self.instancer.bind(Vertex2d, &self.quad_centered);
    } else {
        return self.instancer.bind(Vertex2d, &self.quad);
    }
}
};

//;

/// Cached uniform locations for the default 2D shader program.
pub const Locations2d = struct {
    const Self = @This();

    screen: gfx.Location,
    view: gfx.Location,
    model: gfx.Location,
    time: gfx.Location,
    flip_uvs: gfx.Location,
    tx_diffuse: gfx.Location,
    tx_normal: gfx.Location,
    base_color: gfx.Location,

    /// Looks up every known uniform by its `_name` in `program`.
    pub fn init(program: gfx.Program) Self {
        return .{
            .screen = program.getLocation("_screen"),
            .view = program.getLocation("_view"),
            .model = program.getLocation("_model"),
            .time = program.getLocation("_time"),
            .flip_uvs = program.getLocation("_flip_uvs"),
            .tx_diffuse = program.getLocation("_tx_diffuse"),
            .tx_normal = program.getLocation("_tx_normal"),
            .base_color = program.getLocation("_base_color"),
        };
    }

    // note: doesnt set textures
    /// Resets all non-texture uniforms to identity/neutral values.
    pub fn reset(self: Self) void {
        self.screen.setMat3(Mat3.identity());
        self.view.setMat3(Mat3.identity());
        self.model.setMat3(Mat3.identity());
        self.time.setFloat(0.);
        self.flip_uvs.setBool(false);
        self.base_color.setColor(Color.initRgba(1., 1., 1., 1.));
    }
};

/// A shader program paired with its cached uniform locations.
pub const Program2d = struct {
    const Self = @This();

    program: gfx.Program,
    locations: Locations2d,

    // takes ownership of program
    pub fn init(program: gfx.Program) Self {
        return .{
            .program = program,
            .locations = Locations2d.init(program),
        };
    }

    pub fn deinit(self: *Self) void {
        self.program.deinit();
    }

    // TODO write a test that tests default program with no effects works
    // maybe dont take an allocator param here?
    // just use a global allocator for 'flat' namespace
    /// Compiles the default vertex/fragment shaders, optionally spliced with
    /// user "effect" snippets. `workspace_allocator` is used only for the
    /// temporary shader source strings, which are freed before returning.
    pub fn initDefault(
        workspace_allocator: *Allocator,
        v_effect: ?[]const u8,
        f_effect: ?[]const u8,
    ) !Self {
        const v_str = try applyEffectToDefaultShader(
            workspace_allocator,
            content.shaders.default_vert,
            content.shaders.default_vert_effect,
            v_effect,
        );
        defer workspace_allocator.free(v_str);

        const f_str = try applyEffectToDefaultShader(
            workspace_allocator,
            content.shaders.default_frag,
            content.shaders.default_frag_effect,
            f_effect,
        );
        defer workspace_allocator.free(f_str);

        var v_shader = try gfx.Shader.init(c.GL_VERTEX_SHADER, v_str);
        defer v_shader.deinit();
        var f_shader = try gfx.Shader.init(c.GL_FRAGMENT_SHADER, f_str);
        defer f_shader.deinit();
        const prog = try gfx.Program.init(&[_]gfx.Shader{ v_shader, f_shader });
        return Self.init(prog);
    }

    /// Like `initDefault`, but falls back to the spritebatch effect sources
    /// when no override is given.
    pub fn initDefaultSpritebatch(
        workspace_allocator: *Allocator,
        v_effect: ?[]const u8,
        f_effect: ?[]const u8,
    ) !Self {
        return Self.initDefault(
            workspace_allocator,
            v_effect orelse content.shaders.default_spritebatch_vert,
            f_effect orelse content.shaders.default_spritebatch_frag,
        );
    }
};

// drawer2d ===

/// Commonly used default resources: the two default programs, a 1x1 white
/// texture, the "mahou" texture, and the IBM codepage 437 bitmap font.
pub const DrawDefaults = struct {
    const Self = @This();

    program: Program2d,
    spritebatch_program: Program2d,
    white_texture: Texture,
    mahou_texture: Texture,
    ibm_font: FixedFont,

    /// Only OutOfMemory is propagated; all other init errors from the
    /// embedded assets are treated as impossible.
    pub fn init(workspace_allocator: *Allocator) Allocator.Error!Self {
        var white = try Image.init(workspace_allocator, 1, 1);
        white.data[0] = .{
            .r = 255,
            .g = 255,
            .b = 255,
            .a = 255,
        };
        defer white.deinit();

        var mahou = Image.initFromMemory(
            workspace_allocator,
            content.images.mahou,
        ) catch |err| switch (err) {
            error.OutOfMemory => |e| return e,
            else => unreachable,
        };
        defer mahou.deinit();

        var codepage437 = gfx.Image.initFromMemory(
            workspace_allocator,
            content.images.codepage437,
        ) catch |err| switch (err) {
            error.OutOfMemory => |e| return e,
            else => unreachable,
        };
        defer codepage437.deinit();

        return DrawDefaults{
            .program = Program2d.initDefault(
                workspace_allocator,
                null,
                null,
            ) catch |err| switch (err) {
                error.OutOfMemory => |e| return e,
                else => unreachable,
            },
            .spritebatch_program = Program2d.initDefaultSpritebatch(
                workspace_allocator,
                null,
                null,
            ) catch |err| switch (err) {
                error.OutOfMemory => |e| return e,
                else => unreachable,
            },
            .white_texture = Texture.initImage(white),
            .mahou_texture = Texture.initImage(mahou),
            // 9x16 glyph cells in the codepage 437 sheet.
            .ibm_font = FixedFont.init(codepage437, 9, 16),
        };
    }

    pub fn deinit(self: *Self) void {
        // NOTE(review): `mahou_texture` and `ibm_font` are not released
        // here — confirm whether that is intentional or a leak.
        self.program.deinit();
        self.spritebatch_program.deinit();
        self.white_texture.deinit();
    }
};

/// Owns the sprite buffer and coordinate stack; hands out bound drawers.
pub const Drawer2d = struct {
    const Self = @This();

    const Settings = struct {
        spritebatch_size: usize,
        circle_resolution: usize,
    };

    // TODO make a separate bindingcontext for spritebatch and shapedrawer
    const BindingContext = struct {
        program: *const Program2d,
        diffuse: *const Texture,
        canvas_width: u32,
        canvas_height: u32,
    };

    allocator: *Allocator,
    coord_stack: CoordinateStack,
    sprite_buffer: []Spritebatch.Sprite,
    sprites: Spritebatch,
    // shapes: ShapeDrawer,

    pub fn init(allocator: *Allocator, settings: Settings) !Self {
        var sprite_buffer = try allocator.alloc(Spritebatch.Sprite, settings.spritebatch_size);
        errdefer allocator.free(sprite_buffer);
        return Self{
            .allocator = allocator,
            .coord_stack = CoordinateStack.init(allocator),
            .sprite_buffer = sprite_buffer,
            .sprites = Spritebatch.init(sprite_buffer),
        };
    }

    pub fn deinit(self: *Self) void {
        self.sprites.deinit();
        self.allocator.free(self.sprite_buffer);
        self.coord_stack.deinit();
    }

    //;

    // Shared setup for any bound drawer; uniforms are pushed by the
    // Bound* wrapper's setProgram/setDiffuse calls.
    fn bind(self: *Self, ctx: BindingContext) BoundDrawer2d {
        return .{
            .drawer = self,
            .program = ctx.program,
            .diffuse = ctx.diffuse,
            .canvas_width = ctx.canvas_width,
            .canvas_height = ctx.canvas_height,
        };
    }

    // TODO put centered_quad into the binding context
    pub fn bindSpritebatch(self: *Self, centered_quad: bool, ctx: BindingContext) BoundSpritebatch {
        var ret = BoundSpritebatch{
            .base = self.bind(ctx),
            .sprites = self.sprites.bind(centered_quad),
            .sprite_transform = Transform2d.identity(),
            .sprite_uv = UV_Region.identity(),
            .sprite_color = Color.white(),
        };
        // Push program state and the diffuse texture to the GPU now.
        ret.setProgram(ctx.program);
        ret.setDiffuse(ctx.diffuse);
        return ret;
    }

    // TODO
    pub fn bindShapeDrawer(self: *Self, ctx: BindingContext) BoundShapeDrawer {}
};

/// Program/texture/coordinate-stack state shared by the bound drawers.
const BoundDrawer2d = struct {
    const Self = @This();

    drawer: *Drawer2d,
    program: *const Program2d,
    diffuse: *const Texture,
    canvas_width: u32,
    canvas_height: u32,

    // Binds `prog`, resets its uniforms, and re-uploads the screen/view
    // matrices. Also clears the coordinate stack.
    fn setProgram(self: *Self, prog: *const Program2d) void {
        self.program = prog;
        self.program.program.bind();
        self.program.locations.reset();
        self.program.locations.screen.setMat3(Mat3.orthoScreen(UVec2.init(
            self.canvas_width,
            self.canvas_height,
        )));
        self.drawer.coord_stack.clear();
        self.program.locations.view.setMat3(self.drawer.coord_stack.composed);
    }

    fn setDiffuse(self: *Self, tex: *const Texture) void {
        self.diffuse = tex;
        self.program.locations.tx_diffuse.setTextureData(.{
            .select = c.GL_TEXTURE0,
            .bind_to = c.GL_TEXTURE_2D,
            .texture = self.diffuse,
        });
    }

    // Each coord-stack mutation re-uploads the composed view matrix.
    fn pushCoord(self: *Self, t: CoordinateStack.Transform) Allocator.Error!void {
        try self.drawer.coord_stack.push(t);
        self.program.locations.view.setMat3(self.drawer.coord_stack.composed);
    }

    fn popCoord(self: *Self) CoordinateStack.Error!void {
        try self.drawer.coord_stack.pop();
        self.program.locations.view.setMat3(self.drawer.coord_stack.composed);
    }

    fn clearCoords(self: *Self) void {
        self.drawer.coord_stack.clear();
        self.program.locations.view.setMat3(self.drawer.coord_stack.composed);
    }
};

/// Batched sprite drawing; state changes (program/texture/coords) flush the
/// current batch first so already-queued sprites keep their state.
pub const BoundSpritebatch = struct {
    const Self = @This();

    base: BoundDrawer2d,
    sprites: BoundInstancer(Spritebatch.Sprite, Vertex2d),
    sprite_transform: Transform2d,
    sprite_uv: UV_Region,
    sprite_color: Color,

    //;

    pub fn setProgram(self: *Self, prog: *const Program2d) void {
        if (prog != self.base.program) {
            self.drawNow();
        }
        self.base.setProgram(prog);
    }

    pub fn setDiffuse(self: *Self, tex: *const Texture) void {
        if (tex != self.base.diffuse) {
            self.drawNow();
        }
        self.base.setDiffuse(tex);
    }

    pub fn pushCoord(self: *Self, t: CoordinateStack.Transform) Allocator.Error!void {
        self.drawNow();
        try self.base.pushCoord(t);
    }

    pub fn popCoord(self: *Self) CoordinateStack.Error!void {
        self.drawNow();
        try self.base.popCoord();
    }

    pub fn clearCoords(self: *Self) void {
        self.drawNow();
        self.base.clearCoords();
    }

    //;

    pub fn unbind(self: *Self) void {
        self.sprites.unbind();
    }

    /// Flushes all queued sprites immediately.
    pub fn drawNow(self: *Self) void {
        self.sprites.draw();
    }

    /// Queues an axis-aligned rectangle spanning (x1,y1)-(x2,y2), using the
    /// current sprite_uv and sprite_color.
    pub fn rectangle(self: *Self, x1: f32, y1: f32, x2: f32, y2: f32) void {
        var sp = self.sprites.pull();
        sp.transform.position.x = x1;
        sp.transform.position.y = y1;
        sp.transform.rotation = 0.;
        sp.transform.scale.x = x2 - x1;
        sp.transform.scale.y = y2 - y1;
        sp.uv = self.sprite_uv;
        sp.color = self.sprite_color;
    }

    /// Queues one sprite with explicit transform/uv/color.
    pub fn sprite(self: *Self, transform: Transform2d, uv: UV_Region, color: Color) void {
        var sp = self.sprites.pull();
        sp.transform = transform;
        sp.uv = uv;
        sp.color = color;
    }

    /// Queues `text` as one glyph sprite per byte, advancing left to right.
    /// Switches the diffuse texture to the font's sheet (flushing the batch
    /// if it differs).
    pub fn print(self: *Self, font: FixedFont, text: []const u8) void {
        self.setDiffuse(&font.texture);
        const fw = @intToFloat(f32, font.glyph_width);
        const fh = @intToFloat(f32, font.glyph_height);
        var x_at: f32 = 0.;
        var y_at: f32 = 0.;
        for (text) |ch| {
            var sp = self.sprites.pull();
            sp.transform.position.x = x_at;
            sp.transform.position.y = y_at;
            sp.transform.rotation = 0.;
            sp.transform.scale.x = fw;
            sp.transform.scale.y = fh;
            sp.uv = font.uvRegion(ch);
            sp.color = self.sprite_color;
            x_at += fw;
        }
    }
};

// Placeholder; see the bindShapeDrawer TODO above.
pub const BoundShapeDrawer = struct {
    const Self = @This();

    base: BoundDrawer2d,

    //;

    pub fn setProgram() void {
        //;
    }

    pub fn setTexture() void {
        //;
    }

    pub fn pushCoord() void {
        //;
    }

    pub fn popCoord() void {
        //;
    }

    //;
};

// fonts ===

// Codepage 437 font
/// Fixed-cell bitmap font: a texture sheet of width_ct x height_ct glyph
/// cells, each glyph_width x glyph_height pixels.
pub const FixedFont = struct {
    const Self = @This();

    texture: Texture,
    glyph_width: u32,
    glyph_height: u32,
    width_ct: u32,
    height_ct: u32,

    /// Uploads `image` with nearest-neighbor filtering (crisp pixel glyphs).
    pub fn init(image: Image, glyph_width: u32, glyph_height: u32) Self {
        var tex = Texture.initImage(image);
        tex.setFilter(c.GL_NEAREST, c.GL_NEAREST);
        return .{
            .texture = tex,
            .glyph_width = glyph_width,
            .glyph_height = glyph_height,
            .width_ct = image.width / glyph_width,
            .height_ct = image.height / glyph_height,
        };
    }

    pub fn deinit(self: *Self) void {
        self.texture.deinit();
    }

    //;

    /// Pixel rectangle of `glyph`'s cell within the sheet.
    pub fn region(self: Self, glyph: u8) TextureRegion {
        // glyph is a number betw 0 and 255
        // wrap it around width_ct
        const x = glyph % self.width_ct;
        const y = glyph / self.width_ct;
        return TextureRegion.init(
            @intCast(i32, x * self.glyph_width),
            @intCast(i32, y * self.glyph_height),
            @intCast(i32, (x + 1) * self.glyph_width),
            @intCast(i32, (y + 1) * self.glyph_height),
        );
    }

    /// Normalized (0..1) UV rectangle of `glyph`'s cell.
    pub fn uvRegion(self: Self, glyph: u8) UV_Region {
        const tx = self.region(glyph);
        return tx.normalized(IVec2.init(
            @intCast(i32, self.texture.width),
            @intCast(i32, self.texture.height),
        ));
    }
};
src/defaults/flat.zig
const std = @import("std");
const math = std.math;
const testing = std.testing;
const assert = std.debug.assert;
const root = @import("main.zig");
usingnamespace @import("vec3.zig");
usingnamespace @import("vec4.zig");
usingnamespace @import("mat4.zig");

pub const Quat = Quaternion(f32);
pub const Quat_f64 = Quaternion(f64);

/// A Quaternion for 3D rotations.
pub fn Quaternion(comptime T: type) type {
    if (@typeInfo(T) != .Float) {
        @compileError("Quaternion not implemented for " ++ @typeName(T));
    }

    return struct {
        w: T,
        x: T,
        y: T,
        z: T,

        const Self = @This();

        /// Construct new quaternion from floats.
        pub fn new(w: T, x: T, y: T, z: T) Self {
            return .{
                .w = w,
                .x = x,
                .y = y,
                .z = z,
            };
        }

        /// Construct most basic quaternion (the identity rotation).
        pub fn zero() Self {
            return Self.new(1, 0, 0, 0);
        }

        /// Construct new quaternion from slice.
        /// Note: Careful, the latest component `slice[3]` is the `W` component.
        pub fn fromSlice(slice: []const T) Self {
            return Self.new(slice[3], slice[0], slice[1], slice[2]);
        }

        /// Construct from a scalar part `w` and a vector part `axis`.
        pub fn fromVec3(w: T, axis: Vector3(T)) Self {
            return .{
                .w = w,
                .x = axis.x,
                .y = axis.y,
                .z = axis.z,
            };
        }

        /// Exact component-wise equality.
        pub fn eql(left: Self, right: Self) bool {
            return (left.w == right.w and
                left.x == right.x and
                left.y == right.y and
                left.z == right.z);
        }

        /// Return the unit quaternion. Asserts the length is non-zero.
        pub fn norm(self: Self) Self {
            const l = length(self);
            assert(l != 0);
            return Self.new(self.w / l, self.x / l, self.y / l, self.z / l);
        }

        pub fn length(self: Self) T {
            return math.sqrt((self.w * self.w) +
                (self.x * self.x) +
                (self.y * self.y) +
                (self.z * self.z));
        }

        pub fn sub(left: Self, right: Self) Self {
            return Self.new(
                left.w - right.w,
                left.x - right.x,
                left.y - right.y,
                left.z - right.z,
            );
        }

        pub fn add(left: Self, right: Self) Self {
            return Self.new(
                left.w + right.w,
                left.x + right.x,
                left.y + right.y,
                left.z + right.z,
            );
        }

        /// Hamilton product; composes the two rotations.
        pub fn mult(left: Self, right: Self) Self {
            var q: Self = undefined;

            q.x = (left.x * right.w) + (left.y * right.z) - (left.z * right.y) + (left.w * right.x);
            q.y = (-left.x * right.z) + (left.y * right.w) + (left.z * right.x) + (left.w * right.y);
            q.z = (left.x * right.y) - (left.y * right.x) + (left.z * right.w) + (left.w * right.z);
            q.w = (-left.x * right.x) - (left.y * right.y) - (left.z * right.z) + (left.w * right.w);

            return q;
        }

        /// Multiply each component by `scalar`.
        pub fn scale(self: Self, scalar: T) Self {
            var result: Self = undefined;
            result.w = self.w * scalar;
            result.x = self.x * scalar;
            result.y = self.y * scalar;
            result.z = self.z * scalar;

            return result;
        }

        /// Return the dot product between two quaternion.
        pub fn dot(left: Self, right: Self) T {
            return (left.x * right.x) + (left.y * right.y) + (left.z * right.z) + (left.w * right.w);
        }

        /// Convert given quaternion to rotation 4x4 matrix.
        /// Mostly taken from https://github.com/HandmadeMath/Handmade-Math.
        pub fn toMat4(self: Self) Mat4x4(T) {
            var result: Mat4x4(T) = undefined;

            const normalized = self.norm();
            const xx = normalized.x * normalized.x;
            const yy = normalized.y * normalized.y;
            const zz = normalized.z * normalized.z;
            const xy = normalized.x * normalized.y;
            const xz = normalized.x * normalized.z;
            const yz = normalized.y * normalized.z;
            const wx = normalized.w * normalized.x;
            const wy = normalized.w * normalized.y;
            const wz = normalized.w * normalized.z;

            result.data[0][0] = 1.0 - 2.0 * (yy + zz);
            result.data[0][1] = 2.0 * (xy + wz);
            result.data[0][2] = 2.0 * (xz - wy);
            result.data[0][3] = 0.0;

            result.data[1][0] = 2.0 * (xy - wz);
            result.data[1][1] = 1.0 - 2.0 * (xx + zz);
            result.data[1][2] = 2.0 * (yz + wx);
            result.data[1][3] = 0.0;

            result.data[2][0] = 2.0 * (xz + wy);
            result.data[2][1] = 2.0 * (yz - wx);
            result.data[2][2] = 1.0 - 2.0 * (xx + yy);
            result.data[2][3] = 0.0;

            result.data[3][0] = 0.0;
            result.data[3][1] = 0.0;
            result.data[3][2] = 0.0;
            result.data[3][3] = 1.0;

            return result;
        }

        /// From Mike Day at Insomniac Games.
        /// For more details: https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf
        pub fn fromMat4(m: Mat4x4(T)) Self {
            // Use T (not f32) so Quaternion(f64) keeps full precision; the
            // branch picks the largest of the four candidate traces to keep
            // `t` well away from zero.
            var t: T = 0;
            var result: Self = undefined;

            if (m.data[2][2] < 0.0) {
                if (m.data[0][0] > m.data[1][1]) {
                    t = 1 + m.data[0][0] - m.data[1][1] - m.data[2][2];
                    result = Self.new(
                        m.data[1][2] - m.data[2][1],
                        t,
                        m.data[0][1] + m.data[1][0],
                        m.data[2][0] + m.data[0][2],
                    );
                } else {
                    t = 1 - m.data[0][0] + m.data[1][1] - m.data[2][2];
                    result = Self.new(
                        m.data[2][0] - m.data[0][2],
                        m.data[0][1] + m.data[1][0],
                        t,
                        m.data[1][2] + m.data[2][1],
                    );
                }
            } else {
                if (m.data[0][0] < -m.data[1][1]) {
                    t = 1 - m.data[0][0] - m.data[1][1] + m.data[2][2];
                    result = Self.new(
                        m.data[0][1] - m.data[1][0],
                        m.data[2][0] + m.data[0][2],
                        m.data[1][2] + m.data[2][1],
                        t,
                    );
                } else {
                    t = 1 + m.data[0][0] + m.data[1][1] + m.data[2][2];
                    result = Self.new(
                        t,
                        m.data[1][2] - m.data[2][1],
                        m.data[2][0] - m.data[0][2],
                        m.data[0][1] - m.data[1][0],
                    );
                }
            }

            return Self.scale(result, 0.5 / math.sqrt(t));
        }

        /// Convert all Euler angles (in degrees) to quaternion.
        pub fn fromEulerAngle(axis: Vector3(T)) Self {
            const x = Self.fromAxis(axis.x, Vec3.new(1, 0, 0));
            const y = Self.fromAxis(axis.y, Vec3.new(0, 1, 0));
            const z = Self.fromAxis(axis.z, Vec3.new(0, 0, 1));

            return z.mult(y.mult(x));
        }

        /// Convert Euler angle around specified axis to quaternion.
        pub fn fromAxis(degrees: T, axis: Vector3(T)) Self {
            const radians = root.toRadians(degrees);

            const rot_sin = math.sin(radians / 2.0);
            const quat_axis = axis.norm().scale(rot_sin);
            const w = math.cos(radians / 2.0);

            return Self.fromVec3(w, quat_axis);
        }

        /// Extract euler angles (degrees) from quaternion.
        pub fn extractRotation(self: Self) Vector3(T) {
            const yaw = math.atan2(
                T,
                2.0 * (self.y * self.z + self.w * self.x),
                self.w * self.w - self.x * self.x - self.y * self.y + self.z * self.z,
            );
            const pitch = math.asin(
                -2.0 * (self.x * self.z - self.w * self.y),
            );
            const roll = math.atan2(
                T,
                2.0 * (self.x * self.y + self.w * self.z),
                self.w * self.w + self.x * self.x - self.y * self.y - self.z * self.z,
            );

            return Vector3(T).new(root.toDegrees(yaw), root.toDegrees(pitch), root.toDegrees(roll));
        }

        /// Component-wise lerp between two quaternions (not normalized).
        pub fn lerp(left: Self, right: Self, t: f32) Self {
            const w = root.lerp(T, left.w, right.w, t);
            const x = root.lerp(T, left.x, right.x, t);
            const y = root.lerp(T, left.y, right.y, t);
            const z = root.lerp(T, left.z, right.z, t);
            return Self.new(w, x, y, z);
        }

        // Shortest path slerp between two quaternions.
        // Taken from "Physically Based Rendering, 3rd Edition, Chapter 2.9.2"
        // https://pbr-book.org/3ed-2018/Geometry_and_Transformations/Animating_Transformations#QuaternionInterpolation
        pub fn slerp(left: Self, right: Self, t: f32) Self {
            const parallel_threshold: f32 = 0.9995;
            var cos_theta = dot(left, right);
            var right1 = right;

            // We need the absolute value of the dot product to take the shortest path
            if (cos_theta < 0.0) {
                cos_theta *= -1;
                right1 = right.scale(-1);
            }

            if (cos_theta > parallel_threshold) {
                // Use regular old lerp to avoid numerical instability
                return lerp(left, right1, t);
            } else {
                var theta = math.acos(math.clamp(cos_theta, -1, 1));
                var thetap = theta * t;
                var qperp = right1.sub(left.scale(cos_theta)).norm();
                return left.scale(math.cos(thetap)).add(qperp.scale(math.sin(thetap)));
            }
        }

        /// Rotate the vector v using the sandwich product.
        /// Taken from "Foundations of Game Engine Development Vol. 1 Mathematics".
        pub fn rotateVec(self: Self, v: Vector3(T)) Vector3(T) {
            const q = self.norm();
            const b = Vector3(T).new(q.x, q.y, q.z);
            const b2 = b.x * b.x + b.y * b.y + b.z * b.z;

            return v.scale(q.w * q.w - b2).add(b.scale(v.dot(b) * 2.0)).add(b.cross(v).scale(q.w * 2.0));
        }
    };
}

test "zalgebra.Quaternion.new" {
    const q = Quat.new(1.5, 2.6, 3.7, 4.7);

    try testing.expectEqual(q.w, 1.5);
    try testing.expectEqual(q.x, 2.6);
    try testing.expectEqual(q.y, 3.7);
    try testing.expectEqual(q.z, 4.7);
}

test "zalgebra.Quaternion.fromSlice" {
    const array = [4]f32{ 2, 3, 4, 1 };
    try testing.expectEqual(Quat.eql(Quat.fromSlice(&array), Quat.new(1, 2, 3, 4)), true);
}

test "zalgebra.Quaternion.fromVec3" {
    const q = Quat.fromVec3(1.5, Vec3.new(2.6, 3.7, 4.7));

    try testing.expectEqual(q.w, 1.5);
    try testing.expectEqual(q.x, 2.6);
    try testing.expectEqual(q.y, 3.7);
    try testing.expectEqual(q.z, 4.7);
}

test "zalgebra.Quaternion.eql" {
    const q1 = Quat.fromVec3(1.5, Vec3.new(2.6, 3.7, 4.7));
    const q2 = Quat.fromVec3(1.5, Vec3.new(2.6, 3.7, 4.7));
    const q3 = Quat.fromVec3(1, Vec3.new(2.6, 3.7, 4.7));

    try testing.expectEqual(q1.eql(q2), true);
    try testing.expectEqual(q1.eql(q3), false);
}

test "zalgebra.Quaternion.norm" {
    const q1 = Quat.fromVec3(1, Vec3.new(2, 2.0, 2.0));
    const q2 = Quat.fromVec3(0.2773500978946686, Vec3.new(0.5547001957893372, 0.5547001957893372, 0.5547001957893372));

    try testing.expectEqual(q1.norm().eql(q2), true);
}

test "zalgebra.Quaternion.fromEulerAngle" {
    const q1 = Quat.fromEulerAngle(Vec3.new(10, 5, 45));
    const res_q1 = q1.extractRotation();

    const q2 = Quat.fromEulerAngle(Vec3.new(0, 55, 22));
    const res_q2 = q2.toMat4().extractRotation();

    try testing.expectEqual(Vec3.eql(res_q1, Vec3.new(9.999999046325684, 5.000000476837158, 45)), true);
    try testing.expectEqual(Vec3.eql(res_q2, Vec3.new(0, 47.245025634765625, 22)), true);
}

test "zalgebra.Quaternion.fromAxis" {
    const q1 = Quat.fromAxis(45, Vec3.new(0, 1, 0));
    const res_q1 = q1.extractRotation();

    try testing.expectEqual(Vec3.eql(res_q1, Vec3.new(0, 45.0000076, 0)), true);
}

test "zalgebra.Quaternion.extractRotation" {
    const q1 = Quat.fromVec3(0.5, Vec3.new(0.5, 1, 0.3));
    const res_q1 = q1.extractRotation();

    try testing.expectEqual(Vec3.eql(res_q1, Vec3.new(129.6000213623047, 44.427005767822266, 114.41073608398438)), true);
}

test "zalgebra.Quaternion.rotateVec" {
    const eps_value = comptime std.math.epsilon(f32);
    const q = Quat.fromEulerAngle(Vec3.new(45, 45, 45));
    const m = q.toMat4();

    const v = Vec3.new(0, 1, 0);
    const v1 = q.rotateVec(v);
    const v2 = m.multByVec4(Vec4.new(v.x, v.y, v.z, 1.0));

    try testing.expect(std.math.approxEqAbs(f32, v1.x, -1.46446585e-01, eps_value));
    try testing.expect(std.math.approxEqAbs(f32, v1.y, 8.53553473e-01, eps_value));
    try testing.expect(std.math.approxEqAbs(f32, v1.z, 0.5, eps_value));

    try testing.expect(std.math.approxEqAbs(f32, v1.x, v2.x, eps_value));
    try testing.expect(std.math.approxEqAbs(f32, v1.y, v2.y, eps_value));
    try testing.expect(std.math.approxEqAbs(f32, v1.z, v2.z, eps_value));
}

test "zalgebra.Quaternion.lerp" {
    const eps_value = comptime std.math.epsilon(f32);
    var v1 = Quat.zero();
    var v2 = Quat.fromAxis(180, Vec3.up());

    try testing.expectEqual(Quat.eql(
        Quat.lerp(v1, v2, 1.0),
        v2,
    ), true);
    var v3 = Quat.lerp(v1, v2, 0.5);
    var v4 = Quat.new(4.99999970e-01, 0, 4.99999970e-01, 0);
    try testing.expect(std.math.approxEqAbs(f32, v3.w, v4.w, eps_value));
    try testing.expect(std.math.approxEqAbs(f32, v3.x, v4.x, eps_value));
    try testing.expect(std.math.approxEqAbs(f32, v3.y, v4.y, eps_value));
    try testing.expect(std.math.approxEqAbs(f32, v3.z, v4.z, eps_value));
}

test "zalgebra.Quaternion.slerp" {
    const eps_value = comptime std.math.epsilon(f32);
    var v1 = Quat.zero();
    var v2 = Quat.fromAxis(180, Vec3.up());

    try testing.expectEqual(Quat.eql(
        Quat.slerp(v1, v2, 1.0),
        Quat.new(7.54979012e-08, 0, -1, 0),
    ), true);

    var v3 = Quat.slerp(v1, v2, 0.5);
    var v4 = Quat.new(7.071067e-01, 0, -7.071067e-01, 0);

    try testing.expect(std.math.approxEqAbs(f32, v3.w, v4.w, eps_value));
    try testing.expect(std.math.approxEqAbs(f32, v3.x, v4.x, eps_value));
    try testing.expect(std.math.approxEqAbs(f32, v3.y, v4.y, eps_value));
    try testing.expect(std.math.approxEqAbs(f32, v3.z, v4.z, eps_value));
}
src/quaternion.zig
const std = @import("std");
const assert = std.debug.assert;
const tools = @import("tools");

const Vec2 = tools.Vec2;

/// Advent of Code 2018, day 22 (cave "erosion" puzzle — see the input path
/// at the bottom of the file). Returns the two answer strings, allocated
/// with `allocator` (caller owns).
pub fn run(input_text: []const u8, allocator: std.mem.Allocator) ![2][]const u8 {
    var arena = std.heap.ArenaAllocator.init(allocator);
    defer arena.deinit();

    // Parse "depth: N" and "target: X,Y" from the puzzle input; both must
    // be present (asserted via the `.?` unwraps below).
    const param: struct {
        depth: u32,
        target: Vec2,
    } = param: {
        var depth: ?u32 = null;
        var target: ?Vec2 = null;
        var it = std.mem.tokenize(u8, input_text, "\n\r");
        while (it.next()) |line| {
            if (tools.match_pattern("depth: {}", line)) |fields| {
                depth = @intCast(u32, fields[0].imm);
            } else if (tools.match_pattern("target: {},{}", line)) |fields| {
                target = Vec2{ .x = @intCast(i32, fields[0].imm), .y = @intCast(i32, fields[1].imm) };
            } else unreachable;
        }
        break :param .{ .depth = depth.?, .target = target.? };
    };

    // Map of erosion levels, computed over a fixed 501x2001 region (the
    // part-2 search may need cells beyond the target).
    var map = tools.Map(u16, 512, 2048, false){ .default_tile = 0 };
    {
        var p = Vec2{ .x = 0, .y = 0 };
        while (p.y <= 2000) : (p.y += 1) {
            p.x = 0;
            while (p.x <= 500) : (p.x += 1) {
                // Geologic index: x*16807 on the top row, y*48271 on the
                // left column, 0 at the target, otherwise the product of
                // the west and north erosion levels.
                const geo_idx: i32 = blk: {
                    if (p.y == 0) break :blk p.x * 16807;
                    if (p.x == 0) break :blk p.y * 48271;
                    if (p.eq(param.target)) break :blk 0;
                    break :blk @intCast(i32, map.at(p.add(Vec2{ .x = -1, .y = 0 }))) * @intCast(i32, map.at(p.add(Vec2{ .x = 0, .y = -1 })));
                };
                // Erosion level; the region type is this value mod 3.
                const level = (@intCast(u32, geo_idx) + param.depth) % 20183;
                map.set(p, @intCast(u16, level));
            }
        }
    }

    // Part 1: risk level = sum of region types (erosion % 3) over the
    // rectangle from the origin to the target.
    const ans1 = ans: {
        var sum: i32 = 0;
        var it = map.iter(tools.BBox{ .min = Vec2{ .x = 0, .y = 0 }, .max = param.target });
        while (it.next()) |region| {
            sum += region % 3;
        }
        // std.debug.print("checksum: {}\n", .{sum});
        break :ans sum;
    };

    // Part 2: best-first search over (position, tool) states. Switching
    // tools costs 7, moving one step costs 1; the goal is the target cell
    // holding the torch. `best_steps` starts at 2000 as an upper bound and
    // is used to prune states that can no longer improve on it.
    const ans2 = ans: {
        const Tool = enum { neither, climbing_gear, torch };
        const State = struct { p: Vec2, tool: Tool };
        const BFS = tools.BestFirstSearch(State, void);
        var bfs = BFS.init(allocator);
        defer bfs.deinit();

        const init_state = State{ .p = Vec2{ .x = 0, .y = 0 }, .tool = .torch };
        try bfs.insert(BFS.Node{ .state = init_state, .cost = 0, .rating = 0, .trace = {} });

        var best_steps: u32 = 2000;
        while (bfs.pop()) |cur| {
            if (Vec2.eq(cur.state.p, param.target) and cur.state.tool == .torch) { // goal!
                if (best_steps >= cur.cost) {
                    // std.debug.print("new best: {}, agendalen:{}\n", .{ cur.cost, bfs.agenda.count() });
                    best_steps = cur.cost;
                }
                continue;
            }

            // Neighbor states 1: switch to the other tool allowed in the
            // current region (cost 7).
            if (cur.cost + 7 < best_steps) {
                var next = cur;
                const region = map.at(cur.state.p) % 3;
                const allowed_tools = switch (region) {
                    //rocky
                    0 => &[_]Tool{ .climbing_gear, .torch },
                    //wet
                    1 => &[_]Tool{ .climbing_gear, .neither },
                    //narrow
                    2 => &[_]Tool{ .torch, .neither },
                    else => unreachable,
                };
                for (allowed_tools) |tool| {
                    if (tool != cur.state.tool) {
                        next.state.tool = tool;
                        next.state.p = cur.state.p;
                        next.cost = cur.cost + 7;
                        // rating = manhattan-distance heuristic + cost so far.
                        next.rating = @intCast(i32, Vec2.dist(next.state.p, param.target) * 1 + next.cost);
                        try bfs.insert(next);
                    }
                }
            }

            // Neighbor states 2: step to an adjacent cell (cost 1) if the
            // current tool is allowed there.
            if (cur.cost + 1 < best_steps) {
                var next = cur;
                for (Vec2.cardinal_dirs) |dir| {
                    const next_p = cur.state.p.add(dir);
                    if (next_p.x < 0 or next_p.y < 0) continue;
                    const next_region = (map.get(next_p) orelse continue) % 3;
                    const allowed_tools = switch (next_region) {
                        //rocky
                        0 => &[_]Tool{ .climbing_gear, .torch },
                        //wet
                        1 => &[_]Tool{ .climbing_gear, .neither },
                        //narrow
                        2 => &[_]Tool{ .torch, .neither },
                        else => unreachable,
                    };
                    if (std.mem.indexOfScalar(Tool, allowed_tools, cur.state.tool) == null) continue;
                    next.state.tool = cur.state.tool;
                    next.state.p = next_p;
                    next.cost = cur.cost + 1;
                    next.rating = @intCast(i32, Vec2.dist(next.state.p, param.target) * 1 + next.cost);
                    try bfs.insert(next);
                }
            }
        }
        break :ans best_steps; // 2559 too high
    };

    return [_][]const u8{
        try std.fmt.allocPrint(allocator, "{}", .{ans1}),
        try std.fmt.allocPrint(allocator, "{}", .{ans2}),
    };
}

pub const main = tools.defaultMain("2018/input_day22.txt", run);
2018/day22.zig
// NOTE(review): machine-generated Win32 COM interface bindings (vtable
// structs + MethodMixin call wrappers). Edit the generator, not this file.

//--------------------------------------------------------------------------------
// Section: Types (2)
//--------------------------------------------------------------------------------
const IID_IDisplayDeviceInterop_Value = @import("../../zig.zig").Guid.initString("64338358-366a-471b-bd56-dd8ef48e439b");
pub const IID_IDisplayDeviceInterop = &IID_IDisplayDeviceInterop_Value;
pub const IDisplayDeviceInterop = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CreateSharedHandle: fn(
            self: *const IDisplayDeviceInterop,
            pObject: ?*IInspectable,
            pSecurityAttributes: ?*const SECURITY_ATTRIBUTES,
            Access: u32,
            Name: ?HSTRING,
            pHandle: ?*?HANDLE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OpenSharedHandle: fn(
            self: *const IDisplayDeviceInterop,
            NTHandle: ?HANDLE,
            riid: Guid,
            ppvObj: ?*?*anyopaque,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDisplayDeviceInterop_CreateSharedHandle(self: *const T, pObject: ?*IInspectable, pSecurityAttributes: ?*const SECURITY_ATTRIBUTES, Access: u32, Name: ?HSTRING, pHandle: ?*?HANDLE) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDisplayDeviceInterop.VTable, self.vtable).CreateSharedHandle(@ptrCast(*const IDisplayDeviceInterop, self), pObject, pSecurityAttributes, Access, Name, pHandle);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDisplayDeviceInterop_OpenSharedHandle(self: *const T, NTHandle: ?HANDLE, riid: Guid, ppvObj: ?*?*anyopaque) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDisplayDeviceInterop.VTable, self.vtable).OpenSharedHandle(@ptrCast(*const IDisplayDeviceInterop, self), NTHandle, riid, ppvObj);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IDisplayPathInterop_Value = @import("../../zig.zig").Guid.initString("a6ba4205-e59e-4e71-b25b-4e436d21ee3d");
pub const IID_IDisplayPathInterop = &IID_IDisplayPathInterop_Value;
pub const IDisplayPathInterop = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CreateSourcePresentationHandle: fn(
            self: *const IDisplayPathInterop,
            pValue: ?*?HANDLE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSourceId: fn(
            self: *const IDisplayPathInterop,
            pSourceId: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDisplayPathInterop_CreateSourcePresentationHandle(self: *const T, pValue: ?*?HANDLE) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDisplayPathInterop.VTable, self.vtable).CreateSourcePresentationHandle(@ptrCast(*const IDisplayPathInterop, self), pValue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDisplayPathInterop_GetSourceId(self: *const T, pSourceId: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDisplayPathInterop.VTable, self.vtable).GetSourceId(@ptrCast(*const IDisplayPathInterop, self), pSourceId);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};


//--------------------------------------------------------------------------------
// Section: Functions (0)
//--------------------------------------------------------------------------------

//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
pub usingnamespace switch (@import("../../zig.zig").unicode_mode) {
    .ansi => struct {
    },
    .wide => struct {
    },
    .unspecified => if (@import("builtin").is_test) struct {
    } else struct {
    },
};

//--------------------------------------------------------------------------------
// Section: Imports (7)
//--------------------------------------------------------------------------------
const Guid = @import("../../zig.zig").Guid;
const HANDLE = @import("../../foundation.zig").HANDLE;
const HRESULT = @import("../../foundation.zig").HRESULT;
const HSTRING = @import("../../system/win_rt.zig").HSTRING;
const IInspectable = @import("../../system/win_rt.zig").IInspectable;
const IUnknown = @import("../../system/com.zig").IUnknown;
const SECURITY_ATTRIBUTES = @import("../../security.zig").SECURITY_ATTRIBUTES;

// Smoke test: reference all the pub declarations so they get analyzed.
test {
    @setEvalBranchQuota(
        @import("std").meta.declarations(@This()).len * 3
    );

    // reference all the pub declarations
    if (!@import("builtin").is_test) return;
    inline for (@import("std").meta.declarations(@This())) |decl| {
        if (decl.is_pub) {
            _ = decl;
        }
    }
}
win32/system/win_rt/display.zig
//! Freestanding libc shims: exports a minimal set of mem*/str* routines (and
//! `clone` on Linux) so that objects expecting a C runtime can link without one.
const std = @import("std");
const builtin = @import("builtin");
const math = std.math;
const isNan = std.math.isNan;
const maxInt = std.math.maxInt;
const native_os = builtin.os.tag;
const native_arch = builtin.cpu.arch;
const native_abi = builtin.abi;

// Target predicates used to decide which symbols get exported below.
const is_wasm = switch (native_arch) {
    .wasm32, .wasm64 => true,
    else => false,
};
const is_msvc = switch (native_abi) {
    .msvc => true,
    else => false,
};
const is_freestanding = switch (native_os) {
    .freestanding => true,
    else => false,
};

comptime {
    // Freestanding wasm linked against libc: provide the `_start` entry point.
    if (is_freestanding and is_wasm and builtin.link_libc) {
        @export(wasm_start, .{ .name = "_start", .linkage = .Strong });
    }
    if (native_os == .linux) {
        @export(clone, .{ .name = "clone" });
    }
    // Memory primitives are exported unconditionally; compilers emit calls to
    // these even in code that never names them.
    @export(memset, .{ .name = "memset", .linkage = .Strong });
    @export(__memset, .{ .name = "__memset", .linkage = .Strong });
    @export(memcpy, .{ .name = "memcpy", .linkage = .Strong });
    @export(memmove, .{ .name = "memmove", .linkage = .Strong });
    @export(memcmp, .{ .name = "memcmp", .linkage = .Strong });
    @export(bcmp, .{ .name = "bcmp", .linkage = .Strong });
    if (builtin.link_libc) {
        @export(strcmp, .{ .name = "strcmp", .linkage = .Strong });
        @export(strncmp, .{ .name = "strncmp", .linkage = .Strong });
        @export(strerror, .{ .name = "strerror", .linkage = .Strong });
        @export(strlen, .{ .name = "strlen", .linkage = .Strong });
        @export(strcpy, .{ .name = "strcpy", .linkage = .Strong });
        @export(strncpy, .{ .name = "strncpy", .linkage = .Strong });
        @export(strcat, .{ .name = "strcat", .linkage = .Strong });
        @export(strncat, .{ .name = "strncat", .linkage = .Strong });
    } else if (is_msvc) {
        // MSVC-compiled objects using floating point reference this symbol.
        @export(_fltused, .{ .name = "_fltused", .linkage = .Strong });
    }
}

// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
/// Panic handler for this object file: route to the test runner's panic when
/// testing, abort where an OS exists, otherwise spin forever.
pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
    @setCold(true);
    _ = error_return_trace;
    if (builtin.is_test) {
        std.debug.panic("{s}", .{msg});
    }
    if (native_os != .freestanding and native_os != .other) {
        std.os.abort();
    }
    while (true) {}
}

// C `main` provided by the linked program; called by the wasm entry point.
extern fn main(argc: c_int, argv: [*:null]?[*:0]u8) c_int;
fn wasm_start() callconv(.C) void {
    _ = main(0, undefined);
}

/// C memset: fill `len` bytes at `dest` with `c`. Returns `dest`.
fn memset(dest: ?[*]u8, c: u8, len: usize) callconv(.C) ?[*]u8 {
    @setRuntimeSafety(false);
    if (len != 0) {
        var d = dest.?;
        var n = len;
        while (true) {
            d.* = c;
            n -= 1;
            if (n == 0) break;
            d += 1;
        }
    }
    return dest;
}

/// Fortified memset variant: `dest_n` is the real destination capacity; panics
/// on overflow instead of corrupting memory.
fn __memset(dest: ?[*]u8, c: u8, n: usize, dest_n: usize) callconv(.C) ?[*]u8 {
    if (dest_n < n) @panic("buffer overflow");
    return memset(dest, c, n);
}

/// C memcpy: copy `len` bytes; source and destination must not overlap.
fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, len: usize) callconv(.C) ?[*]u8 {
    @setRuntimeSafety(false);
    if (len != 0) {
        var d = dest.?;
        var s = src.?;
        var n = len;
        while (true) {
            d[0] = s[0];
            n -= 1;
            if (n == 0) break;
            d += 1;
            s += 1;
        }
    }
    return dest;
}

/// C memmove: overlap-safe copy — forward when dest precedes src, else backward.
fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8 {
    @setRuntimeSafety(false);
    if (@ptrToInt(dest) < @ptrToInt(src)) {
        var index: usize = 0;
        while (index != n) : (index += 1) {
            dest.?[index] = src.?[index];
        }
    } else {
        var index = n;
        while (index != 0) {
            index -= 1;
            dest.?[index] = src.?[index];
        }
    }
    return dest;
}

/// C memcmp: three-way byte compare of `n` bytes.
/// The wrapping subtraction bit-cast to i8 yields the sign of the first
/// differing byte pair.
fn memcmp(vl: ?[*]const u8, vr: ?[*]const u8, n: usize) callconv(.C) c_int {
    @setRuntimeSafety(false);
    var index: usize = 0;
    while (index != n) : (index += 1) {
        const compare_val = @bitCast(i8, vl.?[index] -% vr.?[index]);
        if (compare_val != 0) {
            return compare_val;
        }
    }
    return 0;
}

test "memcmp" {
    const base_arr = &[_]u8{ 1, 1, 1 };
    const arr1 = &[_]u8{ 1, 1, 1 };
    const arr2 = &[_]u8{ 1, 0, 1 };
    const arr3 = &[_]u8{ 1, 2, 1 };
    try std.testing.expect(memcmp(base_arr[0..], arr1[0..], base_arr.len) == 0);
    try std.testing.expect(memcmp(base_arr[0..], arr2[0..], base_arr.len) > 0);
    try std.testing.expect(memcmp(base_arr[0..], arr3[0..], base_arr.len) < 0);
}

/// C bcmp: equality-only compare — any nonzero result just means "differs";
/// unlike memcmp the sign carries no meaning.
fn bcmp(vl: [*]allowzero const u8, vr: [*]allowzero const u8, n: usize) callconv(.C) c_int {
    @setRuntimeSafety(false);
    var index: usize = 0;
    while (index != n) : (index += 1) {
        if (vl[index] != vr[index]) {
            return 1;
        }
    }
    return 0;
}

test "bcmp" {
    const base_arr = &[_]u8{ 1, 1, 1 };
    const arr1 = &[_]u8{ 1, 1, 1 };
    const arr2 = &[_]u8{ 1, 0, 1 };
    const arr3 = &[_]u8{ 1, 2, 1 };
    try std.testing.expect(bcmp(base_arr[0..], arr1[0..], base_arr.len) == 0);
    try std.testing.expect(bcmp(base_arr[0..], arr2[0..], base_arr.len) != 0);
    try std.testing.expect(bcmp(base_arr[0..], arr3[0..], base_arr.len) != 0);
}

// Referenced (via the export above) by MSVC objects that use floating point.
var _fltused: c_int = 1;

/// C strcpy: copy NUL-terminated `src` into `dest`, including the terminator.
/// `dest` must be large enough; no bounds are checked (C ABI contract).
fn strcpy(dest: [*:0]u8, src: [*:0]const u8) callconv(.C) [*:0]u8 {
    var i: usize = 0;
    while (src[i] != 0) : (i += 1) {
        dest[i] = src[i];
    }
    dest[i] = 0;
    return dest;
}

test "strcpy" {
    var s1: [9:0]u8 = undefined;
    s1[0] = 0;
    _ = strcpy(&s1, "foobarbaz");
    try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.sliceTo(&s1, 0));
}

/// C strncpy: copy at most `n` bytes of `src`, then zero-fill the remainder of
/// the `n`-byte window. Note: per C semantics, no terminator is written when
/// `src` is `n` bytes or longer.
fn strncpy(dest: [*:0]u8, src: [*:0]const u8, n: usize) callconv(.C) [*:0]u8 {
    var i: usize = 0;
    while (i < n and src[i] != 0) : (i += 1) {
        dest[i] = src[i];
    }
    while (i < n) : (i += 1) {
        dest[i] = 0;
    }
    return dest;
}

test "strncpy" {
    var s1: [9:0]u8 = undefined;
    s1[0] = 0;
    _ = strncpy(&s1, "foobarbaz", @sizeOf(@TypeOf(s1)));
    try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.sliceTo(&s1, 0));
}

/// C strcat: append `src` (with terminator) after the existing contents of `dest`.
fn strcat(dest: [*:0]u8, src: [*:0]const u8) callconv(.C) [*:0]u8 {
    var dest_end: usize = 0;
    while (dest[dest_end] != 0) : (dest_end += 1) {}
    var i: usize = 0;
    while (src[i] != 0) : (i += 1) {
        dest[dest_end + i] = src[i];
    }
    dest[dest_end + i] = 0;
    return dest;
}

test "strcat" {
    var s1: [9:0]u8 = undefined;
    s1[0] = 0;
    _ = strcat(&s1, "foo");
    _ = strcat(&s1, "bar");
    _ = strcat(&s1, "baz");
    try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.sliceTo(&s1, 0));
}

/// C strncat: append at most `avail` bytes of `src`, always writing a terminator.
fn strncat(dest: [*:0]u8, src: [*:0]const u8, avail: usize) callconv(.C) [*:0]u8 {
    var dest_end: usize = 0;
    while (dest[dest_end] != 0) : (dest_end += 1) {}
    var i: usize = 0;
    while (i < avail and src[i] != 0) : (i += 1) {
        dest[dest_end + i] = src[i];
    }
    dest[dest_end + i] = 0;
    return dest;
}

test "strncat" {
    var s1: [9:0]u8 = undefined;
    s1[0] = 0;
    _ = strncat(&s1, "foo1111", 3);
    _ = strncat(&s1, "bar1111", 3);
    _ = strncat(&s1, "baz1111", 3);
    try std.testing.expectEqualSlices(u8, "foobarbaz", std.mem.sliceTo(&s1, 0));
}

/// C strcmp: three-way compare of NUL-terminated strings.
fn strcmp(s1: [*:0]const u8, s2: [*:0]const u8) callconv(.C) c_int {
    return std.cstr.cmp(s1, s2);
}

/// C strlen: byte count up to (excluding) the NUL terminator.
fn strlen(s: [*:0]const u8) callconv(.C) usize {
    return std.mem.len(s);
}

/// C strncmp: three-way compare of at most `_n` bytes.
/// Bytes compare as unsigned, so e.g. 0xff > 0x02 (see the test below).
fn strncmp(_l: [*:0]const u8, _r: [*:0]const u8, _n: usize) callconv(.C) c_int {
    if (_n == 0) return 0;
    var l = _l;
    var r = _r;
    var n = _n - 1;
    while (l[0] != 0 and r[0] != 0 and n != 0 and l[0] == r[0]) {
        l += 1;
        r += 1;
        n -= 1;
    }
    return @as(c_int, l[0]) - @as(c_int, r[0]);
}

/// C strerror: stub — returns a fixed placeholder regardless of `errnum`.
fn strerror(errnum: c_int) callconv(.C) [*:0]const u8 {
    _ = errnum;
    return "TODO strerror implementation";
}

test "strncmp" {
    try std.testing.expect(strncmp("a", "b", 1) == -1);
    try std.testing.expect(strncmp("a", "c", 1) == -2);
    try std.testing.expect(strncmp("b", "a", 1) == 1);
    try std.testing.expect(strncmp("\xff", "\x02", 1) == 253);
}

// TODO we should be able to put this directly in std/linux/x86_64.zig but
// it causes a segfault in release mode. this is a workaround of calling it
// across .o file boundaries. fix comptime @ptrCast of nakedcc functions.
/// Raw `clone` trampoline, one hand-written syscall sequence per architecture.
/// Exported as the C symbol `clone` on Linux (see the comptime block above).
/// Common shape on every arch: save func/arg onto the (aligned) child stack,
/// shuffle the remaining C arguments into the kernel's SYS_clone registers,
/// trap; the parent returns the child pid (or negated errno), the child pops
/// func/arg, calls func(arg), and passes its return value to SYS_exit.
/// NOTE(review): per-arch register comments below are from the original author;
/// kept verbatim.
fn clone() callconv(.Naked) void {
    switch (native_arch) {
        .i386 => {
            // __clone(func, stack, flags, arg, ptid, tls, ctid)
            //         +8,   +12,   +16,   +20, +24,  +28, +32
            // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
            //         eax,       ebx,   ecx,   edx,  esi, edi
            asm volatile (
                \\ push %%ebp
                \\ mov %%esp,%%ebp
                \\ push %%ebx
                \\ push %%esi
                \\ push %%edi
                \\ // Setup the arguments
                \\ mov 16(%%ebp),%%ebx
                \\ mov 12(%%ebp),%%ecx
                \\ and $-16,%%ecx
                \\ sub $20,%%ecx
                \\ mov 20(%%ebp),%%eax
                \\ mov %%eax,4(%%ecx)
                \\ mov 8(%%ebp),%%eax
                \\ mov %%eax,0(%%ecx)
                \\ mov 24(%%ebp),%%edx
                \\ mov 28(%%ebp),%%esi
                \\ mov 32(%%ebp),%%edi
                \\ mov $120,%%eax
                \\ int $128
                \\ test %%eax,%%eax
                \\ jnz 1f
                \\ pop %%eax
                \\ xor %%ebp,%%ebp
                \\ call *%%eax
                \\ mov %%eax,%%ebx
                \\ xor %%eax,%%eax
                \\ inc %%eax
                \\ int $128
                \\ hlt
                \\1:
                \\ pop %%edi
                \\ pop %%esi
                \\ pop %%ebx
                \\ pop %%ebp
                \\ ret
            );
        },
        .x86_64 => {
            asm volatile (
                \\ xor %%eax,%%eax
                \\ mov $56,%%al // SYS_clone
                \\ mov %%rdi,%%r11
                \\ mov %%rdx,%%rdi
                \\ mov %%r8,%%rdx
                \\ mov %%r9,%%r8
                \\ mov 8(%%rsp),%%r10
                \\ mov %%r11,%%r9
                \\ and $-16,%%rsi
                \\ sub $8,%%rsi
                \\ mov %%rcx,(%%rsi)
                \\ syscall
                \\ test %%eax,%%eax
                \\ jnz 1f
                \\ xor %%ebp,%%ebp
                \\ pop %%rdi
                \\ call *%%r9
                \\ mov %%eax,%%edi
                \\ xor %%eax,%%eax
                \\ mov $60,%%al // SYS_exit
                \\ syscall
                \\ hlt
                \\1: ret
                \\
            );
        },
        .aarch64 => {
            // __clone(func, stack, flags, arg, ptid, tls, ctid)
            //         x0,   x1,   w2,    x3,  x4,   x5,  x6
            // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
            //         x8,        x0,    x1,    x2,   x3,  x4
            asm volatile (
                \\ // align stack and save func,arg
                \\ and x1,x1,#-16
                \\ stp x0,x3,[x1,#-16]!
                \\
                \\ // syscall
                \\ uxtw x0,w2
                \\ mov x2,x4
                \\ mov x3,x5
                \\ mov x4,x6
                \\ mov x8,#220 // SYS_clone
                \\ svc #0
                \\
                \\ cbz x0,1f
                \\ // parent
                \\ ret
                \\ // child
                \\1: ldp x1,x0,[sp],#16
                \\ blr x1
                \\ mov x8,#93 // SYS_exit
                \\ svc #0
            );
        },
        .arm, .thumb => {
            // __clone(func, stack, flags, arg, ptid, tls, ctid)
            //         r0,   r1,    r2,   r3,  +0,   +4,  +8
            // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
            //         r7         r0,    r1,    r2,   r3,  r4
            asm volatile (
                \\ stmfd sp!,{r4,r5,r6,r7}
                \\ mov r7,#120
                \\ mov r6,r3
                \\ mov r5,r0
                \\ mov r0,r2
                \\ and r1,r1,#-16
                \\ ldr r2,[sp,#16]
                \\ ldr r3,[sp,#20]
                \\ ldr r4,[sp,#24]
                \\ svc 0
                \\ tst r0,r0
                \\ beq 1f
                \\ ldmfd sp!,{r4,r5,r6,r7}
                \\ bx lr
                \\
                \\1: mov r0,r6
                \\ bl 3f
                \\2: mov r7,#1
                \\ svc 0
                \\ b 2b
                \\3: bx r5
            );
        },
        .riscv64 => {
            // __clone(func, stack, flags, arg, ptid, tls, ctid)
            //         a0,   a1,    a2,   a3,  a4,   a5,  a6
            // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
            //         a7         a0,    a1,    a2,   a3,  a4
            asm volatile (
                \\ # Save func and arg to stack
                \\ addi a1, a1, -16
                \\ sd a0, 0(a1)
                \\ sd a3, 8(a1)
                \\
                \\ # Call SYS_clone
                \\ mv a0, a2
                \\ mv a2, a4
                \\ mv a3, a5
                \\ mv a4, a6
                \\ li a7, 220 # SYS_clone
                \\ ecall
                \\
                \\ beqz a0, 1f
                \\ # Parent
                \\ ret
                \\
                \\ # Child
                \\1: ld a1, 0(sp)
                \\ ld a0, 8(sp)
                \\ jalr a1
                \\
                \\ # Exit
                \\ li a7, 93 # SYS_exit
                \\ ecall
            );
        },
        .mips, .mipsel => {
            // __clone(func, stack, flags, arg, ptid, tls, ctid)
            //         3,    4,     5,    6,   7,    8,   9
            // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
            //         2          4,     5,     6,    7,   8
            asm volatile (
                \\ # Save function pointer and argument pointer on new thread stack
                \\ and $5, $5, -8
                \\ subu $5, $5, 16
                \\ sw $4, 0($5)
                \\ sw $7, 4($5)
                \\ # Shuffle (fn,sp,fl,arg,ptid,tls,ctid) to (fl,sp,ptid,tls,ctid)
                \\ move $4, $6
                \\ lw $6, 16($sp)
                \\ lw $7, 20($sp)
                \\ lw $9, 24($sp)
                \\ subu $sp, $sp, 16
                \\ sw $9, 16($sp)
                \\ li $2, 4120
                \\ syscall
                \\ beq $7, $0, 1f
                \\ nop
                \\ addu $sp, $sp, 16
                \\ jr $ra
                \\ subu $2, $0, $2
                \\1:
                \\ beq $2, $0, 1f
                \\ nop
                \\ addu $sp, $sp, 16
                \\ jr $ra
                \\ nop
                \\1:
                \\ lw $25, 0($sp)
                \\ lw $4, 4($sp)
                \\ jalr $25
                \\ nop
                \\ move $4, $2
                \\ li $2, 4001
                \\ syscall
            );
        },
        .powerpc => {
            // __clone(func, stack, flags, arg, ptid, tls, ctid)
            //         3,    4,     5,    6,   7,    8,   9
            // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
            //         0          3,     4,     5,    6,   7
            asm volatile (
                \\# store non-volatile regs r30, r31 on stack in order to put our
                \\# start func and its arg there
                \\stwu 30, -16(1)
                \\stw 31, 4(1)
                \\
                \\# save r3 (func) into r30, and r6(arg) into r31
                \\mr 30, 3
                \\mr 31, 6
                \\
                \\# create initial stack frame for new thread
                \\clrrwi 4, 4, 4
                \\li 0, 0
                \\stwu 0, -16(4)
                \\
                \\#move c into first arg
                \\mr 3, 5
                \\#mr 4, 4
                \\mr 5, 7
                \\mr 6, 8
                \\mr 7, 9
                \\
                \\# move syscall number into r0
                \\li 0, 120
                \\
                \\sc
                \\
                \\# check for syscall error
                \\bns+ 1f # jump to label 1 if no summary overflow.
                \\#else
                \\neg 3, 3 #negate the result (errno)
                \\1:
                \\# compare sc result with 0
                \\cmpwi cr7, 3, 0
                \\
                \\# if not 0, jump to end
                \\bne cr7, 2f
                \\
                \\#else: we're the child
                \\#call funcptr: move arg (d) into r3
                \\mr 3, 31
                \\#move r30 (funcptr) into CTR reg
                \\mtctr 30
                \\# call CTR reg
                \\bctrl
                \\# mov SYS_exit into r0 (the exit param is already in r3)
                \\li 0, 1
                \\sc
                \\
                \\2:
                \\
                \\# restore stack
                \\lwz 30, 0(1)
                \\lwz 31, 4(1)
                \\addi 1, 1, 16
                \\
                \\blr
            );
        },
        .powerpc64, .powerpc64le => {
            // __clone(func, stack, flags, arg, ptid, tls, ctid)
            //         3,    4,     5,    6,   7,    8,   9
            // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
            //         0          3,     4,     5,    6,   7
            asm volatile (
                \\ # create initial stack frame for new thread
                \\ clrrdi 4, 4, 4
                \\ li 0, 0
                \\ stdu 0,-32(4)
                \\
                \\ # save fn and arg to child stack
                \\ std 3, 8(4)
                \\ std 6, 16(4)
                \\
                \\ # shuffle args into correct registers and call SYS_clone
                \\ mr 3, 5
                \\ #mr 4, 4
                \\ mr 5, 7
                \\ mr 6, 8
                \\ mr 7, 9
                \\ li 0, 120 # SYS_clone = 120
                \\ sc
                \\
                \\ # if error, negate return (errno)
                \\ bns+ 1f
                \\ neg 3, 3
                \\
                \\1:
                \\ # if we're the parent, return
                \\ cmpwi cr7, 3, 0
                \\ bnelr cr7
                \\
                \\ # we're the child. call fn(arg)
                \\ ld 3, 16(1)
                \\ ld 12, 8(1)
                \\ mtctr 12
                \\ bctrl
                \\
                \\ # call SYS_exit. exit code is already in r3 from fn return value
                \\ li 0, 1 # SYS_exit = 1
                \\ sc
            );
        },
        .sparc64 => {
            // __clone(func, stack, flags, arg, ptid, tls, ctid)
            //         i0,   i1,    i2,   i3,  i4,   i5,  sp
            // syscall(SYS_clone, flags, stack, ptid, tls, ctid)
            //         g1         o0,    o1,    o2,   o3,  o4
            asm volatile (
                \\ save %%sp, -192, %%sp
                \\ # Save the func pointer and the arg pointer
                \\ mov %%i0, %%g2
                \\ mov %%i3, %%g3
                \\ # Shuffle the arguments
                \\ mov 217, %%g1
                \\ mov %%i2, %%o0
                \\ # Add some extra space for the initial frame
                \\ sub %%i1, 176 + 2047, %%o1
                \\ mov %%i4, %%o2
                \\ mov %%i5, %%o3
                \\ ldx [%%fp + 0x8af], %%o4
                \\ t 0x6d
                \\ bcs,pn %%xcc, 2f
                \\ nop
                \\ # The child pid is returned in o0 while o1 tells if this
                \\ # process is # the child (=1) or the parent (=0).
                \\ brnz %%o1, 1f
                \\ nop
                \\ # Parent process, return the child pid
                \\ mov %%o0, %%i0
                \\ ret
                \\ restore
                \\1:
                \\ # Child process, call func(arg)
                \\ mov %%g0, %%fp
                \\ call %%g2
                \\ mov %%g3, %%o0
                \\ # Exit
                \\ mov 1, %%g1
                \\ t 0x6d
                \\2:
                \\ # The syscall failed
                \\ sub %%g0, %%o0, %%i0
                \\ ret
                \\ restore
            );
        },
        else => @compileError("Implement clone() for this arch."),
    }
}
lib/c.zig
//! Advent of Code 2019, day 5 part 2: an Intcode interpreter supporting
//! add/mul, input/output, conditional jumps and comparisons, with
//! position (0) and immediate (1) parameter modes.
const std = @import("std");
const assert = std.debug.assert;
const fixedBufferStream = std.io.fixedBufferStream;

const ExecError = error{
    InvalidOpcode,
    InvalidParamMode,
};

/// Extract the two-digit opcode from an instruction word.
fn opcode(ins: i32) i32 {
    return @rem(ins, 100);
}

test "opcode extraction" {
    assert(opcode(1002) == 2);
}

/// Extract the addressing mode of parameter `pos` (0-based).
/// Mode digits sit above the two opcode digits, one decimal digit each;
/// absent digits read as 0 (position mode).
fn paramMode(ins: i32, pos: i32) i32 {
    var div: i32 = 100; // Mode of parameter 0 is in digit 3
    var i: i32 = 0;
    while (i < pos) : (i += 1) {
        div *= 10;
    }
    return @rem(@divTrunc(ins, div), 10);
}

test "param mode extraction" {
    assert(paramMode(1002, 0) == 0);
    assert(paramMode(1002, 1) == 1);
    assert(paramMode(1002, 2) == 0);
}

/// Fetch parameter `n` (0-based) of the instruction at `pos`.
/// Mode 0 = position (value is a memory address), mode 1 = immediate.
/// Returns error.InvalidParamMode for any other mode digit.
fn getParam(intcode: []i32, pos: usize, n: usize, mode: i32) !i32 {
    return switch (mode) {
        0 => intcode[@intCast(usize, intcode[pos + 1 + n])],
        1 => intcode[pos + 1 + n],
        else => return error.InvalidParamMode,
    };
}

/// Run the Intcode program in `intcode` (mutated in place) until opcode 99.
/// Opcode 3 reads one decimal integer per line from `input_stream`; opcode 4
/// writes one integer per line to `output_stream`.
fn exec(intcode: []i32, input_stream: anytype, output_stream: anytype) !void {
    var pos: usize = 0;
    while (true) {
        const instr = intcode[pos];
        switch (opcode(instr)) {
            99 => break,
            1 => { // add: [p3] = p1 + p2
                const val_x = try getParam(intcode, pos, 0, paramMode(instr, 0));
                const val_y = try getParam(intcode, pos, 1, paramMode(instr, 1));
                const pos_result = @intCast(usize, intcode[pos + 3]);
                intcode[pos_result] = val_x + val_y;
                pos += 4;
            },
            2 => { // multiply: [p3] = p1 * p2
                const val_x = try getParam(intcode, pos, 0, paramMode(instr, 0));
                const val_y = try getParam(intcode, pos, 1, paramMode(instr, 1));
                const pos_result = @intCast(usize, intcode[pos + 3]);
                intcode[pos_result] = val_x * val_y;
                pos += 4;
            },
            3 => { // input: [p1] = read integer
                const pos_x = @intCast(usize, intcode[pos + 1]);
                var buf: [1024]u8 = undefined;
                const line = (try input_stream.readUntilDelimiterOrEof(&buf, '\n')) orelse "";
                const val = try std.fmt.parseInt(i32, line, 10);
                intcode[pos_x] = val;
                pos += 2;
            },
            4 => { // output: print p1
                const val_x = try getParam(intcode, pos, 0, paramMode(instr, 0));
                try output_stream.print("{}\n", .{val_x});
                pos += 2;
            },
            5 => { // jump-if-true: pos = p2 when p1 != 0
                const val_x = try getParam(intcode, pos, 0, paramMode(instr, 0));
                if (val_x != 0) {
                    const val_y = try getParam(intcode, pos, 1, paramMode(instr, 1));
                    pos = @intCast(usize, val_y);
                } else {
                    pos += 3;
                }
            },
            6 => { // jump-if-false: pos = p2 when p1 == 0
                const val_x = try getParam(intcode, pos, 0, paramMode(instr, 0));
                if (val_x == 0) {
                    const val_y = try getParam(intcode, pos, 1, paramMode(instr, 1));
                    pos = @intCast(usize, val_y);
                } else {
                    pos += 3;
                }
            },
            7 => { // less-than: [p3] = (p1 < p2)
                const val_x = try getParam(intcode, pos, 0, paramMode(instr, 0));
                const val_y = try getParam(intcode, pos, 1, paramMode(instr, 1));
                const pos_result = @intCast(usize, intcode[pos + 3]);
                intcode[pos_result] = if (val_x < val_y) 1 else 0;
                pos += 4;
            },
            8 => { // equals: [p3] = (p1 == p2)
                const val_x = try getParam(intcode, pos, 0, paramMode(instr, 0));
                const val_y = try getParam(intcode, pos, 1, paramMode(instr, 1));
                const pos_result = @intCast(usize, intcode[pos + 3]);
                intcode[pos_result] = if (val_x == val_y) 1 else 0;
                pos += 4;
            },
            else => {
                std.debug.warn("pos: {}, instr: {}\n", .{ pos, intcode[pos] });
                return error.InvalidOpcode;
            },
        }
    }
}

test "test exec 1" {
    var intcode = [_]i32{ 1, 0, 0, 0, 99 };
    // Bind the stream to a variable first: taking .reader() of the temporary
    // returned by fixedBufferStream would leave the reader pointing at an
    // expired temporary.
    var in = fixedBufferStream("");
    try exec(&intcode, in.reader(), std.io.null_writer);
    assert(intcode[0] == 2);
}

test "test exec 2" {
    var intcode = [_]i32{ 2, 3, 0, 3, 99 };
    var in = fixedBufferStream("");
    try exec(&intcode, in.reader(), std.io.null_writer);
    assert(intcode[3] == 6);
}

test "test exec 3" {
    var intcode = [_]i32{ 2, 4, 4, 5, 99, 0 };
    var in = fixedBufferStream("");
    try exec(&intcode, in.reader(), std.io.null_writer);
    assert(intcode[5] == 9801);
}

test "test exec with different param mode" {
    var intcode = [_]i32{ 1002, 4, 3, 4, 33 };
    var in = fixedBufferStream("");
    try exec(&intcode, in.reader(), std.io.null_writer);
    assert(intcode[4] == 99);
}

test "test exec with negative integers" {
    var intcode = [_]i32{ 1101, 100, -1, 4, 0 };
    var in = fixedBufferStream("");
    try exec(&intcode, in.reader(), std.io.null_writer);
    assert(intcode[4] == 99);
}

test "test equal 1" {
    var intcode = [_]i32{ 3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8 };
    var output_buf: [32]u8 = undefined;
    var in = fixedBufferStream("8\n");
    var out = fixedBufferStream(&output_buf);
    try exec(&intcode, in.reader(), out.writer());
    assert(std.mem.eql(u8, "1\n", output_buf[0..2]));
}

test "test equal 2" {
    var intcode = [_]i32{ 3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8 };
    var output_buf: [32]u8 = undefined;
    var in = fixedBufferStream("13\n");
    var out = fixedBufferStream(&output_buf);
    try exec(&intcode, in.reader(), out.writer());
    assert(std.mem.eql(u8, "0\n", output_buf[0..2]));
}

test "test less than 1" {
    var intcode = [_]i32{ 3, 9, 7, 9, 10, 9, 4, 9, 99, -1, 8 };
    var output_buf: [32]u8 = undefined;
    var in = fixedBufferStream("5\n");
    var out = fixedBufferStream(&output_buf);
    try exec(&intcode, in.reader(), out.writer());
    assert(std.mem.eql(u8, "1\n", output_buf[0..2]));
}

test "test less than 2" {
    var intcode = [_]i32{ 3, 9, 7, 9, 10, 9, 4, 9, 99, -1, 8 };
    var output_buf: [32]u8 = undefined;
    var in = fixedBufferStream("20\n");
    var out = fixedBufferStream(&output_buf);
    try exec(&intcode, in.reader(), out.writer());
    assert(std.mem.eql(u8, "0\n", output_buf[0..2]));
}

test "test equal immediate" {
    var intcode = [_]i32{ 3, 3, 1108, -1, 8, 3, 4, 3, 99 };
    var output_buf: [32]u8 = undefined;
    var in = fixedBufferStream("8\n");
    var out = fixedBufferStream(&output_buf);
    try exec(&intcode, in.reader(), out.writer());
    assert(std.mem.eql(u8, "1\n", output_buf[0..2]));
}

test "test less than immediate" {
    var intcode = [_]i32{ 3, 3, 1107, -1, 8, 3, 4, 3, 99 };
    var output_buf: [32]u8 = undefined;
    var in = fixedBufferStream("3\n");
    var out = fixedBufferStream(&output_buf);
    try exec(&intcode, in.reader(), out.writer());
    assert(std.mem.eql(u8, "1\n", output_buf[0..2]));
}

/// Load the comma-separated program from input05.txt and run it against
/// stdin/stdout.
pub fn main() !void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    const allocator = &arena.allocator;
    defer arena.deinit();

    const input_file = try std.fs.cwd().openFile("input05.txt", .{});
    defer input_file.close();
    var input_stream = input_file.reader();
    var buf: [1024]u8 = undefined;
    var ints = std.ArrayList(i32).init(allocator);

    // read everything into an int arraylist
    while (try input_stream.readUntilDelimiterOrEof(&buf, ',')) |item| {
        // Trim the trailing newline (and stray whitespace) from tokens and
        // skip empty ones, instead of silently turning every parse failure
        // into a bogus 0 program word.
        const trimmed = std.mem.trim(u8, item, " \t\r\n");
        if (trimmed.len == 0) continue;
        try ints.append(try std.fmt.parseInt(i32, trimmed, 10));
    }

    // execute code; bind readers/writers so we don't hold pointers to temporaries
    const stdin = std.io.getStdIn().reader();
    const stdout = std.io.getStdOut().writer();
    try exec(ints.items, stdin, stdout);
}
zig/05_2.zig
//! Root of the SFML wrapper: re-exports the raw C API plus the system,
//! window, graphics and audio sub-modules.
pub const c = @import("sfml_import.zig").c;
pub const Error = @import("sfml_errors.zig").Error;

/// sf::System equivalents: vector math, time and clocks.
pub const system = struct {
    const vector = @import("system/vector.zig");
    pub const Vector2 = vector.Vector2;
    pub const Vector3 = vector.Vector3;
    pub const Vector4 = vector.Vector4;
    // Common concrete instantiations of the generic vector types.
    pub const Vector2i = Vector2(c_int);
    pub const Vector2u = Vector2(c_uint);
    pub const Vector2f = Vector2(f32);
    pub const Vector3f = Vector3(f32);
    pub const Time = @import("system/Time.zig");
    pub const Clock = @import("system/Clock.zig");
};

/// sf::Window equivalents: events, styles and input devices.
pub const window = struct {
    pub const Event = @import("window/event.zig").Event;
    pub const Style = @import("window/Style.zig");
    pub const WindowHandle = c.sfWindowHandle;
    pub const ContextSettings = @import("window/context_settings.zig").ContextSettings;
    pub const keyboard = @import("window/keyboard.zig");
    pub const mouse = @import("window/mouse.zig");
};

/// sf::Graphics equivalents: render targets, shapes, text and shaders.
pub const graphics = struct {
    pub const Color = @import("graphics/color.zig").Color;
    pub const RenderWindow = @import("graphics/RenderWindow.zig");
    pub const RenderTexture = @import("graphics/RenderTexture.zig");
    pub const View = @import("graphics/View.zig");
    pub const Image = @import("graphics/Image.zig");
    pub const Texture = @import("graphics/texture.zig").Texture;
    pub const Sprite = @import("graphics/Sprite.zig");
    pub const CircleShape = @import("graphics/CircleShape.zig");
    pub const RectangleShape = @import("graphics/RectangleShape.zig");
    pub const Vertex = @import("graphics/vertex.zig").Vertex;
    pub const VertexArray = @import("graphics/VertexArray.zig");
    pub const PrimitiveType = @import("graphics/primitive_type.zig").PrimitiveType;
    pub const Rect = @import("graphics/rect.zig").Rect;
    // Common concrete instantiations of the generic rect type.
    pub const IntRect = Rect(c_int);
    pub const FloatRect = Rect(f32);
    pub const Font = @import("graphics/Font.zig");
    pub const Text = @import("graphics/Text.zig");
    pub const glsl = @import("graphics/glsl.zig");
    pub const Shader = @import("graphics/Shader.zig");
    pub const BlendMode = @import("graphics/BlendMode.zig");
    pub const RenderStates = @import("graphics/RenderStates.zig");
};

/// sf::Audio equivalents.
pub const audio = struct {
    pub const Music = @import("audio/Music.zig");
    pub const SoundBuffer = @import("audio/SoundBuffer.zig");
    pub const Sound = @import("audio/Sound.zig");
};

// Unimplemented SFML modules: referencing any of these is a compile error
// with an explanatory message.
pub const network = @compileError("network module: to be implemented one day");
pub const VertexBuffer = @compileError("VertexArray not available yet");
pub const Transform = @compileError("Transform not available yet");
pub const touch = @compileError("touch not available yet");
pub const joystick = @compileError("Joystick not available yet");
pub const sensor = @compileError("Sensor not available yet");
pub const clipboard = @compileError("Clipboard not available yet");
pub const Cursor = @compileError("Cursor not available yet");
src/sfml/sfml.zig
/// NTSTATUS codes from https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/596a1078-e883-4972-9bbc-49e60bebca55? pub const NTSTATUS = enum(u32) { /// The caller specified WaitAny for WaitType and one of the dispatcher /// objects in the Object array has been set to the signaled state. pub const WAIT_0: NTSTATUS = .SUCCESS; /// The caller attempted to wait for a mutex that has been abandoned. pub const ABANDONED_WAIT_0: NTSTATUS = .ABANDONED; /// The maximum number of boot-time filters has been reached. pub const FWP_TOO_MANY_BOOTTIME_FILTERS: NTSTATUS = .FWP_TOO_MANY_CALLOUTS; /// The operation completed successfully. SUCCESS = 0x00000000, /// The caller specified WaitAny for WaitType and one of the dispatcher objects in the Object array has been set to the signaled state. WAIT_1 = 0x00000001, /// The caller specified WaitAny for WaitType and one of the dispatcher objects in the Object array has been set to the signaled state. WAIT_2 = 0x00000002, /// The caller specified WaitAny for WaitType and one of the dispatcher objects in the Object array has been set to the signaled state. WAIT_3 = 0x00000003, /// The caller specified WaitAny for WaitType and one of the dispatcher objects in the Object array has been set to the signaled state. WAIT_63 = 0x0000003F, /// The caller attempted to wait for a mutex that has been abandoned. ABANDONED = 0x00000080, /// The caller attempted to wait for a mutex that has been abandoned. ABANDONED_WAIT_63 = 0x000000BF, /// A user-mode APC was delivered before the given Interval expired. USER_APC = 0x000000C0, /// The delay completed because the thread was alerted. ALERTED = 0x00000101, /// The given Timeout interval expired. TIMEOUT = 0x00000102, /// The operation that was requested is pending completion. PENDING = 0x00000103, /// A reparse should be performed by the Object Manager because the name of the file resulted in a symbolic link. 
REPARSE = 0x00000104, /// Returned by enumeration APIs to indicate more information is available to successive calls. MORE_ENTRIES = 0x00000105, /// Indicates not all privileges or groups that are referenced are assigned to the caller. /// This allows, for example, all privileges to be disabled without having to know exactly which privileges are assigned. NOT_ALL_ASSIGNED = 0x00000106, /// Some of the information to be translated has not been translated. SOME_NOT_MAPPED = 0x00000107, /// An open/create operation completed while an opportunistic lock (oplock) break is underway. OPLOCK_BREAK_IN_PROGRESS = 0x00000108, /// A new volume has been mounted by a file system. VOLUME_MOUNTED = 0x00000109, /// This success level status indicates that the transaction state already exists for the registry subtree but that a transaction commit was previously aborted. The commit has now been completed. RXACT_COMMITTED = 0x0000010A, /// Indicates that a notify change request has been completed due to closing the handle that made the notify change request. NOTIFY_CLEANUP = 0x0000010B, /// Indicates that a notify change request is being completed and that the information is not being returned in the caller's buffer. /// The caller now needs to enumerate the files to find the changes. NOTIFY_ENUM_DIR = 0x0000010C, /// {No Quotas} No system quota limits are specifically set for this account. NO_QUOTAS_FOR_ACCOUNT = 0x0000010D, /// {Connect Failure on Primary Transport} An attempt was made to connect to the remote server %hs on the primary transport, but the connection failed. /// The computer WAS able to connect on a secondary transport. PRIMARY_TRANSPORT_CONNECT_FAILED = 0x0000010E, /// The page fault was a transition fault. PAGE_FAULT_TRANSITION = 0x00000110, /// The page fault was a demand zero fault. PAGE_FAULT_DEMAND_ZERO = 0x00000111, /// The page fault was a demand zero fault. PAGE_FAULT_COPY_ON_WRITE = 0x00000112, /// The page fault was a demand zero fault. 
PAGE_FAULT_GUARD_PAGE = 0x00000113, /// The page fault was satisfied by reading from a secondary storage device. PAGE_FAULT_PAGING_FILE = 0x00000114, /// The cached page was locked during operation. CACHE_PAGE_LOCKED = 0x00000115, /// The crash dump exists in a paging file. CRASH_DUMP = 0x00000116, /// The specified buffer contains all zeros. BUFFER_ALL_ZEROS = 0x00000117, /// A reparse should be performed by the Object Manager because the name of the file resulted in a symbolic link. REPARSE_OBJECT = 0x00000118, /// The device has succeeded a query-stop and its resource requirements have changed. RESOURCE_REQUIREMENTS_CHANGED = 0x00000119, /// The translator has translated these resources into the global space and no additional translations should be performed. TRANSLATION_COMPLETE = 0x00000120, /// The directory service evaluated group memberships locally, because it was unable to contact a global catalog server. DS_MEMBERSHIP_EVALUATED_LOCALLY = 0x00000121, /// A process being terminated has no threads to terminate. NOTHING_TO_TERMINATE = 0x00000122, /// The specified process is not part of a job. PROCESS_NOT_IN_JOB = 0x00000123, /// The specified process is part of a job. PROCESS_IN_JOB = 0x00000124, /// {Volume Shadow Copy Service} The system is now ready for hibernation. VOLSNAP_HIBERNATE_READY = 0x00000125, /// A file system or file system filter driver has successfully completed an FsFilter operation. FSFILTER_OP_COMPLETED_SUCCESSFULLY = 0x00000126, /// The specified interrupt vector was already connected. INTERRUPT_VECTOR_ALREADY_CONNECTED = 0x00000127, /// The specified interrupt vector is still connected. INTERRUPT_STILL_CONNECTED = 0x00000128, /// The current process is a cloned process. PROCESS_CLONED = 0x00000129, /// The file was locked and all users of the file can only read. FILE_LOCKED_WITH_ONLY_READERS = 0x0000012A, /// The file was locked and at least one user of the file can write. 
FILE_LOCKED_WITH_WRITERS = 0x0000012B, /// The specified ResourceManager made no changes or updates to the resource under this transaction. RESOURCEMANAGER_READ_ONLY = 0x00000202, /// An operation is blocked and waiting for an oplock. WAIT_FOR_OPLOCK = 0x00000367, /// Debugger handled the exception. DBG_EXCEPTION_HANDLED = 0x00010001, /// The debugger continued. DBG_CONTINUE = 0x00010002, /// The IO was completed by a filter. FLT_IO_COMPLETE = 0x001C0001, /// The file is temporarily unavailable. FILE_NOT_AVAILABLE = 0xC0000467, /// The share is temporarily unavailable. SHARE_UNAVAILABLE = 0xC0000480, /// A threadpool worker thread entered a callback at thread affinity %p and exited at affinity %p. /// This is unexpected, indicating that the callback missed restoring the priority. CALLBACK_RETURNED_THREAD_AFFINITY = 0xC0000721, /// {Object Exists} An attempt was made to create an object but the object name already exists. OBJECT_NAME_EXISTS = 0x40000000, /// {Thread Suspended} A thread termination occurred while the thread was suspended. The thread resumed, and termination proceeded. THREAD_WAS_SUSPENDED = 0x40000001, /// {Working Set Range Error} An attempt was made to set the working set minimum or maximum to values that are outside the allowable range. WORKING_SET_LIMIT_RANGE = 0x40000002, /// {Image Relocated} An image file could not be mapped at the address that is specified in the image file. Local fixes must be performed on this image. IMAGE_NOT_AT_BASE = 0x40000003, /// This informational level status indicates that a specified registry subtree transaction state did not yet exist and had to be created. RXACT_STATE_CREATED = 0x40000004, /// {Segment Load} A virtual DOS machine (VDM) is loading, unloading, or moving an MS-DOS or Win16 program segment image. /// An exception is raised so that a debugger can load, unload, or track symbols and breakpoints within these 16-bit segments. 
SEGMENT_NOTIFICATION = 0x40000005, /// {Local Session Key} A user session key was requested for a local remote procedure call (RPC) connection. /// The session key that is returned is a constant value and not unique to this connection. LOCAL_USER_SESSION_KEY = 0x40000006, /// {Invalid Current Directory} The process cannot switch to the startup current directory %hs. /// Select OK to set the current directory to %hs, or select CANCEL to exit. BAD_CURRENT_DIRECTORY = 0x40000007, /// {Serial IOCTL Complete} A serial I/O operation was completed by another write to a serial port. (The IOCTL_SERIAL_XOFF_COUNTER reached zero.) SERIAL_MORE_WRITES = 0x40000008, /// {Registry Recovery} One of the files that contains the system registry data had to be recovered by using a log or alternate copy. The recovery was successful. REGISTRY_RECOVERED = 0x40000009, /// {Redundant Read} To satisfy a read request, the Windows NT operating system fault-tolerant file system successfully read the requested data from a redundant copy. /// This was done because the file system encountered a failure on a member of the fault-tolerant volume but was unable to reassign the failing area of the device. FT_READ_RECOVERY_FROM_BACKUP = 0x4000000A, /// {Redundant Write} To satisfy a write request, the Windows NT fault-tolerant file system successfully wrote a redundant copy of the information. /// This was done because the file system encountered a failure on a member of the fault-tolerant volume but was unable to reassign the failing area of the device. FT_WRITE_RECOVERY = 0x4000000B, /// {Serial IOCTL Timeout} A serial I/O operation completed because the time-out period expired. /// (The IOCTL_SERIAL_XOFF_COUNTER had not reached zero.) SERIAL_COUNTER_TIMEOUT = 0x4000000C, /// {Password Too Complex} The Windows password is too complex to be converted to a LAN Manager password. /// The LAN Manager password that returned is a NULL string. 
NULL_LM_PASSWORD = 0x4000000D, /// {Machine Type Mismatch} The image file %hs is valid but is for a machine type other than the current machine. /// Select OK to continue, or CANCEL to fail the DLL load. IMAGE_MACHINE_TYPE_MISMATCH = 0x4000000E, /// {Partial Data Received} The network transport returned partial data to its client. The remaining data will be sent later. RECEIVE_PARTIAL = 0x4000000F, /// {Expedited Data Received} The network transport returned data to its client that was marked as expedited by the remote system. RECEIVE_EXPEDITED = 0x40000010, /// {Partial Expedited Data Received} The network transport returned partial data to its client and this data was marked as expedited by the remote system. The remaining data will be sent later. RECEIVE_PARTIAL_EXPEDITED = 0x40000011, /// {TDI Event Done} The TDI indication has completed successfully. EVENT_DONE = 0x40000012, /// {TDI Event Pending} The TDI indication has entered the pending state. EVENT_PENDING = 0x40000013, /// Checking file system on %wZ. CHECKING_FILE_SYSTEM = 0x40000014, /// {Fatal Application Exit} %hs FATAL_APP_EXIT = 0x40000015, /// The specified registry key is referenced by a predefined handle. PREDEFINED_HANDLE = 0x40000016, /// {Page Unlocked} The page protection of a locked page was changed to 'No Access' and the page was unlocked from memory and from the process. WAS_UNLOCKED = 0x40000017, /// %hs SERVICE_NOTIFICATION = 0x40000018, /// {Page Locked} One of the pages to lock was already locked. WAS_LOCKED = 0x40000019, /// Application popup: %1 : %2 LOG_HARD_ERROR = 0x4000001A, /// A Win32 process already exists. ALREADY_WIN32 = 0x4000001B, /// An exception status code that is used by the Win32 x86 emulation subsystem. WX86_UNSIMULATE = 0x4000001C, /// An exception status code that is used by the Win32 x86 emulation subsystem. WX86_CONTINUE = 0x4000001D, /// An exception status code that is used by the Win32 x86 emulation subsystem. 
WX86_SINGLE_STEP = 0x4000001E, /// An exception status code that is used by the Win32 x86 emulation subsystem. WX86_BREAKPOINT = 0x4000001F, /// An exception status code that is used by the Win32 x86 emulation subsystem. WX86_EXCEPTION_CONTINUE = 0x40000020, /// An exception status code that is used by the Win32 x86 emulation subsystem. WX86_EXCEPTION_LASTCHANCE = 0x40000021, /// An exception status code that is used by the Win32 x86 emulation subsystem. WX86_EXCEPTION_CHAIN = 0x40000022, /// {Machine Type Mismatch} The image file %hs is valid but is for a machine type other than the current machine. IMAGE_MACHINE_TYPE_MISMATCH_EXE = 0x40000023, /// A yield execution was performed and no thread was available to run. NO_YIELD_PERFORMED = 0x40000024, /// The resume flag to a timer API was ignored. TIMER_RESUME_IGNORED = 0x40000025, /// The arbiter has deferred arbitration of these resources to its parent. ARBITRATION_UNHANDLED = 0x40000026, /// The device has detected a CardBus card in its slot. CARDBUS_NOT_SUPPORTED = 0x40000027, /// An exception status code that is used by the Win32 x86 emulation subsystem. WX86_CREATEWX86TIB = 0x40000028, /// The CPUs in this multiprocessor system are not all the same revision level. /// To use all processors, the operating system restricts itself to the features of the least capable processor in the system. /// If problems occur with this system, contact the CPU manufacturer to see if this mix of processors is supported. MP_PROCESSOR_MISMATCH = 0x40000029, /// The system was put into hibernation. HIBERNATED = 0x4000002A, /// The system was resumed from hibernation. RESUME_HIBERNATION = 0x4000002B, /// Windows has detected that the system firmware (BIOS) was updated [previous firmware date = %2, current firmware date %3]. FIRMWARE_UPDATED = 0x4000002C, /// A device driver is leaking locked I/O pages and is causing system degradation. /// The system has automatically enabled the tracking code to try and catch the culprit. 
DRIVERS_LEAKING_LOCKED_PAGES = 0x4000002D, /// The ALPC message being canceled has already been retrieved from the queue on the other side. MESSAGE_RETRIEVED = 0x4000002E, /// The system power state is transitioning from %2 to %3. SYSTEM_POWERSTATE_TRANSITION = 0x4000002F, /// The receive operation was successful. /// Check the ALPC completion list for the received message. ALPC_CHECK_COMPLETION_LIST = 0x40000030, /// The system power state is transitioning from %2 to %3 but could enter %4. SYSTEM_POWERSTATE_COMPLEX_TRANSITION = 0x40000031, /// Access to %1 is monitored by policy rule %2. ACCESS_AUDIT_BY_POLICY = 0x40000032, /// A valid hibernation file has been invalidated and should be abandoned. ABANDON_HIBERFILE = 0x40000033, /// Business rule scripts are disabled for the calling application. BIZRULES_NOT_ENABLED = 0x40000034, /// The system has awoken. WAKE_SYSTEM = 0x40000294, /// The directory service is shutting down. DS_SHUTTING_DOWN = 0x40000370, /// Debugger will reply later. DBG_REPLY_LATER = 0x40010001, /// Debugger cannot provide a handle. DBG_UNABLE_TO_PROVIDE_HANDLE = 0x40010002, /// Debugger terminated the thread. DBG_TERMINATE_THREAD = 0x40010003, /// Debugger terminated the process. DBG_TERMINATE_PROCESS = 0x40010004, /// Debugger obtained control of C. DBG_CONTROL_C = 0x40010005, /// Debugger printed an exception on control C. DBG_PRINTEXCEPTION_C = 0x40010006, /// Debugger received a RIP exception. DBG_RIPEXCEPTION = 0x40010007, /// Debugger received a control break. DBG_CONTROL_BREAK = 0x40010008, /// Debugger command communication exception. DBG_COMMAND_EXCEPTION = 0x40010009, /// A UUID that is valid only on this computer has been allocated. RPC_NT_UUID_LOCAL_ONLY = 0x40020056, /// Some data remains to be sent in the request buffer. RPC_NT_SEND_INCOMPLETE = 0x400200AF, /// The Client Drive Mapping Service has connected on Terminal Connection. 
CTX_CDM_CONNECT = 0x400A0004, /// The Client Drive Mapping Service has disconnected on Terminal Connection. CTX_CDM_DISCONNECT = 0x400A0005, /// A kernel mode component is releasing a reference on an activation context. SXS_RELEASE_ACTIVATION_CONTEXT = 0x4015000D, /// The transactional resource manager is already consistent. Recovery is not needed. RECOVERY_NOT_NEEDED = 0x40190034, /// The transactional resource manager has already been started. RM_ALREADY_STARTED = 0x40190035, /// The log service encountered a log stream with no restart area. LOG_NO_RESTART = 0x401A000C, /// {Display Driver Recovered From Failure} The %hs display driver has detected a failure and recovered from it. Some graphical operations might have failed. /// The next time you restart the machine, a dialog box appears, giving you an opportunity to upload data about this failure to Microsoft. VIDEO_DRIVER_DEBUG_REPORT_REQUEST = 0x401B00EC, /// The specified buffer is not big enough to contain the entire requested dataset. /// Partial data is populated up to the size of the buffer. /// The caller needs to provide a buffer of the size as specified in the partially populated buffer's content (interface specific). GRAPHICS_PARTIAL_DATA_POPULATED = 0x401E000A, /// The kernel driver detected a version mismatch between it and the user mode driver. GRAPHICS_DRIVER_MISMATCH = 0x401E0117, /// No mode is pinned on the specified VidPN source/target. GRAPHICS_MODE_NOT_PINNED = 0x401E0307, /// The specified mode set does not specify a preference for one of its modes. GRAPHICS_NO_PREFERRED_MODE = 0x401E031E, /// The specified dataset (for example, mode set, frequency range set, descriptor set, or topology) is empty. GRAPHICS_DATASET_IS_EMPTY = 0x401E034B, /// The specified dataset (for example, mode set, frequency range set, descriptor set, or topology) does not contain any more elements. 
GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET = 0x401E034C, /// The specified content transformation is not pinned on the specified VidPN present path. GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED = 0x401E0351, /// The child device presence was not reliably detected. GRAPHICS_UNKNOWN_CHILD_STATUS = 0x401E042F, /// Starting the lead adapter in a linked configuration has been temporarily deferred. GRAPHICS_LEADLINK_START_DEFERRED = 0x401E0437, /// The display adapter is being polled for children too frequently at the same polling level. GRAPHICS_POLLING_TOO_FREQUENTLY = 0x401E0439, /// Starting the adapter has been temporarily deferred. GRAPHICS_START_DEFERRED = 0x401E043A, /// The request will be completed later by an NDIS status indication. NDIS_INDICATION_REQUIRED = 0x40230001, /// {EXCEPTION} Guard Page Exception A page of memory that marks the end of a data structure, such as a stack or an array, has been accessed. GUARD_PAGE_VIOLATION = 0x80000001, /// {EXCEPTION} Alignment Fault A data type misalignment was detected in a load or store instruction. DATATYPE_MISALIGNMENT = 0x80000002, /// {EXCEPTION} Breakpoint A breakpoint has been reached. BREAKPOINT = 0x80000003, /// {EXCEPTION} Single Step A single step or trace operation has just been completed. SINGLE_STEP = 0x80000004, /// {Buffer Overflow} The data was too large to fit into the specified buffer. BUFFER_OVERFLOW = 0x80000005, /// {No More Files} No more files were found which match the file specification. NO_MORE_FILES = 0x80000006, /// {Kernel Debugger Awakened} The system debugger was awakened by an interrupt. WAKE_SYSTEM_DEBUGGER = 0x80000007, /// {Handles Closed} Handles to objects have been automatically closed because of the requested operation. HANDLES_CLOSED = 0x8000000A, /// {Non-Inheritable ACL} An access control list (ACL) contains no components that can be inherited. 
NO_INHERITANCE = 0x8000000B, /// {GUID Substitution} During the translation of a globally unique identifier (GUID) to a Windows security ID (SID), no administratively defined GUID prefix was found. /// A substitute prefix was used, which will not compromise system security. /// However, this might provide a more restrictive access than intended. GUID_SUBSTITUTION_MADE = 0x8000000C, /// Because of protection conflicts, not all the requested bytes could be copied. PARTIAL_COPY = 0x8000000D, /// {Out of Paper} The printer is out of paper. DEVICE_PAPER_EMPTY = 0x8000000E, /// {Device Power Is Off} The printer power has been turned off. DEVICE_POWERED_OFF = 0x8000000F, /// {Device Offline} The printer has been taken offline. DEVICE_OFF_LINE = 0x80000010, /// {Device Busy} The device is currently busy. DEVICE_BUSY = 0x80000011, /// {No More EAs} No more extended attributes (EAs) were found for the file. NO_MORE_EAS = 0x80000012, /// {Illegal EA} The specified extended attribute (EA) name contains at least one illegal character. INVALID_EA_NAME = 0x80000013, /// {Inconsistent EA List} The extended attribute (EA) list is inconsistent. EA_LIST_INCONSISTENT = 0x80000014, /// {Invalid EA Flag} An invalid extended attribute (EA) flag was set. INVALID_EA_FLAG = 0x80000015, /// {Verifying Disk} The media has changed and a verify operation is in progress; therefore, no reads or writes can be performed to the device, except those that are used in the verify operation. VERIFY_REQUIRED = 0x80000016, /// {Too Much Information} The specified access control list (ACL) contained more information than was expected. EXTRANEOUS_INFORMATION = 0x80000017, /// This warning level status indicates that the transaction state already exists for the registry subtree, but that a transaction commit was previously aborted. /// The commit has NOT been completed but has not been rolled back either; therefore, it can still be committed, if needed. 
RXACT_COMMIT_NECESSARY = 0x80000018, /// {No More Entries} No more entries are available from an enumeration operation. NO_MORE_ENTRIES = 0x8000001A, /// {Filemark Found} A filemark was detected. FILEMARK_DETECTED = 0x8000001B, /// {Media Changed} The media has changed. MEDIA_CHANGED = 0x8000001C, /// {I/O Bus Reset} An I/O bus reset was detected. BUS_RESET = 0x8000001D, /// {End of Media} The end of the media was encountered. END_OF_MEDIA = 0x8000001E, /// The beginning of a tape or partition has been detected. BEGINNING_OF_MEDIA = 0x8000001F, /// {Media Changed} The media might have changed. MEDIA_CHECK = 0x80000020, /// A tape access reached a set mark. SETMARK_DETECTED = 0x80000021, /// During a tape access, the end of the data written is reached. NO_DATA_DETECTED = 0x80000022, /// The redirector is in use and cannot be unloaded. REDIRECTOR_HAS_OPEN_HANDLES = 0x80000023, /// The server is in use and cannot be unloaded. SERVER_HAS_OPEN_HANDLES = 0x80000024, /// The specified connection has already been disconnected. ALREADY_DISCONNECTED = 0x80000025, /// A long jump has been executed. LONGJUMP = 0x80000026, /// A cleaner cartridge is present in the tape library. CLEANER_CARTRIDGE_INSTALLED = 0x80000027, /// The Plug and Play query operation was not successful. PLUGPLAY_QUERY_VETOED = 0x80000028, /// A frame consolidation has been executed. UNWIND_CONSOLIDATE = 0x80000029, /// {Registry Hive Recovered} The registry hive (file): %hs was corrupted and it has been recovered. Some data might have been lost. REGISTRY_HIVE_RECOVERED = 0x8000002A, /// The application is attempting to run executable code from the module %hs. This might be insecure. /// An alternative, %hs, is available. Should the application use the secure module %hs? DLL_MIGHT_BE_INSECURE = 0x8000002B, /// The application is loading executable code from the module %hs. /// This is secure but might be incompatible with previous releases of the operating system. /// An alternative, %hs, is available. 
Should the application use the secure module %hs? DLL_MIGHT_BE_INCOMPATIBLE = 0x8000002C, /// The create operation stopped after reaching a symbolic link. STOPPED_ON_SYMLINK = 0x8000002D, /// The device has indicated that cleaning is necessary. DEVICE_REQUIRES_CLEANING = 0x80000288, /// The device has indicated that its door is open. Further operations require it closed and secured. DEVICE_DOOR_OPEN = 0x80000289, /// Windows discovered a corruption in the file %hs. This file has now been repaired. /// Check if any data in the file was lost because of the corruption. DATA_LOST_REPAIR = 0x80000803, /// Debugger did not handle the exception. DBG_EXCEPTION_NOT_HANDLED = 0x80010001, /// The cluster node is already up. CLUSTER_NODE_ALREADY_UP = 0x80130001, /// The cluster node is already down. CLUSTER_NODE_ALREADY_DOWN = 0x80130002, /// The cluster network is already online. CLUSTER_NETWORK_ALREADY_ONLINE = 0x80130003, /// The cluster network is already offline. CLUSTER_NETWORK_ALREADY_OFFLINE = 0x80130004, /// The cluster node is already a member of the cluster. CLUSTER_NODE_ALREADY_MEMBER = 0x80130005, /// The log could not be set to the requested size. COULD_NOT_RESIZE_LOG = 0x80190009, /// There is no transaction metadata on the file. NO_TXF_METADATA = 0x80190029, /// The file cannot be recovered because there is a handle still open on it. CANT_RECOVER_WITH_HANDLE_OPEN = 0x80190031, /// Transaction metadata is already present on this file and cannot be superseded. TXF_METADATA_ALREADY_PRESENT = 0x80190041, /// A transaction scope could not be entered because the scope handler has not been initialized. TRANSACTION_SCOPE_CALLBACKS_NOT_SET = 0x80190042, /// {Display Driver Stopped Responding and recovered} The %hs display driver has stopped working normally. The recovery had been performed. VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED = 0x801B00EB, /// {Buffer too small} The buffer is too small to contain the entry. No information has been written to the buffer. 
FLT_BUFFER_TOO_SMALL = 0x801C0001, /// Volume metadata read or write is incomplete. FVE_PARTIAL_METADATA = 0x80210001, /// BitLocker encryption keys were ignored because the volume was in a transient state. FVE_TRANSIENT_STATE = 0x80210002, /// {Operation Failed} The requested operation was unsuccessful. UNSUCCESSFUL = 0xC0000001, /// {Not Implemented} The requested operation is not implemented. NOT_IMPLEMENTED = 0xC0000002, /// {Invalid Parameter} The specified information class is not a valid information class for the specified object. INVALID_INFO_CLASS = 0xC0000003, /// The specified information record length does not match the length that is required for the specified information class. INFO_LENGTH_MISMATCH = 0xC0000004, /// The instruction at 0x%08lx referenced memory at 0x%08lx. The memory could not be %s. ACCESS_VIOLATION = 0xC0000005, /// The instruction at 0x%08lx referenced memory at 0x%08lx. /// The required data was not placed into memory because of an I/O error status of 0x%08lx. IN_PAGE_ERROR = 0xC0000006, /// The page file quota for the process has been exhausted. PAGEFILE_QUOTA = 0xC0000007, /// An invalid HANDLE was specified. INVALID_HANDLE = 0xC0000008, /// An invalid initial stack was specified in a call to NtCreateThread. BAD_INITIAL_STACK = 0xC0000009, /// An invalid initial start address was specified in a call to NtCreateThread. BAD_INITIAL_PC = 0xC000000A, /// An invalid client ID was specified. INVALID_CID = 0xC000000B, /// An attempt was made to cancel or set a timer that has an associated APC and the specified thread is not the thread that originally set the timer with an associated APC routine. TIMER_NOT_CANCELED = 0xC000000C, /// An invalid parameter was passed to a service or function. INVALID_PARAMETER = 0xC000000D, /// A device that does not exist was specified. NO_SUCH_DEVICE = 0xC000000E, /// {File Not Found} The file %hs does not exist. 
NO_SUCH_FILE = 0xC000000F, /// The specified request is not a valid operation for the target device. INVALID_DEVICE_REQUEST = 0xC0000010, /// The end-of-file marker has been reached. /// There is no valid data in the file beyond this marker. END_OF_FILE = 0xC0000011, /// {Wrong Volume} The wrong volume is in the drive. Insert volume %hs into drive %hs. WRONG_VOLUME = 0xC0000012, /// {No Disk} There is no disk in the drive. Insert a disk into drive %hs. NO_MEDIA_IN_DEVICE = 0xC0000013, /// {Unknown Disk Format} The disk in drive %hs is not formatted properly. /// Check the disk, and reformat it, if needed. UNRECOGNIZED_MEDIA = 0xC0000014, /// {Sector Not Found} The specified sector does not exist. NONEXISTENT_SECTOR = 0xC0000015, /// {Still Busy} The specified I/O request packet (IRP) cannot be disposed of because the I/O operation is not complete. MORE_PROCESSING_REQUIRED = 0xC0000016, /// {Not Enough Quota} Not enough virtual memory or paging file quota is available to complete the specified operation. NO_MEMORY = 0xC0000017, /// {Conflicting Address Range} The specified address range conflicts with the address space. CONFLICTING_ADDRESSES = 0xC0000018, /// The address range to unmap is not a mapped view. NOT_MAPPED_VIEW = 0xC0000019, /// The virtual memory cannot be freed. UNABLE_TO_FREE_VM = 0xC000001A, /// The specified section cannot be deleted. UNABLE_TO_DELETE_SECTION = 0xC000001B, /// An invalid system service was specified in a system service call. INVALID_SYSTEM_SERVICE = 0xC000001C, /// {EXCEPTION} Illegal Instruction An attempt was made to execute an illegal instruction. ILLEGAL_INSTRUCTION = 0xC000001D, /// {Invalid Lock Sequence} An attempt was made to execute an invalid lock sequence. INVALID_LOCK_SEQUENCE = 0xC000001E, /// {Invalid Mapping} An attempt was made to create a view for a section that is bigger than the section. 
INVALID_VIEW_SIZE = 0xC000001F, /// {Bad File} The attributes of the specified mapping file for a section of memory cannot be read. INVALID_FILE_FOR_SECTION = 0xC0000020, /// {Already Committed} The specified address range is already committed. ALREADY_COMMITTED = 0xC0000021, /// {Access Denied} A process has requested access to an object but has not been granted those access rights. ACCESS_DENIED = 0xC0000022, /// {Buffer Too Small} The buffer is too small to contain the entry. No information has been written to the buffer. BUFFER_TOO_SMALL = 0xC0000023, /// {Wrong Type} There is a mismatch between the type of object that is required by the requested operation and the type of object that is specified in the request. OBJECT_TYPE_MISMATCH = 0xC0000024, /// {EXCEPTION} Cannot Continue Windows cannot continue from this exception. NONCONTINUABLE_EXCEPTION = 0xC0000025, /// An invalid exception disposition was returned by an exception handler. INVALID_DISPOSITION = 0xC0000026, /// Unwind exception code. UNWIND = 0xC0000027, /// An invalid or unaligned stack was encountered during an unwind operation. BAD_STACK = 0xC0000028, /// An invalid unwind target was encountered during an unwind operation. INVALID_UNWIND_TARGET = 0xC0000029, /// An attempt was made to unlock a page of memory that was not locked. NOT_LOCKED = 0xC000002A, /// A device parity error on an I/O operation. PARITY_ERROR = 0xC000002B, /// An attempt was made to decommit uncommitted virtual memory. UNABLE_TO_DECOMMIT_VM = 0xC000002C, /// An attempt was made to change the attributes on memory that has not been committed. NOT_COMMITTED = 0xC000002D, /// Invalid object attributes specified to NtCreatePort or invalid port attributes specified to NtConnectPort. INVALID_PORT_ATTRIBUTES = 0xC000002E, /// The length of the message that was passed to NtRequestPort or NtRequestWaitReplyPort is longer than the maximum message that is allowed by the port. 
PORT_MESSAGE_TOO_LONG = 0xC000002F, /// An invalid combination of parameters was specified. INVALID_PARAMETER_MIX = 0xC0000030, /// An attempt was made to lower a quota limit below the current usage. INVALID_QUOTA_LOWER = 0xC0000031, /// {Corrupt Disk} The file system structure on the disk is corrupt and unusable. Run the Chkdsk utility on the volume %hs. DISK_CORRUPT_ERROR = 0xC0000032, /// The object name is invalid. OBJECT_NAME_INVALID = 0xC0000033, /// The object name is not found. OBJECT_NAME_NOT_FOUND = 0xC0000034, /// The object name already exists. OBJECT_NAME_COLLISION = 0xC0000035, /// An attempt was made to send a message to a disconnected communication port. PORT_DISCONNECTED = 0xC0000037, /// An attempt was made to attach to a device that was already attached to another device. DEVICE_ALREADY_ATTACHED = 0xC0000038, /// The object path component was not a directory object. OBJECT_PATH_INVALID = 0xC0000039, /// {Path Not Found} The path %hs does not exist. OBJECT_PATH_NOT_FOUND = 0xC000003A, /// The object path component was not a directory object. OBJECT_PATH_SYNTAX_BAD = 0xC000003B, /// {Data Overrun} A data overrun error occurred. DATA_OVERRUN = 0xC000003C, /// {Data Late} A data late error occurred. DATA_LATE_ERROR = 0xC000003D, /// {Data Error} An error occurred in reading or writing data. DATA_ERROR = 0xC000003E, /// {Bad CRC} A cyclic redundancy check (CRC) checksum error occurred. CRC_ERROR = 0xC000003F, /// {Section Too Large} The specified section is too big to map the file. SECTION_TOO_BIG = 0xC0000040, /// The NtConnectPort request is refused. PORT_CONNECTION_REFUSED = 0xC0000041, /// The type of port handle is invalid for the operation that is requested. INVALID_PORT_HANDLE = 0xC0000042, /// A file cannot be opened because the share access flags are incompatible. SHARING_VIOLATION = 0xC0000043, /// Insufficient quota exists to complete the operation. QUOTA_EXCEEDED = 0xC0000044, /// The specified page protection was not valid. 
INVALID_PAGE_PROTECTION = 0xC0000045, /// An attempt to release a mutant object was made by a thread that was not the owner of the mutant object. MUTANT_NOT_OWNED = 0xC0000046, /// An attempt was made to release a semaphore such that its maximum count would have been exceeded. SEMAPHORE_LIMIT_EXCEEDED = 0xC0000047, /// An attempt was made to set the DebugPort or ExceptionPort of a process, but a port already exists in the process, or an attempt was made to set the CompletionPort of a file but a port was already set in the file, or an attempt was made to set the associated completion port of an ALPC port but it is already set. PORT_ALREADY_SET = 0xC0000048, /// An attempt was made to query image information on a section that does not map an image. SECTION_NOT_IMAGE = 0xC0000049, /// An attempt was made to suspend a thread whose suspend count was at its maximum. SUSPEND_COUNT_EXCEEDED = 0xC000004A, /// An attempt was made to suspend a thread that has begun termination. THREAD_IS_TERMINATING = 0xC000004B, /// An attempt was made to set the working set limit to an invalid value (for example, the minimum greater than maximum). BAD_WORKING_SET_LIMIT = 0xC000004C, /// A section was created to map a file that is not compatible with an already existing section that maps the same file. INCOMPATIBLE_FILE_MAP = 0xC000004D, /// A view to a section specifies a protection that is incompatible with the protection of the initial view. SECTION_PROTECTION = 0xC000004E, /// An operation involving EAs failed because the file system does not support EAs. EAS_NOT_SUPPORTED = 0xC000004F, /// An EA operation failed because the EA set is too large. EA_TOO_LARGE = 0xC0000050, /// An EA operation failed because the name or EA index is invalid. NONEXISTENT_EA_ENTRY = 0xC0000051, /// The file for which EAs were requested has no EAs. NO_EAS_ON_FILE = 0xC0000052, /// The EA is corrupt and cannot be read. 
EA_CORRUPT_ERROR = 0xC0000053, /// A requested read/write cannot be granted due to a conflicting file lock. FILE_LOCK_CONFLICT = 0xC0000054, /// A requested file lock cannot be granted due to other existing locks. LOCK_NOT_GRANTED = 0xC0000055, /// A non-close operation has been requested of a file object that has a delete pending. DELETE_PENDING = 0xC0000056, /// An attempt was made to set the control attribute on a file. /// This attribute is not supported in the destination file system. CTL_FILE_NOT_SUPPORTED = 0xC0000057, /// Indicates a revision number that was encountered or specified is not one that is known by the service. /// It might be a more recent revision than the service is aware of. UNKNOWN_REVISION = 0xC0000058, /// Indicates that two revision levels are incompatible. REVISION_MISMATCH = 0xC0000059, /// Indicates a particular security ID cannot be assigned as the owner of an object. INVALID_OWNER = 0xC000005A, /// Indicates a particular security ID cannot be assigned as the primary group of an object. INVALID_PRIMARY_GROUP = 0xC000005B, /// An attempt has been made to operate on an impersonation token by a thread that is not currently impersonating a client. NO_IMPERSONATION_TOKEN = 0xC000005C, /// A mandatory group cannot be disabled. CANT_DISABLE_MANDATORY = 0xC000005D, /// No logon servers are currently available to service the logon request. NO_LOGON_SERVERS = 0xC000005E, /// A specified logon session does not exist. It might already have been terminated. NO_SUCH_LOGON_SESSION = 0xC000005F, /// A specified privilege does not exist. NO_SUCH_PRIVILEGE = 0xC0000060, /// A required privilege is not held by the client. PRIVILEGE_NOT_HELD = 0xC0000061, /// The name provided is not a properly formed account name. INVALID_ACCOUNT_NAME = 0xC0000062, /// The specified account already exists. USER_EXISTS = 0xC0000063, /// The specified account does not exist. NO_SUCH_USER = 0xC0000064, /// The specified group already exists. 
GROUP_EXISTS = 0xC0000065,
/// The specified group does not exist.
NO_SUCH_GROUP = 0xC0000066,
/// The specified user account is already in the specified group account.
/// Also used to indicate a group cannot be deleted because it contains a member.
MEMBER_IN_GROUP = 0xC0000067,
/// The specified user account is not a member of the specified group account.
MEMBER_NOT_IN_GROUP = 0xC0000068,
/// Indicates the requested operation would disable or delete the last remaining administration account.
/// This is not allowed to prevent creating a situation in which the system cannot be administrated.
LAST_ADMIN = 0xC0000069,
/// When trying to update a password, this return status indicates that the value provided as the current password is not correct.
// NOTE(review): value restored from [MS-ERREF] (STATUS_WRONG_PASSWORD); it had been
// replaced by a `<PASSWORD>` scrubber placeholder, which is not valid hex.
WRONG_PASSWORD = 0xC000006A,
/// When trying to update a password, this return status indicates that the value provided for the new password contains values that are not allowed in passwords.
// NOTE(review): value restored from [MS-ERREF] (STATUS_ILL_FORMED_PASSWORD); same
// scrubber corruption as above. 0xC000006A/0xC000006B also fill the gap between
// LAST_ADMIN (0xC0000069) and PASSWORD_RESTRICTION (0xC000006C).
ILL_FORMED_PASSWORD = 0xC000006B,
/// When trying to update a password, this status indicates that some password update rule has been violated.
/// For example, the password might not meet length criteria.
PASSWORD_RESTRICTION = 0xC000006C,
/// The attempted logon is invalid.
/// This is either due to a bad username or authentication information.
LOGON_FAILURE = 0xC000006D,
/// Indicates a referenced user name and authentication information are valid, but some user account restriction has prevented successful authentication (such as time-of-day restrictions).
ACCOUNT_RESTRICTION = 0xC000006E,
/// The user account has time restrictions and cannot be logged onto at this time.
INVALID_LOGON_HOURS = 0xC000006F,
/// The user account is restricted so that it cannot be used to log on from the source workstation.
INVALID_WORKSTATION = 0xC0000070,
/// The user account password has expired.
PASSWORD_EXPIRED = 0xC0000071,
/// The referenced account is currently disabled and cannot be logged on to.
ACCOUNT_DISABLED = 0xC0000072, /// None of the information to be translated has been translated. NONE_MAPPED = 0xC0000073, /// The number of LUIDs requested cannot be allocated with a single allocation. TOO_MANY_LUIDS_REQUESTED = 0xC0000074, /// Indicates there are no more LUIDs to allocate. LUIDS_EXHAUSTED = 0xC0000075, /// Indicates the sub-authority value is invalid for the particular use. INVALID_SUB_AUTHORITY = 0xC0000076, /// Indicates the ACL structure is not valid. INVALID_ACL = 0xC0000077, /// Indicates the SID structure is not valid. INVALID_SID = 0xC0000078, /// Indicates the SECURITY_DESCRIPTOR structure is not valid. INVALID_SECURITY_DESCR = 0xC0000079, /// Indicates the specified procedure address cannot be found in the DLL. PROCEDURE_NOT_FOUND = 0xC000007A, /// {Bad Image} %hs is either not designed to run on Windows or it contains an error. /// Try installing the program again using the original installation media or contact your system administrator or the software vendor for support. INVALID_IMAGE_FORMAT = 0xC000007B, /// An attempt was made to reference a token that does not exist. /// This is typically done by referencing the token that is associated with a thread when the thread is not impersonating a client. NO_TOKEN = 0xC000007C, /// Indicates that an attempt to build either an inherited ACL or ACE was not successful. This can be caused by a number of things. /// One of the more probable causes is the replacement of a CreatorId with a SID that did not fit into the ACE or ACL. BAD_INHERITANCE_ACL = 0xC000007D, /// The range specified in NtUnlockFile was not locked. RANGE_NOT_LOCKED = 0xC000007E, /// An operation failed because the disk was full. DISK_FULL = 0xC000007F, /// The GUID allocation server is disabled at the moment. SERVER_DISABLED = 0xC0000080, /// The GUID allocation server is enabled at the moment. SERVER_NOT_DISABLED = 0xC0000081, /// Too many GUIDs were requested from the allocation server at once. 
TOO_MANY_GUIDS_REQUESTED = 0xC0000082, /// The GUIDs could not be allocated because the Authority Agent was exhausted. GUIDS_EXHAUSTED = 0xC0000083, /// The value provided was an invalid value for an identifier authority. INVALID_ID_AUTHORITY = 0xC0000084, /// No more authority agent values are available for the particular identifier authority value. AGENTS_EXHAUSTED = 0xC0000085, /// An invalid volume label has been specified. INVALID_VOLUME_LABEL = 0xC0000086, /// A mapped section could not be extended. SECTION_NOT_EXTENDED = 0xC0000087, /// Specified section to flush does not map a data file. NOT_MAPPED_DATA = 0xC0000088, /// Indicates the specified image file did not contain a resource section. RESOURCE_DATA_NOT_FOUND = 0xC0000089, /// Indicates the specified resource type cannot be found in the image file. RESOURCE_TYPE_NOT_FOUND = 0xC000008A, /// Indicates the specified resource name cannot be found in the image file. RESOURCE_NAME_NOT_FOUND = 0xC000008B, /// {EXCEPTION} Array bounds exceeded. ARRAY_BOUNDS_EXCEEDED = 0xC000008C, /// {EXCEPTION} Floating-point denormal operand. FLOAT_DENORMAL_OPERAND = 0xC000008D, /// {EXCEPTION} Floating-point division by zero. FLOAT_DIVIDE_BY_ZERO = 0xC000008E, /// {EXCEPTION} Floating-point inexact result. FLOAT_INEXACT_RESULT = 0xC000008F, /// {EXCEPTION} Floating-point invalid operation. FLOAT_INVALID_OPERATION = 0xC0000090, /// {EXCEPTION} Floating-point overflow. FLOAT_OVERFLOW = 0xC0000091, /// {EXCEPTION} Floating-point stack check. FLOAT_STACK_CHECK = 0xC0000092, /// {EXCEPTION} Floating-point underflow. FLOAT_UNDERFLOW = 0xC0000093, /// {EXCEPTION} Integer division by zero. INTEGER_DIVIDE_BY_ZERO = 0xC0000094, /// {EXCEPTION} Integer overflow. INTEGER_OVERFLOW = 0xC0000095, /// {EXCEPTION} Privileged instruction. PRIVILEGED_INSTRUCTION = 0xC0000096, /// An attempt was made to install more paging files than the system supports. 
TOO_MANY_PAGING_FILES = 0xC0000097, /// The volume for a file has been externally altered such that the opened file is no longer valid. FILE_INVALID = 0xC0000098, /// When a block of memory is allotted for future updates, such as the memory allocated to hold discretionary access control and primary group information, successive updates might exceed the amount of memory originally allotted. /// Because a quota might already have been charged to several processes that have handles to the object, it is not reasonable to alter the size of the allocated memory. /// Instead, a request that requires more memory than has been allotted must fail and the STATUS_ALLOTTED_SPACE_EXCEEDED error returned. ALLOTTED_SPACE_EXCEEDED = 0xC0000099, /// Insufficient system resources exist to complete the API. INSUFFICIENT_RESOURCES = 0xC000009A, /// An attempt has been made to open a DFS exit path control file. DFS_EXIT_PATH_FOUND = 0xC000009B, /// There are bad blocks (sectors) on the hard disk. DEVICE_DATA_ERROR = 0xC000009C, /// There is bad cabling, non-termination, or the controller is not able to obtain access to the hard disk. DEVICE_NOT_CONNECTED = 0xC000009D, /// Virtual memory cannot be freed because the base address is not the base of the region and a region size of zero was specified. FREE_VM_NOT_AT_BASE = 0xC000009F, /// An attempt was made to free virtual memory that is not allocated. MEMORY_NOT_ALLOCATED = 0xC00000A0, /// The working set is not big enough to allow the requested pages to be locked. WORKING_SET_QUOTA = 0xC00000A1, /// {Write Protect Error} The disk cannot be written to because it is write-protected. /// Remove the write protection from the volume %hs in drive %hs. MEDIA_WRITE_PROTECTED = 0xC00000A2, /// {Drive Not Ready} The drive is not ready for use; its door might be open. /// Check drive %hs and make sure that a disk is inserted and that the drive door is closed. 
DEVICE_NOT_READY = 0xC00000A3, /// The specified attributes are invalid or are incompatible with the attributes for the group as a whole. INVALID_GROUP_ATTRIBUTES = 0xC00000A4, /// A specified impersonation level is invalid. /// Also used to indicate that a required impersonation level was not provided. BAD_IMPERSONATION_LEVEL = 0xC00000A5, /// An attempt was made to open an anonymous-level token. Anonymous tokens cannot be opened. CANT_OPEN_ANONYMOUS = 0xC00000A6, /// The validation information class requested was invalid. BAD_VALIDATION_CLASS = 0xC00000A7, /// The type of a token object is inappropriate for its attempted use. BAD_TOKEN_TYPE = 0xC00000A8, /// The type of a token object is inappropriate for its attempted use. BAD_MASTER_BOOT_RECORD = 0xC00000A9, /// An attempt was made to execute an instruction at an unaligned address and the host system does not support unaligned instruction references. INSTRUCTION_MISALIGNMENT = 0xC00000AA, /// The maximum named pipe instance count has been reached. INSTANCE_NOT_AVAILABLE = 0xC00000AB, /// An instance of a named pipe cannot be found in the listening state. PIPE_NOT_AVAILABLE = 0xC00000AC, /// The named pipe is not in the connected or closing state. INVALID_PIPE_STATE = 0xC00000AD, /// The specified pipe is set to complete operations and there are current I/O operations queued so that it cannot be changed to queue operations. PIPE_BUSY = 0xC00000AE, /// The specified handle is not open to the server end of the named pipe. ILLEGAL_FUNCTION = 0xC00000AF, /// The specified named pipe is in the disconnected state. PIPE_DISCONNECTED = 0xC00000B0, /// The specified named pipe is in the closing state. PIPE_CLOSING = 0xC00000B1, /// The specified named pipe is in the connected state. PIPE_CONNECTED = 0xC00000B2, /// The specified named pipe is in the listening state. PIPE_LISTENING = 0xC00000B3, /// The specified named pipe is not in message mode. 
INVALID_READ_MODE = 0xC00000B4, /// {Device Timeout} The specified I/O operation on %hs was not completed before the time-out period expired. IO_TIMEOUT = 0xC00000B5, /// The specified file has been closed by another process. FILE_FORCED_CLOSED = 0xC00000B6, /// Profiling is not started. PROFILING_NOT_STARTED = 0xC00000B7, /// Profiling is not stopped. PROFILING_NOT_STOPPED = 0xC00000B8, /// The passed ACL did not contain the minimum required information. COULD_NOT_INTERPRET = 0xC00000B9, /// The file that was specified as a target is a directory, and the caller specified that it could be anything but a directory. FILE_IS_A_DIRECTORY = 0xC00000BA, /// The request is not supported. NOT_SUPPORTED = 0xC00000BB, /// This remote computer is not listening. REMOTE_NOT_LISTENING = 0xC00000BC, /// A duplicate name exists on the network. DUPLICATE_NAME = 0xC00000BD, /// The network path cannot be located. BAD_NETWORK_PATH = 0xC00000BE, /// The network is busy. NETWORK_BUSY = 0xC00000BF, /// This device does not exist. DEVICE_DOES_NOT_EXIST = 0xC00000C0, /// The network BIOS command limit has been reached. TOO_MANY_COMMANDS = 0xC00000C1, /// An I/O adapter hardware error has occurred. ADAPTER_HARDWARE_ERROR = 0xC00000C2, /// The network responded incorrectly. INVALID_NETWORK_RESPONSE = 0xC00000C3, /// An unexpected network error occurred. UNEXPECTED_NETWORK_ERROR = 0xC00000C4, /// The remote adapter is not compatible. BAD_REMOTE_ADAPTER = 0xC00000C5, /// The print queue is full. PRINT_QUEUE_FULL = 0xC00000C6, /// Space to store the file that is waiting to be printed is not available on the server. NO_SPOOL_SPACE = 0xC00000C7, /// The requested print file has been canceled. PRINT_CANCELLED = 0xC00000C8, /// The network name was deleted. NETWORK_NAME_DELETED = 0xC00000C9, /// Network access is denied. 
NETWORK_ACCESS_DENIED = 0xC00000CA, /// {Incorrect Network Resource Type} The specified device type (LPT, for example) conflicts with the actual device type on the remote resource. BAD_DEVICE_TYPE = 0xC00000CB, /// {Network Name Not Found} The specified share name cannot be found on the remote server. BAD_NETWORK_NAME = 0xC00000CC, /// The name limit for the network adapter card of the local computer was exceeded. TOO_MANY_NAMES = 0xC00000CD, /// The network BIOS session limit was exceeded. TOO_MANY_SESSIONS = 0xC00000CE, /// File sharing has been temporarily paused. SHARING_PAUSED = 0xC00000CF, /// No more connections can be made to this remote computer at this time because the computer has already accepted the maximum number of connections. REQUEST_NOT_ACCEPTED = 0xC00000D0, /// Print or disk redirection is temporarily paused. REDIRECTOR_PAUSED = 0xC00000D1, /// A network data fault occurred. NET_WRITE_FAULT = 0xC00000D2, /// The number of active profiling objects is at the maximum and no more can be started. PROFILING_AT_LIMIT = 0xC00000D3, /// {Incorrect Volume} The destination file of a rename request is located on a different device than the source of the rename request. NOT_SAME_DEVICE = 0xC00000D4, /// The specified file has been renamed and thus cannot be modified. FILE_RENAMED = 0xC00000D5, /// {Network Request Timeout} The session with a remote server has been disconnected because the time-out interval for a request has expired. VIRTUAL_CIRCUIT_CLOSED = 0xC00000D6, /// Indicates an attempt was made to operate on the security of an object that does not have security associated with it. NO_SECURITY_ON_OBJECT = 0xC00000D7, /// Used to indicate that an operation cannot continue without blocking for I/O. CANT_WAIT = 0xC00000D8, /// Used to indicate that a read operation was done on an empty pipe. 
PIPE_EMPTY = 0xC00000D9, /// Configuration information could not be read from the domain controller, either because the machine is unavailable or access has been denied. CANT_ACCESS_DOMAIN_INFO = 0xC00000DA, /// Indicates that a thread attempted to terminate itself by default (called NtTerminateThread with NULL) and it was the last thread in the current process. CANT_TERMINATE_SELF = 0xC00000DB, /// Indicates the Sam Server was in the wrong state to perform the desired operation. INVALID_SERVER_STATE = 0xC00000DC, /// Indicates the domain was in the wrong state to perform the desired operation. INVALID_DOMAIN_STATE = 0xC00000DD, /// This operation is only allowed for the primary domain controller of the domain. INVALID_DOMAIN_ROLE = 0xC00000DE, /// The specified domain did not exist. NO_SUCH_DOMAIN = 0xC00000DF, /// The specified domain already exists. DOMAIN_EXISTS = 0xC00000E0, /// An attempt was made to exceed the limit on the number of domains per server for this release. DOMAIN_LIMIT_EXCEEDED = 0xC00000E1, /// An error status returned when the opportunistic lock (oplock) request is denied. OPLOCK_NOT_GRANTED = 0xC00000E2, /// An error status returned when an invalid opportunistic lock (oplock) acknowledgment is received by a file system. INVALID_OPLOCK_PROTOCOL = 0xC00000E3, /// This error indicates that the requested operation cannot be completed due to a catastrophic media failure or an on-disk data structure corruption. INTERNAL_DB_CORRUPTION = 0xC00000E4, /// An internal error occurred. INTERNAL_ERROR = 0xC00000E5, /// Indicates generic access types were contained in an access mask which should already be mapped to non-generic access types. GENERIC_NOT_MAPPED = 0xC00000E6, /// Indicates a security descriptor is not in the necessary format (absolute or self-relative). BAD_DESCRIPTOR_FORMAT = 0xC00000E7, /// An access to a user buffer failed at an expected point in time. 
/// This code is defined because the caller does not want to accept STATUS_ACCESS_VIOLATION in its filter. INVALID_USER_BUFFER = 0xC00000E8, /// If an I/O error that is not defined in the standard FsRtl filter is returned, it is converted to the following error, which is guaranteed to be in the filter. /// In this case, information is lost; however, the filter correctly handles the exception. UNEXPECTED_IO_ERROR = 0xC00000E9, /// If an MM error that is not defined in the standard FsRtl filter is returned, it is converted to one of the following errors, which are guaranteed to be in the filter. /// In this case, information is lost; however, the filter correctly handles the exception. UNEXPECTED_MM_CREATE_ERR = 0xC00000EA, /// If an MM error that is not defined in the standard FsRtl filter is returned, it is converted to one of the following errors, which are guaranteed to be in the filter. /// In this case, information is lost; however, the filter correctly handles the exception. UNEXPECTED_MM_MAP_ERROR = 0xC00000EB, /// If an MM error that is not defined in the standard FsRtl filter is returned, it is converted to one of the following errors, which are guaranteed to be in the filter. /// In this case, information is lost; however, the filter correctly handles the exception. UNEXPECTED_MM_EXTEND_ERR = 0xC00000EC, /// The requested action is restricted for use by logon processes only. /// The calling process has not registered as a logon process. NOT_LOGON_PROCESS = 0xC00000ED, /// An attempt has been made to start a new session manager or LSA logon session by using an ID that is already in use. LOGON_SESSION_EXISTS = 0xC00000EE, /// An invalid parameter was passed to a service or function as the first argument. INVALID_PARAMETER_1 = 0xC00000EF, /// An invalid parameter was passed to a service or function as the second argument. INVALID_PARAMETER_2 = 0xC00000F0, /// An invalid parameter was passed to a service or function as the third argument. 
INVALID_PARAMETER_3 = 0xC00000F1, /// An invalid parameter was passed to a service or function as the fourth argument. INVALID_PARAMETER_4 = 0xC00000F2, /// An invalid parameter was passed to a service or function as the fifth argument. INVALID_PARAMETER_5 = 0xC00000F3, /// An invalid parameter was passed to a service or function as the sixth argument. INVALID_PARAMETER_6 = 0xC00000F4, /// An invalid parameter was passed to a service or function as the seventh argument. INVALID_PARAMETER_7 = 0xC00000F5, /// An invalid parameter was passed to a service or function as the eighth argument. INVALID_PARAMETER_8 = 0xC00000F6, /// An invalid parameter was passed to a service or function as the ninth argument. INVALID_PARAMETER_9 = 0xC00000F7, /// An invalid parameter was passed to a service or function as the tenth argument. INVALID_PARAMETER_10 = 0xC00000F8, /// An invalid parameter was passed to a service or function as the eleventh argument. INVALID_PARAMETER_11 = 0xC00000F9, /// An invalid parameter was passed to a service or function as the twelfth argument. INVALID_PARAMETER_12 = 0xC00000FA, /// An attempt was made to access a network file, but the network software was not yet started. REDIRECTOR_NOT_STARTED = 0xC00000FB, /// An attempt was made to start the redirector, but the redirector has already been started. REDIRECTOR_STARTED = 0xC00000FC, /// A new guard page for the stack cannot be created. STACK_OVERFLOW = 0xC00000FD, /// A specified authentication package is unknown. NO_SUCH_PACKAGE = 0xC00000FE, /// A malformed function table was encountered during an unwind operation. BAD_FUNCTION_TABLE = 0xC00000FF, /// Indicates the specified environment variable name was not found in the specified environment block. VARIABLE_NOT_FOUND = 0xC0000100, /// Indicates that the directory trying to be deleted is not empty. DIRECTORY_NOT_EMPTY = 0xC0000101, /// {Corrupt File} The file or directory %hs is corrupt and unreadable. Run the Chkdsk utility. 
FILE_CORRUPT_ERROR = 0xC0000102, /// A requested opened file is not a directory. NOT_A_DIRECTORY = 0xC0000103, /// The logon session is not in a state that is consistent with the requested operation. BAD_LOGON_SESSION_STATE = 0xC0000104, /// An internal LSA error has occurred. /// An authentication package has requested the creation of a logon session but the ID of an already existing logon session has been specified. LOGON_SESSION_COLLISION = 0xC0000105, /// A specified name string is too long for its intended use. NAME_TOO_LONG = 0xC0000106, /// The user attempted to force close the files on a redirected drive, but there were opened files on the drive, and the user did not specify a sufficient level of force. FILES_OPEN = 0xC0000107, /// The user attempted to force close the files on a redirected drive, but there were opened directories on the drive, and the user did not specify a sufficient level of force. CONNECTION_IN_USE = 0xC0000108, /// RtlFindMessage could not locate the requested message ID in the message table resource. MESSAGE_NOT_FOUND = 0xC0000109, /// An attempt was made to duplicate an object handle into or out of an exiting process. PROCESS_IS_TERMINATING = 0xC000010A, /// Indicates an invalid value has been provided for the LogonType requested. INVALID_LOGON_TYPE = 0xC000010B, /// Indicates that an attempt was made to assign protection to a file system file or directory and one of the SIDs in the security descriptor could not be translated into a GUID that could be stored by the file system. /// This causes the protection attempt to fail, which might cause a file creation attempt to fail. NO_GUID_TRANSLATION = 0xC000010C, /// Indicates that an attempt has been made to impersonate via a named pipe that has not yet been read from. CANNOT_IMPERSONATE = 0xC000010D, /// Indicates that the specified image is already loaded. 
IMAGE_ALREADY_LOADED = 0xC000010E, /// Indicates that an attempt was made to change the size of the LDT for a process that has no LDT. NO_LDT = 0xC0000117, /// Indicates that an attempt was made to grow an LDT by setting its size, or that the size was not an even number of selectors. INVALID_LDT_SIZE = 0xC0000118, /// Indicates that the starting value for the LDT information was not an integral multiple of the selector size. INVALID_LDT_OFFSET = 0xC0000119, /// Indicates that the user supplied an invalid descriptor when trying to set up LDT descriptors. INVALID_LDT_DESCRIPTOR = 0xC000011A, /// The specified image file did not have the correct format. It appears to be NE format. INVALID_IMAGE_NE_FORMAT = 0xC000011B, /// Indicates that the transaction state of a registry subtree is incompatible with the requested operation. /// For example, a request has been made to start a new transaction with one already in progress, or a request has been made to apply a transaction when one is not currently in progress. RXACT_INVALID_STATE = 0xC000011C, /// Indicates an error has occurred during a registry transaction commit. /// The database has been left in an unknown, but probably inconsistent, state. /// The state of the registry transaction is left as COMMITTING. RXACT_COMMIT_FAILURE = 0xC000011D, /// An attempt was made to map a file of size zero with the maximum size specified as zero. MAPPED_FILE_SIZE_ZERO = 0xC000011E, /// Too many files are opened on a remote server. /// This error should only be returned by the Windows redirector on a remote drive. TOO_MANY_OPENED_FILES = 0xC000011F, /// The I/O request was canceled. CANCELLED = 0xC0000120, /// An attempt has been made to remove a file or directory that cannot be deleted. CANNOT_DELETE = 0xC0000121, /// Indicates a name that was specified as a remote computer name is syntactically invalid. 
INVALID_COMPUTER_NAME = 0xC0000122, /// An I/O request other than close was performed on a file after it was deleted, which can only happen to a request that did not complete before the last handle was closed via NtClose. FILE_DELETED = 0xC0000123, /// Indicates an operation that is incompatible with built-in accounts has been attempted on a built-in (special) SAM account. For example, built-in accounts cannot be deleted. SPECIAL_ACCOUNT = 0xC0000124, /// The operation requested cannot be performed on the specified group because it is a built-in special group. SPECIAL_GROUP = 0xC0000125, /// The operation requested cannot be performed on the specified user because it is a built-in special user. SPECIAL_USER = 0xC0000126, /// Indicates a member cannot be removed from a group because the group is currently the member's primary group. MEMBERS_PRIMARY_GROUP = 0xC0000127, /// An I/O request other than close and several other special case operations was attempted using a file object that had already been closed. FILE_CLOSED = 0xC0000128, /// Indicates a process has too many threads to perform the requested action. /// For example, assignment of a primary token can be performed only when a process has zero or one threads. TOO_MANY_THREADS = 0xC0000129, /// An attempt was made to operate on a thread within a specific process, but the specified thread is not in the specified process. THREAD_NOT_IN_PROCESS = 0xC000012A, /// An attempt was made to establish a token for use as a primary token but the token is already in use. /// A token can only be the primary token of one process at a time. TOKEN_ALREADY_IN_USE = 0xC000012B, /// The page file quota was exceeded. PAGEFILE_QUOTA_EXCEEDED = 0xC000012C, /// {Out of Virtual Memory} Your system is low on virtual memory. /// To ensure that Windows runs correctly, increase the size of your virtual memory paging file. For more information, see Help. 
COMMITMENT_LIMIT = 0xC000012D, /// The specified image file did not have the correct format: it appears to be LE format. INVALID_IMAGE_LE_FORMAT = 0xC000012E, /// The specified image file did not have the correct format: it did not have an initial MZ. INVALID_IMAGE_NOT_MZ = 0xC000012F, /// The specified image file did not have the correct format: it did not have a proper e_lfarlc in the MZ header. INVALID_IMAGE_PROTECT = 0xC0000130, /// The specified image file did not have the correct format: it appears to be a 16-bit Windows image. INVALID_IMAGE_WIN_16 = 0xC0000131, /// The Netlogon service cannot start because another Netlogon service running in the domain conflicts with the specified role. LOGON_SERVER_CONFLICT = 0xC0000132, /// The time at the primary domain controller is different from the time at the backup domain controller or member server by too large an amount. TIME_DIFFERENCE_AT_DC = 0xC0000133, /// On applicable Windows Server releases, the SAM database is significantly out of synchronization with the copy on the domain controller. A complete synchronization is required. SYNCHRONIZATION_REQUIRED = 0xC0000134, /// {Unable To Locate Component} This application has failed to start because %hs was not found. /// Reinstalling the application might fix this problem. DLL_NOT_FOUND = 0xC0000135, /// The NtCreateFile API failed. This error should never be returned to an application; it is a place holder for the Windows LAN Manager Redirector to use in its internal error-mapping routines. OPEN_FAILED = 0xC0000136, /// {Privilege Failed} The I/O permissions for the process could not be changed. IO_PRIVILEGE_FAILED = 0xC0000137, /// {Ordinal Not Found} The ordinal %ld could not be located in the dynamic link library %hs. ORDINAL_NOT_FOUND = 0xC0000138, /// {Entry Point Not Found} The procedure entry point %hs could not be located in the dynamic link library %hs. 
ENTRYPOINT_NOT_FOUND = 0xC0000139, /// {Application Exit by CTRL+C} The application terminated as a result of a CTRL+C. CONTROL_C_EXIT = 0xC000013A, /// {Virtual Circuit Closed} The network transport on your computer has closed a network connection. /// There might or might not be I/O requests outstanding. LOCAL_DISCONNECT = 0xC000013B, /// {Virtual Circuit Closed} The network transport on a remote computer has closed a network connection. /// There might or might not be I/O requests outstanding. REMOTE_DISCONNECT = 0xC000013C, /// {Insufficient Resources on Remote Computer} The remote computer has insufficient resources to complete the network request. /// For example, the remote computer might not have enough available memory to carry out the request at this time. REMOTE_RESOURCES = 0xC000013D, /// {Virtual Circuit Closed} An existing connection (virtual circuit) has been broken at the remote computer. /// There is probably something wrong with the network software protocol or the network hardware on the remote computer. LINK_FAILED = 0xC000013E, /// {Virtual Circuit Closed} The network transport on your computer has closed a network connection because it had to wait too long for a response from the remote computer. LINK_TIMEOUT = 0xC000013F, /// The connection handle that was given to the transport was invalid. INVALID_CONNECTION = 0xC0000140, /// The address handle that was given to the transport was invalid. INVALID_ADDRESS = 0xC0000141, /// {DLL Initialization Failed} Initialization of the dynamic link library %hs failed. The process is terminating abnormally. DLL_INIT_FAILED = 0xC0000142, /// {Missing System File} The required system file %hs is bad or missing. MISSING_SYSTEMFILE = 0xC0000143, /// {Application Error} The exception %s (0x%08lx) occurred in the application at location 0x%08lx. UNHANDLED_EXCEPTION = 0xC0000144, /// {Application Error} The application failed to initialize properly (0x%lx). Click OK to terminate the application. 
APP_INIT_FAILURE = 0xC0000145,
/// {Unable to Create Paging File} The creation of the paging file %hs failed (%lx). The requested size was %ld.
PAGEFILE_CREATE_FAILED = 0xC0000146,
/// {No Paging File Specified} No paging file was specified in the system configuration.
NO_PAGEFILE = 0xC0000147,
/// {Incorrect System Call Level} An invalid level was passed into the specified system call.
INVALID_LEVEL = 0xC0000148,
/// {Incorrect Password to LAN Manager Server} You specified an incorrect password to a LAN Manager 2.x or MS-NET server.
WRONG_PASSWORD_CORE = 0xC0000149,
/// {EXCEPTION} A real-mode application issued a floating-point instruction and floating-point hardware is not present.
ILLEGAL_FLOAT_CONTEXT = 0xC000014A,
/// The pipe operation has failed because the other end of the pipe has been closed.
PIPE_BROKEN = 0xC000014B,
/// {The Registry Is Corrupt} The structure of one of the files that contains registry data is corrupt; the image of the file in memory is corrupt; or the file could not be recovered because the alternate copy or log was absent or corrupt.
REGISTRY_CORRUPT = 0xC000014C,
/// An I/O operation initiated by the Registry failed and cannot be recovered.
/// The registry could not read in, write out, or flush one of the files that contain the system's image of the registry.
REGISTRY_IO_FAILED = 0xC000014D,
/// An event pair synchronization operation was performed using the thread-specific client/server event pair object, but no event pair object was associated with the thread.
NO_EVENT_PAIR = 0xC000014E,
/// The volume does not contain a recognized file system.
/// Be sure that all required file system drivers are loaded and that the volume is not corrupt.
UNRECOGNIZED_VOLUME = 0xC000014F,
/// No serial device was successfully initialized. The serial driver will unload.
SERIAL_NO_DEVICE_INITED = 0xC0000150,
/// The specified local group does not exist.
NO_SUCH_ALIAS = 0xC0000151,
/// The specified account name is not a member of the group.
MEMBER_NOT_IN_ALIAS = 0xC0000152, /// The specified account name is already a member of the group. MEMBER_IN_ALIAS = 0xC0000153, /// The specified local group already exists. ALIAS_EXISTS = 0xC0000154, /// A requested type of logon (for example, interactive, network, and service) is not granted by the local security policy of the target system. /// Ask the system administrator to grant the necessary form of logon. LOGON_NOT_GRANTED = 0xC0000155, /// The maximum number of secrets that can be stored in a single system was exceeded. /// The length and number of secrets is limited to satisfy U.S. State Department export restrictions. TOO_MANY_SECRETS = 0xC0000156, /// The length of a secret exceeds the maximum allowable length. /// The length and number of secrets is limited to satisfy U.S. State Department export restrictions. SECRET_TOO_LONG = 0xC0000157, /// The local security authority (LSA) database contains an internal inconsistency. INTERNAL_DB_ERROR = 0xC0000158, /// The requested operation cannot be performed in full-screen mode. FULLSCREEN_MODE = 0xC0000159, /// During a logon attempt, the user's security context accumulated too many security IDs. This is a very unusual situation. /// Remove the user from some global or local groups to reduce the number of security IDs to incorporate into the security context. TOO_MANY_CONTEXT_IDS = 0xC000015A, /// A user has requested a type of logon (for example, interactive or network) that has not been granted. /// An administrator has control over who can logon interactively and through the network. LOGON_TYPE_NOT_GRANTED = 0xC000015B, /// The system has attempted to load or restore a file into the registry, and the specified file is not in the format of a registry file. NOT_REGISTRY_FILE = 0xC000015C, /// An attempt was made to change a user password in the security account manager without providing the necessary Windows cross-encrypted password. 
NT_CROSS_ENCRYPTION_REQUIRED = 0xC000015D, /// A domain server has an incorrect configuration. DOMAIN_CTRLR_CONFIG_ERROR = 0xC000015E, /// An attempt was made to explicitly access the secondary copy of information via a device control to the fault tolerance driver and the secondary copy is not present in the system. FT_MISSING_MEMBER = 0xC000015F, /// A configuration registry node that represents a driver service entry was ill-formed and did not contain the required value entries. ILL_FORMED_SERVICE_ENTRY = 0xC0000160, /// An illegal character was encountered. /// For a multibyte character set, this includes a lead byte without a succeeding trail byte. /// For the Unicode character set this includes the characters 0xFFFF and 0xFFFE. ILLEGAL_CHARACTER = 0xC0000161, /// No mapping for the Unicode character exists in the target multibyte code page. UNMAPPABLE_CHARACTER = 0xC0000162, /// The Unicode character is not defined in the Unicode character set that is installed on the system. UNDEFINED_CHARACTER = 0xC0000163, /// The paging file cannot be created on a floppy disk. FLOPPY_VOLUME = 0xC0000164, /// {Floppy Disk Error} While accessing a floppy disk, an ID address mark was not found. FLOPPY_ID_MARK_NOT_FOUND = 0xC0000165, /// {Floppy Disk Error} While accessing a floppy disk, the track address from the sector ID field was found to be different from the track address that is maintained by the controller. FLOPPY_WRONG_CYLINDER = 0xC0000166, /// {Floppy Disk Error} The floppy disk controller reported an error that is not recognized by the floppy disk driver. FLOPPY_UNKNOWN_ERROR = 0xC0000167, /// {Floppy Disk Error} While accessing a floppy-disk, the controller returned inconsistent results via its registers. FLOPPY_BAD_REGISTERS = 0xC0000168, /// {Hard Disk Error} While accessing the hard disk, a recalibrate operation failed, even after retries. 
DISK_RECALIBRATE_FAILED = 0xC0000169, /// {Hard Disk Error} While accessing the hard disk, a disk operation failed even after retries. DISK_OPERATION_FAILED = 0xC000016A, /// {Hard Disk Error} While accessing the hard disk, a disk controller reset was needed, but even that failed. DISK_RESET_FAILED = 0xC000016B, /// An attempt was made to open a device that was sharing an interrupt request (IRQ) with other devices. /// At least one other device that uses that IRQ was already opened. /// Two concurrent opens of devices that share an IRQ and only work via interrupts is not supported for the particular bus type that the devices use. SHARED_IRQ_BUSY = 0xC000016C, /// {FT Orphaning} A disk that is part of a fault-tolerant volume can no longer be accessed. FT_ORPHANING = 0xC000016D, /// The basic input/output system (BIOS) failed to connect a system interrupt to the device or bus for which the device is connected. BIOS_FAILED_TO_CONNECT_INTERRUPT = 0xC000016E, /// The tape could not be partitioned. PARTITION_FAILURE = 0xC0000172, /// When accessing a new tape of a multi-volume partition, the current blocksize is incorrect. INVALID_BLOCK_LENGTH = 0xC0000173, /// The tape partition information could not be found when loading a tape. DEVICE_NOT_PARTITIONED = 0xC0000174, /// An attempt to lock the eject media mechanism failed. UNABLE_TO_LOCK_MEDIA = 0xC0000175, /// An attempt to unload media failed. UNABLE_TO_UNLOAD_MEDIA = 0xC0000176, /// The physical end of tape was detected. EOM_OVERFLOW = 0xC0000177, /// {No Media} There is no media in the drive. Insert media into drive %hs. NO_MEDIA = 0xC0000178, /// A member could not be added to or removed from the local group because the member does not exist. NO_SUCH_MEMBER = 0xC000017A, /// A new member could not be added to a local group because the member has the wrong account type. INVALID_MEMBER = 0xC000017B, /// An illegal operation was attempted on a registry key that has been marked for deletion. 
KEY_DELETED = 0xC000017C, /// The system could not allocate the required space in a registry log. NO_LOG_SPACE = 0xC000017D, /// Too many SIDs have been specified. TOO_MANY_SIDS = 0xC000017E, /// An attempt was made to change a user password in the security account manager without providing the necessary LM cross-encrypted password. LM_CROSS_ENCRYPTION_REQUIRED = 0xC000017F, /// An attempt was made to create a symbolic link in a registry key that already has subkeys or values. KEY_HAS_CHILDREN = 0xC0000180, /// An attempt was made to create a stable subkey under a volatile parent key. CHILD_MUST_BE_VOLATILE = 0xC0000181, /// The I/O device is configured incorrectly or the configuration parameters to the driver are incorrect. DEVICE_CONFIGURATION_ERROR = 0xC0000182, /// An error was detected between two drivers or within an I/O driver. DRIVER_INTERNAL_ERROR = 0xC0000183, /// The device is not in a valid state to perform this request. INVALID_DEVICE_STATE = 0xC0000184, /// The I/O device reported an I/O error. IO_DEVICE_ERROR = 0xC0000185, /// A protocol error was detected between the driver and the device. DEVICE_PROTOCOL_ERROR = 0xC0000186, /// This operation is only allowed for the primary domain controller of the domain. BACKUP_CONTROLLER = 0xC0000187, /// The log file space is insufficient to support this operation. LOG_FILE_FULL = 0xC0000188, /// A write operation was attempted to a volume after it was dismounted. TOO_LATE = 0xC0000189, /// The workstation does not have a trust secret for the primary domain in the local LSA database. NO_TRUST_LSA_SECRET = 0xC000018A, /// On applicable Windows Server releases, the SAM database does not have a computer account for this workstation trust relationship. NO_TRUST_SAM_ACCOUNT = 0xC000018B, /// The logon request failed because the trust relationship between the primary domain and the trusted domain failed. 
TRUSTED_DOMAIN_FAILURE = 0xC000018C, /// The logon request failed because the trust relationship between this workstation and the primary domain failed. TRUSTED_RELATIONSHIP_FAILURE = 0xC000018D, /// The Eventlog log file is corrupt. EVENTLOG_FILE_CORRUPT = 0xC000018E, /// No Eventlog log file could be opened. The Eventlog service did not start. EVENTLOG_CANT_START = 0xC000018F, /// The network logon failed. This might be because the validation authority cannot be reached. TRUST_FAILURE = 0xC0000190, /// An attempt was made to acquire a mutant such that its maximum count would have been exceeded. MUTANT_LIMIT_EXCEEDED = 0xC0000191, /// An attempt was made to logon, but the NetLogon service was not started. NETLOGON_NOT_STARTED = 0xC0000192, /// The user account has expired. ACCOUNT_EXPIRED = 0xC0000193, /// {EXCEPTION} Possible deadlock condition. POSSIBLE_DEADLOCK = 0xC0000194, /// Multiple connections to a server or shared resource by the same user, using more than one user name, are not allowed. /// Disconnect all previous connections to the server or shared resource and try again. NETWORK_CREDENTIAL_CONFLICT = 0xC0000195, /// An attempt was made to establish a session to a network server, but there are already too many sessions established to that server. REMOTE_SESSION_LIMIT = 0xC0000196, /// The log file has changed between reads. EVENTLOG_FILE_CHANGED = 0xC0000197, /// The account used is an interdomain trust account. /// Use your global user account or local user account to access this server. NOLOGON_INTERDOMAIN_TRUST_ACCOUNT = 0xC0000198, /// The account used is a computer account. /// Use your global user account or local user account to access this server. NOLOGON_WORKSTATION_TRUST_ACCOUNT = 0xC0000199, /// The account used is a server trust account. /// Use your global user account or local user account to access this server. 
NOLOGON_SERVER_TRUST_ACCOUNT = 0xC000019A, /// The name or SID of the specified domain is inconsistent with the trust information for that domain. DOMAIN_TRUST_INCONSISTENT = 0xC000019B, /// A volume has been accessed for which a file system driver is required that has not yet been loaded. FS_DRIVER_REQUIRED = 0xC000019C, /// Indicates that the specified image is already loaded as a DLL. IMAGE_ALREADY_LOADED_AS_DLL = 0xC000019D, /// Short name settings cannot be changed on this volume due to the global registry setting. INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING = 0xC000019E, /// Short names are not enabled on this volume. SHORT_NAMES_NOT_ENABLED_ON_VOLUME = 0xC000019F, /// The security stream for the given volume is in an inconsistent state. Please run CHKDSK on the volume. SECURITY_STREAM_IS_INCONSISTENT = 0xC00001A0, /// A requested file lock operation cannot be processed due to an invalid byte range. INVALID_LOCK_RANGE = 0xC00001A1, /// The specified access control entry (ACE) contains an invalid condition. INVALID_ACE_CONDITION = 0xC00001A2, /// The subsystem needed to support the image type is not present. IMAGE_SUBSYSTEM_NOT_PRESENT = 0xC00001A3, /// The specified file already has a notification GUID associated with it. NOTIFICATION_GUID_ALREADY_DEFINED = 0xC00001A4, /// A remote open failed because the network open restrictions were not satisfied. NETWORK_OPEN_RESTRICTION = 0xC0000201, /// There is no user session key for the specified logon session. NO_USER_SESSION_KEY = 0xC0000202, /// The remote user session has been deleted. USER_SESSION_DELETED = 0xC0000203, /// Indicates the specified resource language ID cannot be found in the image file. RESOURCE_LANG_NOT_FOUND = 0xC0000204, /// Insufficient server resources exist to complete the request. INSUFF_SERVER_RESOURCES = 0xC0000205, /// The size of the buffer is invalid for the specified operation. 
INVALID_BUFFER_SIZE = 0xC0000206, /// The transport rejected the specified network address as invalid. INVALID_ADDRESS_COMPONENT = 0xC0000207, /// The transport rejected the specified network address due to invalid use of a wildcard. INVALID_ADDRESS_WILDCARD = 0xC0000208, /// The transport address could not be opened because all the available addresses are in use. TOO_MANY_ADDRESSES = 0xC0000209, /// The transport address could not be opened because it already exists. ADDRESS_ALREADY_EXISTS = 0xC000020A, /// The transport address is now closed. ADDRESS_CLOSED = 0xC000020B, /// The transport connection is now disconnected. CONNECTION_DISCONNECTED = 0xC000020C, /// The transport connection has been reset. CONNECTION_RESET = 0xC000020D, /// The transport cannot dynamically acquire any more nodes. TOO_MANY_NODES = 0xC000020E, /// The transport aborted a pending transaction. TRANSACTION_ABORTED = 0xC000020F, /// The transport timed out a request that is waiting for a response. TRANSACTION_TIMED_OUT = 0xC0000210, /// The transport did not receive a release for a pending response. TRANSACTION_NO_RELEASE = 0xC0000211, /// The transport did not find a transaction that matches the specific token. TRANSACTION_NO_MATCH = 0xC0000212, /// The transport had previously responded to a transaction request. TRANSACTION_RESPONDED = 0xC0000213, /// The transport does not recognize the specified transaction request ID. TRANSACTION_INVALID_ID = 0xC0000214, /// The transport does not recognize the specified transaction request type. TRANSACTION_INVALID_TYPE = 0xC0000215, /// The transport can only process the specified request on the server side of a session. NOT_SERVER_SESSION = 0xC0000216, /// The transport can only process the specified request on the client side of a session. NOT_CLIENT_SESSION = 0xC0000217, /// {Registry File Failure} The registry cannot load the hive (file): %hs or its log or alternate. It is corrupt, absent, or not writable. 
CANNOT_LOAD_REGISTRY_FILE = 0xC0000218, /// {Unexpected Failure in DebugActiveProcess} An unexpected failure occurred while processing a DebugActiveProcess API request. /// Choosing OK will terminate the process, and choosing Cancel will ignore the error. DEBUG_ATTACH_FAILED = 0xC0000219, /// {Fatal System Error} The %hs system process terminated unexpectedly with a status of 0x%08x (0x%08x 0x%08x). The system has been shut down. SYSTEM_PROCESS_TERMINATED = 0xC000021A, /// {Data Not Accepted} The TDI client could not handle the data received during an indication. DATA_NOT_ACCEPTED = 0xC000021B, /// {Unable to Retrieve Browser Server List} The list of servers for this workgroup is not currently available. NO_BROWSER_SERVERS_FOUND = 0xC000021C, /// NTVDM encountered a hard error. VDM_HARD_ERROR = 0xC000021D, /// {Cancel Timeout} The driver %hs failed to complete a canceled I/O request in the allotted time. DRIVER_CANCEL_TIMEOUT = 0xC000021E, /// {Reply Message Mismatch} An attempt was made to reply to an LPC message, but the thread specified by the client ID in the message was not waiting on that message. REPLY_MESSAGE_MISMATCH = 0xC000021F, /// {Mapped View Alignment Incorrect} An attempt was made to map a view of a file, but either the specified base address or the offset into the file were not aligned on the proper allocation granularity. MAPPED_ALIGNMENT = 0xC0000220, /// {Bad Image Checksum} The image %hs is possibly corrupt. /// The header checksum does not match the computed checksum. IMAGE_CHECKSUM_MISMATCH = 0xC0000221, /// {Delayed Write Failed} Windows was unable to save all the data for the file %hs. The data has been lost. /// This error might be caused by a failure of your computer hardware or network connection. Try to save this file elsewhere. LOST_WRITEBEHIND_DATA = 0xC0000222, /// The parameters passed to the server in the client/server shared memory window were invalid. /// Too much data might have been put in the shared memory window. 
CLIENT_SERVER_PARAMETERS_INVALID = 0xC0000223, /// The user password must be changed before logging on the first time. PASSWORD_MUST_CHANGE = <PASSWORD>, /// The object was not found. NOT_FOUND = 0xC0000225, /// The stream is not a tiny stream. NOT_TINY_STREAM = 0xC0000226, /// A transaction recovery failed. RECOVERY_FAILURE = 0xC0000227, /// The request must be handled by the stack overflow code. STACK_OVERFLOW_READ = 0xC0000228, /// A consistency check failed. FAIL_CHECK = 0xC0000229, /// The attempt to insert the ID in the index failed because the ID is already in the index. DUPLICATE_OBJECTID = 0xC000022A, /// The attempt to set the object ID failed because the object already has an ID. OBJECTID_EXISTS = 0xC000022B, /// Internal OFS status codes indicating how an allocation operation is handled. /// Either it is retried after the containing oNode is moved or the extent stream is converted to a large stream. CONVERT_TO_LARGE = 0xC000022C, /// The request needs to be retried. RETRY = 0xC000022D, /// The attempt to find the object found an object on the volume that matches by ID; however, it is out of the scope of the handle that is used for the operation. FOUND_OUT_OF_SCOPE = 0xC000022E, /// The bucket array must be grown. Retry the transaction after doing so. ALLOCATE_BUCKET = 0xC000022F, /// The specified property set does not exist on the object. PROPSET_NOT_FOUND = 0xC0000230, /// The user/kernel marshaling buffer has overflowed. MARSHALL_OVERFLOW = 0xC0000231, /// The supplied variant structure contains invalid data. INVALID_VARIANT = 0xC0000232, /// A domain controller for this domain was not found. DOMAIN_CONTROLLER_NOT_FOUND = 0xC0000233, /// The user account has been automatically locked because too many invalid logon attempts or password change attempts have been requested. ACCOUNT_LOCKED_OUT = 0xC0000234, /// NtClose was called on a handle that was protected from close via NtSetInformationObject. 
HANDLE_NOT_CLOSABLE = 0xC0000235, /// The transport-connection attempt was refused by the remote system. CONNECTION_REFUSED = 0xC0000236, /// The transport connection was gracefully closed. GRACEFUL_DISCONNECT = 0xC0000237, /// The transport endpoint already has an address associated with it. ADDRESS_ALREADY_ASSOCIATED = 0xC0000238, /// An address has not yet been associated with the transport endpoint. ADDRESS_NOT_ASSOCIATED = 0xC0000239, /// An operation was attempted on a nonexistent transport connection. CONNECTION_INVALID = 0xC000023A, /// An invalid operation was attempted on an active transport connection. CONNECTION_ACTIVE = 0xC000023B, /// The remote network is not reachable by the transport. NETWORK_UNREACHABLE = 0xC000023C, /// The remote system is not reachable by the transport. HOST_UNREACHABLE = 0xC000023D, /// The remote system does not support the transport protocol. PROTOCOL_UNREACHABLE = 0xC000023E, /// No service is operating at the destination port of the transport on the remote system. PORT_UNREACHABLE = 0xC000023F, /// The request was aborted. REQUEST_ABORTED = 0xC0000240, /// The transport connection was aborted by the local system. CONNECTION_ABORTED = 0xC0000241, /// The specified buffer contains ill-formed data. BAD_COMPRESSION_BUFFER = 0xC0000242, /// The requested operation cannot be performed on a file with a user mapped section open. USER_MAPPED_FILE = 0xC0000243, /// {Audit Failed} An attempt to generate a security audit failed. AUDIT_FAILED = 0xC0000244, /// The timer resolution was not previously set by the current process. TIMER_RESOLUTION_NOT_SET = 0xC0000245, /// A connection to the server could not be made because the limit on the number of concurrent connections for this account has been reached. CONNECTION_COUNT_LIMIT = 0xC0000246, /// Attempting to log on during an unauthorized time of day for this account. LOGIN_TIME_RESTRICTION = 0xC0000247, /// The account is not authorized to log on from this station. 
LOGIN_WKSTA_RESTRICTION = 0xC0000248, /// {UP/MP Image Mismatch} The image %hs has been modified for use on a uniprocessor system, but you are running it on a multiprocessor machine. Reinstall the image file. IMAGE_MP_UP_MISMATCH = 0xC0000249, /// There is insufficient account information to log you on. INSUFFICIENT_LOGON_INFO = 0xC0000250, /// {Invalid DLL Entrypoint} The dynamic link library %hs is not written correctly. /// The stack pointer has been left in an inconsistent state. /// The entry point should be declared as WINAPI or STDCALL. /// Select YES to fail the DLL load. Select NO to continue execution. /// Selecting NO might cause the application to operate incorrectly. BAD_DLL_ENTRYPOINT = 0xC0000251, /// {Invalid Service Callback Entrypoint} The %hs service is not written correctly. /// The stack pointer has been left in an inconsistent state. /// The callback entry point should be declared as WINAPI or STDCALL. /// Selecting OK will cause the service to continue operation. /// However, the service process might operate incorrectly. BAD_SERVICE_ENTRYPOINT = 0xC0000252, /// The server received the messages but did not send a reply. LPC_REPLY_LOST = 0xC0000253, /// There is an IP address conflict with another system on the network. IP_ADDRESS_CONFLICT1 = 0xC0000254, /// There is an IP address conflict with another system on the network. IP_ADDRESS_CONFLICT2 = 0xC0000255, /// {Low On Registry Space} The system has reached the maximum size that is allowed for the system part of the registry. Additional storage requests will be ignored. REGISTRY_QUOTA_LIMIT = 0xC0000256, /// The contacted server does not support the indicated part of the DFS namespace. PATH_NOT_COVERED = 0xC0000257, /// A callback return system service cannot be executed when no callback is active. NO_CALLBACK_ACTIVE = 0xC0000258, /// The service being accessed is licensed for a particular number of connections. 
/// No more connections can be made to the service at this time because the service has already accepted the maximum number of connections. LICENSE_QUOTA_EXCEEDED = 0xC0000259, /// The password provided is too short to meet the policy of your user account. Choose a longer password. PWD_TOO_SHORT = 0xC000025A, /// The policy of your user account does not allow you to change passwords too frequently. /// This is done to prevent users from changing back to a familiar, but potentially discovered, password. /// If you feel your password has been compromised, contact your administrator immediately to have a new one assigned. PWD_TOO_RECENT = 0xC000025B, /// You have attempted to change your password to one that you have used in the past. /// The policy of your user account does not allow this. /// Select a password that you have not previously used. PWD_HISTORY_CONFLICT = 0xC000025C, /// You have attempted to load a legacy device driver while its device instance had been disabled. PLUGPLAY_NO_DEVICE = 0xC000025E, /// The specified compression format is unsupported. UNSUPPORTED_COMPRESSION = 0xC000025F, /// The specified hardware profile configuration is invalid. INVALID_HW_PROFILE = 0xC0000260, /// The specified Plug and Play registry device path is invalid. INVALID_PLUGPLAY_DEVICE_PATH = 0xC0000261, /// {Driver Entry Point Not Found} The %hs device driver could not locate the ordinal %ld in driver %hs. DRIVER_ORDINAL_NOT_FOUND = 0xC0000262, /// {Driver Entry Point Not Found} The %hs device driver could not locate the entry point %hs in driver %hs. DRIVER_ENTRYPOINT_NOT_FOUND = 0xC0000263, /// {Application Error} The application attempted to release a resource it did not own. Click OK to terminate the application. RESOURCE_NOT_OWNED = 0xC0000264, /// An attempt was made to create more links on a file than the file system supports. TOO_MANY_LINKS = 0xC0000265, /// The specified quota list is internally inconsistent with its descriptor. 
QUOTA_LIST_INCONSISTENT = 0xC0000266, /// The specified file has been relocated to offline storage. FILE_IS_OFFLINE = 0xC0000267, /// {Windows Evaluation Notification} The evaluation period for this installation of Windows has expired. This system will shutdown in 1 hour. /// To restore access to this installation of Windows, upgrade this installation by using a licensed distribution of this product. EVALUATION_EXPIRATION = 0xC0000268, /// {Illegal System DLL Relocation} The system DLL %hs was relocated in memory. The application will not run properly. /// The relocation occurred because the DLL %hs occupied an address range that is reserved for Windows system DLLs. /// The vendor supplying the DLL should be contacted for a new DLL. ILLEGAL_DLL_RELOCATION = 0xC0000269, /// {License Violation} The system has detected tampering with your registered product type. /// This is a violation of your software license. Tampering with the product type is not permitted. LICENSE_VIOLATION = 0xC000026A, /// {DLL Initialization Failed} The application failed to initialize because the window station is shutting down. DLL_INIT_FAILED_LOGOFF = 0xC000026B, /// {Unable to Load Device Driver} %hs device driver could not be loaded. Error Status was 0x%x. DRIVER_UNABLE_TO_LOAD = 0xC000026C, /// DFS is unavailable on the contacted server. DFS_UNAVAILABLE = 0xC000026D, /// An operation was attempted to a volume after it was dismounted. VOLUME_DISMOUNTED = 0xC000026E, /// An internal error occurred in the Win32 x86 emulation subsystem. WX86_INTERNAL_ERROR = 0xC000026F, /// Win32 x86 emulation subsystem floating-point stack check. WX86_FLOAT_STACK_CHECK = 0xC0000270, /// The validation process needs to continue on to the next step. VALIDATE_CONTINUE = 0xC0000271, /// There was no match for the specified key in the index. NO_MATCH = 0xC0000272, /// There are no more matches for the current index enumeration. NO_MORE_MATCHES = 0xC0000273, /// The NTFS file or directory is not a reparse point. 
NOT_A_REPARSE_POINT = 0xC0000275, /// The Windows I/O reparse tag passed for the NTFS reparse point is invalid. IO_REPARSE_TAG_INVALID = 0xC0000276, /// The Windows I/O reparse tag does not match the one that is in the NTFS reparse point. IO_REPARSE_TAG_MISMATCH = 0xC0000277, /// The user data passed for the NTFS reparse point is invalid. IO_REPARSE_DATA_INVALID = 0xC0000278, /// The layered file system driver for this I/O tag did not handle it when needed. IO_REPARSE_TAG_NOT_HANDLED = 0xC0000279, /// The NTFS symbolic link could not be resolved even though the initial file name is valid. REPARSE_POINT_NOT_RESOLVED = 0xC0000280, /// The NTFS directory is a reparse point. DIRECTORY_IS_A_REPARSE_POINT = 0xC0000281, /// The range could not be added to the range list because of a conflict. RANGE_LIST_CONFLICT = 0xC0000282, /// The specified medium changer source element contains no media. SOURCE_ELEMENT_EMPTY = 0xC0000283, /// The specified medium changer destination element already contains media. DESTINATION_ELEMENT_FULL = 0xC0000284, /// The specified medium changer element does not exist. ILLEGAL_ELEMENT_ADDRESS = 0xC0000285, /// The specified element is contained in a magazine that is no longer present. MAGAZINE_NOT_PRESENT = 0xC0000286, /// The device requires re-initialization due to hardware errors. REINITIALIZATION_NEEDED = 0xC0000287, /// The file encryption attempt failed. ENCRYPTION_FAILED = 0xC000028A, /// The file decryption attempt failed. DECRYPTION_FAILED = 0xC000028B, /// The specified range could not be found in the range list. RANGE_NOT_FOUND = 0xC000028C, /// There is no encryption recovery policy configured for this system. NO_RECOVERY_POLICY = 0xC000028D, /// The required encryption driver is not loaded for this system. NO_EFS = 0xC000028E, /// The file was encrypted with a different encryption driver than is currently loaded. WRONG_EFS = 0xC000028F, /// There are no EFS keys defined for the user. 
NO_USER_KEYS = 0xC0000290, /// The specified file is not encrypted. FILE_NOT_ENCRYPTED = 0xC0000291, /// The specified file is not in the defined EFS export format. NOT_EXPORT_FORMAT = 0xC0000292, /// The specified file is encrypted and the user does not have the ability to decrypt it. FILE_ENCRYPTED = 0xC0000293, /// The GUID passed was not recognized as valid by a WMI data provider. WMI_GUID_NOT_FOUND = 0xC0000295, /// The instance name passed was not recognized as valid by a WMI data provider. WMI_INSTANCE_NOT_FOUND = 0xC0000296, /// The data item ID passed was not recognized as valid by a WMI data provider. WMI_ITEMID_NOT_FOUND = 0xC0000297, /// The WMI request could not be completed and should be retried. WMI_TRY_AGAIN = 0xC0000298, /// The policy object is shared and can only be modified at the root. SHARED_POLICY = 0xC0000299, /// The policy object does not exist when it should. POLICY_OBJECT_NOT_FOUND = 0xC000029A, /// The requested policy information only lives in the Ds. POLICY_ONLY_IN_DS = 0xC000029B, /// The volume must be upgraded to enable this feature. VOLUME_NOT_UPGRADED = 0xC000029C, /// The remote storage service is not operational at this time. REMOTE_STORAGE_NOT_ACTIVE = 0xC000029D, /// The remote storage service encountered a media error. REMOTE_STORAGE_MEDIA_ERROR = 0xC000029E, /// The tracking (workstation) service is not running. NO_TRACKING_SERVICE = 0xC000029F, /// The server process is running under a SID that is different from the SID that is required by client. SERVER_SID_MISMATCH = 0xC00002A0, /// The specified directory service attribute or value does not exist. DS_NO_ATTRIBUTE_OR_VALUE = 0xC00002A1, /// The attribute syntax specified to the directory service is invalid. DS_INVALID_ATTRIBUTE_SYNTAX = 0xC00002A2, /// The attribute type specified to the directory service is not defined. DS_ATTRIBUTE_TYPE_UNDEFINED = 0xC00002A3, /// The specified directory service attribute or value already exists. 
DS_ATTRIBUTE_OR_VALUE_EXISTS = 0xC00002A4, /// The directory service is busy. DS_BUSY = 0xC00002A5, /// The directory service is unavailable. DS_UNAVAILABLE = 0xC00002A6, /// The directory service was unable to allocate a relative identifier. DS_NO_RIDS_ALLOCATED = 0xC00002A7, /// The directory service has exhausted the pool of relative identifiers. DS_NO_MORE_RIDS = 0xC00002A8, /// The requested operation could not be performed because the directory service is not the master for that type of operation. DS_INCORRECT_ROLE_OWNER = 0xC00002A9, /// The directory service was unable to initialize the subsystem that allocates relative identifiers. DS_RIDMGR_INIT_ERROR = 0xC00002AA, /// The requested operation did not satisfy one or more constraints that are associated with the class of the object. DS_OBJ_CLASS_VIOLATION = 0xC00002AB, /// The directory service can perform the requested operation only on a leaf object. DS_CANT_ON_NON_LEAF = 0xC00002AC, /// The directory service cannot perform the requested operation on the Relatively Defined Name (RDN) attribute of an object. DS_CANT_ON_RDN = 0xC00002AD, /// The directory service detected an attempt to modify the object class of an object. DS_CANT_MOD_OBJ_CLASS = 0xC00002AE, /// An error occurred while performing a cross domain move operation. DS_CROSS_DOM_MOVE_FAILED = 0xC00002AF, /// Unable to contact the global catalog server. DS_GC_NOT_AVAILABLE = 0xC00002B0, /// The requested operation requires a directory service, and none was available. DIRECTORY_SERVICE_REQUIRED = 0xC00002B1, /// The reparse attribute cannot be set because it is incompatible with an existing attribute. REPARSE_ATTRIBUTE_CONFLICT = 0xC00002B2, /// A group marked "use for deny only" cannot be enabled. CANT_ENABLE_DENY_ONLY = 0xC00002B3, /// {EXCEPTION} Multiple floating-point faults. FLOAT_MULTIPLE_FAULTS = 0xC00002B4, /// {EXCEPTION} Multiple floating-point traps. FLOAT_MULTIPLE_TRAPS = 0xC00002B5, /// The device has been removed. 
DEVICE_REMOVED = 0xC00002B6, /// The volume change journal is being deleted. JOURNAL_DELETE_IN_PROGRESS = 0xC00002B7, /// The volume change journal is not active. JOURNAL_NOT_ACTIVE = 0xC00002B8, /// The requested interface is not supported. NOINTERFACE = 0xC00002B9, /// A directory service resource limit has been exceeded. DS_ADMIN_LIMIT_EXCEEDED = 0xC00002C1, /// {System Standby Failed} The driver %hs does not support standby mode. /// Updating this driver allows the system to go to standby mode. DRIVER_FAILED_SLEEP = 0xC00002C2, /// Mutual Authentication failed. The server password is out of date at the domain controller. MUTUAL_AUTHENTICATION_FAILED = 0xC00002C3, /// The system file %1 has become corrupt and has been replaced. CORRUPT_SYSTEM_FILE = 0xC00002C4, /// {EXCEPTION} Alignment Error A data type misalignment error was detected in a load or store instruction. DATATYPE_MISALIGNMENT_ERROR = 0xC00002C5, /// The WMI data item or data block is read-only. WMI_READ_ONLY = 0xC00002C6, /// The WMI data item or data block could not be changed. WMI_SET_FAILURE = 0xC00002C7, /// {Virtual Memory Minimum Too Low} Your system is low on virtual memory. /// Windows is increasing the size of your virtual memory paging file. /// During this process, memory requests for some applications might be denied. For more information, see Help. COMMITMENT_MINIMUM = 0xC00002C8, /// {EXCEPTION} Register NaT consumption faults. /// A NaT value is consumed on a non-speculative instruction. REG_NAT_CONSUMPTION = 0xC00002C9, /// The transport element of the medium changer contains media, which is causing the operation to fail. TRANSPORT_FULL = 0xC00002CA, /// Security Accounts Manager initialization failed because of the following error: %hs Error Status: 0x%x. /// Click OK to shut down this system and restart in Directory Services Restore Mode. /// Check the event log for more detailed information. 
DS_SAM_INIT_FAILURE = 0xC00002CB, /// This operation is supported only when you are connected to the server. ONLY_IF_CONNECTED = 0xC00002CC, /// Only an administrator can modify the membership list of an administrative group. DS_SENSITIVE_GROUP_VIOLATION = 0xC00002CD, /// A device was removed so enumeration must be restarted. PNP_RESTART_ENUMERATION = 0xC00002CE, /// The journal entry has been deleted from the journal. JOURNAL_ENTRY_DELETED = 0xC00002CF, /// Cannot change the primary group ID of a domain controller account. DS_CANT_MOD_PRIMARYGROUPID = 0xC00002D0, /// {Fatal System Error} The system image %s is not properly signed. /// The file has been replaced with the signed file. The system has been shut down. SYSTEM_IMAGE_BAD_SIGNATURE = 0xC00002D1, /// The device will not start without a reboot. PNP_REBOOT_REQUIRED = 0xC00002D2, /// The power state of the current device cannot support this request. POWER_STATE_INVALID = 0xC00002D3, /// The specified group type is invalid. DS_INVALID_GROUP_TYPE = 0xC00002D4, /// In a mixed domain, no nesting of a global group if the group is security enabled. DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN = 0xC00002D5, /// In a mixed domain, cannot nest local groups with other local groups, if the group is security enabled. DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN = 0xC00002D6, /// A global group cannot have a local group as a member. DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER = 0xC00002D7, /// A global group cannot have a universal group as a member. DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER = 0xC00002D8, /// A universal group cannot have a local group as a member. DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER = 0xC00002D9, /// A global group cannot have a cross-domain member. DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER = 0xC00002DA, /// A local group cannot have another cross-domain local group as a member. DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER = 0xC00002DB, /// Cannot change to a security-disabled group because primary members are in this group. 
DS_HAVE_PRIMARY_MEMBERS = 0xC00002DC, /// The WMI operation is not supported by the data block or method. WMI_NOT_SUPPORTED = 0xC00002DD, /// There is not enough power to complete the requested operation. INSUFFICIENT_POWER = 0xC00002DE, /// The Security Accounts Manager needs to get the boot password. SAM_NEED_BOOTKEY_PASSWORD = <PASSWORD>, /// The Security Accounts Manager needs to get the boot key from the floppy disk. SAM_NEED_BOOTKEY_FLOPPY = 0xC00002E0, /// The directory service cannot start. DS_CANT_START = 0xC00002E1, /// The directory service could not start because of the following error: %hs Error Status: 0x%x. /// Click OK to shut down this system and restart in Directory Services Restore Mode. /// Check the event log for more detailed information. DS_INIT_FAILURE = 0xC00002E2, /// The Security Accounts Manager initialization failed because of the following error: %hs Error Status: 0x%x. /// Click OK to shut down this system and restart in Safe Mode. /// Check the event log for more detailed information. SAM_INIT_FAILURE = 0xC00002E3, /// The requested operation can be performed only on a global catalog server. DS_GC_REQUIRED = 0xC00002E4, /// A local group can only be a member of other local groups in the same domain. DS_LOCAL_MEMBER_OF_LOCAL_ONLY = 0xC00002E5, /// Foreign security principals cannot be members of universal groups. DS_NO_FPO_IN_UNIVERSAL_GROUPS = 0xC00002E6, /// Your computer could not be joined to the domain. /// You have exceeded the maximum number of computer accounts you are allowed to create in this domain. /// Contact your system administrator to have this limit reset or increased. DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED = 0xC00002E7, /// This operation cannot be performed on the current domain. CURRENT_DOMAIN_NOT_ALLOWED = 0xC00002E9, /// The directory or file cannot be created. CANNOT_MAKE = 0xC00002EA, /// The system is in the process of shutting down. 
SYSTEM_SHUTDOWN = 0xC00002EB, /// Directory Services could not start because of the following error: %hs Error Status: 0x%x. Click OK to shut down the system. /// You can use the recovery console to diagnose the system further. DS_INIT_FAILURE_CONSOLE = 0xC00002EC, /// Security Accounts Manager initialization failed because of the following error: %hs Error Status: 0x%x. Click OK to shut down the system. /// You can use the recovery console to diagnose the system further. DS_SAM_INIT_FAILURE_CONSOLE = 0xC00002ED, /// A security context was deleted before the context was completed. This is considered a logon failure. UNFINISHED_CONTEXT_DELETED = 0xC00002EE, /// The client is trying to negotiate a context and the server requires user-to-user but did not send a TGT reply. NO_TGT_REPLY = 0xC00002EF, /// An object ID was not found in the file. OBJECTID_NOT_FOUND = 0xC00002F0, /// Unable to accomplish the requested task because the local machine does not have any IP addresses. NO_IP_ADDRESSES = 0xC00002F1, /// The supplied credential handle does not match the credential that is associated with the security context. WRONG_CREDENTIAL_HANDLE = 0xC00002F2, /// The crypto system or checksum function is invalid because a required function is unavailable. CRYPTO_SYSTEM_INVALID = 0xC00002F3, /// The number of maximum ticket referrals has been exceeded. MAX_REFERRALS_EXCEEDED = 0xC00002F4, /// The local machine must be a Kerberos KDC (domain controller) and it is not. MUST_BE_KDC = 0xC00002F5, /// The other end of the security negotiation requires strong crypto but it is not supported on the local machine. STRONG_CRYPTO_NOT_SUPPORTED = 0xC00002F6, /// The KDC reply contained more than one principal name. TOO_MANY_PRINCIPALS = 0xC00002F7, /// Expected to find PA data for a hint of what etype to use, but it was not found. NO_PA_DATA = 0xC00002F8, /// The client certificate does not contain a valid UPN, or does not match the client name in the logon request. 
Contact your administrator. PKINIT_NAME_MISMATCH = 0xC00002F9, /// Smart card logon is required and was not used. SMARTCARD_LOGON_REQUIRED = 0xC00002FA, /// An invalid request was sent to the KDC. KDC_INVALID_REQUEST = 0xC00002FB, /// The KDC was unable to generate a referral for the service requested. KDC_UNABLE_TO_REFER = 0xC00002FC, /// The encryption type requested is not supported by the KDC. KDC_UNKNOWN_ETYPE = 0xC00002FD, /// A system shutdown is in progress. SHUTDOWN_IN_PROGRESS = 0xC00002FE, /// The server machine is shutting down. SERVER_SHUTDOWN_IN_PROGRESS = 0xC00002FF, /// This operation is not supported on a computer running Windows Server 2003 operating system for Small Business Server. NOT_SUPPORTED_ON_SBS = 0xC0000300, /// The WMI GUID is no longer available. WMI_GUID_DISCONNECTED = 0xC0000301, /// Collection or events for the WMI GUID is already disabled. WMI_ALREADY_DISABLED = 0xC0000302, /// Collection or events for the WMI GUID is already enabled. WMI_ALREADY_ENABLED = 0xC0000303, /// The master file table on the volume is too fragmented to complete this operation. MFT_TOO_FRAGMENTED = 0xC0000304, /// Copy protection failure. COPY_PROTECTION_FAILURE = 0xC0000305, /// Copy protection error—DVD CSS Authentication failed. CSS_AUTHENTICATION_FAILURE = 0xC0000306, /// Copy protection error—The specified sector does not contain a valid key. CSS_KEY_NOT_PRESENT = 0xC0000307, /// Copy protection error—DVD session key not established. CSS_KEY_NOT_ESTABLISHED = 0xC0000308, /// Copy protection error—The read failed because the sector is encrypted. CSS_SCRAMBLED_SECTOR = 0xC0000309, /// Copy protection error—The region of the specified DVD does not correspond to the region setting of the drive. CSS_REGION_MISMATCH = 0xC000030A, /// Copy protection error—The region setting of the drive might be permanent. CSS_RESETS_EXHAUSTED = 0xC000030B, /// The Kerberos protocol encountered an error while validating the KDC certificate during smart card logon. 
/// There is more information in the system event log. PKINIT_FAILURE = 0xC0000320, /// The Kerberos protocol encountered an error while attempting to use the smart card subsystem. SMARTCARD_SUBSYSTEM_FAILURE = 0xC0000321, /// The target server does not have acceptable Kerberos credentials. NO_KERB_KEY = 0xC0000322, /// The transport determined that the remote system is down. HOST_DOWN = 0xC0000350, /// An unsupported pre-authentication mechanism was presented to the Kerberos package. UNSUPPORTED_PREAUTH = 0xC0000351, /// The encryption algorithm that is used on the source file needs a bigger key buffer than the one that is used on the destination file. EFS_ALG_BLOB_TOO_BIG = 0xC0000352, /// An attempt to remove a processes DebugPort was made, but a port was not already associated with the process. PORT_NOT_SET = 0xC0000353, /// An attempt to do an operation on a debug port failed because the port is in the process of being deleted. DEBUGGER_INACTIVE = 0xC0000354, /// This version of Windows is not compatible with the behavior version of the directory forest, domain, or domain controller. DS_VERSION_CHECK_FAILURE = 0xC0000355, /// The specified event is currently not being audited. AUDITING_DISABLED = 0xC0000356, /// The machine account was created prior to Windows NT 4.0 operating system. The account needs to be recreated. PRENT4_MACHINE_ACCOUNT = 0xC0000357, /// An account group cannot have a universal group as a member. DS_AG_CANT_HAVE_UNIVERSAL_MEMBER = 0xC0000358, /// The specified image file did not have the correct format; it appears to be a 32-bit Windows image. INVALID_IMAGE_WIN_32 = 0xC0000359, /// The specified image file did not have the correct format; it appears to be a 64-bit Windows image. INVALID_IMAGE_WIN_64 = 0xC000035A, /// The client's supplied SSPI channel bindings were incorrect. BAD_BINDINGS = 0xC000035B, /// The client session has expired; so the client must re-authenticate to continue accessing the remote resources. 
NETWORK_SESSION_EXPIRED = 0xC000035C, /// The AppHelp dialog box canceled; thus preventing the application from starting. APPHELP_BLOCK = 0xC000035D, /// The SID filtering operation removed all SIDs. ALL_SIDS_FILTERED = 0xC000035E, /// The driver was not loaded because the system is starting in safe mode. NOT_SAFE_MODE_DRIVER = 0xC000035F, /// Access to %1 has been restricted by your Administrator by the default software restriction policy level. ACCESS_DISABLED_BY_POLICY_DEFAULT = 0xC0000361, /// Access to %1 has been restricted by your Administrator by location with policy rule %2 placed on path %3. ACCESS_DISABLED_BY_POLICY_PATH = 0xC0000362, /// Access to %1 has been restricted by your Administrator by software publisher policy. ACCESS_DISABLED_BY_POLICY_PUBLISHER = 0xC0000363, /// Access to %1 has been restricted by your Administrator by policy rule %2. ACCESS_DISABLED_BY_POLICY_OTHER = 0xC0000364, /// The driver was not loaded because it failed its initialization call. FAILED_DRIVER_ENTRY = 0xC0000365, /// The device encountered an error while applying power or reading the device configuration. /// This might be caused by a failure of your hardware or by a poor connection. DEVICE_ENUMERATION_ERROR = 0xC0000366, /// The create operation failed because the name contained at least one mount point that resolves to a volume to which the specified device object is not attached. MOUNT_POINT_NOT_RESOLVED = 0xC0000368, /// The device object parameter is either not a valid device object or is not attached to the volume that is specified by the file name. INVALID_DEVICE_OBJECT_PARAMETER = 0xC0000369, /// A machine check error has occurred. /// Check the system event log for additional information. MCA_OCCURED = 0xC000036A, /// Driver %2 has been blocked from loading. DRIVER_BLOCKED_CRITICAL = 0xC000036B, /// Driver %2 has been blocked from loading. DRIVER_BLOCKED = 0xC000036C, /// There was error [%2] processing the driver database. 
DRIVER_DATABASE_ERROR = 0xC000036D, /// System hive size has exceeded its limit. SYSTEM_HIVE_TOO_LARGE = 0xC000036E, /// A dynamic link library (DLL) referenced a module that was neither a DLL nor the process's executable image. INVALID_IMPORT_OF_NON_DLL = 0xC000036F, /// The local account store does not contain secret material for the specified account. NO_SECRETS = 0xC0000371, /// Access to %1 has been restricted by your Administrator by policy rule %2. ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY = 0xC0000372, /// The system was not able to allocate enough memory to perform a stack switch. FAILED_STACK_SWITCH = 0xC0000373, /// A heap has been corrupted. HEAP_CORRUPTION = 0xC0000374, /// An incorrect PIN was presented to the smart card. SMARTCARD_WRONG_PIN = 0xC0000380, /// The smart card is blocked. SMARTCARD_CARD_BLOCKED = 0xC0000381, /// No PIN was presented to the smart card. SMARTCARD_CARD_NOT_AUTHENTICATED = 0xC0000382, /// No smart card is available. SMARTCARD_NO_CARD = 0xC0000383, /// The requested key container does not exist on the smart card. SMARTCARD_NO_KEY_CONTAINER = 0xC0000384, /// The requested certificate does not exist on the smart card. SMARTCARD_NO_CERTIFICATE = 0xC0000385, /// The requested keyset does not exist. SMARTCARD_NO_KEYSET = 0xC0000386, /// A communication error with the smart card has been detected. SMARTCARD_IO_ERROR = 0xC0000387, /// The system detected a possible attempt to compromise security. /// Ensure that you can contact the server that authenticated you. DOWNGRADE_DETECTED = 0xC0000388, /// The smart card certificate used for authentication has been revoked. Contact your system administrator. /// There might be additional information in the event log. SMARTCARD_CERT_REVOKED = 0xC0000389, /// An untrusted certificate authority was detected while processing the smart card certificate that is used for authentication. Contact your system administrator. 
ISSUING_CA_UNTRUSTED = 0xC000038A, /// The revocation status of the smart card certificate that is used for authentication could not be determined. Contact your system administrator. REVOCATION_OFFLINE_C = 0xC000038B, /// The smart card certificate used for authentication was not trusted. Contact your system administrator. PKINIT_CLIENT_FAILURE = 0xC000038C, /// The smart card certificate used for authentication has expired. Contact your system administrator. SMARTCARD_CERT_EXPIRED = 0xC000038D, /// The driver could not be loaded because a previous version of the driver is still in memory. DRIVER_FAILED_PRIOR_UNLOAD = 0xC000038E, /// The smart card provider could not perform the action because the context was acquired as silent. SMARTCARD_SILENT_CONTEXT = 0xC000038F, /// The delegated trust creation quota of the current user has been exceeded. PER_USER_TRUST_QUOTA_EXCEEDED = 0xC0000401, /// The total delegated trust creation quota has been exceeded. ALL_USER_TRUST_QUOTA_EXCEEDED = 0xC0000402, /// The delegated trust deletion quota of the current user has been exceeded. USER_DELETE_TRUST_QUOTA_EXCEEDED = 0xC0000403, /// The requested name already exists as a unique identifier. DS_NAME_NOT_UNIQUE = 0xC0000404, /// The requested object has a non-unique identifier and cannot be retrieved. DS_DUPLICATE_ID_FOUND = 0xC0000405, /// The group cannot be converted due to attribute restrictions on the requested group type. DS_GROUP_CONVERSION_ERROR = 0xC0000406, /// {Volume Shadow Copy Service} Wait while the Volume Shadow Copy Service prepares volume %hs for hibernation. VOLSNAP_PREPARE_HIBERNATE = 0xC0000407, /// Kerberos sub-protocol User2User is required. USER2USER_REQUIRED = 0xC0000408, /// The system detected an overrun of a stack-based buffer in this application. /// This overrun could potentially allow a malicious user to gain control of this application. STACK_BUFFER_OVERRUN = 0xC0000409, /// The Kerberos subsystem encountered an error. 
/// A service for user protocol request was made against a domain controller which does not support service for user. NO_S4U_PROT_SUPPORT = 0xC000040A, /// An attempt was made by this server to make a Kerberos constrained delegation request for a target that is outside the server realm. /// This action is not supported and the resulting error indicates a misconfiguration on the allowed-to-delegate-to list for this server. Contact your administrator. CROSSREALM_DELEGATION_FAILURE = 0xC000040B, /// The revocation status of the domain controller certificate used for smart card authentication could not be determined. /// There is additional information in the system event log. Contact your system administrator. REVOCATION_OFFLINE_KDC = 0xC000040C, /// An untrusted certificate authority was detected while processing the domain controller certificate used for authentication. /// There is additional information in the system event log. Contact your system administrator. ISSUING_CA_UNTRUSTED_KDC = 0xC000040D, /// The domain controller certificate used for smart card logon has expired. /// Contact your system administrator with the contents of your system event log. KDC_CERT_EXPIRED = 0xC000040E, /// The domain controller certificate used for smart card logon has been revoked. /// Contact your system administrator with the contents of your system event log. KDC_CERT_REVOKED = 0xC000040F, /// Data present in one of the parameters is more than the function can operate on. PARAMETER_QUOTA_EXCEEDED = 0xC0000410, /// The system has failed to hibernate (The error code is %hs). /// Hibernation will be disabled until the system is restarted. HIBERNATION_FAILURE = 0xC0000411, /// An attempt to delay-load a .dll or get a function address in a delay-loaded .dll failed. DELAY_LOAD_FAILED = 0xC0000412, /// Logon Failure: The machine you are logging onto is protected by an authentication firewall. /// The specified account is not allowed to authenticate to the machine. 
AUTHENTICATION_FIREWALL_FAILED = 0xC0000413, /// %hs is a 16-bit application. You do not have permissions to execute 16-bit applications. /// Check your permissions with your system administrator. VDM_DISALLOWED = 0xC0000414, /// {Display Driver Stopped Responding} The %hs display driver has stopped working normally. /// Save your work and reboot the system to restore full display functionality. /// The next time you reboot the machine a dialog will be displayed giving you a chance to report this failure to Microsoft. HUNG_DISPLAY_DRIVER_THREAD = 0xC0000415, /// The Desktop heap encountered an error while allocating session memory. /// There is more information in the system event log. INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE = 0xC0000416, /// An invalid parameter was passed to a C runtime function. INVALID_CRUNTIME_PARAMETER = 0xC0000417, /// The authentication failed because NTLM was blocked. NTLM_BLOCKED = 0xC0000418, /// The source object's SID already exists in destination forest. DS_SRC_SID_EXISTS_IN_FOREST = 0xC0000419, /// The domain name of the trusted domain already exists in the forest. DS_DOMAIN_NAME_EXISTS_IN_FOREST = 0xC000041A, /// The flat name of the trusted domain already exists in the forest. DS_FLAT_NAME_EXISTS_IN_FOREST = 0xC000041B, /// The User Principal Name (UPN) is invalid. INVALID_USER_PRINCIPAL_NAME = 0xC000041C, /// There has been an assertion failure. ASSERTION_FAILURE = 0xC0000420, /// Application verifier has found an error in the current process. VERIFIER_STOP = 0xC0000421, /// A user mode unwind is in progress. CALLBACK_POP_STACK = 0xC0000423, /// %2 has been blocked from loading due to incompatibility with this system. /// Contact your software vendor for a compatible version of the driver. INCOMPATIBLE_DRIVER_BLOCKED = 0xC0000424, /// Illegal operation attempted on a registry key which has already been unloaded. HIVE_UNLOADED = 0xC0000425, /// Compression is disabled for this volume. 
COMPRESSION_DISABLED = 0xC0000426, /// The requested operation could not be completed due to a file system limitation. FILE_SYSTEM_LIMITATION = 0xC0000427, /// The hash for image %hs cannot be found in the system catalogs. /// The image is likely corrupt or the victim of tampering. INVALID_IMAGE_HASH = 0xC0000428, /// The implementation is not capable of performing the request. NOT_CAPABLE = 0xC0000429, /// The requested operation is out of order with respect to other operations. REQUEST_OUT_OF_SEQUENCE = 0xC000042A, /// An operation attempted to exceed an implementation-defined limit. IMPLEMENTATION_LIMIT = 0xC000042B, /// The requested operation requires elevation. ELEVATION_REQUIRED = 0xC000042C, /// The required security context does not exist. NO_SECURITY_CONTEXT = 0xC000042D, /// The PKU2U protocol encountered an error while attempting to utilize the associated certificates. PKU2U_CERT_FAILURE = 0xC000042E, /// The operation was attempted beyond the valid data length of the file. BEYOND_VDL = 0xC0000432, /// The attempted write operation encountered a write already in progress for some portion of the range. ENCOUNTERED_WRITE_IN_PROGRESS = 0xC0000433, /// The page fault mappings changed in the middle of processing a fault so the operation must be retried. PTE_CHANGED = 0xC0000434, /// The attempt to purge this file from memory failed to purge some or all the data from memory. PURGE_FAILED = 0xC0000435, /// The requested credential requires confirmation. CRED_REQUIRES_CONFIRMATION = 0xC0000440, /// The remote server sent an invalid response for a file being opened with Client Side Encryption. CS_ENCRYPTION_INVALID_SERVER_RESPONSE = 0xC0000441, /// Client Side Encryption is not supported by the remote server even though it claims to support it. CS_ENCRYPTION_UNSUPPORTED_SERVER = 0xC0000442, /// File is encrypted and should be opened in Client Side Encryption mode. 
CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE = 0xC0000443, /// A new encrypted file is being created and a $EFS needs to be provided. CS_ENCRYPTION_NEW_ENCRYPTED_FILE = 0xC0000444, /// The SMB client requested a CSE FSCTL on a non-CSE file. CS_ENCRYPTION_FILE_NOT_CSE = 0xC0000445, /// Indicates a particular Security ID cannot be assigned as the label of an object. INVALID_LABEL = 0xC0000446, /// The process hosting the driver for this device has terminated. DRIVER_PROCESS_TERMINATED = 0xC0000450, /// The requested system device cannot be identified due to multiple indistinguishable devices potentially matching the identification criteria. AMBIGUOUS_SYSTEM_DEVICE = 0xC0000451, /// The requested system device cannot be found. SYSTEM_DEVICE_NOT_FOUND = 0xC0000452, /// This boot application must be restarted. RESTART_BOOT_APPLICATION = 0xC0000453, /// Insufficient NVRAM resources exist to complete the API. A reboot might be required. INSUFFICIENT_NVRAM_RESOURCES = 0xC0000454, /// No ranges for the specified operation were able to be processed. NO_RANGES_PROCESSED = 0xC0000460, /// The storage device does not support Offload Write. DEVICE_FEATURE_NOT_SUPPORTED = 0xC0000463, /// Data cannot be moved because the source device cannot communicate with the destination device. DEVICE_UNREACHABLE = 0xC0000464, /// The token representing the data is invalid or expired. INVALID_TOKEN = 0xC0000465, /// The file server is temporarily unavailable. SERVER_UNAVAILABLE = 0xC0000466, /// The specified task name is invalid. INVALID_TASK_NAME = 0xC0000500, /// The specified task index is invalid. INVALID_TASK_INDEX = 0xC0000501, /// The specified thread is already joining a task. THREAD_ALREADY_IN_TASK = 0xC0000502, /// A callback has requested to bypass native code. CALLBACK_BYPASS = 0xC0000503, /// A fail fast exception occurred. /// Exception handlers will not be invoked and the process will be terminated immediately. 
FAIL_FAST_EXCEPTION = 0xC0000602, /// Windows cannot verify the digital signature for this file. /// The signing certificate for this file has been revoked. IMAGE_CERT_REVOKED = 0xC0000603, /// The ALPC port is closed. PORT_CLOSED = 0xC0000700, /// The ALPC message requested is no longer available. MESSAGE_LOST = 0xC0000701, /// The ALPC message supplied is invalid. INVALID_MESSAGE = 0xC0000702, /// The ALPC message has been canceled. REQUEST_CANCELED = 0xC0000703, /// Invalid recursive dispatch attempt. RECURSIVE_DISPATCH = 0xC0000704, /// No receive buffer has been supplied in a synchronous request. LPC_RECEIVE_BUFFER_EXPECTED = 0xC0000705, /// The connection port is used in an invalid context. LPC_INVALID_CONNECTION_USAGE = 0xC0000706, /// The ALPC port does not accept new request messages. LPC_REQUESTS_NOT_ALLOWED = 0xC0000707, /// The resource requested is already in use. RESOURCE_IN_USE = 0xC0000708, /// The hardware has reported an uncorrectable memory error. HARDWARE_MEMORY_ERROR = 0xC0000709, /// Status 0x%08x was returned, waiting on handle 0x%x for wait 0x%p, in waiter 0x%p. THREADPOOL_HANDLE_EXCEPTION = 0xC000070A, /// After a callback to 0x%p(0x%p), a completion call to Set event(0x%p) failed with status 0x%08x. THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED = 0xC000070B, /// After a callback to 0x%p(0x%p), a completion call to ReleaseSemaphore(0x%p, %d) failed with status 0x%08x. THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED = 0xC000070C, /// After a callback to 0x%p(0x%p), a completion call to ReleaseMutex(%p) failed with status 0x%08x. THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED = 0xC000070D, /// After a callback to 0x%p(0x%p), a completion call to FreeLibrary(%p) failed with status 0x%08x. THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED = 0xC000070E, /// The thread pool 0x%p was released while a thread was posting a callback to 0x%p(0x%p) to it. 
THREADPOOL_RELEASED_DURING_OPERATION = 0xC000070F, /// A thread pool worker thread is impersonating a client, after a callback to 0x%p(0x%p). /// This is unexpected, indicating that the callback is missing a call to revert the impersonation. CALLBACK_RETURNED_WHILE_IMPERSONATING = 0xC0000710, /// A thread pool worker thread is impersonating a client, after executing an APC. /// This is unexpected, indicating that the APC is missing a call to revert the impersonation. APC_RETURNED_WHILE_IMPERSONATING = 0xC0000711, /// Either the target process, or the target thread's containing process, is a protected process. PROCESS_IS_PROTECTED = 0xC0000712, /// A thread is getting dispatched with MCA EXCEPTION because of MCA. MCA_EXCEPTION = 0xC0000713, /// The client certificate account mapping is not unique. CERTIFICATE_MAPPING_NOT_UNIQUE = 0xC0000714, /// The symbolic link cannot be followed because its type is disabled. SYMLINK_CLASS_DISABLED = 0xC0000715, /// Indicates that the specified string is not valid for IDN normalization. INVALID_IDN_NORMALIZATION = 0xC0000716, /// No mapping for the Unicode character exists in the target multi-byte code page. NO_UNICODE_TRANSLATION = 0xC0000717, /// The provided callback is already registered. ALREADY_REGISTERED = 0xC0000718, /// The provided context did not match the target. CONTEXT_MISMATCH = 0xC0000719, /// The specified port already has a completion list. PORT_ALREADY_HAS_COMPLETION_LIST = 0xC000071A, /// A threadpool worker thread entered a callback at thread base priority 0x%x and exited at priority 0x%x. /// This is unexpected, indicating that the callback missed restoring the priority. CALLBACK_RETURNED_THREAD_PRIORITY = 0xC000071B, /// An invalid thread, handle %p, is specified for this operation. /// Possibly, a threadpool worker thread was specified. INVALID_THREAD = 0xC000071C, /// A threadpool worker thread entered a callback, which left transaction state. 
/// This is unexpected, indicating that the callback missed clearing the transaction. CALLBACK_RETURNED_TRANSACTION = 0xC000071D, /// A threadpool worker thread entered a callback, which left the loader lock held. /// This is unexpected, indicating that the callback missed releasing the lock. CALLBACK_RETURNED_LDR_LOCK = 0xC000071E, /// A threadpool worker thread entered a callback, which left with preferred languages set. /// This is unexpected, indicating that the callback missed clearing them. CALLBACK_RETURNED_LANG = 0xC000071F, /// A threadpool worker thread entered a callback, which left with background priorities set. /// This is unexpected, indicating that the callback missed restoring the original priorities. CALLBACK_RETURNED_PRI_BACK = 0xC0000720, /// The attempted operation required self healing to be enabled. DISK_REPAIR_DISABLED = 0xC0000800, /// The directory service cannot perform the requested operation because a domain rename operation is in progress. DS_DOMAIN_RENAME_IN_PROGRESS = 0xC0000801, /// An operation failed because the storage quota was exceeded. DISK_QUOTA_EXCEEDED = 0xC0000802, /// An operation failed because the content was blocked. CONTENT_BLOCKED = 0xC0000804, /// The operation could not be completed due to bad clusters on disk. BAD_CLUSTERS = 0xC0000805, /// The operation could not be completed because the volume is dirty. Please run the Chkdsk utility and try again. VOLUME_DIRTY = 0xC0000806, /// This file is checked out or locked for editing by another user. FILE_CHECKED_OUT = 0xC0000901, /// The file must be checked out before saving changes. CHECKOUT_REQUIRED = 0xC0000902, /// The file type being saved or retrieved has been blocked. BAD_FILE_TYPE = 0xC0000903, /// The file size exceeds the limit allowed and cannot be saved. FILE_TOO_LARGE = 0xC0000904, /// Access Denied. Before opening files in this location, you must first browse to the e.g. /// site and select the option to log on automatically. 
FORMS_AUTH_REQUIRED = 0xC0000905, /// The operation did not complete successfully because the file contains a virus. VIRUS_INFECTED = 0xC0000906, /// This file contains a virus and cannot be opened. /// Due to the nature of this virus, the file has been removed from this location. VIRUS_DELETED = 0xC0000907, /// The resources required for this device conflict with the MCFG table. BAD_MCFG_TABLE = 0xC0000908, /// The operation did not complete successfully because it would cause an oplock to be broken. /// The caller has requested that existing oplocks not be broken. CANNOT_BREAK_OPLOCK = 0xC0000909, /// WOW Assertion Error. WOW_ASSERTION = 0xC0009898, /// The cryptographic signature is invalid. INVALID_SIGNATURE = 0xC000A000, /// The cryptographic provider does not support HMAC. HMAC_NOT_SUPPORTED = 0xC000A001, /// The IPsec queue overflowed. IPSEC_QUEUE_OVERFLOW = 0xC000A010, /// The neighbor discovery queue overflowed. ND_QUEUE_OVERFLOW = 0xC000A011, /// An Internet Control Message Protocol (ICMP) hop limit exceeded error was received. HOPLIMIT_EXCEEDED = 0xC000A012, /// The protocol is not installed on the local machine. PROTOCOL_NOT_SUPPORTED = 0xC000A013, /// {Delayed Write Failed} Windows was unable to save all the data for the file %hs; the data has been lost. /// This error might be caused by network connectivity issues. Try to save this file elsewhere. LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED = 0xC000A080, /// {Delayed Write Failed} Windows was unable to save all the data for the file %hs; the data has been lost. /// This error was returned by the server on which the file exists. Try to save this file elsewhere. LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR = 0xC000A081, /// {Delayed Write Failed} Windows was unable to save all the data for the file %hs; the data has been lost. /// This error might be caused if the device has been removed or the media is write-protected. 
LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR = 0xC000A082, /// Windows was unable to parse the requested XML data. XML_PARSE_ERROR = 0xC000A083, /// An error was encountered while processing an XML digital signature. XMLDSIG_ERROR = 0xC000A084, /// This indicates that the caller made the connection request in the wrong routing compartment. WRONG_COMPARTMENT = 0xC000A085, /// This indicates that there was an AuthIP failure when attempting to connect to the remote host. AUTHIP_FAILURE = 0xC000A086, /// OID mapped groups cannot have members. DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS = 0xC000A087, /// The specified OID cannot be found. DS_OID_NOT_FOUND = 0xC000A088, /// Hash generation for the specified version and hash type is not enabled on server. HASH_NOT_SUPPORTED = 0xC000A100, /// The hash requests is not present or not up to date with the current file contents. HASH_NOT_PRESENT = 0xC000A101, /// A file system filter on the server has not opted in for Offload Read support. OFFLOAD_READ_FLT_NOT_SUPPORTED = 0xC000A2A1, /// A file system filter on the server has not opted in for Offload Write support. OFFLOAD_WRITE_FLT_NOT_SUPPORTED = 0xC000A2A2, /// Offload read operations cannot be performed on: /// - Compressed files /// - Sparse files /// - Encrypted files /// - File system metadata files OFFLOAD_READ_FILE_NOT_SUPPORTED = 0xC000A2A3, /// Offload write operations cannot be performed on: /// - Compressed files /// - Sparse files /// - Encrypted files /// - File system metadata files OFFLOAD_WRITE_FILE_NOT_SUPPORTED = 0xC000A2A4, /// The debugger did not perform a state change. DBG_NO_STATE_CHANGE = 0xC0010001, /// The debugger found that the application is not idle. DBG_APP_NOT_IDLE = 0xC0010002, /// The string binding is invalid. RPC_NT_INVALID_STRING_BINDING = 0xC0020001, /// The binding handle is not the correct type. RPC_NT_WRONG_KIND_OF_BINDING = 0xC0020002, /// The binding handle is invalid. 
RPC_NT_INVALID_BINDING = 0xC0020003, /// The RPC protocol sequence is not supported. RPC_NT_PROTSEQ_NOT_SUPPORTED = 0xC0020004, /// The RPC protocol sequence is invalid. RPC_NT_INVALID_RPC_PROTSEQ = 0xC0020005, /// The string UUID is invalid. RPC_NT_INVALID_STRING_UUID = 0xC0020006, /// The endpoint format is invalid. RPC_NT_INVALID_ENDPOINT_FORMAT = 0xC0020007, /// The network address is invalid. RPC_NT_INVALID_NET_ADDR = 0xC0020008, /// No endpoint was found. RPC_NT_NO_ENDPOINT_FOUND = 0xC0020009, /// The time-out value is invalid. RPC_NT_INVALID_TIMEOUT = 0xC002000A, /// The object UUID was not found. RPC_NT_OBJECT_NOT_FOUND = 0xC002000B, /// The object UUID has already been registered. RPC_NT_ALREADY_REGISTERED = 0xC002000C, /// The type UUID has already been registered. RPC_NT_TYPE_ALREADY_REGISTERED = 0xC002000D, /// The RPC server is already listening. RPC_NT_ALREADY_LISTENING = 0xC002000E, /// No protocol sequences have been registered. RPC_NT_NO_PROTSEQS_REGISTERED = 0xC002000F, /// The RPC server is not listening. RPC_NT_NOT_LISTENING = 0xC0020010, /// The manager type is unknown. RPC_NT_UNKNOWN_MGR_TYPE = 0xC0020011, /// The interface is unknown. RPC_NT_UNKNOWN_IF = 0xC0020012, /// There are no bindings. RPC_NT_NO_BINDINGS = 0xC0020013, /// There are no protocol sequences. RPC_NT_NO_PROTSEQS = 0xC0020014, /// The endpoint cannot be created. RPC_NT_CANT_CREATE_ENDPOINT = 0xC0020015, /// Insufficient resources are available to complete this operation. RPC_NT_OUT_OF_RESOURCES = 0xC0020016, /// The RPC server is unavailable. RPC_NT_SERVER_UNAVAILABLE = 0xC0020017, /// The RPC server is too busy to complete this operation. RPC_NT_SERVER_TOO_BUSY = 0xC0020018, /// The network options are invalid. RPC_NT_INVALID_NETWORK_OPTIONS = 0xC0020019, /// No RPCs are active on this thread. RPC_NT_NO_CALL_ACTIVE = 0xC002001A, /// The RPC failed. RPC_NT_CALL_FAILED = 0xC002001B, /// The RPC failed and did not execute. 
RPC_NT_CALL_FAILED_DNE = 0xC002001C, /// An RPC protocol error occurred. RPC_NT_PROTOCOL_ERROR = 0xC002001D, /// The RPC server does not support the transfer syntax. RPC_NT_UNSUPPORTED_TRANS_SYN = 0xC002001F, /// The type UUID is not supported. RPC_NT_UNSUPPORTED_TYPE = 0xC0020021, /// The tag is invalid. RPC_NT_INVALID_TAG = 0xC0020022, /// The array bounds are invalid. RPC_NT_INVALID_BOUND = 0xC0020023, /// The binding does not contain an entry name. RPC_NT_NO_ENTRY_NAME = 0xC0020024, /// The name syntax is invalid. RPC_NT_INVALID_NAME_SYNTAX = 0xC0020025, /// The name syntax is not supported. RPC_NT_UNSUPPORTED_NAME_SYNTAX = 0xC0020026, /// No network address is available to construct a UUID. RPC_NT_UUID_NO_ADDRESS = 0xC0020028, /// The endpoint is a duplicate. RPC_NT_DUPLICATE_ENDPOINT = 0xC0020029, /// The authentication type is unknown. RPC_NT_UNKNOWN_AUTHN_TYPE = 0xC002002A, /// The maximum number of calls is too small. RPC_NT_MAX_CALLS_TOO_SMALL = 0xC002002B, /// The string is too long. RPC_NT_STRING_TOO_LONG = 0xC002002C, /// The RPC protocol sequence was not found. RPC_NT_PROTSEQ_NOT_FOUND = 0xC002002D, /// The procedure number is out of range. RPC_NT_PROCNUM_OUT_OF_RANGE = 0xC002002E, /// The binding does not contain any authentication information. RPC_NT_BINDING_HAS_NO_AUTH = 0xC002002F, /// The authentication service is unknown. RPC_NT_UNKNOWN_AUTHN_SERVICE = 0xC0020030, /// The authentication level is unknown. RPC_NT_UNKNOWN_AUTHN_LEVEL = 0xC0020031, /// The security context is invalid. RPC_NT_INVALID_AUTH_IDENTITY = 0xC0020032, /// The authorization service is unknown. RPC_NT_UNKNOWN_AUTHZ_SERVICE = 0xC0020033, /// The entry is invalid. EPT_NT_INVALID_ENTRY = 0xC0020034, /// The operation cannot be performed. EPT_NT_CANT_PERFORM_OP = 0xC0020035, /// No more endpoints are available from the endpoint mapper. EPT_NT_NOT_REGISTERED = 0xC0020036, /// No interfaces have been exported. RPC_NT_NOTHING_TO_EXPORT = 0xC0020037, /// The entry name is incomplete. 
RPC_NT_INCOMPLETE_NAME = 0xC0020038, /// The version option is invalid. RPC_NT_INVALID_VERS_OPTION = 0xC0020039, /// There are no more members. RPC_NT_NO_MORE_MEMBERS = 0xC002003A, /// There is nothing to unexport. RPC_NT_NOT_ALL_OBJS_UNEXPORTED = 0xC002003B, /// The interface was not found. RPC_NT_INTERFACE_NOT_FOUND = 0xC002003C, /// The entry already exists. RPC_NT_ENTRY_ALREADY_EXISTS = 0xC002003D, /// The entry was not found. RPC_NT_ENTRY_NOT_FOUND = 0xC002003E, /// The name service is unavailable. RPC_NT_NAME_SERVICE_UNAVAILABLE = 0xC002003F, /// The network address family is invalid. RPC_NT_INVALID_NAF_ID = 0xC0020040, /// The requested operation is not supported. RPC_NT_CANNOT_SUPPORT = 0xC0020041, /// No security context is available to allow impersonation. RPC_NT_NO_CONTEXT_AVAILABLE = 0xC0020042, /// An internal error occurred in the RPC. RPC_NT_INTERNAL_ERROR = 0xC0020043, /// The RPC server attempted to divide an integer by zero. RPC_NT_ZERO_DIVIDE = 0xC0020044, /// An addressing error occurred in the RPC server. RPC_NT_ADDRESS_ERROR = 0xC0020045, /// A floating point operation at the RPC server caused a divide by zero. RPC_NT_FP_DIV_ZERO = 0xC0020046, /// A floating point underflow occurred at the RPC server. RPC_NT_FP_UNDERFLOW = 0xC0020047, /// A floating point overflow occurred at the RPC server. RPC_NT_FP_OVERFLOW = 0xC0020048, /// An RPC is already in progress for this thread. RPC_NT_CALL_IN_PROGRESS = 0xC0020049, /// There are no more bindings. RPC_NT_NO_MORE_BINDINGS = 0xC002004A, /// The group member was not found. RPC_NT_GROUP_MEMBER_NOT_FOUND = 0xC002004B, /// The endpoint mapper database entry could not be created. EPT_NT_CANT_CREATE = 0xC002004C, /// The object UUID is the nil UUID. RPC_NT_INVALID_OBJECT = 0xC002004D, /// No interfaces have been registered. RPC_NT_NO_INTERFACES = 0xC002004F, /// The RPC was canceled. RPC_NT_CALL_CANCELLED = 0xC0020050, /// The binding handle does not contain all the required information. 
RPC_NT_BINDING_INCOMPLETE = 0xC0020051, /// A communications failure occurred during an RPC. RPC_NT_COMM_FAILURE = 0xC0020052, /// The requested authentication level is not supported. RPC_NT_UNSUPPORTED_AUTHN_LEVEL = 0xC0020053, /// No principal name was registered. RPC_NT_NO_PRINC_NAME = 0xC0020054, /// The error specified is not a valid Windows RPC error code. RPC_NT_NOT_RPC_ERROR = 0xC0020055, /// A security package-specific error occurred. RPC_NT_SEC_PKG_ERROR = 0xC0020057, /// The thread was not canceled. RPC_NT_NOT_CANCELLED = 0xC0020058, /// Invalid asynchronous RPC handle. RPC_NT_INVALID_ASYNC_HANDLE = 0xC0020062, /// Invalid asynchronous RPC call handle for this operation. RPC_NT_INVALID_ASYNC_CALL = 0xC0020063, /// Access to the HTTP proxy is denied. RPC_NT_PROXY_ACCESS_DENIED = 0xC0020064, /// The list of RPC servers available for auto-handle binding has been exhausted. RPC_NT_NO_MORE_ENTRIES = 0xC0030001, /// The file designated by DCERPCCHARTRANS cannot be opened. RPC_NT_SS_CHAR_TRANS_OPEN_FAIL = 0xC0030002, /// The file containing the character translation table has fewer than 512 bytes. RPC_NT_SS_CHAR_TRANS_SHORT_FILE = 0xC0030003, /// A null context handle is passed as an [in] parameter. RPC_NT_SS_IN_NULL_CONTEXT = 0xC0030004, /// The context handle does not match any known context handles. RPC_NT_SS_CONTEXT_MISMATCH = 0xC0030005, /// The context handle changed during a call. RPC_NT_SS_CONTEXT_DAMAGED = 0xC0030006, /// The binding handles passed to an RPC do not match. RPC_NT_SS_HANDLES_MISMATCH = 0xC0030007, /// The stub is unable to get the call handle. RPC_NT_SS_CANNOT_GET_CALL_HANDLE = 0xC0030008, /// A null reference pointer was passed to the stub. RPC_NT_NULL_REF_POINTER = 0xC0030009, /// The enumeration value is out of range. RPC_NT_ENUM_VALUE_OUT_OF_RANGE = 0xC003000A, /// The byte count is too small. RPC_NT_BYTE_COUNT_TOO_SMALL = 0xC003000B, /// The stub received bad data. 
RPC_NT_BAD_STUB_DATA = 0xC003000C, /// Invalid operation on the encoding/decoding handle. RPC_NT_INVALID_ES_ACTION = 0xC0030059, /// Incompatible version of the serializing package. RPC_NT_WRONG_ES_VERSION = 0xC003005A, /// Incompatible version of the RPC stub. RPC_NT_WRONG_STUB_VERSION = 0xC003005B, /// The RPC pipe object is invalid or corrupt. RPC_NT_INVALID_PIPE_OBJECT = 0xC003005C, /// An invalid operation was attempted on an RPC pipe object. RPC_NT_INVALID_PIPE_OPERATION = 0xC003005D, /// Unsupported RPC pipe version. RPC_NT_WRONG_PIPE_VERSION = 0xC003005E, /// The RPC pipe object has already been closed. RPC_NT_PIPE_CLOSED = 0xC003005F, /// The RPC call completed before all pipes were processed. RPC_NT_PIPE_DISCIPLINE_ERROR = 0xC0030060, /// No more data is available from the RPC pipe. RPC_NT_PIPE_EMPTY = 0xC0030061, /// A device is missing in the system BIOS MPS table. This device will not be used. /// Contact your system vendor for a system BIOS update. PNP_BAD_MPS_TABLE = 0xC0040035, /// A translator failed to translate resources. PNP_TRANSLATION_FAILED = 0xC0040036, /// An IRQ translator failed to translate resources. PNP_IRQ_TRANSLATION_FAILED = 0xC0040037, /// Driver %2 returned an invalid ID for a child device (%3). PNP_INVALID_ID = 0xC0040038, /// Reissue the given operation as a cached I/O operation IO_REISSUE_AS_CACHED = 0xC0040039, /// Session name %1 is invalid. CTX_WINSTATION_NAME_INVALID = 0xC00A0001, /// The protocol driver %1 is invalid. CTX_INVALID_PD = 0xC00A0002, /// The protocol driver %1 was not found in the system path. CTX_PD_NOT_FOUND = 0xC00A0003, /// A close operation is pending on the terminal connection. CTX_CLOSE_PENDING = 0xC00A0006, /// No free output buffers are available. CTX_NO_OUTBUF = 0xC00A0007, /// The MODEM.INF file was not found. CTX_MODEM_INF_NOT_FOUND = 0xC00A0008, /// The modem (%1) was not found in the MODEM.INF file. CTX_INVALID_MODEMNAME = 0xC00A0009, /// The modem did not accept the command sent to it. 
/// Verify that the configured modem name matches the attached modem. CTX_RESPONSE_ERROR = 0xC00A000A, /// The modem did not respond to the command sent to it. /// Verify that the modem cable is properly attached and the modem is turned on. CTX_MODEM_RESPONSE_TIMEOUT = 0xC00A000B, /// Carrier detection has failed or the carrier has been dropped due to disconnection. CTX_MODEM_RESPONSE_NO_CARRIER = 0xC00A000C, /// A dial tone was not detected within the required time. /// Verify that the phone cable is properly attached and functional. CTX_MODEM_RESPONSE_NO_DIALTONE = 0xC00A000D, /// A busy signal was detected at a remote site on callback. CTX_MODEM_RESPONSE_BUSY = 0xC00A000E, /// A voice was detected at a remote site on callback. CTX_MODEM_RESPONSE_VOICE = 0xC00A000F, /// Transport driver error. CTX_TD_ERROR = 0xC00A0010, /// The client you are using is not licensed to use this system. Your logon request is denied. CTX_LICENSE_CLIENT_INVALID = 0xC00A0012, /// The system has reached its licensed logon limit. Try again later. CTX_LICENSE_NOT_AVAILABLE = 0xC00A0013, /// The system license has expired. Your logon request is denied. CTX_LICENSE_EXPIRED = 0xC00A0014, /// The specified session cannot be found. CTX_WINSTATION_NOT_FOUND = 0xC00A0015, /// The specified session name is already in use. CTX_WINSTATION_NAME_COLLISION = 0xC00A0016, /// The requested operation cannot be completed because the terminal connection is currently processing a connect, disconnect, reset, or delete operation. CTX_WINSTATION_BUSY = 0xC00A0017, /// An attempt has been made to connect to a session whose video mode is not supported by the current client. CTX_BAD_VIDEO_MODE = 0xC00A0018, /// The application attempted to enable DOS graphics mode. DOS graphics mode is not supported. CTX_GRAPHICS_INVALID = 0xC00A0022, /// The requested operation can be performed only on the system console. /// This is most often the result of a driver or system DLL requiring direct console access. 
CTX_NOT_CONSOLE = 0xC00A0024, /// The client failed to respond to the server connect message. CTX_CLIENT_QUERY_TIMEOUT = 0xC00A0026, /// Disconnecting the console session is not supported. CTX_CONSOLE_DISCONNECT = 0xC00A0027, /// Reconnecting a disconnected session to the console is not supported. CTX_CONSOLE_CONNECT = 0xC00A0028, /// The request to control another session remotely was denied. CTX_SHADOW_DENIED = 0xC00A002A, /// A process has requested access to a session, but has not been granted those access rights. CTX_WINSTATION_ACCESS_DENIED = 0xC00A002B, /// The terminal connection driver %1 is invalid. CTX_INVALID_WD = 0xC00A002E, /// The terminal connection driver %1 was not found in the system path. CTX_WD_NOT_FOUND = 0xC00A002F, /// The requested session cannot be controlled remotely. /// You cannot control your own session, a session that is trying to control your session, a session that has no user logged on, or other sessions from the console. CTX_SHADOW_INVALID = 0xC00A0030, /// The requested session is not configured to allow remote control. CTX_SHADOW_DISABLED = 0xC00A0031, /// The RDP protocol component %2 detected an error in the protocol stream and has disconnected the client. RDP_PROTOCOL_ERROR = 0xC00A0032, /// Your request to connect to this terminal server has been rejected. /// Your terminal server client license number has not been entered for this copy of the terminal client. /// Contact your system administrator for help in entering a valid, unique license number for this terminal server client. Click OK to continue. CTX_CLIENT_LICENSE_NOT_SET = 0xC00A0033, /// Your request to connect to this terminal server has been rejected. /// Your terminal server client license number is currently being used by another user. /// Contact your system administrator to obtain a new copy of the terminal server client with a valid, unique license number. Click OK to continue. 
CTX_CLIENT_LICENSE_IN_USE = 0xC00A0034, /// The remote control of the console was terminated because the display mode was changed. /// Changing the display mode in a remote control session is not supported. CTX_SHADOW_ENDED_BY_MODE_CHANGE = 0xC00A0035, /// Remote control could not be terminated because the specified session is not currently being remotely controlled. CTX_SHADOW_NOT_RUNNING = 0xC00A0036, /// Your interactive logon privilege has been disabled. Contact your system administrator. CTX_LOGON_DISABLED = 0xC00A0037, /// The terminal server security layer detected an error in the protocol stream and has disconnected the client. CTX_SECURITY_LAYER_ERROR = 0xC00A0038, /// The target session is incompatible with the current session. TS_INCOMPATIBLE_SESSIONS = 0xC00A0039, /// The resource loader failed to find an MUI file. MUI_FILE_NOT_FOUND = 0xC00B0001, /// The resource loader failed to load an MUI file because the file failed to pass validation. MUI_INVALID_FILE = 0xC00B0002, /// The RC manifest is corrupted with garbage data, is an unsupported version, or is missing a required item. MUI_INVALID_RC_CONFIG = 0xC00B0003, /// The RC manifest has an invalid culture name. MUI_INVALID_LOCALE_NAME = 0xC00B0004, /// The RC manifest has and invalid ultimate fallback name. MUI_INVALID_ULTIMATEFALLBACK_NAME = 0xC00B0005, /// The resource loader cache does not have a loaded MUI entry. MUI_FILE_NOT_LOADED = 0xC00B0006, /// The user stopped resource enumeration. RESOURCE_ENUM_USER_STOP = 0xC00B0007, /// The cluster node is not valid. CLUSTER_INVALID_NODE = 0xC0130001, /// The cluster node already exists. CLUSTER_NODE_EXISTS = 0xC0130002, /// A node is in the process of joining the cluster. CLUSTER_JOIN_IN_PROGRESS = 0xC0130003, /// The cluster node was not found. CLUSTER_NODE_NOT_FOUND = 0xC0130004, /// The cluster local node information was not found. CLUSTER_LOCAL_NODE_NOT_FOUND = 0xC0130005, /// The cluster network already exists. 
CLUSTER_NETWORK_EXISTS = 0xC0130006, /// The cluster network was not found. CLUSTER_NETWORK_NOT_FOUND = 0xC0130007, /// The cluster network interface already exists. CLUSTER_NETINTERFACE_EXISTS = 0xC0130008, /// The cluster network interface was not found. CLUSTER_NETINTERFACE_NOT_FOUND = 0xC0130009, /// The cluster request is not valid for this object. CLUSTER_INVALID_REQUEST = 0xC013000A, /// The cluster network provider is not valid. CLUSTER_INVALID_NETWORK_PROVIDER = 0xC013000B, /// The cluster node is down. CLUSTER_NODE_DOWN = 0xC013000C, /// The cluster node is not reachable. CLUSTER_NODE_UNREACHABLE = 0xC013000D, /// The cluster node is not a member of the cluster. CLUSTER_NODE_NOT_MEMBER = 0xC013000E, /// A cluster join operation is not in progress. CLUSTER_JOIN_NOT_IN_PROGRESS = 0xC013000F, /// The cluster network is not valid. CLUSTER_INVALID_NETWORK = 0xC0130010, /// No network adapters are available. CLUSTER_NO_NET_ADAPTERS = 0xC0130011, /// The cluster node is up. CLUSTER_NODE_UP = 0xC0130012, /// The cluster node is paused. CLUSTER_NODE_PAUSED = 0xC0130013, /// The cluster node is not paused. CLUSTER_NODE_NOT_PAUSED = 0xC0130014, /// No cluster security context is available. CLUSTER_NO_SECURITY_CONTEXT = 0xC0130015, /// The cluster network is not configured for internal cluster communication. CLUSTER_NETWORK_NOT_INTERNAL = 0xC0130016, /// The cluster node has been poisoned. CLUSTER_POISONED = 0xC0130017, /// An attempt was made to run an invalid AML opcode. ACPI_INVALID_OPCODE = 0xC0140001, /// The AML interpreter stack has overflowed. ACPI_STACK_OVERFLOW = 0xC0140002, /// An inconsistent state has occurred. ACPI_ASSERT_FAILED = 0xC0140003, /// An attempt was made to access an array outside its bounds. ACPI_INVALID_INDEX = 0xC0140004, /// A required argument was not specified. ACPI_INVALID_ARGUMENT = 0xC0140005, /// A fatal error has occurred. ACPI_FATAL = 0xC0140006, /// An invalid SuperName was specified. 
ACPI_INVALID_SUPERNAME = 0xC0140007, /// An argument with an incorrect type was specified. ACPI_INVALID_ARGTYPE = 0xC0140008, /// An object with an incorrect type was specified. ACPI_INVALID_OBJTYPE = 0xC0140009, /// A target with an incorrect type was specified. ACPI_INVALID_TARGETTYPE = 0xC014000A, /// An incorrect number of arguments was specified. ACPI_INCORRECT_ARGUMENT_COUNT = 0xC014000B, /// An address failed to translate. ACPI_ADDRESS_NOT_MAPPED = 0xC014000C, /// An incorrect event type was specified. ACPI_INVALID_EVENTTYPE = 0xC014000D, /// A handler for the target already exists. ACPI_HANDLER_COLLISION = 0xC014000E, /// Invalid data for the target was specified. ACPI_INVALID_DATA = 0xC014000F, /// An invalid region for the target was specified. ACPI_INVALID_REGION = 0xC0140010, /// An attempt was made to access a field outside the defined range. ACPI_INVALID_ACCESS_SIZE = 0xC0140011, /// The global system lock could not be acquired. ACPI_ACQUIRE_GLOBAL_LOCK = 0xC0140012, /// An attempt was made to reinitialize the ACPI subsystem. ACPI_ALREADY_INITIALIZED = 0xC0140013, /// The ACPI subsystem has not been initialized. ACPI_NOT_INITIALIZED = 0xC0140014, /// An incorrect mutex was specified. ACPI_INVALID_MUTEX_LEVEL = 0xC0140015, /// The mutex is not currently owned. ACPI_MUTEX_NOT_OWNED = 0xC0140016, /// An attempt was made to access the mutex by a process that was not the owner. ACPI_MUTEX_NOT_OWNER = 0xC0140017, /// An error occurred during an access to region space. ACPI_RS_ACCESS = 0xC0140018, /// An attempt was made to use an incorrect table. ACPI_INVALID_TABLE = 0xC0140019, /// The registration of an ACPI event failed. ACPI_REG_HANDLER_FAILED = 0xC0140020, /// An ACPI power object failed to transition state. ACPI_POWER_REQUEST_FAILED = 0xC0140021, /// The requested section is not present in the activation context. SXS_SECTION_NOT_FOUND = 0xC0150001, /// Windows was unble to process the application binding information. 
/// Refer to the system event log for further information. SXS_CANT_GEN_ACTCTX = 0xC0150002, /// The application binding data format is invalid. SXS_INVALID_ACTCTXDATA_FORMAT = 0xC0150003, /// The referenced assembly is not installed on the system. SXS_ASSEMBLY_NOT_FOUND = 0xC0150004, /// The manifest file does not begin with the required tag and format information. SXS_MANIFEST_FORMAT_ERROR = 0xC0150005, /// The manifest file contains one or more syntax errors. SXS_MANIFEST_PARSE_ERROR = 0xC0150006, /// The application attempted to activate a disabled activation context. SXS_ACTIVATION_CONTEXT_DISABLED = 0xC0150007, /// The requested lookup key was not found in any active activation context. SXS_KEY_NOT_FOUND = 0xC0150008, /// A component version required by the application conflicts with another component version that is already active. SXS_VERSION_CONFLICT = 0xC0150009, /// The type requested activation context section does not match the query API used. SXS_WRONG_SECTION_TYPE = 0xC015000A, /// Lack of system resources has required isolated activation to be disabled for the current thread of execution. SXS_THREAD_QUERIES_DISABLED = 0xC015000B, /// The referenced assembly could not be found. SXS_ASSEMBLY_MISSING = 0xC015000C, /// An attempt to set the process default activation context failed because the process default activation context was already set. SXS_PROCESS_DEFAULT_ALREADY_SET = 0xC015000E, /// The activation context being deactivated is not the most recently activated one. SXS_EARLY_DEACTIVATION = 0xC015000F, /// The activation context being deactivated is not active for the current thread of execution. SXS_INVALID_DEACTIVATION = 0xC0150010, /// The activation context being deactivated has already been deactivated. SXS_MULTIPLE_DEACTIVATION = 0xC0150011, /// The activation context of the system default assembly could not be generated. 
SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY = 0xC0150012, /// A component used by the isolation facility has requested that the process be terminated. SXS_PROCESS_TERMINATION_REQUESTED = 0xC0150013, /// The activation context activation stack for the running thread of execution is corrupt. SXS_CORRUPT_ACTIVATION_STACK = 0xC0150014, /// The application isolation metadata for this process or thread has become corrupt. SXS_CORRUPTION = 0xC0150015, /// The value of an attribute in an identity is not within the legal range. SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE = 0xC0150016, /// The name of an attribute in an identity is not within the legal range. SXS_INVALID_IDENTITY_ATTRIBUTE_NAME = 0xC0150017, /// An identity contains two definitions for the same attribute. SXS_IDENTITY_DUPLICATE_ATTRIBUTE = 0xC0150018, /// The identity string is malformed. /// This might be due to a trailing comma, more than two unnamed attributes, a missing attribute name, or a missing attribute value. SXS_IDENTITY_PARSE_ERROR = 0xC0150019, /// The component store has become corrupted. SXS_COMPONENT_STORE_CORRUPT = 0xC015001A, /// A component's file does not match the verification information present in the component manifest. SXS_FILE_HASH_MISMATCH = 0xC015001B, /// The identities of the manifests are identical, but their contents are different. SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT = 0xC015001C, /// The component identities are different. SXS_IDENTITIES_DIFFERENT = 0xC015001D, /// The assembly is not a deployment. SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT = 0xC015001E, /// The file is not a part of the assembly. SXS_FILE_NOT_PART_OF_ASSEMBLY = 0xC015001F, /// An advanced installer failed during setup or servicing. ADVANCED_INSTALLER_FAILED = 0xC0150020, /// The character encoding in the XML declaration did not match the encoding used in the document. XML_ENCODING_MISMATCH = 0xC0150021, /// The size of the manifest exceeds the maximum allowed. 
SXS_MANIFEST_TOO_BIG = 0xC0150022, /// The setting is not registered. SXS_SETTING_NOT_REGISTERED = 0xC0150023, /// One or more required transaction members are not present. SXS_TRANSACTION_CLOSURE_INCOMPLETE = 0xC0150024, /// The SMI primitive installer failed during setup or servicing. SMI_PRIMITIVE_INSTALLER_FAILED = 0xC0150025, /// A generic command executable returned a result that indicates failure. GENERIC_COMMAND_FAILED = 0xC0150026, /// A component is missing file verification information in its manifest. SXS_FILE_HASH_MISSING = 0xC0150027, /// The function attempted to use a name that is reserved for use by another transaction. TRANSACTIONAL_CONFLICT = 0xC0190001, /// The transaction handle associated with this operation is invalid. INVALID_TRANSACTION = 0xC0190002, /// The requested operation was made in the context of a transaction that is no longer active. TRANSACTION_NOT_ACTIVE = 0xC0190003, /// The transaction manager was unable to be successfully initialized. Transacted operations are not supported. TM_INITIALIZATION_FAILED = 0xC0190004, /// Transaction support within the specified file system resource manager was not started or was shut down due to an error. RM_NOT_ACTIVE = 0xC0190005, /// The metadata of the resource manager has been corrupted. The resource manager will not function. RM_METADATA_CORRUPT = 0xC0190006, /// The resource manager attempted to prepare a transaction that it has not successfully joined. TRANSACTION_NOT_JOINED = 0xC0190007, /// The specified directory does not contain a file system resource manager. DIRECTORY_NOT_RM = 0xC0190008, /// The remote server or share does not support transacted file operations. TRANSACTIONS_UNSUPPORTED_REMOTE = 0xC019000A, /// The requested log size for the file system resource manager is invalid. LOG_RESIZE_INVALID_SIZE = 0xC019000B, /// The remote server sent mismatching version number or Fid for a file opened with transactions. 
REMOTE_FILE_VERSION_MISMATCH = 0xC019000C, /// The resource manager tried to register a protocol that already exists. CRM_PROTOCOL_ALREADY_EXISTS = 0xC019000F, /// The attempt to propagate the transaction failed. TRANSACTION_PROPAGATION_FAILED = 0xC0190010, /// The requested propagation protocol was not registered as a CRM. CRM_PROTOCOL_NOT_FOUND = 0xC0190011, /// The transaction object already has a superior enlistment, and the caller attempted an operation that would have created a new superior. Only a single superior enlistment is allowed. TRANSACTION_SUPERIOR_EXISTS = 0xC0190012, /// The requested operation is not valid on the transaction object in its current state. TRANSACTION_REQUEST_NOT_VALID = 0xC0190013, /// The caller has called a response API, but the response is not expected because the transaction manager did not issue the corresponding request to the caller. TRANSACTION_NOT_REQUESTED = 0xC0190014, /// It is too late to perform the requested operation, because the transaction has already been aborted. TRANSACTION_ALREADY_ABORTED = 0xC0190015, /// It is too late to perform the requested operation, because the transaction has already been committed. TRANSACTION_ALREADY_COMMITTED = 0xC0190016, /// The buffer passed in to NtPushTransaction or NtPullTransaction is not in a valid format. TRANSACTION_INVALID_MARSHALL_BUFFER = 0xC0190017, /// The current transaction context associated with the thread is not a valid handle to a transaction object. CURRENT_TRANSACTION_NOT_VALID = 0xC0190018, /// An attempt to create space in the transactional resource manager's log failed. /// The failure status has been recorded in the event log. LOG_GROWTH_FAILED = 0xC0190019, /// The object (file, stream, or link) that corresponds to the handle has been deleted by a transaction savepoint rollback. OBJECT_NO_LONGER_EXISTS = 0xC0190021, /// The specified file miniversion was not found for this transacted file open. 
STREAM_MINIVERSION_NOT_FOUND = 0xC0190022, /// The specified file miniversion was found but has been invalidated. /// The most likely cause is a transaction savepoint rollback. STREAM_MINIVERSION_NOT_VALID = 0xC0190023, /// A miniversion can be opened only in the context of the transaction that created it. MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION = 0xC0190024, /// It is not possible to open a miniversion with modify access. CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT = 0xC0190025, /// It is not possible to create any more miniversions for this stream. CANT_CREATE_MORE_STREAM_MINIVERSIONS = 0xC0190026, /// The handle has been invalidated by a transaction. /// The most likely cause is the presence of memory mapping on a file or an open handle when the transaction ended or rolled back to savepoint. HANDLE_NO_LONGER_VALID = 0xC0190028, /// The log data is corrupt. LOG_CORRUPTION_DETECTED = 0xC0190030, /// The transaction outcome is unavailable because the resource manager responsible for it is disconnected. RM_DISCONNECTED = 0xC0190032, /// The request was rejected because the enlistment in question is not a superior enlistment. ENLISTMENT_NOT_SUPERIOR = 0xC0190033, /// The file cannot be opened in a transaction because its identity depends on the outcome of an unresolved transaction. FILE_IDENTITY_NOT_PERSISTENT = 0xC0190036, /// The operation cannot be performed because another transaction is depending on this property not changing. CANT_BREAK_TRANSACTIONAL_DEPENDENCY = 0xC0190037, /// The operation would involve a single file with two transactional resource managers and is, therefore, not allowed. CANT_CROSS_RM_BOUNDARY = 0xC0190038, /// The $Txf directory must be empty for this operation to succeed. TXF_DIR_NOT_EMPTY = 0xC0190039, /// The operation would leave a transactional resource manager in an inconsistent state and is therefore not allowed. 
INDOUBT_TRANSACTIONS_EXIST = 0xC019003A, /// The operation could not be completed because the transaction manager does not have a log. TM_VOLATILE = 0xC019003B, /// A rollback could not be scheduled because a previously scheduled rollback has already executed or been queued for execution. ROLLBACK_TIMER_EXPIRED = 0xC019003C, /// The transactional metadata attribute on the file or directory %hs is corrupt and unreadable. TXF_ATTRIBUTE_CORRUPT = 0xC019003D, /// The encryption operation could not be completed because a transaction is active. EFS_NOT_ALLOWED_IN_TRANSACTION = 0xC019003E, /// This object is not allowed to be opened in a transaction. TRANSACTIONAL_OPEN_NOT_ALLOWED = 0xC019003F, /// Memory mapping (creating a mapped section) a remote file under a transaction is not supported. TRANSACTED_MAPPING_UNSUPPORTED_REMOTE = 0xC0190040, /// Promotion was required to allow the resource manager to enlist, but the transaction was set to disallow it. TRANSACTION_REQUIRED_PROMOTION = 0xC0190043, /// This file is open for modification in an unresolved transaction and can be opened for execute only by a transacted reader. CANNOT_EXECUTE_FILE_IN_TRANSACTION = 0xC0190044, /// The request to thaw frozen transactions was ignored because transactions were not previously frozen. TRANSACTIONS_NOT_FROZEN = 0xC0190045, /// Transactions cannot be frozen because a freeze is already in progress. TRANSACTION_FREEZE_IN_PROGRESS = 0xC0190046, /// The target volume is not a snapshot volume. /// This operation is valid only on a volume mounted as a snapshot. NOT_SNAPSHOT_VOLUME = 0xC0190047, /// The savepoint operation failed because files are open on the transaction, which is not permitted. NO_SAVEPOINT_WITH_OPEN_FILES = 0xC0190048, /// The sparse operation could not be completed because a transaction is active on the file. 
SPARSE_NOT_ALLOWED_IN_TRANSACTION = 0xC0190049, /// The call to create a transaction manager object failed because the Tm Identity that is stored in the log file does not match the Tm Identity that was passed in as an argument. TM_IDENTITY_MISMATCH = 0xC019004A, /// I/O was attempted on a section object that has been floated as a result of a transaction ending. There is no valid data. FLOATED_SECTION = 0xC019004B, /// The transactional resource manager cannot currently accept transacted work due to a transient condition, such as low resources. CANNOT_ACCEPT_TRANSACTED_WORK = 0xC019004C, /// The transactional resource manager had too many transactions outstanding that could not be aborted. /// The transactional resource manager has been shut down. CANNOT_ABORT_TRANSACTIONS = 0xC019004D, /// The specified transaction was unable to be opened because it was not found. TRANSACTION_NOT_FOUND = 0xC019004E, /// The specified resource manager was unable to be opened because it was not found. RESOURCEMANAGER_NOT_FOUND = 0xC019004F, /// The specified enlistment was unable to be opened because it was not found. ENLISTMENT_NOT_FOUND = 0xC0190050, /// The specified transaction manager was unable to be opened because it was not found. TRANSACTIONMANAGER_NOT_FOUND = 0xC0190051, /// The specified resource manager was unable to create an enlistment because its associated transaction manager is not online. TRANSACTIONMANAGER_NOT_ONLINE = 0xC0190052, /// The specified transaction manager was unable to create the objects contained in its log file in the Ob namespace. /// Therefore, the transaction manager was unable to recover. TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION = 0xC0190053, /// The call to create a superior enlistment on this transaction object could not be completed because the transaction object specified for the enlistment is a subordinate branch of the transaction. /// Only the root of the transaction can be enlisted as a superior. 
TRANSACTION_NOT_ROOT = 0xC0190054, /// Because the associated transaction manager or resource manager has been closed, the handle is no longer valid. TRANSACTION_OBJECT_EXPIRED = 0xC0190055, /// The compression operation could not be completed because a transaction is active on the file. COMPRESSION_NOT_ALLOWED_IN_TRANSACTION = 0xC0190056, /// The specified operation could not be performed on this superior enlistment because the enlistment was not created with the corresponding completion response in the NotificationMask. TRANSACTION_RESPONSE_NOT_ENLISTED = 0xC0190057, /// The specified operation could not be performed because the record to be logged was too long. /// This can occur because either there are too many enlistments on this transaction or the combined RecoveryInformation being logged on behalf of those enlistments is too long. TRANSACTION_RECORD_TOO_LONG = 0xC0190058, /// The link-tracking operation could not be completed because a transaction is active. NO_LINK_TRACKING_IN_TRANSACTION = 0xC0190059, /// This operation cannot be performed in a transaction. OPERATION_NOT_SUPPORTED_IN_TRANSACTION = 0xC019005A, /// The kernel transaction manager had to abort or forget the transaction because it blocked forward progress. TRANSACTION_INTEGRITY_VIOLATED = 0xC019005B, /// The handle is no longer properly associated with its transaction. /// It might have been opened in a transactional resource manager that was subsequently forced to restart. Please close the handle and open a new one. EXPIRED_HANDLE = 0xC0190060, /// The specified operation could not be performed because the resource manager is not enlisted in the transaction. TRANSACTION_NOT_ENLISTED = 0xC0190061, /// The log service found an invalid log sector. LOG_SECTOR_INVALID = 0xC01A0001, /// The log service encountered a log sector with invalid block parity. LOG_SECTOR_PARITY_INVALID = 0xC01A0002, /// The log service encountered a remapped log sector. 
LOG_SECTOR_REMAPPED = 0xC01A0003, /// The log service encountered a partial or incomplete log block. LOG_BLOCK_INCOMPLETE = 0xC01A0004, /// The log service encountered an attempt to access data outside the active log range. LOG_INVALID_RANGE = 0xC01A0005, /// The log service user-log marshaling buffers are exhausted. LOG_BLOCKS_EXHAUSTED = 0xC01A0006, /// The log service encountered an attempt to read from a marshaling area with an invalid read context. LOG_READ_CONTEXT_INVALID = 0xC01A0007, /// The log service encountered an invalid log restart area. LOG_RESTART_INVALID = 0xC01A0008, /// The log service encountered an invalid log block version. LOG_BLOCK_VERSION = 0xC01A0009, /// The log service encountered an invalid log block. LOG_BLOCK_INVALID = 0xC01A000A, /// The log service encountered an attempt to read the log with an invalid read mode. LOG_READ_MODE_INVALID = 0xC01A000B, /// The log service encountered a corrupted metadata file. LOG_METADATA_CORRUPT = 0xC01A000D, /// The log service encountered a metadata file that could not be created by the log file system. LOG_METADATA_INVALID = 0xC01A000E, /// The log service encountered a metadata file with inconsistent data. LOG_METADATA_INCONSISTENT = 0xC01A000F, /// The log service encountered an attempt to erroneously allocate or dispose reservation space. LOG_RESERVATION_INVALID = 0xC01A0010, /// The log service cannot delete the log file or the file system container. LOG_CANT_DELETE = 0xC01A0011, /// The log service has reached the maximum allowable containers allocated to a log file. LOG_CONTAINER_LIMIT_EXCEEDED = 0xC01A0012, /// The log service has attempted to read or write backward past the start of the log. LOG_START_OF_LOG = 0xC01A0013, /// The log policy could not be installed because a policy of the same type is already present. LOG_POLICY_ALREADY_INSTALLED = 0xC01A0014, /// The log policy in question was not installed at the time of the request. 
LOG_POLICY_NOT_INSTALLED = 0xC01A0015, /// The installed set of policies on the log is invalid. LOG_POLICY_INVALID = 0xC01A0016, /// A policy on the log in question prevented the operation from completing. LOG_POLICY_CONFLICT = 0xC01A0017, /// The log space cannot be reclaimed because the log is pinned by the archive tail. LOG_PINNED_ARCHIVE_TAIL = 0xC01A0018, /// The log record is not a record in the log file. LOG_RECORD_NONEXISTENT = 0xC01A0019, /// The number of reserved log records or the adjustment of the number of reserved log records is invalid. LOG_RECORDS_RESERVED_INVALID = 0xC01A001A, /// The reserved log space or the adjustment of the log space is invalid. LOG_SPACE_RESERVED_INVALID = 0xC01A001B, /// A new or existing archive tail or the base of the active log is invalid. LOG_TAIL_INVALID = 0xC01A001C, /// The log space is exhausted. LOG_FULL = 0xC01A001D, /// The log is multiplexed; no direct writes to the physical log are allowed. LOG_MULTIPLEXED = 0xC01A001E, /// The operation failed because the log is dedicated. LOG_DEDICATED = 0xC01A001F, /// The operation requires an archive context. LOG_ARCHIVE_NOT_IN_PROGRESS = 0xC01A0020, /// Log archival is in progress. LOG_ARCHIVE_IN_PROGRESS = 0xC01A0021, /// The operation requires a nonephemeral log, but the log is ephemeral. LOG_EPHEMERAL = 0xC01A0022, /// The log must have at least two containers before it can be read from or written to. LOG_NOT_ENOUGH_CONTAINERS = 0xC01A0023, /// A log client has already registered on the stream. LOG_CLIENT_ALREADY_REGISTERED = 0xC01A0024, /// A log client has not been registered on the stream. LOG_CLIENT_NOT_REGISTERED = 0xC01A0025, /// A request has already been made to handle the log full condition. LOG_FULL_HANDLER_IN_PROGRESS = 0xC01A0026, /// The log service encountered an error when attempting to read from a log container. LOG_CONTAINER_READ_FAILED = 0xC01A0027, /// The log service encountered an error when attempting to write to a log container. 
LOG_CONTAINER_WRITE_FAILED = 0xC01A0028, /// The log service encountered an error when attempting to open a log container. LOG_CONTAINER_OPEN_FAILED = 0xC01A0029, /// The log service encountered an invalid container state when attempting a requested action. LOG_CONTAINER_STATE_INVALID = 0xC01A002A, /// The log service is not in the correct state to perform a requested action. LOG_STATE_INVALID = 0xC01A002B, /// The log space cannot be reclaimed because the log is pinned. LOG_PINNED = 0xC01A002C, /// The log metadata flush failed. LOG_METADATA_FLUSH_FAILED = 0xC01A002D, /// Security on the log and its containers is inconsistent. LOG_INCONSISTENT_SECURITY = 0xC01A002E, /// Records were appended to the log or reservation changes were made, but the log could not be flushed. LOG_APPENDED_FLUSH_FAILED = 0xC01A002F, /// The log is pinned due to reservation consuming most of the log space. /// Free some reserved records to make space available. LOG_PINNED_RESERVATION = 0xC01A0030, /// {Display Driver Stopped Responding} The %hs display driver has stopped working normally. /// Save your work and reboot the system to restore full display functionality. /// The next time you reboot the computer, a dialog box will allow you to upload data about this failure to Microsoft. VIDEO_HUNG_DISPLAY_DRIVER_THREAD = 0xC01B00EA, /// A handler was not defined by the filter for this operation. FLT_NO_HANDLER_DEFINED = 0xC01C0001, /// A context is already defined for this object. FLT_CONTEXT_ALREADY_DEFINED = 0xC01C0002, /// Asynchronous requests are not valid for this operation. FLT_INVALID_ASYNCHRONOUS_REQUEST = 0xC01C0003, /// This is an internal error code used by the filter manager to determine if a fast I/O operation should be forced down the input/output request packet (IRP) path. Minifilters should never return this value. FLT_DISALLOW_FAST_IO = 0xC01C0004, /// An invalid name request was made. /// The name requested cannot be retrieved at this time. 
FLT_INVALID_NAME_REQUEST = 0xC01C0005, /// Posting this operation to a worker thread for further processing is not safe at this time because it could lead to a system deadlock. FLT_NOT_SAFE_TO_POST_OPERATION = 0xC01C0006, /// The Filter Manager was not initialized when a filter tried to register. /// Make sure that the Filter Manager is loaded as a driver. FLT_NOT_INITIALIZED = 0xC01C0007, /// The filter is not ready for attachment to volumes because it has not finished initializing (FltStartFiltering has not been called). FLT_FILTER_NOT_READY = 0xC01C0008, /// The filter must clean up any operation-specific context at this time because it is being removed from the system before the operation is completed by the lower drivers. FLT_POST_OPERATION_CLEANUP = 0xC01C0009, /// The Filter Manager had an internal error from which it cannot recover; therefore, the operation has failed. /// This is usually the result of a filter returning an invalid value from a pre-operation callback. FLT_INTERNAL_ERROR = 0xC01C000A, /// The object specified for this action is in the process of being deleted; therefore, the action requested cannot be completed at this time. FLT_DELETING_OBJECT = 0xC01C000B, /// A nonpaged pool must be used for this type of context. FLT_MUST_BE_NONPAGED_POOL = 0xC01C000C, /// A duplicate handler definition has been provided for an operation. FLT_DUPLICATE_ENTRY = 0xC01C000D, /// The callback data queue has been disabled. FLT_CBDQ_DISABLED = 0xC01C000E, /// Do not attach the filter to the volume at this time. FLT_DO_NOT_ATTACH = 0xC01C000F, /// Do not detach the filter from the volume at this time. FLT_DO_NOT_DETACH = 0xC01C0010, /// An instance already exists at this altitude on the volume specified. FLT_INSTANCE_ALTITUDE_COLLISION = 0xC01C0011, /// An instance already exists with this name on the volume specified. FLT_INSTANCE_NAME_COLLISION = 0xC01C0012, /// The system could not find the filter specified. 
FLT_FILTER_NOT_FOUND = 0xC01C0013, /// The system could not find the volume specified. FLT_VOLUME_NOT_FOUND = 0xC01C0014, /// The system could not find the instance specified. FLT_INSTANCE_NOT_FOUND = 0xC01C0015, /// No registered context allocation definition was found for the given request. FLT_CONTEXT_ALLOCATION_NOT_FOUND = 0xC01C0016, /// An invalid parameter was specified during context registration. FLT_INVALID_CONTEXT_REGISTRATION = 0xC01C0017, /// The name requested was not found in the Filter Manager name cache and could not be retrieved from the file system. FLT_NAME_CACHE_MISS = 0xC01C0018, /// The requested device object does not exist for the given volume. FLT_NO_DEVICE_OBJECT = 0xC01C0019, /// The specified volume is already mounted. FLT_VOLUME_ALREADY_MOUNTED = 0xC01C001A, /// The specified transaction context is already enlisted in a transaction. FLT_ALREADY_ENLISTED = 0xC01C001B, /// The specified context is already attached to another object. FLT_CONTEXT_ALREADY_LINKED = 0xC01C001C, /// No waiter is present for the filter's reply to this message. FLT_NO_WAITER_FOR_REPLY = 0xC01C0020, /// A monitor descriptor could not be obtained. MONITOR_NO_DESCRIPTOR = 0xC01D0001, /// This release does not support the format of the obtained monitor descriptor. MONITOR_UNKNOWN_DESCRIPTOR_FORMAT = 0xC01D0002, /// The checksum of the obtained monitor descriptor is invalid. MONITOR_INVALID_DESCRIPTOR_CHECKSUM = 0xC01D0003, /// The monitor descriptor contains an invalid standard timing block. MONITOR_INVALID_STANDARD_TIMING_BLOCK = 0xC01D0004, /// WMI data-block registration failed for one of the MSMonitorClass WMI subclasses. MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED = 0xC01D0005, /// The provided monitor descriptor block is either corrupted or does not contain the monitor's detailed serial number. 
MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK = 0xC01D0006, /// The provided monitor descriptor block is either corrupted or does not contain the monitor's user-friendly name. MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK = 0xC01D0007, /// There is no monitor descriptor data at the specified (offset or size) region. MONITOR_NO_MORE_DESCRIPTOR_DATA = 0xC01D0008, /// The monitor descriptor contains an invalid detailed timing block. MONITOR_INVALID_DETAILED_TIMING_BLOCK = 0xC01D0009, /// Monitor descriptor contains invalid manufacture date. MONITOR_INVALID_MANUFACTURE_DATE = 0xC01D000A, /// Exclusive mode ownership is needed to create an unmanaged primary allocation. GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER = 0xC01E0000, /// The driver needs more DMA buffer space to complete the requested operation. GRAPHICS_INSUFFICIENT_DMA_BUFFER = 0xC01E0001, /// The specified display adapter handle is invalid. GRAPHICS_INVALID_DISPLAY_ADAPTER = 0xC01E0002, /// The specified display adapter and all of its state have been reset. GRAPHICS_ADAPTER_WAS_RESET = 0xC01E0003, /// The driver stack does not match the expected driver model. GRAPHICS_INVALID_DRIVER_MODEL = 0xC01E0004, /// Present happened but ended up into the changed desktop mode. GRAPHICS_PRESENT_MODE_CHANGED = 0xC01E0005, /// Nothing to present due to desktop occlusion. GRAPHICS_PRESENT_OCCLUDED = 0xC01E0006, /// Not able to present due to denial of desktop access. GRAPHICS_PRESENT_DENIED = 0xC01E0007, /// Not able to present with color conversion. GRAPHICS_CANNOTCOLORCONVERT = 0xC01E0008, /// Present redirection is disabled (desktop windowing management subsystem is off). GRAPHICS_PRESENT_REDIRECTION_DISABLED = 0xC01E000B, /// Previous exclusive VidPn source owner has released its ownership GRAPHICS_PRESENT_UNOCCLUDED = 0xC01E000C, /// Not enough video memory is available to complete the operation. GRAPHICS_NO_VIDEO_MEMORY = 0xC01E0100, /// Could not probe and lock the underlying memory of an allocation. 
GRAPHICS_CANT_LOCK_MEMORY = 0xC01E0101, /// The allocation is currently busy. GRAPHICS_ALLOCATION_BUSY = 0xC01E0102, /// An object being referenced has already reached the maximum reference count and cannot be referenced further. GRAPHICS_TOO_MANY_REFERENCES = 0xC01E0103, /// A problem could not be solved due to an existing condition. Try again later. GRAPHICS_TRY_AGAIN_LATER = 0xC01E0104, /// A problem could not be solved due to an existing condition. Try again now. GRAPHICS_TRY_AGAIN_NOW = 0xC01E0105, /// The allocation is invalid. GRAPHICS_ALLOCATION_INVALID = 0xC01E0106, /// No more unswizzling apertures are currently available. GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE = 0xC01E0107, /// The current allocation cannot be unswizzled by an aperture. GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED = 0xC01E0108, /// The request failed because a pinned allocation cannot be evicted. GRAPHICS_CANT_EVICT_PINNED_ALLOCATION = 0xC01E0109, /// The allocation cannot be used from its current segment location for the specified operation. GRAPHICS_INVALID_ALLOCATION_USAGE = 0xC01E0110, /// A locked allocation cannot be used in the current command buffer. GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION = 0xC01E0111, /// The allocation being referenced has been closed permanently. GRAPHICS_ALLOCATION_CLOSED = 0xC01E0112, /// An invalid allocation instance is being referenced. GRAPHICS_INVALID_ALLOCATION_INSTANCE = 0xC01E0113, /// An invalid allocation handle is being referenced. GRAPHICS_INVALID_ALLOCATION_HANDLE = 0xC01E0114, /// The allocation being referenced does not belong to the current device. GRAPHICS_WRONG_ALLOCATION_DEVICE = 0xC01E0115, /// The specified allocation lost its content. GRAPHICS_ALLOCATION_CONTENT_LOST = 0xC01E0116, /// A GPU exception was detected on the given device. The device cannot be scheduled. GRAPHICS_GPU_EXCEPTION_ON_DEVICE = 0xC01E0200, /// The specified VidPN topology is invalid. 
GRAPHICS_INVALID_VIDPN_TOPOLOGY = 0xC01E0300, /// The specified VidPN topology is valid but is not supported by this model of the display adapter. GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED = 0xC01E0301, /// The specified VidPN topology is valid but is not currently supported by the display adapter due to allocation of its resources. GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED = 0xC01E0302, /// The specified VidPN handle is invalid. GRAPHICS_INVALID_VIDPN = 0xC01E0303, /// The specified video present source is invalid. GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE = 0xC01E0304, /// The specified video present target is invalid. GRAPHICS_INVALID_VIDEO_PRESENT_TARGET = 0xC01E0305, /// The specified VidPN modality is not supported (for example, at least two of the pinned modes are not co-functional). GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED = 0xC01E0306, /// The specified VidPN source mode set is invalid. GRAPHICS_INVALID_VIDPN_SOURCEMODESET = 0xC01E0308, /// The specified VidPN target mode set is invalid. GRAPHICS_INVALID_VIDPN_TARGETMODESET = 0xC01E0309, /// The specified video signal frequency is invalid. GRAPHICS_INVALID_FREQUENCY = 0xC01E030A, /// The specified video signal active region is invalid. GRAPHICS_INVALID_ACTIVE_REGION = 0xC01E030B, /// The specified video signal total region is invalid. GRAPHICS_INVALID_TOTAL_REGION = 0xC01E030C, /// The specified video present source mode is invalid. GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE = 0xC01E0310, /// The specified video present target mode is invalid. GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE = 0xC01E0311, /// The pinned mode must remain in the set on the VidPN's co-functional modality enumeration. GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET = 0xC01E0312, /// The specified video present path is already in the VidPN's topology. GRAPHICS_PATH_ALREADY_IN_TOPOLOGY = 0xC01E0313, /// The specified mode is already in the mode set. 
GRAPHICS_MODE_ALREADY_IN_MODESET = 0xC01E0314, /// The specified video present source set is invalid. GRAPHICS_INVALID_VIDEOPRESENTSOURCESET = 0xC01E0315, /// The specified video present target set is invalid. GRAPHICS_INVALID_VIDEOPRESENTTARGETSET = 0xC01E0316, /// The specified video present source is already in the video present source set. GRAPHICS_SOURCE_ALREADY_IN_SET = 0xC01E0317, /// The specified video present target is already in the video present target set. GRAPHICS_TARGET_ALREADY_IN_SET = 0xC01E0318, /// The specified VidPN present path is invalid. GRAPHICS_INVALID_VIDPN_PRESENT_PATH = 0xC01E0319, /// The miniport has no recommendation for augmenting the specified VidPN's topology. GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY = 0xC01E031A, /// The specified monitor frequency range set is invalid. GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET = 0xC01E031B, /// The specified monitor frequency range is invalid. GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE = 0xC01E031C, /// The specified frequency range is not in the specified monitor frequency range set. GRAPHICS_FREQUENCYRANGE_NOT_IN_SET = 0xC01E031D, /// The specified frequency range is already in the specified monitor frequency range set. GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET = 0xC01E031F, /// The specified mode set is stale. Reacquire the new mode set. GRAPHICS_STALE_MODESET = 0xC01E0320, /// The specified monitor source mode set is invalid. GRAPHICS_INVALID_MONITOR_SOURCEMODESET = 0xC01E0321, /// The specified monitor source mode is invalid. GRAPHICS_INVALID_MONITOR_SOURCE_MODE = 0xC01E0322, /// The miniport does not have a recommendation regarding the request to provide a functional VidPN given the current display adapter configuration. GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN = 0xC01E0323, /// The ID of the specified mode is being used by another mode in the set. 
GRAPHICS_MODE_ID_MUST_BE_UNIQUE = 0xC01E0324, /// The system failed to determine a mode that is supported by both the display adapter and the monitor connected to it. GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION = 0xC01E0325, /// The number of video present targets must be greater than or equal to the number of video present sources. GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES = 0xC01E0326, /// The specified present path is not in the VidPN's topology. GRAPHICS_PATH_NOT_IN_TOPOLOGY = 0xC01E0327, /// The display adapter must have at least one video present source. GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE = 0xC01E0328, /// The display adapter must have at least one video present target. GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET = 0xC01E0329, /// The specified monitor descriptor set is invalid. GRAPHICS_INVALID_MONITORDESCRIPTORSET = 0xC01E032A, /// The specified monitor descriptor is invalid. GRAPHICS_INVALID_MONITORDESCRIPTOR = 0xC01E032B, /// The specified descriptor is not in the specified monitor descriptor set. GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET = 0xC01E032C, /// The specified descriptor is already in the specified monitor descriptor set. GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET = 0xC01E032D, /// The ID of the specified monitor descriptor is being used by another descriptor in the set. GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE = 0xC01E032E, /// The specified video present target subset type is invalid. GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE = 0xC01E032F, /// Two or more of the specified resources are not related to each other, as defined by the interface semantics. GRAPHICS_RESOURCES_NOT_RELATED = 0xC01E0330, /// The ID of the specified video present source is being used by another source in the set. GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE = 0xC01E0331, /// The ID of the specified video present target is being used by another target in the set. 
GRAPHICS_TARGET_ID_MUST_BE_UNIQUE = 0xC01E0332, /// The specified VidPN source cannot be used because there is no available VidPN target to connect it to. GRAPHICS_NO_AVAILABLE_VIDPN_TARGET = 0xC01E0333, /// The newly arrived monitor could not be associated with a display adapter. GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER = 0xC01E0334, /// The particular display adapter does not have an associated VidPN manager. GRAPHICS_NO_VIDPNMGR = 0xC01E0335, /// The VidPN manager of the particular display adapter does not have an active VidPN. GRAPHICS_NO_ACTIVE_VIDPN = 0xC01E0336, /// The specified VidPN topology is stale; obtain the new topology. GRAPHICS_STALE_VIDPN_TOPOLOGY = 0xC01E0337, /// No monitor is connected on the specified video present target. GRAPHICS_MONITOR_NOT_CONNECTED = 0xC01E0338, /// The specified source is not part of the specified VidPN's topology. GRAPHICS_SOURCE_NOT_IN_TOPOLOGY = 0xC01E0339, /// The specified primary surface size is invalid. GRAPHICS_INVALID_PRIMARYSURFACE_SIZE = 0xC01E033A, /// The specified visible region size is invalid. GRAPHICS_INVALID_VISIBLEREGION_SIZE = 0xC01E033B, /// The specified stride is invalid. GRAPHICS_INVALID_STRIDE = 0xC01E033C, /// The specified pixel format is invalid. GRAPHICS_INVALID_PIXELFORMAT = 0xC01E033D, /// The specified color basis is invalid. GRAPHICS_INVALID_COLORBASIS = 0xC01E033E, /// The specified pixel value access mode is invalid. GRAPHICS_INVALID_PIXELVALUEACCESSMODE = 0xC01E033F, /// The specified target is not part of the specified VidPN's topology. GRAPHICS_TARGET_NOT_IN_TOPOLOGY = 0xC01E0340, /// Failed to acquire the display mode management interface. GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT = 0xC01E0341, /// The specified VidPN source is already owned by a DMM client and cannot be used until that client releases it. GRAPHICS_VIDPN_SOURCE_IN_USE = 0xC01E0342, /// The specified VidPN is active and cannot be accessed. 
GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN = 0xC01E0343, /// The specified VidPN's present path importance ordinal is invalid. GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL = 0xC01E0344, /// The specified VidPN's present path content geometry transformation is invalid. GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION = 0xC01E0345, /// The specified content geometry transformation is not supported on the respective VidPN present path. GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED = 0xC01E0346, /// The specified gamma ramp is invalid. GRAPHICS_INVALID_GAMMA_RAMP = 0xC01E0347, /// The specified gamma ramp is not supported on the respective VidPN present path. GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED = 0xC01E0348, /// Multisampling is not supported on the respective VidPN present path. GRAPHICS_MULTISAMPLING_NOT_SUPPORTED = 0xC01E0349, /// The specified mode is not in the specified mode set. GRAPHICS_MODE_NOT_IN_MODESET = 0xC01E034A, /// The specified VidPN topology recommendation reason is invalid. GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON = 0xC01E034D, /// The specified VidPN present path content type is invalid. GRAPHICS_INVALID_PATH_CONTENT_TYPE = 0xC01E034E, /// The specified VidPN present path copy protection type is invalid. GRAPHICS_INVALID_COPYPROTECTION_TYPE = 0xC01E034F, /// Only one unassigned mode set can exist at any one time for a particular VidPN source or target. GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS = 0xC01E0350, /// The specified scan line ordering type is invalid. GRAPHICS_INVALID_SCANLINE_ORDERING = 0xC01E0352, /// The topology changes are not allowed for the specified VidPN. GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED = 0xC01E0353, /// All available importance ordinals are being used in the specified topology. GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS = 0xC01E0354, /// The specified primary surface has a different private-format attribute than the current primary surface. 
GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT = 0xC01E0355, /// The specified mode-pruning algorithm is invalid. GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM = 0xC01E0356, /// The specified monitor-capability origin is invalid. GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN = 0xC01E0357, /// The specified monitor-frequency range constraint is invalid. GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT = 0xC01E0358, /// The maximum supported number of present paths has been reached. GRAPHICS_MAX_NUM_PATHS_REACHED = 0xC01E0359, /// The miniport requested that augmentation be canceled for the specified source of the specified VidPN's topology. GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION = 0xC01E035A, /// The specified client type was not recognized. GRAPHICS_INVALID_CLIENT_TYPE = 0xC01E035B, /// The client VidPN is not set on this adapter (for example, no user mode-initiated mode changes have taken place on this adapter). GRAPHICS_CLIENTVIDPN_NOT_SET = 0xC01E035C, /// The specified display adapter child device already has an external device connected to it. GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED = 0xC01E0400, /// The display adapter child device does not support reporting a descriptor. GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED = 0xC01E0401, /// The display adapter is not linked to any other adapters. GRAPHICS_NOT_A_LINKED_ADAPTER = 0xC01E0430, /// The lead adapter in a linked configuration was not enumerated yet. GRAPHICS_LEADLINK_NOT_ENUMERATED = 0xC01E0431, /// Some chain adapters in a linked configuration have not yet been enumerated. GRAPHICS_CHAINLINKS_NOT_ENUMERATED = 0xC01E0432, /// The chain of linked adapters is not ready to start because of an unknown failure. GRAPHICS_ADAPTER_CHAIN_NOT_READY = 0xC01E0433, /// An attempt was made to start a lead link display adapter when the chain links had not yet started. GRAPHICS_CHAINLINKS_NOT_STARTED = 0xC01E0434, /// An attempt was made to turn on a lead link display adapter when the chain links were turned off. 
GRAPHICS_CHAINLINKS_NOT_POWERED_ON = 0xC01E0435, /// The adapter link was found in an inconsistent state. /// Not all adapters are in an expected PNP/power state. GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE = 0xC01E0436, /// The driver trying to start is not the same as the driver for the posted display adapter. GRAPHICS_NOT_POST_DEVICE_DRIVER = 0xC01E0438, /// An operation is being attempted that requires the display adapter to be in a quiescent state. GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED = 0xC01E043B, /// The driver does not support OPM. GRAPHICS_OPM_NOT_SUPPORTED = 0xC01E0500, /// The driver does not support COPP. GRAPHICS_COPP_NOT_SUPPORTED = 0xC01E0501, /// The driver does not support UAB. GRAPHICS_UAB_NOT_SUPPORTED = 0xC01E0502, /// The specified encrypted parameters are invalid. GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS = 0xC01E0503, /// An array passed to a function cannot hold all of the data that the function wants to put in it. GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL = 0xC01E0504, /// The GDI display device passed to this function does not have any active protected outputs. GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST = 0xC01E0505, /// The PVP cannot find an actual GDI display device that corresponds to the passed-in GDI display device name. GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME = 0xC01E0506, /// This function failed because the GDI display device passed to it was not attached to the Windows desktop. GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP = 0xC01E0507, /// The PVP does not support mirroring display devices because they do not have any protected outputs. GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED = 0xC01E0508, /// The function failed because an invalid pointer parameter was passed to it. /// A pointer parameter is invalid if it is null, is not correctly aligned, or it points to an invalid address or a kernel mode address. GRAPHICS_OPM_INVALID_POINTER = 0xC01E050A, /// An internal error caused an operation to fail. 
GRAPHICS_OPM_INTERNAL_ERROR = 0xC01E050B, /// The function failed because the caller passed in an invalid OPM user-mode handle. GRAPHICS_OPM_INVALID_HANDLE = 0xC01E050C, /// This function failed because the GDI device passed to it did not have any monitors associated with it. GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE = 0xC01E050D, /// A certificate could not be returned because the certificate buffer passed to the function was too small. GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH = 0xC01E050E, /// DxgkDdiOpmCreateProtectedOutput() could not create a protected output because the video present target is in spanning mode. GRAPHICS_OPM_SPANNING_MODE_ENABLED = 0xC01E050F, /// DxgkDdiOpmCreateProtectedOutput() could not create a protected output because the video present target is in theater mode. GRAPHICS_OPM_THEATER_MODE_ENABLED = 0xC01E0510, /// The function call failed because the display adapter's hardware functionality scan (HFS) failed to validate the graphics hardware. GRAPHICS_PVP_HFS_FAILED = 0xC01E0511, /// The HDCP SRM passed to this function did not comply with section 5 of the HDCP 1.1 specification. GRAPHICS_OPM_INVALID_SRM = 0xC01E0512, /// The protected output cannot enable the HDCP system because it does not support it. GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP = 0xC01E0513, /// The protected output cannot enable analog copy protection because it does not support it. GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP = 0xC01E0514, /// The protected output cannot enable the CGMS-A protection technology because it does not support it. GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA = 0xC01E0515, /// DxgkDdiOPMGetInformation() cannot return the version of the SRM being used because the application never successfully passed an SRM to the protected output. GRAPHICS_OPM_HDCP_SRM_NEVER_SET = 0xC01E0516, /// DxgkDdiOPMConfigureProtectedOutput() cannot enable the specified output protection technology because the output's screen resolution is too high. 
GRAPHICS_OPM_RESOLUTION_TOO_HIGH = 0xC01E0517, /// DxgkDdiOPMConfigureProtectedOutput() cannot enable HDCP because other physical outputs are using the display adapter's HDCP hardware. GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE = 0xC01E0518, /// The operating system asynchronously destroyed this OPM-protected output because the operating system state changed. /// This error typically occurs because the monitor PDO associated with this protected output was removed or stopped, the protected output's session became a nonconsole session, or the protected output's desktop became inactive. GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS = 0xC01E051A, /// OPM functions cannot be called when a session is changing its type. /// Three types of sessions currently exist: console, disconnected, and remote (RDP or ICA). GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS = 0xC01E051B, /// The DxgkDdiOPMGetCOPPCompatibleInformation, DxgkDdiOPMGetInformation, or DxgkDdiOPMConfigureProtectedOutput function failed. /// This error is returned only if a protected output has OPM semantics. /// DxgkDdiOPMGetCOPPCompatibleInformation always returns this error if a protected output has OPM semantics. /// DxgkDdiOPMGetInformation returns this error code if the caller requested COPP-specific information. /// DxgkDdiOPMConfigureProtectedOutput returns this error when the caller tries to use a COPP-specific command. GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS = 0xC01E051C, /// The DxgkDdiOPMGetInformation and DxgkDdiOPMGetCOPPCompatibleInformation functions return this error code if the passed-in sequence number is not the expected sequence number or the passed-in OMAC value is invalid. GRAPHICS_OPM_INVALID_INFORMATION_REQUEST = 0xC01E051D, /// The function failed because an unexpected error occurred inside a display driver. 
GRAPHICS_OPM_DRIVER_INTERNAL_ERROR = 0xC01E051E, /// The DxgkDdiOPMGetCOPPCompatibleInformation, DxgkDdiOPMGetInformation, or DxgkDdiOPMConfigureProtectedOutput function failed. /// This error is returned only if a protected output has COPP semantics. /// DxgkDdiOPMGetCOPPCompatibleInformation returns this error code if the caller requested OPM-specific information. /// DxgkDdiOPMGetInformation always returns this error if a protected output has COPP semantics. /// DxgkDdiOPMConfigureProtectedOutput returns this error when the caller tries to use an OPM-specific command. GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS = 0xC01E051F, /// The DxgkDdiOPMGetCOPPCompatibleInformation and DxgkDdiOPMConfigureProtectedOutput functions return this error if the display driver does not support the DXGKMDT_OPM_GET_ACP_AND_CGMSA_SIGNALING and DXGKMDT_OPM_SET_ACP_AND_CGMSA_SIGNALING GUIDs. GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED = 0xC01E0520, /// The DxgkDdiOPMConfigureProtectedOutput function returns this error code if the passed-in sequence number is not the expected sequence number or the passed-in OMAC value is invalid. GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST = 0xC01E0521, /// The monitor connected to the specified video output does not have an I2C bus. GRAPHICS_I2C_NOT_SUPPORTED = 0xC01E0580, /// No device on the I2C bus has the specified address. GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST = 0xC01E0581, /// An error occurred while transmitting data to the device on the I2C bus. GRAPHICS_I2C_ERROR_TRANSMITTING_DATA = 0xC01E0582, /// An error occurred while receiving data from the device on the I2C bus. GRAPHICS_I2C_ERROR_RECEIVING_DATA = 0xC01E0583, /// The monitor does not support the specified VCP code. GRAPHICS_DDCCI_VCP_NOT_SUPPORTED = 0xC01E0584, /// The data received from the monitor is invalid. 
GRAPHICS_DDCCI_INVALID_DATA = 0xC01E0585, /// A function call failed because a monitor returned an invalid timing status byte when the operating system used the DDC/CI get timing report and timing message command to get a timing report from a monitor. GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE = 0xC01E0586, /// A monitor returned a DDC/CI capabilities string that did not comply with the ACCESS.bus 3.0, DDC/CI 1.1, or MCCS 2 Revision 1 specification. GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING = 0xC01E0587, /// An internal error caused an operation to fail. GRAPHICS_MCA_INTERNAL_ERROR = 0xC01E0588, /// An operation failed because a DDC/CI message had an invalid value in its command field. GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND = 0xC01E0589, /// This error occurred because a DDC/CI message had an invalid value in its length field. GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH = 0xC01E058A, /// This error occurred because the value in a DDC/CI message's checksum field did not match the message's computed checksum value. /// This error implies that the data was corrupted while it was being transmitted from a monitor to a computer. GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM = 0xC01E058B, /// This function failed because an invalid monitor handle was passed to it. GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE = 0xC01E058C, /// The operating system asynchronously destroyed the monitor that corresponds to this handle because the operating system's state changed. /// This error typically occurs because the monitor PDO associated with this handle was removed or stopped, or a display mode change occurred. /// A display mode change occurs when Windows sends a WM_DISPLAYCHANGE message to applications. GRAPHICS_MONITOR_NO_LONGER_EXISTS = 0xC01E058D, /// This function can be used only if a program is running in the local console session. /// It cannot be used if a program is running on a remote desktop session or on a terminal server session. 
GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED = 0xC01E05E0, /// This function cannot find an actual GDI display device that corresponds to the specified GDI display device name. GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME = 0xC01E05E1, /// The function failed because the specified GDI display device was not attached to the Windows desktop. GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP = 0xC01E05E2, /// This function does not support GDI mirroring display devices because GDI mirroring display devices do not have any physical monitors associated with them. GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED = 0xC01E05E3, /// The function failed because an invalid pointer parameter was passed to it. /// A pointer parameter is invalid if it is null, is not correctly aligned, or points to an invalid address or to a kernel mode address. GRAPHICS_INVALID_POINTER = 0xC01E05E4, /// This function failed because the GDI device passed to it did not have a monitor associated with it. GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE = 0xC01E05E5, /// An array passed to the function cannot hold all of the data that the function must copy into the array. GRAPHICS_PARAMETER_ARRAY_TOO_SMALL = 0xC01E05E6, /// An internal error caused an operation to fail. GRAPHICS_INTERNAL_ERROR = 0xC01E05E7, /// The function failed because the current session is changing its type. /// This function cannot be called when the current session is changing its type. /// Three types of sessions currently exist: console, disconnected, and remote (RDP or ICA). GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS = 0xC01E05E8, /// The volume must be unlocked before it can be used. FVE_LOCKED_VOLUME = 0xC0210000, /// The volume is fully decrypted and no key is available. FVE_NOT_ENCRYPTED = 0xC0210001, /// The control block for the encrypted volume is not valid. FVE_BAD_INFORMATION = 0xC0210002, /// Not enough free space remains on the volume to allow encryption. 
FVE_TOO_SMALL = 0xC0210003, /// The partition cannot be encrypted because the file system is not supported. FVE_FAILED_WRONG_FS = 0xC0210004, /// The file system is inconsistent. Run the Check Disk utility. FVE_FAILED_BAD_FS = 0xC0210005, /// The file system does not extend to the end of the volume. FVE_FS_NOT_EXTENDED = 0xC0210006, /// This operation cannot be performed while a file system is mounted on the volume. FVE_FS_MOUNTED = 0xC0210007, /// BitLocker Drive Encryption is not included with this version of Windows. FVE_NO_LICENSE = 0xC0210008, /// The requested action was denied by the FVE control engine. FVE_ACTION_NOT_ALLOWED = 0xC0210009, /// The data supplied is malformed. FVE_BAD_DATA = 0xC021000A, /// The volume is not bound to the system. FVE_VOLUME_NOT_BOUND = 0xC021000B, /// The volume specified is not a data volume. FVE_NOT_DATA_VOLUME = 0xC021000C, /// A read operation failed while converting the volume. FVE_CONV_READ_ERROR = 0xC021000D, /// A write operation failed while converting the volume. FVE_CONV_WRITE_ERROR = 0xC021000E, /// The control block for the encrypted volume was updated by another thread. Try again. FVE_OVERLAPPED_UPDATE = 0xC021000F, /// The volume encryption algorithm cannot be used on this sector size. FVE_FAILED_SECTOR_SIZE = 0xC0210010, /// BitLocker recovery authentication failed. FVE_FAILED_AUTHENTICATION = 0xC0210011, /// The volume specified is not the boot operating system volume. FVE_NOT_OS_VOLUME = 0xC0210012, /// The BitLocker startup key or recovery password could not be read from external media. FVE_KEYFILE_NOT_FOUND = 0xC0210013, /// The BitLocker startup key or recovery password file is corrupt or invalid. FVE_KEYFILE_INVALID = 0xC0210014, /// The BitLocker encryption key could not be obtained from the startup key or the recovery password. FVE_KEYFILE_NO_VMK = 0xC0210015, /// The TPM is disabled. FVE_TPM_DISABLED = 0xC0210016, /// The authorization data for the SRK of the TPM is not zero. 
FVE_TPM_SRK_AUTH_NOT_ZERO = 0xC0210017, /// The system boot information changed or the TPM locked out access to BitLocker encryption keys until the computer is restarted. FVE_TPM_INVALID_PCR = 0xC0210018, /// The BitLocker encryption key could not be obtained from the TPM. FVE_TPM_NO_VMK = 0xC0210019, /// The BitLocker encryption key could not be obtained from the TPM and PIN. FVE_PIN_INVALID = 0xC021001A, /// A boot application hash does not match the hash computed when BitLocker was turned on. FVE_AUTH_INVALID_APPLICATION = 0xC021001B, /// The Boot Configuration Data (BCD) settings are not supported or have changed because BitLocker was enabled. FVE_AUTH_INVALID_CONFIG = 0xC021001C, /// Boot debugging is enabled. Run Windows Boot Configuration Data Store Editor (bcdedit.exe) to turn it off. FVE_DEBUGGER_ENABLED = 0xC021001D, /// The BitLocker encryption key could not be obtained. FVE_DRY_RUN_FAILED = 0xC021001E, /// The metadata disk region pointer is incorrect. FVE_BAD_METADATA_POINTER = 0xC021001F, /// The backup copy of the metadata is out of date. FVE_OLD_METADATA_COPY = 0xC0210020, /// No action was taken because a system restart is required. FVE_REBOOT_REQUIRED = 0xC0210021, /// No action was taken because BitLocker Drive Encryption is in RAW access mode. FVE_RAW_ACCESS = 0xC0210022, /// BitLocker Drive Encryption cannot enter RAW access mode for this volume. FVE_RAW_BLOCKED = 0xC0210023, /// This feature of BitLocker Drive Encryption is not included with this version of Windows. FVE_NO_FEATURE_LICENSE = 0xC0210026, /// Group policy does not permit turning off BitLocker Drive Encryption on roaming data volumes. FVE_POLICY_USER_DISABLE_RDV_NOT_ALLOWED = 0xC0210027, /// Bitlocker Drive Encryption failed to recover from aborted conversion. /// This could be due to either all conversion logs being corrupted or the media being write-protected. FVE_CONV_RECOVERY_FAILED = 0xC0210028, /// The requested virtualization size is too big. 
FVE_VIRTUALIZED_SPACE_TOO_BIG = 0xC0210029, /// The drive is too small to be protected using BitLocker Drive Encryption. FVE_VOLUME_TOO_SMALL = 0xC0210030, /// The callout does not exist. FWP_CALLOUT_NOT_FOUND = 0xC0220001, /// The filter condition does not exist. FWP_CONDITION_NOT_FOUND = 0xC0220002, /// The filter does not exist. FWP_FILTER_NOT_FOUND = 0xC0220003, /// The layer does not exist. FWP_LAYER_NOT_FOUND = 0xC0220004, /// The provider does not exist. FWP_PROVIDER_NOT_FOUND = 0xC0220005, /// The provider context does not exist. FWP_PROVIDER_CONTEXT_NOT_FOUND = 0xC0220006, /// The sublayer does not exist. FWP_SUBLAYER_NOT_FOUND = 0xC0220007, /// The object does not exist. FWP_NOT_FOUND = 0xC0220008, /// An object with that GUID or LUID already exists. FWP_ALREADY_EXISTS = 0xC0220009, /// The object is referenced by other objects and cannot be deleted. FWP_IN_USE = 0xC022000A, /// The call is not allowed from within a dynamic session. FWP_DYNAMIC_SESSION_IN_PROGRESS = 0xC022000B, /// The call was made from the wrong session and cannot be completed. FWP_WRONG_SESSION = 0xC022000C, /// The call must be made from within an explicit transaction. FWP_NO_TXN_IN_PROGRESS = 0xC022000D, /// The call is not allowed from within an explicit transaction. FWP_TXN_IN_PROGRESS = 0xC022000E, /// The explicit transaction has been forcibly canceled. FWP_TXN_ABORTED = 0xC022000F, /// The session has been canceled. FWP_SESSION_ABORTED = 0xC0220010, /// The call is not allowed from within a read-only transaction. FWP_INCOMPATIBLE_TXN = 0xC0220011, /// The call timed out while waiting to acquire the transaction lock. FWP_TIMEOUT = 0xC0220012, /// The collection of network diagnostic events is disabled. FWP_NET_EVENTS_DISABLED = 0xC0220013, /// The operation is not supported by the specified layer. FWP_INCOMPATIBLE_LAYER = 0xC0220014, /// The call is allowed for kernel-mode callers only. 
FWP_KM_CLIENTS_ONLY = 0xC0220015, /// The call tried to associate two objects with incompatible lifetimes. FWP_LIFETIME_MISMATCH = 0xC0220016, /// The object is built-in and cannot be deleted. FWP_BUILTIN_OBJECT = 0xC0220017, /// The maximum number of callouts has been reached. FWP_TOO_MANY_CALLOUTS = 0xC0220018, /// A notification could not be delivered because a message queue has reached maximum capacity. FWP_NOTIFICATION_DROPPED = 0xC0220019, /// The traffic parameters do not match those for the security association context. FWP_TRAFFIC_MISMATCH = 0xC022001A, /// The call is not allowed for the current security association state. FWP_INCOMPATIBLE_SA_STATE = 0xC022001B, /// A required pointer is null. FWP_NULL_POINTER = 0xC022001C, /// An enumerator is not valid. FWP_INVALID_ENUMERATOR = 0xC022001D, /// The flags field contains an invalid value. FWP_INVALID_FLAGS = 0xC022001E, /// A network mask is not valid. FWP_INVALID_NET_MASK = 0xC022001F, /// An FWP_RANGE is not valid. FWP_INVALID_RANGE = 0xC0220020, /// The time interval is not valid. FWP_INVALID_INTERVAL = 0xC0220021, /// An array that must contain at least one element has a zero length. FWP_ZERO_LENGTH_ARRAY = 0xC0220022, /// The displayData.name field cannot be null. FWP_NULL_DISPLAY_NAME = 0xC0220023, /// The action type is not one of the allowed action types for a filter. FWP_INVALID_ACTION_TYPE = 0xC0220024, /// The filter weight is not valid. FWP_INVALID_WEIGHT = 0xC0220025, /// A filter condition contains a match type that is not compatible with the operands. FWP_MATCH_TYPE_MISMATCH = 0xC0220026, /// An FWP_VALUE or FWPM_CONDITION_VALUE is of the wrong type. FWP_TYPE_MISMATCH = 0xC0220027, /// An integer value is outside the allowed range. FWP_OUT_OF_BOUNDS = 0xC0220028, /// A reserved field is nonzero. FWP_RESERVED = 0xC0220029, /// A filter cannot contain multiple conditions operating on a single field. 
FWP_DUPLICATE_CONDITION = 0xC022002A, /// A policy cannot contain the same keying module more than once. FWP_DUPLICATE_KEYMOD = 0xC022002B, /// The action type is not compatible with the layer. FWP_ACTION_INCOMPATIBLE_WITH_LAYER = 0xC022002C, /// The action type is not compatible with the sublayer. FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER = 0xC022002D, /// The raw context or the provider context is not compatible with the layer. FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER = 0xC022002E, /// The raw context or the provider context is not compatible with the callout. FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT = 0xC022002F, /// The authentication method is not compatible with the policy type. FWP_INCOMPATIBLE_AUTH_METHOD = 0xC0220030, /// The Diffie-Hellman group is not compatible with the policy type. FWP_INCOMPATIBLE_DH_GROUP = 0xC0220031, /// An IKE policy cannot contain an Extended Mode policy. FWP_EM_NOT_SUPPORTED = 0xC0220032, /// The enumeration template or subscription will never match any objects. FWP_NEVER_MATCH = 0xC0220033, /// The provider context is of the wrong type. FWP_PROVIDER_CONTEXT_MISMATCH = 0xC0220034, /// The parameter is incorrect. FWP_INVALID_PARAMETER = 0xC0220035, /// The maximum number of sublayers has been reached. FWP_TOO_MANY_SUBLAYERS = 0xC0220036, /// The notification function for a callout returned an error. FWP_CALLOUT_NOTIFICATION_FAILED = 0xC0220037, /// The IPsec authentication configuration is not compatible with the authentication type. FWP_INCOMPATIBLE_AUTH_CONFIG = 0xC0220038, /// The IPsec cipher configuration is not compatible with the cipher type. FWP_INCOMPATIBLE_CIPHER_CONFIG = 0xC0220039, /// A policy cannot contain the same auth method more than once. FWP_DUPLICATE_AUTH_METHOD = 0xC022003C, /// The TCP/IP stack is not ready. FWP_TCPIP_NOT_READY = 0xC0220100, /// The injection handle is being closed by another thread. FWP_INJECT_HANDLE_CLOSING = 0xC0220101, /// The injection handle is stale. 
FWP_INJECT_HANDLE_STALE = 0xC0220102, /// The classify cannot be pended. FWP_CANNOT_PEND = 0xC0220103, /// The binding to the network interface is being closed. NDIS_CLOSING = 0xC0230002, /// An invalid version was specified. NDIS_BAD_VERSION = 0xC0230004, /// An invalid characteristics table was used. NDIS_BAD_CHARACTERISTICS = 0xC0230005, /// Failed to find the network interface or the network interface is not ready. NDIS_ADAPTER_NOT_FOUND = 0xC0230006, /// Failed to open the network interface. NDIS_OPEN_FAILED = 0xC0230007, /// The network interface has encountered an internal unrecoverable failure. NDIS_DEVICE_FAILED = 0xC0230008, /// The multicast list on the network interface is full. NDIS_MULTICAST_FULL = 0xC0230009, /// An attempt was made to add a duplicate multicast address to the list. NDIS_MULTICAST_EXISTS = 0xC023000A, /// At attempt was made to remove a multicast address that was never added. NDIS_MULTICAST_NOT_FOUND = 0xC023000B, /// The network interface aborted the request. NDIS_REQUEST_ABORTED = 0xC023000C, /// The network interface cannot process the request because it is being reset. NDIS_RESET_IN_PROGRESS = 0xC023000D, /// An attempt was made to send an invalid packet on a network interface. NDIS_INVALID_PACKET = 0xC023000F, /// The specified request is not a valid operation for the target device. NDIS_INVALID_DEVICE_REQUEST = 0xC0230010, /// The network interface is not ready to complete this operation. NDIS_ADAPTER_NOT_READY = 0xC0230011, /// The length of the buffer submitted for this operation is not valid. NDIS_INVALID_LENGTH = 0xC0230014, /// The data used for this operation is not valid. NDIS_INVALID_DATA = 0xC0230015, /// The length of the submitted buffer for this operation is too small. NDIS_BUFFER_TOO_SHORT = 0xC0230016, /// The network interface does not support this object identifier. NDIS_INVALID_OID = 0xC0230017, /// The network interface has been removed. 
NDIS_ADAPTER_REMOVED = 0xC0230018, /// The network interface does not support this media type. NDIS_UNSUPPORTED_MEDIA = 0xC0230019, /// An attempt was made to remove a token ring group address that is in use by other components. NDIS_GROUP_ADDRESS_IN_USE = 0xC023001A, /// An attempt was made to map a file that cannot be found. NDIS_FILE_NOT_FOUND = 0xC023001B, /// An error occurred while NDIS tried to map the file. NDIS_ERROR_READING_FILE = 0xC023001C, /// An attempt was made to map a file that is already mapped. NDIS_ALREADY_MAPPED = 0xC023001D, /// An attempt to allocate a hardware resource failed because the resource is used by another component. NDIS_RESOURCE_CONFLICT = 0xC023001E, /// The I/O operation failed because the network media is disconnected or the wireless access point is out of range. NDIS_MEDIA_DISCONNECTED = 0xC023001F, /// The network address used in the request is invalid. NDIS_INVALID_ADDRESS = 0xC0230022, /// The offload operation on the network interface has been paused. NDIS_PAUSED = 0xC023002A, /// The network interface was not found. NDIS_INTERFACE_NOT_FOUND = 0xC023002B, /// The revision number specified in the structure is not supported. NDIS_UNSUPPORTED_REVISION = 0xC023002C, /// The specified port does not exist on this network interface. NDIS_INVALID_PORT = 0xC023002D, /// The current state of the specified port on this network interface does not support the requested operation. NDIS_INVALID_PORT_STATE = 0xC023002E, /// The miniport adapter is in a lower power state. NDIS_LOW_POWER_STATE = 0xC023002F, /// The network interface does not support this request. NDIS_NOT_SUPPORTED = 0xC02300BB, /// The TCP connection is not offloadable because of a local policy setting. NDIS_OFFLOAD_POLICY = 0xC023100F, /// The TCP connection is not offloadable by the Chimney offload target. NDIS_OFFLOAD_CONNECTION_REJECTED = 0xC0231012, /// The IP Path object is not in an offloadable state. 
NDIS_OFFLOAD_PATH_REJECTED = 0xC0231013, /// The wireless LAN interface is in auto-configuration mode and does not support the requested parameter change operation. NDIS_DOT11_AUTO_CONFIG_ENABLED = 0xC0232000, /// The wireless LAN interface is busy and cannot perform the requested operation. NDIS_DOT11_MEDIA_IN_USE = 0xC0232001, /// The wireless LAN interface is power down and does not support the requested operation. NDIS_DOT11_POWER_STATE_INVALID = 0xC0232002, /// The list of wake on LAN patterns is full. NDIS_PM_WOL_PATTERN_LIST_FULL = 0xC0232003, /// The list of low power protocol offloads is full. NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL = 0xC0232004, /// The SPI in the packet does not match a valid IPsec SA. IPSEC_BAD_SPI = 0xC0360001, /// The packet was received on an IPsec SA whose lifetime has expired. IPSEC_SA_LIFETIME_EXPIRED = 0xC0360002, /// The packet was received on an IPsec SA that does not match the packet characteristics. IPSEC_WRONG_SA = 0xC0360003, /// The packet sequence number replay check failed. IPSEC_REPLAY_CHECK_FAILED = 0xC0360004, /// The IPsec header and/or trailer in the packet is invalid. IPSEC_INVALID_PACKET = 0xC0360005, /// The IPsec integrity check failed. IPSEC_INTEGRITY_CHECK_FAILED = 0xC0360006, /// IPsec dropped a clear text packet. IPSEC_CLEAR_TEXT_DROP = 0xC0360007, /// IPsec dropped an incoming ESP packet in authenticated firewall mode. This drop is benign. IPSEC_AUTH_FIREWALL_DROP = 0xC0360008, /// IPsec dropped a packet due to DOS throttle. IPSEC_THROTTLE_DROP = 0xC0360009, /// IPsec Dos Protection matched an explicit block rule. IPSEC_DOSP_BLOCK = 0xC0368000, /// IPsec Dos Protection received an IPsec specific multicast packet which is not allowed. IPSEC_DOSP_RECEIVED_MULTICAST = 0xC0368001, /// IPsec Dos Protection received an incorrectly formatted packet. IPSEC_DOSP_INVALID_PACKET = 0xC0368002, /// IPsec Dos Protection failed to lookup state. 
IPSEC_DOSP_STATE_LOOKUP_FAILED = 0xC0368003, /// IPsec Dos Protection failed to create state because there are already maximum number of entries allowed by policy. IPSEC_DOSP_MAX_ENTRIES = 0xC0368004, /// IPsec Dos Protection received an IPsec negotiation packet for a keying module which is not allowed by policy. IPSEC_DOSP_KEYMOD_NOT_ALLOWED = 0xC0368005, /// IPsec Dos Protection failed to create per internal IP ratelimit queue because there is already maximum number of queues allowed by policy. IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES = 0xC0368006, /// The system does not support mirrored volumes. VOLMGR_MIRROR_NOT_SUPPORTED = 0xC038005B, /// The system does not support RAID-5 volumes. VOLMGR_RAID5_NOT_SUPPORTED = 0xC038005C, /// A virtual disk support provider for the specified file was not found. VIRTDISK_PROVIDER_NOT_FOUND = 0xC03A0014, /// The specified disk is not a virtual disk. VIRTDISK_NOT_VIRTUAL_DISK = 0xC03A0015, /// The chain of virtual hard disks is inaccessible. /// The process has not been granted access rights to the parent virtual hard disk for the differencing disk. VHD_PARENT_VHD_ACCESS_DENIED = 0xC03A0016, /// The chain of virtual hard disks is corrupted. /// There is a mismatch in the virtual sizes of the parent virtual hard disk and differencing disk. VHD_CHILD_PARENT_SIZE_MISMATCH = 0xC03A0017, /// The chain of virtual hard disks is corrupted. /// A differencing disk is indicated in its own parent chain. VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED = 0xC03A0018, /// The chain of virtual hard disks is inaccessible. /// There was an error opening a virtual hard disk further up the chain. VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT = 0xC03A0019, _, };
lib/std/os/windows/ntstatus.zig
//! Abstract syntax tree for a shader language that is lowered to SPIR-V
//! elsewhere. All nodes are plain value types; child nodes that would make a
//! type recursively sized are stored behind `*const` pointers instead.
const spirv = @import("spirv.zig");

/// A parsed source file: a flat list of top-level declarations.
pub const File = []const Decl;

/// Any declaration, at the top level or nested inside a struct.
pub const Decl = union(enum) {
    fn_def: FnDef,
    type_def: TypeDef,
    var_def: VarDef,
};

/// A function definition.
pub const FnDef = struct {
    /// True when the function is declared as an entry point.
    entry: bool,
    name: Ident,
    args: []const Arg,
    /// Return type.
    ret: Type,
    body: Block,

    /// A single formal parameter.
    pub const Arg = struct {
        name: Ident,
        type: Type,
    };
};

/// A variable definition with an initializer.
pub const VarDef = struct {
    /// True for constant definitions, false for mutable variables.
    constant: bool,
    name: Ident,
    type: Type,
    value: Expr,
};

/// A named type alias.
pub const TypeDef = struct {
    name: Ident,
    type: Type,
};

/// Identifiers are unowned byte slices (presumably views into the source
/// text; ownership is not established here).
pub const Ident = []const u8;

/// A type expression.
pub const Type = union(enum) {
    /// Reference to a user-declared type by name.
    named: Ident,
    bool: void,
    int: IntType,
    float: FloatType,
    array: *const ArrayType,
    mat: *const MatrixType,
    ref: *const RefType,
    @"struct": *const StructType,
};

/// Integer type with arbitrary bit width.
pub const IntType = struct {
    signed: bool,
    bits: u64,
};

/// Floating-point type with arbitrary bit width.
pub const FloatType = struct {
    bits: u64,
};

pub const ArrayType = struct {
    /// Element count; null presumably means a runtime-sized array.
    size: ?u64,
    child: Type,
};

pub const MatrixType = struct {
    /// The two matrix dimensions (which index is rows vs. columns is not
    /// evident here).
    size: [2]u3,
    child: Type,
};

/// A reference type qualified by its storage kind.
pub const RefType = struct {
    kind: union(enum) {
        // TODO: some of these need more info. Also there's probably some missing
        input: u64,
        output: u64,
        uniform: u64,
        uniform_const: u64,
        storage: u64,
        builtin: Builtin,
    },
    child: Type,

    pub const Builtin = spirv.core.BuiltIn;
};

pub const StructType = struct {
    fields: []const Field,
    /// Declarations nested inside the struct.
    decls: []const Decl,

    pub const Field = struct {
        name: Ident,
        type: Type,
        /// Optional default value for the field.
        default: ?Expr,
    };
};

/// A statement.
pub const Stmt = union(enum) {
    assign: Assignment,
    block: Block,
    branch: Branch,
    call: FuncCall,
    control: Control,
    var_def: VarDef,
};

/// `left op= right`; a null `op` means plain assignment.
pub const Assignment = struct {
    left: Expr,
    op: ?Binary.Op,
    right: Expr,
};

/// A possibly labeled sequence of statements.
pub const Block = struct {
    label: ?Ident,
    body: []const Stmt,
};

/// Any branching construct.
pub const Branch = union(enum) {
    @"for": For,
    @"if": If,
    @"switch": Switch,
    @"while": While,
};

pub const If = struct {
    /// Condition.
    expr: Expr,
    body: Expr,
    @"else": ?Expr,
};

pub const Switch = struct {
    /// The scrutinee being switched on.
    expr: Expr,
    cases: []const Case,

    pub const Case = struct {
        expr: Expr,
        then: Expr,
    };
};

pub const For = struct {
    label: ?Ident,
    /// Presumably the expression being iterated over.
    expr: Expr,
    /// Name bound to each element.
    value: Ident,
    /// Optional name bound to the element index.
    index: ?Ident,
    body: Expr,
    @"else": ?Expr,
};

pub const While = struct {
    label: ?Ident,
    /// Loop condition.
    expr: Expr,
    body: Expr,
    @"else": ?Expr,
};

/// Control-transfer statements; payloads are optional result values.
pub const Control = union(enum) {
    @"break": ?Expr,
    @"continue": void,
    @"return": ?Expr,
};

/// An expression. Variants are behind pointers to keep the union small and to
/// break the recursive type cycle.
pub const Expr = union(enum) {
    block: *const Block,
    call: *const FuncCall,
    branch: *const Branch,
    unary: *const Unary,
    binary: *const Binary,
};

pub const FuncCall = struct {
    callee: Expr,
    args: []const Expr,
};

pub const Unary = struct {
    op: Op,
    expr: *const Expr,

    pub const Op = enum {
        not,
        invert,
    };
};

pub const Binary = struct {
    left: Expr,
    op: Op,
    right: Expr,

    pub const Op = enum {
        add,
        sub,
        mult,
        div,
        mod,
        lsh,
        rsh,
        @"and",
        @"or",
        eq,
        gt,
        lt,
        ge,
        le,
    };
};

test {
    @import("std").testing.refAllDecls(@This());
}
src/ast.zig
//! A symbol in the MachO linker. Concrete symbol kinds (`Regular`, `Proxy`,
//! `Unresolved`) embed this struct as their `base` field; the concrete kind is
//! recovered from a `*Symbol` via `cast`.
const Symbol = @This();

const std = @import("std");
const macho = std.macho;
const mem = std.mem;
const Allocator = mem.Allocator;

const Object = @import("Object.zig");

/// Discriminator for the concrete symbol kind embedding this base.
pub const Type = enum {
    regular,
    proxy,
    unresolved,
};

/// Symbol type.
@"type": Type,

/// Symbol name. Owned slice.
name: []u8,

/// Alias of.
alias: ?*Symbol = null,

/// Index in GOT table for indirection.
got_index: ?u32 = null,

/// Index in stubs table for late binding.
stubs_index: ?u32 = null,

/// A symbol with a known definition.
pub const Regular = struct {
    base: Symbol,

    /// Linkage type.
    linkage: Linkage,

    /// Symbol address.
    address: u64,

    /// Section ID where the symbol resides.
    section: u8,

    /// Whether the symbol is a weak ref.
    weak_ref: bool,

    /// File where to locate this symbol.
    file: *Object,

    /// Debug stab if defined.
    stab: ?struct {
        /// Stab kind
        kind: enum {
            function,
            global,
            static,
        },

        /// Size of the stab.
        size: u64,
    } = null,

    pub const base_type: Symbol.Type = .regular;

    pub const Linkage = enum {
        translation_unit,
        linkage_unit,
        global,
    };

    /// Returns true for temporary (assembler-local) symbols: symbols local to
    /// a translation unit whose name starts with 'l' or 'L'.
    pub fn isTemp(regular: *Regular) bool {
        if (regular.linkage == .translation_unit) {
            return mem.startsWith(u8, regular.base.name, "l") or mem.startsWith(u8, regular.base.name, "L");
        }
        return false;
    }
};

/// A symbol imported from a dynamic library.
pub const Proxy = struct {
    base: Symbol,

    /// Dylib ordinal.
    dylib: u16,

    pub const base_type: Symbol.Type = .proxy;
};

/// A symbol that has been referenced but not yet resolved to a definition.
pub const Unresolved = struct {
    base: Symbol,

    /// File where this symbol was referenced.
    file: *Object,

    pub const base_type: Symbol.Type = .unresolved;
};

/// Frees the owned `name` slice. Note the `Symbol` itself is not deallocated
/// here.
pub fn deinit(base: *Symbol, allocator: *Allocator) void {
    allocator.free(base.name);
}

/// Downcasts the base pointer to the concrete kind `T` (`Regular`, `Proxy` or
/// `Unresolved`). Returns null when the tag does not match `T.base_type`.
pub fn cast(base: *Symbol, comptime T: type) ?*T {
    if (base.@"type" != T.base_type) {
        return null;
    }
    return @fieldParentPtr(T, "base", base);
}

/// Follows the `alias` chain to its end and returns the final symbol; returns
/// this symbol when it has no alias.
pub fn getTopmostAlias(base: *Symbol) *Symbol {
    if (base.alias) |alias| {
        return alias.getTopmostAlias();
    }
    return base;
}

/// Whether the nlist entry is a debug (stab) symbol.
pub fn isStab(sym: macho.nlist_64) bool {
    return (macho.N_STAB & sym.n_type) != 0;
}

/// Whether the nlist entry is marked private external.
pub fn isPext(sym: macho.nlist_64) bool {
    return (macho.N_PEXT & sym.n_type) != 0;
}

/// Whether the nlist entry is marked external.
pub fn isExt(sym: macho.nlist_64) bool {
    return (macho.N_EXT & sym.n_type) != 0;
}

/// Whether the nlist entry is defined in a section (N_SECT).
pub fn isSect(sym: macho.nlist_64) bool {
    const type_ = macho.N_TYPE & sym.n_type;
    return type_ == macho.N_SECT;
}

/// Whether the nlist entry is undefined (N_UNDF).
pub fn isUndf(sym: macho.nlist_64) bool {
    const type_ = macho.N_TYPE & sym.n_type;
    return type_ == macho.N_UNDF;
}

/// Whether the nlist entry is a weak definition.
pub fn isWeakDef(sym: macho.nlist_64) bool {
    return (sym.n_desc & macho.N_WEAK_DEF) != 0;
}

/// Whether the nlist entry is a weak reference.
pub fn isWeakRef(sym: macho.nlist_64) bool {
    return (sym.n_desc & macho.N_WEAK_REF) != 0;
}
src/link/MachO/Symbol.zig
const builtin = @import("builtin"); const assert = @import("std").debug.assert; /// Call a Non-Secure function. pub fn nonSecureCall(func: var, r0: usize, r1: usize, r2: usize, r3: usize) usize { @setAlignStack(8); const target = if (@TypeOf(func) == usize) func else @ptrToInt(func); // Specifying Armv8-M in `build.zig` won't work for some reason, so we have // to specify the architecture here using the `.cpu` directive return asm volatile ( \\ .cpu cortex-m33 \\ \\ # r7 is reserved (what?) and cannot be added to the clobber list \\ # r6 is not, but makes sure SP is aligned to 8-byte boundaries \\ push {r6, r7} \\ \\ # Clear unbanked registers to remove confidential information. \\ # The compiler automatically saves their contents if they are needed. \\ mov r5, r4 \\ mov r6, r4 \\ mov r7, r4 \\ mov r8, r4 \\ mov r9, r4 \\ mov r10, r4 \\ mov r11, r4 \\ mov r12, r4 \\ \\ # Lazily save floating-point registers \\ sub sp, #136 \\ vlstm sp \\ msr apsr, r0 \\ \\ # Call the target \\ blxns r4 \\ \\ # Restore floating-point registers \\ vlldm sp \\ add sp, #136 \\ \\ pop {r6, r7} : [ret] "={r0}" (-> usize) : [r0] "{r0}" (r0), [r1] "{r1}" (r1), [r2] "{r2}" (r2), [r3] "{r3}" (r3), [func] "{r4}" (target & ~@as(usize, 1)) : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r8", "r9", "r10", "r11", "r12" ); } /// Generate a `TT` instruction.. pub inline fn tt(p: var) AddressInfo { return AddressInfo{ .value = asm volatile ( \\ .cpu cortex-m33 \\ tt %0, %1 : [out] "=r" (-> u32) : [p] "r" (p) ), }; } /// Generate a `TT` instruction with the `A` flag (alternate security mode). pub inline fn tta(p: var) AddressInfo { return AddressInfo{ .value = asm volatile ( \\ .cpu cortex-m33 \\ tta %[out], %[p] : [out] "=r" (-> u32) : [p] "r" (p) ), }; } /// Generate a `TT` instruction with the `A` flag (alternate security mode) and /// `T` flag (non-privileged). 
pub inline fn ttat(p: var) AddressInfo { return AddressInfo{ .value = asm volatile ( \\ .cpu cortex-m33 \\ ttat %[out], %[p] : [out] "=r" (-> u32) : [p] "r" (p) ), }; } /// Check Non-Secure access permissions for an object of type `ty` at `ptr`. /// /// Returns `@intToPtr(*volatile ty, ptr)` if the check succeeds; otherwise, /// an error value. pub inline fn checkObject(comptime ty: type, ptr: usize, options: CheckOptions) CheckObjectError!*volatile ty { // Check alignment const alignment = @alignOf(ty); if ((ptr & (alignment - 1)) != 0) { return CheckObjectError.Misaligned; } // Check access try checkAddressRange(ptr, @sizeOf(ty), options); return @intToPtr(*volatile ty, ptr); } pub const CheckObjectError = error{Misaligned} || CheckError; /// Check Non-Secure access permissions for a slice of element type `ty` /// starting at `ptr`, containing `count` elements. /// /// Returns a slice of type `[]volatile ty` if the check succeeds; otherwise, /// an error value.. pub inline fn checkSlice(comptime ty: type, ptr: usize, count: usize, options: CheckOptions) CheckSliceError![]volatile ty { // Check alignment const alignment = @alignOf(ty); if ((ptr & (alignment - 1)) != 0) { return CheckSliceError.Misaligned; } // Check size var size: usize = undefined; if (@mulWithOverflow(usize, count, @sizeOf(ty), &size)) { return CheckSliceError.SizeTooLarge; } // Check access try checkAddressRange(ptr, size, options); return @intToPtr([*]volatile ty, ptr)[0..size]; } pub const CheckSliceError = error{ Misaligned, SizeTooLarge, } || CheckError; /// Check Non-Secure access permissions for the specified address range. /// /// Returns an error value if the check fail; otherwise, `{}`. 
/// /// This roughly follows the address range check intrinsic described in: /// “ARM®v8-M Security Extensions: Requirements on Development Tools” pub inline fn checkAddressRange(ptr: var, size: usize, options: CheckOptions) CheckError!void { const start = if (@TypeOf(ptr) == usize) ptr else @ptrToInt(ptr); var end: usize = start; if (size > 0 and @addWithOverflow(usize, start, size - 1, &end)) { // The check should fail if the address range wraps around return CheckError.WrapsAround; } // TODO: Not sure how to handle `size == 0`. To be safe, we currently treat it as `1` const info1 = if (options.unpriv) ttat(start) else tta(start); const info2 = if (size > 1) (if (options.unpriv) ttat(end) else tta(end)) else (info1); // The chcek should fail if the range crosses any SAU/IDAU/MPU region // boundary if (info1.value != info2.value) { return CheckError.CrossesRegionBoundary; } const ok = if (options.readwrite) (info1.flags.nonsecure_readwrite_ok) else (info1.flags.nonsecure_read_ok); return if (ok) {} else CheckError.Forbidden; } pub const CheckError = error{ WrapsAround, CrossesRegionBoundary, Forbidden, }; pub const CheckOptions = struct { /// Checks if the permissions have the `readwrite_ok` field set. readwrite: bool = false, /// Retrieves the unprivileged mode access permissions. unpriv: bool = false, }; /// The address information returned by a `TT` instruction. 
pub const AddressInfo = packed union { flags: AddressInfoFlags, value: u32, }; const AddressInfoFlags = packed struct { mpu_region: u8, sau_region: u8, mpu_region_valid: bool, sau_region_valid: bool, read_ok: bool, readwrite_ok: bool, nonsecure_read_ok: bool, nonsecure_readwrite_ok: bool, secure: bool, idau_region_valid: bool, idau_region: u8, }; test "AddressInfo is word-sized" { assert(@sizeOf(AddressInfo) == 4); } test "AddressInfo has the correct layout" { assert(@bitOffsetOf(AddressInfoFlags, "mpu_region_valid") == 16); assert(@bitOffsetOf(AddressInfoFlags, "read_ok") == 18); assert(@bitOffsetOf(AddressInfoFlags, "nonsecure_read_ok") == 20); assert(@bitOffsetOf(AddressInfoFlags, "secure") == 22); assert(@bitOffsetOf(AddressInfoFlags, "idau_region") == 24); } /// Export a Non-Secure-callable function. /// /// This function tries to achieve the effect similar to that of /// `__attribute__((cmse_nonsecure_entry))`. It does not utilize the special /// symbol `__acle_se_*` since it's probably not supported by the vanilla `lld`. /// (TODO: needs confirmation) /// It only supports a particular combination of parameter and return types. /// Handling other combinations, especially those involving parameter passing /// on the stack, is very difficult to implement here. /// /// This comptime function generates a veneer function in the `.gnu.sgstubs` /// section. The section must be configured as a Non-Secure-callable region for /// it to be actually callable from Non-Secure. /// The function also creates a marker symbol named by prepending the function /// name with `__acle_se_` to instruct the linker to include the function in the /// CMSE import library. /// /// On return, it clears caller-saved registers (to prevent the leakage of /// confidential information). (TODO: Clear FP registers) /// /// See “ARM®v8-M Security Extensions: Requirements on Development Tools” for /// other guidelines regarding the use of Non-Secure-callable functions. 
pub fn exportNonSecureCallable(comptime name: []const u8, comptime func: extern fn (usize, usize, usize, usize) usize) void {
    const Veneer = struct {
        fn veneer() callconv(.Naked) void {
            // Work-around for a code generation issue in ReleaseSmall builds
            @setRuntimeSafety(false);

            // See another comment regarding `.cpu`
            asm volatile (
                \\ .cpu cortex-m33
                \\
                \\ # Mark this function as a valid entry point.
                \\ sg
                \\
                \\ push {r4, lr}
                : // no output
                : // no input
                  // Actually we don't modify it. This is needed
                  // to make sure other instructions (specifically, the load of
                  // `func`) aren't moved to the front of `sg`.
                : "memory", "r4"
            );
            asm volatile (
                \\ .cpu cortex-m33
                \\
                \\ # Call the original function
                \\ blx %[func]
                \\
                \\ pop {r4, lr}
                \\
                \\ # Clear caller-saved registers
                \\ # TODO: clear FP registers
                \\ mov r1, lr
                \\ mov r2, lr
                \\ mov r3, lr
                \\ mov ip, lr
                \\ msr apsr, lr
                \\
                \\ # Return
                \\ bxns lr
                : // no output
                : [func] "{r4}" (func)
            );
            unreachable;
        }
    };

    // Export the veneer twice: once under the public name, once under the
    // `__acle_se_`-prefixed marker name (see the doc comment above).
    @export(Veneer.veneer, .{ .name = name, .linkage = .Strong, .section = ".gnu.sgstubs" });
    @export(Veneer.veneer, .{ .name = "__acle_se_" ++ name, .linkage = .Strong, .section = ".gnu.sgstubs" });
}

/// Security Attribution Unit.
pub const Sau = struct {
    /// MMIO base address of the SAU register block.
    base: usize,

    const Self = @This();

    /// Construct an `Sau` object using the specified MMIO base address.
    pub fn withBase(base: usize) Self {
        return Self{ .base = base };
    }

    // Register Accessors
    // -----------------------------------------------------------------------

    /// Control Register (offset 0x0).
    pub fn regCtrl(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base);
    }

    pub const CTRL_ENABLE: u32 = 1 << 0;
    pub const CTRL_ALLNS: u32 = 1 << 1;

    /// Region Base Address Register (offset 0xc).
    pub fn regRbar(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0xc);
    }

    /// Region Limit Address Register (offset 0x10).
    pub fn regRlar(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x10);
    }

    pub const RLAR_ENABLE: u32 = 1 << 0;
    pub const RLAR_NSC: u32 = 1 << 1;

    /// Region Number Register (offset 0x8).
    pub fn regRnar(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x8);
    }

    /// Type Register (offset 0x4).
    pub fn regType(self: Self) *volatile u32 {
        return @intToPtr(*volatile u32, self.base + 0x4);
    }

    // Helper Functions
    // -----------------------------------------------------------------------

    /// Configure a single SAU region using `SauRegion`.
    ///
    /// Both `region.start` and `region.end` must be 32-byte aligned
    /// (enforced with `unreachable`).
    pub fn setRegion(self: Self, i: u8, region: SauRegion) void {
        if ((region.start & 31) != 0) {
            unreachable;
        }
        if ((region.end & 31) != 0) {
            unreachable;
        }
        self.regRnar().* = i;
        self.regRlar().* = 0; // Disable the region first
        self.regRbar().* = region.start;
        // The limit register holds the last 32-byte block of the region,
        // hence `end - 32` for an exclusive `end`.
        self.regRlar().* = (region.end - 32) | RLAR_ENABLE | if (region.nsc) RLAR_NSC else 0;
    }
};

/// Represents an instance of Security Attribution Unit.
pub const sau = Sau.withBase(0xe000edd0);

/// Describes a single SAU region.
pub const SauRegion = struct {
    /// The start address. Must be aligned to 32-byte blocks.
    start: u32,

    /// The end address (exclusive). Must be aligned to 32-byte blocks.
    end: u32,

    /// Non-Secure callable.
    nsc: bool = false,
};
src/drivers/arm_cmse.zig
const std = @import("std");
const zlm = @import("zlm");
const gl = @import("gl.zig");
const Renderer = @import("renderer.zig").Renderer;
const util = @import("../util.zig");

const glut = @cImport(@cInclude("GL/freeglut.h"));
const glfw = @cImport(@cInclude("GLFW/glfw3.h"));

/// Mouse buttons, numbered to match GLFW's button constants
/// (NOTE(review): GLFW_MOUSE_BUTTON_LEFT is 0 — these values look 1-based;
/// confirm against how @enumToInt(button) is consumed in isMouseDown).
pub const MouseButton = enum(u3) {
    left = 1,
    middle = 2,
    right = 3,
};

// Reference count of live Window instances sharing the GLFW library state.
var glfw_users: usize = 0;
// True until glutInit has been run once to load GL function pointers.
var gl_needs_initialization: bool = true;

// Initializes GLFW on the first call; subsequent calls only bump the refcount.
fn incGLFWRef() !void {
    if (glfw_users == 0) {
        if (glfw.glfwInit() == 0) {
            return error.GLFWError;
        }
    }
    glfw_users += 1;
}

// One-time OpenGL loader setup. Must run with a current GL context.
fn setupOpenGL() void {
    if (gl_needs_initialization) {
        // load gl via freeglut
        var fake_argc: c_int = 0;
        var fake_argv: [*c]u8 = undefined;
        glut.glutInit(&fake_argc, &fake_argv);
        gl_needs_initialization = false;
    }
}

// Drops one GLFW reference; terminates the library when the last user leaves.
fn decGLFWRef() void {
    glfw_users -= 1;
    if (glfw_users == 0) {
        glfw.glfwTerminate();
    }
}

// Drains and prints every queued GLFW error to stderr.
fn logGLFWErrors() void {
    var description: ?[*:0]u8 = null;
    while (true) {
        const err = glfw.glfwGetError(&description);
        if (err == glfw.GLFW_NO_ERROR) {
            break;
        }
        if (description) |desc| {
            std.debug.print("GLFW error code {}: '{s}'\n", .{ err, desc });
        } else {
            std.debug.print("GLFW error code {}\n", .{err});
        }
    }
}

// Recovers our *Window from the GLFW user pointer set in Window.init.
fn getWindow(glfw_win: ?*glfw.GLFWwindow) *Window {
    return @ptrCast(*Window, @alignCast(@alignOf(Window), glfw.glfwGetWindowUserPointer(glfw_win)));
}

// C callback: records the latest vertical scroll offset on the Window.
fn onScroll(glfw_win: ?*glfw.GLFWwindow, x: f64, y: f64) callconv(.C) void {
    const self = getWindow(glfw_win);
    self.scroll = @floatCast(f32, y);
}

// C callback: keeps the cached size in sync and refreshes the GL viewport.
fn onResize(glfw_win: ?*glfw.GLFWwindow, w: c_int, h: c_int) callconv(.C) void {
    const self = getWindow(glfw_win);
    self.width = @intCast(u31, w);
    self.height = @intCast(u31, h);
    util.errorBoundary(self.updateViewport());
}

// TODO make the code *potentially* handle multiple windows, for correctness
/// Owns one GLFW window plus the per-window state that callbacks need.
/// Heap-allocated so the GLFW user pointer stays stable for its lifetime.
pub const Window = struct {
    // handle to GLFW window
    glfw_win: *glfw.GLFWwindow,
    // attached renderer, nullable as Renderer depends on us being ready first, but we want Renderer so we can update the viewport...
    // TODO rework this system due to the entanglement of different structures here?
    renderer: ?*const Renderer,
    // last seen scroll amount
    scroll: f32,
    // width of window
    width: u31,
    // height of window
    height: u31,

    /// Creates the window, makes its GL context current, installs callbacks,
    /// and heap-allocates the Window so GLFW's user pointer can reference it.
    /// Caller owns the result; free with `deinit`.
    pub fn init(allocator: *std.mem.Allocator, width: u31, height: u31) !*Window {
        // ensure glfw (and opengl) are initialized
        try incGLFWRef();
        errdefer decGLFWRef();
        // request version 3.3 core for OpenGL
        glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MAJOR, 3);
        glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MINOR, 3);
        glfw.glfwWindowHint(glfw.GLFW_OPENGL_PROFILE, glfw.GLFW_OPENGL_CORE_PROFILE);
        // create a GLFW window
        const glfw_win = glfw.glfwCreateWindow(width, height, "hello world", null, null) orelse return error.GLFWError;
        errdefer glfw.glfwDestroyWindow(glfw_win);
        // set the context
        glfw.glfwMakeContextCurrent(glfw_win);
        // we must run this *after* the context is created
        setupOpenGL();
        // set a scroll wheel callback because we can't query scroll wheel state whenever we want
        _ = glfw.glfwSetScrollCallback(glfw_win, onScroll);
        // set a window resize callback to keep window size up-to-date
        _ = glfw.glfwSetFramebufferSizeCallback(glfw_win, onResize);
        // allocate ourselves on the heap, to set ourselves as the user pointer
        const self = try allocator.create(Window);
        errdefer allocator.destroy(self);
        self.* = .{
            .glfw_win = glfw_win,
            .renderer = null,
            .scroll = 0,
            .width = width,
            .height = height,
        };
        // put us in the user pointer
        glfw.glfwSetWindowUserPointer(glfw_win, self);
        return self;
    }

    /// Destroys the GLFW window, drops the library refcount, frees self.
    /// `allocator` must be the one passed to `init`.
    pub fn deinit(self: *Window, allocator: *std.mem.Allocator) void {
        glfw.glfwDestroyWindow(self.glfw_win);
        decGLFWRef();
        allocator.destroy(self);
    }

    /// Re-applies the GL viewport from the cached size and, if a renderer
    /// is attached, pushes the new aspect ratio to its projection.
    pub fn updateViewport(self: Window) !void {
        gl.viewport(0, 0, self.width, self.height);
        if (self.renderer) |r| {
            try r.updateProjection(@intToFloat(f32, self.width) / @intToFloat(f32, self.height));
        }
    }

    /// Presents the back buffer.
    pub fn swapBuffers(self: Window) void {
        glfw.glfwSwapBuffers(self.glfw_win);
    }

    /// True once the user has requested the window to close.
    pub fn shouldClose(self: Window) bool {
        return glfw.glfwWindowShouldClose(self.glfw_win) != 0;
    }

    /// Pumps GLFW's event queue (fires the registered callbacks).
    pub fn pollEvents() void {
        glfw.glfwPollEvents();
    }

    /// Current cursor position in window coordinates.
    pub fn getCursor(self: Window) zlm.Vec2 {
        var x: f64 = undefined;
        var y: f64 = undefined;
        glfw.glfwGetCursorPos(self.glfw_win, &x, &y);
        return zlm.vec2(@floatCast(f32, x), @floatCast(f32, y));
    }

    /// True while `button` is held down.
    pub fn isMouseDown(self: Window, button: MouseButton) bool {
        return glfw.glfwGetMouseButton(self.glfw_win, @enumToInt(button)) == glfw.GLFW_PRESS;
    }

    /// True while `key` is held down. `key` may be an enum literal such as
    /// `.escape` (upper-cased at comptime into the GLFW_KEY_* constant name)
    /// or a raw GLFW key integer.
    pub fn isKeyDown(self: Window, key: anytype) bool {
        // just so i don't have to make an enum for *all* of them, we'll use a metaprogramming trick (hack?)
        // i'd autogenerate an enum but there's todos in the stage 1 compiler that prevent that
        const int_key = switch (@typeInfo(@TypeOf(key))) {
            .EnumLiteral => blk: {
                comptime var name = [_]u8{0} ** @tagName(key).len;
                inline for (@tagName(key)) |c, i| {
                    name[i] = comptime std.ascii.toUpper(c);
                }
                break :blk @field(glfw, "GLFW_KEY_" ++ name);
            },
            else => key,
        };
        return glfw.glfwGetKey(self.glfw_win, int_key) == glfw.GLFW_PRESS;
    }
};
src/render/window.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
const BufMap = std.BufMap;
const testing = std.testing;

const input_file = "input04.txt";

// All field keys a passport record may carry ("cid" is not required).
const fields = [_][]const u8{
    "byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid", "cid",
};

// The eye colors accepted for the "ecl" field.
const eye_colors = [_][]const u8{
    "amb", "blu", "brn", "gry", "grn", "hzl", "oth",
};

/// Returns true when `passport` contains every required field and each
/// value passes its range/format rule (AoC 2020 day 4, part 2).
fn isValid(passport: BufMap) bool {
    // byr: four-digit birth year, 1920..2002.
    const byr_raw = passport.get("byr") orelse return false;
    const byr = std.fmt.parseInt(u32, byr_raw, 10) catch return false;
    if (byr < 1920 or byr > 2002) return false;

    // iyr: issue year, 2010..2020.
    const iyr_raw = passport.get("iyr") orelse return false;
    const iyr = std.fmt.parseInt(u32, iyr_raw, 10) catch return false;
    if (iyr < 2010 or iyr > 2020) return false;

    // eyr: expiration year, 2020..2030.
    const eyr_raw = passport.get("eyr") orelse return false;
    const eyr = std.fmt.parseInt(u32, eyr_raw, 10) catch return false;
    if (eyr < 2020 or eyr > 2030) return false;

    // hgt: height with unit suffix, 150..193 cm or 59..76 in.
    const hgt = passport.get("hgt") orelse return false;
    if (std.mem.endsWith(u8, hgt, "cm")) {
        const amt = std.fmt.parseInt(u32, hgt[0 .. hgt.len - 2], 10) catch return false;
        if (amt < 150 or amt > 193) return false;
    } else if (std.mem.endsWith(u8, hgt, "in")) {
        const amt = std.fmt.parseInt(u32, hgt[0 .. hgt.len - 2], 10) catch return false;
        if (amt < 59 or amt > 76) return false;
    } else return false;

    // hcl: '#' followed by exactly six lowercase hex digits.
    const hcl = passport.get("hcl") orelse return false;
    if (hcl.len != 7) return false;
    if (hcl[0] != '#') return false;
    for (hcl[1..]) |x| {
        const is_digit = x >= '0' and x <= '9';
        const is_a_to_f = x >= 'a' and x <= 'f';
        if (!is_digit and !is_a_to_f) return false;
    }

    // ecl: must be one of the known eye colors (for-else: falls through
    // to `return false` only when no `break` fired).
    const ecl = passport.get("ecl") orelse return false;
    for (eye_colors) |clr| {
        if (std.mem.eql(u8, clr, ecl)) break;
    } else return false;

    // pid: exactly nine decimal digits (leading zeros allowed).
    const pid = passport.get("pid") orelse return false;
    if (pid.len != 9) return false;
    for (pid[0..]) |x| {
        if (x < '0' or x > '9') return false;
    }

    return true;
}

test "is valid" {
    const allocator = std.testing.allocator;
    var passport = std.BufMap.init(allocator);
    defer passport.deinit();
    // ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
    // byr:1937 iyr:2017 cid:147 hgt:183cm
    try passport.set("ecl", "gry");
    try passport.set("pid", "860033327");
    try passport.set("eyr", "2020");
    try passport.set("hcl", "#fffffd");
    try passport.set("byr", "1937");
    try passport.set("iyr", "2017");
    try passport.set("cid", "147");
    try passport.set("hgt", "183cm");
    try testing.expect(isValid(passport));
}

test "not valid" {
    const allocator = std.testing.allocator;
    var passport = std.BufMap.init(allocator);
    defer passport.deinit();
    // eyr:1972 cid:100
    // hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
    try passport.set("eyr", "1972");
    try passport.set("cid", "100");
    try passport.set("hcl", "#18171d");
    try passport.set("ecl", "amb");
    try passport.set("hgt", "170");
    try passport.set("pid", "186cm");
    try passport.set("iyr", "2018");
    try passport.set("byr", "1926");
    try testing.expect(!isValid(passport));
}

/// Parses blank-line-separated passport records; each record is a set of
/// space-separated `key:value` items, possibly spanning several lines.
/// Caller owns the returned slice and every BufMap in it; release with
/// `freePassports`. Returns `error.WrongFormat` on a malformed item.
fn parsePassports(allocator: *Allocator, str: []const u8) ![]BufMap {
    var result = std.ArrayList(BufMap).init(allocator);
    // Clean up everything already accumulated if any later step fails
    // (previously the maps leaked on the error path).
    errdefer {
        for (result.items) |*p| p.deinit();
        result.deinit();
    }
    var current_passport = BufMap.init(allocator);
    errdefer current_passport.deinit();
    var iter = std.mem.split(str, "\n");
    while (iter.next()) |line| {
        if (line.len == 0) {
            // A blank line terminates the current record.
            try result.append(current_passport);
            current_passport = BufMap.init(allocator);
        } else {
            var item_iter = std.mem.split(line, " ");
            while (item_iter.next()) |item| {
                var part_iter = std.mem.split(item, ":");
                const key = part_iter.next() orelse return error.WrongFormat;
                const value = part_iter.next() orelse return error.WrongFormat;
                try current_passport.set(key, value);
            }
        }
    }
    // BUG FIX: input that does not end with a blank line used to silently
    // drop its final passport. Flush the trailing record if it has fields.
    if (current_passport.count() > 0) {
        try result.append(current_passport);
    } else {
        current_passport.deinit();
    }
    return result.toOwnedSlice();
}

/// Releases a slice previously returned by `parsePassports`.
fn freePassports(allocator: *Allocator, passports: []BufMap) void {
    for (passports) |*x| x.deinit();
    allocator.free(passports);
}

test "parse passports" {
    const allocator = std.testing.allocator;
    const str =
        \\ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
        \\byr:1937 iyr:2017 cid:147 hgt:183cm
        \\
        \\iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
        \\hcl:#cfa07d byr:1929
        \\
        \\hcl:#ae17e1 iyr:2013
        \\eyr:2024
        \\ecl:brn pid:760753108 byr:1931
        \\hgt:179cm
        \\
        \\hcl:#cfa07d eyr:2025 pid:166559648
        \\iyr:2011 ecl:brn hgt:59in
        \\
    ;
    const parsed = try parsePassports(allocator, str);
    defer freePassports(allocator, parsed);
    try testing.expectEqual(@as(usize, 4), parsed.len);
    try testing.expectEqualSlices(u8, "gry", parsed[0].get("ecl").?);
    try testing.expectEqualSlices(u8, "1931", parsed[2].get("byr").?);
    try testing.expectEqual(@as(?[]const u8, null), parsed[0].get("asdf"));
}

test "parse passports without trailing blank line" {
    // Regression test: the final record must survive even when the input
    // does not end with a blank separator line.
    const allocator = std.testing.allocator;
    const parsed = try parsePassports(allocator, "ecl:gry pid:860033327\nbyr:1937");
    defer freePassports(allocator, parsed);
    try testing.expectEqual(@as(usize, 1), parsed.len);
    try testing.expectEqualSlices(u8, "1937", parsed[0].get("byr").?);
}

/// Reads `input_file`, parses all passports, and prints how many are valid.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = &gpa.allocator;

    var file = try std.fs.cwd().openFile(input_file, .{});
    defer file.close();
    const reader = file.reader();
    const content = try reader.readAllAlloc(allocator, 4 * 1024 * 1024);
    defer allocator.free(content);

    const passports = try parsePassports(allocator, content);
    defer freePassports(allocator, passports);

    var valid_passports: u32 = 0;
    for (passports) |p| {
        if (isValid(p)) valid_passports += 1;
    }
    std.debug.print("valid passports: {}\n", .{valid_passports});
}
src/04_2.zig
const std = @import("std");
const io = std.io;
const mem = std.mem;
const debug = std.debug;

const types = @import("types.zig");
const serial = @import("json_serialize.zig");

/// Errors produced while framing/deframing LSP base-protocol messages.
pub const LspError = error{
    InvalidHeader,
    HeaderFieldTooLong,
    MessageTooLong,
    PrematureEndOfStream,
};

/// Reads one LSP message: parses the `Header: value\r\n` block up to the
/// blank line, then reads exactly Content-Length bytes of JSON payload.
/// Caller owns the returned buffer (allocated from `alloc`).
/// Header lines longer than the 4 KiB scratch buffer fail with
/// `HeaderFieldTooLong`.
pub fn readMessageAlloc(stream: var, alloc: *mem.Allocator) ![]u8 {
    var json_len: ?usize = null;
    var buffer: [4 * 1024]u8 = undefined;

    // read header
    while (true) {
        // Header lines end in "\r\n"; read up to '\r', then discard '\n'.
        const line = stream.readUntilDelimiterOrEof(buffer[0..], '\r') catch |err| switch (err) {
            error.StreamTooLong => return LspError.HeaderFieldTooLong,
            else => return err,
        } orelse return error.PrematureEndOfStream;
        try stream.skipUntilDelimiterOrEof('\n');

        if (line.len == 0) {
            // we read "\r\n"
            break; // end of header
        }

        // Only "Content-Length" is interpreted; other fields are ignored.
        const pos = mem.indexOf(u8, line, ": ") orelse return LspError.InvalidHeader;
        if (mem.eql(u8, line[0..pos], "Content-Length")) {
            json_len = try std.fmt.parseInt(usize, line[pos + 2 ..], 10);
        }
    }

    // Content-Length is mandatory in the LSP base protocol.
    if (json_len == null) {
        return LspError.InvalidHeader;
    }

    var jsonStr = try alloc.alloc(u8, json_len.?);
    errdefer alloc.free(jsonStr);
    const bytes_read = try stream.readAll(jsonStr);
    if (bytes_read != json_len.?) {
        return LspError.PrematureEndOfStream;
    }
    return jsonStr;
}

/// Writes `jsonStr` with the LSP Content-Length framing header.
pub fn writeMessage(stream: var, jsonStr: []const u8) !void {
    try stream.print("Content-Length: {}\r\n\r\n{}", .{ jsonStr.len, jsonStr });
}

/// Builds a method-name -> handler dispatcher bound to a `*ContextType`.
/// Handlers are registered as comptime callbacks and stored type-erased
/// behind a uniform `Method` signature.
pub fn Dispatcher(comptime ContextType: type) type {
    return struct {
        const Self = @This();

        // method name -> type-erased handler
        map: Map,
        // user context forwarded as the first handler argument
        context: *ContextType,
        alloc: *mem.Allocator,

        pub const DispatchError = error{MethodNotFound};
        // Mirrors the JSON-RPC error categories the server reports.
        pub const MethodError = error{
            InvalidParams,
            InvalidRequest,
            InternalError,
        };
        pub const Error = DispatchError || MethodError;

        pub const Method = fn (*ContextType, types.Request, *mem.Allocator) anyerror!void;
        pub const Map = std.StringHashMap(Method);

        pub fn init(context: *ContextType, alloc: *mem.Allocator) Self {
            return Self{
                .map = Map.init(alloc),
                .context = context,
                .alloc = alloc,
            };
        }

        pub fn deinit(self: *Self) void {
            self.map.deinit();
        }

        /// Registers a request handler (a message that carries an id).
        /// `callback` receives the deserialized `ArgType` params plus the
        /// request id so a response can be correlated.
        pub fn registerRequest(self: *Self, method: []const u8, comptime ArgType: type, comptime callback: fn (*ContextType, ArgType, types.RequestId) anyerror!void) !void {
            // Adapter: wraps the typed callback behind the uniform Method
            // signature, deserializing params and validating the id kind.
            const methodStruct = struct {
                pub fn f(ctx: *ContextType, req: types.Request, alloc: *mem.Allocator) anyerror!void {
                    // Requests must carry an id (notifications do not).
                    if (req.id != .Defined) {
                        return MethodError.InvalidRequest;
                    }
                    var deserialized = serial.deserialize(ArgType, req.params, alloc) catch |err| switch (err) {
                        error.OutOfMemory => return MethodError.InternalError,
                        else => return MethodError.InvalidParams,
                    };
                    defer deserialized.deinit();
                    try callback(ctx, deserialized.result, req.id.Defined);
                }
            };
            _ = try self.map.put(method, methodStruct.f);
        }

        /// Registers a notification handler (a message without an id).
        pub fn registerNotification(self: *Self, method: []const u8, comptime ArgType: type, comptime callback: fn (*ContextType, ArgType) anyerror!void) !void {
            const methodStruct = struct {
                pub fn f(ctx: *ContextType, req: types.Request, alloc: *mem.Allocator) anyerror!void {
                    // Notifications must NOT carry an id.
                    if (req.id != .NotDefined) {
                        return MethodError.InvalidRequest;
                    }
                    var deserialized = serial.deserialize(ArgType, req.params, alloc) catch |err| switch (err) {
                        error.OutOfMemory => return MethodError.InternalError,
                        else => return MethodError.InvalidParams,
                    };
                    defer deserialized.deinit();
                    try callback(ctx, deserialized.result);
                }
            };
            _ = try self.map.put(method, methodStruct.f);
        }

        /// Looks up the handler for `message.method` and invokes it,
        /// folding handler failures into the JSON-RPC error categories.
        pub fn dispatch(self: *Self, message: types.Request) Error!void {
            debug.warn("{}\n", .{message.method});
            if (self.map.getValue(message.method)) |method| {
                method(self.context, message, self.alloc) catch |err| switch (err) {
                    error.InvalidParams => return error.InvalidParams,
                    error.InvalidRequest => return error.InvalidRequest,
                    else => return error.InternalError,
                };
            } else {
                return error.MethodNotFound;
            }
        }
    };
}
src/protocol.zig
const std = @import("std");
const expect = std.testing.expect;

// Behavior tests for optionals (?T) and null. The exact syntactic forms
// used here are deliberate — they are the constructs under test.

test "optional type" {
    // if-capture unwraps the payload of a non-null optional.
    const x: ?bool = true;
    if (x) |y| {
        if (y) {
            // OK
        } else {
            unreachable;
        }
    } else {
        unreachable;
    }

    // orelse supplies a default when the optional is null.
    const next_x: ?i32 = null;
    const z = next_x orelse 1234;
    try expect(z == 1234);

    // orelse's right-hand side is skipped when a payload exists.
    const final_x: ?i32 = 13;
    const num = final_x orelse unreachable;
    try expect(num == 13);
}

test "test maybe object and get a pointer to the inner value" {
    // |*b| captures a pointer to the payload, allowing in-place mutation.
    var maybe_bool: ?bool = true;
    if (maybe_bool) |*b| {
        b.* = false;
    }
    try expect(maybe_bool.? == false);
}

test "rhs maybe unwrap return" {
    // orelse may return from the enclosing function.
    const x: ?bool = true;
    const y = x orelse return;
    _ = y;
}

test "maybe return" {
    // Exercised at both runtime and comptime.
    try maybeReturnImpl();
    comptime try maybeReturnImpl();
}

fn maybeReturnImpl() !void {
    try expect(foo(1235).?);
    if (foo(null) != null) unreachable;
    try expect(!foo(1234).?);
}

// orelse-return-null short-circuits; otherwise compares against 1234.
fn foo(x: ?i32) ?bool {
    const value = x orelse return null;
    return value > 1234;
}

test "null literal outside function" {
    // Optionals in global (container-level) constants compare against null.
    const is_null = here_is_a_null_literal.context == null;
    try expect(is_null);
    const is_non_null = here_is_a_null_literal.context != null;
    try expect(!is_non_null);
}

const SillyStruct = struct {
    context: ?i32,
};

const here_is_a_null_literal = SillyStruct{ .context = null };

test "test null runtime" {
    try testTestNullRuntime(null);
}

fn testTestNullRuntime(x: ?i32) !void {
    // Both the positive and the negated comparison forms.
    try expect(x == null);
    try expect(!(x != null));
}

test "optional void" {
    // ?void: a zero-bit payload can still be present or absent.
    try optionalVoidImpl();
    comptime try optionalVoidImpl();
}

fn optionalVoidImpl() !void {
    try expect(bar(null) == null);
    try expect(bar({}) != null);
}

fn bar(x: ?void) ?void {
    if (x) |_| {
        return {};
    } else {
        return null;
    }
}

const StructWithOptional = struct {
    field: ?i32,
};

// Global mutable state: unwrapping must load through the global each time.
var struct_with_optional: StructWithOptional = undefined;

test "unwrap optional which is field of global var" {
    struct_with_optional.field = null;
    if (struct_with_optional.field) |payload| {
        _ = payload;
        unreachable;
    }
    struct_with_optional.field = 1234;
    if (struct_with_optional.field) |payload| {
        try expect(payload == 1234);
    } else {
        unreachable;
    }
}

test "null with default unwrap" {
    // A bare null literal coerces through orelse to the non-optional type.
    const x: i32 = null orelse 1;
    try expect(x == 1);
}

test "optional pointer to 0 bit type null value at runtime" {
    // Optional pointer to a zero-bit type still has a distinct null state.
    const EmptyStruct = struct {};
    var x: ?*EmptyStruct = null;
    try expect(x == null);
}
test/behavior/null.zig
const std = @import("std");
const assert = std.debug.assert;
const io = std.io;

/// Reverses ID3v2 unsynchronization: every 0x00 that immediately follows
/// an 0xFF byte is a stuffing byte and is dropped. buf must be at least as
/// long as in; the filled prefix of buf is returned. in and buf may alias
/// (see `decodeInPlace`).
pub fn decode(in: []const u8, buf: []u8) []u8 {
    assert(buf.len >= in.len);
    var out_len: usize = 0;
    var previous: u8 = 0;
    for (in) |byte| {
        // FF 00 00 must decode to FF 00 — only the first 00 is stuffing —
        // so we track the raw input byte, including dropped ones.
        const is_stuffing = previous == '\xFF' and byte == '\x00';
        if (!is_stuffing) {
            // Writing at out_len (<= current read index) is safe even when
            // in and buf alias.
            buf[out_len] = byte;
            out_len += 1;
        }
        previous = byte;
    }
    return buf[0..out_len];
}

/// Decodes `data` into itself (valid because output never outruns input).
pub fn decodeInPlace(data: []u8) []u8 {
    return decode(data, data);
}

/// A reader that can optionally strip unsynchronization stuffing bytes
/// from its child stream on the fly.
pub fn UnsynchCapableReader(comptime ReaderType: type) type {
    return struct {
        child_reader: ReaderType,
        unsynch: bool,

        pub const Error = ReaderType.Error;
        pub const Reader = io.Reader(*Self, Error, read);

        const Self = @This();

        pub fn read(self: *Self, dest: []u8) Error!usize {
            if (!self.unsynch) {
                return self.child_reader.read(dest);
            }
            // Byte-at-a-time filtering; slow but straightforward.
            // TODO: something better
            var filled: usize = 0;
            var last: u8 = 0;
            while (filled < dest.len) {
                const b = self.child_reader.readByte() catch |e| switch (e) {
                    error.EndOfStream => return filled,
                    else => |err| return err,
                };
                // Track every raw byte — even dropped ones — so FF 00 00
                // decodes to FF 00 rather than collapsing all the way to FF.
                if (!(last == '\xFF' and b == '\x00')) {
                    dest[filled] = b;
                    filled += 1;
                }
                last = b;
            }
            return filled;
        }

        pub fn reader(self: *Self) Reader {
            return .{ .context = self };
        }
    };
}

/// Convenience constructor wrapping `underlying_stream`.
pub fn unsynchCapableReader(unsynch: bool, underlying_stream: anytype) UnsynchCapableReader(@TypeOf(underlying_stream)) {
    return .{ .child_reader = underlying_stream, .unsynch = unsynch };
}

test "unsynch decode zero length" {
    const encoded = "";
    var buf: [encoded.len]u8 = undefined;
    const result = decode(encoded, &buf);
    try std.testing.expectEqual(result.len, 0);
    try std.testing.expectEqualSlices(u8, "", result);
}

test "unsynch decode" {
    const encoded = "\xFF\x00\x00\xFE\xFF\x00";
    var buf: [encoded.len]u8 = undefined;
    const result = decode(encoded, &buf);
    try std.testing.expectEqual(result.len, 4);
    try std.testing.expectEqualSlices(u8, "\xFF\x00\xFE\xFF", result);
}

test "unsynch decode in place" {
    const encoded = "\xFF\x00\x00\xFE\xFF\x00";
    var buf: [encoded.len]u8 = undefined;
    std.mem.copy(u8, &buf, encoded);
    const result = decodeInPlace(&buf);
    try std.testing.expectEqual(result.len, 4);
    try std.testing.expectEqualSlices(u8, "\xFF\x00\xFE\xFF", result);
}

test "unsynch reader" {
    const encoded = "\xFF\x00\x00\xFE\xFF\x00";
    var buf: [encoded.len]u8 = undefined;
    var stream = std.io.fixedBufferStream(encoded);
    var reader = unsynchCapableReader(true, stream.reader()).reader();
    const decoded_len = try reader.read(&buf);
    try std.testing.expectEqual(decoded_len, 4);
    try std.testing.expectEqualStrings("\xFF\x00\xFE\xFF", buf[0..decoded_len]);
}
src/unsynch.zig
const std = @import("std");
usingnamespace (@import("../machine.zig"));
usingnamespace (@import("../util.zig"));

// Shorthand operand constructors used throughout the encoding table.
const imm = Operand.immediate;
const imm8 = Operand.immediate8;
const imm16 = Operand.immediate16;
const imm32 = Operand.immediate32;
const imm64 = Operand.immediate64;
const immSign = Operand.immediateSigned;
const immSign8 = Operand.immediateSigned8;
const immSign16 = Operand.immediateSigned16;
const immSign32 = Operand.immediateSigned32;
const immSign64 = Operand.immediateSigned64;
const regRm = Operand.registerRm;
const memSib = Operand.memorySibDef;
const far16 = Operand.far16;
const far32 = Operand.far32;

// Golden table of JMP encodings across 16/32/64-bit modes. Each testOp1
// line pairs one machine mode + operand with either the expected byte
// sequence (hex string) or the expected assembler error.
test "jmp" {
    const m16 = Machine.init(.x86_16);
    const m32 = Machine.init(.x86_32);
    const m64 = Machine.init(.x64);

    debugPrint(false);

    // Relative jumps with immediate displacements (rel8/rel16/rel32).
    {
        testOp1(m32, .JMP, imm16(0x1100), "66 E9 00 11");
        testOp1(m64, .JMP, imm16(0x1100), AsmError.InvalidOperand);
        testOp1(m32, .JMP, imm(0x80), "66 E9 80 00");
        testOp1(m64, .JMP, imm(0x80), "E9 80 00 00 00");
        testOp1(m32, .JMP, imm8(0x80), AsmError.InvalidOperand);
        testOp1(m64, .JMP, imm8(0x80), AsmError.InvalidOperand);
        testOp1(m32, .JMP, immSign8(-128), "EB 80");
        testOp1(m64, .JMP, immSign8(-128), "EB 80");
        testOp1(m32, .JMP, immSign(0x80), "66 E9 80 00");
        testOp1(m64, .JMP, immSign(0x80), "E9 80 00 00 00");
        testOp1(m32, .JMP, immSign(-128), "EB 80");
        testOp1(m64, .JMP, immSign(-128), "EB 80");
        testOp1(m32, .JMP, imm(0x7F), "EB 7F");
        testOp1(m64, .JMP, imm(0x7F), "EB 7F");
        testOp1(m32, .JMP, imm8(0), "EB 00");
        testOp1(m64, .JMP, imm8(0), "EB 00");
        testOp1(m16, .JMP, imm32(0x33221100), "66 E9 00 11 22 33");
        testOp1(m32, .JMP, imm32(0x33221100), "E9 00 11 22 33");
        testOp1(m64, .JMP, imm32(0x33221100), "E9 00 11 22 33");
        testOp1(m16, .JMP, imm64(0), AsmError.InvalidOperand);
        testOp1(m32, .JMP, imm64(0), AsmError.InvalidOperand);
        testOp1(m64, .JMP, imm64(0), AsmError.InvalidOperand);
        testOp1(m16, .JMP, immSign64(0), AsmError.InvalidOperand);
        testOp1(m32, .JMP, immSign64(0), AsmError.InvalidOperand);
        testOp1(m64, .JMP, immSign64(0), AsmError.InvalidOperand);
    }
    // Indirect near jumps through a register (FF /4).
    {
        testOp1(m32, .JMP, regRm(.AX), "66 ff e0");
        testOp1(m64, .JMP, regRm(.AX), AsmError.InvalidOperand);
        //
        testOp1(m32, .JMP, regRm(.EAX), "ff e0");
        testOp1(m64, .JMP, regRm(.EAX), AsmError.InvalidOperand);
        //
        testOp1(m32, .JMP, regRm(.RAX), AsmError.InvalidOperand);
        testOp1(m64, .JMP, regRm(.RAX), "ff e0");
    }
    // Indirect far jumps through memory (FF /5) with SIB addressing.
    {
        testOp1(m32, .JMP, memSib(.FAR_WORD, 1, .EAX, .EAX, 0), "66 FF 2C 00");
        testOp1(m64, .JMP, memSib(.FAR_WORD, 1, .EAX, .EAX, 0), "66 67 FF 2C 00");
        //
        testOp1(m32, .JMP, memSib(.FAR_DWORD, 1, .EAX, .EAX, 0), "FF 2C 00");
        testOp1(m64, .JMP, memSib(.FAR_DWORD, 1, .EAX, .EAX, 0), "67 FF 2C 00");
        //
        testOp1(m32, .JMP, memSib(.FAR_QWORD, 1, .EAX, .EAX, 0), AsmError.InvalidOperand);
        testOp1(m64, .JMP, memSib(.FAR_QWORD, 1, .EAX, .EAX, 0), "67 48 FF 2C 00");
    }
    // Direct far jumps with an explicit segment:offset (EA) — invalid in
    // 64-bit mode.
    {
        testOp1(m32, .JMP, far16(0x1100, 0x3322), "66 EA 22 33 00 11");
        testOp1(m64, .JMP, far16(0x1100, 0x3322), AsmError.InvalidOperand);
        //
        testOp1(m32, .JMP, far32(0x1100, 0x55443322), "EA 22 33 44 55 00 11");
        testOp1(m64, .JMP, far32(0x1100, 0x55443322), AsmError.InvalidOperand);
    }
}
src/x86/tests/jmp.zig
const std = @import("std");
const print = std.debug.print;

const util = @import("util.zig");
const gpa = util.gpa;

const data = @embedFile("../data/day04.txt");

/// One 5x5 bingo board. Marks are tracked per row and per column as bit
/// sets so that a bingo check is just a popcount per line.
const Board = struct {
    // board cells in row-major order
    numbers : [25]u8,
    // row_hits[r] holds the marked columns of row r
    row_hits : [5]std.StaticBitSet(5),
    // col_hits[c] holds the marked rows of column c
    col_hits : [5]std.StaticBitSet(5),

    pub fn init() @This() {
        return Board {
            .numbers = [_]u8{0} ** 25,
            .row_hits = [_]std.StaticBitSet(5){std.StaticBitSet(5).initEmpty()} ** 5,
            .col_hits = [_]std.StaticBitSet(5){std.StaticBitSet(5).initEmpty()} ** 5,
        };
    }

    pub fn setNumber(this: *@This(), row: usize, col: usize, number: u8) void {
        this.numbers[row * 5 + col] = number;
    }

    /// Marks every cell equal to `hit` (all occurrences, if duplicated).
    pub fn hitNumber(this: *@This(), hit: u8) void {
        for (this.numbers) |number, i| {
            if (number == hit) {
                var row = i / 5;
                var col = i % 5;
                this.row_hits[row].set(col);
                this.col_hits[col].set(row);
            }
        }
    }

    /// True when any full row or full column is marked.
    pub fn bingo(this: *@This()) bool {
        var i : usize = 0;
        while (i < 5) : (i += 1) {
            if (this.row_hits[i].count() == 5) {
                return true;
            }
            if (this.col_hits[i].count() == 5) {
                return true;
            }
        }
        return false;
    }

    /// Sum of all cells that have not been marked (puzzle scoring).
    pub fn sumOfUnmarkedNumbers(this: *const @This()) u32 {
        var sum : u32 = 0;
        for (this.numbers) |number, i| {
            var row = i / 5;
            var col = i % 5;
            if (!this.row_hits[row].isSet(col)) {
                sum += number;
            }
        }
        return sum;
    }
};

/// Parses the call sequence and boards from the embedded input, then plays
/// until the first (part 1) and last (part 2) board win.
pub fn main() !void {
    var timer = try std.time.Timer.start();

    var boards = std.ArrayList(Board).init(gpa);
    defer {
        boards.deinit();
    }

    // First line is the comma-separated call sequence; the rest are boards
    // of 5 whitespace-separated rows each.
    var iterator = std.mem.tokenize(data, "\r\n");
    var caller_numbers = std.mem.tokenize(iterator.next().?, ",");

    while (iterator.next()) |t| {
        var token = t;
        var board = Board.init();
        var index : usize = 0;
        while (index < 5) : (index += 1) {
            var row_iterator = std.mem.tokenize(token, " ");
            var col_index : usize = 0;
            while (row_iterator.next()) |row_token| {
                board.setNumber(index, col_index, try std.fmt.parseInt(u8, row_token, 10));
                col_index += 1;
            }
            // Fetch the next row line, except after the final row (the
            // outer loop's next() will then grab the next board's row 1).
            if (index + 1 != 5) {
                token = iterator.next().?;
            }
        }
        try boards.append(board);
    }

    var found_winner = false;
    while (caller_numbers.next()) |token| {
        const call = try std.fmt.parseInt(u8, token, 10);
        // Mark the called number on every remaining board.
        for (boards.items) |*board| {
            board.hitNumber(call);
        }
        // Remove every board that just won; the first removal answers
        // part 1, the removal of the final board answers part 2.
        var i : usize = 0;
        while (i < boards.items.len) {
            if (boards.items[i].bingo()) {
                var board = boards.swapRemove(i);
                if (!found_winner) {
                    found_winner = true;
                    print("🎁 Bingo: {}\n", .{board.sumOfUnmarkedNumbers() * call});
                    print("Day 04 - part 01 took {:15}ns\n", .{timer.lap()});
                    timer.reset();
                }
                // If we're the last board, we've found our last bingo!
                if (boards.items.len == 0) {
                    print("🎁 Last Bingo: {}\n", .{board.sumOfUnmarkedNumbers() * call});
                    print("Day 04 - part 02 took {:15}ns\n", .{timer.lap()});
                    print("❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️❄️\n", .{});
                    return;
                }
                // We don't bump `i` if we got bingo on a board because we've swapped in
                // the last element of the list, and we need to check that one too!
            } else {
                i += 1;
            }
        }
    }
    @panic("Unreachable!");
}
src/day04.zig
// Raw MMIO load/store helpers (auto-translated from C; the redundant
// `var addr = arg_addr` copies are translate-c artifacts). `vu8`..`vu64`
// are presumably volatile-width aliases defined elsewhere in the SDK —
// TODO confirm their declarations.

/// Load a byte from `arg_addr`.
pub fn _lb(arg_addr: u32) callconv(.C) u8 {
    var addr = arg_addr;
    return @intToPtr([*c]volatile vu8, addr).?.*;
}

/// Load a halfword (16 bits) from `arg_addr`.
pub fn _lh(arg_addr: u32) callconv(.C) u16 {
    var addr = arg_addr;
    return @intToPtr([*c]volatile vu16, addr).?.*;
}

/// Load a word (32 bits) from `arg_addr`.
pub fn _lw(arg_addr: u32) callconv(.C) u32 {
    var addr = arg_addr;
    return @intToPtr([*c]volatile vu32, addr).?.*;
}

/// Load a doubleword (64 bits) from `arg_addr`.
pub fn _ld(arg_addr: u32) callconv(.C) u64 {
    var addr = arg_addr;
    return @intToPtr([*c]volatile vu64, addr).?.*;
}

/// Store a byte to `arg_addr`.
pub fn _sb(arg_val: u8, arg_addr: u32) callconv(.C) void {
    var val = arg_val;
    var addr = arg_addr;
    @intToPtr([*c]volatile vu8, addr).?.* = val;
}

/// Store a halfword (16 bits) to `arg_addr`.
pub fn _sh(arg_val: u16, arg_addr: u32) callconv(.C) void {
    var val = arg_val;
    var addr = arg_addr;
    @intToPtr([*c]volatile vu16, addr).?.* = val;
}

/// Store a word (32 bits) to `arg_addr`.
pub fn _sw(arg_val: u32, arg_addr: u32) callconv(.C) void {
    var val = arg_val;
    var addr = arg_addr;
    @intToPtr([*c]volatile vu32, addr).?.* = val;
}

/// Store a doubleword (64 bits) to `arg_addr`.
pub fn _sd(arg_val: u64, arg_addr: u32) callconv(.C) void {
    var val = arg_val;
    var addr = arg_addr;
    @intToPtr([*c]volatile vu64, addr).?.* = val;
}

// Fixed-width scalar aliases mirroring the PSP SDK's C typedefs. These are
// ABI surface — do not change the underlying types.
pub const SceUChar8 = u8;
pub const SceUShort16 = u16;
pub const SceUInt32 = u32;
pub const SceUInt64 = u64;
pub const SceULong64 = u64;
pub const SceChar8 = u8;
pub const SceShort16 = i16;
pub const SceInt32 = i32;
pub const SceInt64 = i64;
pub const SceLong64 = i64;
pub const SceFloat = f32;
pub const SceFloat32 = f32;
pub const SceWChar16 = c_ushort;
pub const SceWChar32 = c_uint;
pub const SceBool = c_int;
pub const SceVoid = c_void;
pub const ScePVoid = ?*c_void;

// Rectangles in the four element widths the SDK uses.
pub const ScePspSRect = extern struct { x: c_short, y: c_short, w: c_short, h: c_short, };
pub const ScePspIRect = extern struct { x: c_int, y: c_int, w: c_int, h: c_int, };
pub const ScePspL64Rect = extern struct { x: SceLong64, y: SceLong64, w: SceLong64, h: SceLong64, };
pub const ScePspFRect = extern struct { x: f32, y: f32, w: f32, h: f32, };

// 2-component vectors plus a punning union over the float/int variants.
pub const ScePspSVector2 = extern struct { x: c_short, y: c_short, };
pub const ScePspIVector2 = extern struct { x: c_int, y: c_int, };
pub const ScePspL64Vector2 = extern struct { x: SceLong64, y: SceLong64, };
pub const ScePspFVector2 = extern struct { x: f32, y: f32, };
pub const ScePspVector2 = extern union { fv: ScePspFVector2, iv: ScePspIVector2, f: [2]f32, i: [2]c_int, };

// 3-component vectors plus the matching punning union.
pub const ScePspSVector3 = extern struct { x: c_short, y: c_short, z: c_short, };
pub const ScePspIVector3 = extern struct { x: c_int, y: c_int, z: c_int, };
pub const ScePspL64Vector3 = extern struct { x: SceLong64, y: SceLong64, z: SceLong64, };
pub const ScePspFVector3 = extern struct { x: f32, y: f32, z: f32, };
pub const ScePspVector3 = extern union { fv: ScePspFVector3, iv: ScePspIVector3, f: [3]f32, i: [3]c_int, };

// 4-component vectors. The "Unaligned" variant mirrors the C header's
// layout distinction — presumably alignment-related; confirm in the SDK.
pub const ScePspSVector4 = extern struct { x: c_short, y: c_short, z: c_short, w: c_short, };
pub const ScePspIVector4 = extern struct { x: c_int, y: c_int, z: c_int, w: c_int, };
pub const ScePspL64Vector4 = extern struct { x: SceLong64, y: SceLong64, z: SceLong64, w: SceLong64, };
pub const ScePspFVector4 = extern struct { x: f32, y: f32, z: f32, w: f32, };
pub const ScePspFVector4Unaligned = extern struct { x: f32, y: f32, z: f32, w: f32, };
pub const ScePspVector4 = extern union { fv: ScePspFVector4, iv: ScePspIVector4, f: [4]f32, i: [4]c_int, };

// 2x2, 3x3, 4x4 matrices as rows of the corresponding vectors, each with a
// punning union over float/int/row-vector/flat-array views.
pub const ScePspIMatrix2 = extern struct { x: ScePspIVector2, y: ScePspIVector2, };
pub const ScePspFMatrix2 = extern struct { x: ScePspFVector2, y: ScePspFVector2, };
pub const ScePspMatrix2 = extern union { fm: ScePspFMatrix2, im: ScePspIMatrix2, fv: [2]ScePspFVector2, iv: [2]ScePspIVector2, v: [2]ScePspVector2, f: [2][2]f32, i: [2][2]c_int, };

pub const ScePspIMatrix3 = extern struct { x: ScePspIVector3, y: ScePspIVector3, z: ScePspIVector3, };
pub const ScePspFMatrix3 = extern struct { x: ScePspFVector3, y: ScePspFVector3, z: ScePspFVector3, };
pub const ScePspMatrix3 = extern union { fm: ScePspFMatrix3, im: ScePspIMatrix3, fv: [3]ScePspFVector3, iv: [3]ScePspIVector3, v: [3]ScePspVector3, f: [3][3]f32, i: [3][3]c_int, };

pub const ScePspIMatrix4 = extern struct { x: ScePspIVector4, y: ScePspIVector4, z: ScePspIVector4, w: ScePspIVector4, };
pub const ScePspIMatrix4Unaligned = extern struct { x: ScePspIVector4, y: ScePspIVector4, z: ScePspIVector4, w: ScePspIVector4, };
pub const ScePspFMatrix4 = extern struct { x: ScePspFVector4, y: ScePspFVector4, z: ScePspFVector4, w: ScePspFVector4, };
pub const ScePspFMatrix4Unaligned = extern struct { x: ScePspFVector4, y: ScePspFVector4, z: ScePspFVector4, w: ScePspFVector4, };
pub const ScePspMatrix4 = extern union { fm: ScePspFMatrix4, im: ScePspIMatrix4, fv: [4]ScePspFVector4, iv: [4]ScePspIVector4, v: [4]ScePspVector4, f: [4][4]f32, i: [4][4]c_int, };

// Quaternions and float colors (plus "Unaligned" layout twins).
pub const ScePspFQuaternion = extern struct { x: f32, y: f32, z: f32, w: f32, };
pub const ScePspFQuaternionUnaligned = extern struct { x: f32, y: f32, z: f32, w: f32, };
pub const ScePspFColor = extern struct { r: f32, g: f32, b: f32, a: f32, };
pub const ScePspFColorUnaligned = extern struct { r: f32, g: f32, b: f32, a: f32, };

// Packed pixel formats.
pub const ScePspRGBA8888 = c_uint;
pub const ScePspRGBA4444 = c_ushort;
pub const ScePspRGBA5551 = c_ushort;
pub const ScePspRGB565 = c_ushort;

// Type-punning unions over 32/64/128-bit payloads.
pub const ScePspUnion32 = extern union { ui: c_uint, i: c_int, us: [2]c_ushort, s: [2]c_short, uc: [4]u8, c: [4]u8, f: f32, rgba8888: ScePspRGBA8888, rgba4444: [2]ScePspRGBA4444, rgba5551: [2]ScePspRGBA5551, rgb565: [2]ScePspRGB565, };
pub const ScePspUnion64 = extern union { ul: SceULong64, l: SceLong64, ui: [2]c_uint, i: [2]c_int, us: [4]c_ushort, s: [4]c_short, uc: [8]u8, c: [8]u8, f: [2]f32, sr: ScePspSRect, sv: ScePspSVector4, rgba8888: [2]ScePspRGBA8888, rgba4444: [4]ScePspRGBA4444, rgba5551: [4]ScePspRGBA5551, rgb565: [4]ScePspRGB565, };
pub const ScePspUnion128 = extern union { ul: [2]SceULong64, l: [2]SceLong64, ui: [4]c_uint, i: [4]c_int, us: [8]c_ushort, s: [8]c_short, uc: [16]u8, c: [16]u8, f: [4]f32, fr: ScePspFRect, ir: ScePspIRect, fv: ScePspFVector4, iv: ScePspIVector4, fq: ScePspFQuaternion, fc: ScePspFColor, rgba8888: [4]ScePspRGBA8888, rgba4444: [8]ScePspRGBA4444, rgba5551: [8]ScePspRGBA5551, rgb565: [8]ScePspRGB565, };

// Calendar timestamp as used by the firmware RTC APIs.
pub const ScePspDateTime = extern struct { year: c_ushort, month: c_ushort, day: c_ushort, hour: c_ushort, minute: c_ushort, second: c_ushort, microsecond: c_uint, };

// Kernel handle / size / mode aliases.
pub const SceUID = c_int;
pub const SceSize = c_uint;
pub const SceSSize = c_int;
pub const SceUChar = u8;
pub const SceUInt = c_uint;
pub const SceMode = c_int;
pub const SceOff = SceInt64;
pub const SceIores = SceInt64;
src/psp/sdk/psptypes.zig
//! Test harness for a Lox interpreter.
//!
//! Usage: test-runner <path-to-lox-binary> <test-file>...
//!
//! Each test file embeds its expectations in comments:
//!   // expect: <stdout line>
//!   // expect runtime error: <message>          -> exit code 70
//!   // Error<rest> / // [line N]... / // [c line N]...  -> exit code 65
const std = @import("std");
const process = std.process;

// Short-lived batch tool: the C allocator is fine, the OS reclaims on exit.
const allocator = std.heap.c_allocator;

pub fn main() !void {
    var npassed: usize = 0;
    var nfailed: usize = 0;
    {
        const args = try process.argsAlloc(allocator);
        defer process.argsFree(allocator, args);

        // args[0] is this binary; args[1] must be the interpreter under test.
        // Without this check, `args[1]` below panics on a bare invocation.
        if (args.len < 2) {
            std.debug.warn("usage: {s} <lox> [test files...]\n", .{args[0]});
            process.exit(2);
        }

        const lox_path = args[1];
        for (args[2..]) |test_path| {
            if (try run_test(lox_path, test_path)) {
                npassed += 1;
            } else {
                nfailed += 1;
            }
        }
    }
    std.debug.warn("\nPassed: {}, Failed: {}\n", .{ npassed, nfailed });
    process.exit(if (nfailed > 0) 1 else 0);
}

/// Runs the interpreter at `lox_path` on `test_path` and checks its stdout,
/// stderr and exit code against the expectations parsed from the test file.
/// Returns true when every check passes.
fn run_test(lox_path: []const u8, test_path: []const u8) !bool {
    std.debug.warn("{s}\n", .{test_path});

    const argv = [_][]const u8{ lox_path, test_path };
    const result = try std.ChildProcess.exec(.{ .allocator = allocator, .argv = argv[0..] });
    defer allocator.free(result.stdout);
    defer allocator.free(result.stderr);

    const expected = try parse_test_file(test_path);
    defer allocator.free(expected.output);
    defer allocator.free(expected.runtime_error_message);
    defer allocator.free(expected.compile_error_message);

    return (validate_compile_error(result.stderr, expected.compile_error_message) and
        validate_runtime_error(result.stderr, expected.runtime_error_message) and
        validate_output(result.stdout, expected.output) and
        validate_exit_code(result.term.Exited, expected.exit_code));
}

/// True when the exit code matches; logs both values otherwise.
fn validate_exit_code(actual: u32, expected: u32) bool {
    if (actual == expected) return true;
    std.debug.warn("Incorrect exit code\nActual: {}\nExpected: {}\n", .{ actual, expected });
    return false;
}

/// True when stdout matches the expected output exactly.
fn validate_output(actual: []const u8, expected: []const u8) bool {
    if (std.mem.eql(u8, actual, expected)) return true;
    std.debug.warn("Output differs:\nActual:\n{s}\n\nExpected:\n{s}\n\n", .{ actual, expected });
    return false;
}

/// True when no runtime error is expected, or `actual` (stderr) contains the
/// expected message as a substring (the stack trace follows it).
fn validate_runtime_error(actual: []const u8, expected: []const u8) bool {
    if (expected.len == 0) return true;
    if (std.mem.indexOf(u8, actual, expected) != null) return true;
    std.debug.warn("Missing expected runtime error:\nActual:\n{s}\n\nExpected:\n{s} ...\n\n", .{ actual, expected });
    return false;
}

/// True when no compile error is expected, or stderr equals the expected
/// message exactly (unlike runtime errors, which use substring matching).
fn validate_compile_error(actual: []const u8, expected: []const u8) bool {
    if (expected.len == 0) return true;
    if (std.mem.eql(u8, actual, expected)) return true;
    std.debug.warn("Missing expected compile error:\nActual:\n{s}\n\nExpected:\n{s}\n\n", .{ actual, expected });
    return false;
}

/// Expectations extracted from a test file. All slices are owned by the
/// caller and allocated with the module-level allocator.
const Expected = struct {
    output: []const u8,
    compile_error_message: []const u8,
    runtime_error_message: []const u8,
    exit_code: u32,
};

/// True when `source` begins with `needle` (false if `source` is shorter).
fn matches(source: []const u8, needle: []const u8) bool {
    return std.mem.eql(u8, source[0..std.math.min(needle.len, source.len)], needle);
}

/// Scans `test_path` for expectation comments and collects them into an
/// `Expected`. Caller frees the three message slices.
fn parse_test_file(test_path: []const u8) !Expected {
    const source = try std.fs.cwd().readFileAlloc(allocator, test_path, 1_000_000);
    // Every buffer below copies bytes out of `source`, so it can be freed
    // here; the original code leaked it.
    defer allocator.free(source);

    var output_buffer = std.ArrayList(u8).init(allocator);
    var compile_error_buffer = std.ArrayList(u8).init(allocator);
    var runtime_error_buffer = std.ArrayList(u8).init(allocator);

    const expect_prefix = "// expect: ";
    const error_prefix = "// Error";
    const line_error_prefix = "// [line ";
    const c_line_error_prefix = "// [c line ";
    const runtime_error_prefix = "// expect runtime error: ";

    var exit_code: u32 = 0;
    var line: usize = 1;
    var i: usize = 0;
    while (i < source.len) : (i += 1) {
        if (matches(source[i..], expect_prefix)) {
            // "// expect: foo" -> expected stdout line "foo\n".
            i += expect_prefix.len;
            const j = std.mem.indexOfScalarPos(u8, source, i, '\n') orelse source.len;
            try output_buffer.appendSlice(source[i..j]);
            try output_buffer.append('\n');
            i = j;
        } else if (matches(source[i..], error_prefix)) {
            // "// Error..." -> "[line <current>] Error..." using the line the
            // comment sits on.
            exit_code = 65;
            i += error_prefix.len;
            const j = std.mem.indexOfScalarPos(u8, source, i, '\n') orelse source.len;
            try compile_error_buffer.writer().print("[line {}] Error", .{line});
            try compile_error_buffer.appendSlice(source[i..j]);
            try compile_error_buffer.append('\n');
            i = j;
        } else if (matches(source[i..], line_error_prefix)) {
            // "// [line N]..." -> "[line N]..." with an explicit line number.
            exit_code = 65;
            i += line_error_prefix.len;
            const j = std.mem.indexOfScalarPos(u8, source, i, '\n') orelse source.len;
            try compile_error_buffer.appendSlice("[line ");
            try compile_error_buffer.appendSlice(source[i..j]);
            try compile_error_buffer.append('\n');
            i = j;
        } else if (matches(source[i..], c_line_error_prefix)) {
            // "// [c line N]..." is normalized to the same "[line N]..." form.
            exit_code = 65;
            i += c_line_error_prefix.len;
            const j = std.mem.indexOfScalarPos(u8, source, i, '\n') orelse source.len;
            try compile_error_buffer.appendSlice("[line ");
            try compile_error_buffer.appendSlice(source[i..j]);
            try compile_error_buffer.append('\n');
            i = j;
        } else if (matches(source[i..], runtime_error_prefix)) {
            exit_code = 70;
            i += runtime_error_prefix.len;
            const j = std.mem.indexOfScalarPos(u8, source, i, '\n') orelse source.len;
            try runtime_error_buffer.appendSlice(source[i..j]);
            // Append start of stack trace to runtime error message
            try runtime_error_buffer.writer().print("\n[line {}]", .{line});
            i = j;
        }
        if (i < source.len and source[i] == '\n') line += 1;
    }

    return Expected{
        .output = output_buffer.toOwnedSlice(),
        .runtime_error_message = runtime_error_buffer.toOwnedSlice(),
        .compile_error_message = compile_error_buffer.toOwnedSlice(),
        .exit_code = exit_code,
    };
}
util/test.zig
//! Game state and entity setup for the example shooter.
const std = @import("std");
const alka = @import("alka");
const gui = alka.gui;
const ecs = @import("ecs.zig");

usingnamespace alka.math;
usingnamespace alka.log;

const mlog = std.log.scoped(.app);

pub const State = enum {
    reset,
    menu,
    play,
};

pub var state = State.menu;

pub var score: f32 = 0;
pub var highscore: f32 = 0;
pub const score_increasec_station: f32 = 7.5;
pub const score_increasec_kamikaze: f32 = 2.0;
pub const score_decreasec_kamikaze: f32 = 2.5;

// BUG FIX: the PRNG must live at file scope. The original code created it as
// a local inside open() and stored `rand = &prng.random`, leaving `rand`
// pointing into a dead stack frame once open() returned — every later
// rand.intRangeAtMost() call read freed stack memory.
var prng: std.rand.DefaultPrng = undefined;
pub var rand: *std.rand.Random = undefined;

pub var score_increase_rate: f32 = 0.0008;
pub var score_increase: f32 = 1;
pub var enemy_station_shoot_rate: f32 = 0.0012;
pub var enemy_station_shoot: f32 = 1;

/// Seeds the global PRNG, then initializes the ECS world and the engine.
pub fn open() !void {
    prng = std.rand.DefaultPrng.init(blk: {
        var seed: u64 = undefined;
        try std.os.getrandom(std.mem.asBytes(&seed));
        break :blk seed;
    });
    rand = &prng.random;

    try ecs.init();
    try alka.open();
}

/// Runs the engine main loop.
pub fn run() !void {
    try alka.update();
}

/// Per-frame update: advances difficulty scalars and the ECS while playing,
/// handles the reset -> menu transition, always updates the GUI.
pub fn update(dt: f32) !void {
    if (state == .play) {
        score_increase += score_increase_rate * dt;
        enemy_station_shoot += enemy_station_shoot_rate * dt;

        mlog.notice("score_increase: {d:.5}", .{score_increase});
        mlog.notice("enemy station shoot: {d:.5}", .{enemy_station_shoot});

        try ecs.update(dt);
    } else if (state == .reset) {
        state = .menu;
        try eclose();
    }
    try gui.update(dt);
}

/// Fixed-timestep update (physics); mirrors update()'s state handling.
pub fn fupdate(dt: f32) !void {
    if (state == .play) {
        try ecs.fixed(dt);
    } else if (state == .reset) {
        state = .menu;
        try eclose();
    }
    try gui.fixed(dt);
}

/// Draws the GUI first, then the world while playing.
pub fn draw() !void {
    try gui.draw();
    if (state == .play) {
        try ecs.draw();
    } else if (state == .reset) {
        state = .menu;
        try eclose();
    }
}

/// Engine shutdown hook.
pub fn close() void {
    ecs.deinit();
}

/// Starts a round: resets difficulty, spawns the player, the arena walls and
/// the first enemy-kamikaze fabric.
pub fn estart() !void {
    score_increase = 1;
    enemy_station_shoot = 1;
    scoreIncrease(10, 1);

    const w = alka.getWindow();
    const input = alka.getInput();

    var player = try ecs.world.createRegister(@enumToInt(ecs.SpecialEntities.player));
    try player.create();
    try player.attach("Player Controller", ecs.PlayerController{
        .left = try input.keyStatePtr(.A),
        .right = try input.keyStatePtr(.D),
        .up = try input.keyStatePtr(.W),
        .down = try input.keyStatePtr(.S),
        .dash = try input.keyStatePtr(.LeftShift),
        .dash_counter = 0,
        .dash_max = 0.5,
        .dash_timer_max = 0.5,
    });
    try player.attach("Motion", ecs.Motion{
        .acc = 10,
        .friction = 5,
        .maxspeed = Vec2f{ .x = 5, .y = 5 },
    });
    // Player starts centered in the window, 32x32, origin at its center.
    try player.attach("Transform", Transform2D{
        .position = Vec2f{
            .x = @intToFloat(f32, w.size.width) / 2,
            .y = @intToFloat(f32, w.size.height) / 2,
        },
        .origin = Vec2f{ .x = 32 / 2, .y = 32 / 2 },
        .size = Vec2f{ .x = 32, .y = 32 },
    });
    try player.attach("Colour", alka.Colour.rgba(40, 200, 100, 255));
    try player.attach("Collision Mask", @as([]const u8, "Player"));
    try player.attach("Texture Draw", @as(u64, 1));

    try createWalls();

    // create enemy kamikaze fabric, fabric
    {
        var fabric = try ecs.world.createRegister(ecs.world.findID());
        try fabric.create();
        try fabric.attach("Fabric", ecs.Fabric{
            .maxtime = 3.1,
            .reloadtime = 3.5,
            .deloadtime = 6,
            .spawn = enemyKamikazeFabricSpawn,
        });
        try fabric.attach("Transform", Transform2D{
            .position = Vec2f{
                .x = 200,
                .y = 300,
            },
            .origin = Vec2f{ .x = 64 / 2, .y = 64 / 2 },
            .size = Vec2f{ .x = 64, .y = 64 },
        });
    }
}

/// Ends a round: destroys every live entity and zeroes the score.
/// NOTE(review): this removes registers while iterating `registers`; assumed
/// safe with this ECS's iterator — confirm against ecs.World's contract.
pub fn eclose() !void {
    var it = ecs.world.registers.iterator();
    while (it.next()) |entry| {
        if (entry.data) |entity| {
            entity.destroy();
            try ecs.world.removeRegister(entity.id);
        }
    }
    score = 0;
}

/// Adds `constant * factor`, scaled by the time-based multiplier, and keeps
/// the high score current.
pub fn scoreIncrease(constant: f32, factor: f32) void {
    score += constant * factor * score_increase;
    if (score > highscore) highscore = score;
}

/// Subtracts `constant * factor`; a negative score ends the round.
pub fn scoreDecrease(constant: f32, factor: f32) void {
    score -= constant * factor;
    if (score < 0) {
        state = .reset;
        ecs.abortfunc = true;
    }
}

/// Spawns the four window-edge wall entities (32px thick, centered on the
/// respective edge).
fn createWalls() !void {
    const w = alka.getWindow();
    const colour = alka.Colour.rgba(0, 0, 0, 255);

    // wall left
    {
        var wall = try ecs.world.createRegister(@enumToInt(ecs.SpecialEntities.wall_left));
        try wall.create();
        try wall.attach("Transform", Transform2D{
            .position = Vec2f{
                .x = 0,
                .y = @intToFloat(f32, w.size.height) / 2,
            },
            .size = Vec2f{ .x = 32, .y = @intToFloat(f32, w.size.height) },
        });
        var tr = try wall.getPtr("Transform", Transform2D);
        tr.origin = tr.size.divValues(2, 2);
        try wall.attach("Colour", colour);
        try wall.attach("Collision Mask", @as([]const u8, "Wall"));
        try wall.attach("Rectangle Draw", @as(i1, 0));
    }

    // wall right
    {
        var wall = try ecs.world.createRegister(@enumToInt(ecs.SpecialEntities.wall_right));
        try wall.create();
        try wall.attach("Transform", Transform2D{
            .position = Vec2f{
                .x = @intToFloat(f32, w.size.width),
                .y = @intToFloat(f32, w.size.height) / 2,
            },
            .size = Vec2f{ .x = 32, .y = @intToFloat(f32, w.size.height) },
        });
        var tr = try wall.getPtr("Transform", Transform2D);
        tr.origin = tr.size.divValues(2, 2);
        try wall.attach("Colour", colour);
        try wall.attach("Collision Mask", @as([]const u8, "Wall"));
        try wall.attach("Rectangle Draw", @as(i1, 0));
    }

    // wall top
    {
        var wall = try ecs.world.createRegister(@enumToInt(ecs.SpecialEntities.wall_top));
        try wall.create();
        try wall.attach("Transform", Transform2D{
            .position = Vec2f{
                .x = @intToFloat(f32, w.size.width) / 2,
                .y = 0,
            },
            .size = Vec2f{ .x = @intToFloat(f32, w.size.width), .y = 32 },
        });
        var tr = try wall.getPtr("Transform", Transform2D);
        tr.origin = tr.size.divValues(2, 2);
        try wall.attach("Colour", colour);
        try wall.attach("Collision Mask", @as([]const u8, "Wall"));
        try wall.attach("Rectangle Draw", @as(i1, 0));
    }

    // wall bottom
    {
        var wall = try ecs.world.createRegister(@enumToInt(ecs.SpecialEntities.wall_bottom));
        try wall.create();
        try wall.attach("Transform", Transform2D{
            .position = Vec2f{
                .x = @intToFloat(f32, w.size.width) / 2,
                .y = @intToFloat(f32, w.size.height),
            },
            .size = Vec2f{ .x = @intToFloat(f32, w.size.width), .y = 32 },
        });
        var tr = try wall.getPtr("Transform", Transform2D);
        tr.origin = tr.size.divValues(2, 2);
        try wall.attach("Colour", colour);
        try wall.attach("Collision Mask", @as([]const u8, "Wall"));
        try wall.attach("Rectangle Draw", @as(i1, 0));
    }
}

/// Fabric callback: spawns one kamikaze enemy near the fabric at `self_id`,
/// aimed at the player's current position.
fn enemyKamikazeSpawn(self_id: u64) !void {
    var entity = try ecs.world.createRegister(ecs.world.findID());
    const col = alka.Colour.rgba(220, 70, 80, 255);
    try entity.create();
    try entity.attach("Collision Mask", @as([]const u8, "Enemy Kamikaze"));
    try entity.attach("Texture Draw", @as(u64, 2));
    try entity.attach("Colour", col);

    const pl = try ecs.world.getRegister(@enumToInt(ecs.SpecialEntities.player));
    const plt = try pl.get("Transform", Transform2D);

    const self = try ecs.world.getRegister(self_id);
    const str = try self.get("Transform", Transform2D);
    const opos = str.getOriginated();

    // Spawn offset from the fabric's originated position.
    try entity.attach("Transform", Transform2D{
        .position = Vec2f{
            .x = opos.x + 32,
            .y = opos.y + 32,
        },
        .size = Vec2f{
            .x = 32,
            .y = 32,
        },
    });
    var tr = try entity.getPtr("Transform", Transform2D);
    tr.origin = tr.size.divValues(2, 2);

    // Aim at the player: constant motion along the enemy->player direction.
    // (The original also fetched the window size here but never used it;
    // the dead local has been removed.)
    const pos0 = plt.getOriginated();
    const pos1 = tr.getOriginated();
    const angle = pos1.angleRad(pos0);
    const toward = Vec2f{
        .x = @cos(angle),
        .y = @sin(angle),
    };
    tr.rotation = rad2degf(angle) + 90;

    try entity.attach("Motion", ecs.Motion{
        .maxspeed = Vec2f{ .x = 10, .y = 10 },
        .constant = toward.mulValues(20, 20),
    });
}

/// Fabric callback: spawns a new kamikaze fabric at a random on-screen spot
/// (100px margin); its lifetime shrinks as enemy_station_shoot grows.
fn enemyKamikazeFabricSpawn(self_id: u64) !void {
    var fabric = try ecs.world.createRegister(ecs.world.findID());
    try fabric.create();

    const w = alka.getWindow().size;

    try fabric.attach("Fabric", ecs.Fabric{
        .maxtime = 0.2,
        .reloadtime = 3,
        .reloadcc = 3,
        .ctime = 1,
        .deloadtime = 1.5 * enemy_station_shoot,
        .spawn = enemyKamikazeSpawn,
    });
    try fabric.attach("Transform", Transform2D{
        .position = Vec2f{
            .x = @intToFloat(f32, rand.intRangeAtMost(i32, 100, w.width - 100)),
            .y = @intToFloat(f32, rand.intRangeAtMost(i32, 100, w.height - 100)),
        },
        .origin = Vec2f{ .x = 64 / 2, .y = 64 / 2 },
        .size = Vec2f{ .x = 64, .y = 64 },
    });
    try fabric.attach("Enemy Fabric Controller", ecs.EnemyFabricController{
        .speedup = 500,
        .torque = 200,
    });
    try fabric.attach("Colour", alka.Colour.rgba(255, 255, 255, 255));
    try fabric.attach("Texture Draw", @as(u64, 3));
    try fabric.attach("Collision Mask", @as([]const u8, "Enemy Fabric Controller"));
}
examples/example_shooter_game/src/game.zig
//! Build script for the bog interpreter: static C library, two embedding
//! examples, the interpreter binary, plus fmt/clean/test convenience steps.
const std = @import("std");
const Builder = std.build.Builder;

pub fn build(b: *Builder) void {
    const build_mode = b.standardReleaseOptions();

    // The embeddable C library.
    const bog_lib = b.addStaticLibrary("bog", "src/lib.zig");
    bog_lib.setBuildMode(build_mode);
    bog_lib.linkLibC();

    // Binary-size switches, forwarded to the library as "build_options".
    const bog_lib_options = b.addOptions();
    bog_lib.addOptions("build_options", bog_lib_options);
    bog_lib_options.addOption(
        bool,
        "no_std",
        b.option(bool, "NO_ADD_STD", "Do not export bog_Vm_addStd to reduce binary size") orelse false,
    );
    bog_lib_options.addOption(
        bool,
        "no_std_no_io",
        b.option(bool, "NO_ADD_STD_NO_IO", "Do not export bog_Vm_addStd to reduce binary size") orelse false,
    );

    const lib_step = b.step("lib", "Build C library");
    lib_step.dependOn(&b.addInstallArtifact(bog_lib).step);

    // Example: embedding the interpreter from C.
    const c_demo = b.addExecutable("bog_from_c", null);
    c_demo.setBuildMode(build_mode);
    c_demo.addCSourceFile("examples/bog_from_c.c", &[_][]const u8{});
    c_demo.addIncludeDir("include");
    c_demo.linkLibrary(bog_lib);
    c_demo.linkLibC();
    c_demo.addLibPath("zig-cache/lib");
    c_demo.step.dependOn(lib_step);
    c_demo.setOutputDir("examples/bin");

    // Example: calling Zig code from a bog script.
    const zig_demo = b.addExecutable("zig_from_bog", "examples/zig_from_bog.zig");
    zig_demo.setBuildMode(build_mode);
    zig_demo.addPackagePath("bog", "src/bog.zig");
    zig_demo.setOutputDir("examples/bin");

    const examples_step = b.step("examples", "Build all examples");
    examples_step.dependOn(&b.addInstallArtifact(c_demo).step);
    examples_step.dependOn(&b.addInstallArtifact(zig_demo).step);

    addTests(b, examples_step, .{
        "src/main.zig",
        "tests/fmt.zig",
        "tests/behavior.zig",
        "tests/error.zig",
    });

    // The interpreter binary itself.
    const bog_exe = b.addExecutable("bog", "src/main.zig");
    bog_exe.setBuildMode(build_mode);
    bog_exe.install();

    const fmt_step = b.step("fmt", "Format all source files");
    fmt_step.dependOn(&b.addFmt(&[_][]const u8{
        "build.zig",
        "src",
        "examples",
        "tests",
    }).step);

    const clean_step = b.step("clean", "Delete all artifacts created by zig build");
    const rm_zig_cache = b.addRemoveDirTree("zig-cache");
    const rm_examples_bin = b.addRemoveDirTree("examples/bin");
    clean_step.dependOn(&rm_zig_cache.step);
    clean_step.dependOn(&rm_examples_bin.step);
}

/// Registers a "test" step running every file in `tests` with the `bog`
/// package path wired up; building the examples is a prerequisite.
fn addTests(b: *Builder, examples_step: *std.build.Step, tests: anytype) void {
    const tests_step = b.step("test", "Run all tests");
    tests_step.dependOn(examples_step);
    inline for (tests) |test_file| {
        const test_case = b.addTest(test_file);
        test_case.addPackagePath("bog", "src/bog.zig");
        tests_step.dependOn(&test_case.step);
    }
}
build.zig