code
stringlengths
38
801k
repo_path
stringlengths
6
263
//! Hand-written Zig bindings for raylib: core structs, the named color
//! palette, enums and convenience aliases. All structs are `extern` so
//! their memory layout matches the C ABI of the raylib library.
pub usingnamespace @import("raylib-wa.zig");
// Self-reference so the methods below can call the re-exported C functions.
const rl = @This();

// ---- Math types ----
pub const Vector2 = extern struct { x: f32, y: f32, };
pub const Vector3 = extern struct { x: f32, y: f32, z: f32, };
pub const Vector4 = extern struct { x: f32, y: f32, z: f32, w: f32, };
pub const Quaternion = Vector4;
// 4x4 matrix; field order (m0, m4, m8, m12, ...) mirrors raylib's C Matrix.
pub const Matrix = extern struct { m0: f32, m4: f32, m8: f32, m12: f32, m1: f32, m5: f32, m9: f32, m13: f32, m2: f32, m6: f32, m10: f32, m14: f32, m3: f32, m7: f32, m11: f32, m15: f32, };

// ---- Color and raylib's predefined palette (RGBA, 8 bits per channel) ----
pub const Color = extern struct { r: u8, g: u8, b: u8, a: u8, };
pub const LIGHTGRAY = Color{ .r = 200, .g = 200, .b = 200, .a = 255 };
pub const GRAY = Color{ .r = 130, .g = 130, .b = 130, .a = 255 };
pub const DARKGRAY = Color{ .r = 80, .g = 80, .b = 80, .a = 255 };
pub const YELLOW = Color{ .r = 253, .g = 249, .b = 0, .a = 255 };
pub const GOLD = Color{ .r = 255, .g = 203, .b = 0, .a = 255 };
pub const ORANGE = Color{ .r = 255, .g = 161, .b = 0, .a = 255 };
pub const PINK = Color{ .r = 255, .g = 109, .b = 194, .a = 255 };
pub const RED = Color{ .r = 230, .g = 41, .b = 55, .a = 255 };
pub const MAROON = Color{ .r = 190, .g = 33, .b = 55, .a = 255 };
pub const GREEN = Color{ .r = 0, .g = 228, .b = 48, .a = 255 };
pub const LIME = Color{ .r = 0, .g = 158, .b = 47, .a = 255 };
pub const DARKGREEN = Color{ .r = 0, .g = 117, .b = 44, .a = 255 };
pub const SKYBLUE = Color{ .r = 102, .g = 191, .b = 255, .a = 255 };
pub const BLUE = Color{ .r = 0, .g = 121, .b = 241, .a = 255 };
pub const DARKBLUE = Color{ .r = 0, .g = 82, .b = 172, .a = 255 };
pub const PURPLE = Color{ .r = 200, .g = 122, .b = 255, .a = 255 };
pub const VIOLET = Color{ .r = 135, .g = 60, .b = 190, .a = 255 };
pub const DARKPURPLE = Color{ .r = 112, .g = 31, .b = 126, .a = 255 };
pub const BEIGE = Color{ .r = 211, .g = 176, .b = 131, .a = 255 };
pub const BROWN = Color{ .r = 127, .g = 106, .b = 79, .a = 255 };
pub const DARKBROWN = Color{ .r = 76, .g = 63, .b = 47, .a = 255 };
pub const WHITE = Color{ .r = 255, .g = 255, .b = 255, .a = 255 };
pub const BLACK = Color{ .r = 0, .g = 0, .b = 0, .a = 255 };
pub const BLANK = Color{ .r = 0, .g = 0, .b = 0, .a = 0 }; // fully transparent
pub const MAGENTA = Color{ .r = 255, .g = 0, .b = 255, .a = 255 };
pub const RAYWHITE = Color{ .r = 245, .g = 245, .b = 245, .a = 255 };

pub const Rectangle = extern struct { x: f32, y: f32, width: f32, height: f32, };

/// CPU-side image data; the `init*`/`Gen*` helpers wrap the corresponding
/// raylib loaders/generators. Ownership of `data` follows raylib's C API.
pub const Image = extern struct {
    data: ?*anyopaque, // raw pixel buffer, layout described by `format`
    width: c_int,
    height: c_int,
    mipmaps: c_int,
    format: PixelFormat,

    /// Load an image from a file (format inferred from the extension).
    pub fn init(fileName: [*c]const u8) Image {
        return rl.LoadImage(fileName);
    }
    /// Load raw pixel data from a file with an explicit size/format.
    pub fn initRaw(fileName: [*c]const u8, width: c_int, height: c_int, format: PixelFormat, headerSize: c_int) Image {
        return rl.LoadImageRaw(fileName, width, height, format, headerSize);
    }
    /// Render `text` with the default font into a new image.
    pub fn initText(text: [*c]const u8, fontSize: c_int, color: Color) Image {
        return rl.ImageText(text, fontSize, color);
    }
    /// Render `text` with a specific font into a new image.
    pub fn initTextEx(font: Font, text: [*c]const u8, fontSize: f32, spacing: f32, tint: Color) Image {
        return rl.ImageTextEx(font, text, fontSize, spacing, tint);
    }
    /// Deep-copy the whole image.
    pub fn copy(image: Image) Image {
        return rl.ImageCopy(image);
    }
    /// Copy a sub-rectangle of `image` into a new image.
    pub fn copyRec(image: Image, rec: Rectangle) Image {
        return rl.ImageFromImage(image, rec);
    }
    /// Generate a solid-color image.
    pub fn GenColor(width: c_int, height: c_int, color: Color) Image {
        return rl.GenImageColor(width, height, color);
    }
    /// Generate a vertical gradient (top -> bottom).
    pub fn GenGradientV(width: c_int, height: c_int, top: Color, bottom: Color) Image {
        return rl.GenImageGradientV(width, height, top, bottom);
    }
    /// Generate a horizontal gradient (left -> right).
    pub fn GenGradientH(width: c_int, height: c_int, left: Color, right: Color) Image {
        return rl.GenImageGradientH(width, height, left, right);
    }
    /// Generate a radial gradient; `density` controls the falloff.
    pub fn GenGradientRadial(width: c_int, height: c_int, density: f32, inner: Color, outer: Color) Image {
        return rl.GenImageGradientRadial(width, height, density, inner, outer);
    }
    /// Generate a checkerboard with `checksX` x `checksY` cells.
    pub fn GenChecked(width: c_int, height: c_int, checksX: c_int, checksY: c_int, col1: Color, col2: Color) Image {
        return rl.GenImageChecked(width, height, checksX, checksY, col1, col2);
    }
    /// Generate white noise; `factor` is the white-pixel ratio.
    pub fn GenWhiteNoise(width: c_int, height: c_int, factor: f32) Image {
        return rl.GenImageWhiteNoise(width, height, factor);
    }
    /// Generate cellular (Worley) noise with the given tile size.
    pub fn GenCellular(width: c_int, height: c_int, tileSize: c_int) Image {
        return rl.GenImageCellular(width, height, tileSize);
    }
    /// Use this image as the window icon.
    pub fn UseAsWindowIcon(self: Image) void {
        rl.SetWindowIcon(self);
    }
};

/// GPU texture handle.
/// NOTE(review): `format` is a bare `c_int` here while `Image.format` is the
/// typed `PixelFormat` enum — presumably an oversight; confirm before changing.
pub const Texture = extern struct { id: c_uint, width: c_int, height: c_int, mipmaps: c_int, format: c_int, };
pub const Texture2D = Texture;
pub const TextureCubemap = Texture;

/// Offscreen render target; Begin/End bracket rendering into it.
pub const RenderTexture = extern struct {
    id: c_uint,
    texture: Texture, // color attachment
    depth: Texture, // depth attachment
    depthTexture: bool,

    pub fn Begin(self: RenderTexture2D) void {
        rl.BeginTextureMode(self);
    }
    pub fn End(_: RenderTexture2D) void {
        rl.EndTextureMode();
    }
};
pub const RenderTexture2D = RenderTexture;

pub const NPatchInfo = extern struct { source: Rectangle, left: c_int, top: c_int, right: c_int, bottom: c_int, layout: c_int, };
pub const CharInfo = extern struct { value: c_int, offsetX: c_int, offsetY: c_int, advanceX: c_int, image: Image, };
pub const Font = extern struct { baseSize: c_int, charsCount: c_int, charsPadding: c_int, texture: Texture2D, recs: [*c]Rectangle, chars: [*c]CharInfo, };

/// 3D camera; Begin/End bracket 3D rendering with this camera.
pub const Camera3D = extern struct {
    position: Vector3,
    target: Vector3,
    up: Vector3,
    fovy: f32, // field of view (Y) in degrees (perspective) or world units (ortho)
    projection: CameraProjection,

    pub fn Begin(self: Camera3D) void {
        rl.BeginMode3D(self);
    }
    /// Advance the camera according to the mode set via `SetMode`.
    pub fn Update(self: *Camera3D) void {
        rl.UpdateCamera(self);
    }
    pub fn GetMatrix(self: Camera3D) Matrix {
        return rl.GetCameraMatrix(self);
    }
    pub fn SetMode(self: Camera3D, mode: CameraMode) void {
        rl.SetCameraMode(self, mode);
    }
    pub fn End(_: Camera3D) void {
        rl.EndMode3D();
    }
};
pub const Camera = Camera3D;

/// 2D camera; Begin/End bracket 2D rendering with this camera.
pub const Camera2D = extern struct {
    offset: Vector2,
    target: Vector2,
    rotation: f32,
    zoom: f32,

    pub fn Begin(self: Camera2D) void {
        rl.BeginMode2D(self);
    }
    pub fn GetMatrix(self: Camera2D) Matrix {
        return rl.GetCameraMatrix2D(self);
    }
    pub fn End(_: Camera2D) void {
        rl.EndMode2D();
    }
};

// Vertex data uploaded to the GPU; the [*c] pointers are C-owned arrays.
pub const Mesh = extern struct { vertexCount: c_int, triangleCount: c_int, vertices: [*c]f32, texcoords: [*c]f32, texcoords2: [*c]f32, normals: [*c]f32, tangents: [*c]f32, colors: [*c]u8, indices: [*c]c_ushort, animVertices: [*c]f32, animNormals: [*c]f32, boneIds: [*c]c_int, boneWeights: [*c]f32, vaoId: c_uint, vboId: [*c]c_uint, };
pub const Shader = extern struct { id: c_uint, locs: [*c]c_int, };
pub const MaterialMap = extern struct { texture: Texture2D, color: Color, value: f32, };
pub const Material = extern struct { shader: Shader, maps: [*c]MaterialMap, params: [4]f32, };
pub const Transform = extern struct { translation: Vector3, rotation: Quaternion, scale: Vector3, };
pub const BoneInfo = extern struct { name: [32]u8, parent: c_int, };
pub const Model = extern struct { transform: Matrix, meshCount: c_int, materialCount: c_int, meshes: [*c]Mesh, materials: [*c]Material, meshMaterial: [*c]c_int, boneCount: c_int, bones: [*c]BoneInfo, bindPose: [*c]Transform, };
pub const ModelAnimation = extern struct { boneCount: c_int, frameCount: c_int, bones: [*c]BoneInfo, framePoses: [*c][*c]Transform, };
pub const Ray = extern struct { position: Vector3, direction: Vector3, };
pub const RayHitInfo = extern struct { hit: bool, distance: f32, position: Vector3, normal: Vector3, };
pub const BoundingBox = extern struct { min: Vector3, max: Vector3, };

// ---- Audio types ----
pub const Wave = extern struct { sampleCount: c_uint, sampleRate: c_uint, sampleSize: c_uint, channels: c_uint, data: ?*anyopaque, };
pub const rAudioBuffer = opaque {};
pub const AudioStream = extern struct { buffer: ?*rAudioBuffer, sampleRate: c_uint, sampleSize: c_uint, channels: c_uint, };
pub const Sound = extern struct { stream: AudioStream, sampleCount: c_uint, };
pub const Music = extern struct { stream: AudioStream, sampleCount: c_uint, looping: bool, ctxType: c_int, ctxData: ?*anyopaque, };

// ---- VR types ----
pub const VrDeviceInfo = extern struct { hResolution: c_int, vResolution: c_int, hScreenSize: f32, vScreenSize: f32, vScreenCenter: f32, eyeToScreenDistance: f32, lensSeparationDistance: f32, interpupillaryDistance: f32, lensDistortionValues: [4]f32, chromaAbCorrection: [4]f32, };
pub const VrStereoConfig = extern struct { projection: [2]Matrix, viewOffset: [2]Matrix, leftLensCenter: [2]f32, rightLensCenter: [2]f32, leftScreenCenter: [2]f32, rightScreenCenter: [2]f32, scale: [2]f32, scaleIn: [2]f32, };

// ---- Enums (values match the C raylib header; bit flags where noted) ----
// Bit flags for SetConfigFlags; combine with bitwise OR on the integer values.
pub const ConfigFlags = enum(c_int) { FLAG_FULLSCREEN_MODE = 2, FLAG_WINDOW_RESIZABLE = 4, FLAG_WINDOW_UNDECORATED = 8, FLAG_WINDOW_TRANSPARENT = 16, FLAG_MSAA_4X_HINT = 32, FLAG_VSYNC_HINT = 64, FLAG_WINDOW_HIDDEN = 128, FLAG_WINDOW_ALWAYS_RUN = 256, FLAG_WINDOW_MINIMIZED = 512, FLAG_WINDOW_MAXIMIZED = 1024, FLAG_WINDOW_UNFOCUSED = 2048, FLAG_WINDOW_TOPMOST = 4096, FLAG_WINDOW_HIGHDPI = 8192, FLAG_INTERLACED_HINT = 65536, };
pub const TraceLogLevel = enum(c_int) { LOG_ALL = 0, LOG_TRACE = 1, LOG_DEBUG = 2, LOG_INFO = 3, LOG_WARNING = 4, LOG_ERROR = 5, LOG_FATAL = 6, LOG_NONE = 7, };
// Keyboard keys; values follow GLFW key codes, plus a few Android keys at the end.
pub const KeyboardKey = enum(c_int) {
    KEY_NULL = 0,
    KEY_APOSTROPHE = 39, KEY_COMMA = 44, KEY_MINUS = 45, KEY_PERIOD = 46, KEY_SLASH = 47,
    KEY_ZERO = 48, KEY_ONE = 49, KEY_TWO = 50, KEY_THREE = 51, KEY_FOUR = 52, KEY_FIVE = 53, KEY_SIX = 54, KEY_SEVEN = 55, KEY_EIGHT = 56, KEY_NINE = 57,
    KEY_SEMICOLON = 59, KEY_EQUAL = 61,
    KEY_A = 65, KEY_B = 66, KEY_C = 67, KEY_D = 68, KEY_E = 69, KEY_F = 70, KEY_G = 71, KEY_H = 72, KEY_I = 73, KEY_J = 74, KEY_K = 75, KEY_L = 76, KEY_M = 77, KEY_N = 78, KEY_O = 79, KEY_P = 80, KEY_Q = 81, KEY_R = 82, KEY_S = 83, KEY_T = 84, KEY_U = 85, KEY_V = 86, KEY_W = 87, KEY_X = 88, KEY_Y = 89, KEY_Z = 90,
    KEY_SPACE = 32, KEY_ESCAPE = 256, KEY_ENTER = 257, KEY_TAB = 258, KEY_BACKSPACE = 259, KEY_INSERT = 260, KEY_DELETE = 261,
    KEY_RIGHT = 262, KEY_LEFT = 263, KEY_DOWN = 264, KEY_UP = 265,
    KEY_PAGE_UP = 266, KEY_PAGE_DOWN = 267, KEY_HOME = 268, KEY_END = 269,
    KEY_CAPS_LOCK = 280, KEY_SCROLL_LOCK = 281, KEY_NUM_LOCK = 282, KEY_PRINT_SCREEN = 283, KEY_PAUSE = 284,
    KEY_F1 = 290, KEY_F2 = 291, KEY_F3 = 292, KEY_F4 = 293, KEY_F5 = 294, KEY_F6 = 295, KEY_F7 = 296, KEY_F8 = 297, KEY_F9 = 298, KEY_F10 = 299, KEY_F11 = 300, KEY_F12 = 301,
    KEY_LEFT_SHIFT = 340, KEY_LEFT_CONTROL = 341, KEY_LEFT_ALT = 342, KEY_LEFT_SUPER = 343,
    KEY_RIGHT_SHIFT = 344, KEY_RIGHT_CONTROL = 345, KEY_RIGHT_ALT = 346, KEY_RIGHT_SUPER = 347, KEY_KB_MENU = 348,
    KEY_LEFT_BRACKET = 91, KEY_BACKSLASH = 92, KEY_RIGHT_BRACKET = 93, KEY_GRAVE = 96,
    KEY_KP_0 = 320, KEY_KP_1 = 321, KEY_KP_2 = 322, KEY_KP_3 = 323, KEY_KP_4 = 324, KEY_KP_5 = 325, KEY_KP_6 = 326, KEY_KP_7 = 327, KEY_KP_8 = 328, KEY_KP_9 = 329,
    KEY_KP_DECIMAL = 330, KEY_KP_DIVIDE = 331, KEY_KP_MULTIPLY = 332, KEY_KP_SUBTRACT = 333, KEY_KP_ADD = 334, KEY_KP_ENTER = 335, KEY_KP_EQUAL = 336,
    // Android keys:
    KEY_BACK = 4,
    //KEY_MENU = 82,
    KEY_VOLUME_UP = 24,
    KEY_VOLUME_DOWN = 25,
};
pub const MouseButton = enum(c_int) { MOUSE_LEFT_BUTTON = 0, MOUSE_RIGHT_BUTTON = 1, MOUSE_MIDDLE_BUTTON = 2, };
pub const MouseCursor = enum(c_int) { MOUSE_CURSOR_DEFAULT = 0, MOUSE_CURSOR_ARROW = 1, MOUSE_CURSOR_IBEAM = 2, MOUSE_CURSOR_CROSSHAIR = 3, MOUSE_CURSOR_POINTING_HAND = 4, MOUSE_CURSOR_RESIZE_EW = 5, MOUSE_CURSOR_RESIZE_NS = 6, MOUSE_CURSOR_RESIZE_NWSE = 7, MOUSE_CURSOR_RESIZE_NESW = 8, MOUSE_CURSOR_RESIZE_ALL = 9, MOUSE_CURSOR_NOT_ALLOWED = 10, };
pub const GamepadButton = enum(c_int) { GAMEPAD_BUTTON_UNKNOWN = 0, GAMEPAD_BUTTON_LEFT_FACE_UP = 1, GAMEPAD_BUTTON_LEFT_FACE_RIGHT = 2, GAMEPAD_BUTTON_LEFT_FACE_DOWN = 3, GAMEPAD_BUTTON_LEFT_FACE_LEFT = 4, GAMEPAD_BUTTON_RIGHT_FACE_UP = 5, GAMEPAD_BUTTON_RIGHT_FACE_RIGHT = 6, GAMEPAD_BUTTON_RIGHT_FACE_DOWN = 7, GAMEPAD_BUTTON_RIGHT_FACE_LEFT = 8, GAMEPAD_BUTTON_LEFT_TRIGGER_1 = 9, GAMEPAD_BUTTON_LEFT_TRIGGER_2 = 10, GAMEPAD_BUTTON_RIGHT_TRIGGER_1 = 11, GAMEPAD_BUTTON_RIGHT_TRIGGER_2 = 12, GAMEPAD_BUTTON_MIDDLE_LEFT = 13, GAMEPAD_BUTTON_MIDDLE = 14, GAMEPAD_BUTTON_MIDDLE_RIGHT = 15, GAMEPAD_BUTTON_LEFT_THUMB = 16, GAMEPAD_BUTTON_RIGHT_THUMB = 17, };
pub const GamepadAxis = enum(c_int) { GAMEPAD_AXIS_LEFT_X = 0, GAMEPAD_AXIS_LEFT_Y = 1, GAMEPAD_AXIS_RIGHT_X = 2, GAMEPAD_AXIS_RIGHT_Y = 3, GAMEPAD_AXIS_LEFT_TRIGGER = 4, GAMEPAD_AXIS_RIGHT_TRIGGER = 5, };
// NOTE(review): "MATERIAL_MAP_BRDG" looks like a typo for raylib's
// MATERIAL_MAP_BRDF, and the value ordering here (BRDG=7, CUBEMAP=8, ...)
// differs from upstream raylib (CUBEMAP=7 ... BRDF=10). It is a public name,
// so renaming/renumbering would break callers — confirm against the bundled
// raylib version before touching it.
pub const MaterialMapIndex = enum(c_int) { MATERIAL_MAP_ALBEDO = 0, MATERIAL_MAP_METALNESS = 1, MATERIAL_MAP_NORMAL = 2, MATERIAL_MAP_ROUGHNESS = 3, MATERIAL_MAP_OCCLUSION = 4, MATERIAL_MAP_EMISSION = 5, MATERIAL_MAP_HEIGHT = 6, MATERIAL_MAP_BRDG = 7, MATERIAL_MAP_CUBEMAP = 8, MATERIAL_MAP_IRRADIANCE = 9, MATERIAL_MAP_PREFILTER = 10, };
pub const ShaderLocationIndex = enum(c_int) { SHADER_LOC_VERTEX_POSITION = 0, SHADER_LOC_VERTEX_TEXCOORD01 = 1, SHADER_LOC_VERTEX_TEXCOORD02 = 2, SHADER_LOC_VERTEX_NORMAL = 3, SHADER_LOC_VERTEX_TANGENT = 4, SHADER_LOC_VERTEX_COLOR = 5, SHADER_LOC_MATRIX_MVP = 6, SHADER_LOC_MATRIX_VIEW = 7, SHADER_LOC_MATRIX_PROJECTION = 8, SHADER_LOC_MATRIX_MODEL = 9, SHADER_LOC_MATRIX_NORMAL = 10, SHADER_LOC_VECTOR_VIEW = 11, SHADER_LOC_COLOR_DIFFUSE = 12, SHADER_LOC_COLOR_SPECULAR = 13, SHADER_LOC_COLOR_AMBIENT = 14, SHADER_LOC_MAP_ALBEDO = 15, SHADER_LOC_MAP_METALNESS = 16, SHADER_LOC_MAP_NORMAL = 17, SHADER_LOC_MAP_ROUGHNESS = 18, SHADER_LOC_MAP_OCCLUSION = 19, SHADER_LOC_MAP_EMISSION = 20, SHADER_LOC_MAP_HEIGHT = 21, SHADER_LOC_MAP_CUBEMAP = 22, SHADER_LOC_MAP_IRRADIANCE = 23, SHADER_LOC_MAP_PREFILTER = 24, SHADER_LOC_MAP_BRDF = 25, };
pub const ShaderUniformDataType = enum(c_int) { SHADER_UNIFORM_FLOAT = 0, SHADER_UNIFORM_VEC2 = 1, SHADER_UNIFORM_VEC3 = 2, SHADER_UNIFORM_VEC4 = 3, SHADER_UNIFORM_INT = 4, SHADER_UNIFORM_IVEC2 = 5, SHADER_UNIFORM_IVEC3 = 6, SHADER_UNIFORM_IVEC4 = 7, SHADER_UNIFORM_SAMPLER2D = 8, };
// NOTE(review): the doubled prefix in PIXELFORMAT_PIXELFORMAT_UNCOMPRESSED_R8G8B8A8
// is a typo, but the correctly-named alias near the bottom of this file points at
// it, so renaming the member would break that public alias' target.
pub const PixelFormat = enum(c_int) { PIXELFORMAT_UNCOMPRESSED_GRAYSCALE = 1, PIXELFORMAT_UNCOMPRESSED_GRAY_ALPHA = 2, PIXELFORMAT_UNCOMPRESSED_R5G6B5 = 3, PIXELFORMAT_UNCOMPRESSED_R8G8B8 = 4, PIXELFORMAT_UNCOMPRESSED_R5G5B5A1 = 5, PIXELFORMAT_UNCOMPRESSED_R4G4B4A4 = 6, PIXELFORMAT_PIXELFORMAT_UNCOMPRESSED_R8G8B8A8 = 7, PIXELFORMAT_UNCOMPRESSED_R32 = 8, PIXELFORMAT_UNCOMPRESSED_R32G32B32 = 9, PIXELFORMAT_UNCOMPRESSED_R32G32B32A32 = 10, PIXELFORMAT_COMPRESSED_DXT1_RGB = 11, PIXELFORMAT_COMPRESSED_DXT1_RGBA = 12, PIXELFORMAT_COMPRESSED_DXT3_RGBA = 13, PIXELFORMAT_COMPRESSED_DXT5_RGBA = 14, PIXELFORMAT_COMPRESSED_ETC1_RGB = 15, PIXELFORMAT_COMPRESSED_ETC2_RGB = 16, PIXELFORMAT_COMPRESSED_ETC2_EAC_RGBA = 17, PIXELFORMAT_COMPRESSED_PVRT_RGB = 18, PIXELFORMAT_COMPRESSED_PVRT_RGBA = 19, PIXELFORMAT_COMPRESSED_ASTC_4x4_RGBA = 20, PIXELFORMAT_COMPRESSED_ASTC_8x8_RGBA = 21, };
pub const TextureFilter = enum(c_int) { TEXTURE_FILTER_POINT = 0, TEXTURE_FILTER_BILINEAR = 1, TEXTURE_FILTER_TRILINEAR = 2, TEXTURE_FILTER_ANISOTROPIC_4X = 3, TEXTURE_FILTER_ANISOTROPIC_8X = 4, TEXTURE_FILTER_ANISOTROPIC_16X = 5, };
pub const TextureWrap = enum(c_int) { TEXTURE_WRAP_REPEAT = 0, TEXTURE_WRAP_CLAMP = 1, TEXTURE_WRAP_MIRROR_REPEAT = 2, TEXTURE_WRAP_MIRROR_CLAMP = 3, };
pub const CubemapLayout = enum(c_int) { CUBEMAP_LAYOUT_AUTO_DETECT = 0, CUBEMAP_LAYOUT_LINE_VERTICAL = 1, CUBEMAP_LAYOUT_LINE_HORIZONTAL = 2, CUBEMAP_LAYOUT_CROSS_THREE_BY_FOUR = 3, CUBEMAP_LAYOUT_CROSS_FOUR_BY_THREE = 4, CUBEMAP_LAYOUT_PANORAMA = 5, };
pub const FontType = enum(c_int) { FONT_DEFAULT = 0, FONT_BITMAP = 1, FONT_SDF = 2, };
pub const BlendMode = enum(c_int) { BLEND_ALPHA = 0, BLEND_ADDITIVE = 1, BLEND_MULTIPLIED = 2, BLEND_ADD_COLORS = 3, BLEND_SUBTRACT_COLORS = 4, BLEND_CUSTOM = 5, };
// Bit flags; combine the integer values to select multiple gestures.
pub const Gestures = enum(c_int) { GESTURE_NONE = 0, GESTURE_TAP = 1, GESTURE_DOUBLETAP = 2, GESTURE_HOLD = 4, GESTURE_DRAG = 8, GESTURE_SWIPE_RIGHT = 16, GESTURE_SWIPE_LEFT = 32, GESTURE_SWIPE_UP = 64, GESTURE_SWIPE_DOWN = 128, GESTURE_PINCH_IN = 256, GESTURE_PINCH_OUT = 512, };
pub const CameraMode = enum(c_int) { CAMERA_CUSTOM = 0, CAMERA_FREE = 1, CAMERA_ORBITAL = 2, CAMERA_FIRST_PERSON = 3, CAMERA_THIRD_PERSON = 4, };
pub const CameraProjection = enum(c_int) { CAMERA_PERSPECTIVE = 0, CAMERA_ORTHOGRAPHIC = 1, };
pub const NPatchType = enum(c_int) { NPT_9PATCH = 0, NPT_3PATCH_VERTICAL = 1, NPT_3PATCH_HORIZONTAL = 2, };

// ---- Callback function-pointer types (C calling convention) ----
// pub const TraceLogCallback = ?fn (c_int, [*c]const u8, [*c]struct___va_list_tag) callconv(.C) void;
pub const LoadFileDataCallback = ?fn ([*c]const u8, [*c]c_uint) callconv(.C) [*c]u8;
pub const SaveFileDataCallback = ?fn ([*c]const u8, ?*anyopaque, c_uint) callconv(.C) bool;
pub const LoadFileTextCallback = ?fn ([*c]const u8) callconv(.C) [*c]u8;
pub const SaveFileTextCallback = ?fn ([*c]const u8, [*c]u8) callconv(.C) bool;

// ---- Limits and constants (mirroring raylib's config) ----
pub const MAX_TOUCH_POINTS = 10;
pub const MAX_MATERIAL_MAPS = 12;
pub const MAX_SHADER_LOCATIONS = 32;
pub const PI = 3.141593;

// ---- Backwards-compatibility aliases for older raylib API names ----
pub const SpriteFont = rl.Font;
pub const SubText = rl.TextSubtext;
pub const FormatText = rl.TextFormat;
pub const LoadText = rl.LoadFileText;
pub const GetExtension = rl.GetFileExtension;
pub const GetImageData = rl.LoadImageColors;
pub const FILTER_POINT = TextureFilter.TEXTURE_FILTER_POINT;
pub const FILTER_BILINEAR = TextureFilter.TEXTURE_FILTER_BILINEAR;
pub const MAP_DIFFUSE = MATERIAL_MAP_DIFFUSE;
// Correctly-spelled alias papering over the doubled-prefix enum member above.
pub const PIXELFORMAT_UNCOMPRESSED_R8G8B8A8 = PixelFormat.PIXELFORMAT_PIXELFORMAT_UNCOMPRESSED_R8G8B8A8;
pub const MATERIAL_MAP_DIFFUSE = MaterialMapIndex.MATERIAL_MAP_ALBEDO;
pub const MATERIAL_MAP_SPECULAR = MaterialMapIndex.MATERIAL_MAP_METALNESS;
pub const SHADER_LOC_MAP_DIFFUSE = ShaderLocationIndex.SHADER_LOC_MAP_ALBEDO;
pub const SHADER_LOC_MAP_SPECULAR = ShaderLocationIndex.SHADER_LOC_MAP_METALNESS;
lib/raylib-zig.zig
//! Parallel FASTA sequence-search tool: reads a database FASTA file,
//! builds a k-mer index over it, searches query sequences against it on
//! multiple worker threads, and streams hits to an output file from a
//! dedicated writer thread.
const std = @import("std");
const print = std.debug.print;
const ArrayList = std.ArrayList;
const Allocator = std.mem.Allocator;
const builtin = @import("builtin");
const utils = @import("utils.zig");
const ea = @import("extend_align.zig");
const ba = @import("banded_align.zig");
const Cigar = @import("cigar.zig").Cigar;
const CigarOp = @import("cigar.zig").CigarOp;
const Search = @import("search.zig").Search;
const SearchOptions = @import("search.zig").SearchOptions;
const SearchHitList = @import("search.zig").SearchHitList;
const Sequence = @import("sequence.zig").Sequence;
const SequenceList = @import("sequence.zig").SequenceList;
const FastaReader = @import("io/io.zig").FastaReader;
const AlnoutWriter = @import("io/io.zig").AlnoutWriter;
const Database = @import("database.zig").Database;
const Highscores = @import("highscores.zig").Highscores;
const bio = @import("bio/bio.zig");
const alphabet = bio.alphabet;
const Args = @import("args.zig").Args;
const parseArgs = @import("args.zig").parseArgs;
const Progress = @import("progress.zig").Progress;

// Global progress reporter shared by all pipeline stages (single writer per stage).
var progress = Progress(SearchStage){};

/// One searched query together with the hits found for it.
/// Owns deep clones of both, so it outlives the worker's per-iteration state.
pub fn Result(comptime A: type) type {
    return struct {
        const Self = @This();
        allocator: std.mem.Allocator,
        query: Sequence(A),
        hits: SearchHitList(A),

        /// Clones `query` and `hits`; caller must `deinit` the result.
        pub fn init(allocator: std.mem.Allocator, query: Sequence(A), hits: SearchHitList(A)) !Self {
            return Self{
                .allocator = allocator,
                .query = try query.clone(),
                .hits = try hits.clone(),
            };
        }

        pub fn deinit(self: *Self) void {
            self.query.deinit();
            self.hits.deinit();
        }
    };
}

/// Worker-thread namespace: pops queries from a shared queue, searches the
/// database, and pushes results (queries with >0 hits) to the results queue.
pub fn Worker(comptime DatabaseType: type) type {
    return struct {
        // const NumSearchedQueriesToReport = 1;
        pub const WorkItem = struct {
            query: Sequence(DatabaseType.Alphabet),
        };
        /// Shared state for all worker threads; counters are atomics because
        /// multiple workers update them concurrently.
        pub const WorkerContext = struct {
            allocator: std.mem.Allocator,
            queue: *std.atomic.Queue(WorkItem),
            results: *std.atomic.Queue(Result(DatabaseType.Alphabet)),
            database: *DatabaseType,
            search_options: SearchOptions,
            search_count: *std.atomic.Atomic(usize),
            hit_count: *std.atomic.Atomic(usize),
        };

        /// Thread entry point; returns when the work queue is drained.
        fn entryPoint(context: *WorkerContext) !void {
            const allocator = context.allocator;
            const database = context.database;
            // Each worker keeps its own Search instance (scratch buffers etc.).
            var search = try Search(DatabaseType).init(allocator, database, context.search_options);
            defer search.deinit();
            while (context.queue.get()) |node| {
                const query = node.data.query;
                var hits = SearchHitList(DatabaseType.Alphabet).init(allocator);
                defer hits.deinit();
                try search.search(query, &hits);
                if (hits.list.items.len > 0) {
                    // Result.init clones query+hits, so the deferred deinit above is safe.
                    const out = try context.allocator.create(std.atomic.Queue(Result(DatabaseType.Alphabet)).Node);
                    out.data = try Result(DatabaseType.Alphabet).init(allocator, query, hits);
                    context.results.put(out);
                    _ = context.hit_count.fetchAdd(hits.list.items.len, .Monotonic);
                }
                allocator.destroy(node);
                _ = context.search_count.fetchAdd(1, .Monotonic);
            }
        }
    };
}

/// Writer-thread namespace: drains the results queue and appends each
/// result to the output file in Alnout format.
pub fn Writer(comptime A: type) type {
    return struct {
        pub const WriterContext = struct {
            allocator: std.mem.Allocator,
            results: *std.atomic.Queue(Result(A)),
            search_finished: *std.atomic.Atomic(bool), // set by main once all workers joined
            written_count: *std.atomic.Atomic(usize),
            path: []const u8,
        };

        /// Thread entry point; polls the queue, sleeping briefly when empty,
        /// and exits once the queue is empty AND the search is flagged finished.
        fn entryPoint(context: *WriterContext) !void {
            const dir: std.fs.Dir = std.fs.cwd();
            const file: std.fs.File = try dir.createFile(context.path, .{});
            defer file.close();
            while (true) {
                const node = context.results.get();
                if (node == null) {
                    var search_finished = context.search_finished.load(.SeqCst);
                    if (search_finished) {
                        break;
                    }
                    // Queue momentarily empty but searches still running: back off.
                    std.time.sleep(5 * std.time.ns_per_ms);
                    continue;
                }
                var result = node.?.data;
                defer result.deinit();
                try AlnoutWriter(A).write(file.writer(), result.query, result.hits);
                context.allocator.destroy(node.?);
                _ = context.written_count.fetchAdd(1, .Monotonic);
            }
        }
    };
}

// Pipeline stages for progress reporting, in execution order.
const SearchStage = enum {
    read_database,
    analyze_database,
    index_database,
    read_queries,
    search_database,
    write_hits,
};

/// Builds and runs the whole search pipeline for one alphabet (DNA/Protein).
pub fn SearchExec(comptime A: type) type {
    // k-mer size is alphabet-specific: 8 for DNA (4 symbols), 5 for protein.
    const KmerLength = switch (A) {
        alphabet.DNA => 8,
        alphabet.Protein => 5,
        else => unreachable,
    };
    const databaseType = Database(A, KmerLength);
    const workerType = Worker(databaseType);
    const writerType = Writer(A);
    return struct {
        pub fn run(allocator: std.mem.Allocator, args: Args) !void {
            progress.add(.read_database, "Read database", .bytes);
            progress.add(.analyze_database, "Analyze database", .counts);
            progress.add(.index_database, "Index database", .counts);
            progress.add(.read_queries, "Read queries", .bytes);
            progress.add(.search_database, "Search database", .counts);
            progress.add(.write_hits, "Write hits", .counts);
            try progress.start();
            // args paths are fixed-size zero-terminated buffers; trim at the NUL.
            const filePathToDatabase = std.mem.sliceTo(&args.db, 0);
            const filePathToQuery = std.mem.sliceTo(&args.query, 0);
            const filePathToOutput = std.mem.sliceTo(&args.out, 0);
            const dir: std.fs.Dir = std.fs.cwd();
            // Read DB
            // NOTE(review): bench_start is assigned (and reassigned below) but
            // never read — leftover benchmarking scaffold; candidate for removal.
            var bench_start = std.time.milliTimestamp();
            var sequences = SequenceList(A).init(allocator);
            defer sequences.deinit();
            const db_file: std.fs.File = try dir.openFile(filePathToDatabase, .{ .read = true });
            defer db_file.close();
            var db_source = std.io.StreamSource{ .file = db_file };
            var db_reader = FastaReader(A).init(allocator, &db_source);
            defer db_reader.deinit();
            progress.activate(.read_database);
            while (try db_reader.next()) |sequence| {
                try sequences.list.append(sequence);
                progress.set(.read_database, db_reader.bytes_read, db_reader.bytes_total);
            }
            // Index
            // Progress callback invoked by the database while analyzing/indexing.
            const callback = struct {
                pub fn callback(typ: databaseType.ProgressType, a: usize, b: usize) void {
                    switch (typ) {
                        .analyze => {
                            progress.activate(.analyze_database);
                            progress.set(.analyze_database, a, b);
                        },
                        .index => {
                            progress.activate(.index_database);
                            progress.set(.index_database, a, b);
                        },
                    }
                }
            }.callback;
            bench_start = std.time.milliTimestamp();
            // toOwnedSlice transfers sequence ownership to the database.
            var db = try databaseType.init(allocator, sequences.list.toOwnedSlice(), callback);
            defer db.deinit();
            // Read Query
            var queries = SequenceList(A).init(allocator);
            defer queries.deinit();
            const query_file: std.fs.File = try dir.openFile(filePathToQuery, .{ .read = true });
            defer query_file.close();
            var query_source = std.io.StreamSource{ .file = query_file };
            var query_reader = FastaReader(A).init(allocator, &query_source);
            defer query_reader.deinit();
            progress.activate(.read_queries);
            while (try query_reader.next()) |sequence| {
                try queries.list.append(sequence);
                progress.set(.read_queries, query_reader.bytes_read, query_reader.bytes_total);
            }
            // Fill queue
            var queue = std.atomic.Queue(workerType.WorkItem).init();
            var results = std.atomic.Queue(Result(A)).init();
            var search_finished = std.atomic.Atomic(bool).init(false);
            var search_count = std.atomic.Atomic(usize).init(0);
            var hit_count = std.atomic.Atomic(usize).init(0);
            var written_count = std.atomic.Atomic(usize).init(0);
            // One context shared by ALL worker threads (read-only after this point
            // except for the atomics).
            var context: workerType.WorkerContext = .{
                .allocator = allocator,
                .queue = &queue,
                .database = &db,
                .results = &results,
                .search_count = &search_count,
                .hit_count = &hit_count,
                .search_options = .{
                    .max_accepts = args.max_hits,
                    .max_rejects = args.max_rejects,
                    .min_identity = args.min_identity,
                    .strand = args.strand,
                },
            };
            var writer_context: writerType.WriterContext = .{
                .allocator = allocator,
                .results = &results,
                .search_finished = &search_finished,
                .path = filePathToOutput,
                .written_count = &written_count,
            };
            // Enqueue all queries before starting the workers.
            for (queries.list.items) |query| {
                const node = try context.allocator.create(std.atomic.Queue(workerType.WorkItem).Node);
                node.data = workerType.WorkItem{
                    .query = query,
                };
                queue.put(node);
            }
            // Search in threads
            bench_start = std.time.milliTimestamp();
            const worker_count = std.math.max(1, std.Thread.getCpuCount() catch 1);
            const threads = try allocator.alloc(std.Thread, worker_count);
            defer allocator.free(threads);
            var writer_thread = try std.Thread.spawn(.{}, writerType.entryPoint, .{&writer_context});
            for (threads) |*thread| {
                thread.* = try std.Thread.spawn(.{}, workerType.entryPoint, .{&context});
            }
            // wait until queue is empty (polling loop; workers drain the queue)
            progress.activate(.search_database);
            while (!queue.isEmpty()) {
                progress.set(.search_database, search_count.load(.SeqCst), queries.list.items.len);
                std.time.sleep(50 * std.time.ns_per_ms);
            }
            progress.set(.search_database, queries.list.items.len, queries.list.items.len); // 100%
            // wait for searches to finish
            for (threads) |thread| {
                thread.join();
            }
            // tell writer all searches are done, write remaining results and then be happy
            _ = search_finished.swap(true, .SeqCst);
            // wait until results are written
            progress.activate(.write_hits);
            while (!results.isEmpty()) {
                progress.set(.write_hits, written_count.load(.SeqCst), hit_count.load(.SeqCst));
                std.time.sleep(50 * std.time.ns_per_ms);
            }
            progress.set(.write_hits, hit_count.load(.SeqCst), hit_count.load(.SeqCst));
            writer_thread.join();
            progress.finish();
        }
    };
}

/// CLI entry point: picks the allocator by build mode (checked GPA in Debug,
/// C allocator otherwise) and dispatches on the requested alphabet.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = switch (builtin.mode) {
        .Debug => gpa.allocator(),
        else => std.heap.c_allocator,
    };
    const args = parseArgs(allocator) catch std.os.exit(1);
    switch (args.mode) {
        .dna => try SearchExec(alphabet.DNA).run(allocator, args),
        .protein => try SearchExec(alphabet.Protein).run(allocator, args),
    }
}

test "specs" {
    std.testing.refAllDecls(@This());
}
src/main.zig
//! ZT demo application: hosts a set of sample scenes behind an ImGui side
//! panel that also exposes engine settings and frame statistics.
const std = @import("std");
const zt = @import("zt");
const ig = @import("imgui");
const zg = zt.custom_components;

// Human-readable scene names, index-aligned with `scenes` below.
const scenes_n = [_][]const u8{
    "2D Rendering",
    "2D Shaders",
    "2D Render Targets",
    "2D Spatial Hash (Squares)",
    "2D Collider Support",
    "ImGui Demo",
};
// Per-scene update functions, dispatched by `currentScene` each frame.
const scenes = [_]fn (*SampleApplication.Context) void{
    @import("scenes/renderer.zig").update,
    @import("scenes/shaders.zig").update,
    @import("scenes/rendertarget.zig").update,
    @import("scenes/spatialhash_squares.zig").update,
    @import("scenes/colliders.zig").update,
    @import("scenes/imgui.zig").update,
};

/// SampleData will be available through the application's context.
pub const SampleData = struct {
    currentScene: usize = 0, // index into `scenes`/`scenes_n`
    render: zt.game.Renderer = undefined, // initialized in main before use
    sheet: zt.gl.Texture = undefined,
    redShader: zt.gl.Shader = undefined,
    consoleOpen: bool = true, // side panel visibility, toggled with the grave key
};
pub const SampleApplication = zt.App(SampleData);

pub fn main() !void {
    var context = SampleApplication.begin(std.heap.c_allocator);
    // Lets customize!
    var io = ig.igGetIO();
    var font = context.addFont(zt.path("public-sans.ttf"), 14.0);
    io.*.FontDefault = font;
    // Set up state
    context.settings.energySaving = false; // Some examples are games, and will benefit from this.
    context.data.render = zt.game.Renderer.init();
    context.data.sheet = try zt.gl.Texture.init(zt.path("texture/sheet.png"));
    context.data.sheet.setNearestFilter(); // Pixel art looks better with nearest filters.
    // Creating a shader from `zt.game.Renderer` only needs a fragment shader's source as the vertex shader
    // will be provided by `zt.game.Renderer`. If you need more flexibility than this you'll want to
    // edit ZT itself, or create your own buffer type.
    context.data.redShader = zt.game.Renderer.createShader(@embedFile("scenes/shader/red.fragment"));
    context.setWindowSize(1280, 720);
    context.setWindowTitle("ZT Demo");
    context.setWindowIcon(zt.path("texture/ico.png"));
    // You control your own main loop, all you got to do is call begin and end frame,
    // and zt will handle the rest.
    while (context.open) {
        context.beginFrame();
        inspectContext(context);
        // Call into the selected demo:
        var index = std.math.clamp(context.data.currentScene, 0, scenes.len - 1);
        scenes[index](context);
        context.endFrame();
    }
    context.data.sheet.deinit();
    context.data.render.deinit();
    context.deinit();
}

// This is a simple side panel that will display information about the scene, your context, and settings.
fn inspectContext(ctx: *SampleApplication.Context) void {
    // Basic toggle
    const glfw = @import("glfw");
    var io = ig.igGetIO();
    // Toggle the panel on the frame the grave/backtick key is first pressed
    // (KeysDownDuration is exactly 0.0 only on the initial press).
    if (io.*.KeysDownDuration[glfw.GLFW_KEY_GRAVE_ACCENT] == 0.0) {
        ctx.data.consoleOpen = !ctx.data.consoleOpen;
    }
    if (!ctx.data.consoleOpen) return;
    const flags = ig.ImGuiWindowFlags_NoMove | ig.ImGuiWindowFlags_NoTitleBar | ig.ImGuiWindowFlags_NoResize;
    // Fixed-position panel pinned to the left edge, full window height minus margins.
    ig.igSetNextWindowPos(zt.math.vec2(8, 8), ig.ImGuiCond_Always, .{});
    ig.igSetNextWindowSize(zt.math.vec2(300, io.*.DisplaySize.y - 16), ig.ImGuiCond_Always);
    if (ig.igBegin("Context Inspection", null, flags)) {
        ig.igText("Data Settings");
        // zg.ztEdit("Current Scene", &ctx.data.currentScene);
        if (ig.igBeginListBox("##Listbox pog", .{})) {
            var i: usize = 0;
            while (i < scenes.len) : (i += 1) {
                if (ig.igSelectable_Bool(scenes_n[i].ptr, i == ctx.data.currentScene, ig.ImGuiSelectableFlags_SpanAvailWidth, .{})) {
                    ctx.data.currentScene = i;
                }
            }
            ig.igEndListBox();
        }
        ig.igSeparator();
        ig.igText("Settings");
        _ = zg.ztEdit("Energy Saving", &ctx.settings.energySaving);
        if (ig.igCheckbox("V Sync", &ctx.settings.vsync)) {
            // The vsync setting is only a getter, setting it does nothing.
            // So on change, we follow through with the real call that changes it.
            ctx.setVsync(ctx.settings.vsync);
        }
        _ = zg.ztEdit("ig.ImGui Active (Warning!!)", &ctx.settings.imguiActive);
        ig.igSeparator();
        ig.igText("Information");
        zg.ztText("{d:.1}fps", .{ctx.time.fps});
    }
    ig.igEnd();
}
example/src/main.zig
const zig_std = @import("std");

// Import all runtime namespaces
pub usingnamespace @import("common/ir.zig");
pub usingnamespace @import("common/compile-unit.zig");
pub usingnamespace @import("common/disassembler.zig");
pub usingnamespace @import("common/decoder.zig");

/// Contains functions and structures for executing LoLa code.
pub const runtime = struct {
    usingnamespace @import("runtime/value.zig");
    usingnamespace @import("runtime/environment.zig");
    usingnamespace @import("runtime/vm.zig");
    usingnamespace @import("runtime/context.zig");
    usingnamespace @import("runtime/objects.zig");

    pub const EnvironmentMap = @import("runtime/environmentmap.zig").EnvironmentMap;
};

/// LoLa libraries that provide pre-defined functions and variables.
pub const libs = @import("libraries/libs.zig");

/// Contains functions and structures to compile LoLa code.
pub const compiler = struct {
    pub const Diagnostics = @import("compiler/diagnostics.zig").Diagnostics;
    pub const Location = @import("compiler/location.zig").Location;
    pub const tokenizer = @import("compiler/tokenizer.zig");
    pub const parser = @import("compiler/parser.zig");
    pub const ast = @import("compiler/ast.zig");
    pub const validate = @import("compiler/analysis.zig").validate;
    pub const generateIR = @import("compiler/codegen.zig").generateIR;

    /// Compiles a LoLa source code into a CompileUnit.
    /// - `allocator` is used to perform all allocations in the compilation process.
    /// - `diagnostics` will contain all diagnostic messages after compilation.
    /// - `chunk_name` is the name of the source code piece. This is the name that will be used to refer to chunk in error messages, it is usually the file name.
    /// - `source_code` is the LoLa source code that should be compiled.
    /// The function returns either a compile unit when `source_code` is a valid program, otherwise it will return `null`.
    pub fn compile(
        allocator: *zig_std.mem.Allocator,
        diagnostics: *Diagnostics,
        chunk_name: []const u8,
        source_code: []const u8,
    ) !?CompileUnit {
        // Pipeline: tokenize -> parse -> validate -> generate IR.
        // Intermediate results (token sequence, AST) are freed here; only the
        // final CompileUnit is handed to the caller.
        const seq = try tokenizer.tokenize(allocator, diagnostics, chunk_name, source_code);
        defer allocator.free(seq);
        var pgm = try parser.parse(allocator, diagnostics, seq);
        defer pgm.deinit();
        const valid_program = try validate(allocator, diagnostics, pgm);
        // Semantic errors are reported through `diagnostics`; signal them with `null`,
        // reserving the error union for allocation/IO failures.
        if (!valid_program)
            return null;
        return try generateIR(allocator, pgm, chunk_name);
    }
};

comptime {
    if (zig_std.builtin.is_test) {
        // include tests: reference every submodule so its `test` blocks are compiled and run.
        _ = @import("libraries/stdlib.zig");
        _ = @import("libraries/runtime.zig");
        _ = @import("compiler/diagnostics.zig");
        _ = @import("compiler/string-escaping.zig");
        _ = @import("compiler/codegen.zig");
        _ = @import("compiler/code-writer.zig");
        _ = @import("compiler/typeset.zig");
        _ = @import("compiler/analysis.zig");
        _ = @import("compiler/tokenizer.zig");
        _ = @import("compiler/parser.zig");
        _ = @import("compiler/location.zig");
        _ = @import("compiler/ast.zig");
        _ = @import("compiler/scope.zig");
        _ = @import("runtime/vm.zig");
        _ = @import("runtime/objects.zig");
        _ = @import("runtime/environment.zig");
        _ = @import("runtime/environmentmap.zig");
        _ = @import("runtime/value.zig");
        _ = @import("runtime/context.zig");
        _ = @import("common/decoder.zig");
        _ = @import("common/disassembler.zig");
        _ = @import("common/utility.zig");
        _ = @import("common/ir.zig");
        _ = @import("common/compile-unit.zig");
        _ = compiler.compile;
        _ = libs.runtime;
        _ = libs.std;
    }
}
// src/library/main.zig
const std = @import("std");
const c = @import("c.zig");
const types = @import("types.zig");
const GlyphSlot = @import("GlyphSlot.zig");
const Library = @import("Library.zig");
const Error = @import("error.zig").Error;
const intToError = @import("error.zig").intToError;
const utils = @import("utils.zig");

// This file is itself the struct type (file-as-struct idiom).
const Face = @This();

/// Mirrors FreeType's FT_Size_Metrics layout (extern so it can alias the C struct).
pub const SizeMetrics = extern struct {
    x_ppem: u16,
    y_ppem: u16,
    x_scale: c_long,
    y_scale: c_long,
    ascender: c_long,
    descender: c_long,
    height: c_long,
    max_advance: c_long,
};

/// Kerning query modes, mapped 1:1 onto FT_KERNING_* values.
pub const KerningMode = enum(u2) {
    default = c.FT_KERNING_DEFAULT,
    unfitted = c.FT_KERNING_UNFITTED,
    unscaled = c.FT_KERNING_UNSCALED,
};

/// Type-safe view of the FT_LOAD_* bit flags; converted to the raw
/// integer with `toBitFields` before crossing the C boundary.
pub const LoadFlags = packed struct {
    no_scale: bool = false,
    no_hinting: bool = false,
    render: bool = false,
    no_bitmap: bool = false,
    vertical_layout: bool = false,
    force_autohint: bool = false,
    crop_bitmap: bool = false,
    pedantic: bool = false,
    // NOTE(review): "with" looks like a typo for "width" (matches FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH),
    // but renaming the public field would break callers — left as-is.
    ignore_global_advance_with: bool = false,
    no_recurse: bool = false,
    ignore_transform: bool = false,
    monochrome: bool = false,
    linear_design: bool = false,
    no_autohint: bool = false,
    target_normal: bool = false,
    target_light: bool = false,
    target_mono: bool = false,
    target_lcd: bool = false,
    target_lcd_v: bool = false,
    color: bool = false,

    /// The raw FT_LOAD_* value for each field, in field order.
    pub const Flag = enum(u21) {
        no_scale = c.FT_LOAD_NO_SCALE,
        no_hinting = c.FT_LOAD_NO_HINTING,
        render = c.FT_LOAD_RENDER,
        no_bitmap = c.FT_LOAD_NO_BITMAP,
        vertical_layout = c.FT_LOAD_VERTICAL_LAYOUT,
        force_autohint = c.FT_LOAD_FORCE_AUTOHINT,
        crop_bitmap = c.FT_LOAD_CROP_BITMAP,
        pedantic = c.FT_LOAD_PEDANTIC,
        ignore_global_advance_with = c.FT_LOAD_IGNORE_GLOBAL_ADVANCE_WIDTH,
        no_recurse = c.FT_LOAD_NO_RECURSE,
        ignore_transform = c.FT_LOAD_IGNORE_TRANSFORM,
        monochrome = c.FT_LOAD_MONOCHROME,
        linear_design = c.FT_LOAD_LINEAR_DESIGN,
        no_autohint = c.FT_LOAD_NO_AUTOHINT,
        target_normal = c.FT_LOAD_TARGET_NORMAL,
        target_light = c.FT_LOAD_TARGET_LIGHT,
        target_mono = c.FT_LOAD_TARGET_MONO,
        target_lcd = c.FT_LOAD_TARGET_LCD,
        target_lcd_v = c.FT_LOAD_TARGET_LCD_V,
// (continuation of LoadFlags.Flag)
        color = c.FT_LOAD_COLOR,
    };

    /// Collapses the bool fields into the raw FT_LOAD_* bit mask.
    pub fn toBitFields(flags: LoadFlags) u21 {
        return utils.structToBitFields(u21, Flag, flags);
    }
};

/// Type-safe view of FT_STYLE_FLAG_* bits.
pub const StyleFlags = packed struct {
    bold: bool = false,
    italic: bool = false,

    pub const Flag = enum(u2) {
        bold = c.FT_STYLE_FLAG_BOLD,
        italic = c.FT_STYLE_FLAG_ITALIC,
    };

    /// Collapses the bool fields into the raw FT_STYLE_FLAG_* bit mask.
    pub fn toBitFields(flags: StyleFlags) u2 {
        // BUG FIX: previously called `utils.structToBitFields(u2, StyleFlags, Flag, flags)`,
        // passing a stray `StyleFlags` argument. The helper's shape — demonstrated by
        // LoadFlags.toBitFields above — is (IntType, FlagEnum, flags).
        return utils.structToBitFields(u2, Flag, flags);
    }
};

handle: c.FT_Face,
glyph: GlyphSlot,

/// Wraps an already-created FT_Face handle; the Face does not take ownership
/// until `deinit` is called.
pub fn init(handle: c.FT_Face) Face {
    return Face{
        .handle = handle,
        .glyph = GlyphSlot.init(handle.*.glyph),
    };
}

/// Destroys the underlying FT_Face. Deallocation must not fail, so errors are logged, not returned.
pub fn deinit(self: Face) void {
    intToError(c.FT_Done_Face(self.handle)) catch |err| {
        std.log.err("mach/freetype: Failed to destroy Face: {}", .{err});
    };
}

/// Attaches auxiliary data (e.g. an AFM metrics file) from a file path.
pub fn attachFile(self: Face, path: []const u8) Error!void {
    return self.attachStream(.{
        .flags = .{ .path = true },
        .data = .{ .path = path },
    });
}

/// Attaches auxiliary data from an in-memory buffer. Caller keeps ownership of `bytes`.
pub fn attachMemory(self: Face, bytes: []const u8) Error!void {
    return self.attachStream(.{
        .flags = .{ .memory = true },
        .data = .{ .memory = bytes },
    });
}

/// Attaches auxiliary data described by `args` (FT_Attach_Stream).
pub fn attachStream(self: Face, args: types.OpenArgs) Error!void {
    return intToError(c.FT_Attach_Stream(self.handle, &args.toCInterface()));
}

/// Sets the character size (FT_Set_Char_Size); sizes are in FreeType's 26.6 point units per its API.
pub fn setCharSize(self: Face, pt_width: i32, pt_height: i32, horz_resolution: u16, vert_resolution: u16) Error!void {
    return intToError(c.FT_Set_Char_Size(self.handle, pt_width, pt_height, horz_resolution, vert_resolution));
}

/// Sets the nominal pixel size (FT_Set_Pixel_Sizes).
pub fn setPixelSizes(self: Face, pixel_width: u32, pixel_height: u32) Error!void {
    return intToError(c.FT_Set_Pixel_Sizes(self.handle, pixel_width, pixel_height));
}

/// Loads the glyph at `index` into this face's glyph slot (FT_Load_Glyph).
pub fn loadGlyph(self: Face, index: u32, flags: LoadFlags) Error!void {
    return intToError(c.FT_Load_Glyph(self.handle, index, flags.toBitFields()));
}

/// Loads the glyph for character code `char` into the glyph slot (FT_Load_Char).
pub fn loadChar(self: Face, char: u32, flags: LoadFlags) Error!void {
    return intToError(c.FT_Load_Char(self.handle, char, flags.toBitFields()));
}

/// Sets the transform applied at glyph load time; null means identity/zero delta.
pub fn setTransform(self: Face, matrix: ?types.Matrix, delta: ?types.Vector) Error!void {
    var m = matrix orelse
// (continuation of setTransform)
    std.mem.zeroes(types.Matrix);
    var d = delta orelse std.mem.zeroes(types.Vector);
    return c.FT_Set_Transform(self.handle, @ptrCast(*c.FT_Matrix, &m), @ptrCast(*c.FT_Vector, &d));
}

/// Returns the glyph index for a character code, or null when the face has no
/// glyph for it (FreeType reserves index 0 for "missing glyph").
pub fn getCharIndex(self: Face, index: u32) ?u32 {
    const i = c.FT_Get_Char_Index(self.handle, index);
    return if (i == 0) null else i;
}

/// Queries the kerning vector between two glyph indices (FT_Get_Kerning).
pub fn getKerning(self: Face, left_char_index: u32, right_char_index: u32, mode: KerningMode) Error!types.Vector {
    var vec = std.mem.zeroes(types.Vector);
    try intToError(c.FT_Get_Kerning(self.handle, left_char_index, right_char_index, @enumToInt(mode), @ptrCast(*c.FT_Vector, &vec)));
    return vec;
}

// --- Capability queries: thin wrappers over the FT_HAS_* / FT_IS_* macros. ---
pub fn hasHorizontal(self: Face) bool {
    return c.FT_HAS_HORIZONTAL(self.handle);
}

pub fn hasVertical(self: Face) bool {
    return c.FT_HAS_VERTICAL(self.handle);
}

pub fn hasKerning(self: Face) bool {
    return c.FT_HAS_KERNING(self.handle);
}

pub fn hasFixedSizes(self: Face) bool {
    return c.FT_HAS_FIXED_SIZES(self.handle);
}

pub fn hasGlyphNames(self: Face) bool {
    return c.FT_HAS_GLYPH_NAMES(self.handle);
}

pub fn hasColor(self: Face) bool {
    return c.FT_HAS_COLOR(self.handle);
}

pub fn isScalable(self: Face) bool {
    return c.FT_IS_SCALABLE(self.handle);
}

pub fn isSfnt(self: Face) bool {
    return c.FT_IS_SFNT(self.handle);
}

pub fn isFixedWidth(self: Face) bool {
    return c.FT_IS_FIXED_WIDTH(self.handle);
}

pub fn isCidKeyed(self: Face) bool {
    return c.FT_IS_CID_KEYED(self.handle);
}

pub fn isTricky(self: Face) bool {
    return c.FT_IS_TRICKY(self.handle);
}

// --- Metric accessors: read fields straight off the underlying FT_FaceRec. ---
pub fn ascender(self: Face) i16 {
    return self.handle.*.ascender;
}

pub fn descender(self: Face) i16 {
    return self.handle.*.descender;
}

pub fn emSize(self: Face) u16 {
    return self.handle.*.units_per_EM;
}

pub fn height(self: Face) i16 {
    return self.handle.*.height;
}

pub fn maxAdvanceWidth(self: Face) i16 {
    return self.handle.*.max_advance_width;
}

pub fn maxAdvanceHeight(self: Face) i16 {
    return self.handle.*.max_advance_height;
}

pub fn underlinePosition(self: Face) i16 {
    return self.handle.*.underline_position;
}

pub fn underlineThickness(self: Face) i16 {
    return self.handle.*.underline_thickness;
}

pub fn numFaces(self: Face) i64 {
    return self.handle.*.num_faces;
}

pub fn numGlyphs(self: Face) i64 {
    return self.handle.*.num_glyphs;
}

/// Family name, or null when the font carries none. The slice aliases
/// FreeType-owned memory — valid only for the Face's lifetime.
pub fn familyName(self: Face) ?[:0]const u8 {
    return if (self.handle.*.family_name) |family|
        std.mem.span(family)
    else
        null;
}

/// Style name (same lifetime caveat as `familyName`).
pub fn styleName(self: Face) ?[:0]const u8 {
    return if (self.handle.*.style_name) |style_name|
        std.mem.span(style_name)
    else
        null;
}

/// Decodes the raw style_flags bits into a typed StyleFlags struct.
pub fn styleFlags(self: Face) StyleFlags {
    const flags = self.handle.*.style_flags;
    return utils.bitFieldsToStruct(StyleFlags, StyleFlags.Flag, flags);
}

/// Copies the active size metrics, or null when no size is selected.
pub fn sizeMetrics(self: Face) ?SizeMetrics {
    return if (self.handle.*.size) |size|
        @ptrCast(*SizeMetrics, &size.*.metrics).*
    else
        null;
}

/// PostScript name, or null when unavailable (FT_Get_Postscript_Name).
pub fn postscriptName(self: Face) ?[:0]const u8 {
    return if (c.FT_Get_Postscript_Name(self.handle)) |face_name|
        std.mem.span(face_name)
    else
        null;
}
// freetype/src/Face.zig
// Hand-maintained/translate-c style extern bindings to a C zip library.
// Conventions: functions returning c_int use 0 for success and a negative
// ZIP_E* code (declared below) on failure, per the C header these mirror.

/// Returns a human-readable string for a ZIP_E* error code.
pub extern fn zip_strerror(errnum: c_int) [*c]const u8;

/// Opaque archive handle; only ever used behind a pointer.
pub const struct_zip_t = opaque {};

// --- Archive lifetime ---
pub extern fn zip_open(zipname: [*c]const u8, level: c_int, mode: u8) ?*struct_zip_t;
pub extern fn zip_close(zip: ?*struct_zip_t) void;
pub extern fn zip_is64(zip: ?*struct_zip_t) c_int;

// --- Entry (single file inside the archive) API: open an entry, operate, close it ---
pub extern fn zip_entry_open(zip: ?*struct_zip_t, entryname: [*c]const u8) c_int;
pub extern fn zip_entry_openbyindex(zip: ?*struct_zip_t, index: c_int) c_int;
pub extern fn zip_entry_close(zip: ?*struct_zip_t) c_int;
pub extern fn zip_entry_name(zip: ?*struct_zip_t) [*c]const u8;
pub extern fn zip_entry_index(zip: ?*struct_zip_t) c_int;
pub extern fn zip_entry_isdir(zip: ?*struct_zip_t) c_int;
pub extern fn zip_entry_size(zip: ?*struct_zip_t) c_ulonglong;
pub extern fn zip_entry_crc32(zip: ?*struct_zip_t) c_uint;
pub extern fn zip_entry_write(zip: ?*struct_zip_t, buf: ?*const c_void, bufsize: usize) c_int;
pub extern fn zip_entry_fwrite(zip: ?*struct_zip_t, filename: [*c]const u8) c_int;
// zip_entry_read allocates the output buffer (written through `buf`);
// zip_entry_noallocread reads into a caller-provided buffer instead.
pub extern fn zip_entry_read(zip: ?*struct_zip_t, buf: [*c]?*c_void, bufsize: [*c]usize) isize;
pub extern fn zip_entry_noallocread(zip: ?*struct_zip_t, buf: ?*c_void, bufsize: usize) isize;
pub extern fn zip_entry_fread(zip: ?*struct_zip_t, filename: [*c]const u8) c_int;
pub extern fn zip_entry_extract(zip: ?*struct_zip_t, on_extract: ?fn (?*c_void, c_ulonglong, ?*const c_void, usize) callconv(.C) usize, arg: ?*c_void) c_int;
pub extern fn zip_entries_total(zip: ?*struct_zip_t) c_int;
pub extern fn zip_entries_delete(zip: ?*struct_zip_t, entries: [*c]const [*c]u8, len: usize) c_int;

// --- In-memory (stream) archive API ---
pub extern fn zip_stream_extract(stream: [*c]const u8, size: usize, dir: [*c]const u8, on_extract: ?fn ([*c]const u8, ?*c_void) callconv(.C) c_int, arg: ?*c_void) c_int;
pub extern fn zip_stream_open(stream: [*c]const u8, size: usize, level: c_int, mode: u8) ?*struct_zip_t;
pub extern fn zip_stream_copy(zip: ?*struct_zip_t, buf: [*c]?*c_void, bufsize: [*c]usize) isize;
pub extern fn zip_stream_close(zip: ?*struct_zip_t) void;

// --- One-shot convenience API ---
pub extern fn zip_create(zipname: [*c]const u8, filenames: [*c][*c]const u8, len: usize) c_int;
pub extern fn zip_extract(zipname: [*c]const u8, dir: [*c]const u8, on_extract_entry: ?fn ([*c]const u8, ?*c_void) callconv(.C) c_int, arg: ?*c_void) c_int;

pub const ZIP_DEFAULT_COMPRESSION_LEVEL = @as(c_int, 6);

// Error codes returned (negated) by the functions above.
pub const ZIP_ENOINIT = -@as(c_int, 1);
pub const ZIP_EINVENTNAME = -@as(c_int, 2);
pub const ZIP_ENOENT = -@as(c_int, 3);
pub const ZIP_EINVMODE = -@as(c_int, 4);
pub const ZIP_EINVLVL = -@as(c_int, 5);
pub const ZIP_ENOSUP64 = -@as(c_int, 6);
pub const ZIP_EMEMSET = -@as(c_int, 7);
pub const ZIP_EWRTENT = -@as(c_int, 8);
pub const ZIP_ETDEFLINIT = -@as(c_int, 9);
pub const ZIP_EINVIDX = -@as(c_int, 10);
pub const ZIP_ENOHDR = -@as(c_int, 11);
pub const ZIP_ETDEFLBUF = -@as(c_int, 12);
pub const ZIP_ECRTHDR = -@as(c_int, 13);
pub const ZIP_EWRTHDR = -@as(c_int, 14);
pub const ZIP_EWRTDIR = -@as(c_int, 15);
pub const ZIP_EOPNFILE = -@as(c_int, 16);
pub const ZIP_EINVENTTYPE = -@as(c_int, 17);
pub const ZIP_EMEMNOALLOC = -@as(c_int, 18);
pub const ZIP_ENOFILE = -@as(c_int, 19);
pub const ZIP_ENOPERM = -@as(c_int, 20);
pub const ZIP_EOOMEM = -@as(c_int, 21);
pub const ZIP_EINVZIPNAME = -@as(c_int, 22);
pub const ZIP_EMKDIR = -@as(c_int, 23);
pub const ZIP_ESYMLINK = -@as(c_int, 24);
pub const ZIP_ECLSZIP = -@as(c_int, 25);
pub const ZIP_ECAPSIZE = -@as(c_int, 26);
pub const ZIP_EFSEEK = -@as(c_int, 27);
pub const ZIP_EFREAD = -@as(c_int, 28);
pub const ZIP_EFWRITE = -@as(c_int, 29);

/// Convenience alias matching the C typedef.
pub const zip_t = struct_zip_t;
// src/deps/zip/zip.zig
// Generated bindings for the Windows Remote Differential Compression (msrdc) API.
// Values/HRESULTs and limits below come straight from the SDK headers.
pub const RDCE_TABLE_FULL = @as(u32, 2147745793);
pub const RDCE_TABLE_CORRUPT = @as(u32, 2147745794);
pub const MSRDC_SIGNATURE_HASHSIZE = @as(u32, 16);
pub const SimilarityFileIdMinSize = @as(u32, 4);
pub const SimilarityFileIdMaxSize = @as(u32, 32);
pub const MSRDC_VERSION = @as(u32, 65536);
pub const MSRDC_MINIMUM_COMPATIBLE_APP_VERSION = @as(u32, 65536);
pub const MSRDC_MINIMUM_DEPTH = @as(u32, 1);
pub const MSRDC_MAXIMUM_DEPTH = @as(u32, 8);
pub const MSRDC_MINIMUM_COMPAREBUFFER = @as(u32, 100000);
pub const MSRDC_MAXIMUM_COMPAREBUFFER = @as(u32, 1073741824);
pub const MSRDC_DEFAULT_COMPAREBUFFER = @as(u32, 3200000);
pub const MSRDC_MINIMUM_INPUTBUFFERSIZE = @as(u32, 1024);
pub const MSRDC_MINIMUM_HORIZONSIZE = @as(u32, 128);
pub const MSRDC_MAXIMUM_HORIZONSIZE = @as(u32, 16384);
pub const MSRDC_MINIMUM_HASHWINDOWSIZE = @as(u32, 2);
pub const MSRDC_MAXIMUM_HASHWINDOWSIZE = @as(u32, 96);
pub const MSRDC_DEFAULT_HASHWINDOWSIZE_1 = @as(u32, 48);
pub const MSRDC_DEFAULT_HORIZONSIZE_1 = @as(u32, 1024);
pub const MSRDC_DEFAULT_HASHWINDOWSIZE_N = @as(u32, 2);
pub const MSRDC_DEFAULT_HORIZONSIZE_N = @as(u32, 128);
pub const MSRDC_MAXIMUM_TRAITVALUE = @as(u32, 63);
pub const MSRDC_MINIMUM_MATCHESREQUIRED = @as(u32, 1);
pub const MSRDC_MAXIMUM_MATCHESREQUIRED = @as(u32, 16);

//--------------------------------------------------------------------------------
// Section: Types (48)
//--------------------------------------------------------------------------------
// COM coclass CLSIDs; each `_Value` GUID is private and exported by pointer.
const CLSID_RdcLibrary_Value = Guid.initString("96236a85-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_RdcLibrary = &CLSID_RdcLibrary_Value;
const CLSID_RdcGeneratorParameters_Value = Guid.initString("96236a86-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_RdcGeneratorParameters = &CLSID_RdcGeneratorParameters_Value;
const CLSID_RdcGeneratorFilterMaxParameters_Value = Guid.initString("96236a87-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_RdcGeneratorFilterMaxParameters = &CLSID_RdcGeneratorFilterMaxParameters_Value;
const CLSID_RdcGenerator_Value = Guid.initString("96236a88-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_RdcGenerator = &CLSID_RdcGenerator_Value;
const CLSID_RdcFileReader_Value = Guid.initString("96236a89-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_RdcFileReader = &CLSID_RdcFileReader_Value;
const CLSID_RdcSignatureReader_Value = Guid.initString("96236a8a-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_RdcSignatureReader = &CLSID_RdcSignatureReader_Value;
const CLSID_RdcComparator_Value = Guid.initString("96236a8b-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_RdcComparator = &CLSID_RdcComparator_Value;
const CLSID_SimilarityReportProgress_Value = Guid.initString("96236a8d-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_SimilarityReportProgress = &CLSID_SimilarityReportProgress_Value;
const CLSID_SimilarityTableDumpState_Value = Guid.initString("96236a8e-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_SimilarityTableDumpState = &CLSID_SimilarityTableDumpState_Value;
const CLSID_SimilarityTraitsTable_Value = Guid.initString("96236a8f-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_SimilarityTraitsTable = &CLSID_SimilarityTraitsTable_Value;
const CLSID_SimilarityFileIdTable_Value = Guid.initString("96236a90-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_SimilarityFileIdTable = &CLSID_SimilarityFileIdTable_Value;
const CLSID_Similarity_Value = Guid.initString("96236a91-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_Similarity = &CLSID_Similarity_Value;
const CLSID_RdcSimilarityGenerator_Value = Guid.initString("96236a92-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_RdcSimilarityGenerator = &CLSID_RdcSimilarityGenerator_Value;
const CLSID_FindSimilarResults_Value = Guid.initString("96236a93-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_FindSimilarResults = &CLSID_FindSimilarResults_Value;
const CLSID_SimilarityTraitsMapping_Value = Guid.initString("96236a94-9dbc-11da-9e3f-0011114ae311");
// (declaration continues on the next chunk line)
pub const
CLSID_SimilarityTraitsMapping = &CLSID_SimilarityTraitsMapping_Value;
const CLSID_SimilarityTraitsMappedView_Value = Guid.initString("96236a95-9dbc-11da-9e3f-0011114ae311");
pub const CLSID_SimilarityTraitsMappedView = &CLSID_SimilarityTraitsMappedView_Value;

/// RDC library status codes (RDC_ErrorCode in msrdc.h).
pub const RDC_ErrorCode = enum(i32) {
    NoError = 0,
    HeaderVersionNewer = 1,
    HeaderVersionOlder = 2,
    HeaderMissingOrCorrupt = 3,
    HeaderWrongType = 4,
    DataMissingOrCorrupt = 5,
    DataTooManyRecords = 6,
    FileChecksumMismatch = 7,
    ApplicationError = 8,
    Aborted = 9,
    Win32Error = 10,
};
// C-style aliases for the enum members (kept for header parity).
pub const RDC_NoError = RDC_ErrorCode.NoError;
pub const RDC_HeaderVersionNewer = RDC_ErrorCode.HeaderVersionNewer;
pub const RDC_HeaderVersionOlder = RDC_ErrorCode.HeaderVersionOlder;
pub const RDC_HeaderMissingOrCorrupt = RDC_ErrorCode.HeaderMissingOrCorrupt;
pub const RDC_HeaderWrongType = RDC_ErrorCode.HeaderWrongType;
pub const RDC_DataMissingOrCorrupt = RDC_ErrorCode.DataMissingOrCorrupt;
pub const RDC_DataTooManyRecords = RDC_ErrorCode.DataTooManyRecords;
pub const RDC_FileChecksumMismatch = RDC_ErrorCode.FileChecksumMismatch;
pub const RDC_ApplicationError = RDC_ErrorCode.ApplicationError;
pub const RDC_Aborted = RDC_ErrorCode.Aborted;
pub const RDC_Win32Error = RDC_ErrorCode.Win32Error;

pub const GeneratorParametersType = enum(i32) {
    Unused = 0,
    FilterMax = 1,
};
pub const RDCGENTYPE_Unused = GeneratorParametersType.Unused;
pub const RDCGENTYPE_FilterMax = GeneratorParametersType.FilterMax;

pub const RdcNeedType = enum(i32) {
    SOURCE = 0,
    TARGET = 1,
    SEED = 2,
    SEED_MAX = 255,
};
pub const RDCNEED_SOURCE = RdcNeedType.SOURCE;
pub const RDCNEED_TARGET = RdcNeedType.TARGET;
pub const RDCNEED_SEED = RdcNeedType.SEED;
pub const RDCNEED_SEED_MAX = RdcNeedType.SEED_MAX;

// Plain-old-data structs matching the C ABI layout of the SDK types.
pub const RdcNeed = extern struct {
    m_BlockType: RdcNeedType,
    m_FileOffset: u64,
    m_BlockLength: u64,
};
pub const RdcBufferPointer = extern struct {
    m_Size: u32,
    m_Used: u32,
    m_Data: ?*u8,
};
pub const RdcNeedPointer = extern struct {
    m_Size: u32,
    m_Used: u32,
    m_Data: ?*RdcNeed,
};
pub const RdcSignature = extern struct {
    m_Signature: [16]u8,
    m_BlockLength: u16,
};
pub const RdcSignaturePointer = extern struct {
    m_Size: u32,
    m_Used: u32,
    m_Data: ?*RdcSignature,
};

pub const RdcCreatedTables = enum(i32) {
    InvalidOrUnknown = 0,
    Existing = 1,
    New = 2,
};
pub const RDCTABLE_InvalidOrUnknown = RdcCreatedTables.InvalidOrUnknown;
pub const RDCTABLE_Existing = RdcCreatedTables.Existing;
pub const RDCTABLE_New = RdcCreatedTables.New;

pub const RdcMappingAccessMode = enum(i32) {
    Undefined = 0,
    ReadOnly = 1,
    ReadWrite = 2,
};
pub const RDCMAPPING_Undefined = RdcMappingAccessMode.Undefined;
pub const RDCMAPPING_ReadOnly = RdcMappingAccessMode.ReadOnly;
pub const RDCMAPPING_ReadWrite = RdcMappingAccessMode.ReadWrite;

pub const SimilarityMappedViewInfo = extern struct {
    m_Data: ?*u8,
    m_Length: u32,
};
pub const SimilarityData = extern struct {
    m_Data: [16]u8,
};
pub const FindSimilarFileIndexResults = extern struct {
    m_FileIndex: u32,
    m_MatchCount: u32,
};
pub const SimilarityDumpData = extern struct {
    m_FileIndex: u32,
    m_Data: SimilarityData,
};
pub const SimilarityFileId = extern struct {
    m_FileId: [32]u8,
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IRdcGeneratorParameters_Value = Guid.initString("96236a71-9dbc-11da-9e3f-0011114ae311");
pub const IID_IRdcGeneratorParameters = &IID_IRdcGeneratorParameters_Value;
/// COM interface wrapper: the struct is just a vtable pointer, and MethodMixin
/// (below) injects forwarding methods that call through the vtable.
pub const IRdcGeneratorParameters = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetGeneratorParametersType: fn (self: *const IRdcGeneratorParameters, parametersType: ?*GeneratorParametersType) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetParametersVersion: fn (self: *const IRdcGeneratorParameters, currentVersion: ?*u32, minimumCompatibleAppVersion: ?*u32) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSerializeSize: fn (self: *const IRdcGeneratorParameters, size: ?*u32) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Serialize: fn (self: *const
IRdcGeneratorParameters, size: u32, parametersBlob: ?*u8, bytesWritten: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRdcGeneratorParameters_GetGeneratorParametersType(self: *const T, parametersType: ?*GeneratorParametersType) callconv(.Inline) HRESULT { return @ptrCast(*const IRdcGeneratorParameters.VTable, self.vtable).GetGeneratorParametersType(@ptrCast(*const IRdcGeneratorParameters, self), parametersType); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRdcGeneratorParameters_GetParametersVersion(self: *const T, currentVersion: ?*u32, minimumCompatibleAppVersion: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IRdcGeneratorParameters.VTable, self.vtable).GetParametersVersion(@ptrCast(*const IRdcGeneratorParameters, self), currentVersion, minimumCompatibleAppVersion); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRdcGeneratorParameters_GetSerializeSize(self: *const T, size: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IRdcGeneratorParameters.VTable, self.vtable).GetSerializeSize(@ptrCast(*const IRdcGeneratorParameters, self), size); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRdcGeneratorParameters_Serialize(self: *const T, size: u32, parametersBlob: ?*u8, bytesWritten: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IRdcGeneratorParameters.VTable, self.vtable).Serialize(@ptrCast(*const IRdcGeneratorParameters, self), size, parametersBlob, bytesWritten); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IRdcGeneratorFilterMaxParameters_Value = Guid.initString("96236a72-9dbc-11da-9e3f-0011114ae311"); pub const 
IID_IRdcGeneratorFilterMaxParameters = &IID_IRdcGeneratorFilterMaxParameters_Value;
// COM interface wrapper: exposes horizon-size and hash-window-size accessors of the
// FilterMax generator parameters. Field order of VTable mirrors the COM vtable layout
// (presumably msrdc.h) — do not reorder fields.
pub const IRdcGeneratorFilterMaxParameters = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetHorizonSize: fn(
            self: *const IRdcGeneratorFilterMaxParameters,
            horizonSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetHorizonSize: fn(
            self: *const IRdcGeneratorFilterMaxParameters,
            horizonSize: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHashWindowSize: fn(
            self: *const IRdcGeneratorFilterMaxParameters,
            hashWindowSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetHashWindowSize: fn(
            self: *const IRdcGeneratorFilterMaxParameters,
            hashWindowSize: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin re-exported below via usingnamespace so wrappers are callable on this type.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcGeneratorFilterMaxParameters_GetHorizonSize(self: *const T, horizonSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcGeneratorFilterMaxParameters.VTable, self.vtable).GetHorizonSize(@ptrCast(*const IRdcGeneratorFilterMaxParameters, self), horizonSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcGeneratorFilterMaxParameters_SetHorizonSize(self: *const T, horizonSize: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcGeneratorFilterMaxParameters.VTable, self.vtable).SetHorizonSize(@ptrCast(*const IRdcGeneratorFilterMaxParameters, self), horizonSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcGeneratorFilterMaxParameters_GetHashWindowSize(self: *const T, hashWindowSize: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcGeneratorFilterMaxParameters.VTable, self.vtable).GetHashWindowSize(@ptrCast(*const IRdcGeneratorFilterMaxParameters, self), hashWindowSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcGeneratorFilterMaxParameters_SetHashWindowSize(self: *const T, hashWindowSize: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcGeneratorFilterMaxParameters.VTable, self.vtable).SetHashWindowSize(@ptrCast(*const IRdcGeneratorFilterMaxParameters, self), hashWindowSize);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IRdcGenerator_Value = Guid.initString("96236a73-9dbc-11da-9e3f-0011114ae311");
pub const IID_IRdcGenerator = &IID_IRdcGenerator_Value;
// COM interface wrapper: signature generator (parameters query + streaming Process call).
pub const IRdcGenerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetGeneratorParameters: fn(
            self: *const IRdcGenerator,
            level: u32,
            iGeneratorParameters: ?*?*IRdcGeneratorParameters,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Process: fn(
            self: *const IRdcGenerator,
            endOfInput: BOOL,
            endOfOutput: ?*BOOL,
            inputBuffer: ?*RdcBufferPointer,
            depth: u32,
            outputBuffers: [*]?*RdcBufferPointer,
            rdc_ErrorCode: ?*RDC_ErrorCode,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcGenerator_GetGeneratorParameters(self: *const T, level: u32, iGeneratorParameters: ?*?*IRdcGeneratorParameters) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcGenerator.VTable, self.vtable).GetGeneratorParameters(@ptrCast(*const IRdcGenerator, self), level, iGeneratorParameters);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcGenerator_Process(self: *const T, endOfInput: BOOL, endOfOutput: ?*BOOL, inputBuffer: ?*RdcBufferPointer, depth: u32, outputBuffers: [*]?*RdcBufferPointer, rdc_ErrorCode: ?*RDC_ErrorCode) callconv(.Inline) HRESULT {
            return @ptrCast(*const
IRdcGenerator.VTable, self.vtable).Process(@ptrCast(*const IRdcGenerator, self), endOfInput, endOfOutput, inputBuffer, depth, outputBuffers, rdc_ErrorCode);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IRdcFileReader_Value = Guid.initString("96236a74-9dbc-11da-9e3f-0011114ae311");
pub const IID_IRdcFileReader = &IID_IRdcFileReader_Value;
// COM interface wrapper: application-supplied file reader callback interface.
pub const IRdcFileReader = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetFileSize: fn(
            self: *const IRdcFileReader,
            fileSize: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Read: fn(
            self: *const IRdcFileReader,
            offsetFileStart: u64,
            bytesToRead: u32,
            bytesActuallyRead: ?*u32,
            buffer: ?*u8,
            eof: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFilePosition: fn(
            self: *const IRdcFileReader,
            offsetFromStart: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcFileReader_GetFileSize(self: *const T, fileSize: ?*u64) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcFileReader.VTable, self.vtable).GetFileSize(@ptrCast(*const IRdcFileReader, self), fileSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcFileReader_Read(self: *const T, offsetFileStart: u64, bytesToRead: u32, bytesActuallyRead: ?*u32, buffer: ?*u8, eof: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcFileReader.VTable, self.vtable).Read(@ptrCast(*const IRdcFileReader, self), offsetFileStart, bytesToRead, bytesActuallyRead, buffer, eof);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcFileReader_GetFilePosition(self: *const T, offsetFromStart: ?*u64) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcFileReader.VTable, self.vtable).GetFilePosition(@ptrCast(*const IRdcFileReader, self), offsetFromStart);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IRdcFileWriter_Value = Guid.initString("96236a75-9dbc-11da-9e3f-0011114ae311");
pub const IID_IRdcFileWriter = &IID_IRdcFileWriter_Value;
// COM interface wrapper: file writer; note the vtable base is IRdcFileReader.VTable
// (IRdcFileWriter extends IRdcFileReader), and the mixin chains IRdcFileReader's methods.
pub const IRdcFileWriter = extern struct {
    pub const VTable = extern struct {
        base: IRdcFileReader.VTable,
        Write: fn(
            self: *const IRdcFileWriter,
            offsetFileStart: u64,
            bytesToWrite: u32,
            buffer: ?*u8,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Truncate: fn(
            self: *const IRdcFileWriter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DeleteOnClose: fn(
            self: *const IRdcFileWriter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IRdcFileReader.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcFileWriter_Write(self: *const T, offsetFileStart: u64, bytesToWrite: u32, buffer: ?*u8) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcFileWriter.VTable, self.vtable).Write(@ptrCast(*const IRdcFileWriter, self), offsetFileStart, bytesToWrite, buffer);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcFileWriter_Truncate(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcFileWriter.VTable, self.vtable).Truncate(@ptrCast(*const IRdcFileWriter, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcFileWriter_DeleteOnClose(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcFileWriter.VTable, self.vtable).DeleteOnClose(@ptrCast(*const IRdcFileWriter, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IRdcSignatureReader_Value =
Guid.initString("96236a76-9dbc-11da-9e3f-0011114ae311");
pub const IID_IRdcSignatureReader = &IID_IRdcSignatureReader_Value;
// COM interface wrapper: reads a signature stream (header, then signature batches).
pub const IRdcSignatureReader = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        ReadHeader: fn(
            self: *const IRdcSignatureReader,
            rdc_ErrorCode: ?*RDC_ErrorCode,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ReadSignatures: fn(
            self: *const IRdcSignatureReader,
            rdcSignaturePointer: ?*RdcSignaturePointer,
            endOfOutput: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcSignatureReader_ReadHeader(self: *const T, rdc_ErrorCode: ?*RDC_ErrorCode) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcSignatureReader.VTable, self.vtable).ReadHeader(@ptrCast(*const IRdcSignatureReader, self), rdc_ErrorCode);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcSignatureReader_ReadSignatures(self: *const T, rdcSignaturePointer: ?*RdcSignaturePointer, endOfOutput: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcSignatureReader.VTable, self.vtable).ReadSignatures(@ptrCast(*const IRdcSignatureReader, self), rdcSignaturePointer, endOfOutput);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IRdcComparator_Value = Guid.initString("96236a77-9dbc-11da-9e3f-0011114ae311");
pub const IID_IRdcComparator = &IID_IRdcComparator_Value;
// COM interface wrapper: compares signature streams and emits needs (RdcNeedPointer).
pub const IRdcComparator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Process: fn(
            self: *const IRdcComparator,
            endOfInput: BOOL,
            endOfOutput: ?*BOOL,
            inputBuffer: ?*RdcBufferPointer,
            outputBuffer: ?*RdcNeedPointer,
            rdc_ErrorCode: ?*RDC_ErrorCode,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcComparator_Process(self: *const T, endOfInput: BOOL, endOfOutput: ?*BOOL, inputBuffer: ?*RdcBufferPointer, outputBuffer: ?*RdcNeedPointer, rdc_ErrorCode: ?*RDC_ErrorCode) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcComparator.VTable, self.vtable).Process(@ptrCast(*const IRdcComparator, self), endOfInput, endOfOutput, inputBuffer, outputBuffer, rdc_ErrorCode);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IRdcLibrary_Value = Guid.initString("96236a78-9dbc-11da-9e3f-0011114ae311");
pub const IID_IRdcLibrary = &IID_IRdcLibrary_Value;
// COM interface wrapper: RDC factory object — creates generators, comparators and
// signature readers, and reports the library version.
pub const IRdcLibrary = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        ComputeDefaultRecursionDepth: fn(
            self: *const IRdcLibrary,
            fileSize: u64,
            depth: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateGeneratorParameters: fn(
            self: *const IRdcLibrary,
            parametersType: GeneratorParametersType,
            level: u32,
            iGeneratorParameters: ?*?*IRdcGeneratorParameters,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OpenGeneratorParameters: fn(
            self: *const IRdcLibrary,
            size: u32,
            parametersBlob: ?*const u8,
            iGeneratorParameters: ?*?*IRdcGeneratorParameters,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateGenerator: fn(
            self: *const IRdcLibrary,
            depth: u32,
            iGeneratorParametersArray: [*]?*IRdcGeneratorParameters,
            iGenerator: ?*?*IRdcGenerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateComparator: fn(
            self: *const IRdcLibrary,
            iSeedSignaturesFile: ?*IRdcFileReader,
            comparatorBufferSize: u32,
            iComparator: ?*?*IRdcComparator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateSignatureReader: fn(
            self: *const IRdcLibrary,
            iFileReader: ?*IRdcFileReader,
            iSignatureReader: ?*?*IRdcSignatureReader,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetRDCVersion: fn(
            self: *const IRdcLibrary,
            currentVersion: ?*u32,
            minimumCompatibleAppVersion: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcLibrary_ComputeDefaultRecursionDepth(self: *const T, fileSize: u64, depth: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcLibrary.VTable, self.vtable).ComputeDefaultRecursionDepth(@ptrCast(*const IRdcLibrary, self), fileSize, depth);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcLibrary_CreateGeneratorParameters(self: *const T, parametersType: GeneratorParametersType, level: u32, iGeneratorParameters: ?*?*IRdcGeneratorParameters) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcLibrary.VTable, self.vtable).CreateGeneratorParameters(@ptrCast(*const IRdcLibrary, self), parametersType, level, iGeneratorParameters);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcLibrary_OpenGeneratorParameters(self: *const T, size: u32, parametersBlob: ?*const u8, iGeneratorParameters: ?*?*IRdcGeneratorParameters) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcLibrary.VTable, self.vtable).OpenGeneratorParameters(@ptrCast(*const IRdcLibrary, self), size, parametersBlob, iGeneratorParameters);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcLibrary_CreateGenerator(self: *const T, depth: u32, iGeneratorParametersArray: [*]?*IRdcGeneratorParameters, iGenerator: ?*?*IRdcGenerator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcLibrary.VTable, self.vtable).CreateGenerator(@ptrCast(*const IRdcLibrary, self), depth, iGeneratorParametersArray, iGenerator);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcLibrary_CreateComparator(self: *const T, iSeedSignaturesFile: ?*IRdcFileReader, comparatorBufferSize: u32, iComparator: ?*?*IRdcComparator) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcLibrary.VTable, self.vtable).CreateComparator(@ptrCast(*const IRdcLibrary, self), iSeedSignaturesFile, comparatorBufferSize, iComparator);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcLibrary_CreateSignatureReader(self: *const T, iFileReader: ?*IRdcFileReader, iSignatureReader: ?*?*IRdcSignatureReader) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcLibrary.VTable, self.vtable).CreateSignatureReader(@ptrCast(*const IRdcLibrary, self), iFileReader, iSignatureReader);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcLibrary_GetRDCVersion(self: *const T, currentVersion: ?*u32, minimumCompatibleAppVersion: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcLibrary.VTable, self.vtable).GetRDCVersion(@ptrCast(*const IRdcLibrary, self), currentVersion, minimumCompatibleAppVersion);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_ISimilarityReportProgress_Value = Guid.initString("96236a7a-9dbc-11da-9e3f-0011114ae311");
pub const IID_ISimilarityReportProgress = &IID_ISimilarityReportProgress_Value;
// COM interface wrapper: progress callback (percentCompleted).
pub const ISimilarityReportProgress = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        ReportProgress: fn(
            self: *const ISimilarityReportProgress,
            percentCompleted: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityReportProgress_ReportProgress(self: *const T, percentCompleted: u32)
callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityReportProgress.VTable, self.vtable).ReportProgress(@ptrCast(*const ISimilarityReportProgress, self), percentCompleted);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_ISimilarityTableDumpState_Value = Guid.initString("96236a7b-9dbc-11da-9e3f-0011114ae311");
pub const IID_ISimilarityTableDumpState = &IID_ISimilarityTableDumpState_Value;
// COM interface wrapper: iterator state for dumping a similarity traits table.
pub const ISimilarityTableDumpState = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetNextData: fn(
            self: *const ISimilarityTableDumpState,
            resultsSize: u32,
            resultsUsed: ?*u32,
            eof: ?*BOOL,
            results: ?*SimilarityDumpData,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTableDumpState_GetNextData(self: *const T, resultsSize: u32, resultsUsed: ?*u32, eof: ?*BOOL, results: ?*SimilarityDumpData) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTableDumpState.VTable, self.vtable).GetNextData(@ptrCast(*const ISimilarityTableDumpState, self), resultsSize, resultsUsed, eof, results);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_ISimilarityTraitsMappedView_Value = Guid.initString("96236a7c-9dbc-11da-9e3f-0011114ae311");
pub const IID_ISimilarityTraitsMappedView = &IID_ISimilarityTraitsMappedView_Value;
// COM interface wrapper: a mapped view over a traits table. Note GetView returns
// 'void' (not HRESULT) — that matches the generated vtable declaration above.
pub const ISimilarityTraitsMappedView = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Flush: fn(
            self: *const ISimilarityTraitsMappedView,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Unmap: fn(
            self: *const ISimilarityTraitsMappedView,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Get: fn(
            self: *const ISimilarityTraitsMappedView,
            index: u64,
            dirty: BOOL,
            numElements: u32,
            viewInfo: ?*SimilarityMappedViewInfo,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetView: fn(
            self: *const ISimilarityTraitsMappedView,
            mappedPageBegin: ?*const ?*u8,
            mappedPageEnd: ?*const ?*u8,
        ) callconv(@import("std").os.windows.WINAPI) void,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsMappedView_Flush(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsMappedView.VTable, self.vtable).Flush(@ptrCast(*const ISimilarityTraitsMappedView, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsMappedView_Unmap(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsMappedView.VTable, self.vtable).Unmap(@ptrCast(*const ISimilarityTraitsMappedView, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsMappedView_Get(self: *const T, index: u64, dirty: BOOL, numElements: u32, viewInfo: ?*SimilarityMappedViewInfo) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsMappedView.VTable, self.vtable).Get(@ptrCast(*const ISimilarityTraitsMappedView, self), index, dirty, numElements, viewInfo);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsMappedView_GetView(self: *const T, mappedPageBegin: ?*const ?*u8, mappedPageEnd: ?*const ?*u8) callconv(.Inline) void {
            return @ptrCast(*const ISimilarityTraitsMappedView.VTable, self.vtable).GetView(@ptrCast(*const ISimilarityTraitsMappedView, self), mappedPageBegin, mappedPageEnd);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_ISimilarityTraitsMapping_Value =
Guid.initString("96236a7d-9dbc-11da-9e3f-0011114ae311");
pub const IID_ISimilarityTraitsMapping = &IID_ISimilarityTraitsMapping_Value;
// COM interface wrapper: backing-store mapping for traits tables. CloseMapping and
// GetPageSize return 'void' per the generated vtable declaration.
pub const ISimilarityTraitsMapping = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CloseMapping: fn(
            self: *const ISimilarityTraitsMapping,
        ) callconv(@import("std").os.windows.WINAPI) void,
        SetFileSize: fn(
            self: *const ISimilarityTraitsMapping,
            fileSize: u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFileSize: fn(
            self: *const ISimilarityTraitsMapping,
            fileSize: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OpenMapping: fn(
            self: *const ISimilarityTraitsMapping,
            accessMode: RdcMappingAccessMode,
            begin: u64,
            end: u64,
            actualEnd: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ResizeMapping: fn(
            self: *const ISimilarityTraitsMapping,
            accessMode: RdcMappingAccessMode,
            begin: u64,
            end: u64,
            actualEnd: ?*u64,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPageSize: fn(
            self: *const ISimilarityTraitsMapping,
            pageSize: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) void,
        CreateView: fn(
            self: *const ISimilarityTraitsMapping,
            minimumMappedPages: u32,
            accessMode: RdcMappingAccessMode,
            mappedView: ?*?*ISimilarityTraitsMappedView,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsMapping_CloseMapping(self: *const T) callconv(.Inline) void {
            return @ptrCast(*const ISimilarityTraitsMapping.VTable, self.vtable).CloseMapping(@ptrCast(*const ISimilarityTraitsMapping, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsMapping_SetFileSize(self: *const T, fileSize: u64) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsMapping.VTable, self.vtable).SetFileSize(@ptrCast(*const ISimilarityTraitsMapping, self), fileSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsMapping_GetFileSize(self: *const T, fileSize: ?*u64) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsMapping.VTable, self.vtable).GetFileSize(@ptrCast(*const ISimilarityTraitsMapping, self), fileSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsMapping_OpenMapping(self: *const T, accessMode: RdcMappingAccessMode, begin: u64, end: u64, actualEnd: ?*u64) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsMapping.VTable, self.vtable).OpenMapping(@ptrCast(*const ISimilarityTraitsMapping, self), accessMode, begin, end, actualEnd);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsMapping_ResizeMapping(self: *const T, accessMode: RdcMappingAccessMode, begin: u64, end: u64, actualEnd: ?*u64) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsMapping.VTable, self.vtable).ResizeMapping(@ptrCast(*const ISimilarityTraitsMapping, self), accessMode, begin, end, actualEnd);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsMapping_GetPageSize(self: *const T, pageSize: ?*u32) callconv(.Inline) void {
            return @ptrCast(*const ISimilarityTraitsMapping.VTable, self.vtable).GetPageSize(@ptrCast(*const ISimilarityTraitsMapping, self), pageSize);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsMapping_CreateView(self: *const T, minimumMappedPages: u32, accessMode: RdcMappingAccessMode, mappedView: ?*?*ISimilarityTraitsMappedView) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsMapping.VTable, self.vtable).CreateView(@ptrCast(*const ISimilarityTraitsMapping, self), minimumMappedPages, accessMode, mappedView);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_ISimilarityTraitsTable_Value = Guid.initString("96236a7e-9dbc-11da-9e3f-0011114ae311");
pub const IID_ISimilarityTraitsTable = &IID_ISimilarityTraitsTable_Value;
// COM interface wrapper: similarity traits table (create/append/find/dump).
pub const ISimilarityTraitsTable = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CreateTable: fn(
            self: *const ISimilarityTraitsTable,
            path: ?PWSTR,
            truncate: BOOL,
            securityDescriptor: ?*u8,
            isNew: ?*RdcCreatedTables,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateTableIndirect: fn(
            self: *const ISimilarityTraitsTable,
            mapping: ?*ISimilarityTraitsMapping,
            truncate: BOOL,
            isNew: ?*RdcCreatedTables,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CloseTable: fn(
            self: *const ISimilarityTraitsTable,
            isValid: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Append: fn(
            self: *const ISimilarityTraitsTable,
            data: ?*SimilarityData,
            fileIndex: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        FindSimilarFileIndex: fn(
            self: *const ISimilarityTraitsTable,
            similarityData: ?*SimilarityData,
            numberOfMatchesRequired: u16,
            findSimilarFileIndexResults: ?*FindSimilarFileIndexResults,
            resultsSize: u32,
            resultsUsed: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        BeginDump: fn(
            self: *const ISimilarityTraitsTable,
            similarityTableDumpState: ?*?*ISimilarityTableDumpState,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLastIndex: fn(
            self: *const ISimilarityTraitsTable,
            fileIndex: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsTable_CreateTable(self: *const T, path: ?PWSTR, truncate: BOOL, securityDescriptor: ?*u8, isNew: ?*RdcCreatedTables) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsTable.VTable, self.vtable).CreateTable(@ptrCast(*const ISimilarityTraitsTable, self), path, truncate, securityDescriptor, isNew);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsTable_CreateTableIndirect(self: *const T, mapping: ?*ISimilarityTraitsMapping, truncate: BOOL, isNew: ?*RdcCreatedTables) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsTable.VTable, self.vtable).CreateTableIndirect(@ptrCast(*const ISimilarityTraitsTable, self), mapping, truncate, isNew);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsTable_CloseTable(self: *const T, isValid: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsTable.VTable, self.vtable).CloseTable(@ptrCast(*const ISimilarityTraitsTable, self), isValid);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsTable_Append(self: *const T, data: ?*SimilarityData, fileIndex: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsTable.VTable, self.vtable).Append(@ptrCast(*const ISimilarityTraitsTable, self), data, fileIndex);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsTable_FindSimilarFileIndex(self: *const T, similarityData: ?*SimilarityData, numberOfMatchesRequired: u16, findSimilarFileIndexResults: ?*FindSimilarFileIndexResults, resultsSize: u32, resultsUsed: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsTable.VTable, self.vtable).FindSimilarFileIndex(@ptrCast(*const ISimilarityTraitsTable, self), similarityData, numberOfMatchesRequired, findSimilarFileIndexResults, resultsSize, resultsUsed);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsTable_BeginDump(self: *const T, similarityTableDumpState: ?*?*ISimilarityTableDumpState)
callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsTable.VTable, self.vtable).BeginDump(@ptrCast(*const ISimilarityTraitsTable, self), similarityTableDumpState);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityTraitsTable_GetLastIndex(self: *const T, fileIndex: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityTraitsTable.VTable, self.vtable).GetLastIndex(@ptrCast(*const ISimilarityTraitsTable, self), fileIndex);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_ISimilarityFileIdTable_Value = Guid.initString("96236a7f-9dbc-11da-9e3f-0011114ae311");
pub const IID_ISimilarityFileIdTable = &IID_ISimilarityFileIdTable_Value;
// COM interface wrapper: maps similarity file indices to application file IDs.
pub const ISimilarityFileIdTable = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CreateTable: fn(
            self: *const ISimilarityFileIdTable,
            path: ?PWSTR,
            truncate: BOOL,
            securityDescriptor: ?*u8,
            recordSize: u32,
            isNew: ?*RdcCreatedTables,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateTableIndirect: fn(
            self: *const ISimilarityFileIdTable,
            fileIdFile: ?*IRdcFileWriter,
            truncate: BOOL,
            recordSize: u32,
            isNew: ?*RdcCreatedTables,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CloseTable: fn(
            self: *const ISimilarityFileIdTable,
            isValid: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Append: fn(
            self: *const ISimilarityFileIdTable,
            similarityFileId: ?*SimilarityFileId,
            similarityFileIndex: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Lookup: fn(
            self: *const ISimilarityFileIdTable,
            similarityFileIndex: u32,
            similarityFileId: ?*SimilarityFileId,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Invalidate: fn(
            self: *const ISimilarityFileIdTable,
            similarityFileIndex: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetRecordCount: fn(
            self: *const ISimilarityFileIdTable,
            recordCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityFileIdTable_CreateTable(self: *const T, path: ?PWSTR, truncate: BOOL, securityDescriptor: ?*u8, recordSize: u32, isNew: ?*RdcCreatedTables) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityFileIdTable.VTable, self.vtable).CreateTable(@ptrCast(*const ISimilarityFileIdTable, self), path, truncate, securityDescriptor, recordSize, isNew);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityFileIdTable_CreateTableIndirect(self: *const T, fileIdFile: ?*IRdcFileWriter, truncate: BOOL, recordSize: u32, isNew: ?*RdcCreatedTables) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityFileIdTable.VTable, self.vtable).CreateTableIndirect(@ptrCast(*const ISimilarityFileIdTable, self), fileIdFile, truncate, recordSize, isNew);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityFileIdTable_CloseTable(self: *const T, isValid: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityFileIdTable.VTable, self.vtable).CloseTable(@ptrCast(*const ISimilarityFileIdTable, self), isValid);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityFileIdTable_Append(self: *const T, similarityFileId: ?*SimilarityFileId, similarityFileIndex: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityFileIdTable.VTable, self.vtable).Append(@ptrCast(*const ISimilarityFileIdTable, self), similarityFileId, similarityFileIndex);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityFileIdTable_Lookup(self: *const T, similarityFileIndex: u32, similarityFileId: ?*SimilarityFileId) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityFileIdTable.VTable, self.vtable).Lookup(@ptrCast(*const ISimilarityFileIdTable, self), similarityFileIndex, similarityFileId);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityFileIdTable_Invalidate(self: *const T, similarityFileIndex: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityFileIdTable.VTable, self.vtable).Invalidate(@ptrCast(*const ISimilarityFileIdTable, self), similarityFileIndex);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimilarityFileIdTable_GetRecordCount(self: *const T, recordCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimilarityFileIdTable.VTable, self.vtable).GetRecordCount(@ptrCast(*const ISimilarityFileIdTable, self), recordCount);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IRdcSimilarityGenerator_Value = Guid.initString("96236a80-9dbc-11da-9e3f-0011114ae311");
pub const IID_IRdcSimilarityGenerator = &IID_IRdcSimilarityGenerator_Value;
// COM interface wrapper: enables similarity computation alongside signature generation.
pub const IRdcSimilarityGenerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        EnableSimilarity: fn(
            self: *const IRdcSimilarityGenerator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Results: fn(
            self: *const IRdcSimilarityGenerator,
            similarityData: ?*SimilarityData,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRdcSimilarityGenerator_EnableSimilarity(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRdcSimilarityGenerator.VTable, self.vtable).EnableSimilarity(@ptrCast(*const IRdcSimilarityGenerator, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn
IRdcSimilarityGenerator_Results(self: *const T, similarityData: ?*SimilarityData) callconv(.Inline) HRESULT { return @ptrCast(*const IRdcSimilarityGenerator.VTable, self.vtable).Results(@ptrCast(*const IRdcSimilarityGenerator, self), similarityData); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_IFindSimilarResults_Value = Guid.initString("96236a81-9dbc-11da-9e3f-0011114ae311"); pub const IID_IFindSimilarResults = &IID_IFindSimilarResults_Value; pub const IFindSimilarResults = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetSize: fn( self: *const IFindSimilarResults, size: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetNextFileId: fn( self: *const IFindSimilarResults, numTraitsMatched: ?*u32, similarityFileId: ?*SimilarityFileId, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFindSimilarResults_GetSize(self: *const T, size: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IFindSimilarResults.VTable, self.vtable).GetSize(@ptrCast(*const IFindSimilarResults, self), size); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IFindSimilarResults_GetNextFileId(self: *const T, numTraitsMatched: ?*u32, similarityFileId: ?*SimilarityFileId) callconv(.Inline) HRESULT { return @ptrCast(*const IFindSimilarResults.VTable, self.vtable).GetNextFileId(@ptrCast(*const IFindSimilarResults, self), numTraitsMatched, similarityFileId); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.0.6000' const IID_ISimilarity_Value = Guid.initString("96236a83-9dbc-11da-9e3f-0011114ae311"); pub const IID_ISimilarity = &IID_ISimilarity_Value; pub const ISimilarity = 
extern struct { pub const VTable = extern struct { base: IUnknown.VTable, CreateTable: fn( self: *const ISimilarity, path: ?PWSTR, truncate: BOOL, securityDescriptor: ?*u8, recordSize: u32, isNew: ?*RdcCreatedTables, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateTableIndirect: fn( self: *const ISimilarity, mapping: ?*ISimilarityTraitsMapping, fileIdFile: ?*IRdcFileWriter, truncate: BOOL, recordSize: u32, isNew: ?*RdcCreatedTables, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CloseTable: fn( self: *const ISimilarity, isValid: BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Append: fn( self: *const ISimilarity, similarityFileId: ?*SimilarityFileId, similarityData: ?*SimilarityData, ) callconv(@import("std").os.windows.WINAPI) HRESULT, FindSimilarFileId: fn( self: *const ISimilarity, similarityData: ?*SimilarityData, numberOfMatchesRequired: u16, resultsSize: u32, findSimilarResults: ?*?*IFindSimilarResults, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CopyAndSwap: fn( self: *const ISimilarity, newSimilarityTables: ?*ISimilarity, reportProgress: ?*ISimilarityReportProgress, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRecordCount: fn( self: *const ISimilarity, recordCount: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISimilarity_CreateTable(self: *const T, path: ?PWSTR, truncate: BOOL, securityDescriptor: ?*u8, recordSize: u32, isNew: ?*RdcCreatedTables) callconv(.Inline) HRESULT { return @ptrCast(*const ISimilarity.VTable, self.vtable).CreateTable(@ptrCast(*const ISimilarity, self), path, truncate, securityDescriptor, recordSize, isNew); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISimilarity_CreateTableIndirect(self: *const T, mapping: 
?*ISimilarityTraitsMapping, fileIdFile: ?*IRdcFileWriter, truncate: BOOL, recordSize: u32, isNew: ?*RdcCreatedTables) callconv(.Inline) HRESULT { return @ptrCast(*const ISimilarity.VTable, self.vtable).CreateTableIndirect(@ptrCast(*const ISimilarity, self), mapping, fileIdFile, truncate, recordSize, isNew); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISimilarity_CloseTable(self: *const T, isValid: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISimilarity.VTable, self.vtable).CloseTable(@ptrCast(*const ISimilarity, self), isValid); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISimilarity_Append(self: *const T, similarityFileId: ?*SimilarityFileId, similarityData: ?*SimilarityData) callconv(.Inline) HRESULT { return @ptrCast(*const ISimilarity.VTable, self.vtable).Append(@ptrCast(*const ISimilarity, self), similarityFileId, similarityData); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISimilarity_FindSimilarFileId(self: *const T, similarityData: ?*SimilarityData, numberOfMatchesRequired: u16, resultsSize: u32, findSimilarResults: ?*?*IFindSimilarResults) callconv(.Inline) HRESULT { return @ptrCast(*const ISimilarity.VTable, self.vtable).FindSimilarFileId(@ptrCast(*const ISimilarity, self), similarityData, numberOfMatchesRequired, resultsSize, findSimilarResults); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISimilarity_CopyAndSwap(self: *const T, newSimilarityTables: ?*ISimilarity, reportProgress: ?*ISimilarityReportProgress) callconv(.Inline) HRESULT { return @ptrCast(*const ISimilarity.VTable, self.vtable).CopyAndSwap(@ptrCast(*const ISimilarity, self), newSimilarityTables, reportProgress); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISimilarity_GetRecordCount(self: *const T, recordCount: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const 
ISimilarity.VTable, self.vtable).GetRecordCount(@ptrCast(*const ISimilarity, self), recordCount); } };} pub usingnamespace MethodMixin(@This()); }; //-------------------------------------------------------------------------------- // Section: Functions (0) //-------------------------------------------------------------------------------- //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (5) //-------------------------------------------------------------------------------- const Guid = @import("../zig.zig").Guid; const BOOL = @import("../foundation.zig").BOOL; const HRESULT = @import("../foundation.zig").HRESULT; const IUnknown = @import("../system/com.zig").IUnknown; const PWSTR = @import("../foundation.zig").PWSTR; test { @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
win32/networking/remote_differential_compression.zig
const std = @import("std");
const assert = std.debug.assert;
const tools = @import("tools");

pub const main = tools.defaultMain("2021/day03.txt", run);

/// AoC 2021 day 3 part 2 "rating" search: repeatedly partition `initial_list`
/// on successive bits (msb first), keeping only the values whose bit matches
/// the `mode` criterion, until exactly one value remains; returns that value.
/// The slice contents are permuted in place (elements are swapped, never
/// destroyed) and only the local view is narrowed, so the caller's slice still
/// holds the full set of values afterwards.
fn filter_in_place(initial_list: []u16, mode: enum { least_frequent, most_frequent }) u16 {
    var list = initial_list;
    var bit_index: u4 = 15;
    while (true) {
        const mask = @as(u16, 1) << bit_index;

        // Count how many remaining values have this bit set.
        var ones: u32 = 0;
        for (list) |l| {
            ones += @boolToInt(l & mask != 0);
        }

        // Bit value (expressed as a mask) a value must carry at this position
        // to be kept.
        const criteria = switch (mode) {
            // "or ones == 0": if no value has the bit set, keep everything.
            .least_frequent => if (ones >= (list.len + 1) / 2 or ones == 0) 0 else mask,
            .most_frequent => if (ones >= (list.len + 1) / 2) mask else 0,
        };

        // Hoare-style partition: keepers stay in [0, j), rejects are swapped
        // into [j, list.len).
        var i: usize = 0;
        var j: usize = list.len;
        while (i < j) {
            const keep = list[i] & mask == criteria;
            if (keep) {
                i += 1;
            } else {
                j -= 1;
                const t = list[j];
                list[j] = list[i];
                list[i] = t;
            }
        }
        assert(j >= 1); // the chosen criterion always keeps at least one value
        list.len = j;
        if (list.len == 1) return list[0];

        // Stop before decrementing past bit 0: `bit_index` is unsigned, so the
        // previous `while (bit_index >= 0) : (bit_index -= 1)` condition was
        // always true and the continue expression underflowed `u4` (a safety
        // panic) once bit 0 had been processed.
        if (bit_index == 0) break;
        bit_index -= 1;
    }
    unreachable; // all 16 bits examined without isolating a single value
}

/// Solve AoC 2021 day 3. Returns {part1, part2} rendered as decimal strings;
/// both are allocated with `gpa` and owned by the caller.
pub fn run(input: []const u8, gpa: std.mem.Allocator) tools.RunError![2][]const u8 {
    var arena = std.heap.ArenaAllocator.init(gpa);
    defer arena.deinit();
    const allocator = arena.allocator();

    var number_list: []u16 = undefined; // each line parsed as one binary number
    var digits_list: []@Vector(16, u1) = undefined; // per-line digit vectors, msb first

    // Parse: one binary number per line, kept both as an integer and as a
    // vector of its digits (for the column-wise counting in part 1).
    {
        const line_count = std.mem.count(u8, input, "\n") + 1;
        const nums = try allocator.alloc(u16, line_count);
        const digs = try allocator.alloc(@Vector(16, u1), line_count);
        var it = std.mem.tokenize(u8, input, "\n\r");
        var l: u32 = 0;
        while (it.next()) |line| : (l += 1) {
            var val = @splat(16, @as(u1, 0));
            var num: u16 = 0;
            for (line) |c, i| {
                val[i] = @boolToInt(c == '1');
                num = (num * 2) | @boolToInt(c == '1');
            }
            nums[l] = num;
            digs[l] = val;
        }
        number_list = nums[0..l];
        digits_list = digs[0..l];
    }

    const ans1 = ans: {
        // counters[i] = number of lines with a '1' in column i.
        const zero = @splat(16, @as(u16, 0));
        var counters = zero;
        for (digits_list) |val| {
            counters += @as(@Vector(16, u16), val);
        }
        var gamma: u32 = 0;
        var epsilon: u32 = 0;
        // A column is majority-one when its count exceeds half the line count.
        const ones = counters > @splat(16, digits_list.len / 2);
        // Significant width = number of columns where at least one '1' appeared.
        const width = @reduce(.Add, @as(@Vector(16, u16), @bitCast(@Vector(16, u1), counters > zero)));
        for (@as([16]bool, ones)[0..width]) |bit| {
            gamma *= 2;
            epsilon *= 2;
            gamma |= @boolToInt(bit);
            epsilon |= @boolToInt(!bit);
        }
        break :ans epsilon * gamma;
    };

    const ans2 = ans: {
        // filter_in_place only permutes the backing array, so calling it twice
        // on the same slice is safe: the second call still sees the full set.
        const co2 = filter_in_place(number_list, .least_frequent);
        const oxygen = filter_in_place(number_list, .most_frequent);
        break :ans @as(u32, oxygen) * @as(u32, co2);
    };

    return [_][]const u8{
        try std.fmt.allocPrint(gpa, "{}", .{ans1}),
        try std.fmt.allocPrint(gpa, "{}", .{ans2}),
    };
}

test {
    const res = try run(
        \\00100
        \\11110
        \\10110
        \\10111
        \\10101
        \\01111
        \\00111
        \\11100
        \\10000
        \\11001
        \\00010
        \\01010
    , std.testing.allocator);
    defer std.testing.allocator.free(res[0]);
    defer std.testing.allocator.free(res[1]);
    try std.testing.expectEqualStrings("198", res[0]);
    try std.testing.expectEqualStrings("230", res[1]);
}
2021/day03.zig
const std = @import("std");

/// {part1, part2} answers.
const Answer = struct { @"0": u32, @"1": u64 };

/// Win/loss tally, counted from player 0's perspective, across all
/// Dirac-die universes.
const WinLoss = struct {
    wins: u64,
    losses: u64,
};

/// Component-wise sum of two tallies.
fn add_win_loss(u: WinLoss, v: WinLoss) WinLoss {
    return WinLoss{
        .wins = u.wins + v.wins,
        .losses = u.losses + v.losses,
    };
}

/// Scale a tally by the number of universes (`c`) it occurs in.
fn scale_win_loss(c: u64, v: WinLoss) WinLoss {
    return WinLoss{
        .wins = c * v.wins,
        .losses = c * v.losses,
    };
}

/// Part 1: play with the deterministic 100-sided die.
/// `init_positions` are 0-based board positions (board squares 1..10 stored
/// as 0..9). Returns losing_score * total_die_rolls at the moment one player
/// reaches 1000 points.
fn play_deterministic(init_positions: [2]u32) u32 {
    var die: u32 = 0; // next roll value minus one (cycles 0..99 for rolls 1..100)
    var scores = [2]u32{ 0, 0 };
    var positions = init_positions;
    var player_i: usize = 0;
    var num_rolls: u32 = 0;
    while (true) {
        var roll_i: usize = 0;
        while (roll_i < 3) : (roll_i += 1) {
            positions[player_i] = @mod(positions[player_i] + die + 1, 10);
            die = @mod(die + 1, 100);
        }
        num_rolls += 3;
        scores[player_i] += positions[player_i] + 1; // square value = position + 1
        if (scores[player_i] >= 1000) {
            return scores[1 - player_i] * num_rolls;
        }
        player_i = 1 - player_i;
    }
}

/// Part 2: count universes with the quantum (Dirac) die by plain recursion
/// (no memoization; exponential in principle but acceptable for the 21-point
/// target). `player_i` is the player about to move. The returned tally is from
/// player 0's perspective: `wins` counts universes where player 0 wins.
fn play_dirac(init_positions: [2]u32, init_scores: [2]u32, player_i: usize) WinLoss {
    // The previous mover may have just won this universe.
    if (init_scores[1 - player_i] >= 21) {
        if (player_i == 0) {
            return WinLoss{
                .wins = 0,
                .losses = 1,
            };
        } else {
            return WinLoss{
                .wins = 1,
                .losses = 0,
            };
        }
    }
    var win_loss = WinLoss{
        .wins = 0,
        .losses = 0,
    };
    // ways[i] = number of (1..3)^3 roll triples summing to 3 + i.
    const ways = [_]u64{ 1, 3, 6, 7, 6, 3, 1 };
    for (ways) |num_ways, i| {
        var positions = init_positions;
        positions[player_i] = @mod(positions[player_i] + (3 + @intCast(u32, i)), 10);
        var scores = init_scores;
        scores[player_i] += positions[player_i] + 1;
        win_loss = add_win_loss(
            win_loss,
            scale_win_loss(num_ways, play_dirac(positions, scores, 1 - player_i)),
        );
    }
    return win_loss;
}

/// Read the two starting positions from `filename` (lines of the form
/// "Player N starting position: P") and solve both parts.
fn run(filename: []const u8) !Answer {
    const file = try std.fs.cwd().openFile(filename, .{ .read = true });
    defer file.close();
    var reader = std.io.bufferedReader(file.reader()).reader();
    var buffer: [4096]u8 = undefined;

    var positions: [2]u32 = undefined;
    var player_i: usize = 0;
    while (try reader.readUntilDelimiterOrEof(&buffer, '\n')) |line| : (player_i += 1) {
        var tokens = std.mem.tokenize(u8, line, " :");
        var i: usize = 0;
        while (tokens.next()) |token| : (i += 1) {
            // Token 4 of "Player N starting position: P" is the position.
            if (i == 4) {
                positions[player_i] = (try std.fmt.parseInt(u32, token, 10)) - 1; // store 0-based
                break;
            }
        }
    }
    const dirac_win_loss = play_dirac(positions, [2]u32{ 0, 0 }, 0);
    return Answer{
        .@"0" = play_deterministic(positions),
        .@"1" = @maximum(dirac_win_loss.wins, dirac_win_loss.losses),
    };
}

pub fn main() !void {
    const answer = try run("inputs/" ++ @typeName(@This()) ++ ".txt");
    std.debug.print("{d}\n", .{answer.@"0"});
    std.debug.print("{d}\n", .{answer.@"1"});
}

test {
    const answer = try run("test-inputs/" ++ @typeName(@This()) ++ ".txt");
    try std.testing.expectEqual(answer.@"0", 739785);
    try std.testing.expectEqual(answer.@"1", 444356092776315);
}
src/day21.zig
const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; const warn = std.debug.warn; const Order = std.math.Order; const testing = std.testing; const expect = testing.expect; const expectEqual = testing.expectEqual; const expectError = testing.expectError; /// Min-Max Heap for storing generic data. Initialize with `init`. pub fn MinMaxHeap(comptime T: type) type { return struct { const Self = @This(); items: []T, len: usize, allocator: *Allocator, compareFn: fn (a: T, b: T) Order, /// Initialize and return a new priority deheap. Provide `compareFn` /// that returns `Order.lt` when its first argument should /// get min-popped before its second argument, `Order.eq` if the /// arguments are of equal priority, or `Order.gt` if the second /// argument should be min-popped first. Popping the max element works /// in reverse. For example, to make `popMin` return the smallest /// number, provide /// /// `fn lessThan(a: T, b: T) Order { return std.math.order(a, b); }` pub fn init(allocator: *Allocator, compareFn: fn (T, T) Order) Self { return Self{ .items = &[_]T{}, .len = 0, .allocator = allocator, .compareFn = compareFn, }; } /// Free memory used by the deheap. pub fn deinit(self: Self) void { self.allocator.free(self.items); } /// Insert a new element, maintaining priority. pub fn add(self: *Self, elem: T) !void { try ensureCapacity(self, self.len + 1); addUnchecked(self, elem); } /// Add each element in `items` to the deheap. 
pub fn addSlice(self: *Self, items: []const T) !void { try self.ensureCapacity(self.len + items.len); for (items) |e| { self.addUnchecked(e); } } fn addUnchecked(self: *Self, elem: T) void { self.items[self.len] = elem; if (self.len > 0) { const start = self.getStartForSiftUp(elem, self.len); self.siftUp(start); } self.len += 1; } fn isMinLayer(index: usize) bool { // In the min-max heap structure: // The first element is on a min layer; // next two are on a max layer; // next four are on a min layer, and so on. const leading_zeros = @clz(usize, index + 1); const highest_set_bit = @bitSizeOf(usize) - 1 - leading_zeros; return (highest_set_bit & 1) == 0; } fn nextIsMinLayer(self: Self) bool { return isMinLayer(self.len); } const StartIndexAndLayer = struct { index: usize, min_layer: bool, }; fn getStartForSiftUp(self: Self, child: T, index: usize) StartIndexAndLayer { var child_index = index; var parent_index = parentIndex(child_index); const parent = self.items[parent_index]; const min_layer = self.nextIsMinLayer(); const order = self.compareFn(child, parent); if ((min_layer and order == .gt) or (!min_layer and order == .lt)) { // We must swap the item with it's parent if it is on the "wrong" layer self.items[parent_index] = child; self.items[child_index] = parent; return .{ .index = parent_index, .min_layer = !min_layer, }; } else { return .{ .index = child_index, .min_layer = min_layer, }; } } fn siftUp(self: *Self, start: StartIndexAndLayer) void { if (start.min_layer) { doSiftUp(self, start.index, .lt); } else { doSiftUp(self, start.index, .gt); } } fn doSiftUp(self: *Self, start_index: usize, target_order: Order) void { var child_index = start_index; while (child_index > 2) { var grandparent_index = grandparentIndex(child_index); const child = self.items[child_index]; const grandparent = self.items[grandparent_index]; // If the grandparent is already better or equal, we have gone as far as we need to if (self.compareFn(child, grandparent) != target_order) 
break; // Otherwise swap the item with it's grandparent self.items[grandparent_index] = child; self.items[child_index] = grandparent; child_index = grandparent_index; } } /// Look at the smallest element in the deheap. Returns /// `null` if empty. pub fn peekMin(self: *Self) ?T { return if (self.len > 0) self.items[0] else null; } /// Look at the largest element in the deheap. Returns /// `null` if empty. pub fn peekMax(self: *Self) ?T { if (self.len == 0) return null; if (self.len == 1) return self.items[0]; if (self.len == 2) return self.items[1]; return self.bestItemAtIndices(1, 2, .gt).item; } fn maxIndex(self: Self) ?usize { if (self.len == 0) return null; if (self.len == 1) return 0; if (self.len == 2) return 1; return self.bestItemAtIndices(1, 2, .gt).index; } /// Pop the smallest element from the deheap. Returns /// `null` if empty. pub fn removeMinOrNull(self: *Self) ?T { return if (self.len > 0) self.removeMin() else null; } /// Remove and return the smallest element from the /// deheap. pub fn removeMin(self: *Self) T { return self.removeIndex(0); } /// Pop the largest element from the deheap. Returns /// `null` if empty. pub fn removeMaxOrNull(self: *Self) ?T { return if (self.len > 0) self.removeMax() else null; } /// Remove and return the largest element from the /// deheap. pub fn removeMax(self: *Self) T { return self.removeIndex(self.maxIndex().?); } /// Remove and return element at index. Indices are in the /// same order as iterator, which is not necessarily priority /// order. 
pub fn removeIndex(self: *Self, index: usize) T { assert(self.len > index); const item = self.items[index]; const last = self.items[self.len - 1]; self.items[index] = last; self.len -= 1; siftDown(self, index); return item; } fn siftDown(self: *Self, index: usize) void { if (isMinLayer(index)) { self.doSiftDown(index, .lt); } else { self.doSiftDown(index, .gt); } } fn doSiftDown(self: *Self, start_index: usize, target_order: Order) void { var index = start_index; const half = self.len >> 1; while (true) { const first_grandchild_index = firstGrandchildIndex(index); const last_grandchild_index = first_grandchild_index + 3; const elem = self.items[index]; if (last_grandchild_index < self.len) { // All four grandchildren exist const index2 = first_grandchild_index + 1; const index3 = index2 + 1; // Find the best grandchild const best_left = self.bestItemAtIndices(first_grandchild_index, index2, target_order); const best_right = self.bestItemAtIndices(index3, last_grandchild_index, target_order); const best_grandchild = self.bestItem(best_left, best_right, target_order); // If the item is better than or equal to its best grandchild, we are done if (self.compareFn(best_grandchild.item, elem) != target_order) return; // Otherwise, swap them self.items[best_grandchild.index] = elem; self.items[index] = best_grandchild.item; index = best_grandchild.index; // We might need to swap the element with it's parent self.swapIfParentIsBetter(elem, index, target_order); } else { // The children or grandchildren are the last layer const first_child_index = firstChildIndex(index); if (first_child_index > self.len) return; const best_descendent = self.bestDescendent(first_child_index, first_grandchild_index, target_order); // If the item is better than or equal to its best descendant, we are done if (self.compareFn(best_descendent.item, elem) != target_order) return; // Otherwise swap them self.items[best_descendent.index] = elem; self.items[index] = best_descendent.item; index = 
best_descendent.index; // If we didn't swap a grandchild, we are done if (index < first_grandchild_index) return; // We might need to swap the element with it's parent self.swapIfParentIsBetter(elem, index, target_order); return; } // If we are now in the last layer, we are done if (index >= half) return; } } fn swapIfParentIsBetter(self: *Self, child: T, child_index: usize, target_order: Order) void { const parent_index = parentIndex(child_index); const parent = self.items[parent_index]; if (self.compareFn(parent, child) == target_order) { self.items[parent_index] = child; self.items[child_index] = parent; } } const ItemAndIndex = struct { item: T, index: usize, }; fn getItem(self: Self, index: usize) ItemAndIndex { return .{ .item = self.items[index], .index = index, }; } fn bestItem(self: Self, item1: ItemAndIndex, item2: ItemAndIndex, target_order: Order) ItemAndIndex { if (self.compareFn(item1.item, item2.item) == target_order) { return item1; } else { return item2; } } fn bestItemAtIndices(self: Self, index1: usize, index2: usize, target_order: Order) ItemAndIndex { var item1 = self.getItem(index1); var item2 = self.getItem(index2); return self.bestItem(item1, item2, target_order); } fn bestDescendent(self: Self, first_child_index: usize, first_grandchild_index: usize, target_order: Order) ItemAndIndex { const second_child_index = first_child_index + 1; if (first_grandchild_index >= self.len) { // No grandchildren, find the best child (second may not exist) if (second_child_index >= self.len) { return .{ .item = self.items[first_child_index], .index = first_child_index, }; } else { return self.bestItemAtIndices(first_child_index, second_child_index, target_order); } } const second_grandchild_index = first_grandchild_index + 1; if (second_grandchild_index >= self.len) { // One grandchild, so we know there is a second child. 
Compare first grandchild and second child return self.bestItemAtIndices(first_grandchild_index, second_child_index, target_order); } const best_left_grandchild_index = self.bestItemAtIndices(first_grandchild_index, second_grandchild_index, target_order).index; const third_grandchild_index = second_grandchild_index + 1; if (third_grandchild_index >= self.len) { // Two grandchildren, and we know the best. Compare this to second child. return self.bestItemAtIndices(best_left_grandchild_index, second_child_index, target_order); } else { // Three grandchildren, compare the min of the first two with the third return self.bestItemAtIndices(best_left_grandchild_index, third_grandchild_index, target_order); } } /// Return the number of elements remaining in the deheap pub fn count(self: Self) usize { return self.len; } /// Return the number of elements that can be added to the /// deheap before more memory is allocated. pub fn capacity(self: Self) usize { return self.items.len; } /// Deheap takes ownership of the passed in slice. The slice must have been /// allocated with `allocator`. /// De-initialize with `deinit`. pub fn fromOwnedSlice(allocator: *Allocator, compareFn: fn (T, T) Order, items: []T) Self { var heap = Self{ .items = items, .len = items.len, .allocator = allocator, .compareFn = compareFn, }; if (heap.len <= 1) return heap; const half = (heap.len >> 1) - 1; var i: usize = 0; while (i <= half) : (i += 1) { const index = half - i; heap.siftDown(index); } return heap; } pub fn ensureCapacity(self: *Self, new_capacity: usize) !void { var better_capacity = self.capacity(); if (better_capacity >= new_capacity) return; while (true) { better_capacity += better_capacity / 2 + 8; if (better_capacity >= new_capacity) break; } self.items = try self.allocator.realloc(self.items, better_capacity); } /// Reduce allocated capacity to `new_len`. 
pub fn shrinkAndFree(self: *Self, new_len: usize) void { assert(new_len <= self.items.len); // Cannot shrink to smaller than the current heap size without invalidating the heap property assert(new_len >= self.len); self.items = self.allocator.realloc(self.items[0..], new_len) catch |e| switch (e) { error.OutOfMemory => { // no problem, capacity is still correct then. self.items.len = new_len; return; }, }; self.len = new_len; } /// Reduce length to `new_len`. pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void { assert(new_len <= self.items.len); // Cannot shrink to smaller than the current heap size without invalidating the heap property assert(new_len >= self.len); self.len = new_len; } pub fn update(self: *Self, elem: T, new_elem: T) !void { var old_index: usize = std.mem.indexOfScalar(T, self.items[0..self.len], elem) orelse return error.ElementNotFound; _ = self.removeIndex(old_index); self.addUnchecked(new_elem); } pub const Iterator = struct { heap: *MinMaxHeap(T), count: usize, pub fn next(it: *Iterator) ?T { if (it.count >= it.heap.len) return null; const out = it.count; it.count += 1; return it.heap.items[out]; } pub fn reset(it: *Iterator) void { it.count = 0; } }; /// Return an iterator that walks the heap without consuming /// it. Invalidated if the heap is modified. 
pub fn iterator(self: *Self) Iterator { return Iterator{ .heap = self, .count = 0, }; } fn dump(self: *Self) void { warn("{{ ", .{}); warn("items: ", .{}); for (self.items) |e, i| { if (i >= self.len) break; warn("{}, ", .{e}); } warn("array: ", .{}); for (self.items) |e, i| { warn("{}, ", .{e}); } warn("len: {} ", .{self.len}); warn("capacity: {}", .{self.capacity()}); warn(" }}\n", .{}); } fn parentIndex(index: usize) usize { return (index - 1) >> 1; } fn grandparentIndex(index: usize) usize { return parentIndex(parentIndex(index)); } fn firstChildIndex(index: usize) usize { return (index << 1) + 1; } fn firstGrandchildIndex(index: usize) usize { return firstChildIndex(firstChildIndex(index)); } }; } fn lessThanComparison(a: u32, b: u32) Order { return std.math.order(a, b); } const Heap = MinMaxHeap(u32); test "MinMaxHeap: add and remove min" { var heap = Heap.init(testing.allocator, lessThanComparison); defer heap.deinit(); try heap.add(54); try heap.add(12); try heap.add(7); try heap.add(23); try heap.add(25); try heap.add(13); expectEqual(@as(u32, 7), heap.removeMin()); expectEqual(@as(u32, 12), heap.removeMin()); expectEqual(@as(u32, 13), heap.removeMin()); expectEqual(@as(u32, 23), heap.removeMin()); expectEqual(@as(u32, 25), heap.removeMin()); expectEqual(@as(u32, 54), heap.removeMin()); } test "MinMaxHeap: add and remove min structs" { const S = struct { size: u32, }; var heap = MinMaxHeap(S).init(testing.allocator, struct { fn order(a: S, b: S) Order { return std.math.order(a.size, b.size); } }.order); defer heap.deinit(); try heap.add(.{ .size = 54 }); try heap.add(.{ .size = 12 }); try heap.add(.{ .size = 7 }); try heap.add(.{ .size = 23 }); try heap.add(.{ .size = 25 }); try heap.add(.{ .size = 13 }); expectEqual(@as(u32, 7), heap.removeMin().size); expectEqual(@as(u32, 12), heap.removeMin().size); expectEqual(@as(u32, 13), heap.removeMin().size); expectEqual(@as(u32, 23), heap.removeMin().size); expectEqual(@as(u32, 25), heap.removeMin().size); 
expectEqual(@as(u32, 54), heap.removeMin().size); } test "MinMaxHeap: add and remove max" { var heap = Heap.init(testing.allocator, lessThanComparison); defer heap.deinit(); try heap.add(54); try heap.add(12); try heap.add(7); try heap.add(23); try heap.add(25); try heap.add(13); expectEqual(@as(u32, 54), heap.removeMax()); expectEqual(@as(u32, 25), heap.removeMax()); expectEqual(@as(u32, 23), heap.removeMax()); expectEqual(@as(u32, 13), heap.removeMax()); expectEqual(@as(u32, 12), heap.removeMax()); expectEqual(@as(u32, 7), heap.removeMax()); } test "MinMaxHeap: add and remove same min" { var heap = Heap.init(testing.allocator, lessThanComparison); defer heap.deinit(); try heap.add(1); try heap.add(1); try heap.add(2); try heap.add(2); try heap.add(1); try heap.add(1); expectEqual(@as(u32, 1), heap.removeMin()); expectEqual(@as(u32, 1), heap.removeMin()); expectEqual(@as(u32, 1), heap.removeMin()); expectEqual(@as(u32, 1), heap.removeMin()); expectEqual(@as(u32, 2), heap.removeMin()); expectEqual(@as(u32, 2), heap.removeMin()); } test "MinMaxHeap: add and remove same max" { var heap = Heap.init(testing.allocator, lessThanComparison); defer heap.deinit(); try heap.add(1); try heap.add(1); try heap.add(2); try heap.add(2); try heap.add(1); try heap.add(1); expectEqual(@as(u32, 2), heap.removeMax()); expectEqual(@as(u32, 2), heap.removeMax()); expectEqual(@as(u32, 1), heap.removeMax()); expectEqual(@as(u32, 1), heap.removeMax()); expectEqual(@as(u32, 1), heap.removeMax()); expectEqual(@as(u32, 1), heap.removeMax()); } test "MinMaxHeap: removeOrNull empty" { var heap = Heap.init(testing.allocator, lessThanComparison); defer heap.deinit(); expect(heap.removeMinOrNull() == null); expect(heap.removeMaxOrNull() == null); } test "MinMaxHeap: edge case 3 elements" { var heap = Heap.init(testing.allocator, lessThanComparison); defer heap.deinit(); try heap.add(9); try heap.add(3); try heap.add(2); expectEqual(@as(u32, 2), heap.removeMin()); expectEqual(@as(u32, 3), 
heap.removeMin()); expectEqual(@as(u32, 9), heap.removeMin()); }

test "MinMaxHeap: edge case 3 elements max" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    try heap.add(9);
    try heap.add(3);
    try heap.add(2);
    expectEqual(@as(u32, 9), heap.removeMax());
    expectEqual(@as(u32, 3), heap.removeMax());
    expectEqual(@as(u32, 2), heap.removeMax());
}

test "MinMaxHeap: peekMin" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    expect(heap.peekMin() == null);
    try heap.add(9);
    try heap.add(3);
    try heap.add(2);
    // Peeking must not remove the element: same answer twice in a row.
    expect(heap.peekMin().? == 2);
    expect(heap.peekMin().? == 2);
}

test "MinMaxHeap: peekMax" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    // Fixed: previously this checked peekMin() on the empty heap (copy-paste
    // from the peekMin test); this test should exercise peekMax throughout.
    expect(heap.peekMax() == null);
    try heap.add(9);
    try heap.add(3);
    try heap.add(2);
    // Peeking must not remove the element: same answer twice in a row.
    expect(heap.peekMax().? == 9);
    expect(heap.peekMax().? == 9);
}

// Renamed from the duplicate "sift up with odd indices": this variant drains
// the heap through removeMin and expects ascending order.
test "MinMaxHeap: sift up with odd indices min" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
    for (items) |e| {
        try heap.add(e);
    }
    const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 };
    for (sorted_items) |e| {
        expectEqual(e, heap.removeMin());
    }
}

// Renamed: this test previously had the exact same name as the one above,
// which is at best confusing and at worst a duplicate-declaration error.
// This variant drains the heap through removeMax and expects descending order.
test "MinMaxHeap: sift up with odd indices max" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
    for (items) |e| {
        try heap.add(e);
    }
    const sorted_items = [_]u32{ 25, 24, 24, 22, 21, 16, 15, 15, 14, 13, 12, 11, 7, 7, 6, 5, 2, 1 };
    for (sorted_items) |e| {
        expectEqual(e, heap.removeMax());
    }
}

test "MinMaxHeap: addSlice min" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
    try heap.addSlice(items[0..]);
    const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 };
    for (sorted_items) |e| {
        expectEqual(e, heap.removeMin());
    }
}

test "MinMaxHeap: addSlice max" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
    try heap.addSlice(items[0..]);
    const sorted_items = [_]u32{ 25, 24, 24, 22, 21, 16, 15, 15, 14, 13, 12, 11, 7, 7, 6, 5, 2, 1 };
    for (sorted_items) |e| {
        expectEqual(e, heap.removeMax());
    }
}

test "MinMaxHeap: fromOwnedSlice trivial case 0" {
    const items = [0]u32{};
    const heap_items = try testing.allocator.dupe(u32, &items);
    var heap = Heap.fromOwnedSlice(testing.allocator, lessThanComparison, heap_items[0..]);
    defer heap.deinit();
    expectEqual(@as(usize, 0), heap.len);
    expect(heap.removeMinOrNull() == null);
}

test "MinMaxHeap: fromOwnedSlice trivial case 1" {
    const items = [1]u32{1};
    const heap_items = try testing.allocator.dupe(u32, &items);
    var heap = Heap.fromOwnedSlice(testing.allocator, lessThanComparison, heap_items[0..]);
    defer heap.deinit();
    expectEqual(@as(usize, 1), heap.len);
    expectEqual(items[0], heap.removeMin());
    expect(heap.removeMinOrNull() == null);
}

test "MinMaxHeap: fromOwnedSlice" {
    const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
    const heap_items = try testing.allocator.dupe(u32, items[0..]);
    var heap = Heap.fromOwnedSlice(testing.allocator, lessThanComparison, heap_items[0..]);
    defer heap.deinit();
    const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 };
    for (sorted_items) |e| {
        expectEqual(e, heap.removeMin());
    }
}

test "MinMaxHeap: update min heap" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    try heap.add(55);
    try heap.add(44);
    try heap.add(11);
    try heap.update(55, 5);
    try heap.update(44, 4);
    try heap.update(11, 1);
    expectEqual(@as(u32, 1), heap.removeMin());
    expectEqual(@as(u32, 4), heap.removeMin());
    expectEqual(@as(u32, 5), heap.removeMin());
}

// When duplicates exist, update() must replace only one of them.
test "MinMaxHeap: update same min heap" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    try heap.add(1);
    try heap.add(1);
    try heap.add(2);
    try heap.add(2);
    try heap.update(1, 5);
    try heap.update(2, 4);
    expectEqual(@as(u32, 1), heap.removeMin());
    expectEqual(@as(u32, 2), heap.removeMin());
    expectEqual(@as(u32, 4), heap.removeMin());
    expectEqual(@as(u32, 5), heap.removeMin());
}

test "MinMaxHeap: update max heap" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    try heap.add(55);
    try heap.add(44);
    try heap.add(11);
    try heap.update(55, 5);
    try heap.update(44, 1);
    try heap.update(11, 4);
    expectEqual(@as(u32, 5), heap.removeMax());
    expectEqual(@as(u32, 4), heap.removeMax());
    expectEqual(@as(u32, 1), heap.removeMax());
}

// When duplicates exist, update() must replace only one of them.
test "MinMaxHeap: update same max heap" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    try heap.add(1);
    try heap.add(1);
    try heap.add(2);
    try heap.add(2);
    try heap.update(1, 5);
    try heap.update(2, 4);
    expectEqual(@as(u32, 5), heap.removeMax());
    expectEqual(@as(u32, 4), heap.removeMax());
    expectEqual(@as(u32, 2), heap.removeMax());
    expectEqual(@as(u32, 1), heap.removeMax());
}

// Iteration order is unspecified, so check set membership rather than order.
test "MinMaxHeap: iterator" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    var map = std.AutoHashMap(u32, void).init(testing.allocator);
    defer {
        heap.deinit();
        map.deinit();
    }
    const items = [_]u32{ 54, 12, 7, 23, 25, 13 };
    for (items) |e| {
        _ = try heap.add(e);
        _ = try map.put(e, {});
    }
    var it = heap.iterator();
    while (it.next()) |e| {
        _ = map.remove(e);
    }
    expectEqual(@as(usize, 0), map.count());
}

test "MinMaxHeap: remove at index" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    try heap.add(3);
    try heap.add(2);
    try heap.add(1);
    // Locate the internal index of element 2 via the iterator, then remove
    // it by index; the remaining elements must still come out in order.
    var it = heap.iterator();
    var elem = it.next();
    var idx: usize = 0;
    const two_idx = while (elem != null) : (elem = it.next()) {
        if (elem.? == 2) break idx;
        idx += 1;
    } else unreachable;
    expectEqual(heap.removeIndex(two_idx), 2);
    expectEqual(heap.removeMin(), 1);
    expectEqual(heap.removeMin(), 3);
    expectEqual(heap.removeMinOrNull(), null);
}

test "MinMaxHeap: iterator while empty" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    var it = heap.iterator();
    expectEqual(it.next(), null);
}

test "MinMaxHeap: shrinkRetainingCapacity and shrinkAndFree" {
    var heap = Heap.init(testing.allocator, lessThanComparison);
    defer heap.deinit();
    try heap.ensureCapacity(4);
    expect(heap.capacity() >= 4);
    try heap.add(1);
    try heap.add(2);
    try heap.add(3);
    expect(heap.capacity() >= 4);
    expectEqual(@as(usize, 3), heap.len);
    // Retaining shrink keeps the allocation; freeing shrink trims it to len.
    heap.shrinkRetainingCapacity(3);
    expect(heap.capacity() >= 4);
    expectEqual(@as(usize, 3), heap.len);
    heap.shrinkAndFree(3);
    expectEqual(@as(usize, 3), heap.capacity());
    expectEqual(@as(usize, 3), heap.len);
    expectEqual(@as(u32, 3), heap.removeMax());
    expectEqual(@as(u32, 2), heap.removeMax());
    expectEqual(@as(u32, 1), heap.removeMax());
    expect(heap.removeMaxOrNull() == null);
}

test "MinMaxHeap: fuzz testing min" {
    var prng = std.rand.DefaultPrng.init(0x12345678);
    const test_case_count = 100;
    const heap_size = 1_000;
    var i: usize = 0;
    while (i < test_case_count) : (i += 1) {
        try fuzzTestMin(&prng.random, heap_size);
    }
}

// Builds a heap from random data and verifies removeMin yields a
// non-decreasing sequence.
fn fuzzTestMin(rng: *std.rand.Random, comptime heap_size: usize) !void {
    const allocator = testing.allocator;
    const items = try generateRandomSlice(allocator, rng, heap_size);
    var heap = Heap.fromOwnedSlice(allocator, lessThanComparison, items);
    defer heap.deinit();
    var last_removed: ?u32 = null;
    while (heap.removeMinOrNull()) |next| {
        if (last_removed) |last| {
            expect(last <= next);
        }
        last_removed = next;
    }
}

test "MinMaxHeap: fuzz testing max" {
    var prng = std.rand.DefaultPrng.init(0x87654321);
    const test_case_count = 100;
    const heap_size = 1_000;
    var i: usize = 0;
    while (i < test_case_count) : (i += 1) {
        try fuzzTestMax(&prng.random, heap_size);
    }
}

// Builds a heap from random data and verifies removeMax yields a
// non-increasing sequence.
fn fuzzTestMax(rng: *std.rand.Random, heap_size: usize) !void {
    const allocator = testing.allocator;
    const items = try generateRandomSlice(allocator, rng, heap_size);
    var heap = Heap.fromOwnedSlice(testing.allocator, lessThanComparison, items);
    defer heap.deinit();
    var last_removed: ?u32 = null;
    while (heap.removeMaxOrNull()) |next| {
        if (last_removed) |last| {
            expect(last >= next);
        }
        last_removed = next;
    }
}

test "MinMaxHeap: fuzz testing min and max" {
    var prng = std.rand.DefaultPrng.init(0x87654321);
    const test_case_count = 100;
    const heap_size = 1_000;
    var i: usize = 0;
    while (i < test_case_count) : (i += 1) {
        try fuzzTestMinMax(&prng.random, heap_size);
    }
}

// Alternates removeMin/removeMax on the same heap; mins must be
// non-decreasing and maxes non-increasing, independently.
fn fuzzTestMinMax(rng: *std.rand.Random, heap_size: usize) !void {
    const allocator = testing.allocator;
    const items = try generateRandomSlice(allocator, rng, heap_size);
    var heap = Heap.fromOwnedSlice(allocator, lessThanComparison, items);
    defer heap.deinit();
    var last_min: ?u32 = null;
    var last_max: ?u32 = null;
    var i: usize = 0;
    while (i < heap_size) : (i += 1) {
        if (i % 2 == 0) {
            const next = heap.removeMin();
            if (last_min) |last| {
                expect(last <= next);
            }
            last_min = next;
        } else {
            const next = heap.removeMax();
            if (last_max) |last| {
                expect(last >= next);
            }
            last_max = next;
        }
    }
}

// Returns `size` random u32s. Caller owns the returned slice.
fn generateRandomSlice(allocator: *std.mem.Allocator, rng: *std.rand.Random, size: usize) ![]u32 {
    var array = std.ArrayList(u32).init(allocator);
    try array.ensureCapacity(size);
    var i: usize = 0;
    while (i < size) : (i += 1) {
        const elem = rng.int(u32);
        try array.append(elem);
    }
    return array.toOwnedSlice();
}
min_max_heap.zig
const std = @import("std");
const builtin = @import("builtin");
const testing = std.testing;
pub const VInt = @import("vint.zig");
const Allocator = std.mem.Allocator;
const File = std.fs.File;
pub const ids = @import("element_ids.zig");

/// Records the ID, location, and width of an EBML element in a file.
pub const Element = struct {
    // Element ID as decoded from its variable-length integer header.
    id: u64,
    // Size of the payload in bytes (does not include the header).
    data_size: u64,
    // Absolute file offset of the first payload byte (header already skipped).
    offset: u64,

    /// Attempts to read an element from the file at the specified offset.
    pub fn read(file: File, offset: u64) !Element {
        // 32 bytes is more than enough for the two VInts (ID + size) that
        // form an element header.
        // NOTE(review): the number of bytes actually read is discarded; near
        // EOF the buffer tail may be stale/undefined — confirm VInt.parse
        // tolerates that.
        var buffer: [32]u8 = undefined;
        _ = try file.pread(&buffer, offset);
        const element_id = VInt.parse(&buffer);
        // The size VInt immediately follows the ID VInt.
        const element_size = VInt.parse(buffer[element_id.width..]);
        return Element{
            .id = element_id.data,
            .data_size = element_size.data,
            // Offset points past both header VInts, at the payload.
            .offset = offset + element_id.width + element_size.width,
        };
    }

    /// Returns the element next to this one in the file. Note this means
    /// literally located next to it in the encoding. In particular this can
    /// cause you change levels in the encoded tree.
    pub fn next(self: *const Element, file: File) !Element {
        return Element.read(file, self.offset + self.data_size);
    }

    /// Descend into the tree contained in this element. Caller is expected to
    /// know that this is a master element, weird things will happen if it is
    /// not.
    pub fn child(self: *const Element, file: File) !Element {
        // A master element's payload is itself a sequence of elements, so the
        // first child starts exactly at our payload offset.
        return Element.read(file, self.offset);
    }

    /// Read the bytes defining the payload of the EBML element. Caller owns
    /// the data. Errors if it fails to read the whole payload.
    pub fn get_slice(self: *const Element, allocator: *Allocator, file: File) ![]u8 {
        // Zero-length payload: no allocation, return an empty slice.
        if (self.data_size == 0) return "";
        var buffer = try allocator.alloc(u8, self.data_size);
        errdefer allocator.free(buffer);
        const bytes_read = try file.pread(buffer, self.offset);
        if (bytes_read != self.data_size) return error.EOF;
        return buffer;
    }

    /// Parse the payload to a known Zig type. For 0 length elements the default
    /// is returned if defined else the predefined global default is used. See
    /// section 7 of the spec for more details.
    pub fn get(self: *const Element, comptime T: type, default: ?T, file: File) !T {
        switch (@typeInfo(T)) {
            .Int => |i| {
                if (self.data_size == 0) return default orelse 0;
                // Smallest byte count that can hold T.
                comptime const len = try std.math.divCeil(usize, i.bits, 8);
                comptime if (len > 8) @compileError(@typeName(T) ++ " not supported. EBML only supports up to 64 bit ints");
                // Zero-fill so short (EBML-truncated) encodings decode
                // correctly when right-aligned into the buffer.
                var buffer: [len]u8 = [1]u8{0} ** len;
                if (self.data_size > 8) return error.WrongSize;
                // NOTE(review): if data_size > len but <= 8 (payload wider
                // than T), `len - self.data_size` underflows — confirm callers
                // always request a T at least as wide as the stored element.
                const bytes_read = try file.pread(buffer[len - self.data_size ..], self.offset);
                if (bytes_read != self.data_size) return error.EOF;
                // EBML integers are stored big-endian.
                return std.mem.readInt(T, &buffer, .Big);
            },
            .Float => {
                // TODO I assume the floats just line up with memory
                // representation. This has worked but may not be globally
                // correct.
                if (T != f32 and T != f64) @compileError(@typeName(T) ++ " not supported. EBML only supports 32 and 64 bit floats");
                switch (self.data_size) {
                    0 => return default orelse 0,
                    4 => {
                        var buffer: [4]u8 = undefined;
                        const bytes_read = try file.pread(&buffer, self.offset);
                        if (bytes_read != 4) return error.EOF;
                        // Stored big-endian; byte-swap on little-endian hosts
                        // before reinterpreting the bits.
                        if (builtin.endian != .Big) std.mem.reverse(u8, &buffer);
                        const short = std.mem.bytesToValue(f32, &buffer);
                        return @floatCast(T, short);
                    },
                    8 => {
                        var buffer: [8]u8 = undefined;
                        const bytes_read = try file.pread(&buffer, self.offset);
                        if (bytes_read != 8) return error.EOF;
                        if (builtin.endian != .Big) std.mem.reverse(u8, &buffer);
                        const short = std.mem.bytesToValue(f64, &buffer);
                        return @floatCast(T, short);
                    },
                    else => return error.WrongSize,
                }
            },
            .Pointer => |p| {
                comptime if (p.size != .Slice) @compileError(@typeName(T) ++ " not supported. EBML only encodes ints, floats, and byte arrays");
                @compileError("See get_slice to read the underlying bytes");
            },
            else => @compileError(@typeName(T) ++ " not supported. EBML only encodes ints, floats, and byte arrays"),
        }
    }
};
src/main.zig
const std = @import("std");
const io = std.io;
const mem = std.mem;
const net = std.net;
const os = std.os;

usingnamespace @import("../primitive_types.zig");

const sm = @import("../string_map.zig");
const testing = @import("../testing.zig");

/// Serializes protocol primitives (ints, strings, bytes, inet addresses, ...)
/// into an in-memory, big-endian byte buffer.
pub const PrimitiveWriter = struct {
    const Self = @This();

    wbuf: std.ArrayList(u8),

    /// Frees the internal buffer.
    // NOTE(review): the `allocator` parameter is unused; the ArrayList owns
    // its allocator — confirm the parameter is kept only for API symmetry.
    pub fn deinit(self: *Self, allocator: *mem.Allocator) void {
        self.wbuf.deinit();
    }

    /// (Re)initializes the buffer with a fresh 1 KiB allocation.
    // NOTE(review): any previously held buffer is overwritten without
    // deinit, which leaks unless the allocator is an arena — confirm intended.
    pub fn reset(self: *Self, allocator: *mem.Allocator) !void {
        self.wbuf = try std.ArrayList(u8).initCapacity(allocator, 1024);
    }

    /// Returns the written bytes; caller takes ownership and the writer is
    /// left empty.
    pub fn toOwnedSlice(self: *Self) []u8 {
        return self.wbuf.toOwnedSlice();
    }

    /// Returns a view of the bytes written so far (still owned by the writer).
    pub fn getWritten(self: *Self) []u8 {
        return self.wbuf.items;
    }

    /// Write either a short, a int or a long to the buffer.
    pub fn writeInt(self: *Self, comptime T: type, value: T) !void {
        // Big-endian, exactly as many bytes as T occupies.
        var buf: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
        mem.writeIntBig(T, &buf, value);
        return self.wbuf.appendSlice(&buf);
    }

    /// Write a byte to the buffer.
    pub fn writeByte(self: *Self, value: u8) !void {
        return self.wbuf.append(value);
    }

    /// Write a length-prefixed byte slice to the buffer. The length is 2 bytes.
    /// The slice can be null.
    pub fn writeShortBytes(self: *Self, value: ?[]const u8) !void {
        return self.writeBytesGeneric(i16, value);
    }

    /// Write a length-prefixed byte slice to the buffer. The length is 4 bytes.
    /// The slice can be null.
    pub fn writeBytes(self: *Self, value: ?[]const u8) !void {
        return self.writeBytesGeneric(i32, value);
    }

    /// Write bytes from the stream in a generic way.
    // A null slice is encoded as length -1 with no payload.
    fn writeBytesGeneric(self: *Self, comptime LenType: type, value: ?[]const u8) !void {
        if (value) |v| {
            try self.writeInt(LenType, @intCast(LenType, v.len));
            return self.wbuf.appendSlice(v);
        } else {
            try self.writeInt(LenType, @intCast(LenType, -1));
        }
    }

    /// Write a length-prefixed string to the buffer. The length is 2 bytes.
    /// The string can't be null.
    pub fn writeString(self: *Self, value: []const u8) !void {
        return self.writeBytesGeneric(i16, value);
    }

    /// Write a length-prefixed string to the buffer. The length is 4 bytes.
    /// The string can't be null.
    pub fn writeLongString(self: *Self, value: []const u8) !void {
        return self.writeBytesGeneric(i32, value);
    }

    /// Write a UUID to the buffer.
    pub fn writeUUID(self: *Self, uuid: [16]u8) !void {
        return self.wbuf.appendSlice(&uuid);
    }

    /// Write a list of string to the buffer.
    // Encoded as a 2-byte count followed by each short string.
    pub fn writeStringList(self: *Self, list: []const []const u8) !void {
        try self.writeInt(u16, @intCast(u16, list.len));
        for (list) |value| {
            _ = try self.writeString(value);
        }
    }

    /// Write a value to the buffer.
    // Null encodes as length -1, "not set" as length -2; both carry no payload.
    pub fn writeValue(self: *Self, value: Value) !void {
        return switch (value) {
            .Null => self.writeInt(i32, @as(i32, -1)),
            .NotSet => self.writeInt(i32, @as(i32, -2)),
            .Set => |data| {
                try self.writeInt(i32, @intCast(i32, data.len));
                return self.wbuf.appendSlice(data);
            },
        };
    }

    /// Write an address without its port.
    pub fn writeInetaddr(self: *Self, inet: net.Address) callconv(.Inline) !void {
        return self.writeInetGeneric(inet, false);
    }

    /// Write an address followed by its port.
    pub fn writeInet(self: *Self, inet: net.Address) callconv(.Inline) !void {
        return self.writeInetGeneric(inet, true);
    }

    /// Write a net address to the buffer.
    // Layout: 1 size byte (4 or 16), the raw address bytes, then optionally
    // the port as a 4-byte big-endian int.
    fn writeInetGeneric(self: *Self, inet: net.Address, comptime with_port: bool) !void {
        switch (inet.any.family) {
            os.AF_INET => {
                if (with_port) {
                    var buf: [9]u8 = undefined;
                    buf[0] = 4;
                    // sa.addr is already in network byte order; copy verbatim.
                    mem.writeIntNative(u32, buf[1..5], inet.in.sa.addr);
                    mem.writeIntBig(u32, buf[5..9], inet.getPort());
                    return self.wbuf.appendSlice(&buf);
                } else {
                    var buf: [5]u8 = undefined;
                    buf[0] = 4;
                    mem.writeIntNative(u32, buf[1..5], inet.in.sa.addr);
                    return self.wbuf.appendSlice(&buf);
                }
            },
            os.AF_INET6 => {
                if (with_port) {
                    var buf: [21]u8 = undefined;
                    buf[0] = 16;
                    mem.copy(u8, buf[1..17], &inet.in6.sa.addr);
                    mem.writeIntBig(u32, buf[17..21], inet.getPort());
                    return self.wbuf.appendSlice(&buf);
                } else {
                    var buf: [17]u8 = undefined;
                    buf[0] = 16;
                    mem.copy(u8, buf[1..17], &inet.in6.sa.addr);
                    return self.wbuf.appendSlice(&buf);
                }
            },
            else => |af| std.debug.panic("invalid address family {}\n", .{af}),
        }
    }

    /// Write a consistency level as its 2-byte protocol code.
    pub fn writeConsistency(self: *Self, consistency: Consistency) !void {
        const n = @intCast(u16, @enumToInt(consistency));
        return self.writeInt(u16, n);
    }

    /// Begin a string map by writing its 2-byte entry count.
    pub fn startStringMap(self: *Self, size: usize) !void {
        _ = try self.writeInt(u16, @intCast(u16, size));
    }
};

test "primitive writer: write int" {
    var arena = testing.arenaAllocator();
    defer arena.deinit();
    var pw: PrimitiveWriter = undefined;
    try pw.reset(&arena.allocator);

    try pw.writeInt(i32, 2101504);
    testing.expectEqualSlices(u8, "\x00\x20\x11\x00", pw.getWritten()[0..4]);

    try pw.writeInt(i64, 70368746279168);
    testing.expectEqualSlices(u8, "\x00\x00\x40\x00\x00\x20\x11\x00", pw.getWritten()[4..12]);

    try pw.writeInt(i16, 4352);
    testing.expectEqualSlices(u8, "\x11\x00", pw.getWritten()[12..14]);

    try pw.writeByte(0xFF);
    testing.expectEqualSlices(u8, "\xff", pw.getWritten()[14..15]);
}

test "primitive writer: write strings and bytes" {
    var arena = testing.arenaAllocator();
    defer arena.deinit();
    var pw: PrimitiveWriter = undefined;
    try pw.reset(&arena.allocator);

    {
        // short string
        _ = try pw.writeString("foobar");
        testing.expectEqualSlices(u8, "\x00\x06foobar", pw.getWritten()[0..8]);

        // long string
        _ = try pw.writeLongString("foobar");
        testing.expectEqualSlices(u8, "\x00\x00\x00\x06foobar", pw.getWritten()[8..18]);
    }

    {
        // int32 + bytes
        _ = try pw.writeBytes("123456789A");
        testing.expectEqualSlices(u8, "\x00\x00\x00\x0A123456789A", pw.getWritten()[18..32]);

        _ = try pw.writeBytes("");
        testing.expectEqualSlices(u8, "\x00\x00\x00\x00", pw.getWritten()[32..36]);

        _ = try pw.writeBytes(null);
        testing.expectEqualSlices(u8, "\xff\xff\xff\xff", pw.getWritten()[36..40]);
    }

    {
        // int16 + bytes
        _ = try pw.writeShortBytes("123456789A");
        testing.expectEqualSlices(u8, "\x00\x0A123456789A", pw.getWritten()[40..52]);

        _ = try pw.writeShortBytes("");
        testing.expectEqualSlices(u8, "\x00\x00", pw.getWritten()[52..54]);

        _ = try pw.writeShortBytes(null);
        testing.expectEqualSlices(u8, "\xff\xff", pw.getWritten()[54..56]);
    }
}

test "primitive writer: write uuid" {
    var arena = testing.arenaAllocator();
    defer arena.deinit();
    var pw: PrimitiveWriter = undefined;
    try pw.reset(&arena.allocator);

    var uuid: [16]u8 = undefined;
    try std.os.getrandom(&uuid);

    _ = try pw.writeUUID(uuid);
    testing.expectEqualSlices(u8, &uuid, pw.getWritten()[0..16]);
}

test "primitive writer: write string list" {
    var arena = testing.arenaAllocator();
    defer arena.deinit();
    var pw: PrimitiveWriter = undefined;
    try pw.reset(&arena.allocator);

    const list = &[_][]const u8{ "foo", "bar" };

    _ = try pw.writeStringList(list);
    testing.expectEqualSlices(u8, "\x00\x02\x00\x03foo\x00\x03bar", pw.getWritten()[0..12]);
}

test "primitive writer: write value" {
    var arena = testing.arenaAllocator();
    defer arena.deinit();
    var pw: PrimitiveWriter = undefined;
    try pw.reset(&arena.allocator);

    // Normal value
    _ = try pw.writeValue(Value{ .Set = "ab" });
    testing.expectEqualSlices(u8, "\x00\x00\x00\x02\x61\x62", pw.getWritten()[0..6]);

    // Null value
    _ = try pw.writeValue(Value{ .Null = {} });
    testing.expectEqualSlices(u8, "\xff\xff\xff\xff", pw.getWritten()[6..10]);

    // "Not set" value
    _ = try pw.writeValue(Value{ .NotSet = {} });
    testing.expectEqualSlices(u8, "\xff\xff\xff\xfe", pw.getWritten()[10..14]);
}

test "primitive writer: write inet and inetaddr" {
    var arena = testing.arenaAllocator();
    defer arena.deinit();
    var pw: PrimitiveWriter = undefined;
    try pw.reset(&arena.allocator);

    // IPv4
    _ = try pw.writeInet(net.Address.initIp4([_]u8{ 0x78, 0x56, 0x34, 0x12 }, 34));
    testing.expectEqualSlices(u8, "\x04\x78\x56\x34\x12\x00\x00\x00\x22", pw.getWritten()[0..9]);

    // IPv6
    _ = try pw.writeInet(net.Address.initIp6([_]u8{0xff} ** 16, 34, 0, 0));
    testing.expectEqualSlices(u8, "\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x22", pw.getWritten()[9..30]);

    // IPv4 without port
    _ = try pw.writeInetaddr(net.Address.initIp4([_]u8{ 0x78, 0x56, 0x34, 0x12 }, 34));
    testing.expectEqualSlices(u8, "\x04\x78\x56\x34\x12", pw.getWritten()[30..35]);

    // IPv6 without port
    _ = try pw.writeInetaddr(net.Address.initIp6([_]u8{0xff} ** 16, 34, 0, 0));
    testing.expectEqualSlices(u8, "\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff", pw.getWritten()[35..52]);
}

test "primitive writer: write consistency" {
    var arena = testing.arenaAllocator();
    defer arena.deinit();
    var pw: PrimitiveWriter = undefined;
    try pw.reset(&arena.allocator);

    const testCase = struct {
        consistency: Consistency,
        exp: []const u8,
    };

    const testCases = [_]testCase{
        testCase{ .consistency = Consistency.Any, .exp = "\x00\x00" },
        testCase{ .consistency = Consistency.One, .exp = "\x00\x01" },
        testCase{ .consistency = Consistency.Two, .exp = "\x00\x02" },
        testCase{ .consistency = Consistency.Three, .exp = "\x00\x03" },
        testCase{ .consistency = Consistency.Quorum, .exp = "\x00\x04" },
        testCase{ .consistency = Consistency.All, .exp = "\x00\x05" },
        testCase{ .consistency = Consistency.LocalQuorum, .exp = "\x00\x06" },
        testCase{ .consistency = Consistency.EachQuorum, .exp = "\x00\x07" },
        testCase{ .consistency = Consistency.Serial, .exp = "\x00\x08" },
        testCase{ .consistency = Consistency.LocalSerial, .exp = "\x00\x09" },
        testCase{ .consistency = Consistency.LocalOne, .exp = "\x00\x0A" },
    };

    for (testCases) |tc| {
        // Fresh buffer per case (arena reclaims the old one at deinit).
        try pw.reset(&arena.allocator);

        _ = try pw.writeConsistency(tc.consistency);
        testing.expectEqualSlices(u8, tc.exp, pw.getWritten()[0..2]);
    }
}
src/primitive/writer.zig
const std = @import("std");
const mem = std.mem;
const Allocator = mem.Allocator;

const c = @import("c.zig");
const nvg = @import("nvg.zig");
const Game = @import("game.zig");

extern fn gladLoadGL() callconv(.C) c_int; // init OpenGL function pointers on Windows and Linux
extern fn SetProcessDPIAware() callconv(.C) c_int;

// Windows GUI-subsystem entry point; delegates to main().
export fn WinMain() callconv(.C) c_int {
    main() catch return 1; // TODO report error
    return 0;
}

// Global window/render state; this program uses a single window and a
// single-threaded main loop.
var sdl_window: ?*c.SDL_Window = null;
var sdl_gl_context: c.SDL_GLContext = undefined;
// Logical (DPI-independent) size; physical size is this times video_scale.
var video_width: f32 = 1280;
var video_height: f32 = 720;
var video_scale: f32 = 1;
var fullscreen: bool = false;
var game: Game = undefined;

// Renders one frame: refresh viewport/DPI state, clear, draw the game
// through nanovg, and present.
fn draw() void {
    sdlSetupFrame();
    c.glClearColor(0.3, 0.5, 0.8, 1);
    c.glClear(c.GL_COLOR_BUFFER_BIT);
    nvg.beginFrame(video_width, video_height, video_scale);
    game.draw();
    nvg.endFrame();
    //c.glFlush();
    c.SDL_GL_SwapWindow(sdl_window);
}

// Pending high surrogate of a UTF-16 pair delivered across two text-input
// events (observed on Windows).
var first_surrogate_half: ?u16 = null;

// Forwards SDL text input to the game, reassembling surrogate pairs that
// arrive as two separate (invalid UTF-8) events.
fn sdlProcessTextInput(text_event: c.SDL_TextInputEvent) !void {
    const text = mem.spanZ(std.meta.assumeSentinel(&text_event.text, 0));
    if (std.unicode.utf8ValidateSlice(text)) {
        try game.onTextInput(text);
    } else if (text.len == 3) { // Windows specific?
        _ = std.unicode.utf8Decode(text) catch |err| switch (err) {
            error.Utf8EncodesSurrogateHalf => {
                // Manually decode the 3-byte CESU-8-style sequence into the
                // 16-bit surrogate value.
                var codepoint: u21 = text[0] & 0b00001111;
                codepoint <<= 6;
                codepoint |= text[1] & 0b00111111;
                codepoint <<= 6;
                codepoint |= text[2] & 0b00111111;
                const surrogate = @intCast(u16, codepoint);
                if (first_surrogate_half) |first_surrogate0| {
                    // Second half arrived: combine the pair and re-encode as
                    // proper UTF-8 for the game.
                    const utf16 = [_]u16{ first_surrogate0, surrogate };
                    var utf8 = [_]u8{0} ** 4;
                    _ = std.unicode.utf16leToUtf8(&utf8, &utf16) catch unreachable;
                    first_surrogate_half = null;
                    try game.onTextInput(&utf8);
                } else {
                    // First half: stash it until its partner arrives.
                    // NOTE(review): assumes the halves arrive in order and
                    // consecutively — confirm against SDL behavior on Windows.
                    first_surrogate_half = surrogate;
                }
            },
            else => {},
        };
    }
}

// Ratio of the window's display DPI to the platform's nominal DPI.
fn getVideoScale() f32 {
    const default_dpi: f32 = switch (std.builtin.os.tag) {
        .windows => 96,
        .macos => 72,
        else => 96, // TODO
    };
    // Before the window exists, fall back to display 0.
    const display = if (sdl_window) |_| c.SDL_GetWindowDisplayIndex(sdl_window) else 0;
    var dpi: f32 = undefined;
    _ = c.SDL_GetDisplayDPI(display, &dpi, null, null);
    return dpi / default_dpi;
}

// Per-frame housekeeping: react to DPI changes (e.g. the window moved to
// another monitor) and keep the GL viewport and logical size in sync with
// the drawable size.
fn sdlSetupFrame() void {
    const new_video_scale = getVideoScale();
    if (new_video_scale != video_scale) { // DPI change
        //std.debug.print("new_video_scale {} {}\n", .{ new_video_scale, dpi });
        video_scale = new_video_scale;
        var window_width: i32 = undefined;
        var window_height: i32 = undefined;
        if (std.builtin.os.tag == .macos) {
            // macOS windows are sized in logical points; scaling is handled
            // by the OS.
            window_width = @floatToInt(i32, video_width);
            window_height = @floatToInt(i32, video_height);
        } else {
            window_width = @floatToInt(i32, video_scale * video_width);
            window_height = @floatToInt(i32, video_scale * video_height);
        }
        c.SDL_SetWindowSize(sdl_window, window_width, window_height);
    }
    var drawable_width: i32 = undefined;
    var drawable_height: i32 = undefined;
    c.SDL_GL_GetDrawableSize(sdl_window, &drawable_width, &drawable_height);
    c.glViewport(0, 0, drawable_width, drawable_height); // only when window is resizable
    video_width = @intToFloat(f32, drawable_width) / video_scale;
    video_height = @intToFloat(f32, drawable_height) / video_scale;
    game.setSize(video_width, video_height);
}

// Toggles borderless fullscreen and hides the cursor while fullscreen.
fn sdlToggleFullscreen() void {
    fullscreen = !fullscreen;
    _ = c.SDL_SetWindowFullscreen(sdl_window, if (fullscreen) c.SDL_WINDOW_FULLSCREEN_DESKTOP else 0);
    _ = c.SDL_ShowCursor(if (fullscreen) c.SDL_DISABLE else c.SDL_ENABLE);
}

// Event watch callback: redraw immediately during live window resizes (SDL
// blocks the main loop while the user drags the window edge).
fn sdlEventWatch(userdata: ?*c_void, sdl_event_ptr: [*c]c.SDL_Event) callconv(.C) c_int {
    const sdl_event = sdl_event_ptr[0];
    if (sdl_event.type == c.SDL_WINDOWEVENT and sdl_event.window.event == c.SDL_WINDOWEVENT_RESIZED) {
        draw();
        return 0;
    }
    return 1; // unhandled
}

// Program entry: initializes SDL + OpenGL + nanovg, creates the window and
// the game, then runs the event/tick/draw loop until quit.
pub fn main() !void {
    if (std.builtin.os.tag == .windows) {
        _ = SetProcessDPIAware();
    }
    if (c.SDL_Init(c.SDL_INIT_VIDEO | c.SDL_INIT_TIMER) != 0) {
        c.SDL_Log("Unable to initialize SDL: %s", c.SDL_GetError());
        return error.SDLInitializationFailed;
    }
    defer c.SDL_Quit();

    // Stencil is required by nanovg; request 4x MSAA.
    _ = c.SDL_GL_SetAttribute(.SDL_GL_STENCIL_SIZE, 1);
    _ = c.SDL_GL_SetAttribute(.SDL_GL_MULTISAMPLEBUFFERS, 1);
    _ = c.SDL_GL_SetAttribute(.SDL_GL_MULTISAMPLESAMPLES, 4);

    const window_flags = c.SDL_WINDOW_OPENGL | c.SDL_WINDOW_ALLOW_HIGHDPI | c.SDL_WINDOW_RESIZABLE;
    var window_width: i32 = undefined;
    var window_height: i32 = undefined;
    video_scale = getVideoScale();
    if (std.builtin.os.tag == .macos) {
        window_width = @floatToInt(i32, video_width);
        window_height = @floatToInt(i32, video_height);
    } else {
        window_width = @floatToInt(i32, video_scale * video_width);
        window_height = @floatToInt(i32, video_scale * video_height);
    }
    sdl_window = c.SDL_CreateWindow("Zig Gorillas", c.SDL_WINDOWPOS_CENTERED, c.SDL_WINDOWPOS_CENTERED, window_width, window_height, window_flags);
    if (sdl_window == null) {
        c.SDL_Log("Unable to create window: %s", c.SDL_GetError());
        return error.SDLCreateWindowFailed;
    }
    defer c.SDL_DestroyWindow(sdl_window);

    sdl_gl_context = c.SDL_GL_CreateContext(sdl_window);
    if (sdl_gl_context == null) {
        c.SDL_Log("Unable to create gl context: %s", c.SDL_GetError());
        return error.SDLCreateGLContextFailed;
    }
    defer c.SDL_GL_DeleteContext(sdl_gl_context);
    _ = c.SDL_GL_SetSwapInterval(1); // vsync

    // macOS gets GL function pointers from the system; other platforms load
    // them via glad after the context exists.
    if (std.builtin.os.tag == .windows or std.builtin.os.tag == .linux) {
        _ = gladLoadGL();
    }

    c.SDL_AddEventWatch(sdlEventWatch, null);

    nvg.init();
    defer nvg.quit();
    _ = nvg.createFontMem("font", @embedFile("../art/PressStart2P-Regular.ttf"));

    var gpa = std.heap.GeneralPurposeAllocator(.{
        .enable_memory_limit = true,
    }){};
    defer {
        const leaked = gpa.deinit();
        if (leaked) @panic("Memory leak :(");
    }

    game = try Game.init(&gpa.allocator);
    defer game.deinit();

    mainLoop: while (true) {
        var sdl_event: c.SDL_Event = undefined;
        while (c.SDL_PollEvent(&sdl_event) != 0) {
            switch (sdl_event.type) {
                c.SDL_QUIT => break :mainLoop,
                c.SDL_TEXTINPUT => try sdlProcessTextInput(sdl_event.text),
                c.SDL_KEYDOWN => switch (sdl_event.key.keysym.sym) {
                    c.SDLK_RETURN, c.SDLK_KP_ENTER => game.onKeyReturn(),
                    c.SDLK_BACKSPACE => game.onKeyBackspace(),
                    c.SDLK_F11 => sdlToggleFullscreen(),
                    else => {},
                },
                else => {},
            }
        }
        game.tick();
        draw();
    }
}
src/main.zig
const std = @import("std");
const mem = std.mem;

pub const decode = @import("midi/decode.zig");
pub const encode = @import("midi/encode.zig");
pub const file = @import("midi/file.zig");

test "midi" {
    _ = @import("midi/test.zig");
    _ = decode;
    _ = file;
}

pub const File = file.File;

/// One MIDI message: the low 7 bits of the status byte plus its two data
/// septets.
pub const Message = struct {
    status: u7,
    values: [2]u7,

    /// Classifies the message from its status bits. The upper three bits
    /// select the channel-voice kind; the value 0x7 marks a system message,
    /// which is then distinguished by the low four bits.
    pub fn kind(self: Message) Kind {
        const upper = @truncate(u3, self.status >> 4);
        const lower = @truncate(u4, self.status);
        if (upper != 0x7) {
            // Channel-voice messages.
            return switch (upper) {
                0x0 => Kind.NoteOff,
                0x1 => Kind.NoteOn,
                0x2 => Kind.PolyphonicKeyPressure,
                0x3 => Kind.ControlChange,
                0x4 => Kind.ProgramChange,
                0x5 => Kind.ChannelPressure,
                0x6 => Kind.PitchBendChange,
                0x7 => unreachable, // excluded by the guard above
            };
        }
        // System messages.
        return switch (lower) {
            0x0 => Kind.ExclusiveStart,
            0x1 => Kind.MidiTimeCodeQuarterFrame,
            0x2 => Kind.SongPositionPointer,
            0x3 => Kind.SongSelect,
            0x6 => Kind.TuneRequest,
            0x7 => Kind.ExclusiveEnd,
            0x8 => Kind.TimingClock,
            0xA => Kind.Start,
            0xB => Kind.Continue,
            0xC => Kind.Stop,
            0xE => Kind.ActiveSensing,
            0xF => Kind.Reset,
            0x4, 0x5, 0x9, 0xD => Kind.Undefined,
        };
    }

    /// The channel this message addresses, or null for system and undefined
    /// messages.
    pub fn channel(self: Message) ?u4 {
        return switch (self.kind()) {
            .NoteOff,
            .NoteOn,
            .PolyphonicKeyPressure,
            .ControlChange,
            .ProgramChange,
            .ChannelPressure,
            .PitchBendChange,
            => @truncate(u4, self.status),
            else => null,
        };
    }

    /// The two data septets combined into one 14-bit value, values[0]
    /// contributing the most significant bits.
    // TODO: Is this the right order according to the midi spec?
    pub fn value(self: Message) u14 {
        const high: u14 = self.values[0];
        const low: u14 = self.values[1];
        return (high << 7) | low;
    }

    /// Splits a 14-bit value back into the two data septets (inverse of
    /// `value`).
    pub fn setValue(self: *Message, v: u14) void {
        const high = @truncate(u7, v >> 7);
        const low = @truncate(u7, v);
        self.values = .{ high, low };
    }

    pub const Kind = enum {
        // Channel events
        NoteOff,
        NoteOn,
        PolyphonicKeyPressure,
        ControlChange,
        ProgramChange,
        ChannelPressure,
        PitchBendChange,

        // System events
        ExclusiveStart,
        MidiTimeCodeQuarterFrame,
        SongPositionPointer,
        SongSelect,
        TuneRequest,
        ExclusiveEnd,
        TimingClock,
        Start,
        Continue,
        Stop,
        ActiveSensing,
        Reset,

        Undefined,
    };
};
midi.zig
const std = @import("std");
const testing = std.testing;

/// The core VST 2.x plugin descriptor, shared between host and plugin.
/// Layout must match the C SDK's AEffect, hence `extern struct`.
pub const AEffect = extern struct {
    pub const Magic: i32 = ('V' << 24) | ('s' << 16) | ('t' << 8) | 'P';

    /// Magic bytes required to identify a VST Plugin
    magic: i32 = Magic,

    /// Host to Plugin Dispatcher
    dispatcher: DispatcherCallback,

    /// Deprecated in VST 2.4
    deprecated_process: ProcessCallback = deprecatedProcessCallback,

    setParameter: SetParameterCallback,

    getParameter: GetParameterCallback,

    num_programs: i32,
    num_params: i32,
    num_inputs: i32,
    num_outputs: i32,

    /// Bitmask built from Plugin.Flag values.
    flags: i32,

    reserved1: isize = 0,
    reserved2: isize = 0,

    /// Latency the plugin introduces, in samples.
    initial_delay: i32,

    _real_qualities: i32 = 0,
    _off_qualities: i32 = 0,
    _io_ratio: i32 = 0,

    object: ?*c_void = null,
    user: ?*c_void = null,

    unique_id: i32,
    version: i32,

    processReplacing: ProcessCallback,
    processReplacingF64: ProcessCallbackF64,

    future: [56]u8 = [_]u8{0} ** 56,

    // Placeholder for the deprecated accumulate-process entry point; hosts
    // may still read the field, so it must be a valid function pointer.
    fn deprecatedProcessCallback(effect: *AEffect, inputs: [*][*]f32, outputs: [*][*]f32, sample_frames: i32) callconv(.C) void {}
};

test "AEffect" {
    // 'VstP' big-endian.
    try testing.expectEqual(@as(i32, 0x56737450), AEffect.Magic);
}

/// Raw VST 2.x dispatcher opcodes.
pub const Codes = struct {
    /// Opcodes the host sends to the plugin's dispatcher.
    pub const HostToPlugin = enum(i32) {
        Initialize = 0,
        Shutdown = 1,
        GetCurrentPresetNum = 3,
        GetProductName = 48,
        GetVendorName = 47,
        GetInputInfo = 33,
        GetOutputInfo = 34,
        GetCategory = 35,
        GetTailSize = 52,
        GetApiVersion = 58,
        SetSampleRate = 10,
        SetBufferSize = 11,
        StateChange = 12,
        GetMidiInputs = 78,
        GetMidiOutputs = 79,
        GetMidiKeyName = 66,
        StartProcess = 71,
        StopProcess = 72,
        GetPresetName = 29,
        CanDo = 51,
        GetVendorVersion = 49,
        GetEffectName = 45,
        EditorGetRect = 13,
        EditorOpen = 14,
        EditorClose = 15,
        ChangePreset = 2,
        GetCurrentPresetName = 5,
        GetParameterLabel = 6,
        GetParameterDisplay = 7,
        GetParameterName = 8,
        CanBeAutomated = 26,
        EditorIdle = 19,
        EditorKeyDown = 59,
        EditorKeyUp = 60,
        ProcessEvents = 25,

        pub fn toInt(self: HostToPlugin) i32 {
            return @enumToInt(self);
        }

        /// Converts a raw opcode; errors on unknown values.
        pub fn fromInt(int: anytype) !HostToPlugin {
            return std.meta.intToEnum(HostToPlugin, int);
        }
    };

    /// Opcodes the plugin sends back to the host callback.
    pub const PluginToHost = enum(i32) {
        GetVersion = 1,
        IOChanged = 13,
        GetSampleRate = 16,
        GetBufferSize = 17,
        GetVendorString = 32,

        pub fn toInt(self: PluginToHost) i32 {
            return @enumToInt(self);
        }

        /// Converts a raw opcode; errors on unknown values.
        // Fixed: previously converted to HostToPlugin (copy-paste), which
        // does not type-check against the declared !PluginToHost result.
        pub fn fromInt(int: anytype) !PluginToHost {
            return std.meta.intToEnum(PluginToHost, int);
        }
    };
};

pub const Plugin = struct {
    /// Capability bits for AEffect.flags.
    pub const Flag = enum(i32) {
        HasEditor = 1,
        CanReplacing = 1 << 4,
        ProgramChunks = 1 << 5,
        IsSynth = 1 << 8,
        NoSoundInStop = 1 << 9,
        CanDoubleReplacing = 1 << 12,

        pub fn toI32(self: Flag) i32 {
            return @intCast(i32, @enumToInt(self));
        }

        /// ORs a set of flags into the single i32 the host expects.
        pub fn toBitmask(flags: []const Flag) i32 {
            var result: i32 = 0;
            for (flags) |flag| {
                result = result | flag.toI32();
            }
            return result;
        }
    };

    /// Plugin category reported for HostToPlugin.GetCategory; the ordinal is
    /// the value sent over the wire.
    pub const Category = enum {
        Unknown,
        Effect,
        Synthesizer,
        Analysis,
        Mastering,
        Spacializer,
        RoomFx,
        SurroundFx,
        Restoration,
        OfflineProcess,
        Shell,
        Generator,

        pub fn toI32(self: Category) i32 {
            return @intCast(i32, @enumToInt(self));
        }
    };
};

/// Editor window rectangle exchanged with the host (C ABI layout).
pub const Rect = extern struct {
    top: i16,
    left: i16,
    bottom: i16,
    right: i16,
};

pub const ProductNameMaxLength = 64;
pub const VendorNameMaxLength = 64;
pub const ParamMaxLength = 32;

/// The exported VSTPluginMain entry point signature.
pub const PluginMain = fn (
    callback: HostCallback,
) callconv(.C) ?*AEffect;

/// Callback the host hands to the plugin for plugin->host requests.
pub const HostCallback = fn (
    effect: *AEffect,
    opcode: i32,
    index: i32,
    value: isize,
    ptr: ?*c_void,
    opt: f32,
) callconv(.C) isize;

/// Host->plugin dispatcher signature stored in AEffect.
pub const DispatcherCallback = fn (
    effect: *AEffect,
    opcode: i32,
    index: i32,
    value: isize,
    ptr: ?*c_void,
    opt: f32,
) callconv(.C) isize;

/// 32-bit audio processing entry point.
pub const ProcessCallback = fn (
    effect: *AEffect,
    inputs: [*][*]f32,
    outputs: [*][*]f32,
    sample_frames: i32,
) callconv(.C) void;

/// 64-bit audio processing entry point.
pub const ProcessCallbackF64 = fn (
    effect: *AEffect,
    inputs: [*][*]f64,
    outputs: [*][*]f64,
    sample_frames: i32,
) callconv(.C) void;

pub const SetParameterCallback = fn (
    effect: *AEffect,
    index: i32,
    parameter: f32,
) callconv(.C) void;

pub const GetParameterCallback = fn (
    effect: *AEffect,
    index: i32,
) callconv(.C) f32;

/// This should eventually replace HostToPlugin.Code.
/// Like everything in this library it is subject to change and not yet final. pub const HighLevelCode = union(enum(i32)) { Initialize: void = 1, SetSampleRate: f32 = 10, SetBufferSize: isize = 11, EditorGetRect: *?*Rect = 13, EditorOpen: void = 14, EditorClose: void = 15, ProcessEvents: *const VstEvents = 25, GetPresetName: [*:0]u8 = 29, GetCategory: void = 35, GetVendorName: [*:0]u8 = 47, GetProductName: [*:0]u8 = 48, GetTailSize: void = 52, GetApiVersion: void = 58, pub fn parseOpCode(opcode: i32) ?std.meta.Tag(HighLevelCode) { const T = std.meta.Tag(HighLevelCode); return std.meta.intToEnum(T, opcode) catch return null; } pub fn parse( opcode: i32, index: i32, value: isize, ptr: ?*c_void, opt: f32, ) ?HighLevelCode { const code = HighLevelCode.parseOpCode(opcode) orelse return null; return switch (code) { .SetSampleRate => .{ .SetSampleRate = opt }, .SetBufferSize => .{ .SetBufferSize = value }, .EditorGetRect => .{ .EditorGetRect = @ptrCast(*?*Rect, @alignCast(@alignOf(*?*Rect), ptr)) }, .EditorOpen => .{ .EditorOpen = {} }, .EditorClose => .{ .EditorClose = {} }, .ProcessEvents => .{ .ProcessEvents = @ptrCast(*VstEvents, @alignCast(@alignOf(VstEvents), ptr)) }, .GetPresetName => .{ .GetPresetName = @ptrCast([*:0]u8, ptr) }, .GetCategory => .{ .GetCategory = {} }, .GetVendorName => .{ .GetVendorName = @ptrCast([*:0]u8, ptr) }, .GetProductName => .{ .GetProductName = @ptrCast([*:0]u8, ptr) }, .GetApiVersion => .{ .GetApiVersion = {} }, else => return null, }; } }; pub const VstEvents = struct { num_events: i32, reserved: *isize, events: [*]VstEvent, pub fn iterate(self: *const VstEvents) Iterator { return .{ .index = 0, .ptr = self }; } pub const Iterator = struct { index: usize, ptr: *const VstEvents, pub fn next(self: *Iterator) ?VstEvent { if (self.index >= self.ptr.num_events) { return null; } const ev = self.ptr.events[self.index]; self.index += 1; return ev; } }; }; pub const VstEvent = struct { pub const Type = enum { midi = 1, audio = 2, video = 3, 
parameter = 4, trigger = 5, sysex = 6, }; typ: Type, byte_size: i32, delta_frames: i32, flags: i32, data: [16]u8, }; pub const Event = union(enum) { Midi: struct { flags: i32, note_length: i32, note_offset: i32, data: [4]u8, detune: i8, note_off_velocity: u8, }, pub fn parse(event: VstEvent) Event { switch (event.typ) { .midi => { const raw = @ptrCast(*const RawVstMidiEvent, &event); return Event{ .Midi = .{ .flags = raw.flags, .note_length = raw.note_length, .note_offset = raw.note_offset, .data = raw.midi_data, .detune = raw.detune, .note_off_velocity = raw.note_off_velocity, } }; }, .sysex => unreachable, // TODO else => unreachable, } } }; pub const MidiFlags = enum(i32) { is_realtime = 1 << 0, }; pub const RawVstMidiEvent = struct { typ: VstEvent.Type, byte_size: i32, delta_frames: i32, flags: i32, note_length: i32, note_offset: i32, midi_data: [4]u8, detune: i8, note_off_velocity: u8, reserved: [2]u8, };
src/api.zig
const std = @import("std");
const gamekit = @import("gamekit.zig");
const renderkit = @import("renderkit");
const math = gamekit.math;

// high level wrapper objects that use the low-level backend api
pub const Texture = @import("graphics/texture.zig").Texture;
pub const OffscreenPass = @import("graphics/offscreen_pass.zig").OffscreenPass;
pub const Shader = @import("graphics/shader.zig").Shader;
pub const ShaderState = @import("graphics/shader.zig").ShaderState;

// even higher level wrappers for 2D game dev
pub const Mesh = @import("graphics/mesh.zig").Mesh;
pub const DynamicMesh = @import("graphics/mesh.zig").DynamicMesh;
pub const Batcher = @import("graphics/batcher.zig").Batcher;
pub const MultiBatcher = @import("graphics/multi_batcher.zig").MultiBatcher;
pub const TriangleBatcher = @import("graphics/triangle_batcher.zig").TriangleBatcher;
pub const FontBook = @import("graphics/fontbook.zig").FontBook;

/// Per-vertex data used by the sprite batchers: position, texcoord, packed RGBA color.
pub const Vertex = extern struct {
    pos: math.Vec2 = .{ .x = 0, .y = 0 },
    uv: math.Vec2 = .{ .x = 0, .y = 0 },
    col: u32 = 0xFFFFFFFF,
};

/// Configuration for a render pass: clear behavior plus optional transform,
/// shader and offscreen render target.
pub const PassConfig = struct {
    color_action: renderkit.ClearAction = .clear,
    color: math.Color = math.Color.aya,
    stencil_action: renderkit.ClearAction = .dont_care,
    stencil: u8 = 0,
    depth_action: renderkit.ClearAction = .dont_care,
    depth: f64 = 0,

    trans_mat: ?math.Mat32 = null,
    shader: ?*Shader = null,
    pass: ?OffscreenPass = null,

    /// Translate the clear-related fields into the backend's ClearCommand.
    pub fn asClearCommand(self: PassConfig) renderkit.ClearCommand {
        return .{
            .color = self.color.asArray(),
            .color_action = self.color_action,
            .stencil_action = self.stencil_action,
            .stencil = self.stencil,
            .depth_action = self.depth_action,
            .depth = self.depth,
        };
    }
};

// Module-level render state: the currently bound default shader and the
// projection/transform matrix for the active pass.
pub var state = struct {
    shader: Shader = undefined,
    transform_mat: math.Mat32 = math.Mat32.identity,
}{};

/// Create the default sprite shader and initialize the draw subsystem.
pub fn init() void {
    state.shader = Shader.initDefaultSpriteShader() catch unreachable;
    draw.init();
}

pub fn deinit() void {
    draw.deinit();
    state.shader.deinit();
}

/// Bind `shader` (or the default sprite shader when null). Flushes the batcher
/// first so previously queued geometry is drawn with the old shader.
pub fn setShader(shader: ?*Shader) void {
    const new_shader = shader orelse &state.shader;

    draw.batcher.flush();
    new_shader.bind();
    new_shader.setTransformMatrix(&state.transform_mat);
}

/// Apply a new backend render state; flushes pending batched geometry first.
pub fn setRenderState(renderstate: renderkit.RenderState) void {
    draw.batcher.flush();
    renderkit.renderer.setRenderState(renderstate);
}

/// Begin a render pass (offscreen when `config.pass` is set, else the default
/// backbuffer pass), computing the orthographic projection for its target size.
pub fn beginPass(config: PassConfig) void {
    var proj_mat: math.Mat32 = math.Mat32.init();
    var clear_command = config.asClearCommand();
    draw.batcher.begin();

    if (config.pass) |pass| {
        renderkit.renderer.beginPass(pass.pass, clear_command);
        // inverted for OpenGL offscreen passes
        if (renderkit.current_renderer == .opengl) {
            proj_mat = math.Mat32.initOrthoInverted(pass.color_texture.width, pass.color_texture.height);
        } else {
            proj_mat = math.Mat32.initOrtho(pass.color_texture.width, pass.color_texture.height);
        }
    } else {
        const size = gamekit.window.drawableSize();
        renderkit.renderer.beginDefaultPass(clear_command, size.w, size.h);
        proj_mat = math.Mat32.initOrtho(@intToFloat(f32, size.w), @intToFloat(f32, size.h));
    }

    // if we were given a transform matrix multiply it here
    if (config.trans_mat) |trans_mat| {
        proj_mat = proj_mat.mul(trans_mat);
    }

    state.transform_mat = proj_mat;

    // if we were given a Shader use it else set the default Shader
    setShader(config.shader);
}

/// End the current pass, restoring the default shader and flushing the batcher.
pub fn endPass() void {
    setShader(null);
    draw.batcher.end();
    renderkit.renderer.endPass();
}

/// if we haven't yet blitted to the screen do so now
pub fn commitFrame() void {
    renderkit.renderer.commitFrame();
}

// import all the drawing methods
pub const draw = @import("draw.zig").draw;
// pub usingnamespace draw;
gamekit/gfx.zig
const std = @import("std");
const link = @import("link.zig");
const ir = @import("ir.zig");
const Allocator = std.mem.Allocator;

// Single global context driven by the "self-hosted" test entry point below.
var global_ctx: TestContext = undefined;

test "self-hosted" {
    try global_ctx.init();
    defer global_ctx.deinit();
    try @import("stage2_tests").addCases(&global_ctx);
    try global_ctx.run();
}

/// Collects and runs two kinds of ZIR test cases:
/// - compare-output cases: compile, link and execute, then compare stdout
/// - transform cases: parse, analyze, re-emit ZIR, then compare the text
pub const TestContext = struct {
    zir_cmp_output_cases: std.ArrayList(ZIRCompareOutputCase),
    zir_transform_cases: std.ArrayList(ZIRTransformCase),

    pub const ZIRCompareOutputCase = struct {
        name: []const u8,
        src: [:0]const u8,
        expected_stdout: []const u8,
    };

    pub const ZIRTransformCase = struct {
        name: []const u8,
        src: [:0]const u8,
        expected_zir: []const u8,
    };

    /// Register a compile-and-run case. Panics on OOM (test harness only).
    pub fn addZIRCompareOutput(
        ctx: *TestContext,
        name: []const u8,
        src: [:0]const u8,
        expected_stdout: []const u8,
    ) void {
        ctx.zir_cmp_output_cases.append(.{
            .name = name,
            .src = src,
            .expected_stdout = expected_stdout,
        }) catch unreachable;
    }

    /// Register a parse/analyze/re-emit case. Panics on OOM (test harness only).
    pub fn addZIRTransform(
        ctx: *TestContext,
        name: []const u8,
        src: [:0]const u8,
        expected_zir: []const u8,
    ) void {
        ctx.zir_transform_cases.append(.{
            .name = name,
            .src = src,
            .expected_zir = expected_zir,
        }) catch unreachable;
    }

    fn init(self: *TestContext) !void {
        // Case lists live for the whole test run, hence the page allocator.
        self.* = .{
            .zir_cmp_output_cases = std.ArrayList(ZIRCompareOutputCase).init(std.heap.page_allocator),
            .zir_transform_cases = std.ArrayList(ZIRTransformCase).init(std.heap.page_allocator),
        };
    }

    fn deinit(self: *TestContext) void {
        self.zir_cmp_output_cases.deinit();
        self.zir_transform_cases.deinit();
        self.* = undefined;
    }

    /// Run every registered case against the native target, with a progress bar.
    /// Each case uses the leak-checking testing allocator and validates it after.
    fn run(self: *TestContext) !void {
        var progress = std.Progress{};
        const root_node = try progress.start("zir", self.zir_cmp_output_cases.items.len +
            self.zir_transform_cases.items.len);
        defer root_node.end();

        const native_info = try std.zig.system.NativeTargetInfo.detect(std.heap.page_allocator, .{});

        for (self.zir_cmp_output_cases.items) |case| {
            std.testing.base_allocator_instance.reset();
            try self.runOneZIRCmpOutputCase(std.testing.allocator, root_node, case, native_info.target);
            try std.testing.allocator_instance.validate();
        }
        for (self.zir_transform_cases.items) |case| {
            std.testing.base_allocator_instance.reset();
            try self.runOneZIRTransformCase(std.testing.allocator, root_node, case, native_info.target);
            try std.testing.allocator_instance.validate();
        }
    }

    /// Pipeline: parse ZIR -> semantic analysis -> link to an executable in a
    /// temp dir -> run it -> compare stdout against the expectation.
    fn runOneZIRCmpOutputCase(
        self: *TestContext,
        allocator: *Allocator,
        root_node: *std.Progress.Node,
        case: ZIRCompareOutputCase,
        target: std.Target,
    ) !void {
        var tmp = std.testing.tmpDir(.{});
        defer tmp.cleanup();

        var prg_node = root_node.start(case.name, 4);
        prg_node.activate();
        defer prg_node.end();

        var zir_module = x: {
            var parse_node = prg_node.start("parse", null);
            parse_node.activate();
            defer parse_node.end();

            break :x try ir.text.parse(allocator, case.src);
        };
        defer zir_module.deinit(allocator);
        if (zir_module.errors.len != 0) {
            debugPrintErrors(case.src, zir_module.errors);
            return error.ParseFailure;
        }

        var analyzed_module = x: {
            var analyze_node = prg_node.start("analyze", null);
            analyze_node.activate();
            defer analyze_node.end();

            break :x try ir.analyze(allocator, zir_module, .{
                .target = target,
                .output_mode = .Exe,
                .link_mode = .Static,
                .optimize_mode = .Debug,
            });
        };
        defer analyzed_module.deinit(allocator);
        if (analyzed_module.errors.len != 0) {
            debugPrintErrors(case.src, analyzed_module.errors);
            return error.ParseFailure;
        }

        var link_result = x: {
            var link_node = prg_node.start("link", null);
            link_node.activate();
            defer link_node.end();

            break :x try link.updateFilePath(allocator, analyzed_module, tmp.dir, "a.out");
        };
        defer link_result.deinit(allocator);
        if (link_result.errors.len != 0) {
            debugPrintErrors(case.src, link_result.errors);
            return error.LinkFailure;
        }

        var exec_result = x: {
            var exec_node = prg_node.start("execute", null);
            exec_node.activate();
            defer exec_node.end();

            break :x try std.ChildProcess.exec(.{
                .allocator = allocator,
                .argv = &[_][]const u8{"./a.out"},
                .cwd_dir = tmp.dir,
            });
        };
        defer allocator.free(exec_result.stdout);
        defer allocator.free(exec_result.stderr);
        switch (exec_result.term) {
            .Exited => |code| {
                if (code != 0) {
                    std.debug.warn("elf file exited with code {}\n", .{code});
                    return error.BinaryBadExitCode;
                }
            },
            else => return error.BinaryCrashed,
        }
        std.testing.expectEqualSlices(u8, case.expected_stdout, exec_result.stdout);
    }

    /// Pipeline: parse ZIR -> analyze as an object -> emit ZIR text again ->
    /// compare the re-emitted text against the expectation.
    fn runOneZIRTransformCase(
        self: *TestContext,
        allocator: *Allocator,
        root_node: *std.Progress.Node,
        case: ZIRTransformCase,
        target: std.Target,
    ) !void {
        var prg_node = root_node.start(case.name, 4);
        prg_node.activate();
        defer prg_node.end();

        var parse_node = prg_node.start("parse", null);
        parse_node.activate();
        var zir_module = try ir.text.parse(allocator, case.src);
        defer zir_module.deinit(allocator);
        if (zir_module.errors.len != 0) {
            debugPrintErrors(case.src, zir_module.errors);
            return error.ParseFailure;
        }
        parse_node.end();

        var analyze_node = prg_node.start("analyze", null);
        analyze_node.activate();
        var analyzed_module = try ir.analyze(allocator, zir_module, .{
            .target = target,
            .output_mode = .Obj,
            .link_mode = .Static,
            .optimize_mode = .Debug,
        });
        defer analyzed_module.deinit(allocator);
        if (analyzed_module.errors.len != 0) {
            debugPrintErrors(case.src, analyzed_module.errors);
            return error.ParseFailure;
        }
        analyze_node.end();

        var emit_node = prg_node.start("emit", null);
        emit_node.activate();
        var new_zir_module = try ir.text.emit_zir(allocator, analyzed_module);
        defer new_zir_module.deinit(allocator);
        emit_node.end();

        var write_node = prg_node.start("write", null);
        write_node.activate();
        var out_zir = std.ArrayList(u8).init(allocator);
        defer out_zir.deinit();
        try new_zir_module.writeToStream(allocator, out_zir.outStream());
        write_node.end();

        std.testing.expectEqualSlices(u8, case.expected_zir, out_zir.items);
    }
};

/// Dump the case source with line numbers, then each error with its
/// resolved line:column location.
fn debugPrintErrors(src: []const u8, errors: var) void {
    std.debug.warn("\n", .{});
    var nl = true;
    var line: usize = 1;
    for (src) |byte| {
        if (nl) {
            std.debug.warn("{: >3}| ", .{line});
            nl = false;
        }
        if (byte == '\n') {
            nl = true;
            line += 1;
        }
        std.debug.warn("{c}", .{byte});
    }
    std.debug.warn("\n", .{});
    for (errors) |err_msg| {
        const loc = std.zig.findLineColumn(src, err_msg.byte_offset);
        std.debug.warn("{}:{}: error: {}\n", .{ loc.line + 1, loc.column + 1, err_msg.msg });
    }
}
src-self-hosted/test.zig
const std = @import("std");
const mem = std.mem;
const unicode = std.unicode;
const CodePointIterator = @import("CodePointIterator.zig");
const Context = @import("../Context.zig");

// This file is a struct: an iterator over grapheme clusters implementing the
// UAX #29 (Unicode text segmentation) grapheme boundary rules (GB3..GB999).
allocator: *mem.Allocator,
context: *Context,
cp_iter: CodePointIterator,

const Self = @This();

pub fn new(ctx: *Context, str: []const u8) !Self {
    return Self{
        .allocator = ctx.allocator,
        .context = ctx,
        .cp_iter = try CodePointIterator.init(str),
    };
}

/// reinit resets the iterator with a new string.
pub fn reinit(self: *Self, str: []const u8) !void {
    self.cp_iter = try CodePointIterator.init(str);
}

// Special code points.
const ZWJ: u21 = 0x200D; // Zero-width joiner.
const CR: u21 = 0x000D;
const LF: u21 = 0x000A;

// Byte range of a cluster within the underlying string.
const Slice = struct {
    start: usize,
    end: usize,
};

pub const Grapheme = struct {
    bytes: []const u8,

    pub fn eql(self: Grapheme, str: []const u8) bool {
        return mem.eql(u8, self.bytes, str);
    }
};

/// next retrieves the next grapheme cluster.
pub fn next(self: *Self) !?Grapheme {
    var cpo = self.cp_iter.next();
    if (cpo == null) return null;
    const cp = cpo.?;
    const cp_end = self.cp_iter.i;
    const cp_start = self.cp_iter.prev_i;
    const next_cp = self.cp_iter.peek();

    // GB9.2: a Prepend code point glues to the following cluster, unless that
    // follower is CR, LF or Control.
    const prepend = try self.context.getPrepend();
    const control = try self.context.getControl();
    if (prepend.isPrepend(cp)) {
        if (next_cp) |ncp| {
            if (ncp == CR or ncp == LF or control.isControl(ncp)) {
                return Grapheme{
                    .bytes = self.cp_iter.bytes[cp_start..cp_end],
                };
            }

            const pncp = self.cp_iter.next().?; // We know there's a next.
            const pncp_end = self.cp_iter.i;
            const pncp_start = self.cp_iter.prev_i;
            const pncp_next_cp = self.cp_iter.peek();
            const s = try self.processNonPrepend(pncp, pncp_start, pncp_end, pncp_next_cp);
            return Grapheme{
                .bytes = self.cp_iter.bytes[cp_start..s.end],
            };
        }

        return Grapheme{
            .bytes = self.cp_iter.bytes[cp_start..cp_end],
        };
    }

    const s = try self.processNonPrepend(cp, cp_start, cp_end, next_cp);
    return Grapheme{
        .bytes = self.cp_iter.bytes[s.start..s.end],
    };
}

// Apply the non-Prepend boundary rules starting at `cp`; advances the
// underlying code point iterator past the rest of the cluster and returns
// the cluster's byte range.
fn processNonPrepend(
    self: *Self,
    cp: u21,
    cp_start: usize,
    cp_end: usize,
    next_cp: ?u21,
) !Slice {
    // GB3, GB4, GB5: CR+LF stays together; CR, LF and Control always break.
    if (cp == CR) {
        if (next_cp) |ncp| {
            if (ncp == LF) {
                _ = self.cp_iter.next(); // Advance past LF.
                return Slice{ .start = cp_start, .end = self.cp_iter.i };
            }
        }
        return Slice{ .start = cp_start, .end = cp_end };
    }

    if (cp == LF) {
        return Slice{ .start = cp_start, .end = cp_end };
    }

    const control = try self.context.getControl();
    if (control.isControl(cp)) {
        return Slice{ .start = cp_start, .end = cp_end };
    }

    // GB6, GB7, GB8: keep Hangul syllable sequences (L/V/T/LV/LVT) together.
    const han_map = try self.context.getHangulMap();
    if (han_map.syllableType(cp)) |hst| {
        if (next_cp) |ncp| {
            const ncp_hst = han_map.syllableType(ncp);
            if (ncp_hst) |nhst| {
                switch (hst) {
                    .L => {
                        if (nhst == .L or nhst == .V or nhst == .LV or nhst == .LVT) {
                            _ = self.cp_iter.next(); // Advance past next syllable.
                        }
                    },
                    .LV, .V => {
                        if (nhst == .V or nhst == .T) {
                            _ = self.cp_iter.next(); // Advance past next syllable.
                        }
                    },
                    .LVT, .T => {
                        if (nhst == .T) {
                            _ = self.cp_iter.next(); // Advance past next syllable.
                        }
                    },
                }
            }
        }

        // GB9: absorb any trailing Extend/ZWJ/SpacingMark.
        try self.fullAdvance();
        return Slice{ .start = cp_start, .end = self.cp_iter.i };
    }

    // GB11: Extended_Pictographic Extend* ZWJ x Extended_Pictographic.
    const extpic = try self.context.getExtPic();
    if (extpic.isExtendedPictographic(cp)) {
        try self.fullAdvance();
        if (self.cp_iter.prev) |pcp| {
            if (pcp == ZWJ) {
                if (self.cp_iter.peek()) |ncp| {
                    if (extpic.isExtendedPictographic(ncp)) {
                        _ = self.cp_iter.next(); // Advance past end emoji.
                        // GB9
                        try self.fullAdvance();
                    }
                }
            }
        }

        return Slice{ .start = cp_start, .end = self.cp_iter.i };
    }

    // GB12: pair up Regional Indicator (flag) code points.
    const regional = try self.context.getRegional();
    if (regional.isRegionalIndicator(cp)) {
        if (next_cp) |ncp| {
            if (regional.isRegionalIndicator(ncp)) {
                _ = self.cp_iter.next(); // Advance past 2nd RI.
            }
        }

        try self.fullAdvance();
        return Slice{ .start = cp_start, .end = self.cp_iter.i };
    }

    // GB999: otherwise break after absorbing trailing marks.
    try self.fullAdvance();
    return Slice{ .start = cp_start, .end = self.cp_iter.i };
}

// Consume a run of consecutive code points satisfying `predicate`.
fn lexRun(
    self: *Self,
    ctx: anytype,
    comptime predicate: fn (ctx: @TypeOf(ctx), cp: u21) bool,
) void {
    while (self.cp_iter.peek()) |ncp| {
        if (!predicate(ctx, ncp)) break;
        _ = self.cp_iter.next();
    }
}

// GB9/GB9a: recursively absorb any following ZWJ, Extend and SpacingMark
// code points into the current cluster.
fn fullAdvance(self: *Self) anyerror!void {
    const next_cp = self.cp_iter.peek();
    // Base case.
    const extend = try self.context.getExtend();
    const spacing = try self.context.getSpacing();
    if (next_cp) |ncp| {
        if (ncp != ZWJ and !extend.isExtend(ncp) and !spacing.isSpacingMark(ncp)) return;
    } else {
        return;
    }

    // Recurse.
    const ncp = next_cp.?; // We know we have next.

    if (ncp == ZWJ) {
        _ = self.cp_iter.next();
        try self.fullAdvance();
    } else if (extend.isExtend(ncp)) {
        self.lexRun(extend.*, Context.Extend.isExtend);
        try self.fullAdvance();
    } else if (spacing.isSpacingMark(ncp)) {
        self.lexRun(spacing.*, Context.Spacing.isSpacingMark);
        try self.fullAdvance();
    }
}

// Conformance test driven by the official Unicode GraphemeBreakTest data file.
test "Grapheme iterator" {
    var allocator = std.testing.allocator;
    var file = try std.fs.cwd().openFile("src/data/ucd/auxiliary/GraphemeBreakTest.txt", .{});
    defer file.close();
    var buf_reader = std.io.bufferedReader(file.reader());
    var input_stream = buf_reader.reader();

    var buf: [640]u8 = undefined;
    var line_no: usize = 1;

    var ctx = Context.init(allocator);
    defer ctx.deinit();

    var giter: ?Self = null;

    while (try input_stream.readUntilDelimiterOrEof(&buf, '\n')) |raw| : (line_no += 1) {
        // Skip comments or empty lines.
        if (raw.len == 0 or raw[0] == '#' or raw[0] == '@') continue;

        // Clean up: strip the leading break marker and any trailing comment.
        var line = mem.trimLeft(u8, raw, "÷ ");
        if (mem.indexOf(u8, line, " ÷\t#")) |octo| {
            line = line[0..octo];
        }

        // Iterate over fields: each "÷"-separated field is one expected cluster.
        var want = std.ArrayList(Grapheme).init(allocator);
        defer {
            for (want.items) |gc| {
                allocator.free(gc.bytes);
            }
            want.deinit();
        }

        var all_bytes = std.ArrayList(u8).init(allocator);
        defer all_bytes.deinit();

        var graphemes = mem.split(line, " ÷ ");
        var bytes_index: usize = 0;

        while (graphemes.next()) |field| {
            var code_points = mem.split(field, " ");
            var cp_buf: [4]u8 = undefined;
            var cp_index: usize = 0;
            var first: u21 = undefined;
            var cp_bytes = std.ArrayList(u8).init(allocator);
            defer cp_bytes.deinit();

            while (code_points.next()) |code_point| {
                if (mem.eql(u8, code_point, "×")) continue;
                const cp: u21 = try std.fmt.parseInt(u21, code_point, 16);
                if (cp_index == 0) first = cp;
                const len = try unicode.utf8Encode(cp, &cp_buf);
                try all_bytes.appendSlice(cp_buf[0..len]);
                try cp_bytes.appendSlice(cp_buf[0..len]);
                cp_index += len;
            }

            try want.append(Grapheme{
                .bytes = cp_bytes.toOwnedSlice(),
            });

            bytes_index += cp_index;
        }

        if (giter) |*gi| {
            try gi.reinit(all_bytes.items);
        } else {
            giter = try new(&ctx, all_bytes.items);
        }

        // Check.
        for (want.items) |w| {
            const g = (try giter.?.next()).?;
            //std.debug.print("line {d}: w:({s}), g:({s})\n", .{ line_no, w.bytes, g.bytes });
            std.testing.expectEqualStrings(w.bytes, g.bytes);
        }
    }
}

test "Grapheme width" {
    _ = @import("../components/aggregate/Width.zig");
}
src/zigstr/GraphemeIterator.zig
const std = @import("std");
const os = std.os;

const DEBUG: bool = false;
// Submission queue depth.
const QD: u13 = 32;
// Per-request block size.
const BS: usize = (16 * 1024);

var infd: std.fs.File = undefined;
var outfd: std.fs.File = undefined;

/// some code is borrowed from <NAME> thx to him
/// Bookkeeping for one in-flight read or write request; a pointer to this is
/// stashed in the SQE's user_data and recovered from the CQE.
const IoData = struct {
    rw_flag: os.linux.IORING_OP,
    first_offset: usize,
    offset: usize,
    first_len: usize,
    iov: []os.iovec,
};

/// Initialize an io_uring instance with `entries` SQEs.
pub fn setup(entries: u13, flags: u32) !os.linux.IO_Uring {
    if (DEBUG) {
        std.debug.print("setup {} {}\n", .{ entries, flags });
    }
    return os.linux.IO_Uring.init(entries, flags);
}

/// Re-queue an already-prepared request (used after EAGAIN).
pub fn queue_prepped(ring: *os.linux.IO_Uring, data: *IoData) !void {
    if (DEBUG) {
        std.debug.print("queue_prepped {} {}\n", .{ ring, data });
    }
    const sqe = try ring.get_sqe();

    if (data.rw_flag == .READV) {
        os.linux.io_uring_prep_readv(sqe, infd.handle, data.iov, data.offset);
    } else {
        // kinda const cast (did not find better)
        os.linux.io_uring_prep_writev(sqe, outfd.handle, @ptrCast(*[]const os.iovec_const, &data.iov).*, data.offset);
    }

    sqe.user_data = @ptrToInt(data);
}

/// Allocate an IoData + buffer and queue a read of `size` bytes at `offset`.
pub fn queue_read(ring: *os.linux.IO_Uring, allocator: *std.mem.Allocator, size: usize, offset: usize) !void {
    if (DEBUG) {
        std.debug.print(" queue_read size:{} offset:{}\n", .{ size, offset });
    }

    var data: *IoData = try allocator.create(IoData);
    data.* = .{
        .rw_flag = .READV,
        .first_offset = offset,
        .offset = offset,
        .iov = try allocator.alloc(os.iovec, 1),
        .first_len = size,
    };

    const buf = try allocator.alloc(u8, size);
    data.iov[0].iov_base = buf.ptr;
    data.iov[0].iov_len = buf.len;

    // get submission queue
    const sqe = try ring.get_sqe();
    os.linux.io_uring_prep_readv(sqe, infd.handle, data.iov, data.offset);
    sqe.user_data = @ptrToInt(data);
}

/// Turn a completed read's IoData into a write request at the same offset.
pub fn queue_write(ring: *os.linux.IO_Uring, data: *IoData) !void {
    if (DEBUG) {
        std.debug.print("queue_write {}\n", .{data});
    }

    // indicate write flag
    data.rw_flag = .WRITEV;
    data.offset = data.first_offset;

    const sqe = try ring.get_sqe();
    os.linux.io_uring_prep_writev(sqe, outfd.handle, @ptrCast(*[]const os.iovec_const, &data.iov).*, data.offset);
    sqe.user_data = @ptrToInt(data);
}

/// cp(1) clone: copy argv[1] to argv[2] using io_uring readv/writev.
pub fn main() anyerror!void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    var allocator = &arena.allocator;

    var args = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, args);

    if (DEBUG) {
        std.debug.print("copying {s} to {s}\n", .{ args[1], args[2] });
    }

    infd = try std.fs.cwd().openFile(args[1], .{});
    defer infd.close();
    outfd = try std.fs.cwd().createFile(args[2], .{});
    defer outfd.close();

    if (DEBUG) {
        std.debug.warn("nb SQEs: {} ; iovec size: {}\n", .{ QD, BS });
    }

    const insize = (try infd.stat()).size;

    // Initialize io_uring
    var ring = try setup(QD, 0);

    // pending reads sent to sqe
    var reads: usize = 0;
    // pending writes sent to sqe
    var writes: usize = 0;
    var offset: usize = 0;
    var write_left: usize = insize;
    var read_left: usize = insize;
    // flag used to submit io for read
    var read_done: bool = false;

    // try to write until the end : write_left
    // wait for last write submission writes > 0
    while (write_left > 0 or writes > 0) {
        if (DEBUG) {
            std.debug.print("while write {} {}\n", .{ write_left, writes });
        }

        read_done = false;
        while (read_left > 0) {
            if (DEBUG) {
                std.debug.print("while read {}\n", .{read_left});
            }
            // if queue is full wait for completion
            if (reads + writes >= QD) break;
            // if no more to read break
            if (read_left == 0) break;

            // read at most one block at a time
            var read_size: usize = read_left;
            if (read_left > BS) {
                read_size = BS;
            }

            // try to read
            queue_read(&ring, allocator, read_size, offset) catch {
                if (DEBUG) {
                    std.debug.print("queue_read fail sqe is full\n", .{});
                }
                break;
            };

            read_left -= read_size;
            offset += read_size;
            reads += 1;
            read_done = true;
        }

        // if at least one read has been queued, submit the batch.
        // BUGFIX: submit() reports failure via its error union (there is no
        // negative return to test), and the old error print was missing its
        // args tuple and would not compile.
        if (read_done) {
            if (DEBUG) {
                std.debug.print("submit read\n", .{});
            }
            _ = try ring.submit();
        }

        if (DEBUG) {
            std.debug.print("wait for cqe w:{} r:{}\n", .{ writes, reads });
        }

        // wait for cqe
        var cqe = try ring.copy_cqe();

        if (DEBUG) {
            std.debug.print("received cqe {} \n", .{cqe});
        }

        var io_data: *IoData = @intToPtr(*IoData, cqe.user_data);

        if (cqe.res < 0) {
            switch (cqe.res) {
                -std.os.EAGAIN => {
                    // push the operation again
                    try queue_prepped(&ring, io_data);
                    //io_uring_cqe_seen(ring, cqe);
                    continue;
                },
                -os.EINVAL => {
                    std.debug.warn("either you're trying to read too much data (can't read more than a isize), or the number of iovecs in a single SQE is > 1024\n", .{});
                    std.process.exit(1);
                },
                else => {
                    std.debug.warn("cqe errno: {}", .{cqe.res});
                    std.process.exit(1);
                },
            }
        }
        //else if (cqe.res != io_data.iov[0].iov_len)
        //    {
        //        // read or write is shorter than expected
        //        // adjusting the queue size accordingly
        //        io_data.iov[0].iov_base += @intCast(usize, cqe.res);
        //        io_data.iov[0].iov_len -= @intCast(usize, cqe.res);
        //        // push the operation for missing data
        //        try queue_prepped(&ring, io_data);
        //        //io_uring_cqe_seen(ring, cqe);
        //        continue;
        //    }

        if (io_data.rw_flag == .READV) {
            // a read completed: hand the same buffer to a write request
            try queue_write(&ring, io_data);
            _ = try ring.submit();
            write_left -= io_data.first_len;
            reads -= 1;
            writes += 1;
        } else {
            // a write completed: release the buffer, iovec and bookkeeping.
            // BUGFIX: the data buffer is a slice allocation, so it must be
            // released with free() on the full slice, not destroy() on a
            // many-item pointer. (Under the arena this is a no-op either way.)
            allocator.free(io_data.iov[0].iov_base[0..io_data.first_len]);
            allocator.free(io_data.iov);
            allocator.destroy(io_data);
            writes -= 1;
        }
    }
}
io_uring/cp/zig/io_uring_cp.zig
const std = @import("std");
const cpuinfo = @import("cpuinfo");
const slimy = @import("slimy.zig");
const version = @import("version.zig");
const zc = @import("zcompute");

pub fn main() u8 {
    mainInternal() catch |err| {
        std.debug.print("error: {s}\n", .{@errorName(err)});
        if (@errorReturnTrace()) |trace| {
            std.debug.dumpStackTrace(trace.*);
        }
        return 1;
    };
    return 0;
}

/// Warm up GPU and CPU search paths (verifying results against known-good
/// data), then run three ~5s benchmarks of each and report mean rates.
fn mainInternal() !void {
    var gpu = slimy.gpu.Context{};
    try gpu.init();
    try printHeader(gpu);

    const thread_count = std.math.lossyCast(u8, try std.Thread.getCpuCount());

    // WARMUP TEST
    // This does two things:
    //  - check that slimy is generating the expected results
    //  - get a rough estimate of how fast things are running, to base later benchmarks on
    var collector = Collector{};
    var params = warmup_params;
    var timer = try std.time.Timer.start();

    params.method = .gpu;
    try gpu.search(params, &collector, Collector.result, null);
    const approx_gpu_rate = locationRate(timer.read(), params);
    try collector.check(warmup_results);

    collector.reset();
    timer.reset();

    params.method = .{ .cpu = thread_count };
    try slimy.cpu.search(params, &collector, Collector.result, null);
    const approx_cpu_rate = locationRate(timer.read(), params);
    try collector.check(warmup_results);
    collector.reset();

    const out = std.io.getStdOut().writer();

    // 3x ~5s GPU benchmark
    try out.writeAll("Testing GPU");
    var gpu_rate: u128 = approx_gpu_rate;
    for ([_]void{{}} ** 3) |_, i| {
        try out.writeByte('.');
        gpu_rate += try benchGpu(&gpu, gpu_rate / (i + 1), 5);
    }
    gpu_rate = (gpu_rate - approx_gpu_rate) / 3; // Mean
    try out.print(" {} locations per second\n", .{fmtIntGrouped(gpu_rate)});

    // 3x ~5s CPU benchmark
    try out.writeAll("Testing CPU");
    var cpu_rate: u128 = approx_cpu_rate;
    for ([_]void{{}} ** 3) |_, i| {
        try out.writeByte('.');
        cpu_rate += try benchCpu(thread_count, cpu_rate / (i + 1), 5);
    }
    cpu_rate = (cpu_rate - approx_cpu_rate) / 3; // Mean
    try out.print(" {} locations per second\n", .{fmtIntGrouped(cpu_rate)});
}

/// Wrap a u128 so it formats with space-separated 3-digit groups.
fn fmtIntGrouped(val: u128) std.fmt.Formatter(formatIntGrouped) {
    return .{ .data = val };
}

/// Print `value` in decimal with a space between each group of three digits.
fn formatIntGrouped(
    value: u128,
    comptime _: []const u8,
    _: std.fmt.FormatOptions,
    writer: anytype,
) !void {
    if (value == 0) {
        try writer.writeByte('0');
        return;
    }

    // Approximation of the number of 3-digit groups in the largest u128
    const n_group = 128 * 4 / 13 / 3;
    var groups: [n_group]u10 = undefined;

    var val = value;
    var i: u8 = 0;
    // BUGFIX: was `val > 1000`, which left values whose leading chunk is
    // exactly 1000 ungrouped (e.g. 1000 printed as "1000" instead of "1 000").
    while (val >= 1000) {
        groups[i] = @intCast(u10, val % 1000);
        val /= 1000;
        i += 1;
    }

    try writer.print("{}", .{val});
    while (i > 0) {
        i -= 1;
        // Inner groups are zero-padded to three digits.
        try writer.print(" {:0>3}", .{groups[i]});
    }
}

const test_seed: i64 = -2152535657050944081;
const warmup_params = slimy.SearchParams{
    .world_seed = test_seed,
    .threshold = 39,
    .x0 = -1000,
    .z0 = -1000,
    .x1 = 1000,
    .z1 = 1000,
    .method = undefined,
};
// Known-good results for `warmup_params`, used to validate both search paths.
const warmup_results = &[_]slimy.Result{
    .{ .x = 949, .z = -923, .count = 43 },
    .{ .x = 950, .z = -924, .count = 42 },
    .{ .x = 245, .z = 481, .count = 40 },
    .{ .x = 246, .z = 484, .count = 40 },
    .{ .x = -624, .z = -339, .count = 40 },
    .{ .x = 669, .z = -643, .count = 40 },
    .{ .x = -623, .z = -701, .count = 40 },
    .{ .x = 948, .z = -923, .count = 40 },
    .{ .x = 949, .z = -924, .count = 40 },
    .{ .x = 950, .z = -923, .count = 40 },
    .{ .x = 950, .z = -926, .count = 40 },
    .{ .x = 327, .z = -140, .count = 39 },
    .{ .x = -423, .z = 50, .count = 39 },
    .{ .x = 430, .z = 298, .count = 39 },
    .{ .x = -554, .z = 270, .count = 39 },
    .{ .x = 664, .z = 356, .count = 39 },
    .{ .x = 715, .z = -375, .count = 39 },
    .{ .x = 716, .z = -375, .count = 39 },
    .{ .x = -309, .z = 800, .count = 39 },
    .{ .x = -310, .z = 800, .count = 39 },
    .{ .x = -726, .z = -575, .count = 39 },
    .{ .x = -725, .z = -579, .count = 39 },
    .{ .x = -726, .z = -579, .count = 39 },
    .{ .x = 671, .z = -644, .count = 39 },
    .{ .x = -624, .z = -701, .count = 39 },
    .{ .x = -883, .z = 338, .count = 39 },
    .{ .x = 684, .z = -752, .count = 39 },
    .{ .x = -700, .z = 758, .count = 39 },
    .{ .x = -636, .z = 843, .count = 39 },
    .{ .x = 949, .z = -922, .count = 39 },
    .{ .x = 951, .z = -923, .count = 39 },
    .{ .x = 949, .z = -926, .count = 39 },
    .{ .x = 951, .z = -924, .count = 39 },
    .{ .x = 950, .z = -928, .count = 39 },
};

/// Print version plus CPU/GPU identification to stdout.
fn printHeader(gpu: slimy.gpu.Context) !void {
    const out = std.io.getStdOut().writer();
    try out.print("slimy benchmark v{}\n\n", .{version.version});

    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const cpu_info = try cpuinfo.get(arena.allocator());
    try out.print("CPU: {}\n", .{cpu_info});

    const gpu_name = std.mem.sliceTo(&gpu.ctx.device_properties.device_name, 0);
    const gpu_type: []const u8 = switch (gpu.ctx.device_properties.device_type) {
        .integrated_gpu => "integrated",
        .discrete_gpu => "discrete",
        .virtual_gpu => "virtual",
        .cpu => "software renderer",
        .other => "other",
        _ => "unknown",
    };
    try out.print("GPU: {s} ({s})\n\n", .{ gpu_name, gpu_type });
}

/// Accumulates search results and compares them against an expected set.
const Collector = struct {
    buf: [64]slimy.Result = undefined,
    n: u6 = 0,

    pub fn reset(self: *Collector) void {
        self.n = 0;
    }

    pub fn result(self: *Collector, res: slimy.Result) void {
        self.buf[self.n] = res;
        self.n += 1;
    }

    /// Sort collected results and compare element-wise with `expected`
    /// (which must already be sorted).
    pub fn check(self: *Collector, expected: []const slimy.Result) !void {
        if (expected.len != self.n) {
            return error.IncorrectResults;
        }
        std.sort.sort(slimy.Result, self.buf[0..self.n], {}, slimy.Result.sortLessThan);
        std.debug.assert(
            std.sort.isSorted(slimy.Result, expected, {}, slimy.Result.sortLessThan),
        );
        for (expected) |r, i| {
            if (!std.meta.eql(r, self.buf[i])) {
                return error.IncorrectResults;
            }
        }
    }
};

/// Build SearchParams whose area takes roughly `target_secs` at `rate`
/// locations/second. Threshold 100 is unreachable, so no results fire.
fn targetParams(rate: u128, target_secs: u8, method: slimy.SearchMethod) slimy.SearchParams {
    const num_locations = rate * target_secs;
    const width = @intCast(u31, std.math.sqrt(num_locations));
    const height = @intCast(u31, num_locations / width);
    return .{
        .world_seed = test_seed,
        .threshold = 100,
        .x0 = 0,
        .z0 = 0,
        .x1 = width,
        .z1 = height,
        .method = method,
    };
}

/// Time one GPU search sized for ~`target_secs` and return locations/second.
fn benchGpu(gpu: *slimy.gpu.Context, rate: u128, target_secs: u8) !u128 {
    const params = targetParams(rate, target_secs, .gpu);
    var timer = try std.time.Timer.start();
    try gpu.search(params, {}, devNull, null);
    return locationRate(timer.read(), params);
}

/// Time one CPU search sized for ~`target_secs` and return locations/second.
fn benchCpu(threads: u8, rate: u128, target_secs: u8) !u128 {
    const params = targetParams(rate, target_secs, .{ .cpu = threads });
    var timer = try std.time.Timer.start();
    try slimy.cpu.search(params, {}, devNull, null);
    return locationRate(timer.read(), params);
}

/// Locations checked per second for a search of `params` that took `time` ns.
fn locationRate(time: u128, params: slimy.SearchParams) u128 {
    const locations_checked = @intCast(u128, params.x1 - params.x0) *
        @intCast(u128, params.z1 - params.z0);
    return locations_checked * std.time.ns_per_s / time;
}

/// Result sink for benchmark runs; keeps the call from being optimized out.
fn devNull(_: void, res: slimy.Result) void {
    std.mem.doNotOptimizeAway(res);
}
src/bench.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
const List = std.ArrayList;
const Map = std.AutoHashMap;
const StrMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const Str = []const u8;

const util = @import("util.zig");
const gpa = util.gpa;

const data = @embedFile("../data/day02.txt");

// Part 1: "up"/"down" move vertically, "forward" moves horizontally;
// answer is horizontal position times depth.
fn task1() i64 {
    var horizontal: i64 = 0;
    var depth: i64 = 0;

    var lines = tokenize(u8, data, "\n");
    while (lines.next()) |line| {
        var parts = tokenize(u8, line, " ");
        const cmd = parts.next();
        const arg = parts.next();
        const amount = parseInt(i64, arg.?, 10) catch unreachable;

        if (std.mem.eql(u8, cmd.?, "up")) {
            depth -= amount;
        } else if (std.mem.eql(u8, cmd.?, "down")) {
            depth += amount;
        } else if (std.mem.eql(u8, cmd.?, "forward")) {
            horizontal += amount;
        } else {
            std.debug.print("Unknown {s}", .{cmd});
        }
    }

    return horizontal * depth;
}

// Part 2 semantics:
//   down X increases your aim by X units.
//   up X decreases your aim by X units.
//   forward X increases horizontal position by X and depth by aim * X.
// Answer is final horizontal position times final depth.
fn task2() i64 {
    var horizontal: i64 = 0;
    var aim: i64 = 0;
    var depth: i64 = 0;

    var lines = tokenize(u8, data, "\n");
    while (lines.next()) |line| {
        var parts = tokenize(u8, line, " ");
        const cmd = parts.next();
        const arg = parts.next();
        const amount = parseInt(i64, arg.?, 10) catch unreachable;

        if (std.mem.eql(u8, cmd.?, "up")) {
            aim -= amount;
        } else if (std.mem.eql(u8, cmd.?, "down")) {
            aim += amount;
        } else if (std.mem.eql(u8, cmd.?, "forward")) {
            horizontal += amount;
            depth += aim * amount;
        } else {
            std.debug.print("Unknown {s}", .{cmd});
        }
    }

    return horizontal * depth;
}

pub fn main() !void {
    print("{}\n", .{task1()});
    print("{}\n", .{task2()});
}

// Useful stdlib functions
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;

const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;

const min = std.math.min;
const min3 = std.math.min3;
const max = std.math.max;
const max3 = std.math.max3;

const print = std.debug.print;
const assert = std.debug.assert;
const sort = std.sort.sort;
const asc = std.sort.asc;
const desc = std.sort.desc;
src/day02.zig
const std = @import("std");
const fs = std.fs;
const mem = std.mem;
const log = std.log.scoped(.tapi);

const Allocator = mem.Allocator;
const Yaml = @import("tapi/yaml.zig").Yaml;

/// A .tbd "version" value may be written as a quoted string, a float, or an
/// integer in the YAML source; all three spellings are preserved here.
const VersionField = union(enum) {
    string: []const u8,
    float: f64,
    int: u64,
};

/// Schema of a TBD v3 document as parsed from YAML.
/// All slices point into the Yaml arena that produced them.
pub const TbdV3 = struct {
    archs: []const []const u8,
    uuids: []const []const u8,
    platform: []const u8,
    install_name: []const u8,
    current_version: ?VersionField,
    compatibility_version: ?VersionField,
    objc_constraint: ?[]const u8,
    parent_umbrella: ?[]const u8,
    exports: ?[]const struct {
        archs: []const []const u8,
        allowable_clients: ?[]const []const u8,
        re_exports: ?[]const []const u8,
        symbols: ?[]const []const u8,
        objc_classes: ?[]const []const u8,
        objc_ivars: ?[]const []const u8,
        objc_eh_types: ?[]const []const u8,
    },
};

/// Schema of a TBD v4 document as parsed from YAML.
/// v4 replaces v3's flat arch lists with per-target groupings.
pub const TbdV4 = struct {
    tbd_version: u3,
    targets: []const []const u8,
    uuids: []const struct {
        target: []const u8,
        value: []const u8,
    },
    install_name: []const u8,
    current_version: ?VersionField,
    compatibility_version: ?VersionField,
    reexported_libraries: ?[]const struct {
        targets: []const []const u8,
        libraries: []const []const u8,
    },
    parent_umbrella: ?[]const struct {
        targets: []const []const u8,
        umbrella: []const u8,
    },
    exports: ?[]const struct {
        targets: []const []const u8,
        symbols: ?[]const []const u8,
        objc_classes: ?[]const []const u8,
        objc_ivars: ?[]const []const u8,
        objc_eh_types: ?[]const []const u8,
    },
    reexports: ?[]const struct {
        targets: []const []const u8,
        symbols: ?[]const []const u8,
        objc_classes: ?[]const []const u8,
        objc_ivars: ?[]const []const u8,
        objc_eh_types: ?[]const []const u8,
    },
    allowable_clients: ?[]const struct {
        targets: []const []const u8,
        clients: []const []const u8,
    },
    objc_classes: ?[]const []const u8,
    objc_ivars: ?[]const []const u8,
    objc_eh_types: ?[]const []const u8,
};

/// A parsed TBD document of either supported schema version, with accessors
/// for the fields common to both.
pub const Tbd = union(enum) {
    v3: TbdV3,
    v4: TbdV4,

    pub fn currentVersion(self: Tbd) ?VersionField {
        return switch (self) {
            .v3 => |v3| v3.current_version,
            .v4 => |v4| v4.current_version,
        };
    }

    pub fn compatibilityVersion(self: Tbd) ?VersionField {
        return switch (self) {
            .v3 => |v3| v3.compatibility_version,
            .v4 => |v4| v4.compatibility_version,
        };
    }

    pub fn installName(self: Tbd) []const u8 {
        return switch (self) {
            .v3 => |v3| v3.install_name,
            .v4 => |v4| v4.install_name,
        };
    }
};

/// A loaded .tbd stub library: the raw YAML (which owns all memory) plus the
/// typed documents parsed out of it.
pub const LibStub = struct {
    /// Underlying memory for stub's contents.
    yaml: Yaml,

    /// Typed contents of the tbd file.
    inner: []Tbd,

    /// Read and parse `file` as a TBD stub. The four schema shapes are tried
    /// in order (list-of-v4, single v4, list-of-v3, single v3); the first
    /// that parses wins. Returns error.NotLibStub if none match.
    /// All parsed data lives in (and is freed with) the returned Yaml arena.
    pub fn loadFromFile(allocator: *Allocator, file: fs.File) !LibStub {
        const source = try file.readToEndAlloc(allocator, std.math.maxInt(u32));
        defer allocator.free(source);

        var lib_stub = LibStub{
            .yaml = try Yaml.load(allocator, source),
            .inner = undefined,
        };

        // TODO revisit this logic in the hope of simplifying it.
        // Each `err:` block is one parse attempt: on failure it breaks out of
        // itself and falls through to the next attempt.
        lib_stub.inner = blk: {
            err: {
                log.debug("trying to parse as []TbdV4", .{});
                const inner = lib_stub.yaml.parse([]TbdV4) catch break :err;
                var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, inner.len);
                for (inner) |doc, i| {
                    out[i] = .{ .v4 = doc };
                }
                break :blk out;
            }

            err: {
                log.debug("trying to parse as TbdV4", .{});
                const inner = lib_stub.yaml.parse(TbdV4) catch break :err;
                var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, 1);
                out[0] = .{ .v4 = inner };
                break :blk out;
            }

            err: {
                log.debug("trying to parse as []TbdV3", .{});
                const inner = lib_stub.yaml.parse([]TbdV3) catch break :err;
                var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, inner.len);
                for (inner) |doc, i| {
                    out[i] = .{ .v3 = doc };
                }
                break :blk out;
            }

            err: {
                log.debug("trying to parse as TbdV3", .{});
                const inner = lib_stub.yaml.parse(TbdV3) catch break :err;
                var out = try lib_stub.yaml.arena.allocator.alloc(Tbd, 1);
                out[0] = .{ .v3 = inner };
                break :blk out;
            }

            return error.NotLibStub;
        };

        return lib_stub;
    }

    /// Frees everything: `inner` was allocated from the Yaml arena.
    pub fn deinit(self: *LibStub) void {
        self.yaml.deinit();
    }
};
src/link/tapi.zig
const std = @import("std");
const Builder = std.build.Builder;
const dbuild = @import("src/common/dbuild.zig");
const dcommon = @import("src/common/dcommon.zig");

/// Build the UEFI bootloader for the selected board. riscv64 has no native
/// UEFI target support here, so it goes through a separate objcopy pipeline.
pub fn build(b: *Builder) !void {
    const board = try dbuild.getBoard(b);
    var target = dbuild.crossTargetFor(board);
    target.os_tag = .uefi;

    if (target.cpu_arch.? == .riscv64) {
        try buildRiscv64(b, board, target);
        return;
    }

    const exe = b.addExecutable(bootName(b, board, target), "src/dainboot.zig");
    exe.setTarget(target);
    exe.setBuildMode(b.standardReleaseOptions());
    try dbuild.addBuildOptions(b, exe, board);
    exe.addPackagePath("dtb", "../dtb/src/dtb.zig");
    exe.install();

    b.default_step.dependOn(&exe.step);
}

/// riscv64 path: assemble a freestanding EFI crt0, compile the loader as an
/// object, link both with ld.lld against an EFI linker script, then objcopy
/// the result into a flat PE-like .efi binary.
fn buildRiscv64(b: *Builder, board: dcommon.Board, target: std.zig.CrossTarget) !void {
    // crt0 must be freestanding; only the loader object itself targets uefi.
    const crt0 = b.addAssemble("crt0-efi-riscv64", "src/crt0-efi-riscv64.S");
    crt0.setTarget(std.zig.CrossTarget{
        .cpu_arch = target.cpu_arch,
        .os_tag = .freestanding,
    });
    crt0.setBuildMode(b.standardReleaseOptions());

    const obj = b.addObject(bootName(b, board, target), "src/dainboot.zig");
    obj.setTarget(target);
    obj.setBuildMode(b.standardReleaseOptions());
    try dbuild.addBuildOptions(b, obj, board);
    obj.addPackagePath("dtb", "../dtb/src/dtb.zig");

    // External link step; output is the intermediate combined.o.
    const combined = b.addSystemCommand(&.{
        "ld.lld",
        "-nostdlib",
        "-znocombreloc",
        "-T",
        "elf_riscv64_efi.lds",
        "-shared",
        "-Bsymbolic",
        "-o",
        "combined.o",
        "-s",
    });
    combined.addArtifactArg(crt0);
    combined.addArtifactArg(obj);

    // objcopy writes directly into zig-out/bin, so make sure it exists.
    try std.fs.cwd().makePath("zig-out/bin");

    // Keep only the sections the EFI image needs, emitted as a flat binary.
    const efi = b.addSystemCommand(&.{
        "llvm-objcopy",
        "-j", ".header",
        "-j", ".text",
        "-j", ".plt",
        "-j", ".sdata",
        "-j", ".data",
        "-j", ".dynamic",
        "-j", ".dynstr",
        "-j", ".dynsym",
        "-j", ".rel*",
        "-j", ".rela*",
        "-j", ".reloc",
        "--output-target=binary",
        "combined.o",
        b.fmt("zig-out/bin/{s}.efi", .{bootName(b, board, target)}),
    });
    efi.step.dependOn(&combined.step);
    b.default_step.dependOn(&efi.step);
}

/// Conventional UEFI boot file name, e.g. "BOOT<arch-tag>.<board>".
fn bootName(b: *Builder, board: dcommon.Board, target: std.zig.CrossTarget) []const u8 {
    return b.fmt("BOOT{s}.{s}", .{ dbuild.efiTagFor(target.cpu_arch.?), @tagName(board) });
}
dainboot/build.zig
const std = @import("std");

const real_input = @embedFile("day-15_real-input");
const test_input = @embedFile("day-15_test-input");

pub fn main() !void {
    std.debug.print("--- Day 15 ---\n", .{});
    const result = try execute(real_input, true);
    std.debug.print("lowest total risk is {}\n", .{ result });
}

/// Parse a square risk map from `input` and return the cheapest total cost
/// from the top-left to the bottom-right cell, moving in 4 directions.
/// When `unfold_map` is true the map is tiled 5x5 with risks incremented per
/// tile and wrapped from 9 back to 1 (AoC 2021 day 15 part 2 rules).
fn execute(input: []const u8, unfold_map: bool) !u32 {
    // All allocation comes out of this 2 MiB stack buffer; big enough for the
    // 5x-unfolded map plus the per-cell scratch lists below.
    var alloc_buffer: [2 * 1024 * 1024]u8 = undefined;
    var alloc = std.heap.FixedBufferAllocator.init(alloc_buffer[0..]);

    var initial_map = std.ArrayList(Pos).init(alloc.allocator());
    defer initial_map.deinit();
    var initial_edge_length: usize = undefined;

    var input_it = std.mem.tokenize(u8, input, "\r\n");
    while (input_it.next()) |line| {
        if (line.len == 0) continue;
        // Assumes all lines have equal length (square map) — last line wins.
        initial_edge_length = line.len;
        for (line) |pos_cost_char| {
            const cost = @intCast(u4, pos_cost_char - '0');
            const pos = Pos { .individual_cost = cost, };
            try initial_map.append(pos);
        }
    }

    var map: []Pos = undefined;
    var edge_length: usize = undefined;
    if (unfold_map) {
        edge_length = initial_edge_length * 5;
        var full_map = try std.ArrayList(Pos).initCapacity(alloc.allocator(), edge_length * edge_length);
        var y: usize = 0;
        while (y < edge_length):(y += 1) {
            var x: usize = 0;
            while (x < edge_length):(x += 1) {
                const initial_map_x = x % initial_edge_length;
                const initial_map_y = y % initial_edge_length;
                const initial_map_i = initial_map_y * initial_edge_length + initial_map_x;
                // Each tile right/down adds +1 risk, wrapping 10..18 -> 1..9.
                const increment = (x / initial_edge_length) + (y / initial_edge_length);
                var pos = initial_map.items[initial_map_i];
                pos.individual_cost += @intCast(u4, increment);
                while (pos.individual_cost > 9) pos.individual_cost -= 9;
                try full_map.append(pos);
            }
        }
        map = full_map.items;
    } else {
        map = initial_map.items;
        edge_length = initial_edge_length;
    }

    // Bellman-Ford-style relaxation: sweep the whole grid until no cell's
    // lowest known cost improves. The row-major sweep guarantees each cell's
    // up/left neighbor is relaxed first, so `pos.lowest_cost` is finite by the
    // time it is read (no maxInt overflow in the addition below).
    map[0].lowest_cost = 0;
    var anything_changed = true;
    while (anything_changed) {
        anything_changed = false;
        for (map) |pos, i| {
            const x = i % edge_length;
            const y = i / edge_length;

            // Collect in-bounds 4-neighborhood indices of cell (x, y).
            var neighbor_indices = std.ArrayList(usize).init(alloc.allocator());
            defer neighbor_indices.deinit();
            if (x > 0) {
                try neighbor_indices.append(i - 1);
            }
            if (x < edge_length - 1) {
                try neighbor_indices.append(i + 1);
            }
            if (y > 0) {
                try neighbor_indices.append(i - edge_length);
            }
            if (y < edge_length - 1) {
                try neighbor_indices.append(i + edge_length);
            }

            // Try to improve each neighbor's best-known path cost through us.
            for (neighbor_indices.items) |neighbor_i| {
                const neighbor = &map[neighbor_i];
                const potential_cost = pos.lowest_cost + neighbor.individual_cost;
                if (potential_cost < neighbor.lowest_cost) {
                    neighbor.lowest_cost = potential_cost;
                    anything_changed = true;
                }
            }
        }
    }

    return map[map.len - 1].lowest_cost;
}

// One grid cell: its own risk value plus the best path cost found so far
// (initialized to "unreached").
const Pos = packed struct {
    individual_cost: u8,
    lowest_cost: u32 = std.math.maxInt(u32),
};

test "day 1 test" {
    const expected: u32 = 40;
    const result = try execute(test_input, false);
    try std.testing.expectEqual(expected, result);
}

test "day 2 test" {
    const expected: u32 = 315;
    const result = try execute(test_input, true);
    try std.testing.expectEqual(expected, result);
}
day-15.zig
const std = @import("std");
const iup = @import("iup.zig");

const MainLoop = iup.MainLoop;
const Dialog = iup.Dialog;
const Button = iup.Button;
const MessageDlg = iup.MessageDlg;
const Multiline = iup.Multiline;
const Label = iup.Label;
const Text = iup.Text;
const VBox = iup.VBox;
const HBox = iup.HBox;
const Menu = iup.Menu;
const SubMenu = iup.SubMenu;
const Separator = iup.Separator;
const Fill = iup.Fill;
const Item = iup.Item;
const FileDlg = iup.FileDlg;
const Toggle = iup.Toggle;
const List = iup.List;
const Tree = iup.Tree;
const Frame = iup.Frame;
const Radio = iup.Radio;
const ScreenSize = iup.ScreenSize;
const Image = iup.Image;
const ImageRgb = iup.ImageRgb;
const ImageRgba = iup.ImageRgba;
const Rgb = iup.Rgb;

/// IUP tree-widget example: shows the dialog, populates the tree, and runs
/// the event loop until the UI is closed.
pub fn main() !void {
    try MainLoop.open();
    defer MainLoop.close();

    var dlg = try createDialog();
    defer dlg.deinit();

    try dlg.showXY(.Center, .Center);
    // Nodes must be added after the dialog is mapped/shown.
    initTree(dlg);
    try MainLoop.beginLoop();
}

/// Build the main dialog: a single tree inside an HBox/VBox layout.
fn createDialog() !*Dialog {
    var tree = try createTree();

    return try (Dialog.init()
        .setTitle("IUP for Zig - Tree")
        .setSize(.Half, .Half)
        .setChildren(
        .{
            VBox.init()
                .setMargin(20, 20)
                .setGap(10)
                .setChildren(
                .{
                    HBox.init()
                        .setChildren(
                        .{
                            tree,
                        },
                    ),
                },
            ),
        },
    ).unwrap());
}

/// Create the tree widget, named "tree" so callbacks can look it up,
/// with in-place rename and the right-click/key callbacks attached.
fn createTree() !*Tree {
    var tree = try (Tree.init()
        .setName("tree")
        .setShowRename(true)
        .setRightClickCallback(rightclick_cb)
        .setKAnyCallback(k_any_cb)
        .unwrap());
    return tree;
}

/// Populate the tree with the sample "Figures" hierarchy.
/// The integer argument is the id of the node to insert under.
fn initTree(dialog: *Dialog) void {
    var tree = dialog.getDialogChild("tree").?.Tree;
    tree.setTitle(0, "Figures");
    tree.addBranch(0, "3D");
    tree.addBranch(1, "parallelogram");
    tree.addLeaf(2, "diamond");
    tree.addLeaf(2, "square");
    tree.addBranch(0, "2D");
    tree.addBranch(1, "triangle");
    tree.addLeaf(2, "scalenus");
    tree.addLeaf(2, "isoceles");
    tree.addLeaf(2, "equilateral");
    tree.addLeaf(0, "Test");
}

/// Context menu for a right-clicked node `id`: node editing actions plus a
/// submenu of selection modes (all routed to selectNode, which dispatches on
/// the item's title).
fn rightclick_cb(tree: *Tree, id: i32) !void {
    var popup = try (Menu.init().setChildren(
        .{
            Item.init().setTitle("Add Leaf").setActionCallback(addLeaf),
            Item.init().setTitle("Add Branch").setActionCallback(addBranch),
            Item.init().setTitle("Rename Node").setActionCallback(rename),
            Item.init().setTitle("Remove Node").setActionCallback(removeNode),
            SubMenu.init()
                .setTitle("Selection")
                .setChildren(
                .{
                    Item.init().setTitle("ROOT").setActionCallback(selectNode),
                    Item.init().setTitle("LAST").setActionCallback(selectNode),
                    Item.init().setTitle("PGUP").setActionCallback(selectNode),
                    Item.init().setTitle("PGDN").setActionCallback(selectNode),
                    Item.init().setTitle("NEXT").setActionCallback(selectNode),
                    Item.init().setTitle("PREVIOUS").setActionCallback(selectNode),
                    Separator.init(),
                    Item.init().setTitle("INVERT").setActionCallback(selectNode),
                    Item.init().setTitle("BLOCK").setActionCallback(selectNode),
                    Item.init().setTitle("CLEARALL").setActionCallback(selectNode),
                    Item.init().setTitle("MARKALL").setActionCallback(selectNode),
                    Item.init().setTitle("INVERTALL").setActionCallback(selectNode),
                },
            ),
        },
    )
        .unwrap());

    // Can't use "getDialogChild" function on callbacks, because this is a dettached popup
    // Original example sets a GlobalHandler to retrieve the tree
    // Here we use a custom PtrAttribute to access the tree
    popup.setPtrAttribute(Tree, "tree", tree);

    defer popup.deinit();
    tree.setValue(id);
    try popup.popup(.MousePos, .MousePos);
}

/// Menu action: delete the currently selected node.
fn removeNode(item: *Item) !void {
    var tree = item.getPtrAttribute(Tree, "tree").?;
    const node = tree.getValue();
    tree.delNode(node, .Selected);
}

/// Menu action: add an empty leaf under the selected node and immediately
/// start inline renaming of it.
fn addLeaf(item: *Item) !void {
    var tree = item.getPtrAttribute(Tree, "tree").?;
    var node = tree.getValue();
    tree.addLeaf(node, "");
    var new_node = tree.getLastAddNode();
    std.debug.print("new_node {}\n", .{new_node});
    tree.setValue(new_node);
    std.debug.print("current_node {}\n", .{tree.getValue()});
    tree.rename();
}

/// Menu action: add an empty branch under the selected node and immediately
/// start inline renaming of it.
fn addBranch(item: *Item) !void {
    var tree = item.getPtrAttribute(Tree, "tree").?;
    var node = tree.getValue();
    tree.addBranch(node, "");
    var new_node = tree.getLastAddNode();
    tree.setValue(new_node);
    tree.rename();
}

/// Menu action: begin inline rename of the selected node.
fn rename(item: *Item) !void {
    var tree = item.getPtrAttribute(Tree, "tree").?;
    tree.rename();
}

/// Menu action shared by all "Selection" items: the item's title (e.g.
/// "ROOT", "MARKALL") is passed straight through as the tree's VALUE.
fn selectNode(item: *Item) anyerror!void {
    var tree = item.getPtrAttribute(Tree, "tree").?;
    const value = item.getTitle();

    // Fix this:
    // Value can be a int or one of relative nodes "NEXT", "PREVIOUS", "ROOT", etc
    // use a union for that?
    tree.setStrAttribute("VALUE", value);
}

/// Keyboard handler: Delete key removes the selected node.
fn k_any_cb(tree: *Tree, key: i32) !void {
    if (key == iup.keys.DEL) {
        const node = tree.getValue();
        tree.delNode(node, .Selected);
    }
}
src/tree_example.zig
const std = @import("std");
const zt = @import("../zt.zig");
const ig = @import("imgui");

/// You can't remove the background from this, but you can make it invisible with
/// style.Colors.
/// Creates a full-workarea, input-transparent host window containing a
/// passthru dockspace, and returns the dockspace id.
pub fn ztViewPort() ig.ImGuiID {
    const dockNodeFlags = ig.ImGuiDockNodeFlags_PassthruCentralNode;
    // Everything that would make the host window visible or interactive is
    // disabled; it exists only to carry the dockspace.
    const windowFlags = ig.ImGuiWindowFlags_NoCollapse |
        ig.ImGuiWindowFlags_NoDecoration |
        ig.ImGuiWindowFlags_NoDocking |
        ig.ImGuiWindowFlags_NoMove |
        ig.ImGuiWindowFlags_NoResize |
        ig.ImGuiWindowFlags_NoScrollbar |
        ig.ImGuiWindowFlags_NoTitleBar |
        ig.ImGuiWindowFlags_NoNavFocus |
        ig.ImGuiWindowFlags_NoBackground |
        ig.ImGuiWindowFlags_NoFocusOnAppearing |
        ig.ImGuiWindowFlags_NoMouseInputs |
        ig.ImGuiWindowFlags_NoInputs |
        ig.ImGuiWindowFlags_NoBringToFrontOnFocus;

    // Pin the host window to the main viewport's work area.
    var mainView = ig.igGetMainViewport();
    var pos: ig.ImVec2 = mainView.*.WorkPos;
    var size: ig.ImVec2 = mainView.*.WorkSize;
    ig.igSetNextWindowPos(pos, ig.ImGuiCond_Always, .{});
    ig.igSetNextWindowSize(size, ig.ImGuiCond_Always);

    ig.igPushStyleVar_Vec2(ig.ImGuiStyleVar_WindowPadding, .{});
    _ = ig.igBegin("###DockSpace", null, windowFlags);
    var id = ig.igGetID_Str("DefaultDockingViewport");
    _ = ig.igDockSpace(id, .{}, dockNodeFlags, ig.ImGuiWindowClass_ImGuiWindowClass());
    ig.igEnd();
    ig.igPopStyleVar(1);
    return id;
}

/// If you ever need to format a string for use inside imgui, this will work the same as any format function.
/// The result is NUL-terminated and lives in the ring allocator, so do not
/// hold on to it past the current frame.
pub inline fn fmtTextForImgui(comptime fmt: []const u8, args: anytype) []const u8 {
    var alloc = zt.Allocators.ring();
    return alloc.dupeZ(u8, std.fmt.allocPrint(alloc, fmt, args) catch unreachable) catch unreachable;
}

/// Uses a ring allocator to spit out imgui text using zig.ig's formatting library.
pub fn ztText(comptime fmt: []const u8, args: anytype) void {
    var text = fmtTextForImgui(fmt, args);
    ig.igText(text.ptr);
}

/// Uses a ring allocator to spit out imgui text using zig's formatting library, wrapping if needed.
pub fn ztTextWrap(comptime fmt: []const u8, args: anytype) void {
    var text = fmtTextForImgui(fmt, args);
    ig.igTextWrapped(text.ptr);
}

/// Uses a ring allocator to spit out imgui text using zig's formatting library, in the disabled color.
pub fn ztTextDisabled(comptime fmt: []const u8, args: anytype) void {
    var text = fmtTextForImgui(fmt, args);
    ig.igTextDisabled(text.ptr);
}

/// Uses a ring allocator to spit out imgui text using zig's formatting library with a custom color.
pub fn ztTextColor(comptime fmt: []const u8, color: ig.ImVec4, args: anytype) void {
    var text = fmtTextForImgui(fmt, args);
    ig.igTextColored(color, text.ptr);
}

/// Attempts to create a general editor for most structs, including math structs. This isnt always what you want, and in
/// those cases its always better to layout your own editor. This is biased towards creating drag inputs.
/// Returns true when the widget reports a value change.
pub fn ztEditDrag(label: []const u8, speed: f32, ptr: anytype) bool {
    // Array buffers are weird. Lets sort them out first.
    const ti: std.builtin.TypeInfo = @typeInfo(@TypeOf(ptr.*));
    if (ti == .Array) {
        if (ti.Array.child == u8) {
            // u8 arrays are edited as text buffers.
            return ig.igInputText(label.ptr, ptr, @intCast(usize, ti.Array.len), ig.ImGuiInputTextFlags_None, null, null);
        }
    }
    const fmax = std.math.f32_max;
    // Dispatch on the pointer type at comptime; unknown types just log.
    switch (@TypeOf(ptr)) {
        *bool => {
            return ig.igCheckbox(label.ptr, ptr);
        },
        *i32 => {
            const imin = std.math.minInt(i32);
            const imax = std.math.maxInt(i32);
            return ig.igDragInt(label.ptr, ptr, speed, @intCast(c_int, imin), @intCast(c_int, imax), "%i", ig.ImGuiSliderFlags_NoRoundToFormat);
        },
        *f32 => {
            return ig.igDragFloat(label.ptr, ptr, speed, -fmax, fmax, "%.2f", ig.ImGuiSliderFlags_NoRoundToFormat);
        },
        *usize => {
            // imgui has no usize widget: round-trip through c_int, clamping
            // negatives back to 0 on write-back.
            var cast = @intCast(c_int, ptr.*);
            var result = ig.igInputInt(label.ptr, &cast, 1, 5, ig.ImGuiInputTextFlags_None);
            if (result) {
                ptr.* = @intCast(usize, std.math.max(0, cast));
            }
            return result;
        },
        *zt.math.Vec2 => {
            var cast: [2]f32 = .{ ptr.*.x, ptr.*.y };
            var result = ig.igDragFloat2(label.ptr, &cast, speed, -fmax, fmax, "%.2f", ig.ImGuiSliderFlags_NoRoundToFormat);
            if (result) {
                ptr.* = zt.math.vec2(cast[0], cast[1]);
            }
            return result;
        },
        *zt.math.Vec3 => {
            var cast: [3]f32 = .{ ptr.*.x, ptr.*.y, ptr.*.z };
            var result = ig.igDragFloat3(label.ptr, &cast, speed, -fmax, fmax, "%.2f", ig.ImGuiSliderFlags_NoRoundToFormat);
            if (result) {
                ptr.* = zt.math.vec3(cast[0], cast[1], cast[2]);
            }
            return result;
        },
        *zt.math.Vec4 => {
            // Vec4 is treated as a color, not a drag input.
            var cast: [4]f32 = .{ ptr.*.x, ptr.*.y, ptr.*.z, ptr.*.w };
            var result = ig.igColorEdit4(label.ptr, &cast, ig.ImGuiColorEditFlags_Float);
            if (result) {
                ptr.* = zt.math.vec4(cast[0], cast[1], cast[2], cast[3]);
            }
            return result;
        },
        *zt.math.Rect => {
            var cast: [4]f32 = .{ ptr.*.position.x, ptr.*.position.y, ptr.*.size.x, ptr.*.size.y };
            var result = ig.igDragFloat4(label.ptr, &cast, speed, -fmax, fmax, "%.2f", ig.ImGuiSliderFlags_NoRoundToFormat);
            if (result) {
                ptr.* = zt.math.rect(cast[0], cast[1], cast[2], cast[3]);
            }
            return result;
        },
        else => {
            std.debug.print("No editor found for type {s}\n", .{@typeName(@TypeOf(ptr))});
            return false;
        },
    }
}

/// Same type dispatch as ztEditDrag, but using plain input widgets instead of
/// drag widgets. Returns true when the widget reports a value change.
pub fn ztEdit(label: []const u8, ptr: anytype) bool {
    // Array buffers are weird. Lets sort them out first.
    const ti: std.builtin.TypeInfo = @typeInfo(@TypeOf(ptr.*));
    if (ti == .Array) {
        if (ti.Array.child == u8) {
            return ig.igInputText(label.ptr, ptr, @intCast(usize, ti.Array.len), ig.ImGuiInputTextFlags_None, null, null);
        }
    }
    switch (@TypeOf(ptr)) {
        *bool => {
            return ig.igCheckbox(label.ptr, ptr);
        },
        *i32 => {
            return ig.igInputInt(label.ptr, ptr, 1, 3, ig.ImGuiInputTextFlags_None);
        },
        *f32 => {
            return ig.igInputFloat(label.ptr, ptr, 1, 3, "%.2f", ig.ImGuiInputTextFlags_None);
        },
        *usize => {
            // See ztEditDrag: c_int round-trip with clamp-to-zero.
            var cast = @intCast(c_int, ptr.*);
            var result = ig.igInputInt(label.ptr, &cast, 1, 5, ig.ImGuiInputTextFlags_None);
            if (result) {
                ptr.* = @intCast(usize, std.math.max(0, cast));
            }
            return result;
        },
        *zt.math.Vec2 => {
            var cast: [2]f32 = .{ ptr.*.x, ptr.*.y };
            var result = ig.igInputFloat2(label.ptr, &cast, "%.2f", ig.ImGuiInputTextFlags_None);
            if (result) {
                ptr.* = zt.math.vec2(cast[0], cast[1]);
            }
            return result;
        },
        *zt.math.Vec3 => {
            var cast: [3]f32 = .{ ptr.*.x, ptr.*.y, ptr.*.z };
            var result = ig.igInputFloat3(label.ptr, &cast, "%.2f", ig.ImGuiInputTextFlags_None);
            if (result) {
                ptr.* = zt.math.vec3(cast[0], cast[1], cast[2]);
            }
            return result;
        },
        *zt.math.Vec4 => {
            var cast: [4]f32 = .{ ptr.*.x, ptr.*.y, ptr.*.z, ptr.*.w };
            var result = ig.igColorEdit4(label.ptr, &cast, ig.ImGuiColorEditFlags_Float);
            if (result) {
                ptr.* = zt.math.vec4(cast[0], cast[1], cast[2], cast[3]);
            }
            return result;
        },
        *zt.math.Rect => {
            var cast: [4]f32 = .{ ptr.*.position.x, ptr.*.position.y, ptr.*.size.x, ptr.*.size.y };
            var result = ig.igInputFloat4(label.ptr, &cast, "%.2f", ig.ImGuiInputTextFlags_None);
            if (result) {
                ptr.* = zt.math.rect(cast[0], cast[1], cast[2], cast[3]);
            }
            return result;
        },
        else => {
            std.debug.print("No editor found for type {s}\n", .{@typeName(@TypeOf(ptr))});
            return false;
        },
    }
}
src/zt/customComponents.zig
const std = @import("std");
const builtin = @import("builtin");
const expect = std.testing.expect;
const mem = std.mem;

// Starts from an undefined array and assigns only some elements; the tests
// below read back exactly those elements.
fn initStaticArray() [10]i32 {
    var array: [10]i32 = undefined;
    array[0] = 1;
    array[4] = 2;
    array[7] = 3;
    array[9] = 4;
    return array;
}
const static_array = initStaticArray();
test "init static array to undefined" {
    // This test causes `initStaticArray()` to be codegen'd, and the
    // C backend does not yet support returning arrays, so it fails
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

    // Checked both at runtime and inside a comptime block.
    try expect(static_array[0] == 1);
    try expect(static_array[4] == 2);
    try expect(static_array[7] == 3);
    try expect(static_array[9] == 4);

    comptime {
        try expect(static_array[0] == 1);
        try expect(static_array[4] == 2);
        try expect(static_array[7] == 3);
        try expect(static_array[9] == 4);
    }
}

const Foo = struct {
    x: i32,

    fn setFooXMethod(foo: *Foo) void {
        foo.x = 3;
    }
};

fn setFooX(foo: *Foo) void {
    foo.x = 2;
}

test "assign undefined to struct" {
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

    // Same assertion evaluated at comptime and at runtime: a field of an
    // undefined struct can be written through a free function.
    comptime {
        var foo: Foo = undefined;
        setFooX(&foo);
        try expect(foo.x == 2);
    }
    {
        var foo: Foo = undefined;
        setFooX(&foo);
        try expect(foo.x == 2);
    }
}

test "assign undefined to struct with method" {
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

    // As above, but the write goes through a method call.
    comptime {
        var foo: Foo = undefined;
        foo.setFooXMethod();
        try expect(foo.x == 3);
    }
    {
        var foo: Foo = undefined;
        foo.setFooXMethod();
        try expect(foo.x == 3);
    }
}

test "type name of undefined" {
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

    const x = undefined;
    try expect(mem.eql(u8, @typeName(@TypeOf(x)), "@Type(.Undefined)"));
}
test/behavior/undefined.zig
const builtin = @import("builtin");
const std = @import("std");
const time = std.time;
const Timer = time.Timer;
const hash = std.hash;

const KiB = 1024;
const MiB = 1024 * KiB;
const GiB = 1024 * MiB;

// Seeded deterministically; reseeded before each benchmark run so every hash
// sees identical input bytes.
var prng = std.rand.DefaultPrng.init(0);

// Descriptor for one hash under benchmark: the type, a display name, whether
// it has an update/final streaming API, and how it is keyed/seeded (if at all).
const Hash = struct {
    ty: type,
    name: []const u8,
    has_iterative_api: bool = true,
    init_u8s: ?[]const u8 = null,
    init_u64: ?u64 = null,
};

const siphash_key = "<KEY>";

// The benchmark roster. Hashes without a streaming API only run the
// small-key benchmark.
const hashes = [_]Hash{
    Hash{
        .ty = hash.Wyhash,
        .name = "wyhash",
        .init_u64 = 0,
    },
    Hash{
        .ty = hash.SipHash64(1, 3),
        .name = "siphash(1,3)",
        .init_u8s = siphash_key,
    },
    Hash{
        .ty = hash.SipHash64(2, 4),
        .name = "siphash(2,4)",
        .init_u8s = siphash_key,
    },
    Hash{
        .ty = hash.Fnv1a_64,
        .name = "fnv1a",
    },
    Hash{
        .ty = hash.Adler32,
        .name = "adler32",
    },
    Hash{
        .ty = hash.crc.Crc32WithPoly(.IEEE),
        .name = "crc32-slicing-by-8",
    },
    Hash{
        .ty = hash.crc.Crc32SmallWithPoly(.IEEE),
        .name = "crc32-half-byte-lookup",
    },
    Hash{
        .ty = hash.CityHash32,
        .name = "cityhash-32",
        .has_iterative_api = false,
    },
    Hash{
        .ty = hash.CityHash64,
        .name = "cityhash-64",
        .has_iterative_api = false,
    },
    Hash{
        .ty = hash.Murmur2_32,
        .name = "murmur2-32",
        .has_iterative_api = false,
    },
    Hash{
        .ty = hash.Murmur2_64,
        .name = "murmur2-64",
        .has_iterative_api = false,
    },
    Hash{
        .ty = hash.Murmur3_32,
        .name = "murmur3-32",
        .has_iterative_api = false,
    },
};

// Final digest (kept so the work cannot be optimized away, and printed for
// cross-implementation comparison) plus measured bytes/second.
const Result = struct {
    hash: u64,
    throughput: u64,
};

const block_size: usize = 8 * 8192;

/// Benchmark the streaming (update/final) API of `H` over `bytes` total
/// bytes, fed in fixed-size blocks of random data.
pub fn benchmarkHash(comptime H: var, bytes: usize) !Result {
    // Construct with key, integer seed, or nothing, depending on descriptor.
    var h = blk: {
        if (H.init_u8s) |init| {
            break :blk H.ty.init(init);
        }
        if (H.init_u64) |init| {
            break :blk H.ty.init(init);
        }
        break :blk H.ty.init();
    };

    var block: [block_size]u8 = undefined;
    prng.random.bytes(block[0..]);

    var offset: usize = 0;
    var timer = try Timer.start();
    const start = timer.lap();
    while (offset < bytes) : (offset += block.len) {
        h.update(block[0..]);
    }
    const end = timer.read();

    const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s;
    const throughput = @floatToInt(u64, @intToFloat(f64, bytes) / elapsed_s);

    return Result{
        .hash = h.final(),
        .throughput = throughput,
    };
}

/// Benchmark one-shot hashing of `H` over many `key_size`-byte keys, enough
/// to total `bytes` bytes. Digests are summed (wrapping) so the calls cannot
/// be elided.
pub fn benchmarkHashSmallKeys(comptime H: var, key_size: usize, bytes: usize) !Result {
    const key_count = bytes / key_size;
    var block: [block_size]u8 = undefined;
    prng.random.bytes(block[0..]);

    var i: usize = 0;
    var timer = try Timer.start();
    const start = timer.lap();

    var sum: u64 = 0;
    while (i < key_count) : (i += 1) {
        const small_key = block[0..key_size];
        sum +%= blk: {
            if (H.init_u8s) |init| {
                break :blk H.ty.hash(init, small_key);
            }
            if (H.init_u64) |init| {
                break :blk H.ty.hash(init, small_key);
            }
            break :blk H.ty.hash(small_key);
        };
    }
    const end = timer.read();

    const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s;
    const throughput = @floatToInt(u64, @intToFloat(f64, bytes) / elapsed_s);

    return Result{
        .hash = sum,
        .throughput = throughput,
    };
}

fn usage() void {
    std.debug.warn(
        \\throughput_test [options]
        \\
        \\Options:
        \\ --filter [test-name]
        \\ --seed [int]
        \\ --count [int]
        \\ --key-size [int]
        \\ --iterative-only
        \\ --help
        \\
    , .{});
}

// Scale down workloads in Debug so the benchmark stays usable.
fn mode(comptime x: comptime_int) comptime_int {
    return if (builtin.mode == .Debug) x / 64 else x;
}

pub fn main() !void {
    const stdout = std.io.getStdOut().writer();

    var buffer: [1024]u8 = undefined;
    var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
    const args = try std.process.argsAlloc(&fixed.allocator);

    // NOTE(review): filter starts as "" (not null), so the `filter == null`
    // check below never fires; "" matches every name via indexOf, which
    // happens to give the intended default-run-everything behavior.
    var filter: ?[]u8 = "";
    var count: usize = mode(128 * MiB);
    var key_size: usize = 32;
    var seed: u32 = 0;
    var test_iterative_only = false;

    var i: usize = 1;
    while (i < args.len) : (i += 1) {
        if (std.mem.eql(u8, args[i], "--mode")) {
            try stdout.print("{}\n", .{builtin.mode});
            return;
        } else if (std.mem.eql(u8, args[i], "--seed")) {
            i += 1;
            if (i == args.len) {
                usage();
                std.os.exit(1);
            }
            seed = try std.fmt.parseUnsigned(u32, args[i], 10);
            // we seed later
        } else if (std.mem.eql(u8, args[i], "--filter")) {
            i += 1;
            if (i == args.len) {
                usage();
                std.os.exit(1);
            }
            filter = args[i];
        } else if (std.mem.eql(u8, args[i], "--count")) {
            i += 1;
            if (i == args.len) {
                usage();
                std.os.exit(1);
            }
            const c = try std.fmt.parseUnsigned(usize, args[i], 10);
            count = c * MiB;
        } else if (std.mem.eql(u8, args[i], "--key-size")) {
            i += 1;
            if (i == args.len) {
                usage();
                std.os.exit(1);
            }
            key_size = try std.fmt.parseUnsigned(usize, args[i], 10);
            if (key_size > block_size) {
                try stdout.print("key_size cannot exceed block size of {}\n", .{block_size});
                std.os.exit(1);
            }
        } else if (std.mem.eql(u8, args[i], "--iterative-only")) {
            test_iterative_only = true;
        } else if (std.mem.eql(u8, args[i], "--help")) {
            usage();
            return;
        } else {
            usage();
            std.os.exit(1);
        }
    }

    // `hashes` carries types, so iteration must be unrolled at comptime.
    inline for (hashes) |H| {
        if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) {
            if (!test_iterative_only or H.has_iterative_api) {
                try stdout.print("{}\n", .{H.name});

                // Always reseed prior to every call so we are hashing the same buffer contents.
                // This allows easier comparison between different implementations.
                if (H.has_iterative_api) {
                    prng.seed(seed);
                    const result = try benchmarkHash(H, count);
                    try stdout.print(" iterative: {:5} MiB/s [{x:0<16}]\n", .{ result.throughput / (1 * MiB), result.hash });
                }

                if (!test_iterative_only) {
                    prng.seed(seed);
                    const result_small = try benchmarkHashSmallKeys(H, key_size, count);
                    try stdout.print(" small keys: {:5} MiB/s [{x:0<16}]\n", .{ result_small.throughput / (1 * MiB), result_small.hash });
                }
            }
        }
    }
}
lib/std/hash/benchmark.zig
const builtin = @import("builtin");
const std = @import("../std.zig");
const math = std.math;
const expect = std.testing.expect;

/// Returns the tangent of the radian value x.
///
/// Special Cases:
///  - tan(+-0)   = +-0
///  - tan(+-inf) = nan
///  - tan(nan)   = nan
pub fn tan(x: anytype) @TypeOf(x) {
    const T = @TypeOf(x);
    return switch (T) {
        f32 => tan_(f32, x),
        f64 => tan_(f64, x),
        else => @compileError("tan not implemented for " ++ @typeName(T)),
    };
}

// Rational-approximation coefficients (Cephes-style P/Q polynomials).
const Tp0 = -1.30936939181383777646E4;
const Tp1 = 1.15351664838587416140E6;
const Tp2 = -1.79565251976484877988E7;

const Tq1 = 1.36812963470692954678E4;
const Tq2 = -1.32089234440210967447E6;
const Tq3 = 2.50083801823357915839E7;
const Tq4 = -5.38695755929454629881E7;

// pi/4 split into three parts for accurate argument reduction, and 4/pi.
const pi4a = 7.85398125648498535156e-1;
const pi4b = 3.77489470793079817668E-8;
const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;

fn tan_(comptime T: type, x_: T) T {
    const I = std.meta.Int(.signed, @typeInfo(T).Float.bits);

    // Special cases: +-0 and nan pass through; +-inf yields nan.
    if (x_ == 0 or math.isNan(x_)) return x_;
    if (math.isInf(x_)) return math.nan(T);

    // Work on the absolute value; reapply the sign at the end.
    const negate = x_ < 0;
    const x = math.fabs(x_);

    // Octant of x in units of pi/4, rounded to an even value.
    var y = math.floor(x * m4pi);
    var octant = @floatToInt(I, y);
    if (octant & 1 == 1) {
        octant += 1;
        y += 1;
    }

    // Extended-precision reduction: z = x - y*(pi/4) using the 3-part split.
    const z = ((x - y * pi4a) - y * pi4b) - y * pi4c;
    const w = z * z;

    // Rational approximation on the reduced argument; for tiny w the linear
    // term z alone is already exact to working precision.
    var r = if (w > 1e-14)
        z + z * (w * ((Tp0 * w + Tp1) * w + Tp2) / ((((w + Tq1) * w + Tq2) * w + Tq3) * w + Tq4))
    else
        z;

    // Odd quadrants map through the cotangent identity.
    if (octant & 2 == 2) r = -1 / r;

    return if (negate) -r else r;
}

test "math.tan" {
    expect(tan(@as(f32, 0.0)) == tan_(f32, 0.0));
    expect(tan(@as(f64, 0.0)) == tan_(f64, 0.0));
}

test "math.tan32" {
    const epsilon = 0.000001;

    expect(math.approxEq(f32, tan_(f32, 0.0), 0.0, epsilon));
    expect(math.approxEq(f32, tan_(f32, 0.2), 0.202710, epsilon));
    expect(math.approxEq(f32, tan_(f32, 0.8923), 1.240422, epsilon));
    expect(math.approxEq(f32, tan_(f32, 1.5), 14.101420, epsilon));
    expect(math.approxEq(f32, tan_(f32, 37.45), -0.254397, epsilon));
    expect(math.approxEq(f32, tan_(f32, 89.123), 2.285852, epsilon));
}

test "math.tan64" {
    const epsilon = 0.000001;

    expect(math.approxEq(f64, tan_(f64, 0.0), 0.0, epsilon));
    expect(math.approxEq(f64, tan_(f64, 0.2), 0.202710, epsilon));
    expect(math.approxEq(f64, tan_(f64, 0.8923), 1.240422, epsilon));
    expect(math.approxEq(f64, tan_(f64, 1.5), 14.101420, epsilon));
    expect(math.approxEq(f64, tan_(f64, 37.45), -0.254397, epsilon));
    expect(math.approxEq(f64, tan_(f64, 89.123), 2.2858376, epsilon));
}

test "math.tan32.special" {
    expect(tan_(f32, 0.0) == 0.0);
    expect(tan_(f32, -0.0) == -0.0);
    expect(math.isNan(tan_(f32, math.inf(f32))));
    expect(math.isNan(tan_(f32, -math.inf(f32))));
    expect(math.isNan(tan_(f32, math.nan(f32))));
}

test "math.tan64.special" {
    expect(tan_(f64, 0.0) == 0.0);
    expect(tan_(f64, -0.0) == -0.0);
    expect(math.isNan(tan_(f64, math.inf(f64))));
    expect(math.isNan(tan_(f64, -math.inf(f64))));
    expect(math.isNan(tan_(f64, math.nan(f64))));
}
lib/std/math/tan.zig
const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const ir = @import("ir.zig");
const fs = std.fs;
const elf = std.elf;
const codegen = @import("codegen.zig");

// Virtual address where the PT_LOAD executable segment is mapped; also used
// as the base for the ELF entry point.
const default_entry_addr = 0x8000000;

// A single linker diagnostic: offset into the source plus an owned message.
pub const ErrorMsg = struct {
    byte_offset: usize,
    msg: []const u8,
};

// Outcome of a link operation: zero or more diagnostics. The caller owns the
// slice and the message strings; free them via `deinit`.
pub const Result = struct {
    errors: []ErrorMsg,

    pub fn deinit(self: *Result, allocator: *mem.Allocator) void {
        for (self.errors) |err| {
            allocator.free(err.msg);
        }
        allocator.free(self.errors);
        self.* = undefined;
    }
};

/// Attempts incremental linking, if the file already exists.
/// If incremental linking fails, falls back to truncating the file and rewriting it.
/// A malicious file is detected as incremental link failure and does not cause Illegal Behavior.
/// This operation is not atomic.
pub fn updateFilePath(
    allocator: *Allocator,
    module: ir.Module,
    dir: fs.Dir,
    sub_path: []const u8,
) !Result {
    // truncate=false keeps existing contents so the incremental path can inspect them.
    const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = determineMode(module) });
    defer file.close();

    return updateFile(allocator, module, file);
}

/// Atomically overwrites the old file, if present.
pub fn writeFilePath(
    allocator: *Allocator,
    module: ir.Module,
    dir: fs.Dir,
    sub_path: []const u8,
) !Result {
    const af = try dir.atomicFile(sub_path, .{ .mode = determineMode(module) });
    defer af.deinit();
    const result = try writeFile(allocator, module, af.file);
    // Only rename into place after a successful write.
    try af.finish();
    return result;
}

/// Attempts incremental linking, if the file already exists.
/// If incremental linking fails, falls back to truncating the file and rewriting it.
/// Returns an error if `file` is not already open with +read +write +seek abilities.
/// A malicious file is detected as incremental link failure and does not cause Illegal Behavior.
/// This operation is not atomic.
pub fn updateFile(allocator: *Allocator, module: ir.Module, file: fs.File) !Result {
    // Try the incremental path first; on IncrFailed fall back to a full rewrite.
    return updateFileInner(allocator, module, file) catch |err| switch (err) {
        error.IncrFailed => {
            return writeFile(allocator, module, file);
        },
        else => |e| return e,
    };
}

// In-progress state of one ELF emission: mirrors of the section/program
// header tables, the string table, and the symbol table, all in native
// endianness until written out.
const Update = struct {
    file: fs.File,
    module: *const ir.Module,

    /// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
    /// Same order as in the file.
    sections: std.ArrayList(elf.Elf64_Shdr),
    shdr_table_offset: ?u64,

    /// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
    /// Same order as in the file.
    program_headers: std.ArrayList(elf.Elf64_Phdr),
    phdr_table_offset: ?u64,

    /// The index into the program headers of a PT_LOAD program header with Read and Execute flags
    phdr_load_re_index: ?u16,
    entry_addr: ?u64,

    shstrtab: std.ArrayList(u8),
    shstrtab_index: ?u16,

    text_section_index: ?u16,
    symtab_section_index: ?u16,

    /// The same order as in the file
    symbols: std.ArrayList(elf.Elf64_Sym),

    errors: std.ArrayList(ErrorMsg),

    fn deinit(self: *Update) void {
        self.sections.deinit();
        self.program_headers.deinit();
        self.shstrtab.deinit();
        self.symbols.deinit();
        self.errors.deinit();
    }

    // `expand_num / expand_den` is the factor of padding when allocation
    // (each object is considered to occupy 4/3 of its tight size so there is
    // slack for incremental growth).
    const alloc_num = 4;
    const alloc_den = 3;

    /// Returns end pos of collision, if any.
    // Checks whether [start, start+size*4/3) overlaps the ELF header, either
    // header table, any section contents, or any segment contents.
    fn detectAllocCollision(self: *Update, start: u64, size: u64) ?u64 {
        const small_ptr = self.module.target.cpu.arch.ptrBitWidth() == 32;
        const ehdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Ehdr) else @sizeOf(elf.Elf64_Ehdr);
        if (start < ehdr_size)
            return ehdr_size;

        const end = start + satMul(size, alloc_num) / alloc_den;

        if (self.shdr_table_offset) |off| {
            const shdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Shdr) else @sizeOf(elf.Elf64_Shdr);
            const tight_size = self.sections.items.len * shdr_size;
            const increased_size = satMul(tight_size, alloc_num) / alloc_den;
            const test_end = off + increased_size;
            if (end > off and start < test_end) {
                return test_end;
            }
        }

        if (self.phdr_table_offset) |off| {
            const phdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Phdr) else @sizeOf(elf.Elf64_Phdr);
            // NOTE(review): this sizes the program header table with
            // `sections.items.len` — presumably it should be
            // `program_headers.items.len`; confirm before relying on it.
            const tight_size = self.sections.items.len * phdr_size;
            const increased_size = satMul(tight_size, alloc_num) / alloc_den;
            const test_end = off + increased_size;
            if (end > off and start < test_end) {
                return test_end;
            }
        }

        for (self.sections.items) |section| {
            const increased_size = satMul(section.sh_size, alloc_num) / alloc_den;
            const test_end = section.sh_offset + increased_size;
            if (end > section.sh_offset and start < test_end) {
                return test_end;
            }
        }

        for (self.program_headers.items) |program_header| {
            const increased_size = satMul(program_header.p_filesz, alloc_num) / alloc_den;
            const test_end = program_header.p_offset + increased_size;
            if (end > program_header.p_offset and start < test_end) {
                return test_end;
            }
        }

        return null;
    }

    // Returns how many bytes starting at `start` are free before the next
    // allocated object (header table, section, or segment) begins.
    fn allocatedSize(self: *Update, start: u64) u64 {
        var min_pos: u64 = std.math.maxInt(u64);
        if (self.shdr_table_offset) |off| {
            if (off > start and off < min_pos) min_pos = off;
        }
        if (self.phdr_table_offset) |off| {
            if (off > start and off < min_pos) min_pos = off;
        }
        for (self.sections.items) |section| {
            if (section.sh_offset <= start) continue;
            if (section.sh_offset < min_pos) min_pos = section.sh_offset;
        }
        for (self.program_headers.items) |program_header| {
            if (program_header.p_offset <= start) continue;
            if (program_header.p_offset < min_pos) min_pos = program_header.p_offset;
        }
        return min_pos - start;
    }

    // First-fit scan for a free, suitably aligned region of `object_size` bytes.
    fn findFreeSpace(self: *Update, object_size: u64, min_alignment: u16) u64 {
        var start: u64 = 0;
        while (self.detectAllocCollision(start, object_size)) |item_end| {
            start = mem.alignForwardGeneric(u64, item_end, min_alignment);
        }
        return start;
    }

    // Appends a NUL-terminated string to .shstrtab and returns its offset.
    fn makeString(self: *Update, bytes: []const u8) !u32 {
        const result = self.shstrtab.items.len;
        try self.shstrtab.appendSlice(bytes);
        try self.shstrtab.append(0);
        return @intCast(u32, result);
    }

    // Drives the whole emission: lazily creates the PT_LOAD segment and the
    // null/.shstrtab/.text/.symtab sections, writes code and symbols, then
    // flushes whichever tables became dirty, and finally the ELF header.
    fn perform(self: *Update) !void {
        const ptr_width: enum { p32, p64 } = switch (self.module.target.cpu.arch.ptrBitWidth()) {
            32 => .p32,
            64 => .p64,
            else => return error.UnsupportedArchitecture,
        };
        const small_ptr = switch (ptr_width) {
            .p32 => true,
            .p64 => false,
        };
        // This means the entire read-only executable program code needs to be rewritten.
        var phdr_load_re_dirty = false;
        var phdr_table_dirty = false;
        var shdr_table_dirty = false;
        var shstrtab_dirty = false;
        var symtab_dirty = false;

        if (self.phdr_load_re_index == null) {
            self.phdr_load_re_index = @intCast(u16, self.program_headers.items.len);
            const file_size = 256 * 1024;
            const p_align = 0x1000;
            const off = self.findFreeSpace(file_size, p_align);
            //std.debug.warn("found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
            try self.program_headers.append(.{
                .p_type = elf.PT_LOAD,
                .p_offset = off,
                .p_filesz = file_size,
                .p_vaddr = default_entry_addr,
                .p_paddr = default_entry_addr,
                .p_memsz = 0,
                .p_align = p_align,
                .p_flags = elf.PF_X | elf.PF_R,
            });
            self.entry_addr = null;
            phdr_load_re_dirty = true;
            phdr_table_dirty = true;
        }

        if (self.sections.items.len == 0) {
            // There must always be a null section in index 0
            try self.sections.append(.{
                .sh_name = 0,
                .sh_type = elf.SHT_NULL,
                .sh_flags = 0,
                .sh_addr = 0,
                .sh_offset = 0,
                .sh_size = 0,
                .sh_link = 0,
                .sh_info = 0,
                .sh_addralign = 0,
                .sh_entsize = 0,
            });
            shdr_table_dirty = true;
        }

        if (self.shstrtab_index == null) {
            self.shstrtab_index = @intCast(u16, self.sections.items.len);
            assert(self.shstrtab.items.len == 0);
            try self.shstrtab.append(0); // need a 0 at position 0
            const off = self.findFreeSpace(self.shstrtab.items.len, 1);
            //std.debug.warn("found shstrtab free space 0x{x} to 0x{x}\n", .{ off, off + self.shstrtab.items.len });
            try self.sections.append(.{
                .sh_name = try self.makeString(".shstrtab"),
                .sh_type = elf.SHT_STRTAB,
                .sh_flags = 0,
                .sh_addr = 0,
                .sh_offset = off,
                .sh_size = self.shstrtab.items.len,
                .sh_link = 0,
                .sh_info = 0,
                .sh_addralign = 1,
                .sh_entsize = 0,
            });
            shstrtab_dirty = true;
            shdr_table_dirty = true;
        }

        if (self.text_section_index == null) {
            self.text_section_index = @intCast(u16, self.sections.items.len);
            // .text mirrors the executable PT_LOAD segment exactly.
            const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
            try self.sections.append(.{
                .sh_name = try self.makeString(".text"),
                .sh_type = elf.SHT_PROGBITS,
                .sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
                .sh_addr = phdr.p_vaddr,
                .sh_offset = phdr.p_offset,
                .sh_size = phdr.p_filesz,
                .sh_link = 0,
                .sh_info = 0,
                .sh_addralign = phdr.p_align,
                .sh_entsize = 0,
            });
            shdr_table_dirty = true;
        }

        if (self.symtab_section_index == null) {
            self.symtab_section_index = @intCast(u16, self.sections.items.len);
            const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
            const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
            const file_size = self.module.exports.len * each_size;
            const off = self.findFreeSpace(file_size, min_align);
            //std.debug.warn("found symtab free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
            try self.sections.append(.{
                .sh_name = try self.makeString(".symtab"),
                .sh_type = elf.SHT_SYMTAB,
                .sh_flags = 0,
                .sh_addr = 0,
                .sh_offset = off,
                .sh_size = file_size,
                // The section header index of the associated string table.
                .sh_link = self.shstrtab_index.?,
                .sh_info = @intCast(u32, self.module.exports.len),
                .sh_addralign = min_align,
                .sh_entsize = each_size,
            });
            symtab_dirty = true;
            shdr_table_dirty = true;
        }

        const shsize: u64 = switch (ptr_width) {
            .p32 => @sizeOf(elf.Elf32_Shdr),
            .p64 => @sizeOf(elf.Elf64_Shdr),
        };
        const shalign: u16 = switch (ptr_width) {
            .p32 => @alignOf(elf.Elf32_Shdr),
            .p64 => @alignOf(elf.Elf64_Shdr),
        };
        if (self.shdr_table_offset == null) {
            self.shdr_table_offset = self.findFreeSpace(self.sections.items.len * shsize, shalign);
            shdr_table_dirty = true;
        }
        const phsize: u64 = switch (ptr_width) {
            .p32 => @sizeOf(elf.Elf32_Phdr),
            .p64 => @sizeOf(elf.Elf64_Phdr),
        };
        const phalign: u16 = switch (ptr_width) {
            .p32 => @alignOf(elf.Elf32_Phdr),
            .p64 => @alignOf(elf.Elf64_Phdr),
        };
        if (self.phdr_table_offset == null) {
            self.phdr_table_offset = self.findFreeSpace(self.program_headers.items.len * phsize, phalign);
            phdr_table_dirty = true;
        }
        const foreign_endian = self.module.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();

        try self.writeCodeAndSymbols(phdr_table_dirty, shdr_table_dirty);

        if (phdr_table_dirty) {
            const allocated_size = self.allocatedSize(self.phdr_table_offset.?);
            const needed_size = self.program_headers.items.len * phsize;

            if (needed_size > allocated_size) {
                self.phdr_table_offset = null; // free the space
                self.phdr_table_offset = self.findFreeSpace(needed_size, phalign);
            }

            const allocator = self.program_headers.allocator;
            switch (ptr_width) {
                .p32 => {
                    const buf = try allocator.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
                    defer allocator.free(buf);
                    for (buf) |*phdr, i| {
                        phdr.* = progHeaderTo32(self.program_headers.items[i]);
                        if (foreign_endian) {
                            bswapAllFields(elf.Elf32_Phdr, phdr);
                        }
                    }
                    try self.file.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
                },
                .p64 => {
                    const buf = try allocator.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
                    defer allocator.free(buf);
                    for (buf) |*phdr, i| {
                        phdr.* = self.program_headers.items[i];
                        if (foreign_endian) {
                            bswapAllFields(elf.Elf64_Phdr, phdr);
                        }
                    }
                    try self.file.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
                },
            }
        }

        {
            const shstrtab_sect = &self.sections.items[self.shstrtab_index.?];
            if (shstrtab_dirty or self.shstrtab.items.len != shstrtab_sect.sh_size) {
                const allocated_size = self.allocatedSize(shstrtab_sect.sh_offset);
                const needed_size = self.shstrtab.items.len;

                if (needed_size > allocated_size) {
                    shstrtab_sect.sh_size = 0; // free the space
                    shstrtab_sect.sh_offset = self.findFreeSpace(needed_size, 1);
                }
                shstrtab_sect.sh_size = needed_size;
                //std.debug.warn("shstrtab start=0x{x} end=0x{x}\n", .{ shstrtab_sect.sh_offset, shstrtab_sect.sh_offset + needed_size });

                try self.file.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
                if (!shdr_table_dirty) {
                    // Then it won't get written with the others and we need to do it.
                    try self.writeSectHeader(self.shstrtab_index.?);
                }
            }
        }

        if (shdr_table_dirty) {
            const allocated_size = self.allocatedSize(self.shdr_table_offset.?);
            // NOTE(review): this uses `phsize`/`phalign` (program header sizes)
            // to size and place the *section* header table — looks like it
            // should be `shsize`/`shalign`; confirm.
            const needed_size = self.sections.items.len * phsize;

            if (needed_size > allocated_size) {
                self.shdr_table_offset = null; // free the space
                self.shdr_table_offset = self.findFreeSpace(needed_size, phalign);
            }

            const allocator = self.sections.allocator;
            switch (ptr_width) {
                .p32 => {
                    const buf = try allocator.alloc(elf.Elf32_Shdr, self.sections.items.len);
                    defer allocator.free(buf);
                    for (buf) |*shdr, i| {
                        shdr.* = sectHeaderTo32(self.sections.items[i]);
                        if (foreign_endian) {
                            bswapAllFields(elf.Elf32_Shdr, shdr);
                        }
                    }
                    try self.file.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
                },
                .p64 => {
                    const buf = try allocator.alloc(elf.Elf64_Shdr, self.sections.items.len);
                    defer allocator.free(buf);
                    for (buf) |*shdr, i| {
                        shdr.* = self.sections.items[i];
                        //std.debug.warn("writing section {}\n", .{shdr.*});
                        if (foreign_endian) {
                            bswapAllFields(elf.Elf64_Shdr, shdr);
                        }
                    }
                    try self.file.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
                },
            }
        }

        if (self.entry_addr == null and self.module.output_mode == .Exe) {
            const msg = try std.fmt.allocPrint(self.errors.allocator, "no entry point found", .{});
            errdefer self.errors.allocator.free(msg);
            try self.errors.append(.{
                .byte_offset = 0,
                .msg = msg,
            });
        } else {
            try self.writeElfHeader();
        }
        // TODO find end pos and truncate
    }

    // Serializes the ELF file header (e_ident through e_shstrndx) for the
    // target's class and endianness and writes it at offset 0.
    fn writeElfHeader(self: *Update) !void {
        var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 = undefined;

        var index: usize = 0;
        hdr_buf[0..4].* = "\x7fELF".*;
        index += 4;

        const ptr_width: enum { p32, p64 } = switch (self.module.target.cpu.arch.ptrBitWidth()) {
            32 => .p32,
            64 => .p64,
            else => return error.UnsupportedArchitecture,
        };
        hdr_buf[index] = switch (ptr_width) {
            .p32 => elf.ELFCLASS32,
            .p64 => elf.ELFCLASS64,
        };
        index += 1;

        const endian = self.module.target.cpu.arch.endian();
        hdr_buf[index] = switch (endian) {
            .Little => elf.ELFDATA2LSB,
            .Big => elf.ELFDATA2MSB,
        };
        index += 1;

        hdr_buf[index] = 1; // ELF version
        index += 1;

        // OS ABI, often set to 0 regardless of target platform
        // ABI Version, possibly used by glibc but not by static executables
        // padding
        mem.set(u8, hdr_buf[index..][0..9], 0);
        index += 9;

        assert(index == 16);

        const elf_type = switch (self.module.output_mode) {
            .Exe => elf.ET.EXEC,
            .Obj => elf.ET.REL,
            .Lib => switch (self.module.link_mode) {
                .Static => elf.ET.REL,
                .Dynamic => elf.ET.DYN,
            },
        };
        mem.writeInt(u16, hdr_buf[index..][0..2], @enumToInt(elf_type), endian);
        index += 2;

        const machine = self.module.target.cpu.arch.toElfMachine();
        mem.writeInt(u16, hdr_buf[index..][0..2], @enumToInt(machine), endian);
        index += 2;

        // ELF Version, again
        mem.writeInt(u32, hdr_buf[index..][0..4], 1, endian);
        index += 4;

        // Relocatable objects have no entry point.
        const e_entry = if (elf_type == .REL) 0 else self.entry_addr.?;

        switch (ptr_width) {
            .p32 => {
                mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, e_entry), endian);
                index += 4;

                // e_phoff
                mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, self.phdr_table_offset.?), endian);
                index += 4;

                // e_shoff
                mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, self.shdr_table_offset.?), endian);
                index += 4;
            },
            .p64 => {
                // e_entry
                mem.writeInt(u64, hdr_buf[index..][0..8], e_entry, endian);
                index += 8;

                // e_phoff
                mem.writeInt(u64, hdr_buf[index..][0..8], self.phdr_table_offset.?, endian);
                index += 8;

                // e_shoff
                mem.writeInt(u64, hdr_buf[index..][0..8], self.shdr_table_offset.?, endian);
                index += 8;
            },
        }

        const e_flags = 0;
        mem.writeInt(u32, hdr_buf[index..][0..4], e_flags, endian);
        index += 4;

        const e_ehsize: u16 = switch (ptr_width) {
            .p32 => @sizeOf(elf.Elf32_Ehdr),
            .p64 => @sizeOf(elf.Elf64_Ehdr),
        };
        mem.writeInt(u16, hdr_buf[index..][0..2], e_ehsize, endian);
        index += 2;

        const e_phentsize: u16 = switch (ptr_width) {
            .p32 => @sizeOf(elf.Elf32_Phdr),
            .p64 => @sizeOf(elf.Elf64_Phdr),
        };
        mem.writeInt(u16, hdr_buf[index..][0..2], e_phentsize, endian);
        index += 2;

        const e_phnum = @intCast(u16, self.program_headers.items.len);
        mem.writeInt(u16, hdr_buf[index..][0..2], e_phnum, endian);
        index += 2;

        const e_shentsize: u16 = switch (ptr_width) {
            .p32 => @sizeOf(elf.Elf32_Shdr),
            .p64 => @sizeOf(elf.Elf64_Shdr),
        };
        mem.writeInt(u16, hdr_buf[index..][0..2], e_shentsize, endian);
        index += 2;

        const e_shnum = @intCast(u16, self.sections.items.len);
        mem.writeInt(u16, hdr_buf[index..][0..2], e_shnum, endian);
        index += 2;

        mem.writeInt(u16, hdr_buf[index..][0..2], self.shstrtab_index.?, endian);
        index += 2;

        assert(index == e_ehsize);

        try self.file.pwriteAll(hdr_buf[0..index], 0);
    }

    // Generates machine code for each export, writes it into the executable
    // segment, records a symbol per export, then updates the segment/section
    // sizes and flushes the symbol table.
    fn writeCodeAndSymbols(self: *Update, phdr_table_dirty: bool, shdr_table_dirty: bool) !void {
        // index 0 is always a null symbol
        try self.symbols.resize(1);
        self.symbols.items[0] = .{
            .st_name = 0,
            .st_info = 0,
            .st_other = 0,
            .st_shndx = 0,
            .st_value = 0,
            .st_size = 0,
        };

        const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
        var vaddr: u64 = phdr.p_vaddr;
        var file_off: u64 = phdr.p_offset;

        var code = std.ArrayList(u8).init(self.sections.allocator);
        defer code.deinit();

        for (self.module.exports) |exp| {
            code.shrink(0);
            var symbol = try codegen.generateSymbol(exp.typed_value, self.module.*, &code);
            defer symbol.deinit(code.allocator);
            if (symbol.errors.len != 0) {
                // Codegen diagnostics are copied into our error list; skip emission.
                for (symbol.errors) |err| {
                    const msg = try mem.dupe(self.errors.allocator, u8, err.msg);
                    errdefer self.errors.allocator.free(msg);
                    try self.errors.append(.{
                        .byte_offset = err.byte_offset,
                        .msg = msg,
                    });
                }
                continue;
            }
            // NOTE(review): `file_off` is never advanced inside this loop,
            // while `vaddr` is — with more than one export the code would
            // overlap in the file. Confirm whether multi-export is supported yet.
            try self.file.pwriteAll(code.items, file_off);
            if (mem.eql(u8, exp.name, "_start")) {
                self.entry_addr = vaddr;
            }
            (try self.symbols.addOne()).* = .{
                .st_name = try self.makeString(exp.name),
                .st_info = (elf.STB_LOCAL << 4) | elf.STT_FUNC,
                .st_other = 0,
                .st_shndx = self.text_section_index.?,
                .st_value = vaddr,
                .st_size = code.items.len,
            };
            vaddr += code.items.len;
        }

        {
            // Now that we know the code size, we need to update the program header for executable code
            phdr.p_memsz = vaddr - phdr.p_vaddr;
            phdr.p_filesz = phdr.p_memsz;

            const shdr = &self.sections.items[self.text_section_index.?];
            shdr.sh_size = phdr.p_filesz;

            if (!phdr_table_dirty) {
                // Then it won't get written with the others and we need to do it.
                try self.writeProgHeader(self.phdr_load_re_index.?);
            }
            if (!shdr_table_dirty) {
                // Then it won't get written with the others and we need to do it.
                try self.writeSectHeader(self.text_section_index.?);
            }
        }
        return self.writeSymbols();
    }

    // Writes the single program header at `index`, converting to the 32-bit
    // layout and/or byte-swapping as the target requires.
    fn writeProgHeader(self: *Update, index: usize) !void {
        const foreign_endian = self.module.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
        const offset = self.program_headers.items[index].p_offset;
        switch (self.module.target.cpu.arch.ptrBitWidth()) {
            32 => {
                var phdr = [1]elf.Elf32_Phdr{progHeaderTo32(self.program_headers.items[index])};
                if (foreign_endian) {
                    bswapAllFields(elf.Elf32_Phdr, &phdr[0]);
                }
                return self.file.pwriteAll(mem.sliceAsBytes(&phdr), offset);
            },
            64 => {
                var phdr = [1]elf.Elf64_Phdr{self.program_headers.items[index]};
                if (foreign_endian) {
                    bswapAllFields(elf.Elf64_Phdr, &phdr[0]);
                }
                return self.file.pwriteAll(mem.sliceAsBytes(&phdr), offset);
            },
            else => return error.UnsupportedArchitecture,
        }
    }

    // Writes the single section header at `index`, converting to the 32-bit
    // layout and/or byte-swapping as the target requires.
    // NOTE(review): the write offset is the section's own sh_offset, not a
    // position inside the section header table — confirm this is intended.
    fn writeSectHeader(self: *Update, index: usize) !void {
        const foreign_endian = self.module.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
        const offset = self.sections.items[index].sh_offset;
        switch (self.module.target.cpu.arch.ptrBitWidth()) {
            32 => {
                var shdr: [1]elf.Elf32_Shdr = undefined;
                shdr[0] = sectHeaderTo32(self.sections.items[index]);
                if (foreign_endian) {
                    bswapAllFields(elf.Elf32_Shdr, &shdr[0]);
                }
                return self.file.pwriteAll(mem.sliceAsBytes(&shdr), offset);
            },
            64 => {
                var shdr = [1]elf.Elf64_Shdr{self.sections.items[index]};
                if (foreign_endian) {
                    bswapAllFields(elf.Elf64_Shdr, &shdr[0]);
                }
                return self.file.pwriteAll(mem.sliceAsBytes(&shdr), offset);
            },
            else => return error.UnsupportedArchitecture,
        }
    }

    // Serializes the in-memory symbol list into .symtab, relocating the
    // section first if it outgrew its allocated space.
    fn writeSymbols(self: *Update) !void {
        const ptr_width: enum { p32, p64 } = switch (self.module.target.cpu.arch.ptrBitWidth()) {
            32 => .p32,
            64 => .p64,
            else => return error.UnsupportedArchitecture,
        };
        const small_ptr = ptr_width == .p32;
        const syms_sect = &self.sections.items[self.symtab_section_index.?];
        const sym_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
        const sym_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);

        const allocated_size = self.allocatedSize(syms_sect.sh_offset);
        const needed_size = self.symbols.items.len * sym_size;
        if (needed_size > allocated_size) {
            syms_sect.sh_size = 0; // free the space
            syms_sect.sh_offset = self.findFreeSpace(needed_size, sym_align);
            //std.debug.warn("moved symtab to 0x{x} to 0x{x}\n", .{ syms_sect.sh_offset, syms_sect.sh_offset + needed_size });
        }
        //std.debug.warn("symtab start=0x{x} end=0x{x}\n", .{ syms_sect.sh_offset, syms_sect.sh_offset + needed_size });
        syms_sect.sh_size = needed_size;
        syms_sect.sh_info = @intCast(u32, self.symbols.items.len);
        const allocator = self.symbols.allocator;
        const foreign_endian = self.module.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
        switch (ptr_width) {
            .p32 => {
                const buf = try allocator.alloc(elf.Elf32_Sym, self.symbols.items.len);
                defer allocator.free(buf);

                for (buf) |*sym, i| {
                    sym.* = .{
                        .st_name = self.symbols.items[i].st_name,
                        .st_value = @intCast(u32, self.symbols.items[i].st_value),
                        .st_size = @intCast(u32, self.symbols.items[i].st_size),
                        .st_info = self.symbols.items[i].st_info,
                        .st_other = self.symbols.items[i].st_other,
                        .st_shndx = self.symbols.items[i].st_shndx,
                    };
                    if (foreign_endian) {
                        bswapAllFields(elf.Elf32_Sym, sym);
                    }
                }
                try self.file.pwriteAll(mem.sliceAsBytes(buf), syms_sect.sh_offset);
            },
            .p64 => {
                const buf = try allocator.alloc(elf.Elf64_Sym, self.symbols.items.len);
                defer allocator.free(buf);

                for (buf) |*sym, i| {
                    sym.* = .{
                        .st_name = self.symbols.items[i].st_name,
                        .st_value = self.symbols.items[i].st_value,
                        .st_size = self.symbols.items[i].st_size,
                        .st_info = self.symbols.items[i].st_info,
                        .st_other = self.symbols.items[i].st_other,
                        .st_shndx = self.symbols.items[i].st_shndx,
                    };
                    if (foreign_endian) {
                        bswapAllFields(elf.Elf64_Sym, sym);
                    }
                }
                try self.file.pwriteAll(mem.sliceAsBytes(buf), syms_sect.sh_offset);
            },
        }
    }
};

/// Truncates the existing file contents and overwrites the contents.
/// Returns an error if `file` is not already open with +read +write +seek abilities.
pub fn writeFile(allocator: *Allocator, module: ir.Module, file: fs.File) !Result {
    // Reject output modes that have no writer implemented yet.
    switch (module.output_mode) {
        .Exe, .Obj => {},
        .Lib => return error.TODOImplementWritingLibFiles,
    }

    // Only ELF emission is implemented so far.
    switch (module.object_format) {
        .unknown => unreachable, // TODO remove this tag from the enum
        .elf => {},
        .coff => return error.TODOImplementWritingCOFF,
        .macho => return error.TODOImplementWritingMachO,
        .wasm => return error.TODOImplementWritingWasmObjects,
    }

    // All bookkeeping lives in an Update; its lists share one allocator.
    var ctx = Update{
        .file = file,
        .module = &module,
        .sections = std.ArrayList(elf.Elf64_Shdr).init(allocator),
        .shdr_table_offset = null,
        .program_headers = std.ArrayList(elf.Elf64_Phdr).init(allocator),
        .phdr_table_offset = null,
        .phdr_load_re_index = null,
        .entry_addr = null,
        .shstrtab = std.ArrayList(u8).init(allocator),
        .shstrtab_index = null,
        .text_section_index = null,
        .symtab_section_index = null,
        .symbols = std.ArrayList(elf.Elf64_Sym).init(allocator),
        .errors = std.ArrayList(ErrorMsg).init(allocator),
    };
    defer ctx.deinit();

    try ctx.perform();

    // Hand ownership of accumulated diagnostics to the caller.
    return Result{
        .errors = ctx.errors.toOwnedSlice(),
    };
}

/// Returns error.IncrFailed if incremental update could not be performed.
fn updateFileInner(allocator: *Allocator, module: ir.Module, file: fs.File) !Result {
    //var ehdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 = undefined;

    // TODO implement incremental linking
    // Until then, always report failure so the caller falls back to writeFile.
    return error.IncrFailed;
}

/// Saturating multiplication
fn satMul(a: var, b: var) @TypeOf(a, b) {
    const T = @TypeOf(a, b);
    // Clamp to the type's maximum on overflow instead of propagating an error.
    const product = std.math.mul(T, a, b) catch std.math.maxInt(T);
    return product;
}

// Byte-swaps every field of an ELF struct in place (needed when the target's
// endianness differs from the host's).
fn bswapAllFields(comptime S: type, ptr: *S) void {
    @panic("TODO implement bswapAllFields");
}

// Narrows a 64-bit program header to the 32-bit ELF layout.
// Asserts (via @intCast) that every offset/address fits in 32 bits.
fn progHeaderTo32(phdr: elf.Elf64_Phdr) elf.Elf32_Phdr {
    return .{
        .p_type = phdr.p_type,
        .p_flags = phdr.p_flags,
        .p_offset = @intCast(u32, phdr.p_offset),
        .p_vaddr = @intCast(u32, phdr.p_vaddr),
        .p_paddr = @intCast(u32, phdr.p_paddr),
        .p_filesz = @intCast(u32, phdr.p_filesz),
        .p_memsz = @intCast(u32, phdr.p_memsz),
        .p_align = @intCast(u32, phdr.p_align),
    };
}

// Narrows a 64-bit section header to the 32-bit ELF layout.
// Asserts (via @intCast) that every offset/size fits in 32 bits.
fn sectHeaderTo32(shdr: elf.Elf64_Shdr) elf.Elf32_Shdr {
    return .{
        .sh_name = shdr.sh_name,
        .sh_type = shdr.sh_type,
        .sh_flags = @intCast(u32, shdr.sh_flags),
        .sh_addr = @intCast(u32, shdr.sh_addr),
        .sh_offset = @intCast(u32, shdr.sh_offset),
        .sh_size = @intCast(u32, shdr.sh_size),
        .sh_link = shdr.sh_link,
        .sh_info = shdr.sh_info,
        .sh_addralign = @intCast(u32, shdr.sh_addralign),
        .sh_entsize = @intCast(u32, shdr.sh_entsize),
    };
}

// Picks the filesystem mode for the output: executable bits for anything
// that will be directly run (or dlopen'd), default mode otherwise.
fn determineMode(module: ir.Module) fs.File.Mode {
    // On common systems with a 0o022 umask, 0o777 will still result in a file created
    // with 0o755 permissions, but it works appropriately if the system is configured
    // more leniently. As another data point, C's fopen seems to open files with the
    // 666 mode.
    const executable_mode = if (std.Target.current.os.tag == .windows) 0 else 0o777;
    return switch (module.output_mode) {
        .Exe => executable_mode,
        .Obj => fs.File.default_mode,
        .Lib => switch (module.link_mode) {
            .Dynamic => executable_mode,
            .Static => fs.File.default_mode,
        },
    };
}
src-self-hosted/link.zig
const std = @import("std");
const mem = std.mem;

// Autogenerated lookup table for the Unicode `Extender` property (PropList).
// The struct is the file itself; fields below are instance fields.
const Extender = @This();

allocator: *mem.Allocator,
// array[i] == true  <=>  code point (lo + i) has the Extender property.
array: []bool,
// Lowest and highest code points present in the table; everything outside
// [lo, hi] is not an Extender.
lo: u21 = 183,
hi: u21 = 125254,

// Allocates and fills the table. Caller must call `deinit` to free it.
// All indices below are offsets from `lo` (183), generated from PropList.txt.
pub fn init(allocator: *mem.Allocator) !Extender {
    var instance = Extender{
        .allocator = allocator,
        .array = try allocator.alloc(bool, 125072),
    };

    mem.set(bool, instance.array, false);
    var index: u21 = 0;
    instance.array[0] = true;
    index = 537;
    while (index <= 538) : (index += 1) {
        instance.array[index] = true;
    }
    instance.array[1417] = true;
    instance.array[1859] = true;
    instance.array[2718] = true;
    instance.array[3471] = true;
    instance.array[3599] = true;
    instance.array[5971] = true;
    instance.array[6028] = true;
    instance.array[6640] = true;
    instance.array[7039] = true;
    instance.array[7108] = true;
    instance.array[12110] = true;
    index = 12154;
    while (index <= 12158) : (index += 1) {
        instance.array[index] = true;
    }
    index = 12262;
    while (index <= 12263) : (index += 1) {
        instance.array[index] = true;
    }
    index = 12357;
    while (index <= 12359) : (index += 1) {
        instance.array[index] = true;
    }
    instance.array[40798] = true;
    instance.array[42325] = true;
    instance.array[43288] = true;
    instance.array[43311] = true;
    instance.array[43449] = true;
    instance.array[43558] = true;
    index = 43580;
    while (index <= 43581) : (index += 1) {
        instance.array[index] = true;
    }
    instance.array[65209] = true;
    instance.array[70310] = true;
    index = 70927;
    while (index <= 70929) : (index += 1) {
        instance.array[index] = true;
    }
    instance.array[72161] = true;
    index = 92811;
    while (index <= 92812) : (index += 1) {
        instance.array[index] = true;
    }
    index = 93993;
    while (index <= 93994) : (index += 1) {
        instance.array[index] = true;
    }
    instance.array[93996] = true;
    index = 123013;
    while (index <= 123014) : (index += 1) {
        instance.array[index] = true;
    }
    index = 125069;
    while (index <= 125071) : (index += 1) {
        instance.array[index] = true;
    }

    // Placeholder: 0. Struct name, 1. Code point kind
    // (leftover template marker from the code generator)
    return instance;
}

pub fn deinit(self: *Extender) void {
    self.allocator.free(self.array);
}

// isExtender checks if cp is of the kind Extender.
pub fn isExtender(self: Extender, cp: u21) bool {
    if (cp < self.lo or cp > self.hi) return false;
    const index = cp - self.lo;
    // The length check is defensive; [lo, hi] already maps inside the array.
    return if (index >= self.array.len) false else self.array[index];
}
src/components/autogen/PropList/Extender.zig
const std = @import("std");

const Builder = std.build.Builder;
const Step = std.build.Step;
const FileSource = std.build.FileSource;
const GeneratedFile = std.build.GeneratedFile;
const LibExeObjStep = std.build.LibExeObjStep;

// The Sdk struct is this file itself; fields below are instance fields.
const Sdk = @This();

// Directory containing this file, resolved at comptime so it can be
// concatenated with ++ to form package paths.
fn sdkRoot() []const u8 {
    return std.fs.path.dirname(@src().file) orelse ".";
}

// Symbol list used to generate the SDL2 stub library for cross-compilation.
const sdl2_symbol_definitions = @embedFile("stubs/libSDL2.def");

builder: *Builder,
// Path to the user-provided JSON config mapping target triples to SDK paths.
config_path: []const u8,
prepare_sources: *PrepareStubSourceStep,

/// Creates a instance of the Sdk and initializes internal steps.
/// Initialize once, use everywhere (in your `build` function).
pub fn init(b: *Builder) *Sdk {
    const sdk = b.allocator.create(Sdk) catch @panic("out of memory");
    sdk.* = .{
        .builder = b,
        .config_path = std.fs.path.join(b.allocator, &[_][]const u8{
            b.pathFromRoot(".build_config"), "sdl.json",
        }) catch @panic("out of memory"),
        // Set after creation because the step needs a pointer back to the Sdk.
        .prepare_sources = undefined,
    };
    sdk.prepare_sources = PrepareStubSourceStep.create(sdk);
    return sdk;
}

/// Returns a package with the raw SDL api with proper argument types, but no functional/logical changes
/// for a more *ziggy* feeling.
/// This is similar to the *C import* result.
pub fn getNativePackage(sdk: *Sdk, package_name: []const u8) std.build.Pkg {
    return std.build.Pkg{
        .name = sdk.builder.dupe(package_name),
        .path = .{ .path = sdkRoot() ++ "/src/binding/sdl.zig" },
    };
}

/// Returns the smart wrapper for the SDL api. Contains convenient zig types, tagged unions and so on.
pub fn getWrapperPackage(sdk: *Sdk, package_name: []const u8) std.build.Pkg {
    return sdk.builder.dupePkg(std.build.Pkg{
        .name = sdk.builder.dupe(package_name),
        .path = .{ .path = sdkRoot() ++ "/src/wrapper/sdl.zig" },
        .dependencies = &[_]std.build.Pkg{
            sdk.getNativePackage("sdl-native"),
        },
    });
}

/// Links SDL2 to the given exe and adds required installs if necessary.
/// **Important:** The target of the `exe` must already be set, otherwise the Sdk will do the wrong thing!
pub fn link(sdk: *Sdk, exe: *LibExeObjStep, linkage: std.build.LibExeObjStep.Linkage) void { // TODO: Implement const b = sdk.builder; const target = (std.zig.system.NativeTargetInfo.detect(b.allocator, exe.target) catch @panic("failed to detect native target info!")).target; // This is required on all platforms exe.linkLibC(); if (target.os.tag == .windows) { // try linking with vcpkg // TODO: Implement proper vcpkg support exe.addVcpkgPaths(linkage) catch {}; if (exe.vcpkg_bin_path) |path| { exe.linkSystemLibrary("sdl2"); // we found SDL2 in vcpkg, just install and use this variant const src_path = std.fs.path.join(b.allocator, &.{ path, "SDL2.dll" }) catch @panic("out of memory"); b.installBinFile(src_path, "SDL2.dll"); return; } const sdk_paths = sdk.getPaths(target) catch |err| { const writer = std.io.getStdErr().writer(); const target_name = tripleName(sdk.builder.allocator, target) catch @panic("out of memory"); switch (err) { error.FileNotFound => { writer.print("Could not auto-detect SDL2 sdk configuration. Please provide {s} with the following contents filled out:\n", .{ sdk.config_path, }) catch @panic("io error"); writer.print("{{\n \"{s}\": {{\n", .{target_name}) catch @panic("io error"); writer.writeAll( \\ "include": "<path to sdl2 sdk>/include", \\ "libs": "<path to sdl2 sdk>/lib", \\ "bin": "<path to sdl2 sdk>/bin" \\ } \\} \\ ) catch @panic("io error"); writer.writeAll( \\ \\You can obtain a SDL2 sdk for windows from https://www.libsdl.org/download-2.0.php \\ ) catch @panic("io error"); }, error.MissingTarget => { writer.print("{s} is missing a SDK definition for {s}. 
Please add the following section to the file and fill the paths:\n", .{ sdk.config_path, target_name, }) catch @panic("io error"); writer.print(" \"{s}\": {{\n", .{target_name}) catch @panic("io error"); writer.writeAll( \\ "include": "<path to sdl2 sdk>/include", \\ "libs": "<path to sdl2 sdk>/lib", \\ "bin": "<path to sdl2 sdk>/bin" \\} ) catch @panic("io error"); writer.writeAll( \\ \\You can obtain a SDL2 sdk for windows from https://www.libsdl.org/download-2.0.php \\ ) catch @panic("io error"); }, error.InvalidJson => { writer.print("{s} contains invalid JSON. Please fix that file!\n", .{ sdk.config_path, }) catch @panic("io error"); }, error.InvalidTarget => { writer.print("{s} contains a invalid zig triple. Please fix that file!\n", .{ sdk.config_path, }) catch @panic("io error"); }, } std.os.exit(1); }; // linking on windows is sadly not as trivial as on linux: // we have to respect 6 different configurations {x86,x64}-{msvc,mingw}-{dynamic,static} if (target.abi == .msvc and linkage != .dynamic) @panic("SDL cannot be linked statically for MSVC"); // These will be added for C-Imports or C files. 
if (target.abi != .msvc) { // SDL2 (mingw) ships the SDL include files under `include/SDL2/` which is very inconsitent with // all other platforms, so we just remove this prefix here const include_path = std.fs.path.join(b.allocator, &[_][]const u8{ sdk_paths.include, "SDL2", }) catch @panic("out of memory"); exe.addIncludeDir(include_path); } else { exe.addIncludeDir(sdk_paths.include); } // link the right libraries if (target.abi == .msvc) { // and links those as normal libraries exe.addLibPath(sdk_paths.libs); exe.linkSystemLibraryName("SDL2"); } else { const file_name = switch (linkage) { .static => "libSDL2.a", .dynamic => "libSDL2.dll.a", }; const lib_path = std.fs.path.join(b.allocator, &[_][]const u8{ sdk_paths.libs, file_name, }) catch @panic("out of memory"); exe.addObjectFile(lib_path); if (linkage == .static) { // link all system libraries required for SDL2: const static_libs = [_][]const u8{ "setupapi", "user32", "gdi32", "winmm", "imm32", "ole32", "oleaut32", "shell32", "version", "uuid", }; for (static_libs) |lib| exe.linkSystemLibrary(lib); } } if (linkage == .dynamic and exe.kind == .exe) { // On window, we need to copy SDL2.dll to the bin directory // for executables const sdl2_dll_path = std.fs.path.join(sdk.builder.allocator, &[_][]const u8{ sdk_paths.bin, "SDL2.dll", }) catch @panic("out of memory"); sdk.builder.installBinFile(sdl2_dll_path, "SDL2.dll"); } } else if (target.os.tag == .linux) { if (std.Target.current.os.tag != .linux or target.cpu.arch != std.Target.current.cpu.arch) { // linux cross-compilation requires us to do some dirty hacks: // we compile a stub .so file we will link against. 
// This will allow us to work around the "SDL doesn't provide dev packages for linux" const build_linux_sdl_stub = b.addSharedLibrary("SDL2", null, .unversioned); build_linux_sdl_stub.addAssemblyFileSource(sdk.prepare_sources.getStubFile()); build_linux_sdl_stub.setTarget(exe.target); // We need to link against libc exe.linkLibC(); // link against the output of our stub exe.linkLibrary(build_linux_sdl_stub); } else { // on linux, we should rely on the system libraries to "just work" exe.linkSystemLibrary("sdl2"); } } else if (target.isDarwin()) { // on MacOS, we require a brew install // requires sdl2 and sdl2_image to be installed via brew exe.linkSystemLibrary("sdl2"); } else { // on all other platforms, just try the system way: exe.linkSystemLibrary("sdl2"); } } const Paths = struct { include: []const u8, libs: []const u8, bin: []const u8, }; fn getPaths(sdk: *Sdk, target: std.Target) error{ MissingTarget, FileNotFound, InvalidJson, InvalidTarget }!Paths { const json_data = std.fs.cwd().readFileAlloc(sdk.builder.allocator, sdk.config_path, 1 << 20) catch |err| switch (err) { error.FileNotFound => return error.FileNotFound, else => |e| @panic(@errorName(e)), }; var parser = std.json.Parser.init(sdk.builder.allocator, false); var tree = parser.parse(json_data) catch return error.InvalidJson; var root_node = tree.root.Object; var config_iterator = root_node.iterator(); while (config_iterator.next()) |entry| { const config_cross_target = std.zig.CrossTarget.parse(.{ .arch_os_abi = entry.key_ptr.*, }) catch return error.InvalidTarget; const config_target = (std.zig.system.NativeTargetInfo.detect(sdk.builder.allocator, config_cross_target) catch @panic("out of memory")).target; if (target.cpu.arch != config_target.cpu.arch) continue; if (target.os.tag != config_target.os.tag) continue; if (target.abi != config_target.abi) continue; // load paths const node = entry.value_ptr.*.Object; return Paths{ .include = node.get("include").?.String, .libs = 
node.get("libs").?.String,
            .bin = node.get("bin").?.String,
        };
    }
    return error.MissingTarget;
}

/// Build step that synthesizes a stub `sdl.S` assembly file declaring every
/// SDL2 symbol, so cross-compiled Linux builds have something to link against
/// (SDL upstream ships no dev packages for arbitrary Linux targets).
const PrepareStubSourceStep = struct {
    const Self = @This();

    step: Step,
    sdk: *Sdk,
    // generated path of the stub assembly file; filled in by make()
    assembly_source: GeneratedFile,

    /// Allocates and wires up the step; memory comes from the builder's allocator.
    pub fn create(sdk: *Sdk) *PrepareStubSourceStep {
        const psss = sdk.builder.allocator.create(Self) catch @panic("out of memory");
        psss.* = .{
            .step = std.build.Step.init(
                .custom,
                "Prepare SDL2 stub sources",
                sdk.builder.allocator,
                make,
            ),
            .sdk = sdk,
            .assembly_source = .{ .step = &psss.step },
        };
        return psss;
    }

    /// Returns a FileSource for the generated stub, usable as an input of other steps.
    pub fn getStubFile(self: *Self) FileSource {
        return .{ .generated = &self.assembly_source };
    }

    fn make(step: *Step) !void {
        const self = @fieldParentPtr(Self, "step", step);
        // cache the generated file keyed by the symbol list contents
        var cache = CacheBuilder.init(self.sdk.builder, "sdl");
        cache.addBytes(sdl2_symbol_definitions);
        var dirpath = try cache.createAndGetDir();
        defer dirpath.dir.close();
        var file = try dirpath.dir.createFile("sdl.S", .{});
        defer file.close();
        var writer = file.writer();
        // emit one zero-byte global per SDL symbol; that is enough for the
        // linker to resolve references when producing the stub .so
        try writer.writeAll(".text\n");
        var iter = std.mem.split(u8, sdl2_symbol_definitions, "\n");
        while (iter.next()) |line| {
            const sym = std.mem.trim(u8, line, " \r\n\t");
            if (sym.len == 0) continue;
            try writer.print(".global {s}\n", .{sym});
            try writer.writeAll(".align 4\n");
            try writer.print("{s}:\n", .{sym});
            try writer.writeAll(" .byte 0\n");
        }
        // publish the generated file path for dependent steps
        self.assembly_source.path = try std.fs.path.join(self.sdk.builder.allocator, &[_][]const u8{
            dirpath.path,
            "sdl.S",
        });
    }
};

/// Renders `target` as "<arch>-<os>-<abi>"; caller frees the result.
fn tripleName(allocator: *std.mem.Allocator, target: std.Target) ![]u8 {
    const arch_name = @tagName(target.cpu.arch);
    const os_name = @tagName(target.os.tag);
    const abi_name = @tagName(target.abi);
    return std.fmt.allocPrint(allocator, "{s}-{s}-{s}", .{ arch_name, os_name, abi_name });
}

/// Small content-addressed cache helper: feeds inputs into a SHA-1 hash and
/// maps the digest to a directory under the build cache root.
const CacheBuilder = struct {
    const Self = @This();

    builder: *std.build.Builder,
    hasher: std.crypto.hash.Sha1,
    // optional extra path component under the cache root
    subdir: ?[]const u8,

    pub fn init(builder: *std.build.Builder, subdir: ?[]const u8) Self {
        return Self{
            .builder = builder,
            .hasher = std.crypto.hash.Sha1.init(.{}),
            .subdir = if (subdir) |s| builder.dupe(s) else null,
        };
    }

    /// Folds `bytes` into the cache key.
    pub fn addBytes(self: *Self, bytes: []const u8) void {
        self.hasher.update(bytes);
    }

    /// Folds a file's entire contents into the cache key.
    pub fn addFile(self: *Self, file: std.build.FileSource) !void {
        const path = file.getPath(self.builder);
        const data = try std.fs.cwd().readFileAlloc(self.builder.allocator, path, 1 << 32); // 4 GB
        defer self.builder.allocator.free(data);
        self.addBytes(data);
    }

    // Finalizes the hash and renders the cache directory path.
    // NOTE(review): `hasher.final` consumes the hash state — calling both
    // createAndGetDir and createAndGetPath on one CacheBuilder looks
    // unsupported; confirm before reusing an instance.
    fn createPath(self: *Self) ![]const u8 {
        var hash: [20]u8 = undefined;
        self.hasher.final(&hash);
        const path = if (self.subdir) |subdir|
            try std.fmt.allocPrint(
                self.builder.allocator,
                "{s}/{s}/o/{}",
                .{
                    self.builder.cache_root,
                    subdir,
                    std.fmt.fmtSliceHexLower(&hash),
                },
            )
        else
            try std.fmt.allocPrint(
                self.builder.allocator,
                "{s}/o/{}",
                .{
                    self.builder.cache_root,
                    std.fmt.fmtSliceHexLower(&hash),
                },
            );
        return path;
    }

    pub const DirAndPath = struct {
        dir: std.fs.Dir,
        path: []const u8,
    };

    /// Creates (if needed) and opens the cache directory for the hashed inputs.
    pub fn createAndGetDir(self: *Self) !DirAndPath {
        const path = try self.createPath();
        return DirAndPath{
            .path = path,
            .dir = try std.fs.cwd().makeOpenPath(path, .{}),
        };
    }

    /// Creates (if needed) the cache directory and returns only its path.
    pub fn createAndGetPath(self: *Self) ![]const u8 {
        const path = try self.createPath();
        try std.fs.cwd().makePath(path);
        return path;
    }
};
Sdk.zig
// Zig declarations for the DOtherSide C API (Qt/QML bindings).
// All `DosQ*` handles are opaque on the Zig side: they are created, queried
// and destroyed exclusively through the C library.
// NOTE(review): the callback types use the pre-0.11 bare `?fn (...)` function
// type syntax; newer Zig would spell these `?*const fn (...)`.

// ---- opaque handle types ----
pub const DosQVariant = anyopaque;
pub const DosQModelIndex = anyopaque;
pub const DosQAbstractItemModel = anyopaque;
pub const DosQAbstractListModel = anyopaque;
pub const DosQAbstractTableModel = anyopaque;
pub const DosQQmlApplicationEngine = anyopaque;
pub const DosQQuickView = anyopaque;
pub const DosQQmlContext = anyopaque;
pub const DosQHashIntQByteArray = anyopaque;
pub const DosQUrl = anyopaque;
pub const DosQMetaObject = anyopaque;
pub const DosQObject = anyopaque;
pub const DosQQuickImageProvider = anyopaque;
pub const DosPixmap = anyopaque;
pub const DosQPointer = anyopaque;

// ---- C callback signatures (all nullable function pointers, C calling convention) ----
pub const RequestPixmapCallback = ?fn ([*c]const u8, [*c]c_int, [*c]c_int, c_int, c_int, ?*DosPixmap) callconv(.C) void;
pub const DObjectCallback = ?fn (?*anyopaque, ?*DosQVariant, c_int, [*c]?*DosQVariant) callconv(.C) void;
// model/view callbacks mirroring QAbstractItemModel's virtual methods;
// the first ?*anyopaque parameter is the user-supplied model context
pub const RowCountCallback = ?fn (?*anyopaque, ?*const DosQModelIndex, [*c]c_int) callconv(.C) void;
pub const ColumnCountCallback = ?fn (?*anyopaque, ?*const DosQModelIndex, [*c]c_int) callconv(.C) void;
pub const DataCallback = ?fn (?*anyopaque, ?*const DosQModelIndex, c_int, ?*DosQVariant) callconv(.C) void;
pub const SetDataCallback = ?fn (?*anyopaque, ?*const DosQModelIndex, ?*const DosQVariant, c_int, [*c]bool) callconv(.C) void;
pub const RoleNamesCallback = ?fn (?*anyopaque, ?*DosQHashIntQByteArray) callconv(.C) void;
pub const FlagsCallback = ?fn (?*anyopaque, ?*const DosQModelIndex, [*c]c_int) callconv(.C) void;
pub const HeaderDataCallback = ?fn (?*anyopaque, c_int, c_int, c_int, ?*DosQVariant) callconv(.C) void;
pub const IndexCallback = ?fn (?*anyopaque, c_int, c_int, ?*const DosQModelIndex, ?*DosQModelIndex) callconv(.C) void;
pub const ParentCallback = ?fn (?*anyopaque, ?*const DosQModelIndex, ?*DosQModelIndex) callconv(.C) void;
pub const HasChildrenCallback = ?fn (?*anyopaque, ?*const DosQModelIndex, [*c]bool) callconv(.C) void;
pub const CanFetchMoreCallback = ?fn (?*anyopaque, ?*const DosQModelIndex, [*c]bool) callconv(.C) void;
pub const FetchMoreCallback = ?fn (?*anyopaque, ?*const DosQModelIndex) callconv(.C) void;
// QML type registration hooks
pub const CreateDObject = ?fn (c_int, ?*anyopaque, [*c]?*anyopaque, [*c]?*anyopaque) callconv(.C) void;
pub const DeleteDObject = ?fn (c_int, ?*anyopaque) callconv(.C) void;

// ---- C-ABI structs ----
pub const DosQVariantArray = extern struct {
    size: c_int,
    data: [*c]?*DosQVariant,
};
pub const QmlRegisterType = extern struct {
    major: c_int,
    minor: c_int,
    uri: [*c]const u8,
    qml: [*c]const u8,
    staticMetaObject: ?*DosQMetaObject,
    createDObject: CreateDObject,
    deleteDObject: DeleteDObject,
};
pub const ParameterDefinition = extern struct {
    name: [*c]const u8,
    metaType: c_int,
};
pub const SignalDefinition = extern struct {
    name: [*c]const u8,
    parametersCount: c_int,
    parameters: [*c]ParameterDefinition,
};
pub const SignalDefinitions = extern struct {
    count: c_int,
    definitions: [*c]SignalDefinition,
};
pub const SlotDefinition = extern struct {
    name: [*c]const u8,
    returnMetaType: c_int,
    parametersCount: c_int,
    parameters: [*c]ParameterDefinition,
};
pub const SlotDefinitions = extern struct {
    count: c_int,
    definitions: [*c]SlotDefinition,
};
pub const PropertyDefinition = extern struct {
    name: [*c]const u8,
    propertyMetaType: c_int,
    readSlot: [*c]const u8,
    writeSlot: [*c]const u8,
    notifySignal: [*c]const u8,
};
pub const PropertyDefinitions = extern struct {
    count: c_int,
    definitions: [*c]PropertyDefinition,
};
// bundle of all model callbacks handed to the C side in one struct
pub const DosQAbstractItemModelCallbacks = extern struct {
    rowCount: RowCountCallback,
    columnCount: ColumnCountCallback,
    data: DataCallback,
    setData: SetDataCallback,
    roleNames: RoleNamesCallback,
    flags: FlagsCallback,
    headerData: HeaderDataCallback,
    index: IndexCallback,
    parent: ParentCallback,
    hasChildren: HasChildrenCallback,
    canFetchMore: CanFetchMoreCallback,
    fetchMore: FetchMoreCallback,
};

// ---- enums mirroring the C API's integer constants ----
pub const DosQEventLoopProcessEventFlag = enum(c_int) {
    DosQEventLoopProcessEventFlagProcessAllEvents = 0x00,
    DosQEventLoopProcessEventFlagExcludeUserInputEvents = 0x01,
    DosQEventLoopProcessEventFlagProcessExcludeSocketNotifiers = 0x02,
    DosQEventLoopProcessEventFlagProcessAllEventsWaitForMoreEvents = 0x03
};
pub const DosQtConnectionType = enum(c_int) {
    DosQtConnectionTypeAutoConnection = 0,
    DosQtConnectionTypeDirectConnection = 1,
    DosQtConnectionTypeQueuedConnection = 2,
    DosQtConnectionTypeBlockingConnection = 3,
    // NOTE: the "COnnection" typo is preserved from the original binding;
    // renaming it would break existing callers of this public constant.
    DosQtCOnnectionTypeUniqueConnection = 0x80,
};
src/DOtherSideTypes.zig
const std = @import("std");

// TODO consider showing the username (-n/--show-username) on process

/// Runtime configuration for the tool, filled in by `getConfig`.
pub const Config = struct {
    // also show swap usage
    show_swap: bool = false,
    // discriminate by pid
    per_pid: bool = false,
    // if null, then should show all the processes that the user has access of (WIP)
    pid_list: ?[]const u8 = null,
    // print the processes in decrescent order, by total RAM usage
    reverse: bool = false,
    // 0 means that there is no limit
    limit: u32 = 0,
    // show the full command line instead of only the binary name
    show_args: bool = false,
    // shows only the total amount of memory used
    only_total: bool = false,
    // refresh interval in seconds; 0 means that watch is off
    watch: u32 = 0,
    // only show processes of the user id, null means no restriction
    user_id: ?std.os.uid_t = null,
    // TODO consider having more than one uid allowed
};

/// Gets the uid of the owner of given pid.
/// Works by fstat-ing `/proc/<pid>/stat`: that proc file is owned by the
/// process owner, so the file's uid is the process' uid.
pub fn getPidOwner(pid: u32) !std.os.uid_t {
    var buf: [48]u8 = undefined;
    const proc_stat_path = try std.fmt.bufPrint(&buf, "/proc/{}/stat", .{pid});
    const proc_fd = try std.os.open(proc_stat_path, std.os.O.RDONLY, 0);
    defer std.os.close(proc_fd);
    const proc_stat = try std.os.fstat(proc_fd);
    return proc_stat.uid;
}

/// Gets the command line name (with args or not), caller must free the return.
/// Returns `error.skipProc` when the process has no usable command line
/// (e.g. kernel threads), signalling the caller to skip that process.
pub fn getCmdName(allocator: std.mem.Allocator, pid: u32, show_args: bool) ![]const u8 {
    var buf: [48]u8 = undefined;
    const proc_cmd_path = try std.fmt.bufPrint(&buf, "/proc/{}/cmdline", .{pid});
    const file_fd = try std.os.open(proc_cmd_path, std.os.O.RDONLY, 0);
    var file = std.fs.File{ .handle = file_fd, .capable_io_mode = .blocking };
    defer file.close();
    if (try file.reader().readUntilDelimiterOrEofAlloc(allocator, '\n', 512)) |cmd| {
        // cmdline arguments are NUL-separated; turn the separators into
        // spaces, ignoring the last character.
        // FIX: guard the length — the unguarded `cmd.len - 2` underflowed
        // (usize) and panicked for empty or 1-byte cmdline files.
        if (cmd.len > 1)
            std.mem.replaceScalar(u8, cmd[0 .. cmd.len - 2], 0, ' ');
        if (show_args) {
            return cmd;
        } else {
            defer allocator.free(cmd);
            if (getColumn(cmd, 0)) |cmd_name|
                return allocator.dupe(u8, std.fs.path.basename(cmd_name));
        }
    }
    return error.skipProc;
}

/// Gets the n-th column, columns are separated by spaces.
/// Returns null when `str` has fewer than `column + 1` columns.
pub fn getColumn(str: []const u8, column: usize) ?[]const u8 {
    var it = std.mem.tokenize(u8, str, " ");
    var i: usize = 0;
    while (i < column) : (i += 1) {
        _ = it.next(); // ignoring the value
    }
    return it.next();
}

test "getColumn" {
    const str = "this is a test";
    try std.testing.expect(std.mem.eql(u8, getColumn(str, 0).?, "this"));
    try std.testing.expect(std.mem.eql(u8, getColumn(str, 1).?, "is"));
    try std.testing.expect(std.mem.eql(u8, getColumn(str, 2).?, "a"));
    try std.testing.expect(std.mem.eql(u8, getColumn(str, 3).?, "test"));
    try std.testing.expect(getColumn(str, 4) == null);
}

/// Checks if str1 starts with str2, case sensitive
pub fn startsWith(str1: []const u8, str2: []const u8) bool {
    return if (str2.len > str1.len) false else std.mem.eql(u8, str1[0..str2.len], str2);
}

test "startsWith" {
    try std.testing.expect(startsWith("Starting today", "Start"));
    try std.testing.expect(startsWith("doesStarting today", "doesStart"));
    try std.testing.expect(!startsWith("Starting today...", "start"));
    try std.testing.expect(!startsWith("today starting...", " today"));
}

/// Reads a file and returns an iterator of the contents, caller is responsible
/// of deallocating the contents read.
/// Reads at most 4096 bytes, stopping early at the first NUL byte.
pub fn readLines(allocator: std.mem.Allocator, file_path: []const u8) !std.mem.SplitIterator(u8) {
    const file_fd = try std.os.open(file_path, std.os.O.RDONLY, 0);
    var file = std.fs.File{ .handle = file_fd };
    defer file.close();
    const file_content = try file.reader().readUntilDelimiterOrEofAlloc(allocator, 0, 4096);
    if (file_content) |content| {
        return std.mem.split(u8, content, "\n");
    } else return error.invalidFile;
}

/// Checks if path is a existing file (user has to have access too) and its size is at least of 1
pub fn fileExistsNotEmpty(path: []const u8) bool {
    const file_fd = std.os.open(path, std.os.O.RDONLY, 0) catch return false;
    defer std.os.close(file_fd);
    const proc_stat = std.os.fstat(file_fd) catch return false;
    return proc_stat.size > 0;
}

/// Returns a slice with a human-readable format based on `num`
/// (`num` is taken to be in KiB, matching the first unit — TODO confirm
/// against the /proc fields the callers read).
pub fn toHuman(buffer: []u8, num: usize) []const u8 {
    const powers = [_][]const u8{ "KiB", "MiB", "GiB", "TiB" };
    var f: f64 = @intToFloat(f64, num);
    var i: u2 = 0;
    // FIX: stop at the last unit — the unguarded loop overflowed the u2
    // counter (and indexed past `powers`) for absurdly large inputs.
    while (f >= 1000.0 and i < powers.len - 1) : (i += 1) {
        f /= 1024.0;
    }
    return std.fmt.bufPrint(buffer, "{d:.1} {s}", .{ f, powers[i] }) catch unreachable;
}

test "toHuman" {
    var buffer: [9]u8 = undefined;
    try std.testing.expect(std.mem.eql(u8, toHuman(&buffer, 128), "128.0 KiB"));
    try std.testing.expect(std.mem.eql(u8, toHuman(&buffer, 1024), "1.0 MiB"));
    try std.testing.expect(std.mem.eql(u8, toHuman(&buffer, 2184), "2.1 MiB"));
    try std.testing.expect(std.mem.eql(u8, toHuman(&buffer, 200184), "195.5 MiB"));
}

/// prints the usage and then exits
pub fn usageExit(exit_value: u8) noreturn {
    const usage_str =
        \\Usage: coremem [OPTION]...
        \\Show program core memory usage
        \\-h, --help Show this help and exits
        \\-S, --swap Show swap information
        \\-s, --show-args Show all command line arguments
        \\-r, --reverse Reverses the order that processes are shown
        \\-t, --total Show only the total RAM memory in a human readable way
        \\-d, --discriminate-by-pid Show by process rather than by program
        \\-w, --watch <N> Measure and show process memory every N seconds
        \\-l, --limit <N> Show only the last N processes
        \\-u, --user-id [uid] Only consider the processes owned by uid (if none specified, defaults to current user)
        \\-p, --pid <pid>[,pid2,...pidN] Only shows the memory usage of the PIDs specified
    ;
    const out = if (exit_value == 0) std.io.getStdOut() else std.io.getStdErr();
    out.writer().print("{s}\n", .{usage_str}) catch {}; // does nothing in case of error
    std.os.exit(exit_value);
}

/// One command-line option: long/short spelling, arity, and which Config
/// field it maps to.
const Flag = struct {
    long: []const u8,
    short: u8,
    kind: enum { no_arg, optional_arg, needs_arg } = .no_arg,
    identifier: enum { swap, by_pid, args, total, reverse, limit, user, pid, watch, help },
};

// every option the parser in getConfig understands
const flags = [_]Flag{
    .{ .identifier = .by_pid, .long = "discriminate-by-pid", .short = 'd' },
    .{ .identifier = .help, .long = "help", .short = 'h' },
    .{ .identifier = .args, .long = "show-args", .short = 's' },
    .{ .identifier = .swap, .long = "swap", .short = 'S' },
    .{ .identifier = .reverse, .long = "reverse", .short = 'r' },
    .{ .identifier = .total, .long = "total", .short = 't' },
    .{ .identifier = .user, .long = "user-id", .short = 'u', .kind = .optional_arg },
    .{ .identifier = .limit, .long = "limit", .short = 'l', .kind = .needs_arg },
    .{ .identifier = .pid, .long = "pid", .short = 'p', .kind = .needs_arg },
    .{ .identifier = .watch, .long = "watch", .short = 'w', .kind = .needs_arg },
};

/// Parse the args and returns the config.
/// Supports clustered short options ("-Sr"), "--opt=value", "-l5" and
/// space-separated values; exits via usageExit on any malformed input.
pub fn getConfig() !Config {
    var config: Config = .{};
    var iter_args = std.process.ArgIterator.init();
    _ = iter_args.skip(); // skip cmd name
    while (iter_args.nextPosix()) |arg| {
        if (arg.len < 2 or arg[0] != '-') usageExit(1); // no positional args, nor only '-' allowed
        // strip the leading '-'; a second '-' (long option) stays in front.
        // note: `arg` is NUL-terminated ([:0]), so indexing at len below
        // reads the 0 sentinel instead of panicking — presumably relied upon;
        // keep the slicing forms that preserve the sentinel.
        var opt_cluster = arg[1..];
        while (opt_cluster.len > 0) : (opt_cluster = opt_cluster[1..]) {
            // get the index of the equals, if not then get the last index of the cluster
            const stop_index = if (std.mem.indexOfScalar(u8, opt_cluster, '=')) |equals_index|
                equals_index
            else
                opt_cluster.len;
            // match either the long name (up to '=') or the short letter
            const flag = for (flags) |flag| {
                if ((opt_cluster[0] == '-' and std.mem.eql(u8, opt_cluster[1..stop_index], flag.long) or opt_cluster[0] == flag.short)) {
                    break flag;
                }
            } else usageExit(1);
            if (flag.kind == .no_arg) {
                switch (flag.identifier) {
                    .help => usageExit(0),
                    .total => config.only_total = true,
                    .args => config.show_args = true,
                    .swap => config.show_swap = true,
                    .by_pid => config.per_pid = true,
                    .reverse => config.reverse = true,
                    else => unreachable,
                }
                // a long option consumes the whole arg; a short one may be
                // part of a cluster, so keep scanning it
                if (opt_cluster[0] == '-') break else continue;
            }
            const opt_arg = blk: {
                if (opt_cluster[stop_index] == '=') {
                    // if stop_index is the last character returns null, else returns the slice after the equals
                    break :blk if (stop_index < opt_cluster.len - 1) opt_cluster[stop_index + 1 ..] else null;
                } else if (opt_cluster.len > 1 and opt_cluster[0] != '-') {
                    // short option with the value glued on, e.g. "-l5"
                    break :blk opt_cluster[1..];
                } else {
                    // get the next arg or null
                    break :blk iter_args.nextPosix();
                }
            };
            if (opt_arg) |arg_value| {
                switch (flag.identifier) {
                    .user => {
                        if (arg_value[0] != '-') {
                            config.user_id = try std.fmt.parseInt(std.os.uid_t, arg_value, 10);
                        } else {
                            // `-u` had no value: the token we just consumed is
                            // actually the next flag. Default to the current
                            // uid and keep parsing that token.
                            // FIX: this used to fall through to the `break`
                            // below, silently discarding the next flag; the
                            // `continue` (whose loop-continuation slices off
                            // the leading '-') resumes parsing it.
                            config.user_id = std.os.linux.getuid();
                            opt_cluster = arg_value[0..];
                            continue;
                        }
                    },
                    .limit => config.limit = try std.fmt.parseInt(u32, arg_value, 10),
                    .watch => config.watch = try std.fmt.parseInt(u32, arg_value, 10),
                    .pid => config.pid_list = arg_value,
                    else => unreachable,
                }
            } else if (flag.kind == .optional_arg) {
                switch (flag.identifier) {
                    .user => config.user_id = std.os.linux.getuid(),
                    else => unreachable,
                }
            } else usageExit(1);
            // an argument-taking option consumes the rest of the cluster
            break;
        }
    }
    return config;
}
src/utils.zig
const std = @import("std");
const heap = std.heap;
const mem = std.mem;
const zig = std.zig;

/// Builds a small test JSON object: {"one": 1, "two": 2.0}.
/// The map's memory comes from `allocator` and is owned by the caller.
fn getJsonObject(allocator: *std.mem.Allocator) !std.json.Value {
    var value = std.json.Value{ .Object = std.json.ObjectMap.init(allocator) };
    _ = try value.Object.put("one", std.json.Value{ .Integer = @intCast(i64, 1) });
    _ = try value.Object.put("two", std.json.Value{ .Float = 2.0 });
    return value;
}

/// Expose a print function to Javascript.
/// The extern keyword can be used to link against a variable that is exported from another object.
/// https://webassembly.github.io/spec/core/syntax/types.html
extern fn print(i32) void;

/// Write a test JSON file
/// https://github.com/ziglang/zig/blob/25ec2dbc1e2302d1138749262b588d3e438fcd55/lib/std/json/write_stream.zig#L238
/// Caller owns the returned slice (allocated with `allocator`).
/// FIX: previously returned `slice_stream.getWritten()` directly — a slice
/// into the stack-local `out_buf`, i.e. a dangling pointer once this
/// function returned.
fn writeJson(allocator: *mem.Allocator, source: []const u8) ![]u8 {
    _ = source; // unused; kept for signature parity with `format`
    var out_buf: [1024]u8 = undefined;
    var slice_stream = std.io.fixedBufferStream(&out_buf);
    const out_stream = slice_stream.outStream();
    var w = std.json.writeStream(out_stream, 10);
    try w.beginObject();
    try w.objectField("object");
    try w.emitJson(try getJsonObject(allocator));
    try w.objectField("string");
    try w.emitString("This is a string");
    try w.objectField("array");
    try w.beginArray();
    try w.arrayElem();
    try w.emitString("Another string");
    try w.arrayElem();
    try w.emitNumber(@as(i32, 1));
    try w.arrayElem();
    try w.emitNumber(@as(f32, 3.5));
    try w.endArray();
    try w.objectField("int");
    try w.emitNumber(@as(i32, 10));
    try w.objectField("float");
    try w.emitNumber(@as(f32, 3.5));
    try w.endObject();
    // copy out of the stack buffer so the result outlives this frame
    return try allocator.dupe(u8, slice_stream.getWritten());
}

/// Format zig `source` code with zig-fmt
/// https://github.com/ziglang/zig/blob/4a69b11e742365d68e4d92aa18d6493db9d3edaf/lib/std/io/fixed_buffer_stream.zig#L145
/// Returns `error.ParseError` when `source` is not valid Zig.
/// Caller owns the returned slice (allocated with `allocator`).
/// FIX: previously returned `fbs.getWritten()` — a slice into the stack-local
/// `buf`, which `format_export` then handed to the wasm host as a dangling
/// pointer. The rendered text is now duplicated on the heap.
fn format(allocator: *mem.Allocator, source: []const u8) ![]u8 {
    // parse input source code and create the abstract syntax tree
    const tree = try zig.parse(allocator, source);
    defer tree.deinit();
    if (tree.errors.len != 0) {
        return error.ParseError;
    }
    var buf: [1024]u8 = undefined;
    print(buf.len); // debug: report the render buffer size to the JS host
    var fbs = std.io.fixedBufferStream(&buf);
    const out_stream = fbs.writer(); // fbs.outStream() is the same but it's deprecated
    // `changed` presumably reports whether the rendered output differs from
    // the input `source` — unused here; TODO confirm its exact meaning.
    const changed = try zig.render(allocator, out_stream, tree);
    _ = changed;
    return try allocator.dupe(u8, fbs.getWritten());
}

/// Formats the Zig source at `input_ptr[0..input_len]`.
/// On success writes the result pointer/length through the out-parameters and
/// returns true; the result is page-allocated, so the wasm host can release it
/// with `_wasm_dealloc`. Returns false on any failure.
export fn format_export(input_ptr: [*]const u8, input_len: usize, output_ptr: *[*]u8, output_len: *usize) bool {
    const input = input_ptr[0..input_len];
    // FIX: the old `assert(err == error.ParseError)` was wrong — `format`
    // can also fail with OutOfMemory or NoSpaceLeft (render buffer full);
    // report every failure to the host instead of asserting.
    var output = format(std.heap.page_allocator, input) catch {
        return false;
    };
    // another example
    // var gpa = heap.GeneralPurposeAllocator(.{}){};
    // var output = writeJson(&gpa.allocator, input) catch |err| {
    //     return false;
    // };
    output_ptr.* = output.ptr;
    output_len.* = output.len;
    return true;
}

/// Allocates `len` bytes for the wasm host; returns the address, or 0 on failure.
export fn _wasm_alloc(len: usize) u32 {
    // FIX: the error capture was unused (`catch |err| return 0`)
    const buf = heap.page_allocator.alloc(u8, len) catch return 0;
    return @ptrToInt(buf.ptr);
}

/// Frees a buffer previously handed out by `_wasm_alloc` or `format_export`.
export fn _wasm_dealloc(ptr: [*]const u8, len: usize) void {
    heap.page_allocator.free(ptr[0..len]);
}
src/fmt.zig
//-------------------------------------------------------------------------------- // Section: Types (11) //-------------------------------------------------------------------------------- pub const BROADCAST_SYSTEM_MESSAGE_FLAGS = enum(u32) { ALLOWSFW = 128, FLUSHDISK = 4, FORCEIFHUNG = 32, IGNORECURRENTTASK = 2, NOHANG = 8, NOTIMEOUTIFNOTHUNG = 64, POSTMESSAGE = 16, QUERY = 1, SENDNOTIFYMESSAGE = 256, LUID = 1024, RETURNHDESK = 512, _, pub fn initFlags(o: struct { ALLOWSFW: u1 = 0, FLUSHDISK: u1 = 0, FORCEIFHUNG: u1 = 0, IGNORECURRENTTASK: u1 = 0, NOHANG: u1 = 0, NOTIMEOUTIFNOTHUNG: u1 = 0, POSTMESSAGE: u1 = 0, QUERY: u1 = 0, SENDNOTIFYMESSAGE: u1 = 0, LUID: u1 = 0, RETURNHDESK: u1 = 0, }) BROADCAST_SYSTEM_MESSAGE_FLAGS { return @intToEnum(BROADCAST_SYSTEM_MESSAGE_FLAGS, (if (o.ALLOWSFW == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_FLAGS.ALLOWSFW) else 0) | (if (o.FLUSHDISK == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_FLAGS.FLUSHDISK) else 0) | (if (o.FORCEIFHUNG == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_FLAGS.FORCEIFHUNG) else 0) | (if (o.IGNORECURRENTTASK == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_FLAGS.IGNORECURRENTTASK) else 0) | (if (o.NOHANG == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_FLAGS.NOHANG) else 0) | (if (o.NOTIMEOUTIFNOTHUNG == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_FLAGS.NOTIMEOUTIFNOTHUNG) else 0) | (if (o.POSTMESSAGE == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_FLAGS.POSTMESSAGE) else 0) | (if (o.QUERY == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_FLAGS.QUERY) else 0) | (if (o.SENDNOTIFYMESSAGE == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_FLAGS.SENDNOTIFYMESSAGE) else 0) | (if (o.LUID == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_FLAGS.LUID) else 0) | (if (o.RETURNHDESK == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_FLAGS.RETURNHDESK) else 0) ); } }; pub const BSF_ALLOWSFW = BROADCAST_SYSTEM_MESSAGE_FLAGS.ALLOWSFW; pub const BSF_FLUSHDISK = BROADCAST_SYSTEM_MESSAGE_FLAGS.FLUSHDISK; pub const BSF_FORCEIFHUNG = BROADCAST_SYSTEM_MESSAGE_FLAGS.FORCEIFHUNG; pub const 
BSF_IGNORECURRENTTASK = BROADCAST_SYSTEM_MESSAGE_FLAGS.IGNORECURRENTTASK; pub const BSF_NOHANG = BROADCAST_SYSTEM_MESSAGE_FLAGS.NOHANG; pub const BSF_NOTIMEOUTIFNOTHUNG = BROADCAST_SYSTEM_MESSAGE_FLAGS.NOTIMEOUTIFNOTHUNG; pub const BSF_POSTMESSAGE = BROADCAST_SYSTEM_MESSAGE_FLAGS.POSTMESSAGE; pub const BSF_QUERY = BROADCAST_SYSTEM_MESSAGE_FLAGS.QUERY; pub const BSF_SENDNOTIFYMESSAGE = BROADCAST_SYSTEM_MESSAGE_FLAGS.SENDNOTIFYMESSAGE; pub const BSF_LUID = BROADCAST_SYSTEM_MESSAGE_FLAGS.LUID; pub const BSF_RETURNHDESK = BROADCAST_SYSTEM_MESSAGE_FLAGS.RETURNHDESK; pub const BROADCAST_SYSTEM_MESSAGE_INFO = enum(u32) { LLCOMPONENTS = 0, LLDESKTOPS = 16, PPLICATIONS = 8, _, pub fn initFlags(o: struct { LLCOMPONENTS: u1 = 0, LLDESKTOPS: u1 = 0, PPLICATIONS: u1 = 0, }) BROADCAST_SYSTEM_MESSAGE_INFO { return @intToEnum(BROADCAST_SYSTEM_MESSAGE_INFO, (if (o.LLCOMPONENTS == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_INFO.LLCOMPONENTS) else 0) | (if (o.LLDESKTOPS == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_INFO.LLDESKTOPS) else 0) | (if (o.PPLICATIONS == 1) @enumToInt(BROADCAST_SYSTEM_MESSAGE_INFO.PPLICATIONS) else 0) ); } }; pub const BSM_ALLCOMPONENTS = BROADCAST_SYSTEM_MESSAGE_INFO.LLCOMPONENTS; pub const BSM_ALLDESKTOPS = BROADCAST_SYSTEM_MESSAGE_INFO.LLDESKTOPS; pub const BSM_APPLICATIONS = BROADCAST_SYSTEM_MESSAGE_INFO.PPLICATIONS; pub const USER_OBJECT_INFORMATION_INDEX = enum(u32) { FLAGS = 1, HEAPSIZE = 5, IO = 6, NAME = 2, TYPE = 3, USER_SID = 4, }; pub const UOI_FLAGS = USER_OBJECT_INFORMATION_INDEX.FLAGS; pub const UOI_HEAPSIZE = USER_OBJECT_INFORMATION_INDEX.HEAPSIZE; pub const UOI_IO = USER_OBJECT_INFORMATION_INDEX.IO; pub const UOI_NAME = USER_OBJECT_INFORMATION_INDEX.NAME; pub const UOI_TYPE = USER_OBJECT_INFORMATION_INDEX.TYPE; pub const UOI_USER_SID = USER_OBJECT_INFORMATION_INDEX.USER_SID; pub const WINSTAENUMPROCA = fn( param0: ?PSTR, param1: LPARAM, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const WINSTAENUMPROCW = fn( param0: ?PWSTR, param1: LPARAM, 
) callconv(@import("std").os.windows.WINAPI) BOOL; pub const DESKTOPENUMPROCA = fn( param0: ?PSTR, param1: LPARAM, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const DESKTOPENUMPROCW = fn( param0: ?PWSTR, param1: LPARAM, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type has a FreeFunc 'CloseWindowStation', what can Zig do with this information? pub const HWINSTA = *opaque{}; // TODO: this type has a FreeFunc 'CloseDesktop', what can Zig do with this information? pub const HDESK = *opaque{}; pub const USEROBJECTFLAGS = extern struct { fInherit: BOOL, fReserved: BOOL, dwFlags: u32, }; pub const BSMINFO = extern struct { cbSize: u32, hdesk: ?HDESK, hwnd: ?HWND, luid: LUID, }; //-------------------------------------------------------------------------------- // Section: Functions (31) //-------------------------------------------------------------------------------- // TODO: this type is limited to platform 'windows5.0' pub extern "USER32" fn CreateDesktopA( lpszDesktop: ?[*:0]const u8, lpszDevice: ?[*:0]const u8, pDevmode: ?*DEVMODEA, dwFlags: u32, dwDesiredAccess: u32, lpsa: ?*SECURITY_ATTRIBUTES, ) callconv(@import("std").os.windows.WINAPI) ?HDESK; // TODO: this type is limited to platform 'windows5.0' pub extern "USER32" fn CreateDesktopW( lpszDesktop: ?[*:0]const u16, lpszDevice: ?[*:0]const u16, pDevmode: ?*DEVMODEW, dwFlags: u32, dwDesiredAccess: u32, lpsa: ?*SECURITY_ATTRIBUTES, ) callconv(@import("std").os.windows.WINAPI) ?HDESK; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "USER32" fn CreateDesktopExA( lpszDesktop: ?[*:0]const u8, lpszDevice: ?[*:0]const u8, pDevmode: ?*DEVMODEA, dwFlags: u32, dwDesiredAccess: u32, lpsa: ?*SECURITY_ATTRIBUTES, ulHeapSize: u32, pvoid: ?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) ?HDESK; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "USER32" fn CreateDesktopExW( lpszDesktop: ?[*:0]const u16, lpszDevice: ?[*:0]const u16, pDevmode: 
// NOTE(review): machine-generated Win32 API bindings (zigwin32 style). This chunk
// begins mid-declaration: the parameters below are the tail of a CreateDesktopExW
// declaration whose opening tokens appear before this chunk.
?*DEVMODEW,
    dwFlags: u32,
    dwDesiredAccess: u32,
    lpsa: ?*SECURITY_ATTRIBUTES,
    ulHeapSize: u32,
    pvoid: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) ?HDESK;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn OpenDesktopA(
    lpszDesktop: ?[*:0]const u8,
    dwFlags: u32,
    fInherit: BOOL,
    dwDesiredAccess: u32,
) callconv(@import("std").os.windows.WINAPI) ?HDESK;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn OpenDesktopW(
    lpszDesktop: ?[*:0]const u16,
    dwFlags: u32,
    fInherit: BOOL,
    dwDesiredAccess: u32,
) callconv(@import("std").os.windows.WINAPI) ?HDESK;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn OpenInputDesktop(
    dwFlags: u32,
    fInherit: BOOL,
    dwDesiredAccess: u32,
) callconv(@import("std").os.windows.WINAPI) ?HDESK;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn EnumDesktopsA(
    hwinsta: ?HWINSTA,
    lpEnumFunc: ?DESKTOPENUMPROCA,
    lParam: LPARAM,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn EnumDesktopsW(
    hwinsta: ?HWINSTA,
    lpEnumFunc: ?DESKTOPENUMPROCW,
    lParam: LPARAM,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn EnumDesktopWindows(
    hDesktop: ?HDESK,
    lpfn: ?WNDENUMPROC,
    lParam: LPARAM,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn SwitchDesktop(
    hDesktop: ?HDESK,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn SetThreadDesktop(
    hDesktop: ?HDESK,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn CloseDesktop(
    hDesktop: ?HDESK,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn GetThreadDesktop(
    dwThreadId: u32,
) callconv(@import("std").os.windows.WINAPI) ?HDESK;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn CreateWindowStationA(
    lpwinsta: ?[*:0]const u8,
    dwFlags: u32,
    dwDesiredAccess: u32,
    lpsa: ?*SECURITY_ATTRIBUTES,
) callconv(@import("std").os.windows.WINAPI) ?HWINSTA;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn CreateWindowStationW(
    lpwinsta: ?[*:0]const u16,
    dwFlags: u32,
    dwDesiredAccess: u32,
    lpsa: ?*SECURITY_ATTRIBUTES,
) callconv(@import("std").os.windows.WINAPI) ?HWINSTA;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn OpenWindowStationA(
    lpszWinSta: ?[*:0]const u8,
    fInherit: BOOL,
    dwDesiredAccess: u32,
) callconv(@import("std").os.windows.WINAPI) ?HWINSTA;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn OpenWindowStationW(
    lpszWinSta: ?[*:0]const u16,
    fInherit: BOOL,
    dwDesiredAccess: u32,
) callconv(@import("std").os.windows.WINAPI) ?HWINSTA;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn EnumWindowStationsA(
    lpEnumFunc: ?WINSTAENUMPROCA,
    lParam: LPARAM,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn EnumWindowStationsW(
    lpEnumFunc: ?WINSTAENUMPROCW,
    lParam: LPARAM,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn CloseWindowStation(
    hWinSta: ?HWINSTA,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn SetProcessWindowStation(
    hWinSta: ?HWINSTA,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn GetProcessWindowStation(
) callconv(@import("std").os.windows.WINAPI) ?HWINSTA;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn GetUserObjectInformationA(
    hObj: ?HANDLE,
    nIndex: USER_OBJECT_INFORMATION_INDEX,
    // TODO: what to do with BytesParamIndex 3?
    pvInfo: ?*anyopaque,
    nLength: u32,
    lpnLengthNeeded: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn GetUserObjectInformationW(
    hObj: ?HANDLE,
    nIndex: USER_OBJECT_INFORMATION_INDEX,
    // TODO: what to do with BytesParamIndex 3?
    pvInfo: ?*anyopaque,
    nLength: u32,
    lpnLengthNeeded: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn SetUserObjectInformationA(
    hObj: ?HANDLE,
    nIndex: i32,
    // TODO: what to do with BytesParamIndex 3?
    pvInfo: ?*anyopaque,
    nLength: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn SetUserObjectInformationW(
    hObj: ?HANDLE,
    nIndex: i32,
    // TODO: what to do with BytesParamIndex 3?
    pvInfo: ?*anyopaque,
    nLength: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "USER32" fn BroadcastSystemMessageExA(
    flags: BROADCAST_SYSTEM_MESSAGE_FLAGS,
    lpInfo: ?*BROADCAST_SYSTEM_MESSAGE_INFO,
    Msg: u32,
    wParam: WPARAM,
    lParam: LPARAM,
    pbsmInfo: ?*BSMINFO,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "USER32" fn BroadcastSystemMessageExW(
    flags: BROADCAST_SYSTEM_MESSAGE_FLAGS,
    lpInfo: ?*BROADCAST_SYSTEM_MESSAGE_INFO,
    Msg: u32,
    wParam: WPARAM,
    lParam: LPARAM,
    pbsmInfo: ?*BSMINFO,
) callconv(@import("std").os.windows.WINAPI) i32;

// NOTE(review): unlike the W variant below, the A variant takes plain u32 flags and
// a ?*u32 recipient list — this asymmetry is how the upstream metadata declares it.
pub extern "USER32" fn BroadcastSystemMessageA(
    flags: u32,
    lpInfo: ?*u32,
    Msg: u32,
    wParam: WPARAM,
    lParam: LPARAM,
) callconv(@import("std").os.windows.WINAPI) i32;

// TODO: this type is limited to platform 'windows5.0'
pub extern "USER32" fn BroadcastSystemMessageW(
    flags: BROADCAST_SYSTEM_MESSAGE_FLAGS,
    lpInfo: ?*BROADCAST_SYSTEM_MESSAGE_INFO,
    Msg: u32,
    wParam: WPARAM,
    lParam: LPARAM,
) callconv(@import("std").os.windows.WINAPI) i32;

//--------------------------------------------------------------------------------
// Section: Unicode Aliases (13)
//--------------------------------------------------------------------------------
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
    .ansi => struct {
        pub const WINSTAENUMPROC = thismodule.WINSTAENUMPROCA;
        pub const DESKTOPENUMPROC = thismodule.DESKTOPENUMPROCA;
        pub const CreateDesktop = thismodule.CreateDesktopA;
        pub const CreateDesktopEx = thismodule.CreateDesktopExA;
        pub const OpenDesktop = thismodule.OpenDesktopA;
        pub const EnumDesktops = thismodule.EnumDesktopsA;
        pub const CreateWindowStation = thismodule.CreateWindowStationA;
        pub const OpenWindowStation = thismodule.OpenWindowStationA;
        pub const EnumWindowStations = thismodule.EnumWindowStationsA;
        pub const GetUserObjectInformation = thismodule.GetUserObjectInformationA;
        pub const SetUserObjectInformation = thismodule.SetUserObjectInformationA;
        pub const BroadcastSystemMessageEx = thismodule.BroadcastSystemMessageExA;
        pub const BroadcastSystemMessage = thismodule.BroadcastSystemMessageA;
    },
    .wide => struct {
        pub const WINSTAENUMPROC = thismodule.WINSTAENUMPROCW;
        pub const DESKTOPENUMPROC = thismodule.DESKTOPENUMPROCW;
        pub const CreateDesktop = thismodule.CreateDesktopW;
        pub const CreateDesktopEx = thismodule.CreateDesktopExW;
        pub const OpenDesktop = thismodule.OpenDesktopW;
        pub const EnumDesktops = thismodule.EnumDesktopsW;
        pub const CreateWindowStation = thismodule.CreateWindowStationW;
        pub const OpenWindowStation = thismodule.OpenWindowStationW;
        pub const EnumWindowStations = thismodule.EnumWindowStationsW;
        pub const GetUserObjectInformation = thismodule.GetUserObjectInformationW;
        pub const SetUserObjectInformation = thismodule.SetUserObjectInformationW;
        pub const BroadcastSystemMessageEx = thismodule.BroadcastSystemMessageExW;
        pub const BroadcastSystemMessage = thismodule.BroadcastSystemMessageW;
    },
    .unspecified => if (@import("builtin").is_test) struct {
        pub const WINSTAENUMPROC = *opaque{};
        pub const DESKTOPENUMPROC = *opaque{};
        pub const CreateDesktop = *opaque{};
        pub const CreateDesktopEx = *opaque{};
        pub const OpenDesktop = *opaque{};
        pub const EnumDesktops = *opaque{};
        pub const CreateWindowStation = *opaque{};
        pub const OpenWindowStation = *opaque{};
        pub const EnumWindowStations = *opaque{};
        pub const GetUserObjectInformation = *opaque{};
        pub const SetUserObjectInformation = *opaque{};
        pub const BroadcastSystemMessageEx = *opaque{};
        pub const BroadcastSystemMessage = *opaque{};
    } else struct {
        pub const WINSTAENUMPROC = @compileError("'WINSTAENUMPROC' requires that UNICODE be set to true or false in the root module");
        pub const DESKTOPENUMPROC = @compileError("'DESKTOPENUMPROC' requires that UNICODE be set to true or false in the root module");
        pub const CreateDesktop = @compileError("'CreateDesktop' requires that UNICODE be set to true or false in the root module");
        pub const CreateDesktopEx = @compileError("'CreateDesktopEx' requires that UNICODE be set to true or false in the root module");
        pub const OpenDesktop = @compileError("'OpenDesktop' requires that UNICODE be set to true or false in the root module");
        pub const EnumDesktops = @compileError("'EnumDesktops' requires that UNICODE be set to true or false in the root module");
        pub const CreateWindowStation = @compileError("'CreateWindowStation' requires that UNICODE be set to true or false in the root module");
        pub const OpenWindowStation = @compileError("'OpenWindowStation' requires that UNICODE be set to true or false in the root module");
        pub const EnumWindowStations = @compileError("'EnumWindowStations' requires that UNICODE be set to true or false in the root module");
        pub const GetUserObjectInformation = @compileError("'GetUserObjectInformation' requires that UNICODE be set to true or false in the root module");
        pub const SetUserObjectInformation = @compileError("'SetUserObjectInformation' requires that UNICODE be set to true or false in the root module");
        pub const BroadcastSystemMessageEx = @compileError("'BroadcastSystemMessageEx' requires that UNICODE be set to true or false in the root module");
        pub const BroadcastSystemMessage = @compileError("'BroadcastSystemMessage' requires that UNICODE be set to true or false in the root module");
    },
};

//--------------------------------------------------------------------------------
// Section: Imports (12)
//--------------------------------------------------------------------------------
const BOOL = @import("../foundation.zig").BOOL;
const DEVMODEA = @import("../graphics/gdi.zig").DEVMODEA;
const DEVMODEW = @import("../graphics/gdi.zig").DEVMODEW;
const HANDLE = @import("../foundation.zig").HANDLE;
const HWND = @import("../foundation.zig").HWND;
const LPARAM = @import("../foundation.zig").LPARAM;
const LUID = @import("../foundation.zig").LUID;
const PSTR = @import("../foundation.zig").PSTR;
const PWSTR = @import("../foundation.zig").PWSTR;
const SECURITY_ATTRIBUTES = @import("../security.zig").SECURITY_ATTRIBUTES;
const WNDENUMPROC = @import("../ui/windows_and_messaging.zig").WNDENUMPROC;
const WPARAM = @import("../foundation.zig").WPARAM;

test {
    // The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476
    if (@hasDecl(@This(), "WINSTAENUMPROCA")) { _ = WINSTAENUMPROCA; }
    if (@hasDecl(@This(), "WINSTAENUMPROCW")) { _ = WINSTAENUMPROCW; }
    if (@hasDecl(@This(), "DESKTOPENUMPROCA")) { _ = DESKTOPENUMPROCA; }
    if (@hasDecl(@This(), "DESKTOPENUMPROCW")) { _ = DESKTOPENUMPROCW; }

    @setEvalBranchQuota(
        @import("std").meta.declarations(@This()).len * 3
    );

    // reference all the pub declarations
    if (!@import("builtin").is_test) return;
    inline for (@import("std").meta.declarations(@This())) |decl| {
        if (decl.is_pub) {
            _ = decl;
        }
    }
}
win32/system/stations_and_desktops.zig
/// Returns `true` when `T` opts into the `Redis.Parser` trait: a struct, enum,
/// or union declaring a `Redis` namespace that contains a `Parser` namespace.
/// When the trait is declared but any required function is missing, compilation
/// fails with a message listing the expected signature.
pub fn isParserType(comptime T: type) bool {
    const info = @typeInfo(T);
    const is_container = info == .Struct or info == .Enum or info == .Union;
    if (!(is_container and @hasDecl(T, "Redis") and @hasDecl(T.Redis, "Parser"))) {
        return false;
    }

    // Trait is declared; enforce the full interface.
    if (!@hasDecl(T.Redis.Parser, "parse")) @compileError(
        \\`Redis.Parser` trait requires implementing:
        \\ fn parse(tag: u8, comptime rootParser: type, msg: var) !Self
        \\
    );
    if (!@hasDecl(T.Redis.Parser, "parseAlloc")) @compileError(
        \\`Redis.Parser` trait requires implementing:
        \\ fn parseAlloc(tag: u8, comptime rootParser: type, allocator: *Allocator, msg: var) !Self
        \\
    );
    if (!@hasDecl(T.Redis.Parser, "destroy")) @compileError(
        \\`Redis.Parser` trait requires implementing:
        \\ fn destroy(self: *Self, comptime rootParser: type, allocator: *Allocator) void
        \\
    );
    return true;
}

/// A type that wants access to attributes because it intends to decode them.
/// When the declaration is missing or returns false, attributes are discarded
/// from the stream automatically by the main parser.
pub fn handlesAttributes(comptime T: type) bool {
    if (comptime !isParserType(T)) return false;
    if (!@hasDecl(T.Redis.Parser, "HandlesAttributes")) return false;
    return T.Redis.Parser.HandlesAttributes;
}

/// A type that doesn't want to be wrapped directly in an optional because
/// it would have ill-formed / unclear semantics. An example of this are
/// types that read attributes. For those types this trait defaults to `true`.
pub fn noOptionalWrapper(comptime T: type) bool {
    if (comptime !isParserType(T)) return false;
    const Parser = T.Redis.Parser;
    if (@hasDecl(Parser, "NoOptionalWrapper")) return Parser.NoOptionalWrapper;
    // Attribute-reading parsers default to opting out of optional wrapping.
    if (@hasDecl(Parser, "HandlesAttributes")) return Parser.HandlesAttributes;
    return false;
}

/// A type that knows how to serialize itself as one or more arguments to a
/// Redis command. The RESP3 protocol is used in an asymmetrical way by Redis,
/// so this is NOT the inverse operation of parsing. As an example, a struct
/// might implement decoding from a RESP Map, but the correct way of
/// serializing itself would be as a FLAT sequence of field-value pairs, to be
/// used with XADD or HMSET:
/// HMSET mystruct field1 val1 field2 val2 ...
pub fn isArguments(comptime T: type) bool {
    return switch (@typeInfo(T)) {
        .Struct, .Enum, .Union => @hasDecl(T, "RedisArguments"),
        else => false,
    };
}

/// A container type declaring `RedisCommand` serializes itself as a complete
/// Redis command.
pub fn isCommand(comptime T: type) bool {
    return switch (@typeInfo(T)) {
        .Struct, .Enum, .Union => @hasDecl(T, "RedisCommand"),
        else => false,
    };
}

test "docs" {
    @import("std").meta.refAllDecls(@This());
}
src/traits.zig
// TextBox: a single-line editable text widget with mouse/keyboard editing,
// selection, clipboard support, and a blinking cursor.
// All cursor and selection positions are measured in Unicode codepoints, not
// bytes (see the std.unicode.Utf8View iteration in the helpers below); the
// backing `text` buffer itself is UTF-8 bytes.
const std = @import("std");
const nvg = @import("nanovg");
const gui = @import("../gui.zig");
const Rect = @import("../geometry.zig").Rect;
const Point = @import("../geometry.zig").Point;

const TextBox = @This();

widget: gui.Widget,
allocator: *std.mem.Allocator,
// UTF-8 encoded contents.
text: std.ArrayList(u8),
text_alignment: gui.TextAlignment = .left,
background_color: nvg.Color,
// Optional callback invoked after every content mutation (see onChanged).
onChangedFn: ?fn (*Self) void = null,
hovered: bool = false,
// Cursor position in codepoints.
cursor_position: usize = 0,
// Hover preview of where a click would place the cursor (codepoints).
cursor_position_preview: usize = 0,
show_cursor_position_preview: bool = false,
// Selection endpoints in codepoints; begin may be > end while dragging.
selection_begin: usize = 0,
selection_end: usize = 0,
glyph_positions: std.ArrayList(nvg.GlyphPosition), // cache
// The widget's default key handler, saved in init() before being overridden,
// so onKeyDown can delegate to it first.
base_key_down_fn: fn (*gui.Widget, *gui.KeyEvent) void,
blink: bool = false,
blink_timer: gui.Timer,

const Self = @This();

/// Allocates and initializes a TextBox. Caller owns the result and must call
/// `deinit`. Installs all event handlers and enables keyboard+mouse focus.
pub fn init(allocator: *std.mem.Allocator, rect: Rect(f32)) !*Self {
    var self = try allocator.create(Self);
    const widget = gui.Widget.init(allocator, rect);
    self.* = Self{
        .widget = widget,
        .allocator = allocator,
        .text = std.ArrayList(u8).init(allocator),
        .glyph_positions = std.ArrayList(nvg.GlyphPosition).init(allocator),
        .background_color = gui.theme_colors.light,
        // Save the default handler before onKeyDownFn is replaced below.
        .base_key_down_fn = widget.onKeyDownFn,
        .blink_timer = gui.Timer{
            .on_elapsed_fn = onBlinkTimerElapsed,
            // Timer context carries the TextBox pointer as an integer;
            // recovered with @intToPtr in onBlinkTimerElapsed.
            .ctx = @ptrToInt(self),
        },
    };
    self.widget.onMouseMoveFn = onMouseMove;
    self.widget.onMouseUpFn = onMouseUp;
    self.widget.onMouseDownFn = onMouseDown;
    self.widget.onKeyDownFn = onKeyDown;
    self.widget.onTextInputFn = onTextInput;
    self.widget.onEnterFn = onEnter;
    self.widget.onLeaveFn = onLeave;
    self.widget.onFocusFn = onFocus;
    self.widget.onBlurFn = onBlur;
    self.widget.drawFn = draw;
    self.widget.focus_policy.keyboard = true;
    self.widget.focus_policy.mouse = true;
    return self;
}

pub fn deinit(self: *Self) void {
    self.text.deinit();
    self.glyph_positions.deinit();
    self.widget.deinit();
    self.allocator.destroy(self);
}

// Notify the owner (if any) that the text changed.
fn onChanged(self: *Self) void {
    if (self.onChangedFn) |onChangedFn| onChangedFn(self);
}

// Maps an x coordinate to the nearest codepoint index using the cached glyph
// positions (filled by draw()); clicking past the last glyph yields the end
// position. NOTE(review): adds relative_rect.x to the event coordinate —
// assumes event coordinates and cached glyph positions share an origin;
// confirm against the gui event coordinate space.
fn getCursorPositionFromMousePosition(self: Self, mouse_position_x: f32) usize {
    const x = mouse_position_x + self.widget.relative_rect.x;
    for (self.glyph_positions.items) |glyph_position, i| {
        // Snap to whichever side of the glyph's midpoint was clicked.
        const glyph_center = 0.5 * (glyph_position.minx + glyph_position.maxx);
        if (x < glyph_center) {
            return i;
        }
    }
    return self.glyph_positions.items.len;
}

// Dragging with the left button extends the selection; plain hovering only
// updates the cursor-position preview.
fn onMouseMove(widget: *gui.Widget, event: *gui.MouseEvent) void {
    var self = @fieldParentPtr(Self, "widget", widget);
    const mouse_position = Point(f32).make(event.x, event.y);
    const cursor_position = self.getCursorPositionFromMousePosition(mouse_position.x);
    if (event.isButtonPressed(.left)) {
        self.cursor_position = cursor_position;
        self.selection_end = cursor_position;
        // Keep the cursor visible while dragging.
        self.blink = false;
        self.blink_timer.start(blink_interval);
    } else {
        self.cursor_position_preview = cursor_position;
        self.show_cursor_position_preview = true;
    }
}

// Click places the cursor and collapses the selection; double-click selects all.
fn onMouseDown(widget: *gui.Widget, event: *gui.MouseEvent) void {
    var self = @fieldParentPtr(Self, "widget", widget);
    const mouse_position = Point(f32).make(event.x, event.y);
    self.cursor_position = self.getCursorPositionFromMousePosition(mouse_position.x);
    self.selection_begin = self.cursor_position;
    self.selection_end = self.cursor_position;
    self.show_cursor_position_preview = false;
    self.blink = true;
    self.blink_timer.start(blink_interval);
    if (event.click_count == 2) {
        self.selectAll();
    }
}

fn onMouseUp(widget: *gui.Widget, event: *const gui.MouseEvent) void {
    _ = event;
    var self = @fieldParentPtr(Self, "widget", widget);
    if (self.hovered) {
        self.blink = true;
        self.blink_timer.start(blink_interval);
    }
}

fn onEnter(widget: *gui.Widget) void {
    var self = @fieldParentPtr(Self, "widget", widget);
    self.hovered = true;
}

fn onLeave(widget: *gui.Widget) void {
    var self = @fieldParentPtr(Self, "widget", widget);
    self.hovered = false;
    self.show_cursor_position_preview = false;
}

// Gaining focus via keyboard (e.g. Tab) selects the whole contents.
fn onFocus(widget: *gui.Widget, event: *gui.FocusEvent) void {
    var self = @fieldParentPtr(Self, "widget", widget);
    self.blink_timer.start(blink_interval);
    if (event.source == .keyboard) self.selectAll();
}

fn onBlur(widget: *gui.Widget, event: *gui.FocusEvent) void {
    _ = event;
    var self = @fieldParentPtr(Self, "widget", widget);
    self.blink_timer.stop();
}

// Keyboard editing: delegates to the widget's base handler first; if that
// accepted the event, does nothing. Handles Ctrl shortcuts (select all, cut,
// copy, paste) and navigation/deletion keys. Unhandled keys are re-ignored so
// they can propagate.
fn onKeyDown(widget: *gui.Widget, event: *gui.KeyEvent) void {
    var self = @fieldParentPtr(Self, "widget", widget);
    self.base_key_down_fn(widget, event);
    if (event.event.is_accepted) return;
    event.event.accept();
    const text_len = std.unicode.utf8CountCodepoints(self.text.items) catch unreachable;
    self.cursor_position = std.math.min(self.cursor_position, text_len); // make sure cursor position is in a valid range
    self.show_cursor_position_preview = false;
    if (event.isModifierPressed(.ctrl)) {
        switch (event.key) {
            .A => self.selectAll(),
            .X => self.cut() catch {}, // TODO: handle error
            .C => self.copy() catch {}, // TODO: handle error
            .V => self.paste() catch {}, // TODO: handle error
            else => event.event.ignore(),
        }
    } else {
        switch (event.key) {
            .Backspace => {
                if (self.hasSelection()) {
                    self.deleteSelection();
                } else if (self.cursor_position > 0) {
                    self.cursor_position -= 1;
                    self.deleteCodepointAt(self.cursor_position);
                }
            },
            .Delete => {
                if (self.hasSelection()) {
                    self.deleteSelection();
                } else {
                    self.deleteCodepointAt(self.cursor_position);
                }
            },
            .Left => {
                if (event.isModifierPressed(.shift)) {
                    // Start a new selection anchored at the cursor if needed.
                    if (!self.hasSelection()) {
                        self.selection_begin = self.cursor_position;
                        self.selection_end = self.cursor_position;
                    }
                    if (self.selection_end > 0) self.selection_end -= 1;
                    self.cursor_position = self.selection_end;
                } else {
                    if (self.hasSelection()) {
                        // Collapse to the left edge of the selection.
                        self.cursor_position = std.math.min(self.selection_begin, self.selection_end);
                        self.clearSelection();
                    } else if (self.cursor_position > 0) self.cursor_position -= 1;
                }
            },
            .Right => {
                if (event.isModifierPressed(.shift)) {
                    if (!self.hasSelection()) {
                        self.selection_begin = self.cursor_position;
                        self.selection_end = self.cursor_position;
                    }
                    if (self.selection_end < text_len) self.selection_end += 1;
                    self.cursor_position = self.selection_end;
                } else {
                    if (self.hasSelection()) {
                        // Collapse to the right edge of the selection.
                        self.cursor_position = std.math.max(self.selection_begin, self.selection_end);
                        self.clearSelection();
                    } else if (self.cursor_position < text_len) self.cursor_position += 1;
                }
            },
            // .Escape => {
            // .Return,
            //     self.widget.setFocus(false, .keyboard);
            // },
            .Home => {
                self.clearSelection();
                self.cursor_position = 0;
            },
            .End => {
                self.clearSelection();
                self.cursor_position = text_len;
            },
            else => event.event.ignore(),
        }
    }
    // Restart the blink cycle so the cursor is visible right after any key.
    self.blink = true;
    self.blink_timer.start(blink_interval);
}

// Inserts typed text at the cursor, replacing any active selection.
// NOTE(review): advances the cursor by exactly 1 — assumes event.text is a
// single codepoint; a multi-codepoint input (e.g. IME commit) would desync the
// cursor. Confirm against the TextInputEvent contract.
fn onTextInput(widget: *gui.Widget, event: *const gui.TextInputEvent) void {
    var self = @fieldParentPtr(Self, "widget", widget);
    if (self.hasSelection()) {
        self.deleteSelection();
    }
    const offset = getCodepointOffset(self.text.items, self.cursor_position);
    self.text.insertSlice(offset, event.text) catch unreachable;
    self.cursor_position += 1;
    self.show_cursor_position_preview = false;
    self.onChanged();
}

// Ctrl+X: copies the selection (or the whole text when nothing is selected)
// to the clipboard, then removes it.
fn cut(self: *Self) !void {
    if (self.widget.getApplication()) |app| {
        if (self.hasSelection()) {
            try app.setClipboardText(self.allocator, self.getSelection());
            self.deleteSelection();
        } else {
            try app.setClipboardText(self.allocator, self.text.items);
            self.text.clearRetainingCapacity();
            self.cursor_position = 0;
            self.clearSelection();
        }
        self.onChanged();
    }
}

// Ctrl+C: copies the selection, or the whole text when nothing is selected.
fn copy(self: *Self) !void {
    if (self.widget.getApplication()) |app| {
        const text = if (self.hasSelection()) self.getSelection() else self.text.items;
        try app.setClipboardText(self.allocator, text);
    }
}

// Ctrl+V: inserts clipboard text at the cursor, replacing any selection.
fn paste(self: *Self) !void {
    if (self.widget.getApplication()) |app| {
        const text = (try app.getClipboardText(self.allocator)) orelse return;
        defer self.allocator.free(text);
        const codepoint_count = try std.unicode.utf8CountCodepoints(text);
        if (self.hasSelection()) {
            self.deleteSelection();
        }
        const offset = getCodepointOffset(self.text.items, self.cursor_position);
        self.text.insertSlice(offset, text) catch unreachable;
        self.cursor_position += codepoint_count;
        // NOTE(review): every other mutation path sets this to false (see
        // onTextInput); `true` here looks unintentional — confirm.
        self.show_cursor_position_preview = true;
        self.onChanged();
    }
}

// Converts a codepoint index into a byte offset into `text`.
fn getCodepointOffset(text: []const u8, position: usize) usize {
    var utf8 = std.unicode.Utf8View.initUnchecked(text).iterator();
    var i: usize = 0;
    while (i < position) {
        _ = utf8.nextCodepointSlice();
        i += 1;
    }
    return utf8.i;
}

// Removes the codepoint at the given codepoint index (no-op past the end).
fn deleteCodepointAt(self: *Self, position: usize) void {
    var utf8 = std.unicode.Utf8View.initUnchecked(self.text.items);
    var utf8_it = utf8.iterator();
    var i: usize = 0;
    while (i < position) {
        _ = utf8_it.nextCodepointSlice();
        i += 1;
    }
    const start = utf8_it.i;
    if (utf8_it.nextCodepointSlice()) |codepoint_slice| {
        const len = codepoint_slice.len;
        self.text.replaceRange(start, len, &.{}) catch unreachable;
    }
    self.onChanged();
}

// Removes the selected codepoint range and moves the cursor accordingly.
// Normalizes the selection so begin <= end first (drag direction may invert it).
fn deleteSelection(self: *Self) void {
    if (self.selection_begin > self.selection_end)
        std.mem.swap(usize, &self.selection_begin, &self.selection_end);
    // Translate codepoint endpoints into byte offsets with a single pass.
    var utf8 = std.unicode.Utf8View.initUnchecked(self.text.items).iterator();
    var i: usize = 0;
    while (i < self.selection_begin) {
        _ = utf8.nextCodepointSlice();
        i += 1;
    }
    const start_i = utf8.i;
    while (i < self.selection_end) {
        _ = utf8.nextCodepointSlice();
        i += 1;
    }
    const end_i = utf8.i;
    self.text.replaceRange(start_i, end_i - start_i, &.{}) catch unreachable;
    // update cursor
    if (self.cursor_position > self.selection_end) {
        self.cursor_position -= self.selection_end - self.selection_begin;
    } else if (self.cursor_position > self.selection_begin) {
        self.cursor_position = self.selection_begin;
    }
    self.selection_end = self.selection_begin;
    self.onChanged();
}

pub fn hasSelection(self: *Self) bool {
    return self.selection_begin != self.selection_end;
}

/// Returns the selected UTF-8 bytes as a view into the internal buffer.
/// Side effect: normalizes the selection so begin <= end.
pub fn getSelection(self: *Self) []const u8 {
    if (self.selection_begin > self.selection_end)
        std.mem.swap(usize, &self.selection_begin, &self.selection_end);
    var utf8 = std.unicode.Utf8View.initUnchecked(self.text.items).iterator();
    var i: usize = 0;
    while (i < self.selection_begin) {
        _ = utf8.nextCodepointSlice();
        i += 1;
    }
    const start_i = utf8.i;
    while (i < self.selection_end) {
        _ = utf8.nextCodepointSlice();
        i += 1;
    }
    const end_i = utf8.i;
    return self.text.items[start_i..end_i];
}

pub fn clearSelection(self: *Self) void {
    self.selection_begin = 0;
    self.selection_end = 0;
}

pub fn selectAll(self: *Self) void {
    self.selection_begin = 0;
    self.selection_end = std.unicode.utf8CountCodepoints(self.text.items) catch unreachable;
    self.cursor_position = self.selection_end;
}

/// Replaces the contents. `text` must be valid UTF-8 (utf8CountCodepoints
/// errors otherwise). Cursor and selection are clamped to the new length.
pub fn setText(self: *Self, text: []const u8) !void {
    const codepoint_count = try std.unicode.utf8CountCodepoints(text);
    try self.text.replaceRange(0, self.text.items.len, text);
    self.cursor_position = std.math.min(self.cursor_position, codepoint_count);
    self.cursor_position_preview = std.math.min(self.cursor_position_preview, codepoint_count);
    self.selection_begin = std.math.min(self.selection_begin, codepoint_count);
    self.selection_end = std.math.min(self.selection_end, codepoint_count);
}

// Renders background, border, selection, text, and cursor(s). Also refreshes
// the glyph_positions cache that the mouse handlers rely on.
pub fn draw(widget: *gui.Widget) void {
    const self = @fieldParentPtr(Self, "widget", widget);
    const rect = widget.relative_rect;
    //drawPanelInset(rect.x - 1, rect.y - 1, rect.w + 2, rect.h + 2, 1);
    const is_focused = widget.isFocused();

    // background
    nvg.beginPath();
    nvg.rect(rect.x + 1, rect.y + 1, rect.w - 2, rect.h - 2);
    nvg.fillColor(self.background_color);
    nvg.fill();

    // border
    nvg.beginPath();
    nvg.rect(rect.x + 0.5, rect.y + 0.5, rect.w - 1, rect.h - 1);
    nvg.strokeColor(if (is_focused) gui.theme_colors.focus else gui.theme_colors.border);
    nvg.stroke();

    // Clip all text/cursor drawing to the inner area.
    nvg.scissor(rect.x + 1, rect.y + 1, rect.w - 2, rect.h - 2);
    nvg.fontFace("guifont");
    //nvg.fontSize(pixelsToPoints(9));
    nvg.fontSize(13);
    var text_align = nvg.TextAlign{ .vertical = .middle };
    const padding = 5;
    var x = rect.x;
    switch (self.text_alignment) {
        .left => {
            text_align.horizontal = .left;
            x += padding;
        },
        .center => {
            text_align.horizontal = .center;
            x += 0.5 * rect.w;
        },
        .right => {
            text_align.horizontal = .right;
            x += rect.w - padding;
        },
    }
    nvg.textAlign(text_align);
    // Refresh the glyph position cache (one entry per codepoint).
    const codepoint_count = std.unicode.utf8CountCodepoints(self.text.items) catch unreachable;
    self.glyph_positions.resize(codepoint_count) catch unreachable;
    nvg.textGlyphPositions(x, rect.y, self.text.items, self.glyph_positions.items);
    var line_height: f32 = undefined;
    var ascender: f32 = undefined;
    var descender: f32 = undefined;
    nvg.textMetrics(&ascender, &descender, &line_height);
    const cursor_h = line_height + 4;
    if (is_focused) self.drawSelection(cursor_h);
    nvg.fillColor(nvg.rgb(0, 0, 0));
    const text_max_x = nvg.text(x, rect.y + 0.5 * rect.h, self.text.items);
    self.drawCursors(cursor_h, text_max_x);
    nvg.resetScissor();
}

// Draws the selection highlight behind the text using cached glyph positions.
fn drawSelection(self: *Self, h: f32) void {
    if (!self.hasSelection()) return;
    const rect = self.widget.relative_rect;
    const min = std.math.min(self.selection_begin, self.selection_end);
    const max = std.math.max(self.selection_begin, self.selection_end);
    const min_x = self.glyph_positions.items[min].minx;
    const max_x = self.glyph_positions.items[max - 1].maxx;
    nvg.beginPath();
    nvg.rect(min_x, rect.y + 0.5 * (rect.h - h), max_x - min_x, h);
    nvg.fillColor(gui.theme_colors.select);
    nvg.fill();
}

// Draws the hover preview cursor (translucent) and, when focused and in the
// visible blink phase, the real cursor. Past-the-end positions fall back to
// the right edge of the drawn text (text_max_x).
fn drawCursors(self: *Self, cursor_h: f32, text_max_x: f32) void {
    const rect = self.widget.relative_rect;
    if (self.show_cursor_position_preview) { // preview
        var cursor_x = text_max_x;
        if (self.cursor_position_preview < self.glyph_positions.items.len)
            cursor_x = self.glyph_positions.items[self.cursor_position_preview].x;
        nvg.beginPath();
        // +0.5 centers the 1px stroke on the pixel grid.
        nvg.moveTo(cursor_x + 0.5, rect.y + 0.5 * (rect.h - cursor_h));
        nvg.lineTo(cursor_x + 0.5, rect.y + 0.5 * (rect.h + cursor_h));
        nvg.strokeColor(nvg.rgba(0, 0, 0, 0x50));
        nvg.stroke();
    }
    if (self.widget.isFocused() and self.blink) {
        var cursor_x = text_max_x;
        if (self.cursor_position < self.glyph_positions.items.len)
            cursor_x = self.glyph_positions.items[self.cursor_position].x;
        nvg.beginPath();
        nvg.moveTo(cursor_x + 0.5, rect.y + 0.5 * (rect.h - cursor_h));
        nvg.lineTo(cursor_x + 0.5, rect.y + 0.5 * (rect.h + cursor_h));
        nvg.strokeColor(nvg.rgb(0, 0, 0));
        nvg.stroke();
    }
}

// Timer callback: toggles cursor visibility. ctx is the TextBox pointer
// stored as an integer in init().
fn onBlinkTimerElapsed(ctx: usize) void {
    const self = @intToPtr(*Self, ctx);
    self.blink = !self.blink;
}

// Cursor blink half-period in milliseconds (presumably ms — the unit is
// defined by gui.Timer; confirm).
const blink_interval = 500;
src/gui/widgets/TextBox.zig
// NOTE(review): machine-generated Win32 COM bindings (zigwin32 style) for the
// empty-volume-cache (Disk Cleanup) and file-reconciliation interfaces.
// EVCF_* flag constants for IEmptyVolumeCache registration/initialization.
pub const EVCF_HASSETTINGS = @as(u32, 1);
pub const EVCF_ENABLEBYDEFAULT = @as(u32, 2);
pub const EVCF_REMOVEFROMLIST = @as(u32, 4);
pub const EVCF_ENABLEBYDEFAULT_AUTO = @as(u32, 8);
pub const EVCF_DONTSHOWIFZERO = @as(u32, 16);
pub const EVCF_SETTINGSMODE = @as(u32, 32);
pub const EVCF_OUTOFDISKSPACE = @as(u32, 64);
pub const EVCF_USERCONSENTOBTAINED = @as(u32, 128);
pub const EVCF_SYSTEMAUTORUN = @as(u32, 256);
pub const EVCCBF_LASTNOTIFICATION = @as(u32, 1);
pub const STATEBITS_FLAT = @as(u32, 1);
// Reconciliation HRESULT success/error codes.
pub const REC_S_IDIDTHEUPDATES = @import("../zig.zig").typedConst(HRESULT, @as(i32, 266240));
pub const REC_S_NOTCOMPLETE = @import("../zig.zig").typedConst(HRESULT, @as(i32, 266241));
pub const REC_S_NOTCOMPLETEBUTPROPAGATE = @import("../zig.zig").typedConst(HRESULT, @as(i32, 266242));
pub const REC_E_ABORTED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147217408));
pub const REC_E_NOCALLBACK = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147217407));
pub const REC_E_NORESIDUES = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147217406));
pub const REC_E_TOODIFFERENT = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147217405));
pub const REC_E_INEEDTODOTHEUPDATES = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2147217404));

//--------------------------------------------------------------------------------
// Section: Types (9)
//--------------------------------------------------------------------------------
// TODO: this type is limited to platform 'windows5.0'
const IID_IEmptyVolumeCacheCallBack_Value = @import("../zig.zig").Guid.initString("6e793361-73c6-11d0-8469-00aa00442901");
pub const IID_IEmptyVolumeCacheCallBack = &IID_IEmptyVolumeCacheCallBack_Value;
pub const IEmptyVolumeCacheCallBack = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        ScanProgress: fn(
            self: *const IEmptyVolumeCacheCallBack,
            dwlSpaceUsed: u64,
            dwFlags: u32,
            pcwszStatus: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        PurgeProgress: fn(
            self: *const IEmptyVolumeCacheCallBack,
            dwlSpaceFreed: u64,
            dwlSpaceToFree: u64,
            dwFlags: u32,
            pcwszStatus: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEmptyVolumeCacheCallBack_ScanProgress(self: *const T, dwlSpaceUsed: u64, dwFlags: u32, pcwszStatus: ?[*:0]const u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEmptyVolumeCacheCallBack.VTable, self.vtable).ScanProgress(@ptrCast(*const IEmptyVolumeCacheCallBack, self), dwlSpaceUsed, dwFlags, pcwszStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEmptyVolumeCacheCallBack_PurgeProgress(self: *const T, dwlSpaceFreed: u64, dwlSpaceToFree: u64, dwFlags: u32, pcwszStatus: ?[*:0]const u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEmptyVolumeCacheCallBack.VTable, self.vtable).PurgeProgress(@ptrCast(*const IEmptyVolumeCacheCallBack, self), dwlSpaceFreed, dwlSpaceToFree, dwFlags, pcwszStatus);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.0'
const IID_IEmptyVolumeCache_Value = @import("../zig.zig").Guid.initString("8fce5227-04da-11d1-a004-00805f8abe06");
pub const IID_IEmptyVolumeCache = &IID_IEmptyVolumeCache_Value;
pub const IEmptyVolumeCache = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Initialize: fn(
            self: *const IEmptyVolumeCache,
            hkRegKey: ?HKEY,
            pcwszVolume: ?[*:0]const u16,
            ppwszDisplayName: ?*?PWSTR,
            ppwszDescription: ?*?PWSTR,
            pdwFlags: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSpaceUsed: fn(
            self: *const IEmptyVolumeCache,
            pdwlSpaceUsed: ?*u64,
            picb: ?*IEmptyVolumeCacheCallBack,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Purge: fn(
            self: *const IEmptyVolumeCache,
            dwlSpaceToFree: u64,
            picb: ?*IEmptyVolumeCacheCallBack,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ShowProperties: fn(
            self: *const IEmptyVolumeCache,
            hwnd: ?HWND,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Deactivate: fn(
            self: *const IEmptyVolumeCache,
            pdwFlags: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEmptyVolumeCache_Initialize(self: *const T, hkRegKey: ?HKEY, pcwszVolume: ?[*:0]const u16, ppwszDisplayName: ?*?PWSTR, ppwszDescription: ?*?PWSTR, pdwFlags: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEmptyVolumeCache.VTable, self.vtable).Initialize(@ptrCast(*const IEmptyVolumeCache, self), hkRegKey, pcwszVolume, ppwszDisplayName, ppwszDescription, pdwFlags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEmptyVolumeCache_GetSpaceUsed(self: *const T, pdwlSpaceUsed: ?*u64, picb: ?*IEmptyVolumeCacheCallBack) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEmptyVolumeCache.VTable, self.vtable).GetSpaceUsed(@ptrCast(*const IEmptyVolumeCache, self), pdwlSpaceUsed, picb);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEmptyVolumeCache_Purge(self: *const T, dwlSpaceToFree: u64, picb: ?*IEmptyVolumeCacheCallBack) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEmptyVolumeCache.VTable, self.vtable).Purge(@ptrCast(*const IEmptyVolumeCache, self), dwlSpaceToFree, picb);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEmptyVolumeCache_ShowProperties(self: *const T, hwnd: ?HWND) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEmptyVolumeCache.VTable, self.vtable).ShowProperties(@ptrCast(*const IEmptyVolumeCache, self), hwnd);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEmptyVolumeCache_Deactivate(self: *const T, pdwFlags: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEmptyVolumeCache.VTable, self.vtable).Deactivate(@ptrCast(*const IEmptyVolumeCache, self), pdwFlags);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.0'
const IID_IEmptyVolumeCache2_Value = @import("../zig.zig").Guid.initString("02b7e3ba-4db3-11d2-b2d9-00c04f8eec8c");
pub const IID_IEmptyVolumeCache2 = &IID_IEmptyVolumeCache2_Value;
pub const IEmptyVolumeCache2 = extern struct {
    pub const VTable = extern struct {
        base: IEmptyVolumeCache.VTable,
        InitializeEx: fn(
            self: *const IEmptyVolumeCache2,
            hkRegKey: ?HKEY,
            pcwszVolume: ?[*:0]const u16,
            pcwszKeyName: ?[*:0]const u16,
            ppwszDisplayName: ?*?PWSTR,
            ppwszDescription: ?*?PWSTR,
            ppwszBtnText: ?*?PWSTR,
            pdwFlags: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IEmptyVolumeCache.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEmptyVolumeCache2_InitializeEx(self: *const T, hkRegKey: ?HKEY, pcwszVolume: ?[*:0]const u16, pcwszKeyName: ?[*:0]const u16, ppwszDisplayName: ?*?PWSTR, ppwszDescription: ?*?PWSTR, ppwszBtnText: ?*?PWSTR, pdwFlags: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEmptyVolumeCache2.VTable, self.vtable).InitializeEx(@ptrCast(*const IEmptyVolumeCache2, self), hkRegKey, pcwszVolume, pcwszKeyName, ppwszDisplayName, ppwszDescription, ppwszBtnText, pdwFlags);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IReconcileInitiator_Value = @import("../zig.zig").Guid.initString("99180161-da16-101a-935c-444553540000");
pub const IID_IReconcileInitiator = &IID_IReconcileInitiator_Value;
pub const IReconcileInitiator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        SetAbortCallback: fn(
            self: *const IReconcileInitiator,
            punkForAbort: ?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetProgressFeedback: fn(
            self: *const IReconcileInitiator,
            ulProgress: u32,
            ulProgressMax: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IReconcileInitiator_SetAbortCallback(self: *const T, punkForAbort: ?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IReconcileInitiator.VTable, self.vtable).SetAbortCallback(@ptrCast(*const IReconcileInitiator, self), punkForAbort);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IReconcileInitiator_SetProgressFeedback(self: *const T, ulProgress: u32, ulProgressMax: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IReconcileInitiator.VTable, self.vtable).SetProgressFeedback(@ptrCast(*const IReconcileInitiator, self), ulProgress, ulProgressMax);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Flag bits for IReconcilableObject.Reconcile's dwFlags parameter.
pub const _reconcilef = enum(i32) {
    RECONCILEF_MAYBOTHERUSER = 1,
    RECONCILEF_FEEDBACKWINDOWVALID = 2,
    RECONCILEF_NORESIDUESOK = 4,
    RECONCILEF_OMITSELFRESIDUE = 8,
    RECONCILEF_RESUMERECONCILIATION = 16,
    RECONCILEF_YOUMAYDOTHEUPDATES = 32,
    RECONCILEF_ONLYYOUWERECHANGED = 64,
    ALL_RECONCILE_FLAGS = 127,
};
pub const RECONCILEF_MAYBOTHERUSER = _reconcilef.RECONCILEF_MAYBOTHERUSER;
pub const RECONCILEF_FEEDBACKWINDOWVALID = _reconcilef.RECONCILEF_FEEDBACKWINDOWVALID;
pub const RECONCILEF_NORESIDUESOK = _reconcilef.RECONCILEF_NORESIDUESOK;
pub const RECONCILEF_OMITSELFRESIDUE = _reconcilef.RECONCILEF_OMITSELFRESIDUE;
pub const RECONCILEF_RESUMERECONCILIATION = _reconcilef.RECONCILEF_RESUMERECONCILIATION;
pub const RECONCILEF_YOUMAYDOTHEUPDATES = _reconcilef.RECONCILEF_YOUMAYDOTHEUPDATES;
pub const RECONCILEF_ONLYYOUWERECHANGED = _reconcilef.RECONCILEF_ONLYYOUWERECHANGED;
pub const ALL_RECONCILE_FLAGS = _reconcilef.ALL_RECONCILE_FLAGS;

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IReconcilableObject_Value = @import("../zig.zig").Guid.initString("99180162-da16-101a-935c-444553540000");
pub const IID_IReconcilableObject = &IID_IReconcilableObject_Value;
pub const IReconcilableObject = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Reconcile: fn(
            self: *const IReconcilableObject,
            pInitiator: ?*IReconcileInitiator,
            dwFlags: u32,
            hwndOwner: ?HWND,
            hwndProgressFeedback: ?HWND,
            ulcInput: u32,
            rgpmkOtherInput: [*]?*IMoniker,
            plOutIndex: ?*i32,
            pstgNewResidues: ?*IStorage,
            pvReserved: ?*anyopaque,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetProgressFeedbackMaxEstimate: fn(
            self: *const IReconcilableObject,
            pulProgressMax: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IReconcilableObject_Reconcile(self: *const T, pInitiator: ?*IReconcileInitiator, dwFlags: u32, hwndOwner: ?HWND, hwndProgressFeedback: ?HWND, ulcInput: u32, rgpmkOtherInput: [*]?*IMoniker, plOutIndex: ?*i32, pstgNewResidues: ?*IStorage, pvReserved: ?*anyopaque) callconv(.Inline) HRESULT {
            return @ptrCast(*const IReconcilableObject.VTable, self.vtable).Reconcile(@ptrCast(*const IReconcilableObject, self), pInitiator, dwFlags, hwndOwner, hwndProgressFeedback, ulcInput, rgpmkOtherInput, plOutIndex, pstgNewResidues, pvReserved);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        // NOTE(review): this chunk ends mid-expression; the remainder of this
        // declaration continues past the end of the visible source.
        pub fn IReconcilableObject_GetProgressFeedbackMaxEstimate(self: *const T, pulProgressMax: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const
IReconcilableObject.VTable, self.vtable).GetProgressFeedbackMaxEstimate(@ptrCast(*const IReconcilableObject, self), pulProgressMax); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IBriefcaseInitiator_Value = @import("../zig.zig").Guid.initString("99180164-da16-101a-935c-444553540000"); pub const IID_IBriefcaseInitiator = &IID_IBriefcaseInitiator_Value; pub const IBriefcaseInitiator = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, IsMonikerInBriefcase: fn( self: *const IBriefcaseInitiator, pmk: ?*IMoniker, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IBriefcaseInitiator_IsMonikerInBriefcase(self: *const T, pmk: ?*IMoniker) callconv(.Inline) HRESULT { return @ptrCast(*const IBriefcaseInitiator.VTable, self.vtable).IsMonikerInBriefcase(@ptrCast(*const IBriefcaseInitiator, self), pmk); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.0' const IID_IActiveDesktopP_Value = @import("../zig.zig").Guid.initString("52502ee0-ec80-11d0-89ab-00c04fc2972d"); pub const IID_IActiveDesktopP = &IID_IActiveDesktopP_Value; pub const IActiveDesktopP = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, SetSafeMode: fn( self: *const IActiveDesktopP, dwFlags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, EnsureUpdateHTML: fn( self: *const IActiveDesktopP, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetScheme: fn( self: *const IActiveDesktopP, pwszSchemeName: ?[*:0]const u16, dwFlags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetScheme: fn( self: *const IActiveDesktopP, pwszSchemeName: [*:0]u16, pdwcchBuffer: ?*u32, dwFlags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn 
MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IActiveDesktopP_SetSafeMode(self: *const T, dwFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IActiveDesktopP.VTable, self.vtable).SetSafeMode(@ptrCast(*const IActiveDesktopP, self), dwFlags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IActiveDesktopP_EnsureUpdateHTML(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IActiveDesktopP.VTable, self.vtable).EnsureUpdateHTML(@ptrCast(*const IActiveDesktopP, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IActiveDesktopP_SetScheme(self: *const T, pwszSchemeName: ?[*:0]const u16, dwFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IActiveDesktopP.VTable, self.vtable).SetScheme(@ptrCast(*const IActiveDesktopP, self), pwszSchemeName, dwFlags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IActiveDesktopP_GetScheme(self: *const T, pwszSchemeName: [*:0]u16, pdwcchBuffer: ?*u32, dwFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IActiveDesktopP.VTable, self.vtable).GetScheme(@ptrCast(*const IActiveDesktopP, self), pwszSchemeName, pdwcchBuffer, dwFlags); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.0' const IID_IADesktopP2_Value = @import("../zig.zig").Guid.initString("b22754e2-4574-11d1-9888-006097deacf9"); pub const IID_IADesktopP2 = &IID_IADesktopP2_Value; pub const IADesktopP2 = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, ReReadWallpaper: fn( self: *const IADesktopP2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetADObjectFlags: fn( self: *const IADesktopP2, pdwFlags: ?*u32, dwMask: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, 
UpdateAllDesktopSubscriptions: fn( self: *const IADesktopP2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, MakeDynamicChanges: fn( self: *const IADesktopP2, pOleObj: ?*IOleObject, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IADesktopP2_ReReadWallpaper(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IADesktopP2.VTable, self.vtable).ReReadWallpaper(@ptrCast(*const IADesktopP2, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IADesktopP2_GetADObjectFlags(self: *const T, pdwFlags: ?*u32, dwMask: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IADesktopP2.VTable, self.vtable).GetADObjectFlags(@ptrCast(*const IADesktopP2, self), pdwFlags, dwMask); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IADesktopP2_UpdateAllDesktopSubscriptions(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IADesktopP2.VTable, self.vtable).UpdateAllDesktopSubscriptions(@ptrCast(*const IADesktopP2, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IADesktopP2_MakeDynamicChanges(self: *const T, pOleObj: ?*IOleObject) callconv(.Inline) HRESULT { return @ptrCast(*const IADesktopP2.VTable, self.vtable).MakeDynamicChanges(@ptrCast(*const IADesktopP2, self), pOleObj); } };} pub usingnamespace MethodMixin(@This()); }; //-------------------------------------------------------------------------------- // Section: Functions (0) //-------------------------------------------------------------------------------- //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const 
thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (8) //-------------------------------------------------------------------------------- const HKEY = @import("../system/registry.zig").HKEY; const HRESULT = @import("../foundation.zig").HRESULT; const HWND = @import("../foundation.zig").HWND; const IMoniker = @import("../system/com.zig").IMoniker; const IOleObject = @import("../system/ole.zig").IOleObject; const IStorage = @import("../system/com/structured_storage.zig").IStorage; const IUnknown = @import("../system/com.zig").IUnknown; const PWSTR = @import("../foundation.zig").PWSTR; test { @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
win32/ui/legacy_windows_environment_features.zig
// LoLa standard runtime library: host-side builtin functions (Print, Exit, ReadFile,
// FileExists, WriteFile, CreateList, CreateDictionary) plus the LoLaList and
// LoLaDictionary object classes that back them. Objects follow the ObjectPool
// protocol: getMethod / destroyObject / serializeObject / deserializeObject.
// Ownership rule used throughout: arguments are clone()d before storage and
// every stored Value is deinit()ed exactly once. Code below is byte-identical.
const std = @import("std"); const builtin = @import("builtin"); const lola = @import("../main.zig"); const root = @import("root"); const GlobalObjectPool = if (builtin.is_test) // we need to do a workaround here for testing purposes lola.runtime.ObjectPool([_]type{ LoLaList, LoLaDictionary, }) else if (@hasDecl(root, "ObjectPool")) root.ObjectPool else @compileError("Please define and use a global ObjectPool type to use the runtime classes."); comptime { if (builtin.is_test) { const T = lola.runtime.ObjectPool([_]type{ LoLaList, LoLaDictionary, }); if (!T.serializable) @compileError("Both LoLaList and LoLaDictionary must be serializable!"); } } /// empty compile unit for testing purposes const empty_compile_unit = lola.CompileUnit{ .arena = std.heap.ArenaAllocator.init(std.testing.failing_allocator), .comment = "empty compile unit", .globalCount = 0, .temporaryCount = 0, .code = "", .functions = &[0]lola.CompileUnit.Function{}, .debugSymbols = &[0]lola.CompileUnit.DebugSymbol{}, }; test "runtime.install" { var pool = GlobalObjectPool.init(std.testing.allocator); defer pool.deinit(); var env = try lola.runtime.Environment.init(std.testing.allocator, &empty_compile_unit, pool.interface()); defer env.deinit(); try env.installModule(@This(), lola.runtime.Context.null_pointer); } // fn Sleep(call_context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.AsyncFunctionCall { // const allocator = call_context.get(std.mem.Allocator); // if (args.len != 1) // return error.InvalidArgs; // const seconds = try args[0].toNumber(); // const Context = struct { // allocator: std.mem.Allocator, // end_time: f64, // }; // const ptr = try allocator.create(Context); // ptr.* = Context{ // .allocator = allocator, // .end_time = @intToFloat(f64, std.time.milliTimestamp()) + 1000.0 * seconds, // }; // return lola.runtime.AsyncFunctionCall{ // .context = lola.runtime.Context.init(Context, ptr), // .destructor = struct { // fn dtor(exec_context:
lola.runtime.Context) void { // const ctx = exec_context.get(Context); // ctx.allocator.destroy(ctx); // } // }.dtor, // .execute = struct { // fn execute(exec_context: lola.runtime.Context) anyerror!?lola.runtime.Value { // const ctx = exec_context.get(Context); // if (ctx.end_time < @intToFloat(f64, std.time.milliTimestamp())) { // return .void; // } else { // return null; // } // } // }.execute, // }; // } pub fn Print(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; _ = context; var stdout = std.io.getStdOut().writer(); for (args) |value| { switch (value) { .string => |str| try stdout.writeAll(str.contents), else => try stdout.print("{}", .{value}), } } try stdout.writeAll("\n"); return .void; } pub fn Exit(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; _ = context; if (args.len != 1) return error.InvalidArgs; const status = try args[0].toInteger(u8); std.process.exit(status); } pub fn ReadFile(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; _ = context; if (args.len != 1) return error.InvalidArgs; const path = try args[0].toString(); var file = std.fs.cwd().openFile(path, .{ .mode = .read_only }) catch return .void; defer file.close(); // 2 GB var contents = try file.readToEndAlloc(environment.allocator, 2 << 30); return lola.runtime.Value.fromString(lola.runtime.String.initFromOwned(environment.allocator, contents)); } pub fn FileExists(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; _ = context; if (args.len != 1) return error.InvalidArgs; const path = try args[0].toString(); var file = std.fs.cwd().openFile(path, .{ .mode =
.read_only }) catch return lola.runtime.Value.initBoolean(false); file.close(); return lola.runtime.Value.initBoolean(true); } pub fn WriteFile(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; _ = context; if (args.len != 2) return error.InvalidArgs; const path = try args[0].toString(); const value = try args[1].toString(); var file = try std.fs.cwd().createFile(path, .{}); defer file.close(); try file.writeAll(value); return .void; } pub fn CreateList(environment: *lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = context; if (args.len > 1) return error.InvalidArgs; if (args.len > 0) _ = try args[0].toArray(); const list = try environment.allocator.create(LoLaList); errdefer environment.allocator.destroy(list); list.* = LoLaList{ .allocator = environment.allocator, .data = std.ArrayList(lola.runtime.Value).init(environment.allocator), }; if (args.len > 0) { const array = args[0].toArray() catch unreachable; errdefer list.data.deinit(); try list.data.resize(array.contents.len); for (list.data.items) |*item| { item.* = .void; } errdefer for (list.data.items) |*item| { item.deinit(); }; for (list.data.items) |*item, index| { item.* = try array.contents[index].clone(); } } return lola.runtime.Value.initObject( try environment.objectPool.castTo(GlobalObjectPool).createObject(list), ); } pub fn CreateDictionary(environment: *lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = context; if (args.len != 0) return error.InvalidArgs; const list = try environment.allocator.create(LoLaDictionary); errdefer environment.allocator.destroy(list); list.* = LoLaDictionary{ .allocator = environment.allocator, .data = std.ArrayList(LoLaDictionary.KV).init(environment.allocator), }; return lola.runtime.Value.initObject( try
environment.objectPool.castTo(GlobalObjectPool).createObject(list), ); } pub const LoLaList = struct { const Self = @This(); allocator: std.mem.Allocator, data: std.ArrayList(lola.runtime.Value), pub fn getMethod(self: *Self, name: []const u8) ?lola.runtime.Function { inline for (comptime std.meta.declarations(funcs)) |decl| { if (std.mem.eql(u8, name, decl.name)) { return lola.runtime.Function{ .syncUser = .{ .context = lola.runtime.Context.make(*Self, self), .call = @field(funcs, decl.name), .destructor = null, }, }; } } return null; } pub fn destroyObject(self: *Self) void { for (self.data.items) |*item| { item.deinit(); } self.data.deinit(); self.allocator.destroy(self); } pub fn serializeObject(writer: lola.runtime.OutputStream.Writer, object: *Self) !void { try writer.writeIntLittle(u32, @intCast(u32, object.data.items.len)); for (object.data.items) |item| { try item.serialize(writer); } } pub fn deserializeObject(allocator: std.mem.Allocator, reader: lola.runtime.InputStream.Reader) !*Self { const item_count = try reader.readIntLittle(u32); var list = try allocator.create(Self); list.* = Self{ .allocator = allocator, .data = std.ArrayList(lola.runtime.Value).init(allocator), }; errdefer list.destroyObject(); // this will also free memory!
// NOTE(review): items are value-initialized to .void before deserializing so that
// the errdefer'd destroyObject stays safe if a later Value.deserialize fails.
try list.data.resize(item_count); // sane init to make destroyObject not explode // (deinit a void value is a no-op) for (list.data.items) |*item| { item.* = .void; } for (list.data.items) |*item| { item.* = try lola.runtime.Value.deserialize(reader, allocator); } return list; } const funcs = struct { fn Add(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const list = context.cast(*Self); if (args.len != 1) return error.InvalidArgs; var cloned = try args[0].clone(); errdefer cloned.deinit(); try list.data.append(cloned); return .void; } fn Remove(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const list = context.cast(*Self); if (args.len != 1) return error.InvalidArgs; const value = args[0]; var src_index: usize = 0; var dst_index: usize = 0; while (src_index < list.data.items.len) : (src_index += 1) { const eql = list.data.items[src_index].eql(value); if (eql) { // When the element is equal, we destroy and remove it. // std.debug.print("deinit {} ({})\n", .{ // src_index, // list.data.items[src_index], // }); list.data.items[src_index].deinit(); } else { // Otherwise, we move the object to the front of the list skipping // the already removed elements. // std.debug.print("move {} ({}) → {} ({})\n", .{ // src_index, // list.data.items[src_index], // dst_index, // list.data.items[dst_index], // }); if (src_index > dst_index) { list.data.items[dst_index] = list.data.items[src_index]; } dst_index += 1; } } // note: // we don't need to deinit() excess values here as we moved them // above, so they are "twice" in the list.
list.data.shrinkRetainingCapacity(dst_index); return .void; } fn RemoveAt(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const list = context.cast(*Self); if (args.len != 1) return error.InvalidArgs; const index = try args[0].toInteger(usize); if (index < list.data.items.len) { list.data.items[index].deinit(); std.mem.copy( lola.runtime.Value, list.data.items[index..], list.data.items[index + 1 ..], ); list.data.shrinkRetainingCapacity(list.data.items.len - 1); } return .void; } fn GetCount(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const list = context.cast(*Self); if (args.len != 0) return error.InvalidArgs; return lola.runtime.Value.initInteger(usize, list.data.items.len); } fn GetItem(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const list = context.cast(*Self); if (args.len != 1) return error.InvalidArgs; const index = try args[0].toInteger(usize); if (index >= list.data.items.len) return error.OutOfRange; return try list.data.items[index].clone(); } fn SetItem(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const list = context.cast(*Self); if (args.len != 2) return error.InvalidArgs; const index = try args[0].toInteger(usize); if (index >= list.data.items.len) return error.OutOfRange; var cloned = try args[1].clone(); list.data.items[index].replaceWith(cloned); return .void; } fn ToArray(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const list = context.cast(*Self); if (args.len != 0) return
error.InvalidArgs; var array = try lola.runtime.Array.init(list.allocator, list.data.items.len); errdefer array.deinit(); for (array.contents) |*item, index| { item.* = try list.data.items[index].clone(); } return lola.runtime.Value.fromArray(array); } fn IndexOf(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const list = context.cast(*Self); if (args.len != 1) return error.InvalidArgs; for (list.data.items) |item, index| { if (item.eql(args[0])) return lola.runtime.Value.initInteger(usize, index); } return .void; } fn Resize(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const list = context.cast(*Self); if (args.len != 1) return error.InvalidArgs; const new_size = try args[0].toInteger(usize); const old_size = list.data.items.len; if (old_size > new_size) { for (list.data.items[new_size..]) |*item| { item.deinit(); } list.data.shrinkAndFree(new_size); } else if (new_size > old_size) { try list.data.resize(new_size); for (list.data.items[old_size..]) |*item| { item.* = .void; } } return .void; } fn Clear(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const list = context.cast(*Self); if (args.len != 0) return error.InvalidArgs; for (list.data.items) |*item| { item.deinit(); } list.data.shrinkAndFree(0); return .void; } }; }; pub const LoLaDictionary = struct { const Self = @This(); const KV = struct { key: lola.runtime.Value, value: lola.runtime.Value, fn deinit(self: *KV) void { self.key.deinit(); self.value.deinit(); self.* = undefined; } }; allocator: std.mem.Allocator, data: std.ArrayList(KV), pub fn getMethod(self: *Self, name: []const u8) ?lola.runtime.Function { inline for (comptime std.meta.declarations(funcs)) |decl| { if
(std.mem.eql(u8, name, decl.name)) { return lola.runtime.Function{ .syncUser = .{ .context = lola.runtime.Context.make(*Self, self), .call = @field(funcs, decl.name), .destructor = null, }, }; } } return null; } pub fn destroyObject(self: *Self) void { for (self.data.items) |*item| { item.deinit(); } self.data.deinit(); self.allocator.destroy(self); } pub fn serializeObject(writer: lola.runtime.OutputStream.Writer, object: *Self) !void { try writer.writeIntLittle(u32, @intCast(u32, object.data.items.len)); for (object.data.items) |item| { try item.key.serialize(writer); try item.value.serialize(writer); } } pub fn deserializeObject(allocator: std.mem.Allocator, reader: lola.runtime.InputStream.Reader) !*Self { const item_count = try reader.readIntLittle(u32); var list = try allocator.create(Self); list.* = Self{ .allocator = allocator, .data = std.ArrayList(KV).init(allocator), }; errdefer list.destroyObject(); // this will also free memory! try list.data.resize(item_count); // sane init to make destroyObject not explode // (deinit a void value is a no-op) for (list.data.items) |*item| { item.* = KV{ .key = .void, .value = .void, }; } for (list.data.items) |*item| { item.key = try lola.runtime.Value.deserialize(reader, allocator); item.value = try lola.runtime.Value.deserialize(reader, allocator); } return list; } const funcs = struct { fn Set(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { const dict = context.cast(*Self); if (args.len != 2) return error.InvalidArgs; if (args[1] == .void) { // short-circuit a argument `void` to a call to `Remove(key)` var result = try Remove(environment, context, args[0..1]); result.deinit(); return .void; } var value = try args[1].clone(); errdefer value.deinit(); for (dict.data.items) |*item| { if (item.key.eql(args[0])) { item.value.replaceWith(value); return .void; } } var key = try args[0].clone(); errdefer key.deinit(); try
dict.data.append(KV{ .key = key, .value = value, }); return .void; } fn Get(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const dict = context.cast(*Self); if (args.len != 1) return error.InvalidArgs; for (dict.data.items) |item| { if (item.key.eql(args[0])) { return try item.value.clone(); } } return .void; } fn Contains(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const dict = context.cast(*Self); if (args.len != 1) return error.InvalidArgs; for (dict.data.items) |item| { if (item.key.eql(args[0])) { return lola.runtime.Value.initBoolean(true); } } return lola.runtime.Value.initBoolean(false); } fn Remove(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const dict = context.cast(*Self); if (args.len != 1) return error.InvalidArgs; for (dict.data.items) |*item, index| { if (item.key.eql(args[0])) { // use a fast swap-remove here item.deinit(); const last_index = dict.data.items.len - 1; dict.data.items[index] = dict.data.items[last_index]; dict.data.shrinkRetainingCapacity(last_index); return lola.runtime.Value.initBoolean(true); } } return lola.runtime.Value.initBoolean(false); } fn Clear(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const dict = context.cast(*Self); if (args.len != 0) return error.InvalidArgs; for (dict.data.items) |*item| { item.deinit(); } dict.data.shrinkAndFree(0); return .void; } fn GetCount(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const dict = context.cast(*Self); if (args.len != 0)
return error.InvalidArgs; return lola.runtime.Value.initInteger(usize, dict.data.items.len); } fn GetKeys(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const dict = context.cast(*Self); if (args.len != 0) return error.InvalidArgs; var arr = try lola.runtime.Array.init(dict.allocator, dict.data.items.len); errdefer arr.deinit(); for (dict.data.items) |item, index| { arr.contents[index].replaceWith(try item.key.clone()); } return lola.runtime.Value.fromArray(arr); } fn GetValues(environment: *const lola.runtime.Environment, context: lola.runtime.Context, args: []const lola.runtime.Value) anyerror!lola.runtime.Value { _ = environment; const dict = context.cast(*Self); if (args.len != 0) return error.InvalidArgs; var arr = try lola.runtime.Array.init(dict.allocator, dict.data.items.len); errdefer arr.deinit(); for (dict.data.items) |item, index| { arr.contents[index].replaceWith(try item.value.clone()); } return lola.runtime.Value.fromArray(arr); } }; };
src/library/libraries/runtime.zig
//! Machine-mode trap handling: services timer interrupts and dumps a
//! register/CSR report for fatal exceptions, then halts.
const clint = @import("clint.zig");
const debug = @import("debug.zig");

// Human-readable names for mcause exception codes 0..15, indexed directly by
// the code (RISC-V privileged spec, machine cause register table).
const exceptions = [_][]const u8{
    "Instruction address misaligned",
    "Instruction access fault",
    "Illegal instruction",
    "Breakpoint",
    "Load address misaligned",
    "Load access fault",
    "Store/AMO address misaligned",
    "Store/AMO access fault",
    "Environment call from U-Mode",
    "Environment call from S-Mode",
    "Reserved",
    "Environment call from M-Mode",
    "Instruction page fault",
    "Load page fault",
    "Reserved",
    "Store/AMO page fault",
};

// Privilege-mode names indexed by the 2-bit field extracted from mstatus
// bits 12:11 (MPP) below.
const modes = [_][]const u8{
    "U-mode",
    "S-mode",
    "Reserved",
    "M-mode",
};

// ABI names of the 32 integer registers, in x0..x31 order, used when
// pretty-printing the saved frame.
const registers = [_][]const u8{
    "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
    "s0",   "s1", "a0", "a1", "a2", "a3", "a4", "a5",
    "a6",   "a7", "s2", "s3", "s4", "s5", "s6", "s7",
    "s8",   "s9", "s10", "s11", "t3", "t4", "t5", "t6",
};

// TODO: move to `csr.zig` and prefix with `mcause`?
// mcause: top bit set means interrupt; remaining bits carry the cause code.
const interrupt_mask = 1 << 31;
const exception_code_mask = (1 << 31) - 1;

// CPU state handed to the handler; presumably pushed by the assembly trap
// stub in x0..x31 order followed by the three CSRs — TODO confirm against
// the trap vector code.
const Frame = packed struct {
    registers: [32]usize,
    mepc: usize,
    mstatus: usize,
    mtval: usize,
};

// mcause interrupt codes; non-exhaustive (`_`) so unknown codes still
// round-trip through @intToEnum.
const Interrupt = enum(usize) {
    user_software = 0,
    supervisor_software = 1,
    machine_software = 3,
    user_timer = 4,
    supervisor_timer = 5,
    machine_timer = 7,
    user_external = 8,
    supervisor_external = 9,
    machine_external = 11,
    _,
};

// Entry point called (by name) from the trap vector. Interrupts: only the
// machine timer is handled (re-armed via the CLINT); everything else is
// logged and ignored. Exceptions: print a diagnostic dump and spin forever.
export fn machineTrapHandler(cause: usize, frame: Frame) void {
    if (cause & interrupt_mask == interrupt_mask) {
        const code = @intToEnum(Interrupt, cause & exception_code_mask);
        switch (code) {
            .machine_timer => {
                debug.println("M-mode timer interrupt!", .{});
                clint.setTimer(clint.default_delta);
            },
            else => {
                debug.println("Unhandled interrupt", .{});
            },
        }
        return;
    }
    debug.println("\x1b[97;104m", .{});
    debug.println("{0} An exception has occurred :( {0}", .{"=" ** 16});
    // Print the exception name, code, and the privilege mode
    const name = if (cause < exceptions.len) exceptions[cause] else "Reserved";
    const mode = frame.mstatus >> 11 & 0b11;
    debug.println("{} (code: {}) from {}\n", .{ name, cause, modes[mode] });
    // Print registers
    debug.println("Registers:", .{});
    var i: usize = 0;
    while (i < registers.len) : (i += 1) {
        // four registers per row
        const sep = if (i % 4 == 3) "\n" else " ";
        debug.print("{:<4} = 0x{x:<8}{}", .{ registers[i], frame.registers[i], sep });
    }
    debug.println("", .{});
    // Print CSRs
    debug.println("Control and Status Registers:", .{});
    debug.println("mepc = 0x{x}", .{frame.mepc});
    debug.println("mstatus = 0x{x}", .{frame.mstatus});
    debug.print("mtval = 0x{x}", .{frame.mtval});
    debug.println("\x1b[0m", .{});
    // Halt
    asm volatile ("1: j 1b");
}
src/trap.zig
--- src/link/Elf.zig.orig 2021-09-07 02:41:54 UTC +++ src/link/Elf.zig @@ -3353,19 +3353,22 @@ const CsuObjects = struct { const crt_dir_path = lci.crt_dir orelse return error.LibCInstallationMissingCRTDir; switch (link_options.target.os.tag) { .dragonfly => { + const gcc_dir_path = "@GCC_TARGET_LIBDIR@"; if (result.crt0) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ crt_dir_path, obj.* }); if (result.crti) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ crt_dir_path, obj.* }); if (result.crtn) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ crt_dir_path, obj.* }); - var gccv: []const u8 = undefined; - if (link_options.target.os.version_range.semver.isAtLeast(.{ .major = 5, .minor = 4 }) orelse true) { - gccv = "gcc80"; - } else { - gccv = "gcc54"; - } + if (result.crtbegin) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ gcc_dir_path, obj.* }); + if (result.crtend) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ gcc_dir_path, obj.* }); + }, + .freebsd => { + const gcc_dir_path = "@GCC_TARGET_LIBDIR@"; + if (result.crt0) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ crt_dir_path, obj.* }); + if (result.crti) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ crt_dir_path, obj.* }); + if (result.crtn) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ crt_dir_path, obj.* }); - if (result.crtbegin) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ crt_dir_path, gccv, obj.* }); - if (result.crtend) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ crt_dir_path, gccv, obj.* }); + if (result.crtbegin) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ gcc_dir_path, obj.* }); + if (result.crtend) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ gcc_dir_path, obj.* }); }, .haiku => { const gcc_dir_path = lci.gcc_dir orelse return error.LibCInstallationMissingCRTDir;
bucket_BC/zig/patches/patch-src_link_Elf.zig
const std = @import("std");
const TestContext = @import("../../src/test.zig").TestContext;
const build_options = @import("build_options");

// These tests should work with all platforms, but we're using linux_x64 for
// now for consistency. Will be expanded eventually.
const linux_x64 = std.zig.CrossTarget{
    .cpu_arch = .x86_64,
    .os_tag = .linux,
};

/// Registers the LLVM-backend end-to-end cases: each case compiles the given
/// source with the LLVM backend for linux_x64 and compares the program's
/// stdout against the expected string.
pub fn addCases(ctx: *TestContext) !void {
    {
        var case = ctx.exeUsingLlvmBackend("simple addition and subtraction", linux_x64);
        case.addCompareOutput(
            \\fn add(a: i32, b: i32) i32 {
            \\    return a + b;
            \\}
            \\
            \\export fn main() c_int {
            \\    var a: i32 = -5;
            \\    const x = add(a, 7);
            \\    var y = add(2, 0);
            \\    y -= x;
            \\    return y;
            \\}
        , "");
    }

    {
        var case = ctx.exeUsingLlvmBackend("llvm hello world", linux_x64);
        case.addCompareOutput(
            \\extern fn puts(s: [*:0]const u8) c_int;
            \\
            \\export fn main() c_int {
            \\    _ = puts("hello world!");
            \\    return 0;
            \\}
        , "hello world!" ++ std.cstr.line_sep);
    }

    {
        var case = ctx.exeUsingLlvmBackend("simple if statement", linux_x64);
        case.addCompareOutput(
            \\fn add(a: i32, b: i32) i32 {
            \\    return a + b;
            \\}
            \\
            \\fn assert(ok: bool) void {
            \\    if (!ok) unreachable;
            \\}
            \\
            \\export fn main() c_int {
            \\    assert(add(1,2) == 3);
            \\    return 0;
            \\}
        , "");
    }

    {
        var case = ctx.exeUsingLlvmBackend("blocks", linux_x64);
        case.addCompareOutput(
            \\fn assert(ok: bool) void {
            \\    if (!ok) unreachable;
            \\}
            \\
            \\fn foo(ok: bool) i32 {
            \\    const val: i32 = blk: {
            \\        var x: i32 = 1;
            \\        if (!ok) break :blk x + 9;
            \\        break :blk x + 19;
            \\    };
            \\    return val + 10;
            \\}
            \\
            \\export fn main() c_int {
            \\    assert(foo(false) == 20);
            \\    assert(foo(true) == 30);
            \\    return 0;
            \\}
        , "");
    }

    {
        var case = ctx.exeUsingLlvmBackend("nested blocks", linux_x64);
        case.addCompareOutput(
            \\fn assert(ok: bool) void {
            \\    if (!ok) unreachable;
            \\}
            \\
            \\fn foo(ok: bool) i32 {
            \\    var val: i32 = blk: {
            \\        const val2: i32 = another: {
            \\            if (!ok) break :blk 10;
            \\            break :another 10;
            \\        };
            \\        break :blk val2 + 10;
            \\    };
            \\    return val;
            \\}
            \\
            \\export fn main() c_int {
            \\    assert(foo(false) == 10);
            \\    assert(foo(true) == 20);
            \\    return 0;
            \\}
        , "");
    }

    {
        var case = ctx.exeUsingLlvmBackend("while loops", linux_x64);
        case.addCompareOutput(
            \\fn assert(ok: bool) void {
            \\    if (!ok) unreachable;
            \\}
            \\
            \\export fn main() c_int {
            \\    var sum: u32 = 0;
            \\    var i: u32 = 0;
            \\    while (i < 5) : (i += 1) {
            \\        sum += i;
            \\    }
            \\    assert(sum == 10);
            \\    assert(i == 5);
            \\    return 0;
            \\}
        , "");
    }
}
test/stage2/llvm.zig
const std = @import("std");
const stdout = std.io.getStdOut().writer();
const Word = @import("./word.zig").Word;

pub const InterpreterError = error{
    Bye,
};

/// Returns a Forth-style interpreter type with a fixed-capacity data stack of
/// `STACK_SIZE` signed 64-bit cells and a dictionary of user-defined words.
pub fn ForthInterpreter(comptime STACK_SIZE: usize) type {
    return struct {
        const Self = @This();

        stack: [STACK_SIZE]i64 = [_]i64{0} ** STACK_SIZE,
        // Stack pointer: index of the next free slot. The top element lives
        // at stack[sp - 1] when sp > 0. No overflow/underflow checks are
        // performed here; out-of-range access traps in safe builds.
        sp: usize = 0,
        dictionary: std.hash_map.StringHashMap([]Word),

        /// Defines `newWord` as the word sequence `seq`. Neither the key nor
        /// the slice is copied — the caller must keep both alive for the
        /// lifetime of the dictionary.
        pub fn record(self: *Self, newWord: []const u8, seq: []Word) !void {
            try self.dictionary.put(newWord, seq);
        }

        pub fn init(alloc: std.mem.Allocator) Self {
            return .{ .dictionary = std.hash_map.StringHashMap([]Word).init(alloc) };
        }

        pub fn deinit(self: *Self) void {
            self.dictionary.deinit();
        }

        /// Executes a single word. Binary operators pop the right operand
        /// first, then the left. `.bye` aborts evaluation by returning
        /// `InterpreterError.Bye`. An unknown `.word` panics (`.?` on a
        /// dictionary miss).
        pub fn run(self: *Self, word: Word) anyerror!void {
            switch (word) {
                .plus => {
                    const r = self.pop();
                    const l = self.pop();
                    self.push(l + r);
                },
                .sub => {
                    const r = self.pop();
                    const l = self.pop();
                    self.push(l - r);
                },
                .mul => {
                    const r = self.pop();
                    const l = self.pop();
                    self.push(l * r);
                },
                .div => {
                    const r = self.pop();
                    const l = self.pop();
                    self.push(@divTrunc(l, r));
                },
                .dup => {
                    const v = self.pop();
                    self.push(v);
                    self.push(v);
                },
                .pop => {
                    // Print and discard the top of the stack.
                    try stdout.print("{} ", .{self.pop()});
                },
                .print => {
                    // Print the top of the stack without consuming it.
                    try stdout.print("{} ", .{self.top()});
                },
                .bye => {
                    return InterpreterError.Bye;
                },
                .int => |v| {
                    self.push(v);
                },
                .word => |w| {
                    // Expand a user-defined word by running its recorded
                    // sequence; recursion depth is bounded by definitions.
                    const ws = self.dictionary.get(w).?;
                    for (ws) |wR| {
                        try self.run(wR);
                    }
                },
            }
        }

        fn pop(self: *Self) i64 {
            self.sp -= 1;
            return self.stack[self.sp];
        }

        fn push(self: *Self, v: i64) void {
            self.stack[self.sp] = v;
            self.sp += 1;
        }

        fn top(self: *Self) i64 {
            // BUGFIX: previously returned stack[0] — the *bottom* of the
            // stack — so `.print` showed the wrong value whenever more than
            // one item was stacked (and after `5 dup print` it only worked
            // by coincidence). The top element is at sp - 1.
            return self.stack[self.sp - 1];
        }
    };
}
src/interpreter.zig
const std = @import("std");
const dcommon = @import("common/dcommon.zig");
// Architecture backend is selected at comptime; it supplies PAGING, mapPage
// and flushTLB for the active target.
const arch_paging = switch (dcommon.daintree_arch) {
    .arm64 => @import("arm64/paging.zig"),
    .riscv64 => @import("riscv64/paging.zig"),
};
const hw = @import("hw.zig");

pub const Error = error{OutOfMemory};
pub const PAGING = arch_paging.PAGING;

pub const MapSize = enum {
    block,
    table,
};

// Mapping attribute classes. The exact MMU bits for each variant are chosen
// by the arch backend's mapPage implementation.
pub const MapFlags = enum {
    non_leaf,
    kernel_promisc,
    kernel_data,
    kernel_rodata,
    kernel_code,
    peripheral,
};

pub const mapPage = arch_paging.mapPage;

// Global early-boot bump allocator; `next` is set up elsewhere before use.
pub var bump = BumpAllocator{ .next = 0 };

/// Maps `page_count_in` pages starting at `base_in` and returns the address
/// the first page was mapped to. Panics if the arch backend does not hand
/// back physically consecutive mappings. Flushes the TLB once at the end.
/// NOTE(review): assumes page_count_in >= 1 — `page_count_in - 1` would
/// wrap for 0; confirm callers never pass 0.
pub fn mapPagesConsecutive(base_in: usize, page_count_in: usize, flags: MapFlags) Error!usize {
    var base = base_in;
    var page_count = page_count_in - 1;
    var first = try mapPage(base, flags);
    var last = first;
    while (page_count > 0) : (page_count -= 1) {
        base += PAGING.page_size;
        var next = try mapPage(base, flags);
        if (next - last != PAGING.page_size) {
            @panic("mapPagesConsecutive wasn't consecutive");
        }
        last = next;
    }
    arch_paging.flushTLB();
    return first;
}

/// Trivial monotonic allocator: hands out zeroed, never-freed chunks starting
/// at `next`.
pub const BumpAllocator = struct {
    next: usize,

    /// Allocates `size` bytes and zeroes them; returns the (physical) address.
    pub fn allocSz(self: *BumpAllocator, comptime size: usize) callconv(.Inline) usize {
        const next = self.next;
        self.next += size;
        // XXX this only works if physical addresses are mapped, i.e. MMU is off
        // or identity mapping is in place
        std.mem.set(u8, @intToPtr([*]u8, next)[0..size], 0);
        return next;
    }

    /// Allocates one zeroed page.
    pub fn allocPage(self: *BumpAllocator) callconv(.Inline) usize {
        return self.allocSz(PAGING.page_size);
    }

    /// Allocates a zero-initialized T and returns a pointer to it.
    pub fn alloc(self: *BumpAllocator, comptime T: type) callconv(.Inline) *T {
        return @intToPtr(*T, self.allocSz(@sizeOf(T)));
    }
};

/// User-supplied knobs for `configuration`; everything else is derived.
pub const PagingConfigurationInput = struct {
    translation_levels: u2 = 3,
    index_bits: u6 = 9,
    page_bits: u6 = 12,
    vaddress_mask: u64,
};

/// Expands a PagingConfigurationInput into a full PagingConfiguration,
/// computing the derived sizes/masks at comptime.
pub fn configuration(comptime config: PagingConfigurationInput) PagingConfiguration {
    const page_size = 1 << config.page_bits;
    const page_mask = page_size - 1;
    // Bytes covered by a single level-1 block entry: one page's worth of bits
    // plus one fewer than all translation levels of index bits.
    const block_l1_bits = config.page_bits + (config.translation_levels - 1) * config.index_bits;
    return .{
        .translation_levels = config.translation_levels,
        .index_bits = config.index_bits,
        .page_bits = config.page_bits,
        .vaddress_mask = config.vaddress_mask,
        .page_size = page_size,
        .page_mask = page_mask,
        .index_size = 1 << config.index_bits,
        .address_bits = config.page_bits + config.translation_levels * config.index_bits,
        // Kernel-half base: all bits above the VA mask and page offset set.
        .kernel_base = ~@as(u64, config.vaddress_mask | page_mask),
        .block_l1_bits = block_l1_bits,
        .block_l1_size = 1 << block_l1_bits,
    };
}

pub const PagingConfiguration = struct {
    translation_levels: u2 = 3,
    index_bits: u6 = 9,
    page_bits: u6 = 12,
    vaddress_mask: u64,

    // computed
    page_size: u64,
    page_mask: u64,
    index_size: u64,
    address_bits: u8,
    kernel_base: u64,
    block_l1_bits: u8,
    block_l1_size: u64,

    /// Returns the page-table index for `va` at translation `level` (1-3);
    /// level 0 is a compile error.
    pub fn index(self: PagingConfiguration, comptime level: u2, va: u64) callconv(.Inline) usize {
        if (level == 0) {
            @compileError("level must be 1, 2, 3");
        }
        return (va & self.vaddress_mask) >> (@as(u6, 3 - level) * self.index_bits + self.page_bits);
    }

    /// Iterates the table indices at `level` spanned by
    /// [start_va, start_va + length] (inclusive of the end page).
    pub fn range(self: PagingConfiguration, comptime level: u2, start_va: u64, length: u64) RangeIterator {
        return .{
            .next_page = self.index(level, start_va),
            .last_page = self.index(level, start_va + length),
            .level_bits = @as(u6, 3 - level) * self.index_bits + self.page_bits,
        };
    }

    pub const RangeIterator = struct {
        next_page: usize,
        last_page: usize,
        level_bits: u6,

        /// Returns the next (index, address) pair, or null once past last_page.
        pub fn next(self: *RangeIterator) ?Range {
            if (self.next_page > self.last_page) {
                return null;
            }
            const page = self.next_page;
            self.next_page += 1;
            return Range{
                .page = page,
                .address = page << self.level_bits,
            };
        }
    };

    pub const Range = struct {
        page: usize,
        address: usize,
    };

    /// Virtual address of the i-th kernel page (kernel_base | i * page_size).
    pub fn kernelPageAddress(self: PagingConfiguration, i: usize) callconv(.Inline) u64 {
        return self.kernel_base | (i << self.page_bits);
    }
};
dainkrnl/src/paging.zig
const std = @import("std");
const wm = @import("wm.zig");
const expect = std.testing.expect;

// NOTE: these tests previously used std.debug.assert, which panics instead of
// reporting a test failure and is compiled out in unsafe optimize modes.
// `try std.testing.expect` is the supported way to assert inside `test`
// blocks: it returns an error so the runner can attribute the failure.

test "add and remove windows" {
    var workspace = wm.Workspace{};
    try expect(workspace.amountOfWindows == 0);
    try expect(workspace.focusedWindow == 0);

    try expect(true == workspace.addWindow(1337));
    try expect(workspace.amountOfWindows == 1);
    try expect(workspace.windows[0] == 1337);
    try expect(workspace.focusedWindow == 0);

    try expect(true == workspace.addWindow(1338));
    try expect(workspace.amountOfWindows == 2);
    try expect(workspace.windows[0] == 1337);
    try expect(workspace.windows[1] == 1338);
    try expect(workspace.focusedWindow == 0);

    try expect(true == workspace.addWindow(1339));
    try expect(workspace.amountOfWindows == 3);
    try expect(workspace.windows[0] == 1337);
    try expect(workspace.windows[1] == 1338);
    try expect(workspace.windows[2] == 1339);
    try expect(workspace.focusedWindow == 0);

    // -1 signals "window not present".
    try expect(workspace.getWindowIndex(133) == -1);
    try expect(workspace.getWindowIndex(1337) == 0);
    try expect(workspace.getWindowIndex(1338) == 1);
    try expect(workspace.getWindowIndex(1339) == 2);

    // Removal compacts the list while preserving relative order.
    workspace.removeWindow(1338);
    try expect(workspace.amountOfWindows == 2);
    try expect(workspace.windows[0] == 1337);
    try expect(workspace.windows[1] == 1339);
    try expect(workspace.focusedWindow == 0);

    workspace.removeWindow(1337);
    try expect(workspace.amountOfWindows == 1);
    try expect(workspace.windows[0] == 1339);
    try expect(workspace.focusedWindow == 0);
}

test "next and previousWindow" {
    var workspace = wm.Workspace{};
    _ = workspace.addWindow(1);
    _ = workspace.addWindow(2);
    _ = workspace.addWindow(3);
    try expect(1 == workspace.getPreviousWindow());
    try expect(2 == workspace.getNextWindow());
}

test "maximum windows" {
    var workspace = wm.Workspace{};
    var i: usize = 0;
    while (i < 8) : (i += 1) {
        try expect(true == workspace.addWindow(i));
    }
    // The 9th window is rejected and the count stays at capacity.
    try expect(false == workspace.addWindow(1337));
    try expect(workspace.amountOfWindows == 8);
}

test "workspaceFocus" {
    var screen = wm.Screen{ .info = undefined, .workspaces = undefined };
    try expect(0 == screen.previousWorkspace);
    screen.workspaceFocus(2);
    try expect(2 == screen.activeWorkspace);
    try expect(0 == screen.previousWorkspace);
}
src/test.zig
const std = @import("std");
const mem = std.mem;
const fmt = std.fmt;
const unicode = std.unicode;
const testing = std.testing;

// This file is itself a struct: an iterable list of decomposition entries
// parsed from UnicodeData.txt, plus a UDDC bit-packed (de)serializer.
iter: usize,
entries: std.ArrayList(Entry),

const DecompFile = @This();

pub const Entry = struct {
    // UTF-8 encoding of the decomposed code point; key_len bytes are valid.
    key: [4]u8,
    key_len: usize,
    value: Decomp,

    // Calculates the difference of each integral value in this entry.
    // All subtractions are wrapping so the result acts as a delta that the
    // decoder can re-apply with wrapping addition.
    pub fn diff(self: Entry, other: Entry, value_form_diff: *u2) Entry {
        var d = Entry{
            .key = undefined,
            .key_len = self.key_len -% other.key_len,
            .value = Decomp{
                .form = undefined,
                .len = self.value.len -% other.value.len,
            },
        };
        value_form_diff.* = @enumToInt(self.value.form) -% @enumToInt(other.value.form);
        for (d.key) |_, i| d.key[i] = self.key[i] -% other.key[i];
        for (d.value.seq) |_, i| d.value.seq[i] = self.value.seq[i] -% other.value.seq[i];
        return d;
    }
};

/// `Form` is the normalization form.
/// * .canon : Canonical decomposition, which always results in two code points.
/// * .compat : Compatibility decomposition, which can result in at most 18 code points.
/// * .same : Default canonical decomposition to the code point itself.
pub const Form = enum(u2) {
    canon, // D
    compat, // KD
    same, // no more decomposition.
};

/// `Decomp` is the result of decomposing a code point to a normalization form.
pub const Decomp = struct {
    form: Form = .canon,
    len: usize = 2,
    seq: [18]u21 = [_]u21{0} ** 18,
};

pub fn deinit(self: *DecompFile) void {
    self.entries.deinit();
}

/// Returns the next entry and advances the iterator, or null at the end.
pub fn next(self: *DecompFile) ?Entry {
    if (self.iter >= self.entries.items.len) return null;
    const entry = self.entries.items[self.iter];
    self.iter += 1;
    return entry;
}

/// Opens `filename` and parses it as UnicodeData.txt (see `parse`).
pub fn parseFile(allocator: mem.Allocator, filename: []const u8) !DecompFile {
    var in_file = try std.fs.cwd().openFile(filename, .{});
    defer in_file.close();
    return parse(allocator, in_file.reader());
}

/// Parses the semicolon-separated UnicodeData.txt format: field 0 is the code
/// point (hex), field 5 its decomposition. Lines without a decomposition
/// (empty field 5) produce no entry.
pub fn parse(allocator: mem.Allocator, reader: anytype) !DecompFile {
    var buf_reader = std.io.bufferedReader(reader);
    var input_stream = buf_reader.reader();
    var entries = std.ArrayList(Entry).init(allocator);
    // Iterate over lines.
    var buf: [640]u8 = undefined;
    while (try input_stream.readUntilDelimiterOrEof(&buf, '\n')) |line| {
        // Iterate over fields.
        var fields = mem.split(u8, line, ";");
        var field_index: usize = 0;
        var code_point: []const u8 = undefined;
        var dc = Decomp{};
        while (fields.next()) |raw| : (field_index += 1) {
            if (field_index == 0) {
                // Code point.
                code_point = raw;
            } else if (field_index == 5 and raw.len != 0) {
                // Normalization.
                const parsed_cp = try fmt.parseInt(u21, code_point, 16);
                var _key_backing = std.mem.zeroes([4]u8);
                const key = blk: {
                    const len = try unicode.utf8Encode(parsed_cp, &_key_backing);
                    break :blk _key_backing[0..len];
                };
                var is_compat = false;
                var cp_list: [18][]const u8 = [_][]const u8{""} ** 18;
                // A "<tag>" prefix marks a compatibility decomposition.
                var cp_iter = mem.split(u8, raw, " ");
                var i: usize = 0;
                while (cp_iter.next()) |cp| {
                    if (mem.startsWith(u8, cp, "<")) {
                        is_compat = true;
                        continue;
                    }
                    cp_list[i] = cp;
                    i += 1;
                }
                if (!is_compat and i == 1) {
                    // Singleton
                    dc.len = 1;
                    dc.seq[0] = try fmt.parseInt(u21, cp_list[0], 16);
                    try entries.append(Entry{ .key = _key_backing, .key_len = key.len, .value = dc });
                } else if (!is_compat) {
                    // Canonical
                    std.debug.assert(i == 2);
                    dc.seq[0] = try fmt.parseInt(u21, cp_list[0], 16);
                    dc.seq[1] = try fmt.parseInt(u21, cp_list[1], 16);
                    try entries.append(Entry{ .key = _key_backing, .key_len = key.len, .value = dc });
                } else {
                    // Compatibility
                    std.debug.assert(i != 0 and i <= 18);
                    var j: usize = 0;
                    for (cp_list) |ccp| {
                        if (ccp.len == 0) break; // sentinel
                        dc.seq[j] = try fmt.parseInt(u21, ccp, 16);
                        j += 1;
                    }
                    dc.form = .compat;
                    dc.len = j;
                    try entries.append(Entry{ .key = _key_backing, .key_len = key.len, .value = dc });
                }
            } else {
                continue;
            }
        }
    }
    return DecompFile{ .iter = 0, .entries = entries };
}

// A UDDC opcode for a decomposition file.
const Opcode = enum(u4) {
    // increments key[2] += 1; sets value.seq[0]; emits an entry.
    // 1848 instances
    increment_key_2_and_set_value_seq_0_2bit_and_emit, // 749 instances, 1779 byte reduction
    increment_key_2_and_set_value_seq_0_12bit_and_emit, // 741 instances, 834 byte reduction
    increment_key_2_and_set_value_seq_0_21bit_and_emit, // 358 instances

    // increments key[3] += 1; sets value.seq[0]; emits an entry.
    // 1685 instances
    increment_key_3_and_set_value_seq_0_2bit_and_emit, // 978 instances, 2323 byte reduction
    increment_key_3_and_set_value_seq_0_8bit_and_emit, // 269 instances, 437 byte reduction
    increment_key_3_and_set_value_seq_0_21bit_and_emit, // 438 instances

    // Sets the key and value.seq registers, then emit an entry. This is used when no other smaller
    // opcode is sufficient.
    // 1531 instances
    set_key_and_value_seq_2bit_and_emit, // 732 instances, 3358 byte reduction
    set_key_and_value_seq_8bit_and_emit, // 297 instances, 1026 byte reduction
    set_key_and_value_seq_21bit_and_emit, // 502 instances

    // increments key[1] += 1; sets value.seq[0]; emits an entry.
    // 177 instances, responsible for an 764 byte reduction
    increment_key_1_and_set_value_seq_0_and_emit,

    // increments key[1] += 1; sets value.seq[0]; sets value.seq[1]; emits an entry.
    // 108 instances, responsible for an 203 byte reduction
    increment_key_1_and_set_value_seq_0_1_and_emit,

    // increments key[2] += 1; sets value.seq[0]; sets value.seq[1]; emits an entry.
    // 387 instances, responsible for an 1135 byte reduction
    increment_key_2_and_set_value_seq_0_1_and_emit,

    // Sets the key and key_len registers at the same time.
    // 3 instances
    set_key_len_and_key,

    // Sets the value.form register
    // 72 instances
    set_value_form,

    // Sets the value.len register
    // 309 instances
    set_value_len,

    // Denotes the end of the opcode stream. This is so that we don't need to encode the total
    // number of opcodes in the stream up front (note also the file is bit packed: there may be
    // a few remaining zero bits at the end as padding so we need an EOF opcode rather than say
    // catching the actual file read EOF.)
    eof,
};

/// Creates `filename` and writes the UDDC-compressed entry stream to it.
pub fn compressToFile(self: *DecompFile, filename: []const u8) !void {
    var out_file = try std.fs.cwd().createFile(filename, .{});
    defer out_file.close();
    return self.compressTo(out_file.writer());
}

/// Encodes all entries as a little-endian bit-packed UDDC opcode stream.
/// Consumes the iterator (callers reset `self.iter` afterwards if needed).
/// Must stay in exact agreement with `decompress`.
pub fn compressTo(self: *DecompFile, writer: anytype) !void {
    var buf_writer = std.io.bufferedWriter(writer);
    var out = std.io.bitWriter(.Little, buf_writer.writer());
    // For the UDDC registers, we want one register to represent each possible value in a single
    // entry; we will emit opcodes to modify these registers into the desired form to produce a
    // real entry.
    var registers = std.mem.zeroes(Entry);
    while (self.next()) |entry| {
        // Determine what has changed between this entry and the current registers' state.
        var diff_value_form: u2 = undefined;
        const diff = entry.diff(registers, &diff_value_form);

        // If you want to analyze the difference between entries, uncomment the following:
        //std.debug.print("diff={}\n", .{diff});
        //registers = entry;
        //continue;

        // Infrequently changed: key_len, value.form, and value.len registers. Emit opcodes to
        // update them if needed.
        if (diff.key_len != 0) {
            try out.writeBits(@enumToInt(Opcode.set_key_len_and_key), @bitSizeOf(Opcode));
            try out.writeBits(entry.key_len, 3);
            _ = try out.write(entry.key[0..entry.key_len]);
        }
        if (diff_value_form != 0) {
            try out.writeBits(@enumToInt(Opcode.set_value_form), @bitSizeOf(Opcode));
            try out.writeBits(diff_value_form, @bitSizeOf(Form));
        }
        if (diff.value.len != 0) {
            try out.writeBits(@enumToInt(Opcode.set_value_len), @bitSizeOf(Opcode));
            try out.writeBits(entry.value.len, 5);
        }

        // Frequently changed: this is where the magic happens.
        // seq_0_change: only seq[0] differs; seq_0_1_change: only seq[0..2] differ.
        var seq_0_change = diff.value.seq[0] != 0 and mem.eql(u21, diff.value.seq[1..], ([_]u21{0} ** 17)[0..]);
        var seq_0_1_change = diff.value.seq[0] != 0 and diff.value.seq[1] != 0 and mem.eql(u21, diff.value.seq[2..], ([_]u21{0} ** 16)[0..]);
        if (seq_0_change and mem.eql(u8, &diff.key, &[4]u8{ 0, 1, 0, 0 })) {
            try out.writeBits(@enumToInt(Opcode.increment_key_1_and_set_value_seq_0_and_emit), @bitSizeOf(Opcode));
            try out.writeBits(entry.value.seq[0], 21);
        } else if (seq_0_change and mem.eql(u8, &diff.key, &[4]u8{ 0, 0, 1, 0 })) {
            // Within this category, of all diff.value.seq[0] values:
            // * 41% fit in 2 bits.
            // * 90% fit in 6 bits.
            var fits_2 = true;
            var fits_12 = true;
            for (diff.value.seq[0..entry.value.len]) |s| {
                if (s >= (1 << 2)) {
                    fits_2 = false;
                }
                if (s >= (1 << 12)) {
                    fits_12 = false;
                }
            }
            if (fits_2) {
                try out.writeBits(@enumToInt(Opcode.increment_key_2_and_set_value_seq_0_2bit_and_emit), @bitSizeOf(Opcode));
                try out.writeBits(diff.value.seq[0], 2);
            } else if (fits_12) {
                try out.writeBits(@enumToInt(Opcode.increment_key_2_and_set_value_seq_0_12bit_and_emit), @bitSizeOf(Opcode));
                try out.writeBits(diff.value.seq[0], 12);
            } else {
                try out.writeBits(@enumToInt(Opcode.increment_key_2_and_set_value_seq_0_21bit_and_emit), @bitSizeOf(Opcode));
                try out.writeBits(diff.value.seq[0], 21);
            }
        } else if (seq_0_change and mem.eql(u8, &diff.key, &[4]u8{ 0, 0, 0, 1 })) {
            // Within this category, of all diff.value.seq[0] values:
            // * 58% fit in 2 bits.
            // * 80% fit in 4 bits.
            var fits_2 = true;
            var fits_8 = true;
            for (diff.value.seq[0..entry.value.len]) |s| {
                if (s >= (1 << 2)) {
                    fits_2 = false;
                }
                if (s >= (1 << 8)) {
                    fits_8 = false;
                }
            }
            if (fits_2) {
                try out.writeBits(@enumToInt(Opcode.increment_key_3_and_set_value_seq_0_2bit_and_emit), @bitSizeOf(Opcode));
                try out.writeBits(diff.value.seq[0], 2);
            } else if (fits_8) {
                try out.writeBits(@enumToInt(Opcode.increment_key_3_and_set_value_seq_0_8bit_and_emit), @bitSizeOf(Opcode));
                try out.writeBits(diff.value.seq[0], 8);
            } else {
                try out.writeBits(@enumToInt(Opcode.increment_key_3_and_set_value_seq_0_21bit_and_emit), @bitSizeOf(Opcode));
                try out.writeBits(diff.value.seq[0], 21);
            }
        } else if (seq_0_1_change and mem.eql(u8, &diff.key, &[4]u8{ 0, 1, 0, 0 })) {
            try out.writeBits(@enumToInt(Opcode.increment_key_1_and_set_value_seq_0_1_and_emit), @bitSizeOf(Opcode));
            try out.writeBits(entry.value.seq[0], 21);
            try out.writeBits(entry.value.seq[1], 21);
        } else if (seq_0_1_change and mem.eql(u8, &diff.key, &[4]u8{ 0, 0, 1, 0 })) {
            try out.writeBits(@enumToInt(Opcode.increment_key_2_and_set_value_seq_0_1_and_emit), @bitSizeOf(Opcode));
            try out.writeBits(entry.value.seq[0], 21);
            try out.writeBits(entry.value.seq[1], 21);
        } else {
            // Within this category, of all diff.value.seq[0..] values:
            // * 60% fit in 2 bits.
            // * 84% fit in 8 bits.
            var fits_2 = true;
            var fits_8 = true;
            for (diff.value.seq[0..entry.value.len]) |s| {
                if (s >= (1 << 2)) {
                    fits_2 = false;
                }
                if (s >= (1 << 8)) {
                    fits_8 = false;
                }
            }
            if (fits_2) {
                try out.writeBits(@enumToInt(Opcode.set_key_and_value_seq_2bit_and_emit), @bitSizeOf(Opcode));
                _ = try out.write(entry.key[0..entry.key_len]);
                for (diff.value.seq[0..entry.value.len]) |s| try out.writeBits(s, 2);
            } else if (fits_8) {
                try out.writeBits(@enumToInt(Opcode.set_key_and_value_seq_8bit_and_emit), @bitSizeOf(Opcode));
                _ = try out.write(entry.key[0..entry.key_len]);
                for (diff.value.seq[0..entry.value.len]) |s| try out.writeBits(s, 8);
            } else {
                try out.writeBits(@enumToInt(Opcode.set_key_and_value_seq_21bit_and_emit), @bitSizeOf(Opcode));
                _ = try out.write(entry.key[0..entry.key_len]);
                for (diff.value.seq[0..entry.value.len]) |s| try out.writeBits(s, 21);
            }
        }
        registers = entry;
    }
    try out.writeBits(@enumToInt(Opcode.eof), @bitSizeOf(Opcode));
    try out.flushBits();
    try buf_writer.flush();
}

/// Opens `filename` and decodes it (see `decompress`).
pub fn decompressFile(allocator: mem.Allocator, filename: []const u8) !DecompFile {
    var in_file = try std.fs.cwd().openFile(filename, .{});
    defer in_file.close();
    return decompress(allocator, in_file.reader());
}

/// Decodes a UDDC opcode stream produced by `compressTo` back into entries.
/// Must stay in exact agreement with the encoder.
pub fn decompress(allocator: mem.Allocator, reader: anytype) !DecompFile {
    var buf_reader = std.io.bufferedReader(reader);
    var in = std.io.bitReader(.Little, buf_reader.reader());
    var entries = std.ArrayList(Entry).init(allocator);

    // For the UDDC registers, we want one register to represent each possible value in a single
    // entry; each opcode we read will modify these registers so we can emit a value.
    var registers = std.mem.zeroes(Entry);
    while (true) {
        // Read a single operation.
        var op = @intToEnum(Opcode, try in.readBitsNoEof(std.meta.Tag(Opcode), @bitSizeOf(Opcode)));

        // If you want to inspect the # of different ops in a stream, uncomment this:
        //std.debug.print("{}\n", .{op});

        // Execute the operation. Deltas are re-applied with wrapping adds,
        // mirroring the wrapping subtraction in Entry.diff.
        switch (op) {
            .increment_key_2_and_set_value_seq_0_2bit_and_emit => {
                registers.key[2] += 1;
                registers.value.seq[0] +%= try in.readBitsNoEof(u21, 2);
                try entries.append(registers);
            },
            .increment_key_2_and_set_value_seq_0_12bit_and_emit => {
                registers.key[2] += 1;
                registers.value.seq[0] +%= try in.readBitsNoEof(u21, 12);
                try entries.append(registers);
            },
            .increment_key_2_and_set_value_seq_0_21bit_and_emit => {
                registers.key[2] += 1;
                registers.value.seq[0] +%= try in.readBitsNoEof(u21, 21);
                try entries.append(registers);
            },
            .increment_key_3_and_set_value_seq_0_2bit_and_emit => {
                registers.key[3] += 1;
                registers.value.seq[0] +%= try in.readBitsNoEof(u21, 2);
                try entries.append(registers);
            },
            .increment_key_3_and_set_value_seq_0_8bit_and_emit => {
                registers.key[3] += 1;
                registers.value.seq[0] +%= try in.readBitsNoEof(u21, 8);
                try entries.append(registers);
            },
            .increment_key_3_and_set_value_seq_0_21bit_and_emit => {
                registers.key[3] += 1;
                registers.value.seq[0] +%= try in.readBitsNoEof(u21, 21);
                try entries.append(registers);
            },
            .set_key_and_value_seq_2bit_and_emit => {
                _ = try in.read(registers.key[0..registers.key_len]);
                var i: usize = 0;
                while (i < registers.value.len) : (i += 1) registers.value.seq[i] +%= try in.readBitsNoEof(u21, 2);
                while (i < registers.value.seq.len) : (i += 1) registers.value.seq[i] = 0;
                try entries.append(registers);
            },
            .set_key_and_value_seq_8bit_and_emit => {
                _ = try in.read(registers.key[0..registers.key_len]);
                var i: usize = 0;
                while (i < registers.value.len) : (i += 1) registers.value.seq[i] +%= try in.readBitsNoEof(u21, 8);
                while (i < registers.value.seq.len) : (i += 1) registers.value.seq[i] = 0;
                try entries.append(registers);
            },
            .set_key_and_value_seq_21bit_and_emit => {
                _ = try in.read(registers.key[0..registers.key_len]);
                var i: usize = 0;
                while (i < registers.value.len) : (i += 1) registers.value.seq[i] +%= try in.readBitsNoEof(u21, 21);
                while (i < registers.value.seq.len) : (i += 1) registers.value.seq[i] = 0;
                try entries.append(registers);
            },
            .increment_key_1_and_set_value_seq_0_and_emit => {
                registers.key[1] += 1;
                registers.value.seq[0] = try in.readBitsNoEof(u21, 21);
                try entries.append(registers);
            },
            .increment_key_1_and_set_value_seq_0_1_and_emit => {
                registers.key[1] += 1;
                registers.value.seq[0] = try in.readBitsNoEof(u21, 21);
                registers.value.seq[1] = try in.readBitsNoEof(u21, 21);
                try entries.append(registers);
            },
            .increment_key_2_and_set_value_seq_0_1_and_emit => {
                registers.key[2] += 1;
                registers.value.seq[0] = try in.readBitsNoEof(u21, 21);
                registers.value.seq[1] = try in.readBitsNoEof(u21, 21);
                try entries.append(registers);
            },
            .set_key_len_and_key => {
                registers.key_len = try in.readBitsNoEof(usize, 3);
                _ = try in.read(registers.key[0..registers.key_len]);
            },
            .set_value_form => {
                registers.value.form = @intToEnum(Form, @enumToInt(registers.value.form) +% try in.readBitsNoEof(std.meta.Tag(Form), @bitSizeOf(Form)));
            },
            .set_value_len => {
                registers.value.len = try in.readBitsNoEof(usize, 5);
            },
            .eof => break,
        }
    }
    return DecompFile{ .iter = 0, .entries = entries };
}

test "parse" {
    const allocator = testing.allocator;
    var file = try parseFile(allocator, "src/data/ucd/UnicodeData.txt");
    defer file.deinit();
    while (file.next()) |entry| {
        _ = entry;
    }
}

test "compression_is_lossless" {
    const allocator = testing.allocator;

    // Compress UnicodeData.txt -> Decompositions.bin
    var file = try parseFile(allocator, "src/data/ucd/UnicodeData.txt");
    defer file.deinit();
    try file.compressToFile("src/data/ucd/Decompositions.bin");

    // Reset the raw file iterator.
    file.iter = 0;

    // Decompress the file.
    var decompressed = try decompressFile(allocator, "src/data/ucd/Decompositions.bin");
    defer decompressed.deinit();
    while (file.next()) |expected| {
        var actual = decompressed.next().?;
        try testing.expectEqual(expected, actual);
    }
}
src/normalizer/DecompFile.zig
const stdx = @import("stdx");
const graphics = @import("graphics");
const Color = graphics.Color;
const platform = @import("platform");
const ui = @import("../ui.zig");
const widgets = ui.widgets;
const MouseArea = widgets.MouseArea;
const Row = widgets.Row;
const Flex = widgets.Flex;
const Sized = widgets.Sized;
const Text = widgets.Text;

const log = stdx.log.scoped(.switch_);

/// A labeled row containing a Switch. Clicking anywhere on the row (label
/// included) toggles the inner switch.
pub const SwitchOption = struct {
    props: struct {
        label: []const u8 = "Switch",
        init_val: bool = false,
        // Invoked with the new on/off state whenever the switch toggles
        // (forwarded to the inner Switch).
        onChange: ?stdx.Function(fn (bool) void) = null,
    },
    inner: ui.WidgetRef(Switch),

    const Self = @This();

    /// Returns the inner switch's current on/off state.
    pub fn isSet(self: *Self) bool {
        return self.inner.getWidget().isSet();
    }

    /// Builds a clickable row: [flexible label text | switch].
    pub fn build(self: *Self, c: *ui.BuildContext) ui.FrameId {
        const S = struct {
            fn onClick(self_: *Self, _: platform.MouseUpEvent) void {
                self_.inner.getWidget().toggle();
            }
        };
        const d = c.decl;
        return d(MouseArea, .{
            .onClick = c.funcExt(self, S.onClick),
            .child = d(Row, .{
                .valign = .Center,
                .children = c.list(.{
                    d(Flex, .{
                        .child = d(Text, .{
                            .text = self.props.label,
                            .color = Color.White,
                        }),
                    }),
                    d(Switch, .{
                        .bind = &self.inner,
                        .init_val = self.props.init_val,
                        .onChange = self.props.onChange,
                    }),
                }),
            }),
        });
    }
};

/// A toggle switch drawn as a rounded track with a sliding knob.
pub const Switch = struct {
    props: struct {
        // NOTE(review): `label` is never read in this file — presumably
        // reserved for future use; confirm before removing.
        label: ?[]const u8 = null,
        init_val: bool = false,
        onChange: ?stdx.Function(fn (bool) void) = null,
    },

    is_set: bool,
    anim: ui.SimpleTween,

    const Self = @This();

    // Fixed geometry (logical units): 60x30 track, 5 padding, knob radius
    // derived so the knob fits the track height exactly.
    const Width = 60;
    const Height = 30;
    const InnerPadding = 5;
    const InnerRadius = (Height - InnerPadding * 2) / 2;

    pub fn init(self: *Self, _: *ui.InitContext) void {
        self.is_set = self.props.init_val;
        // 100 is the tween duration passed to SimpleTween.init; finish() is
        // called so no slide animation plays on first render — presumably it
        // jumps the tween to its end state (TODO confirm).
        self.anim = ui.SimpleTween.init(100);
        self.anim.finish();
    }

    /// Builds a fixed-size clickable area; the visuals are drawn in `render`.
    pub fn build(self: *Self, c: *ui.BuildContext) ui.FrameId {
        const S = struct {
            fn onClick(self_: *Self, _: platform.MouseUpEvent) void {
                self_.toggle();
            }
        };
        const d = c.decl;
        return d(MouseArea, .{
            .onClick = c.funcExt(self, S.onClick),
            .child = d(Sized, .{
                .width = Width,
                .height = Height,
            }),
        });
    }

    pub fn isSet(self: Self) bool {
        return self.is_set;
    }

    /// Flips the state, restarts the slide animation, and notifies the
    /// onChange callback with the new value.
    pub fn toggle(self: *Self) void {
        self.is_set = !self.is_set;
        self.anim.reset();
        if (self.props.onChange) |cb| {
            cb.call(.{ self.is_set });
        }
    }

    /// Draws the track (blue when on, dark gray when off) and the white knob,
    /// interpolating the knob's x offset from the tween's progress `t`.
    pub fn render(self: *Self, c: *ui.RenderContext) void {
        const g = c.g;
        const alo = c.getAbsLayout();

        if (self.is_set) {
            g.setFillColor(Color.Blue);
        } else {
            g.setFillColor(Color.DarkGray);
        }
        g.fillRoundRect(alo.x, alo.y, alo.width, alo.height, alo.height * 0.5);

        g.setFillColor(Color.White);
        self.anim.step(c.delta_ms);
        var offset_x: f32 = undefined;
        if (self.is_set) {
            // Sliding right toward "on"; (1 - t) mirrors it for "off".
            offset_x = self.anim.t * (Width - InnerPadding * 2 - InnerRadius * 2);
        } else {
            offset_x = (1 - self.anim.t) * (Width - InnerPadding * 2 - InnerRadius * 2);
        }
        g.fillCircle(alo.x + InnerPadding + InnerRadius + offset_x, alo.y + InnerPadding + InnerRadius, InnerRadius);
    }
};
ui/src/widgets/switch.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
const sql = @import("sqlite");
const db = @import("db.zig");
const fs = std.fs;
const fmt = std.fmt;
const print = std.debug.print;
const log = std.log;
const http = @import("http.zig");
const mem = std.mem;
const shame = @import("shame.zig");
const time = std.time;
const parse = @import("parse.zig");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectEqualStrings = std.testing.expectEqualStrings;
const ArrayList = std.ArrayList;
const Table = @import("queries.zig").Table;

// TODO: Maybe consolidate Storage (feed_db.zig) and Db (db.zig)
// Storage would be specific functions. Db would be utility/helper/wrapper functions

/// Globally shared runtime configuration.
pub const g = struct {
    /// Upper bound of items kept per feed; surplus (oldest) rows are pruned.
    pub var max_items_per_feed: u16 = 10;
    /// Tag automatically attached to feeds that end up with no user tags.
    pub const tag_untagged = "untagged";
};

/// High-level persistence layer for feeds, items and tags on top of db.Db.
pub const Storage = struct {
    const Self = @This();
    db: db.Db,
    allocator: Allocator,

    /// Opens (or creates) the database at `location`; null means in-memory.
    pub fn init(allocator: Allocator, location: ?[:0]const u8) !Self {
        return Self{ .db = try db.Db.init(allocator, location), .allocator = allocator };
    }

    /// Inserts a feed row, or updates it in place when `location` already
    /// exists and the publish timestamp changed. Returns the feed's row id.
    pub fn addFeed(self: *Self, feed: parse.Feed, location: []const u8) !u64 {
        // NOTE(review): the ';' before RETURNING terminates the UPSERT
        // statement, which looks like it would prevent RETURNING from ever
        // applying — confirm against the sqlite wrapper before changing.
        const query =
            \\INSERT INTO feed (title, location, link, updated_raw, updated_timestamp)
            \\VALUES (
            \\ ?{[]const u8},
            \\ ?{[]const u8},
            \\ ?,
            \\ ?,
            \\ ?
            \\) ON CONFLICT(location) DO UPDATE SET
            \\ title = excluded.title,
            \\ link = excluded.link,
            \\ updated_raw = excluded.updated_raw,
            \\ updated_timestamp = excluded.updated_timestamp
            \\WHERE updated_timestamp != excluded.updated_timestamp;
            \\RETURNING id;
        ;
        const args = .{ feed.title, location, feed.link, feed.updated_raw, feed.updated_timestamp };
        // Make sure function returns id.
        // 'query' returns id only if insert or update is made. If update doesn't get pass
        // where condition no id is returned.
        const id = (try self.db.one(u64, query, args)) orelse
            (try self.db.one(u64, "select id from feed where location == ?{[]const u8} limit 1;", .{location}));
        return id orelse error.NoReturnId;
    }

    /// Deletes a feed row; dependent rows are expected to be removed by the
    /// schema's foreign keys (the tests verify item/update rows disappear).
    pub fn deleteFeed(self: *Self, id: u64) !void {
        try self.db.exec("DELETE FROM feed WHERE id = ?", .{id});
    }

    /// Stores (or refreshes) the HTTP caching metadata used to decide when a
    /// url feed needs re-fetching.
    pub fn addFeedUrl(self: *Self, feed_id: u64, headers: http.RespHeaders) !void {
        const query =
            \\INSERT INTO feed_update_http
            \\ (feed_id, cache_control_max_age, expires_utc, last_modified_utc, etag)
            \\VALUES (
            \\ ?{u64},
            \\ ?,
            \\ ?,
            \\ ?,
            \\ ?
            \\)
            \\ ON CONFLICT(feed_id) DO UPDATE SET
            \\ cache_control_max_age = excluded.cache_control_max_age,
            \\ expires_utc = excluded.expires_utc,
            \\ last_modified_utc = excluded.last_modified_utc,
            \\ etag = excluded.etag,
            \\ last_update = (strftime('%s', 'now'))
        ;
        try self.db.exec(query, .{
            feed_id,
            headers.cache_control_max_age,
            headers.expires_utc,
            headers.last_modified_utc,
            headers.etag,
        });
    }

    /// Stores (or refreshes) the file-mtime metadata for a local-file feed.
    pub fn addFeedLocal(
        self: *Self,
        feed_id: u64,
        last_modified: i64,
    ) !void {
        const query =
            \\INSERT INTO feed_update_local
            \\ (feed_id, last_modified_timestamp)
            \\VALUES (
            \\ ?{u64},
            \\ ?{i64}
            \\)
            \\ON CONFLICT(feed_id) DO UPDATE SET
            \\ last_modified_timestamp = excluded.last_modified_timestamp,
            \\ last_update = (strftime('%s', 'now'))
        ;
        try self.db.exec(query, .{ feed_id, last_modified });
    }

    /// Inserts up to g.max_items_per_feed items for `feed_id`. When items
    /// carry guids or links, existing rows are upserted; otherwise all of the
    /// feed's items are replaced wholesale.
    pub fn addItems(self: *Self, feed_id: u64, feed_items: []parse.Feed.Item) !void {
        const items = feed_items[0..std.math.min(feed_items.len, g.max_items_per_feed)];
        // Modifies item order in memory. Don't use after this if order is important.
        std.mem.reverse(parse.Feed.Item, items);
        // Dedup by guid/link is only possible when every item has one.
        const hasGuidOrLink = blk: {
            const hasGuid = blk_guid: {
                for (items) |item| {
                    if (item.id == null) break :blk_guid false;
                } else {
                    break :blk_guid true;
                }
            };
            const hasLink = blk_link: {
                for (items) |item| {
                    if (item.link == null) break :blk_link false;
                } else {
                    break :blk_link true;
                }
            };
            break :blk hasGuid or hasLink;
        };
        const insert_query =
            \\INSERT INTO item (feed_id, title, link, guid, pub_date, pub_date_utc)
            \\VALUES (
            \\ ?{u64},
            \\ ?{[]const u8},
            \\ ?, ?, ?, ?
            \\)
        ;
        if (hasGuidOrLink) {
            const conflict_update =
                \\ DO UPDATE SET
                \\ title = excluded.title,
                \\ pub_date = excluded.pub_date,
                \\ pub_date_utc = excluded.pub_date_utc,
                \\ modified_at = (strftime('%s', 'now'))
                \\WHERE
                \\ excluded.pub_date_utc != pub_date_utc
            ;
            const query = insert_query ++ "\nON CONFLICT(feed_id, guid) " ++ conflict_update ++ "\nON CONFLICT(feed_id, link)" ++ conflict_update ++ ";";
            for (items) |item| {
                try self.db.exec(query, .{
                    feed_id,
                    item.title,
                    item.link,
                    item.id,
                    item.updated_raw,
                    item.updated_timestamp,
                });
            }
            // Trim the oldest rows so the feed keeps at most max_items_per_feed.
            const del_query =
                \\DELETE FROM item
                \\WHERE id IN
                \\ (SELECT id FROM item
                \\ WHERE feed_id = ?
                \\ ORDER BY id ASC
                \\ LIMIT (SELECT MAX(count(feed_id) - ?, 0) FROM item WHERE feed_id = ?)
                \\ )
            ;
            try self.db.exec(del_query, .{ feed_id, g.max_items_per_feed, feed_id });
        } else {
            // No stable identity per item: replace everything.
            const del_query = "DELETE FROM item WHERE feed_id = ?;";
            try self.db.exec(del_query, .{feed_id});
            const query =
                \\INSERT INTO item (feed_id, title, link, guid, pub_date, pub_date_utc)
                \\VALUES (
                \\ ?{u64},
                \\ ?{[]const u8},
                \\ ?, ?, ?, ?
                \\);
            ;
            for (items) |item| {
                try self.db.exec(query, .{
                    feed_id,
                    item.title,
                    item.link,
                    item.id,
                    item.updated_raw,
                    item.updated_timestamp,
                });
            }
        }
    }

    /// Adds comma-separated `tags` to feed `id` and removes the automatic
    /// 'untagged' marker.
    pub fn addTags(self: *Self, id: u64, tags: []const u8) !void {
        if (tags.len == 0) return;
        const query =
            \\INSERT INTO feed_tag VALUES (?, ?)
            \\ON CONFLICT(feed_id, tag) DO NOTHING;
        ;
        var iter = mem.split(u8, tags, ",");
        while (iter.next()) |tag| {
            try self.db.exec(query, .{ id, mem.trim(u8, tag, " +") });
        }
        // Make sure there is no 'untagged' tag
        const del_query = comptime fmt.comptimePrint(
            "delete from feed_tag where feed_id = ? and tag = '{s}';",
            .{g.tag_untagged},
        );
        try self.db.exec(del_query, .{id});
    }

    pub const UpdateOptions = struct {
        /// Update regardless of cache headers / mtimes.
        force: bool = false,
        // For testing purposes
        resolveUrl: @TypeOf(http.resolveRequest) = http.resolveRequest,
    };

    /// Refreshes all url and local feeds, then prunes surplus items.
    pub fn updateAllFeeds(self: *Self, opts: UpdateOptions) !void {
        try self.updateUrlFeeds(opts);
        // FIX: updateLocalFeeds takes only (self, opts); the old call passed a
        // stray allocator argument, which fails to compile once analyzed.
        try self.updateLocalFeeds(opts);
        try self.cleanItems();
    }

    /// Counts the fields of T that are ([]/?[])u8 slices, i.e. that were
    /// heap-allocated by the row reader and must be freed.
    fn deallocFieldsLen(comptime T: type) u8 {
        const meta = std.meta;
        const fields = comptime meta.fields(T);
        comptime var result = 0;
        inline for (fields) |field| {
            const is_slice = field.field_type == []const u8 or field.field_type == ?[]const u8 or field.field_type == []u8 or field.field_type == ?[]u8;
            if (is_slice) result += 1;
        }
        return result;
    }

    const DeAllocField = struct { is_optional: bool, name: []const u8 };

    /// Returns T's slice-typed fields in reverse declaration order, for
    /// freeing FixedBufferAllocator memory back-to-front.
    fn deallocFields(comptime T: type) [deallocFieldsLen(T)]DeAllocField {
        const meta = std.meta;
        const fields = comptime meta.fields(T);
        const len = comptime deallocFieldsLen(T);
        comptime var result: [len]DeAllocField = undefined;
        // FIX: index the result with a dedicated slice-field counter. The old
        // code used the overall field index, which writes out of bounds (or to
        // the wrong slot) whenever a non-slice field precedes a slice field.
        comptime var slice_i: usize = 0;
        inline for (fields) |field| {
            const is_slice = field.field_type == []const u8 or field.field_type == ?[]const u8 or field.field_type == []u8 or field.field_type == ?[]u8;
            if (is_slice) {
                const is_optional = switch (@typeInfo(field.field_type)) {
                    .Pointer => false,
                    .Optional => true,
                    else => @compileError("Parsing UrlFeed struct failed."),
                };
                // Reverse fields' order
                result[len - 1 - slice_i] = .{ .is_optional = is_optional, .name = field.name };
                slice_i += 1;
            }
        }
        return result;
    }

    /// Re-fetches and re-parses every url feed whose cache metadata says it is
    /// stale (or every url feed when opts.force is set).
    pub fn updateUrlFeeds(self: *Self, opts: UpdateOptions) !void {
        if (!opts.force) {
            // Update every update_countdown value
            const update_countdown_query =
                \\WITH const AS (SELECT strftime('%s', 'now') as current_utc)
                \\UPDATE feed_update_http SET update_countdown = COALESCE(
                \\ const.current_utc - last_update + cache_control_max_age,
                \\ expires_utc - const.current_utc,
                \\ const.current_utc - last_update + update_interval
                \\) from const;
            ;
            try self.db.exec(update_countdown_query, .{});
        }
        const UrlFeed = struct {
            location: []const u8,
            etag: ?[]const u8,
            feed_id: u64,
            updated_timestamp: ?i64,
            last_modified_utc: ?i64,

            pub fn deinit(row: @This(), allocator: Allocator) void {
                // Fetches allocated fields in reverse order
                const dealloc_fields = comptime deallocFields(@This());
                // IMPORTANT: Have to free memory in reverse, important when using FixedBufferAllocator.
                // FixedBufferAllocator stack part uses end_index to keep track of available memory
                inline for (dealloc_fields) |field| {
                    const val = @field(row, field.name);
                    if (field.is_optional) {
                        if (val) |v| allocator.free(v);
                    } else {
                        allocator.free(val);
                    }
                }
            }
        };
        const base_query =
            \\SELECT
            \\ feed.location as location,
            \\ etag,
            \\ feed_id,
            \\ feed.updated_timestamp as updated_timestamp,
            \\ last_modified_utc
            \\FROM feed_update_http
            \\LEFT JOIN feed ON feed_update_http.feed_id = feed.id
        ;
        const query = if (opts.force) base_query else base_query ++ "\nWHERE update_countdown < 0;";
        var stmt = try self.db.sql_db.prepareDynamic(query);
        defer stmt.deinit();
        var stack_fallback = std.heap.stackFallback(256, self.allocator);
        const stack_allocator = stack_fallback.get();
        var iter = try stmt.iterator(UrlFeed, .{});
        while (try iter.nextAlloc(stack_allocator, .{})) |row| {
            defer row.deinit(stack_allocator);
            log.info("Updating: '{s}'", .{row.location});
            var arena = std.heap.ArenaAllocator.init(self.allocator);
            defer arena.deinit();
            // TODO: pull out resolveUrl and parsing into separate function?
            const resp_union = try opts.resolveUrl(&arena, row.location, row.last_modified_utc, row.etag);
            switch (resp_union) {
                .not_modified => {
                    log.info("Skipping update. Feed hasn't been modified.", .{});
                    continue;
                },
                .fail => |msg| {
                    log.info("Skipping update. Failed http request: {s}.", .{msg});
                    continue;
                },
                .ok => {},
            }
            const resp = resp_union.ok;
            // Prefer the content-type the server declared; fall back to sniffing.
            const rss_feed = switch (resp.content_type) {
                .xml_atom => parse.Atom.parse(&arena, resp.body) catch {
                    log.warn("Skipping update. Failed to parse Atom feed.", .{});
                    continue;
                },
                .xml_rss => parse.Rss.parse(&arena, resp.body) catch {
                    log.warn("Skipping update. Failed to parse RSS feed.", .{});
                    continue;
                },
                else => parse.parse(&arena, resp.body) catch {
                    log.warn("Skipping update. Failed to parse XML file.", .{});
                    continue;
                },
            };
            var savepoint = try self.db.sql_db.savepoint("updateUrlFeeds");
            defer savepoint.rollback();
            const update_data = UpdateData{
                .current = .{ .feed_id = row.feed_id, .updated_timestamp = row.updated_timestamp },
                .headers = resp.headers,
                .feed = rss_feed,
            };
            try self.updateUrlFeed(update_data, opts);
            savepoint.commit();
            log.info("Updated: '{s}'", .{row.location});
        }
    }

    pub const CurrentData = struct { feed_id: u64, updated_timestamp: ?i64 };

    const UpdateData = struct {
        current: CurrentData,
        feed: parse.Feed,
        headers: http.RespHeaders,
    };

    /// Persists a freshly fetched url feed: always refreshes the HTTP cache
    /// columns, and rewrites the feed row + items unless the publish date is
    /// unchanged (and opts.force is off).
    pub fn updateUrlFeed(self: *Self, data: UpdateData, opts: UpdateOptions) !void {
        const query_http_update =
            \\UPDATE feed_update_http SET
            \\ cache_control_max_age = ?,
            \\ expires_utc = ?,
            \\ last_modified_utc = ?,
            \\ etag = ?,
            \\ last_update = (strftime('%s', 'now'))
            \\WHERE feed_id = ?
        ;
        try self.db.exec(query_http_update, .{
            data.headers.cache_control_max_age,
            data.headers.expires_utc,
            data.headers.last_modified_utc,
            data.headers.etag,
            // where
            data.current.feed_id,
        });
        if (!opts.force) {
            if (data.feed.updated_timestamp != null and data.current.updated_timestamp != null and
                data.feed.updated_timestamp.? == data.current.updated_timestamp.?)
            {
                log.info("\tSkipping update: Feed publish date hasn't changed", .{});
                return;
            }
        }
        try self.db.exec(Table.feed.update_where_id, .{
            data.feed.link,
            data.feed.updated_raw,
            data.feed.updated_timestamp,
            data.current.feed_id,
        });
        try self.addItems(data.current.feed_id, data.feed.items);
    }

    /// Re-parses every local-file feed whose mtime changed (or all of them
    /// when opts.force is set).
    pub fn updateLocalFeeds(self: *Self, opts: UpdateOptions) !void {
        const LocalFeed = struct {
            location: []const u8,
            feed_id: u64,
            feed_updated_timestamp: ?i64,
            last_modified_timestamp: ?i64,

            pub fn deinit(row: @This(), allocator: Allocator) void {
                // If LocalFeed gets more allocatable fields use deallocFields()
                allocator.free(row.location);
            }
        };
        var contents = ArrayList(u8).init(self.allocator);
        defer contents.deinit();
        const query_update_local =
            \\UPDATE feed_update_local SET
            \\ last_modified_timestamp = ?,
            \\ last_update = (strftime('%s', 'now'))
            \\WHERE feed_id = ?
        ;
        // FIX (consistency): alias renamed feed_update_timestamp ->
        // feed_updated_timestamp to match the LocalFeed field name.
        const query_all =
            \\SELECT
            \\ feed.location as location,
            \\ feed_id,
            \\ feed.updated_timestamp as feed_updated_timestamp,
            \\ last_modified_timestamp
            \\FROM feed_update_local
            \\LEFT JOIN feed ON feed_update_local.feed_id = feed.id;
        ;
        var stack_fallback = std.heap.stackFallback(256, self.allocator);
        const stack_allocator = stack_fallback.get();
        var stmt = try self.db.sql_db.prepare(query_all);
        defer stmt.deinit();
        var iter = try stmt.iterator(LocalFeed, .{});
        while (try iter.nextAlloc(stack_allocator, .{})) |row| {
            defer row.deinit(stack_allocator);
            log.info("Updating: '{s}'", .{row.location});
            const file = try std.fs.openFileAbsolute(row.location, .{});
            defer file.close();
            var file_stat = try file.stat();
            // stat.mtime is nanoseconds; the DB stores whole seconds.
            const mtime_sec = @intCast(i64, @divFloor(file_stat.mtime, time.ns_per_s));
            if (!opts.force) {
                if (row.last_modified_timestamp) |last_modified| {
                    if (last_modified == mtime_sec) {
                        log.info("\tSkipping update: File hasn't been modified", .{});
                        continue;
                    }
                }
            }
            try contents.resize(0);
            try contents.ensureTotalCapacity(file_stat.size);
            try file.reader().readAllArrayList(&contents, file_stat.size);
            var arena = std.heap.ArenaAllocator.init(self.allocator);
            // FIX: the arena was never deinitialized, leaking one parse's worth
            // of memory per feed per update pass.
            defer arena.deinit();
            var rss_feed = try parse.parse(&arena, contents.items);
            var savepoint = try self.db.sql_db.savepoint("updateLocalFeeds");
            defer savepoint.rollback();
            try self.db.exec(query_update_local, .{ mtime_sec, row.feed_id });
            if (!opts.force) {
                if (rss_feed.updated_timestamp != null and row.feed_updated_timestamp != null and
                    rss_feed.updated_timestamp.? == row.feed_updated_timestamp.?)
                {
                    savepoint.commit();
                    log.info("\tSkipping update: Feed updated/pubDate hasn't changed", .{});
                    continue;
                }
            }
            try self.db.exec(Table.feed.update_where_id, .{
                rss_feed.link,
                rss_feed.updated_raw,
                rss_feed.updated_timestamp,
                row.feed_id,
            });
            try self.addItems(row.feed_id, rss_feed.items);
            savepoint.commit();
            log.info("Updated: '{s}'", .{row.location});
        }
    }

    /// Deletes the oldest items of one feed beyond g.max_items_per_feed.
    pub fn cleanItemsByFeedId(self: *Self, feed_id: u64) !void {
        const query =
            \\DELETE FROM item
            \\WHERE id IN
            \\ (SELECT id FROM item
            \\ WHERE feed_id = ?
            \\ ORDER BY id ASC
            \\ LIMIT (SELECT MAX(count(feed_id) - ?, 0) FROM item WHERE feed_id = ?)
            \\ )
        ;
        try db.delete(&self.db, query, .{ feed_id, g.max_items_per_feed, feed_id });
    }

    /// Deletes the oldest surplus items of every feed that exceeds
    /// g.max_items_per_feed.
    pub fn cleanItems(self: *Self) !void {
        const query =
            \\SELECT
            \\ feed_id, count(feed_id) as count
            \\FROM item
            \\GROUP BY feed_id
            \\HAVING count(feed_id) > ?{u16}
        ;
        const DbResult = struct { feed_id: u64, count: usize };
        const results = try self.db.selectAll(DbResult, query, .{g.max_items_per_feed});
        const del_query =
            \\DELETE FROM item
            \\WHERE id IN (SELECT id
            \\ FROM item
            \\ WHERE feed_id = ?
            \\ ORDER BY pub_date_utc ASC, modified_at ASC LIMIT ?
            \\)
        ;
        for (results) |r| {
            try self.db.exec(del_query, .{ r.feed_id, r.count - g.max_items_per_feed });
        }
    }

    pub const SearchResult = struct {
        location: []const u8,
        title: []const u8,
        link: ?[]const u8,
        id: u64,
    };

    /// LIKE-searches feeds by location, link and title. `terms` must be
    /// non-empty; caller owns the returned slice.
    pub fn search(self: *Self, allocator: Allocator, terms: [][]const u8) ![]SearchResult {
        const query_start = "SELECT location, title, link, id FROM feed WHERE ";
        const like_query = "location LIKE '%{s}%' OR link LIKE '%{s}%' OR title LIKE '%{s}%'";
        const query_or = " OR ";
        var total_cap = blk: {
            const or_len = (terms.len - 1) * query_or.len;
            const like_len = terms.len * (like_query.len - 9); // remove placeholders '{s}'
            break :blk query_start.len + or_len + like_len;
        };
        for (terms) |term| total_cap += term.len * 3;
        var query_arr = try ArrayList(u8).initCapacity(allocator, total_cap);
        defer query_arr.deinit();
        const writer = query_arr.writer();
        var buf: [256]u8 = undefined;
        {
            writer.writeAll(query_start) catch unreachable;
            const term = terms[0];
            // FIX: sqlite3_snprintf's %q expects a NUL-terminated C string;
            // term.ptr is a plain slice pointer with no sentinel, so it could
            // read past the slice. Duplicate with a sentinel first (matches
            // getRecentlyUpdatedFeedsByTags).
            const term_cstr = try allocator.dupeZ(u8, term);
            defer allocator.free(term_cstr);
            // Guard against sql injection
            // 'term' will be cut if longer than 'buf'
            const safe_term = sql.c.sqlite3_snprintf(buf.len, &buf, "%q", term_cstr.ptr);
            if (mem.len(safe_term) > term.len) {
                total_cap += mem.len(safe_term) - term.len;
                try query_arr.ensureTotalCapacity(total_cap);
            }
            writer.print(like_query, .{ safe_term, safe_term, safe_term }) catch unreachable;
        }
        for (terms[1..]) |term| {
            // FIX: same NUL-termination guard as above.
            const term_cstr = try allocator.dupeZ(u8, term);
            defer allocator.free(term_cstr);
            // Guard against sql injection
            // 'term' will be cut if longer than 'buf'
            const safe_term = sql.c.sqlite3_snprintf(buf.len, &buf, "%q", term_cstr.ptr);
            if (mem.len(safe_term) > term.len) {
                total_cap += mem.len(safe_term) - term.len;
                try query_arr.ensureTotalCapacity(total_cap);
            }
            writer.writeAll(query_or) catch unreachable;
            writer.print(like_query, .{ safe_term, safe_term, safe_term }) catch unreachable;
        }
        var stmt = self.db.sql_db.prepareDynamic(query_arr.items) catch |err| {
            log.err("SQL_ERROR: {s}\nFailed query:\n{s}", .{ self.db.sql_db.getDetailedError().message, query_arr.items });
            return err;
        };
        defer stmt.deinit();
        const results = stmt.all(SearchResult, allocator, .{}, .{}) catch |err| {
            log.err("SQL_ERROR: {s}\n", .{self.db.sql_db.getDetailedError().message});
            return err;
        };
        return results;
    }

    /// Adds tags to the feed stored at `location`; logs and returns if the
    /// location is unknown.
    pub fn addTagsByLocation(self: *Self, tags: []const u8, location: []const u8) !void {
        const feed_id = (try self.db.one(u64, "select id from feed where location = ?;", .{location})) orelse {
            log.err("Failed to find feed with location '{s}'", .{location});
            return;
        };
        try self.addTags(feed_id, tags);
    }

    /// Adds tags to feed `feed_id`; logs and returns if the id is unknown.
    pub fn addTagsById(self: *Self, tags: []const u8, feed_id: u64) !void {
        _ = (try self.db.one(void, "select id from feed where id = ?;", .{feed_id})) orelse {
            log.err("Failed to find feed with id '{d}'", .{feed_id});
            return;
        };
        try self.addTags(feed_id, tags);
    }

    /// Removes the given tags from the feed stored at `location`.
    pub fn removeTagsByLocation(self: *Self, tags: [][]const u8, location: []const u8) !void {
        const feed_id = (try self.db.one(u64, "SELECT id FROM feed WHERE location = ?;", .{location})) orelse {
            log.warn("Couldn't find location '{s}'", .{location});
            return;
        };
        try self.removeTagsById(tags, feed_id);
    }

    /// Removes the given tags from feed `id`, re-tagging it 'untagged' if
    /// nothing remains.
    pub fn removeTagsById(self: *Self, tags: [][]const u8, id: u64) !void {
        // FIX: feed_tag's columns are (feed_id, tag) — see addTags — so the
        // filter column is feed_id, not id.
        const query = "DELETE FROM feed_tag WHERE tag = ? AND feed_id = ?;";
        for (tags) |tag| {
            try self.db.exec(query, .{ tag, id });
        }
        try self.untaggedCheck(id);
    }

    /// Inserts the automatic 'untagged' tag when the feed has no tags left.
    pub fn untaggedCheck(self: *Self, feed_id: u64) !void {
        // FIX: filter on feed_id — feed_tag has no 'id' column (see addTags).
        const has_tags = (try self.db.one(void, "SELECT 1 FROM feed_tag WHERE feed_id = ? LIMIT 1", .{feed_id})) != null;
        if (!has_tags) {
            const insert_query = comptime fmt.comptimePrint("insert into feed_tag (feed_id, tag) values (?, '{s}')", .{g.tag_untagged});
            try self.db.exec(insert_query, .{feed_id});
        }
    }

    /// Removes the given tags from all feeds, re-tagging any feed left with
    /// no tags as 'untagged'.
    pub fn removeTags(self: *Self, tags: [][]const u8) !void {
        const query = "DELETE FROM feed_tag WHERE tag = ? RETURNING feed_id;";
        for (tags) |tag| {
            const feed_ids = try self.db.selectAll(u64, query, .{tag});
            for (feed_ids) |feed_id| {
                try self.untaggedCheck(feed_id);
            }
        }
    }

    pub const TagCount = struct { name: []const u8, count: u32 };

    /// Returns every tag with its usage count, sorted alphabetically.
    pub fn getAllTags(self: *Self) ![]TagCount {
        const query = "SELECT tag as name, count(tag) FROM feed_tag GROUP BY tag ORDER BY tag ASC;";
        return try self.db.selectAll(TagCount, query, .{});
    }

    pub const Feed = struct {
        title: []const u8,
        link: ?[]const u8,
    };

    /// Fetches a feed's title and link, or null if the id is unknown.
    pub fn getFeedById(self: *Self, id: u64) !?Feed {
        return try self.db.oneAlloc(Feed, "select title, link from feed where id = ? limit 1;", .{id});
    }

    pub const RecentFeed = struct {
        id: u64,
        updated_timestamp: ?i64,
        title: []const u8,
        link: ?[]const u8,
    };

    /// Lists all feeds ordered by most recent activity (feed publish date or
    /// newest item date, whichever is later).
    pub fn getRecentlyUpdatedFeeds(self: *Self) ![]RecentFeed {
        const query =
            \\SELECT
            \\ id
            \\, max(updated_timestamp, item.pub_date_utc) AS updated_timestamp
            \\, title
            \\, link
            \\FROM feed
            \\LEFT JOIN
            \\ (SELECT feed_id, max(pub_date_utc) as pub_date_utc FROM item GROUP BY feed_id) item
            \\ON item.feed_id = feed.id
            \\ORDER BY updated_timestamp DESC;
        ;
        return try self.db.selectAll(RecentFeed, query, .{});
    }

    /// Same as getRecentlyUpdatedFeeds but restricted to feeds carrying any of
    /// the given tags. `tags` must be non-empty; caller owns the result.
    pub fn getRecentlyUpdatedFeedsByTags(self: *Self, tags: [][]const u8) ![]RecentFeed {
        const query_start =
            \\SELECT
            \\ id
            \\, max(updated_timestamp, item.pub_date_utc) AS updated_timestamp
            \\, title
            \\, link
            \\FROM feed
            \\LEFT JOIN
            \\ (SELECT feed_id, max(pub_date_utc) as pub_date_utc FROM item GROUP BY feed_id) item
            \\ON item.feed_id = feed.id
            \\WHERE item.feed_id in (SELECT DISTINCT feed_id FROM feed_tag WHERE tag IN (
        ;
        const query_end =
            \\))
            \\ORDER BY updated_timestamp DESC;
        ;
        var total_cap = query_start.len + query_end.len + tags[0].len + 2; // 2 - two quotes
        for (tags[1..]) |tag| total_cap += tag.len + 3; // 3 - two quotes + comma
        var query_arr = try ArrayList(u8).initCapacity(self.allocator, total_cap);
        defer query_arr.deinit();
        const writer = query_arr.writer();
        writer.writeAll(query_start) catch unreachable;
        var buf: [256]u8 = undefined;
        {
            const tag = tags[0];
            const tag_cstr = try self.allocator.dupeZ(u8, tag);
            defer self.allocator.free(tag_cstr);
            // FIX: removed stray debug `print("{s}", .{tag});` left over from
            // development.
            // Guard against sql injection
            // 'tag' will be cut if longer than 'buf'
            const safe_term = sql.c.sqlite3_snprintf(buf.len, &buf, "%Q", tag_cstr.ptr);
            // total_cap takes into account adding quotes to both ends
            const tag_len_with_quotes = tag.len + 2;
            if (mem.len(safe_term) > tag_len_with_quotes) {
                total_cap += mem.len(safe_term) - tag_len_with_quotes;
                try query_arr.ensureTotalCapacity(total_cap);
            }
            writer.writeAll(mem.span(safe_term)) catch unreachable;
        }
        for (tags[1..]) |tag| {
            // Guard against sql injection
            // 'tag' will be cut if longer than 'buf'
            const tag_cstr = try self.allocator.dupeZ(u8, tag);
            defer self.allocator.free(tag_cstr);
            const safe_term = sql.c.sqlite3_snprintf(buf.len, &buf, "%Q", tag_cstr.ptr);
            // total_cap takes into account adding quotes to both ends
            const tag_len_with_quotes = tag.len + 2;
            if (mem.len(safe_term) > tag_len_with_quotes) {
                total_cap += mem.len(safe_term) - tag_len_with_quotes;
                try query_arr.ensureTotalCapacity(total_cap);
            }
            writer.writeAll(",") catch unreachable;
            writer.writeAll(mem.span(safe_term)) catch unreachable;
        }
        writer.writeAll(query_end) catch unreachable;
        var stmt = self.db.sql_db.prepareDynamic(query_arr.items) catch |err| {
            log.err("SQL_ERROR: {s}\nFailed query:\n{s}", .{ self.db.sql_db.getDetailedError().message, query_arr.items });
            return err;
        };
        defer stmt.deinit();
        const results = stmt.all(RecentFeed, self.allocator, .{}, .{}) catch |err| {
            log.err("SQL_ERROR: {s}\n", .{self.db.sql_db.getDetailedError().message});
            return err;
        };
        return results;
    }

    const Item = struct {
        title: []const u8,
        link: ?[]const u8,
        pub_date_utc: ?i64,
    };

    /// Returns a feed's items, newest row first.
    pub fn getItems(self: *Self, id: u64) ![]Item {
        const query =
            \\SELECT title, link, pub_date_utc FROM item WHERE feed_id = ?{u64} ORDER BY id DESC;
        ;
        return try self.db.selectAll(Item, query, .{id});
    }

    /// Number of feeds that have no tags at all.
    pub fn untaggedFeedsCount(self: *Self) !u32 {
        const query =
            \\SELECT count(id) from feed where id not in (SELECT DISTINCT feed_id FROM feed_tag);
        ;
        return (try self.db.one(u32, query, .{})).?;
    }
};

/// Null-tolerant string equality: two nulls are equal, null never equals a value.
fn equalNullString(a: ?[]const u8, b: ?[]const u8) bool {
    if (a == null and b == null) return true;
    if (a == null or b == null) return false;
    return mem.eql(u8, a.?, b.?);
}

// Canned successful HTTP response used by the tests below.
fn testDataRespOk() http.Ok {
    const contents = @embedFile("../test/sample-rss-2.xml");
    const location = "https://lobste.rs/";
    return http.Ok{
        .location = location,
        .body = contents,
        .content_type = http.ContentType.xml_rss,
        .headers = .{ .etag = "etag_value" },
    };
}

// Stand-in for http.resolveRequest that always returns the canned response.
pub fn testResolveRequest(
    _: *std.heap.ArenaAllocator,
    _: []const u8, // url
    _: ?i64, // last_modified
    _: ?[]const u8, // etag
) !http.FeedResponse {
    const ok = testDataRespOk();
    return http.FeedResponse{ .ok = ok };
}

// Can't run this test with cli.zig addFeedHttp tests
// Will throw a type signature error pertaining to UpdateOptions.resolveUrl function
// Don't run feed_db.zig and cli.zig tests that touch UpdateOptions together
test "Storage fake net" {
    std.testing.log_level = .debug;
    const base_allocator = std.testing.allocator;
    var arena = std.heap.ArenaAllocator.init(base_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
    const test_data = testDataRespOk();
    var feed_db = try Storage.init(allocator, null);
    var feed = try parse.parse(&arena, test_data.body);
    var savepoint = try feed_db.db.sql_db.savepoint("test_net");
    defer savepoint.rollback();
    const id = try feed_db.addFeed(feed, test_data.location);
    try feed_db.addFeedUrl(id, test_data.headers);
    const ItemsResult = struct { title: []const u8 };
    const all_items_query = "select title from item order by id DESC";
    // No items yet
    const saved_items = try feed_db.db.selectAll(ItemsResult, all_items_query, .{});
    try expect(saved_items.len == 0);
    // Add some items
    const start_index = 3;
    try expect(start_index < feed.items.len);
    const items_src = feed.items[3..];
    var tmp_items = try allocator.alloc(parse.Feed.Item, items_src.len);
    std.mem.copy(parse.Feed.Item, tmp_items, items_src);
    try feed_db.addItems(id, tmp_items);
    {
        const items = try feed_db.db.selectAll(ItemsResult, all_items_query, .{});
        try expect(items.len == items_src.len);
        for (items) |item, i| {
            try std.testing.expectEqualStrings(items_src[i].title, item.title);
        }
    }
    // update feed
    try feed_db.updateUrlFeeds(.{ .force = true, .resolveUrl = testResolveRequest });
    {
        const items = try feed_db.db.selectAll(ItemsResult, all_items_query, .{});
        try expect(items.len == feed.items.len);
        for (items) |item, i| {
            try std.testing.expectEqualStrings(feed.items[i].title, item.title);
        }
    }
    // delete feed
    try feed_db.deleteFeed(id);
    {
        const count_item = try feed_db.db.one(u32, "select count(id) from item", .{});
        try expect(count_item.? == 0);
        const count_feed = try feed_db.db.one(u32, "select count(id) from feed", .{});
        try expect(count_feed.? == 0);
        const count_http = try feed_db.db.one(u32, "select count(feed_id) from feed_update_http", .{});
        try expect(count_http.? == 0);
    }
    savepoint.commit();
}

test "Storage local" {
    std.testing.log_level = .debug;
    const base_allocator = std.testing.allocator;
    var arena = std.heap.ArenaAllocator.init(base_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
    var feed_db = try Storage.init(allocator, null);
    // const abs_path = "/media/hdd/code/feedgaze/test/sample-rss-2.xml";
    const rel_path = "test/rss2.xml";
    var path_buf: [fs.MAX_PATH_BYTES]u8 = undefined;
    const abs_path = try fs.cwd().realpath(rel_path, &path_buf);
    const contents = try shame.getFileContents(allocator, abs_path);
    const file = try fs.openFileAbsolute(abs_path, .{});
    defer file.close();
    const stat = try file.stat();
    const mtime_sec = @intCast(i64, @divFloor(stat.mtime, time.ns_per_s));
    var feed = try parse.parse(&arena, contents);
    var savepoint = try feed_db.db.sql_db.savepoint("test_local");
    defer savepoint.rollback();
    const id = try feed_db.addFeed(feed, abs_path);
    try feed_db.addFeedLocal(id, mtime_sec);
    const last_items = feed.items[3..];
    var tmp_items = try allocator.alloc(parse.Feed.Item, last_items.len);
    std.mem.copy(parse.Feed.Item, tmp_items, last_items);
    try feed_db.addItems(id, tmp_items);
    try feed_db.addTags(id, "local,local,not-net");
    const LocalItem = struct { title: []const u8 };
    const all_items_query = "select title from item order by id DESC";
    // Feed local
    {
        const items = try feed_db.db.selectAll(LocalItem, all_items_query, .{});
        try expect(items.len == last_items.len);
        for (items) |item, i| {
            try expectEqualStrings(item.title, last_items[i].title);
        }
        const tags = try feed_db.db.selectAll(struct { tag: []const u8 }, "select tag from feed_tag", .{});
        try expectEqual(@as(usize, 2), tags.len);
        try expectEqualStrings(tags[0].tag, "local");
        try expectEqualStrings(tags[1].tag, "not-net");
    }
    // Remove one tag
    {
        var tag_local = "local";
        var rm_tags = &[_][]const u8{tag_local};
        try feed_db.removeTags(rm_tags);
        const tags = try feed_db.db.selectAll(struct { tag: []const u8 }, "select tag from feed_tag", .{});
        try expectEqual(@as(usize, 1), tags.len);
        try expectEqualStrings(tags[0].tag, "not-net");
    }
    const LocalUpdateResult = struct { feed_id: u64, update_interval: u32, last_update: i64, last_modified_timestamp: i64 };
    const local_query = "select feed_id, update_interval, last_update, last_modified_timestamp from feed_update_local";
    // Local feed update
    {
        const count = try feed_db.db.one(u32, "select count(*) from feed_update_local", .{});
        try expect(count.? == 1);
        const feed_dbfeeds = try feed_db.db.one(LocalUpdateResult, local_query, .{});
        const first = feed_dbfeeds.?;
        try expect(first.feed_id == 1);
        try expect(first.update_interval == @import("queries.zig").update_interval);
        const current_time = std.time.timestamp();
        try expect(first.last_update <= current_time);
        try expect(first.last_modified_timestamp == mtime_sec);
    }
    try feed_db.updateLocalFeeds(.{ .force = true });
    // Items
    {
        const items = try feed_db.db.selectAll(LocalItem, all_items_query, .{});
        try expect(items.len == feed.items.len);
        for (items) |item, i| {
            const f_item = feed.items[i];
            try expectEqualStrings(item.title, f_item.title);
        }
    }
    {
        const feed_dbfeeds = try feed_db.db.one(LocalUpdateResult, local_query, .{});
        const first = feed_dbfeeds.?;
        try expect(first.feed_id == 1);
        const current_time = std.time.timestamp();
        try expect(first.last_update <= current_time);
    }
    try feed_db.cleanItems();
    // Delete feed
    {
        try feed_db.deleteFeed(id);
        const count_item = try feed_db.db.one(u32, "select count(id) from item", .{});
        try expect(count_item.? == 0);
        const count_feed = try feed_db.db.one(u32, "select count(id) from feed", .{});
        try expect(count_feed.? == 0);
        const count_http = try feed_db.db.one(u32, "select count(feed_id) from feed_update_local", .{});
        try expect(count_http.? == 0);
    }
    savepoint.commit();
}

test "different urls, same feed items" {
    std.testing.log_level = .debug;
    const base_allocator = std.testing.allocator;
    var arena = std.heap.ArenaAllocator.init(base_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
    var storage = try Storage.init(allocator, null);
    const location1 = "location1";
    const items_base = [_]parse.Feed.Item{
        .{ .title = "title1", .id = "same_id1" },
        .{ .title = "title2", .id = "same_id2" },
    };
    const parsed_feed = parse.Feed{
        .title = "Same stuff",
    };
    const id1 = try storage.addFeed(parsed_feed, location1);
    var items: [items_base.len]parse.Feed.Item = undefined;
    mem.copy(parse.Feed.Item, &items, &items_base);
    try storage.addItems(id1, &items);
    const location2 = "location2";
    const id2 = try storage.addFeed(parsed_feed, location2);
    try storage.addItems(id2, &items);
    const feed_count_query = "select count(id) from feed";
    const item_feed_count_query = "select count(DISTINCT feed_id) from item";
    const feed_count = try storage.db.one(u32, feed_count_query, .{});
    const item_feed_count = try storage.db.one(u32, item_feed_count_query, .{});
    try expectEqual(feed_count.?, item_feed_count.?);
    const item_count_query = "select count(id) from item";
    const item_count = try storage.db.one(u32, item_count_query, .{});
    try expectEqual(items.len * 2, item_count.?);
}

test "updateUrlFeeds: check that only neccessary url feeds are updated" {
    std.testing.log_level = .debug;
    const base_allocator = std.testing.allocator;
    var arena = std.heap.ArenaAllocator.init(base_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
    var storage = try Storage.init(allocator, null);
    // NOTE(review): addFeedUrl takes http.RespHeaders but these blocks build
    // http.Ok values — confirm the two types are coercible in http.zig.
    {
        const location1 = "location1";
        const parsed_feed = parse.Feed{ .title = "Title: location 1" };
        const id1 = try storage.addFeed(parsed_feed, location1);
        const ok1 = http.Ok{
            .location = "feed_location1",
            .body = "",
            .expires_utc = std.math.maxInt(i64), // no update
        };
        try storage.addFeedUrl(id1, ok1);
    }
    {
        const location1 = "location2";
        const parsed_feed = parse.Feed{ .title = "Title: location 2" };
        const id1 = try storage.addFeed(parsed_feed, location1);
        const ok1 = http.Ok{
            .location = "feed_location2",
            .body = "",
            .cache_control_max_age = 300,
        };
        try storage.addFeedUrl(id1, ok1);
        // will update feed
        const query = "update feed_update_http set last_update = ? where feed_id = ?;";
        try storage.db.exec(query, .{ std.math.maxInt(u32), id1 });
    }
    {
        const location1 = "location3";
        const parsed_feed = parse.Feed{ .title = "Title: location 3" };
        const id1 = try storage.addFeed(parsed_feed, location1);
        const ok1 = http.Ok{
            .location = "feed_location3",
            .body = "",
        };
        try storage.addFeedUrl(id1, ok1);
        // will update feed
        const query = "update feed_update_http set last_update = ? where feed_id = ?;";
        try storage.db.exec(query, .{ std.math.maxInt(u32), id1 });
    }
    try storage.updateUrlFeeds(Storage.UpdateOptions{ .resolveUrl = testResolveRequest });
    const query_last_update = "select last_update from feed_update_http";
    {
        const last_updates = try storage.db.selectAll(i64, query_last_update, .{});
        for (last_updates) |last_update| try expect(last_update < std.math.maxInt(u32));
    }
    try storage.db.exec("update feed_update_http set last_update = 0;", .{});
    try storage.updateUrlFeeds(Storage.UpdateOptions{ .resolveUrl = testResolveRequest, .force = true });
    {
        const last_updates = try storage.db.selectAll(i64, query_last_update, .{});
        for (last_updates) |last_update| try expect(last_update > 0);
    }
}
src/feed_db.zig
const std = @import("std");
const helper = @import("helper.zig");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const HashMap = std.AutoHashMap;
const PointSet = HashMap(Point, void);

const input = @embedFile("../inputs/day09.txt");

/// Solves both parts of AoC 2021 day 9 over the embedded input and, when
/// `stdout_` is non-null, prints the two results.
pub fn run(alloc: Allocator, stdout_: anytype) !void {
    const heightmap = try Heightmap.init(alloc, input);
    defer heightmap.deinit();

    var low_points = ArrayList(Point).init(alloc);
    defer low_points.deinit();

    const res1 = try part1(&heightmap, &low_points);
    const res2 = try part2(alloc, &heightmap, &low_points);

    if (stdout_) |stdout| {
        try stdout.print("Part 1: {}\n", .{res1});
        try stdout.print("Part 2: {}\n", .{res2});
    }
}

/// Part 1: sum of (height + 1) over every low point of the grid.
/// Side effect: appends every low point found to `low_points`, which part 2
/// reuses as basin seeds.
fn part1(heightmap: *const Heightmap, low_points: *ArrayList(Point)) !i32 {
    var sum: i32 = 0;
    var i: usize = 0;
    while (i < heightmap.m) : (i += 1) {
        var j: usize = 0;
        while (j < heightmap.n) : (j += 1) {
            if (heightmap.isLowPoint(i, j)) {
                sum += heightmap.get(i, j).? + 1;
                try low_points.append(Point{ .x = i, .y = j });
            }
        }
    }
    return sum;
}

/// Part 2: product of the sizes of the three largest basins.
/// The three scratch sets are allocated once here and recycled across every
/// basin flood fill.
fn part2(
    alloc: Allocator,
    heightmap: *const Heightmap,
    low_points: *const ArrayList(Point),
) !i32 {
    var biggest: [3]i32 = .{0} ** 3;
    var visited = PointSet.init(alloc);
    var to_check = PointSet.init(alloc);
    var latest = PointSet.init(alloc);
    defer visited.deinit();
    defer to_check.deinit();
    defer latest.deinit();
    for (low_points.items) |point| {
        const basin = try heightmap.getBasinSize(point, &visited, &to_check, &latest);
        updateBiggest(&biggest, @intCast(i32, basin));
    }
    return biggest[0] * biggest[1] * biggest[2];
}

/// Keeps `biggest` as the three largest values seen so far, in ascending order
/// (biggest[2] is the maximum).
fn updateBiggest(biggest: *[3]i32, basin: i32) void {
    if (basin > biggest[2]) {
        biggest[0] = biggest[1];
        biggest[1] = biggest[2];
        biggest[2] = basin;
    } else if (basin > biggest[1]) {
        biggest[0] = biggest[1];
        biggest[1] = basin;
    } else if (basin > biggest[0]) {
        biggest[0] = basin;
    }
}

const Point = struct {
    x: usize,
    y: usize,
};

/// Dense m x n grid of digit heights parsed from the puzzle input.
const Heightmap = struct {
    allocator: Allocator,
    m: usize, // number of rows
    n: usize, // number of columns
    data: []const u8, // row-major heights, one byte per cell (0-9)

    const Self = @This();

    /// Parses the input text into a heightmap. Caller must call deinit().
    pub fn init(alloc: Allocator, inp: []const u8) !Self {
        var lines = tokenize(u8, inp, "\r\n");
        const n = lines.next().?.len;
        // BUGFIX: the row count was previously `count(u8, inp, "\n")`, which
        // undercounts by one when the input has no trailing newline. Counting
        // the digits directly and dividing by the row width is always correct.
        var digit_count: usize = 0;
        for (inp) |char| {
            if (std.ascii.isDigit(char)) digit_count += 1;
        }
        const m = digit_count / n;
        var data = try alloc.alloc(u8, digit_count);
        var i: usize = 0;
        for (inp) |char| {
            if (!std.ascii.isDigit(char)) continue;
            data[i] = char - '0';
            i += 1;
        }
        return Self{ .allocator = alloc, .m = m, .n = n, .data = data };
    }

    // usize is unsigned, so `i < m and j < n` also rejects wrapped negatives.
    fn isValidPos(self: *const Self, i: usize, j: usize) bool {
        return i < self.m and j < self.n;
    }

    /// Height at (i, j), or null when the position is outside the grid.
    pub fn get(self: *const Self, i: usize, j: usize) ?u8 {
        if (!self.isValidPos(i, j)) return null;
        return self.data[i * self.n + j];
    }

    pub fn getPoint(self: *const Self, point: Point) ?u8 {
        return self.get(point.x, point.y);
    }

    /// Iterator over the up-to-4 orthogonal neighbours of (i, j).
    pub fn adjacentIter(self: *const Self, i: usize, j: usize) ?AdjacentIterator {
        if (!self.isValidPos(i, j)) return null;
        return AdjacentIterator{ .heightmap = self, .i = i, .j = j, .counter = 0 };
    }

    /// True when (i, j) is strictly lower than all of its neighbours.
    pub fn isLowPoint(self: *const Self, i: usize, j: usize) bool {
        const val = self.get(i, j).?;
        if (val == 9) return false; // shortcut: a 9 can never be a low point
        var adj = self.adjacentIter(i, j).?;
        while (adj.next()) |neighbor| {
            if (val >= neighbor) return false;
        }
        return true;
    }

    /// Flood fill from `origin`, stopping at height-9 cells; returns the
    /// number of cells in the basin. The three sets are caller-provided
    /// scratch space and are cleared on entry.
    pub fn getBasinSize(
        self: *const Self,
        origin: Point,
        visited: *PointSet,
        to_check_: *PointSet,
        latest_: *PointSet,
    ) !usize {
        var to_check = to_check_;
        var latest = latest_;
        visited.clearRetainingCapacity();
        to_check.clearRetainingCapacity();
        latest.clearRetainingCapacity();
        try to_check.put(origin, {});
        // BFS by generations: `to_check` is the current frontier, `latest`
        // collects the next one, and the two are swapped each round.
        while (to_check.count() > 0) {
            var keys = to_check.keyIterator();
            while (keys.next()) |key| {
                try visited.put(key.*, {});
            }
            keys = to_check.keyIterator();
            while (keys.next()) |key| {
                var adj = self.adjacentIter(key.x, key.y).?;
                while (adj.nextPoint()) |point| {
                    if (self.getPoint(point).? == 9) continue; // 9s bound basins
                    if (!visited.contains(point)) try latest.put(point, {});
                }
            }
            to_check.clearRetainingCapacity();
            std.mem.swap(*PointSet, &to_check, &latest);
        }
        return visited.count();
    }

    pub fn deinit(self: *const Self) void {
        self.allocator.free(self.data);
    }

    /// Yields N, W, S, E neighbours that lie inside the grid, in that order.
    const AdjacentIterator = struct {
        heightmap: *const Heightmap,
        i: usize,
        j: usize,
        counter: u8, // which of the 4 directions to try next

        pub fn nextPoint(self: *@This()) ?Point {
            const i = self.i;
            const j = self.j;
            if (self.counter == 0) {
                self.counter += 1;
                if (i > 0) return Point{ .x = i - 1, .y = j };
            }
            if (self.counter == 1) {
                self.counter += 1;
                if (j > 0) return Point{ .x = i, .y = j - 1 };
            }
            if (self.counter == 2) {
                self.counter += 1;
                if (i < self.heightmap.m - 1) return Point{ .x = i + 1, .y = j };
            }
            if (self.counter == 3) {
                self.counter += 1;
                if (j < self.heightmap.n - 1) return Point{ .x = i, .y = j + 1 };
            }
            return null;
        }

        pub fn next(self: *@This()) ?u8 {
            if (self.nextPoint()) |point| {
                return self.heightmap.getPoint(point);
            }
            return null;
        }
    };
};

const eql = std.mem.eql;
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const count = std.mem.count;
const parseUnsigned = std.fmt.parseUnsigned;
const parseInt = std.fmt.parseInt;
const sort = std.sort.sort;
src/day09.zig
const sf = @import("../sfml_import.zig");
const math = @import("std").math;

/// An RGBA color with one byte per channel, layout-compatible with sfColor.
pub const Color = packed struct {
    const Self = @This();

    /// Converts a color from a csfml object
    /// For inner workings
    pub fn fromCSFML(col: sf.c.sfColor) Self {
        return Self{
            .r = col.r,
            .g = col.g,
            .b = col.b,
            .a = col.a,
        };
    }

    /// Converts this color to a csfml one
    /// For inner workings
    pub fn toCSFML(self: Self) sf.c.sfColor {
        return sf.c.sfColor{
            .r = self.r,
            .g = self.g,
            .b = self.b,
            .a = self.a,
        };
    }

    /// Inits a color with rgb components; alpha defaults to fully opaque.
    pub fn fromRGB(red: u8, green: u8, blue: u8) Self {
        return Self{
            .r = red,
            .g = green,
            .b = blue,
            .a = 0xff,
        };
    }

    /// Inits a color with rgba components
    pub fn fromRGBA(red: u8, green: u8, blue: u8, alpha: u8) Self {
        return Self{
            .r = red,
            .g = green,
            .b = blue,
            .a = alpha,
        };
    }

    /// Inits a color from a 32bits value (RGBA in that order)
    pub fn fromInteger(int: u32) Self {
        return Self{
            .r = @truncate(u8, (int & 0xff000000) >> 24),
            .g = @truncate(u8, (int & 0x00ff0000) >> 16),
            .b = @truncate(u8, (int & 0x0000ff00) >> 8),
            .a = @truncate(u8, (int & 0x000000ff) >> 0),
        };
    }

    /// Gets a 32 bit integer representing the color (RGBA byte order).
    pub fn toInteger(self: Self) u32 {
        return (@intCast(u32, self.r) << 24) |
            (@intCast(u32, self.g) << 16) |
            (@intCast(u32, self.b) << 8) |
            (@intCast(u32, self.a) << 0);
    }

    /// Creates a color with rgba floats from 0 to 1; out-of-range values are clamped.
    fn fromFloats(red: f32, green: f32, blue: f32, alpha: f32) Self {
        return Self{
            .r = @floatToInt(u8, math.clamp(red, 0.0, 1.0) * 255.0),
            .g = @floatToInt(u8, math.clamp(green, 0.0, 1.0) * 255.0),
            .b = @floatToInt(u8, math.clamp(blue, 0.0, 1.0) * 255.0),
            .a = @floatToInt(u8, math.clamp(alpha, 0.0, 1.0) * 255.0),
        };
    }

    /// Creates a color from HSV and transparency components (this is not part of the SFML)
    /// hue is in degrees, saturation and value are in percents
    pub fn fromHSVA(hue: f32, saturation: f32, value: f32, alpha: f32) Self {
        const s = saturation / 100;
        const v = value / 100;
        const a = alpha;
        if (v <= 0.0) return fromFloats(0, 0, 0, a);
        // BUGFIX: the previous code only reset the hue when it was exactly one
        // turn too large (`if (hh >= 360) hh = 0`), so e.g. 370 mapped to red
        // instead of 10 degrees. @mod wraps any hue (including negatives, and
        // hue == 360 still maps to 0 as before) into [0, 360).
        var hh: f32 = @mod(hue, 360.0);
        hh /= 60.0; // sextant of the color wheel, in [0, 6)
        const ff: f32 = hh - math.floor(hh); // fractional position within the sextant
        const p: f32 = v * (1.0 - s);
        const q: f32 = v * (1.0 - (s * ff));
        const t: f32 = v * (1.0 - (s * (1.0 - ff)));
        return switch (@floatToInt(usize, hh)) {
            0 => fromFloats(v, t, p, a),
            1 => fromFloats(q, v, p, a),
            2 => fromFloats(p, v, t, a),
            3 => fromFloats(p, q, v, a),
            4 => fromFloats(t, p, v, a),
            else => fromFloats(v, p, q, a),
        };
    }

    // Colors
    /// Black color
    pub const Black = Self.fromRGB(0, 0, 0);
    /// White color
    pub const White = Self.fromRGB(255, 255, 255);
    /// Red color
    pub const Red = Self.fromRGB(255, 0, 0);
    /// Green color
    pub const Green = Self.fromRGB(0, 255, 0);
    /// Blue color
    pub const Blue = Self.fromRGB(0, 0, 255);
    /// Yellow color
    pub const Yellow = Self.fromRGB(255, 255, 0);
    /// Magenta color
    pub const Magenta = Self.fromRGB(255, 0, 255);
    /// Cyan color
    pub const Cyan = Self.fromRGB(0, 255, 255);
    /// Transparent color
    pub const Transparent = Self.fromRGBA(0, 0, 0, 0);

    /// Red component
    r: u8,
    /// Green component
    g: u8,
    /// Blue component
    b: u8,
    /// Alpha (opacity) component
    a: u8
};

test "color: conversions" {
    const tst = @import("std").testing;

    var code: u32 = 0x4BDA9CFF;
    var col = Color.fromInteger(code);
    tst.expectEqual(Color.fromRGB(75, 218, 156), col);
    tst.expectEqual(code, col.toInteger());

    var csfml_col = sf.c.sfColor_fromInteger(@as(c_uint, code));
    // TODO : issue #2
    //tst.expectEqual(Color.fromCSFML(csfml_col), col);
}

test "color: hsv to rgb" {
    const tst = @import("std").testing;

    var col = Color.fromHSVA(10, 20, 100, 255);
    tst.expectEqual(Color.fromRGB(255, 212, 204), col);
}
src/sfml/graphics/color.zig
const std = @import("std"); const fs = std.fs; const i2c = @import("bus/i2c.zig"); // TODO : generate colorwheel pattern via comptime calls const colors = [_][3]u8{ [_]u8{ 255, 0, 0 }, [_]u8{ 183, 72, 0 }, [_]u8{ 111, 144, 0 }, [_]u8{ 39, 216, 0 }, [_]u8{ 0, 222, 33 }, [_]u8{ 0, 150, 105 }, [_]u8{ 0, 78, 177 }, [_]u8{ 0, 6, 249 }, [_]u8{ 66, 0, 189 }, [_]u8{ 138, 0, 117 }, [_]u8{ 210, 0, 45 }, [_]u8{ 231, 24, 0 }, [_]u8{ 159, 96, 0 }, [_]u8{ 87, 168, 0 }, [_]u8{ 15, 240, 0 }, [_]u8{ 0, 198, 57 }, [_]u8{ 0, 126, 129 }, [_]u8{ 0, 54, 201 }, [_]u8{ 18, 0, 237 }, [_]u8{ 90, 0, 165 }, [_]u8{ 162, 0, 93 }, [_]u8{ 234, 0, 21 }, }; pub const LP50xx = struct { addr: u8 = 0x14, fd: fs.File, pub fn enable(self: LP50xx) void { var buffer = [_]u8{ 0x00, 0x40 }; _ = i2c.write_block(self.fd, self.addr, &buffer); } pub fn set_brightness_index(self: LP50xx, index: u8, value: u8) void { var buffer = [_]u8{ 0x07 + index, value }; _ = i2c.write_block(self.fd, self.addr, &buffer); } pub fn set_brightness(self: LP50xx, value: u8) void { self.set_brightness_index(0, value); self.set_brightness_index(1, value); self.set_brightness_index(2, value); self.set_brightness_index(3, value); } pub fn set(self: LP50xx, index: u8, color: [3]u8) void { var buffer = [_]u8{ 0x0B + index * 3, color[0], color[1], color[2] }; _ = i2c.write_block(self.fd, self.addr, &buffer); } pub fn off(self: LP50xx) void { self.set(0, [_]u8{ 0, 0, 0 }); self.set(1, [_]u8{ 0, 0, 0 }); self.set(2, [_]u8{ 0, 0, 0 }); self.set(3, [_]u8{ 0, 0, 0 }); } pub fn read_register(self: LP50xx, register: u8, length: u8) ?[]u8 { return i2c.read_block(self.fd, self.addr, register, length); } pub fn spin(self: LP50xx) void { var step: u8 = 0; while (true) { for ([_]u8{ 0, 1, 2 }) |index| { self.set(index, colors[(step -% index) % colors.len]); } step +%= 1; std.time.sleep(100 * std.time.ns_per_ms); } } };
src/led_driver.zig
const std = @import("std");
const redis = @cImport({
    @cInclude("redismodule.h");
});

// MK.EVALKEY scriptkey numkeys [key ...] [arg ...]
//
// Runs the Lua script stored in the string key `scriptkey` by forwarding it,
// together with the remaining arguments, to the built-in EVAL command.
pub export fn MK_EVALKEY(ctx: ?*redis.RedisModuleCtx, argv: [*c]?*redis.RedisModuleString, argc: c_int) c_int {
    const not_enough_args = argc < 3;
    // When Redis is only asking which argument positions are keys (for
    // cluster routing), report them and bail out without executing anything.
    if (1 == redis.RedisModule_IsKeysPositionRequest.?(ctx)) {
        if (not_enough_args) {
            return redis.REDISMODULE_OK;
        }
        // We know we can access numkeys
        var numkeys: c_longlong = undefined;
        if (redis.REDISMODULE_ERR == redis.RedisModule_StringToLongLong.?(argv[2], &numkeys)) {
            return redis.REDISMODULE_OK;
        }
        // If numkeys is a plausible number
        if (numkeys <= argc - 3) {
            // argv[1] is the script key itself; argv[3..3+numkeys] are the script's keys.
            redis.RedisModule_KeyAtPos.?(ctx, 1);
            var i: c_int = 0;
            while (i < numkeys) : (i += 1) {
                redis.RedisModule_KeyAtPos.?(ctx, 3 + i);
            }
        }
        return redis.REDISMODULE_OK;
    }
    // We need at least 3 arguments (command name, scriptkey, numkeys)
    if (not_enough_args) return redis.RedisModule_WrongArity.?(ctx);
    // We open scriptkey (read-only); CloseKey is deferred for every exit path.
    const key = @ptrCast(?*redis.RedisModuleKey, redis.RedisModule_OpenKey.?(ctx, argv[1], redis.REDISMODULE_READ));
    defer redis.RedisModule_CloseKey.?(key);
    // We return an error if it's not a string key
    const keytype = redis.RedisModule_KeyType.?(key);
    if (keytype != redis.REDISMODULE_KEYTYPE_STRING) {
        return redis.RedisModule_ReplyWithError.?(ctx, redis.REDISMODULE_ERRORMSG_WRONGTYPE);
    }
    // Read the key value via direct memory access (valid while the key is open)
    var len: usize = undefined;
    const script = redis.RedisModule_StringDMA.?(key, &len, redis.REDISMODULE_READ);
    // Call EVAL: "b" passes the script as buffer+length, "v" forwards
    // argv[2..argc] (numkeys, keys, args) as a vector of RedisModuleStrings.
    const reply = redis.RedisModule_Call.?(ctx, "EVAL", "bv", script, len, &argv[2], @intCast(usize, argc - 2));
    defer redis.RedisModule_FreeCallReply.?(reply);
    // Return exactly what the Lua script returned
    return redis.RedisModule_ReplyWithCallReply.?(ctx, reply);
}

// Module entry point called by Redis on load: registers the module and the
// MK.EVALKEY command. "getkeys-api" marks the command as computing its own
// key positions; "no-cluster" disables it in cluster mode.
pub export fn RedisModule_OnLoad(ctx: *redis.RedisModuleCtx, argv: [*c]?*redis.RedisModuleString, argc: c_int) c_int {
    if (redis.RedisModule_Init(ctx, "moonkey", 1, redis.REDISMODULE_APIVER_1) == redis.REDISMODULE_ERR)
        return redis.REDISMODULE_ERR;
    if (redis.RedisModule_CreateCommand.?(ctx, "mk.evalkey", MK_EVALKEY, "getkeys-api no-cluster", 0, 0, 0) == redis.REDISMODULE_ERR)
        return redis.REDISMODULE_ERR;
    return redis.REDISMODULE_OK;
}
moonkey.zig
usingnamespace @import("ncurses").ncurses; const width: c_int = 30; const height: c_int = 10; var startx: c_int = 0; var starty: c_int = 0; const choices = [_][]const u8{ "Choice 1", "Choice 2", "Choice 3", "Choice 4", "Exit", }; pub fn main() anyerror!void { var highlight: usize = 1; var choice: usize = 0; var c: c_int = undefined; _ = try initscr(); defer endwin() catch {}; try clear(); try noecho(); try cbreak(); startx = @divTrunc(80 - width, 2); starty = @divTrunc(24 - height, 2); const menuWin = try newwin(height, width, starty, startx); try menuWin.keypad(true); try mvaddstrzig(0, 0, "Use arrow keys to go up and down, Press enter to select a choice"); try refresh(); try printMenu(menuWin, highlight); while (true) { c = try menuWin.wgetch(); switch (c) { KEY_UP => { if (highlight == 1) { highlight = choices.len; } else { highlight -= 1; } }, KEY_DOWN => { if (highlight == choices.len) { highlight = 1; } else { highlight += 1; } }, 10 => { choice = highlight; }, else => { const ch = if (c > 0xff) ' ' else @intCast(u8, c); try mvprintwzig( 24, 0, "Charcter pressed is = {d:3} Hopefully it can be printed as '{c}'", .{ c, ch }, ); try refresh(); }, } try printMenu(menuWin, highlight); if (choice != 0) break; } try mvprintwzig( 23, 0, "You chose choice {d} with choice string {s}\n", .{ choice, choices[choice - 1] }, ); try clrtoeol(); try refresh(); _ = try getch(); } fn printMenu(menuWin: Window, highlight: usize) !void { var x: c_int = 2; var y: c_int = 2; try menuWin.box(0, 0); var i: usize = 0; while (i < choices.len) : (i += 1) { if (highlight == i + 1) { try menuWin.wattron(A_REVERSE); try menuWin.mvwprintwzig(y, x, "{s}", .{choices[i]}); try menuWin.wattroff(A_REVERSE); } else { try menuWin.mvwprintwzig(y, x, "{s}", .{choices[i]}); } y += 1; } try menuWin.wrefresh(); }
examples/keys.zig
pub usingnamespace @cImport({
    @cDefine("GLFW_INCLUDE_GLCOREARB", "");
    @cInclude("GLFW/glfw3.h");
    @cInclude("stb_image_write.h");
});

// Modern (DSA / 4.5-era) OpenGL entry points that GLCOREARB only declares as
// PFN... typedefs. Each one is resolved at runtime by initOpenGlEntryPoints().
pub var glCreateTextures: @typeInfo(PFNGLCREATETEXTURESPROC).Optional.child = undefined;
pub var glTextureStorage2D: @typeInfo(PFNGLTEXTURESTORAGE2DPROC).Optional.child = undefined;
pub var glTextureStorage2DMultisample: @typeInfo(PFNGLTEXTURESTORAGE2DMULTISAMPLEPROC).Optional.child = undefined;
pub var glTextureParameteri: @typeInfo(PFNGLTEXTUREPARAMETERIPROC).Optional.child = undefined;
pub var glTextureParameterf: @typeInfo(PFNGLTEXTUREPARAMETERFPROC).Optional.child = undefined;
pub var glTextureParameterfv: @typeInfo(PFNGLTEXTUREPARAMETERFVPROC).Optional.child = undefined;
pub var glTextureSubImage2D: @typeInfo(PFNGLTEXTURESUBIMAGE2DPROC).Optional.child = undefined;
pub var glCreateFramebuffers: @typeInfo(PFNGLCREATEFRAMEBUFFERSPROC).Optional.child = undefined;
pub var glDeleteFramebuffers: @typeInfo(PFNGLDELETEFRAMEBUFFERSPROC).Optional.child = undefined;
pub var glBindFramebuffer: @typeInfo(PFNGLBINDFRAMEBUFFERPROC).Optional.child = undefined;
pub var glNamedFramebufferTexture: @typeInfo(PFNGLNAMEDFRAMEBUFFERTEXTUREPROC).Optional.child = undefined;
pub var glBlitNamedFramebuffer: @typeInfo(PFNGLBLITNAMEDFRAMEBUFFERPROC).Optional.child = undefined;
pub var glClearNamedFramebufferfv: @typeInfo(PFNGLCLEARNAMEDFRAMEBUFFERFVPROC).Optional.child = undefined;
pub var glClearNamedFramebufferfi: @typeInfo(PFNGLCLEARNAMEDFRAMEBUFFERFIPROC).Optional.child = undefined;
pub var glDebugMessageCallback: @typeInfo(PFNGLDEBUGMESSAGECALLBACKPROC).Optional.child = undefined;
pub var glUseProgram: @typeInfo(PFNGLUSEPROGRAMPROC).Optional.child = undefined;
pub var glUseProgramStages: @typeInfo(PFNGLUSEPROGRAMSTAGESPROC).Optional.child = undefined;
pub var glCreateShaderProgramv: @typeInfo(PFNGLCREATESHADERPROGRAMVPROC).Optional.child = undefined;
pub var glDeleteProgram: @typeInfo(PFNGLDELETEPROGRAMPROC).Optional.child = undefined;
pub var glCreateProgramPipelines: @typeInfo(PFNGLCREATEPROGRAMPIPELINESPROC).Optional.child = undefined;
pub var glBindProgramPipeline: @typeInfo(PFNGLBINDPROGRAMPIPELINEPROC).Optional.child = undefined;
pub var glProgramUniform1f: @typeInfo(PFNGLPROGRAMUNIFORM1FPROC).Optional.child = undefined;
pub var glProgramUniform2f: @typeInfo(PFNGLPROGRAMUNIFORM2FPROC).Optional.child = undefined;
pub var glProgramUniform4fv: @typeInfo(PFNGLPROGRAMUNIFORM4FVPROC).Optional.child = undefined;
pub var glProgramUniform1i: @typeInfo(PFNGLPROGRAMUNIFORM1IPROC).Optional.child = undefined;
pub var glProgramUniform2i: @typeInfo(PFNGLPROGRAMUNIFORM2IPROC).Optional.child = undefined;
pub var glMemoryBarrier: @typeInfo(PFNGLMEMORYBARRIERPROC).Optional.child = undefined;
pub var glDispatchCompute: @typeInfo(PFNGLDISPATCHCOMPUTEPROC).Optional.child = undefined;
pub var glGetProgramiv: @typeInfo(PFNGLGETPROGRAMIVPROC).Optional.child = undefined;
pub var glGetProgramInfoLog: @typeInfo(PFNGLGETPROGRAMINFOLOGPROC).Optional.child = undefined;
pub var glBindImageTexture: @typeInfo(PFNGLBINDIMAGETEXTUREPROC).Optional.child = undefined;
pub var glTextureBarrier: @typeInfo(PFNGLTEXTUREBARRIERPROC).Optional.child = undefined;
pub var glClearTexImage: @typeInfo(PFNGLCLEARTEXIMAGEPROC).Optional.child = undefined;
pub var glEnable: @typeInfo(PFNGLENABLEPROC).Optional.child = undefined;
pub var glGetError: @typeInfo(PFNGLGETERRORPROC).Optional.child = undefined;
pub var glDeleteTextures: @typeInfo(PFNGLDELETETEXTURESPROC).Optional.child = undefined;
pub var glBindTextureUnit: @typeInfo(PFNGLBINDTEXTUREUNITPROC).Optional.child = undefined;
pub var glDrawArrays: @typeInfo(PFNGLDRAWARRAYSPROC).Optional.child = undefined;
pub var glCreateVertexArrays: @typeInfo(PFNGLCREATEVERTEXARRAYSPROC).Optional.child = undefined;
pub var glDeleteVertexArrays: @typeInfo(PFNGLDELETEVERTEXARRAYSPROC).Optional.child = undefined;
pub var glBindVertexArray: @typeInfo(PFNGLBINDVERTEXARRAYPROC).Optional.child = undefined;
pub var glReadPixels: @typeInfo(PFNGLREADPIXELSPROC).Optional.child = undefined;
pub var glFinish: @typeInfo(PFNGLFINISHPROC).Optional.child = undefined;

/// Resolves every entry point declared above through glfwGetProcAddress.
/// Must be called after a GL context has been made current; `.?` panics if a
/// name cannot be resolved.
///
/// REFACTOR: the previous version repeated the identical
/// `x = @ptrCast(@TypeOf(x), glfwGetProcAddress("x").?);` line 43 times; a
/// single comptime name list removes the copy-paste surface where a mismatched
/// string and variable name could silently load the wrong function.
pub fn initOpenGlEntryPoints() void {
    const entry_point_names = [_][:0]const u8{
        "glCreateTextures",
        "glTextureStorage2D",
        "glTextureStorage2DMultisample",
        "glTextureParameteri",
        "glTextureParameterf",
        "glTextureParameterfv",
        "glTextureSubImage2D",
        "glCreateFramebuffers",
        "glDeleteFramebuffers",
        "glBindFramebuffer",
        "glNamedFramebufferTexture",
        "glBlitNamedFramebuffer",
        "glClearNamedFramebufferfv",
        "glClearNamedFramebufferfi",
        "glDebugMessageCallback",
        "glUseProgram",
        "glUseProgramStages",
        "glCreateShaderProgramv",
        "glDeleteProgram",
        "glCreateProgramPipelines",
        "glBindProgramPipeline",
        "glProgramUniform1f",
        "glProgramUniform2f",
        "glProgramUniform4fv",
        "glProgramUniform1i",
        "glProgramUniform2i",
        "glMemoryBarrier",
        "glDispatchCompute",
        "glGetProgramiv",
        "glGetProgramInfoLog",
        "glBindImageTexture",
        "glTextureBarrier",
        "glClearTexImage",
        "glEnable",
        "glGetError",
        "glDeleteTextures",
        "glBindTextureUnit",
        "glDrawArrays",
        "glCreateVertexArrays",
        "glDeleteVertexArrays",
        "glBindVertexArray",
        "glReadPixels",
        "glFinish",
    };
    inline for (entry_point_names) |name| {
        // @field refers to the module-level `pub var` with the same name, so
        // the cast target type always matches the variable being assigned.
        @field(@This(), name) = @ptrCast(@TypeOf(@field(@This(), name)), glfwGetProcAddress(name.ptr).?);
    }
}
src/c.zig
const Plan9 = @This(); const std = @import("std"); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); const aout = @import("Plan9/aout.zig"); const codegen = @import("../codegen.zig"); const trace = @import("../tracy.zig").trace; const mem = std.mem; const File = link.File; const Allocator = std.mem.Allocator; const log = std.log.scoped(.link); const assert = std.debug.assert; base: link.File, sixtyfour_bit: bool, error_flags: File.ErrorFlags = File.ErrorFlags{}, bases: Bases, decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{}, /// is just casted down when 32 bit syms: std.ArrayListUnmanaged(aout.Sym) = .{}, text_buf: std.ArrayListUnmanaged(u8) = .{}, data_buf: std.ArrayListUnmanaged(u8) = .{}, hdr: aout.ExecHdr = undefined, entry_decl: ?*Module.Decl = null, got: std.ArrayListUnmanaged(u64) = .{}, const Bases = struct { text: u64, /// the addr of the got data: u64, }; fn getAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 { return addr + switch (t) { .T, .t, .l, .L => self.bases.text, .D, .d, .B, .b => self.bases.data, else => unreachable, }; } /// opposite of getAddr fn takeAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 { return addr - switch (t) { .T, .t, .l, .L => self.bases.text, .D, .d, .B, .b => self.bases.data, else => unreachable, }; } fn getSymAddr(self: Plan9, s: aout.Sym) u64 { return self.getAddr(s.value, s.type); } pub const DeclBlock = struct { type: aout.Sym.Type, /// offset in the text or data sects offset: ?u64, /// offset into syms sym_index: ?usize, /// offset into got got_index: ?usize, pub const empty = DeclBlock{ .type = .t, .offset = null, .sym_index = null, .got_index = null, }; }; pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases { return switch (arch) { .x86_64 => .{ // 0x28 => 40 == header size .text = 0x200028, .data = 0x400000, }, .i386 => .{ // 0x20 => 32 == header size .text = 0x200020, .data = 0x400000, }, else => 
std.debug.panic("find default base address for {}", .{arch}), }; } pub const PtrWidth = enum { p32, p64 }; pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Plan9 { if (options.use_llvm) return error.LLVMBackendDoesNotSupportPlan9; const sixtyfour_bit: bool = switch (options.target.cpu.arch.ptrBitWidth()) { 0...32 => false, 33...64 => true, else => return error.UnsupportedP9Architecture, }; const self = try gpa.create(Plan9); self.* = .{ .base = .{ .tag = .plan9, .options = options, .allocator = gpa, .file = null, }, .sixtyfour_bit = sixtyfour_bit, .bases = undefined, }; return self; } pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void { _ = module; _ = try self.decl_table.getOrPut(self.base.allocator, decl); } pub fn flush(self: *Plan9, comp: *Compilation) !void { assert(!self.base.options.use_lld); switch (self.base.options.effectiveOutputMode()) { .Exe => {}, // plan9 object files are totally different .Obj => return error.TODOImplementPlan9Objs, .Lib => return error.TODOImplementWritingLibFiles, } return self.flushModule(comp); } pub fn flushModule(self: *Plan9, comp: *Compilation) !void { _ = comp; const tracy = trace(@src()); defer tracy.end(); log.debug("flushModule", .{}); defer assert(self.hdr.entry != 0x0); const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented; self.text_buf.items.len = 0; self.data_buf.items.len = 0; // ensure space to write the got later assert(self.got.items.len == self.decl_table.count()); try self.data_buf.appendNTimes(self.base.allocator, 0x69, self.got.items.len * if (!self.sixtyfour_bit) @as(u32, 4) else 8); // temporary buffer var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); { for (self.decl_table.keys()) |decl| { if (!decl.has_tv) continue; const is_fn = (decl.ty.zigTypeTag() == .Fn); log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name }); decl.link.plan9 = if (is_fn) .{ .offset = 
self.getAddr(self.text_buf.items.len, .t), .type = .t, .sym_index = decl.link.plan9.sym_index, .got_index = decl.link.plan9.got_index, } else .{ .offset = self.getAddr(self.data_buf.items.len, .d), .type = .d, .sym_index = decl.link.plan9.sym_index, .got_index = decl.link.plan9.got_index, }; self.got.items[decl.link.plan9.got_index.?] = decl.link.plan9.offset.?; if (decl.link.plan9.sym_index) |s| { self.syms.items[s] = .{ .value = decl.link.plan9.offset.?, .type = decl.link.plan9.type, .name = mem.span(decl.name), }; } else { try self.syms.append(self.base.allocator, .{ .value = decl.link.plan9.offset.?, .type = decl.link.plan9.type, .name = mem.span(decl.name), }); decl.link.plan9.sym_index = self.syms.items.len - 1; } if (module.decl_exports.get(decl)) |exports| { for (exports) |exp| { // plan9 does not support custom sections if (exp.options.section) |section_name| { if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) { try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{})); break; } } if (std.mem.eql(u8, exp.options.name, "_start")) { std.debug.assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry self.entry_decl = decl; } if (exp.link.plan9) |i| { self.syms.items[i] = .{ .value = decl.link.plan9.offset.?, .type = decl.link.plan9.type.toGlobal(), .name = exp.options.name, }; } else { try self.syms.append(self.base.allocator, .{ .value = decl.link.plan9.offset.?, .type = decl.link.plan9.type.toGlobal(), .name = exp.options.name, }); exp.link.plan9 = self.syms.items.len - 1; } } } log.debug("codegen decl {*} ({s})", .{ decl, decl.name }); const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ .ty = decl.ty, .val = decl.val, }, &code_buffer, .{ .none = {} }); const code = switch (res) { .externally_managed => |x| x, .appended => code_buffer.items, .fail => |em| { decl.analysis = 
.codegen_failure; try module.failed_decls.put(module.gpa, decl, em); // TODO try to do more decls return; }, }; if (is_fn) { try self.text_buf.appendSlice(self.base.allocator, code); code_buffer.items.len = 0; } else { try self.data_buf.appendSlice(self.base.allocator, code); code_buffer.items.len = 0; } } } // write the got if (!self.sixtyfour_bit) { for (self.got.items) |p, i| { mem.writeInt(u32, self.data_buf.items[i * 4 ..][0..4], @intCast(u32, p), self.base.options.target.cpu.arch.endian()); } } else { for (self.got.items) |p, i| { mem.writeInt(u64, self.data_buf.items[i * 8 ..][0..8], p, self.base.options.target.cpu.arch.endian()); } } self.hdr.entry = @truncate(u32, self.entry_decl.?.link.plan9.offset.?); // edata, end, etext self.syms.items[0].value = self.getAddr(0x0, .b); self.syms.items[1].value = self.getAddr(0x0, .b); self.syms.items[2].value = self.getAddr(self.text_buf.items.len, .t); var sym_buf = std.ArrayList(u8).init(self.base.allocator); defer sym_buf.deinit(); try self.writeSyms(&sym_buf); // generate the header self.hdr = .{ .magic = try aout.magicFromArch(self.base.options.target.cpu.arch), .text = @intCast(u32, self.text_buf.items.len), .data = @intCast(u32, self.data_buf.items.len), .syms = @intCast(u32, sym_buf.items.len), .bss = 0, .pcsz = 0, .spsz = 0, .entry = self.hdr.entry, }; const file = self.base.file.?; var hdr_buf = self.hdr.toU8s(); const hdr_slice: []const u8 = &hdr_buf; // account for the fat header const hdr_size: u8 = if (!self.sixtyfour_bit) 32 else 40; // write the fat header for 64 bit entry points if (self.sixtyfour_bit) { mem.writeIntSliceBig(u64, hdr_buf[32..40], self.hdr.entry); } // write it all! 
// --- tail of flush(): write the finished a.out image to disk ---
// Gather header, text, data, and symbol table into iovecs so the whole
// image is written with a single positioned vectored write.
var vectors: [4]std.os.iovec_const = .{
    .{ .iov_base = hdr_slice.ptr, .iov_len = hdr_size },
    .{ .iov_base = self.text_buf.items.ptr, .iov_len = self.text_buf.items.len },
    .{ .iov_base = self.data_buf.items.ptr, .iov_len = self.data_buf.items.len },
    .{ .iov_base = sym_buf.items.ptr, .iov_len = sym_buf.items.len },
    // TODO spsz, pcsz
};
try file.pwritevAll(&vectors, 0);
}

/// Removes `decl` from the decl table; asserts it was actually tracked.
/// Does not touch text_buf/data_buf here — presumably the section buffers
/// are regenerated at flush time (TODO confirm against the full file).
pub fn freeDecl(self: *Plan9, decl: *Module.Decl) void {
    assert(self.decl_table.swapRemove(decl));
}

/// Intentionally a no-op: export handling for Plan 9 happens entirely
/// during flush (see the export loop there), so there is nothing to do
/// incrementally when a decl's exports change.
pub fn updateDeclExports(
    self: *Plan9,
    module: *Module,
    decl: *Module.Decl,
    exports: []const *Module.Export,
) !void {
    // we do all the things in flush
    _ = self;
    _ = module;
    _ = decl;
    _ = exports;
}

/// Frees all allocator-backed containers owned by this linker backend.
/// Note: does not close `self.base.file`; presumably the base linker
/// object owns the file handle — TODO confirm.
pub fn deinit(self: *Plan9) void {
    self.decl_table.deinit(self.base.allocator);
    self.syms.deinit(self.base.allocator);
    self.text_buf.deinit(self.base.allocator);
    self.data_buf.deinit(self.base.allocator);
    self.got.deinit(self.base.allocator);
}

// Per-export linker state: index into `syms`, or null if not yet assigned.
pub const Export = ?usize;
pub const base_tag = .plan9;

/// Creates (or reopens without truncation) the output file at `sub_path`
/// and constructs a fresh Plan9 linker instance for it.
/// The first three symbols are reserved for edata/end/etext with
/// placeholder values (0xcafebabe); flush() patches in the real addresses.
pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
    if (options.use_llvm) return error.LLVMBackendDoesNotSupportPlan9;
    assert(options.object_format == .plan9);
    const file = try options.emit.?.directory.handle.createFile(sub_path, .{
        .truncate = false,
        .read = true,
        .mode = link.determineMode(options),
    });
    errdefer file.close();
    const self = try createEmpty(allocator, options);
    errdefer self.base.destroy();
    self.bases = defaultBaseAddrs(options.target.cpu.arch);
    // first 3 symbols in our table are edata, end, etext
    try self.syms.appendSlice(self.base.allocator, &.{
        .{
            .value = 0xcafebabe,
            .type = .B,
            .name = "edata",
        },
        .{
            .value = 0xcafebabe,
            .type = .B,
            .name = "end",
        },
        .{
            .value = 0xcafebabe,
            .type = .T,
            .name = "etext",
        },
    });
    self.base.file = file;
    return self;
}

/// Serializes the symbol table into `buf` in the a.out symbol format:
/// per symbol, a big-endian value (u32, or u64 in 64-bit mode), one type
/// byte, then the NUL-terminated symbol name.
pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
    const writer = buf.writer();
    for (self.syms.items) |sym| {
        if (!self.sixtyfour_bit) {
            try writer.writeIntBig(u32, @intCast(u32, sym.value));
        } else {
            try writer.writeIntBig(u64, sym.value);
        }
        try writer.writeByte(@enumToInt(sym.type));
        try writer.writeAll(std.mem.span(sym.name));
        try writer.writeByte(0);
    }
}

/// Reserves a GOT slot for `decl` and records its index in the decl's
/// link state. 0xdeadbeef is a placeholder; the real offset is written
/// during flush.
pub fn allocateDeclIndexes(self: *Plan9, decl: *Module.Decl) !void {
    try self.got.append(self.base.allocator, 0xdeadbeef);
    decl.link.plan9.got_index = self.got.items.len - 1;
}
src/link/Plan9.zig
//! CHIP-8 CPU state and initialization: memory layout, register file,
//! timers, display framebuffer, and the built-in hexadecimal font table.

const std = @import("std");
const assert = std.debug.assert;

// Register file: 16 general-purpose 8-bit V registers.
const VRegisterCount: u32 = 16;
// Call stack depth, in 16-bit return addresses.
pub const StackSize: u32 = 16;
// Total addressable memory: 4 KiB, the classic CHIP-8 size.
const MemorySizeInBytes: u32 = 0x1000;

// Fonts
// One glyph per hex digit 0-F, 5 bytes each (8x5 pixels, one byte per row).
const FontTableGlyphCount: u32 = 16;
const GlyphSizeInBytes: u16 = 5;

// Display
pub const ScreenWidth: u32 = 64;
pub const ScreenHeight: u32 = 32;
// One bit per pixel, so each 64-pixel scanline packs into 8 bytes.
pub const ScreenLineSizeInBytes: u32 = ScreenWidth / 8;

// Memory
// Programs are loaded at 0x200; addresses below are reserved (font data etc.).
pub const MinProgramAddress: u16 = 0x0200;
pub const MaxProgramAddress: u16 = 0x0FFF;

// Timings
// Delay/sound timers tick at 60 Hz; instructions execute at ~500 Hz.
const DelayTimerFrequency: u32 = 60;
const InstructionExecutionFrequency: u32 = 500;
pub const DelayTimerPeriodMs: u32 = 1000 / DelayTimerFrequency;
pub const InstructionExecutionPeriodMs: u32 = 1000 / InstructionExecutionFrequency;

pub const Register = enum { V0, V1, V2, V3, V4, V5, V6, V7, V8, V9, VA, VB, VC, VD, VE, VF };

/// Complete interpreter state for one CHIP-8 machine.
pub const CPUState = struct {
    pc: u16, // program counter
    sp: u8, // stack pointer (index into `stack`)
    stack: [StackSize]u16, // return addresses
    vRegisters: [VRegisterCount]u8, // V0..VF general-purpose registers
    i: u16, // address register
    delayTimer: u8,
    soundTimer: u8,
    // Implementation detail
    // Millisecond accumulators used to pace timer decrements and
    // instruction execution against wall-clock time.
    delayTimerAccumulator: u32,
    executionTimerAccumulator: u32,
    memory: []u8, // heap-allocated 4 KiB address space (see createCPUState)
    keyState: u16, // one bit per key, current frame
    keyStatePrev: u16, // one bit per key, previous frame
    isWaitingForKey: bool, // set while blocked on the "wait for keypress" op
    fontTableOffsets: [FontTableGlyphCount]u16, // address of each glyph in `memory`
    screen: [ScreenHeight][ScreenLineSizeInBytes]u8, // 1bpp framebuffer
};

// The font table is copied to the very start of memory, below program space.
const FontTableOffsetInBytes: u16 = 0x0000;
const FontTable = [FontTableGlyphCount][GlyphSizeInBytes]u8{
    [_]u8{ 0xF0, 0x90, 0x90, 0x90, 0xF0 }, // 0
    [_]u8{ 0x20, 0x60, 0x20, 0x20, 0x70 }, // 1
    [_]u8{ 0xF0, 0x10, 0xF0, 0x80, 0xF0 }, // 2
    [_]u8{ 0xF0, 0x10, 0xF0, 0x10, 0xF0 }, // 3
    [_]u8{ 0x90, 0x90, 0xF0, 0x10, 0x10 }, // 4
    [_]u8{ 0xF0, 0x80, 0xF0, 0x10, 0xF0 }, // 5
    [_]u8{ 0xF0, 0x80, 0xF0, 0x90, 0xF0 }, // 6
    [_]u8{ 0xF0, 0x10, 0x20, 0x40, 0x40 }, // 7
    [_]u8{ 0xF0, 0x90, 0xF0, 0x90, 0xF0 }, // 8
    [_]u8{ 0xF0, 0x90, 0xF0, 0x10, 0xF0 }, // 9
    [_]u8{ 0xF0, 0x90, 0xF0, 0x90, 0x90 }, // A
    [_]u8{ 0xE0, 0x90, 0xE0, 0x90, 0xE0 }, // B
    [_]u8{ 0xF0, 0x80, 0x80, 0x80, 0xF0 }, // C
    [_]u8{ 0xE0, 0x90, 0x90, 0x90, 0xE0 }, // D
    [_]u8{ 0xF0, 0x80, 0xF0, 0x80, 0xF0 }, // E
    [_]u8{ 0xF0, 0x80, 0xF0, 0x80, 0x80 }, // F
};

// Copies the built-in font glyphs into low memory and records the address
// of each glyph in state.fontTableOffsets.
fn load_font_table(state: *CPUState) void {
    const tableOffset: u16 = FontTableOffsetInBytes;
    const tableSize: u16 = FontTableGlyphCount * GlyphSizeInBytes;

    // Make sure we don't spill in program addressable space.
    assert((tableOffset + tableSize - 1) < MinProgramAddress);

    // Copy font table into memory
    for (FontTable) |glyphs, i| {
        for (glyphs) |byte, j| {
            const offset: u16 = FontTableOffsetInBytes + @intCast(u16, i) * GlyphSizeInBytes + @intCast(u16, j);
            state.memory[offset] = byte;
        }
    }

    // Assign font table addresses in memory
    for (state.fontTableOffsets) |*value, i| {
        value.* = tableOffset + GlyphSizeInBytes * @intCast(u16, i);
    }
}

/// Allocates and initializes a zeroed CPU state: 4 KiB of memory (backed by
/// the page allocator), PC at the program start address, and the font table
/// loaded into low memory. Pair with destroyCPUState to release the memory.
pub fn createCPUState() !CPUState {
    var state: CPUState = std.mem.zeroes(CPUState);

    const allocator: *std.mem.Allocator = std.heap.page_allocator;
    state.memory = try allocator.alloc(u8, MemorySizeInBytes);
    errdefer allocator.free(state.memory);

    // Set PC to first address
    state.pc = MinProgramAddress;

    load_font_table(&state);

    return state;
}

/// Releases the memory slice owned by `state`. Must use the same allocator
/// as createCPUState (both hard-code the page allocator).
pub fn destroyCPUState(state: *CPUState) void {
    const allocator: *std.mem.Allocator = std.heap.page_allocator;
    allocator.free(state.memory);
    state.memory = undefined;
}
src/chip8/cpu.zig
//! Tracks one wlr_xdg_popup for damage tracking: listens to its lifecycle
//! events and forwards damage to the parent at the root of the surface tree.

const XdgPopup = @This();

const std = @import("std");
const assert = std.debug.assert;
const wlr = @import("wlroots");
const wl = @import("wayland").server.wl;

const util = @import("util.zig");
const Subsurface = @import("Subsurface.zig");
const Parent = Subsurface.Parent;

/// The parent at the root of this surface tree
parent: Parent,
wlr_xdg_popup: *wlr.XdgPopup,

// Always active
// These listeners are registered in create() and removed in destroy().
surface_destroy: wl.Listener(*wlr.XdgSurface) = wl.Listener(*wlr.XdgSurface).init(handleDestroy),
map: wl.Listener(*wlr.XdgSurface) = wl.Listener(*wlr.XdgSurface).init(handleMap),
unmap: wl.Listener(*wlr.XdgSurface) = wl.Listener(*wlr.XdgSurface).init(handleUnmap),
new_popup: wl.Listener(*wlr.XdgPopup) = wl.Listener(*wlr.XdgPopup).init(handleNewPopup),
new_subsurface: wl.Listener(*wlr.Subsurface) = wl.Listener(*wlr.Subsurface).init(handleNewSubsurface),

// Only active while mapped
// Registered in handleMap, removed in handleUnmap.
commit: wl.Listener(*wlr.Surface) = wl.Listener(*wlr.Surface).init(handleCommit),

/// Allocates an XdgPopup wrapper for `wlr_xdg_popup`, constrains the popup
/// to its output, and hooks up all lifecycle listeners. On allocation
/// failure the client is told we are out of memory and the popup is
/// simply not tracked (no error is returned).
pub fn create(wlr_xdg_popup: *wlr.XdgPopup, parent: Parent) void {
    const xdg_popup = util.gpa.create(XdgPopup) catch {
        std.log.crit("out of memory", .{});
        wlr_xdg_popup.resource.postNoMemory();
        return;
    };

    xdg_popup.* = .{
        .parent = parent,
        .wlr_xdg_popup = wlr_xdg_popup,
    };
    // The wlr surface's user-data slot stores a pointer back to this
    // wrapper (see destroyPopups); it must be unclaimed at this point.
    assert(wlr_xdg_popup.base.data == 0);
    wlr_xdg_popup.base.data = @ptrToInt(xdg_popup);

    // Popups are positioned relative to their parent surface; drag icons
    // never spawn popups, hence unreachable.
    const parent_box = switch (parent) {
        .xdg_toplevel => |xdg_toplevel| &xdg_toplevel.view.pending.box,
        .layer_surface => |layer_surface| &layer_surface.box,
        .drag_icon => unreachable,
    };

    const output_dimensions = switch (parent) {
        .xdg_toplevel => |xdg_toplevel| xdg_toplevel.view.output.getEffectiveResolution(),
        .layer_surface => |layer_surface| layer_surface.output.getEffectiveResolution(),
        .drag_icon => unreachable,
    };

    // The output box relative to the parent of the xdg_popup
    var box = wlr.Box{
        .x = -parent_box.x,
        .y = -parent_box.y,
        .width = @intCast(c_int, output_dimensions.width),
        .height = @intCast(c_int, output_dimensions.height),
    };
    wlr_xdg_popup.unconstrainFromBox(&box);

    wlr_xdg_popup.base.events.destroy.add(&xdg_popup.surface_destroy);
    wlr_xdg_popup.base.events.map.add(&xdg_popup.map);
    wlr_xdg_popup.base.events.unmap.add(&xdg_popup.unmap);
    wlr_xdg_popup.base.events.new_popup.add(&xdg_popup.new_popup);
    wlr_xdg_popup.base.surface.events.new_subsurface.add(&xdg_popup.new_subsurface);

    // Track any subsurfaces the surface already has before we started
    // listening for new ones.
    Subsurface.handleExisting(wlr_xdg_popup.base.surface, parent);
}

/// Tears down this wrapper: removes the always-active listeners, destroys
/// tracked child subsurfaces/popups, clears the back-pointer in the wlr
/// surface, and frees the wrapper itself.
/// Note: does not remove the `commit` listener; that is handled by
/// handleUnmap (presumably a surface is always unmapped before destroy —
/// TODO confirm against the xdg-shell protocol guarantees).
pub fn destroy(xdg_popup: *XdgPopup) void {
    xdg_popup.surface_destroy.link.remove();
    xdg_popup.map.link.remove();
    xdg_popup.unmap.link.remove();
    xdg_popup.new_popup.link.remove();
    xdg_popup.new_subsurface.link.remove();

    Subsurface.destroySubsurfaces(xdg_popup.wlr_xdg_popup.base.surface);
    XdgPopup.destroyPopups(xdg_popup.wlr_xdg_popup.base);

    xdg_popup.wlr_xdg_popup.base.data = 0;
    util.gpa.destroy(xdg_popup);
}

/// Destroys the wrappers of all child popups of `wlr_xdg_surface`
/// (recursively, since each child's destroy() calls destroyPopups again).
pub fn destroyPopups(wlr_xdg_surface: *wlr.XdgSurface) void {
    var it = wlr_xdg_surface.popups.iterator(.forward);
    while (it.next()) |wlr_xdg_popup| {
        if (@intToPtr(?*XdgPopup, wlr_xdg_popup.base.data)) |xdg_popup| xdg_popup.destroy();
    }
}

fn handleDestroy(listener: *wl.Listener(*wlr.XdgSurface), wlr_xdg_surface: *wlr.XdgSurface) void {
    const xdg_popup = @fieldParentPtr(XdgPopup, "surface_destroy", listener);
    xdg_popup.destroy();
}

// On map: start listening for commits (to damage on content changes) and
// damage the whole output so the popup appears.
fn handleMap(listener: *wl.Listener(*wlr.XdgSurface), xdg_surface: *wlr.XdgSurface) void {
    const xdg_popup = @fieldParentPtr(XdgPopup, "map", listener);
    xdg_popup.wlr_xdg_popup.base.surface.events.commit.add(&xdg_popup.commit);
    xdg_popup.parent.damageWholeOutput();
}

// On unmap: stop listening for commits and damage the output so the popup
// disappears.
fn handleUnmap(listener: *wl.Listener(*wlr.XdgSurface), xdg_surface: *wlr.XdgSurface) void {
    const xdg_popup = @fieldParentPtr(XdgPopup, "unmap", listener);
    xdg_popup.commit.link.remove();
    xdg_popup.parent.damageWholeOutput();
}

fn handleCommit(listener: *wl.Listener(*wlr.Surface), surface: *wlr.Surface) void {
    const xdg_popup = @fieldParentPtr(XdgPopup, "commit", listener);
    xdg_popup.parent.damageWholeOutput();
}

// Nested popups share this popup's root parent for damage tracking.
fn handleNewPopup(listener: *wl.Listener(*wlr.XdgPopup), wlr_xdg_popup: *wlr.XdgPopup) void {
    const xdg_popup = @fieldParentPtr(XdgPopup, "new_popup", listener);
    XdgPopup.create(wlr_xdg_popup, xdg_popup.parent);
}

fn handleNewSubsurface(listener: *wl.Listener(*wlr.Subsurface), new_wlr_subsurface: *wlr.Subsurface) void {
    const xdg_popup = @fieldParentPtr(XdgPopup, "new_subsurface", listener);
    Subsurface.create(new_wlr_subsurface, xdg_popup.parent);
}
source/river-0.1.0/river/XdgPopup.zig
//! FE310-G002 PRCI (Power, Reset, Clock, Interrupt) register definitions
//! and clock-source selection helpers.

// Note: The manual lists a procmoncfg register but does not describe it.
// https://forums.sifive.com/t/fe310-g002-v1p0-manual-errata/4751

const Register = @import("../../../mmio_register.zig").Register;

/// Switches the main high-frequency clock (hfclk) to run from the external
/// 16 MHz crystal oscillator (HFXOSC) via the bypassed PLL, then disables
/// the internal ring oscillator (HFROSC).
/// NOTE(review): hfxoscen is never set here — this relies on the crystal
/// oscillator being enabled out of reset; confirm for the target board.
pub fn useExternalCrystalOscillator() void {
    // Ensure the internal oscillator is running so the core has a valid
    // clock for the duration of the switch-over.
    hfrosccfg.modify(.{
        .hfroscen = true,
    });

    // Set the PLL to pass through the external clock.
    pllcfg.modify(.{
        .pllbypass = true,
        .pllrefsel = .hfxosc,
    });

    // Switch to running the main high-frequency clock from the output of the PLL.
    // Since the PLL is bypassed and is running from the external oscillator,
    // this means the main high-frequency clock is running "directly" from the
    // 16 MHz external oscillator.
    pllcfg.modify(.{
        .pllsel = .pll,
    });

    // Now we can disable the internal Oscillator.
    hfrosccfg.modify(.{
        .hfroscen = false,
    });
}

const prci_base_address = 0x1000_8000;

/// An internal trimmable high-frequency ring oscillator (HFROSC) is used to
/// provide the default clock after reset, and can be used to allow operation
/// without an external high-frequency crystal or the PLL.
/// The oscillator is controlled by the hfrosccfg register.
pub const hfrosccfg = Register(u32, packed struct {
    /// Ring Oscillator Divider Register (RW)
    hfroscdiv: u6,
    _reserved_6: u10,
    /// Ring Oscillator Trim Register (RW)
    hfrosctrim: u5,
    _reserved_21: u9,
    /// Ring Oscillator Enable (RW), bit 30
    hfroscen: bool,
    /// Ring Oscillator Ready (RO), bit 31
    hfroscrdy: bool,
}).init(prci_base_address + 0x0);

/// An external high-frequency 16 MHz crystal oscillator (HFXOSC) can be used to
/// provide a precise clock source.
/// The HFXOSC is controlled via the memory-mapped hfxosccfg register.
pub const hfxosccfg = Register(u32, packed struct {
    // Fix: this reserved field was u29, which made the struct only 31 bits
    // wide and placed hfxoscen/hfxoscrdy at bits 29/30. The FE310-G002
    // manual puts them at bits 30/31, mirroring hfroscen/hfroscrdy in
    // hfrosccfg (whose fields correctly sum to 32 bits).
    _reserved_0: u30,
    /// Crystal Oscillator Enable (RW), bit 30
    hfxoscen: bool,
    /// Crystal Oscillator Ready (RO), bit 31
    hfxoscrdy: bool,
}).init(prci_base_address + 0x4);

/// The PLL generates a high-frequency clock by multiplying a mid-frequency
/// reference source clock, either the HFROSC or the HFXOSC. The input frequency
/// to the PLL can be in the range 6–48 MHz. The PLL can generate output
/// clock frequencies in the range 48–384 MHz.
/// The PLL is controlled by a memory-mapped read-write pllcfg register in the
/// PRCI address space.
pub const pllcfg = Register(u32, packed struct {
    /// PLL R Value (RW)
    pllr: u3,
    _reserved_3: u1,
    /// PLL F Value (RW)
    pllf: u6,
    /// PLL Q Value (RW)
    pllq: u2,
    _reserved_12: u4,
    /// PLL Select (RW)
    /// Selects which clock source drives the hfclk (main high-frequency clock
    /// that drives the CPU clock and others).
    pllsel: enum(u1) {
        /// Internal oscillator.
        hfrosc = 0,
        /// The output of the PLL.
        pll = 1,
    },
    /// PLL Reference Select (RW)
    /// Selects the clock source that drives the PLL.
    pllrefsel: enum(u1) {
        /// Internal oscillator.
        hfrosc,
        /// External oscillator.
        hfxosc,
    },
    /// PLL Bypass (RW)
    pllbypass: bool,
    _reserved_19: u12,
    /// PLL Lock (RO), bit 31
    plllock: bool,
}).init(prci_base_address + 0x8);

/// The plloutdiv register controls a clock divider that divides the output of the PLL.
pub const plloutdiv = Register(u32, packed struct {
    /// PLL Final Divider Value (RW)
    plloutdiv: u6,
    _reserved_6: u2,
    /// PLL Final Divide By 1 (RW)
    plloutdivby1: bool,
    _reserved_9: u23,
}).init(prci_base_address + 0xC);
src/target/soc/fe310-g002/prci.zig
const std = @import("std"); const ast = @import("ast.zig"); const Allocator = std.mem.Allocator; const Gc = @import("Gc.zig"); //! `Value` represents a primitive value for Luf const Value = @This(); /// actual type of the `Value` ty: Type, /// is marked by the gc? is_marked: bool = false, /// Next Value in the Linked List next: ?*Value = null, /// Global True value, saves memory by having it as a globally available 'constant' pub var True = Boolean{ .base = .{ .ty = .boolean }, .value = true }; /// Global False value, saves memory by having it as a globally available 'constant' pub var False = Boolean{ .base = .{ .ty = .boolean }, .value = false }; /// Global void value, saves memory by having it as a globally available 'constant' pub var Void = Value{ .ty = ._void }; /// Global Nil value, saves memory by having it as a globally available 'constant' pub var Nil = Value{ .ty = .nil }; /// Signature for native functions, has access to garbage collector so values created /// by a native function can be cleaned up as well pub const NativeFn = fn (gc: *Gc, args: []*Value) anyerror!*Value; /// Build in types supported by Luf pub const Type = enum { integer, boolean, string, nil, function, list, map, native, module, iterable, range, _enum, _void, optional, /// Returns Luf's Type struct representing the enum value pub fn LufType(self: Type) type { return switch (self) { .integer => Integer, .boolean => Boolean, .string => String, .nil => @TypeOf(null), .function => Function, .list => List, .map => Map, .native => Native, .module => Module, .iterable => Iterable, .range => Range, ._enum => Enum, ._void => void, .optional => Optional, }; } /// Returns `Type` based on given struct /// Compiler error when the type is not one within the scope of `Value` pub fn fromType(comptime T: type) Type { return switch (T) { Integer => .integer, Boolean => .boolean, String => .string, Function => .function, List => .list, Map => .map, Native => .native, Module => .module, Iterable => 
.iterable, Range => .range, Enum => ._enum, Optional => .optional, else => unreachable, }; } }; /// Unwraps the `Value` into the actual Type /// Returns null if Value is of different type pub fn unwrap(self: *Value, comptime l_type: Type) ?*l_type.LufType() { if (self.ty != l_type) return null; return @fieldParentPtr(l_type.LufType(), "base", self); } /// Casts `Value` to `String` /// User must ensure Value contains correct type pub fn toString(self: *Value) *String { return @fieldParentPtr(String, "base", self); } /// Casts `Value` to `Integer` /// User must ensure Value contains correct type pub fn toInteger(self: *Value) *Integer { return @fieldParentPtr(Integer, "base", self); } /// Casts `Value` to `Boolean` /// User must ensure Value contains correct type pub fn toBool(self: *Value) *Boolean { return @fieldParentPtr(Boolean, "base", self); } /// Casts `Value` to `List` /// User must ensure Value contains correct type pub fn toList(self: *Value) *List { return @fieldParentPtr(List, "base", self); } /// Casts `Value` to `Map` /// User must ensure Value contains correct type pub fn toMap(self: *Value) *Map { return @fieldParentPtr(Map, "base", self); } /// Casts `Value` to `Native` /// User must ensure Value contains correct type pub fn toFunction(self: *Value) *Function { return @fieldParentPtr(Function, "base", self); } /// Casts `Value` to `Native` /// User must ensure Value contains correct type pub fn toNative(self: *Value) *Native { return @fieldParentPtr(Native, "base", self); } /// Casts `Value` to `Range` /// User must ensure Value contains correct type pub fn toRange(self: *Value) *Range { return @fieldParentPtr(Range, "base", self); } /// Casts `Value` to `Enum` /// User must ensure Value contains correct type pub fn toEnum(self: *Value) *Enum { return @fieldParentPtr(Enum, "base", self); } /// Casts `Value` to `Iterable` /// User must ensure Value contains correct type pub fn toIterable(self: *Value) *Iterable { return @fieldParentPtr(Iterable, 
"base", self); }

/// Casts `Value` to `Module`
/// User must ensure Value contains correct type
pub fn toModule(self: *Value) *Module {
    return @fieldParentPtr(Module, "base", self);
}

/// Casts `Value` to `Optional`
/// User must ensure Value contains correct type
pub fn toOptional(self: *Value) *Optional {
    return @fieldParentPtr(Optional, "base", self);
}

/// Returns true if the `Type` of the given `Value` is equal
pub fn isType(self: *const Value, tag: Type) bool {
    return self.ty == tag;
}

/// Returns the `Value` as a Zig type
/// `error.InvalidType` is returned when `Value` cannot be coerced to the given
/// Zig type
pub fn toZig(self: *Value, comptime T: type) error{InvalidType}!T {
    return switch (T) {
        void => {},
        bool => if (self.unwrap(.boolean)) |b| b.value else error.InvalidType,
        []const u8 => if (self.unwrap(.string)) |s| s.value else error.InvalidType,
        *Map, *const Map => if (self.unwrap(.map)) |map| map.value else error.InvalidType,
        *Value, *const Value => self,
        Value => self.*,
        // Integer-backed targets are range-checked only by @intCast.
        else => switch (@typeInfo(T)) {
            .Null => if (self.unwrap(.nil)) |nil| null else error.InvalidType,
            .Int => @intCast(T, if (self.unwrap(.integer)) |int| int.value else return error.InvalidType),
            .Enum => @intToEnum(T, if (self.unwrap(.integer)) |int| int.value else return error.InvalidType),
            else => @compileError("TODO add support for type: " ++ @typeName(T)),
        },
    };
}

/// Creates a Luf `Value` from a Zig value
/// Memory is owned by caller and must be freed by caller
pub fn fromZig(gc: *Gc, val: anytype) !*Value {
    switch (@TypeOf(val)) {
        // todo have a global void value
        void => return &Void,
        *Value => return val,
        Value => {
            // FIXME(review): `gc.gpa.create(Value)` returns an error union,
            // so this is missing a `try`; and `val` (a by-value `Value`) is
            // returned where `*Value` is expected — this branch should
            // almost certainly read:
            //     const ret = try gc.gpa.create(Value);
            //     ret.* = val;
            //     return ret;
            const ret = gc.gpa.create(Value);
            ret.* = val;
            return val;
        },
        bool => return if (val) &Value.True.base else &Value.False.base,
        []u8, []const u8 => return String.create(gc, val),
        // A Zig struct type is exposed as a Luf map of its public decls.
        type => switch (@typeInfo(val)) {
            .Struct => |info| {
                comptime var decl_count = 0;
                inline for (info.decls) |decl| {
                    if (decl.is_pub) decl_count += 1;
                }
                const ret = try Map.create(gc,
decl_count); const map = ret.toMap(); errdefer ret.destroy(gc.gpa); try map.value.ensureCapacity(gc.gpa, decl_count); inline for (info.decls) |decl| { if (!decl.is_pub) continue; const key = try String.create(gc, decl.name); const value = try fromZig(gc, @field(val, decl.name)); map.value.putAssumeCapacityNoClobber(key, value); } return ret; }, else => @compileError("Unsupported type: " ++ @typeName(val)), }, else => switch (@typeInfo(@TypeOf(val))) { .Fn => { const Fn = @typeInfo(@TypeOf(val)).Fn; comptime var arg_index: u2 = 0; const args_len = Fn.args.len; const Func = struct { // this is needed as `val` is not accessible from inner function var innerFunc: @TypeOf(val) = undefined; fn call(_gc: *Gc, args: []*Value) !*Value { if (args.len != args_len) return error.IncorrectArgumentCount; const ArgsTuple = comptime blk: { var fields: [args_len]std.builtin.TypeInfo.StructField = undefined; for (fields) |*f, idx| { var num_buf: [128]u8 = undefined; f.* = .{ .name = std.fmt.bufPrint(&num_buf, "{}", .{idx}) catch unreachable, .field_type = Fn.args[idx].arg_type.?, .default_value = @as(?(Fn.args[idx].arg_type.?), null), .is_comptime = false, .alignment = @alignOf(Fn.args[idx].arg_type.?), }; } break :blk @Type(.{ .Struct = .{ .layout = .Auto, .fields = &fields, .decls = &[0]std.builtin.TypeInfo.Declaration{}, .is_tuple = true, }, }); }; var tuple: ArgsTuple = undefined; comptime var i = 0; inline while (i < args_len) : (i += 1) { tuple[i] = try args[i].toZig(Fn.args[i].arg_type.?); } return fromZig(_gc, @call(.{}, innerFunc, tuple)); } }; // set innerFunc as we cannot access `val` from inside Func.innerFunc = val; return try gc.newValue( Native, .{ .base = .{ .ty = .native, .next = null, .is_marked = false }, .func = Func.call, .arg_len = Fn.args.len, }, ); }, .ComptimeInt, .Int => return Integer.create(gc, @intCast(i64, val)), .Pointer => |info| switch (@typeInfo(info.child)) { .Array => |array_info| switch (array_info.child) { u8 => return String.create(gc, val), 
else => { const ret = try Value.List.create(gc, array_info.len); for (ret.toList().value.items) |*item, i| { item.* = try fromZig(gc, val[i]); } return ret; }, }, else => @compileError("Unsupported type: " ++ @typeName(@TypeOf(val))), }, .Null => return &Nil, else => @compileError("Unsupported type: " ++ @typeName(@TypeOf(val))), }, } } /// Frees all memory of the `Value`. /// NOTE, for lists/maps it only frees the list/map itself, not the values it contains pub fn destroy(self: *Value, gpa: *Allocator) void { switch (self.ty) { .integer => self.toInteger().destroy(gpa), .string => self.toString().destroy(gpa), .function => self.toFunction().destroy(gpa), .list => self.toList().destroy(gpa), .map => self.toMap().destroy(gpa), .module => self.toModule().destroy(gpa), .iterable => self.toIterable().destroy(gpa), .range => self.toRange().destroy(gpa), ._enum => self.toEnum().destroy(gpa), .native => self.toNative().destroy(gpa), else => {}, } } pub const Integer = struct { base: Value, value: i64, /// Creates a new `Integer` using the given `val`. Returns /// the pointer to its `Value` pub fn create(gc: *Gc, val: i64) !*Value { return try gc.newValue( Integer, .{ .base = .{ .ty = .integer, .next = null, .is_marked = false }, .value = val, }, ); } pub fn destroy(self: *Integer, gpa: *Allocator) void { gpa.destroy(self); } }; pub const Boolean = struct { base: Value, value: bool, }; pub const String = struct { base: Value, value: []const u8, /// Creates a new `Integer` using the given `val`. Returns /// the pointer to its `Value` pub fn create(gc: *Gc, val: []const u8) !*Value { return try gc.newValue( String, .{ .base = .{ .ty = .string, .next = null, .is_marked = false }, .value = try gc.gpa.dupe(u8, val), }, ); } /// Copies the value of `other` into `self` /// Does this by first free'ing the current value, and then duplicating /// the value of `other`. 
This does NOT free the value of the original `other` pub fn copyFrom(self: *String, gpa: *Allocator, other: *String) !void { gpa.free(self.value); self.value = try gpa.dupe(u8, other.value); } pub fn destroy(self: *String, gpa: *Allocator) void { gpa.free(self.value); gpa.destroy(self); } }; pub const Module = struct { base: Value, value: []const u8, pub fn create(gc: *Gc, val: []const u8) !*Value { return try gc.newValue( Module, .{ .base = .{ .ty = .module, .next = null, .is_marked = false }, .value = try gc.gpa.dupe(u8, val), }, ); } pub fn destroy(self: *Module, gpa: *Allocator) void { gpa.free(self.value); gpa.destroy(self); } }; pub const ReturnValue = struct { base: Value, value: *Value, }; pub const Function = struct { base: Value, name: ?[]const u8, arg_len: usize, locals: usize, entry: usize, /// Creates a new `Integer` using the given `val`. Returns /// the pointer to its `Value` pub fn create( gc: *Gc, name: ?[]const u8, locals: usize, arg_len: usize, entry_point: usize, ) !*Value { const n = if (name) |n| try gc.gpa.dupe(u8, n) else null; return try gc.newValue( Function, .{ .base = .{ .ty = .function, .next = null, .is_marked = false }, .arg_len = arg_len, .locals = locals, .name = n, .entry = entry_point, }, ); } pub fn destroy(self: *Function, gpa: *Allocator) void { if (self.name) |name| gpa.free(name); gpa.destroy(self); } }; pub const List = struct { base: Value, value: ListType, pub const ListType = std.ArrayListUnmanaged(*Value); pub fn create(gc: *Gc, len: ?usize) !*Value { var self = List{ .base = .{ .ty = .list, .next = null, .is_marked = false }, .value = ListType{}, }; if (len) |n| try self.value.ensureCapacity(gc.gpa, n); return try gc.newValue(List, self); } pub fn destroy(self: *List, gpa: *Allocator) void { self.value.deinit(gpa); gpa.destroy(self); } }; pub const Map = struct { base: Value, value: MapType, pub const MapType = std.ArrayHashMapUnmanaged(*Value, *Value, hash, eql, true); pub fn create(gc: *Gc, len: ?usize) !*Value { var 
self = Map{ .base = .{ .ty = .map, .next = null, .is_marked = false }, .value = MapType{}, }; if (len) |n| try self.value.ensureCapacity(gc.gpa, n); return try gc.newValue(Map, self); } pub fn destroy(self: *Map, gpa: *Allocator) void { self.value.deinit(gpa); gpa.destroy(self); } }; pub const Native = struct { base: Value, func: Value.NativeFn, arg_len: usize, pub fn destroy(self: *Native, gpa: *Allocator) void { gpa.destroy(self); } }; pub const Range = struct { base: Value, start: i64, end: i64, pub fn create(gc: *Gc, start: i64, end: i64) !*Value { return try gc.newValue( Range, .{ .base = .{ .ty = .range, .next = null, .is_marked = false }, .start = start, .end = end, }, ); } pub fn destroy(self: *Range, gpa: *Allocator) void { gpa.destroy(self); } }; pub const Enum = struct { base: Value, value: [][]const u8, /// Creates a new `Enum` value. Takes ownership of the memory of the slice pub fn create(gc: *Gc, enums: [][]const u8) !*Value { for (enums) |*enm| enm.* = try gc.gpa.dupe(u8, enm.*); return try gc.newValue( Enum, .{ .base = .{ .ty = ._enum, .next = null, .is_marked = false }, .value = enums, }, ); } pub fn destroy(self: *Enum, gpa: *Allocator) void { for (self.value) |enm| gpa.free(enm); gpa.free(self.value); gpa.destroy(self); } }; pub const Iterable = struct { base: Value, expose_index: bool, index: usize, value: *Value, pub fn create(gc: *Gc, expose: bool, index: usize, value: *Value) !*Value { return try gc.newValue( Iterable, .{ .base = .{ .ty = .iterable, .next = null, .is_marked = false }, .expose_index = expose, .index = index, .value = value, }, ); } pub fn destroy(self: *Iterable, gpa: *Allocator) void { gpa.destroy(self); } /// Returns a new Value, returns null if end of iterator is reached /// This creates a copy of the actual value pub fn next(self: *@This(), gc: *Gc) !?*Value { switch (self.value.ty) { .list => { const list = self.value.toList().value; if (list.items.len == 0) return null; if (list.items.len == self.index) return null; 
defer self.index += 1; return list.items[self.index]; }, .string => { const string = self.value.toString().value; if (string.len == 0) return null; if (string.len == self.index) return null; defer self.index += 1; return String.create(gc, string[self.index .. self.index + 1]); }, .range => { const range = self.value.toRange(); if (range.start == range.end) return null; if (self.index == range.end - range.start) return null; defer self.index += 1; return Integer.create(gc, range.start + @intCast(i64, self.index)); }, else => unreachable, } } }; pub const Optional = struct { base: Value, child: ?*Value, pub fn create(gc: *Gc, value: ?*Value) !*Value { return try gc.newValue( Optional, .{ .base = .{ .ty = .optional, .next = null, .is_marked = false }, .child = value, }, ); } /// Only destroys itself, not the pointer to the child value pub fn destroy(self: *Optional, gpa: *Allocator) void { gpa.destroy(self); } }; fn hash(key: *Value) u32 { const hashFn = std.hash.autoHash; var hasher = std.hash.Wyhash.init(0); switch (key.ty) { .integer => hashFn(&hasher, key.toInteger().value), .boolean => hashFn(&hasher, key.toBool().value), .string => hasher.update(key.toString().value), .function => { const func = key.toFunction(); if (func.name) |name| hasher.update(name); hashFn(&hasher, func.arg_len); hashFn(&hasher, func.locals); hashFn(&hasher, func.entry); }, .list => { const list = key.toList().value; hashFn(&hasher, list.items.len); hashFn(&hasher, list.items.ptr); }, .map => { const map = key.toMap().value; hashFn(&hasher, map.items().len); hashFn(&hasher, map.items().ptr); }, .native => { const native = key.toNative(); hashFn(&hasher, native.arg_len); hashFn(&hasher, native.func); }, .range => { const range = key.toRange(); hashFn(&hasher, range.start); hashFn(&hasher, range.end); }, else => unreachable, } return @truncate(u32, hasher.final()); } fn eql(a: *Value, b: *Value) bool { return switch (a.ty) { .integer => a.toInteger().value == b.toInteger().value, .boolean => 
a.toBool().value == b.toBool().value, .nil => true, .string => std.mem.eql(u8, a.toString().value, b.toString().value), .list => { const list_a = a.toList().value; const list_b = b.toList().value; if (list_a.items.len != list_b.items.len) return false; for (list_a.items) |item, i| { if (!item.eql(list_b.items[i])) return false; } return true; }, .map => { const map_a = a.toMap().value; const map_b = a.toMap().value; if (map_a.items().len != map_b.items().len) return false; for (map_a.items()) |entry, i| { if (entry.hash != map_b.items()[i].hash) return false; } return true; }, .range => { const range_a = a.toRange(); const range_b = b.toRange(); return range_a.start == range_b.start and range_a.end == range_b.end; }, else => unreachable, }; } /// Prints a `Value` to the given `writer` pub fn print(self: *Value, writer: anytype) @TypeOf(writer).Error!void { switch (self.ty) { .integer => try writer.print("{}", .{self.toInteger().value}), .boolean => try writer.print("{}", .{self.toBool().value}), .string => try writer.writeAll(self.toString().value), .nil => try writer.writeAll("nil"), .list => { const list = self.toList().value; try writer.writeAll("["); for (list.items) |item, i| { try item.print(writer); if (i != list.items.len - 1) try writer.writeAll(",\n"); } try writer.writeAll("]\n"); }, .map => { const map = self.toMap().value; try writer.writeAll("{"); for (map.entries.items) |item, i| { try item.key.print(writer); try writer.writeAll(":"); try item.value.print(writer); if (i != map.items().len - 1) try writer.writeAll(",\n"); } try writer.writeAll("}\n"); }, .range => try writer.print("{}..{}", .{ self.toRange().start, self.toRange().end }), ._enum => { const enm = self.toEnum().value; try writer.writeAll("{"); for (enm) |item, i| { try writer.writeAll(item); if (i != enm.len - 1) try writer.writeAll(",\n"); } try writer.writeAll("}\n"); }, .optional => { if (self.toOptional().child) |child| try child.print(writer) else try writer.writeAll("nil"); }, else 
=> try writer.writeAll("void"), } }
src/Value.zig
//! Comptime glue that exposes plain Zig structs as Wren foreign classes.
//! `wrap` adapts a Zig function to Wren's C callback ABI; `ForeignClass`
//! scans a struct's declarations at comptime and builds the
//! allocate/finalize/method tables Wren requests at class-bind time.

const std = @import("std");
const assert = std.debug.assert;
const print = std.debug.print;
const testing = std.testing;

const wren = @import("./wren.zig");
const Configuration = @import("./vm.zig").Configuration;
const Vm = @import("./vm.zig").Vm;
const ErrorType = @import("./vm.zig").ErrorType;

// NOTE(review): `StringHashMap` is not brought into scope in this file —
// presumably `std.StringHashMap` was intended; confirm this decl compiles.
pub const ForeignClassRegistry = StringHashMap(ForeignClassDesc);
pub const ForeignMethodDesc = struct {};
pub const ForeignClassDesc = struct {};

// Wren's C-ABI callback types the generated wrappers must match.
const WrappedFn = wren.ForeignMethodFn;
const WrappedFinalizeFn = wren.FinalizeFn;

// "Inspired" by
// https://github.com/daurnimator/zig-autolua/blob/5bc7194124a7d7a14e6efe5033c75c7ad4b19cb8/src/autolua.zig#L185-L210
// Used with permission.

/// How a wrapped function is invoked by Wren. This selects the thunk's
/// C argument type and the VM slot offset of the first user argument.
const WrapMode = enum { Initialize, Instance, Static, Finalize };

/// Wraps the comptime-known `func` of `Class` into a C-callconv thunk that
/// Wren can call. Arguments are fetched from VM slots one at a time via the
/// recursive inline `call` helper; the result (including error unions) is
/// written back to slot 0 or turned into a fiber abort.
fn wrap(comptime Class: type, comptime func: anytype, wrap_mode: WrapMode) switch (wrap_mode) {
    .Finalize => WrappedFinalizeFn,
    else => WrappedFn,
} {
    const Fn = @typeInfo(@TypeOf(func)).Fn;
    // Finalizers receive the raw foreign-object pointer; everything else
    // receives the VM.
    const ThunkArg = switch (wrap_mode) {
        .Finalize => ?*c_void,
        else => ?*wren.Vm,
    };
    // Number of leading Zig parameters (self/vm) that do NOT come from
    // user-visible argument slots.
    const slot_offset = switch (wrap_mode) {
        .Instance, .Initialize => 2,
        .Static => 1,
        .Finalize => 0,
    };
    // See https://github.com/ziglang/zig/issues/229
    return struct {
        // See https://github.com/ziglang/zig/issues/2930
        // Recursively appends one argument (read from its VM slot) per step
        // until the tuple matches `func`'s arity, then performs the call.
        fn call(vm: *Vm, args: anytype) (if (Fn.return_type) |rt| rt else void) {
            if (Fn.args.len == args.len) {
                return @call(.{}, func, args);
            } else {
                const i = args.len;
                // Add 1 for the class itself in slot 0.
                const a = vm.getSlot(Fn.args[i].arg_type.?, 1 + i - slot_offset);
                return @call(.{ .modifier = .always_inline }, call, .{ vm, args ++ .{a} });
            }
        }
        // The actual C-ABI entry point handed to Wren.
        fn thunk(arg: ThunkArg) callconv(.C) void {
            // Routes the wrapped function's result: `noop` for finalizers
            // (no VM available), `handle` otherwise (sets slot 0 or aborts
            // the fiber on error).
            const ResultHandler = struct {
                const Self = @This();
                maybe_vm: ?*Vm,
                pub fn noop(self: Self, ignored: anytype) void {}
                pub fn handle(self: Self, result: anytype) void {
                    if (self.maybe_vm) |vm| {
                        const return_type = @TypeOf(result);
                        if (return_type != void) {
                            if (@typeInfo(return_type) == .ErrorUnion) {
                                if (result) |success| {
                                    if (@TypeOf(success) != void) {
                                        vm.setSlot(0, success);
                                    }
                                } else |err| {
                                    // Error name becomes the Wren runtime error.
                                    vm.abortFiber(0, @errorName(err));
                                }
                            } else {
                                vm.setSlot(0, result);
                            }
                        }
                    } else {
                        std.debug.panic("vm must be set", .{});
                    }
                }
            };
            const call_target = switch (wrap_mode) {
                .Finalize => func,
                else => call,
            };
            const result_handler = switch (wrap_mode) {
                .Finalize => ResultHandler{ .maybe_vm = null },
                else => ResultHandler{ .maybe_vm = Vm.fromC(arg) },
            };
            const result_handler_fn = switch (wrap_mode) {
                .Finalize => result_handler.noop,
                else => result_handler.handle,
            };
            // Initially, this switch defined `initial_args` (the tuple in `@call`).
            // This failed to compile with the following error:
            // error: compiler bug: generating const value for struct field '0'
            switch (wrap_mode) {
                .Initialize => {
                    const vm = Vm.fromC(arg);
                    // Allocate the foreign payload in slot 0 and run the initializer on it.
                    const foreign = wren.setSlotNewForeign(arg, 0, 0, @sizeOf(Class));
                    assert(foreign != null);
                    const instance = @ptrCast(*Class, @alignCast(@alignOf(Class), foreign));
                    result_handler_fn(@call(.{ .modifier = .always_inline }, call_target, .{ vm, .{ instance, vm } }));
                },
                .Finalize => {
                    const instance = @ptrCast(*Class, arg);
                    result_handler_fn(@call(.{ .modifier = .always_inline }, call_target, .{instance}));
                },
                .Instance => {
                    const vm = Vm.fromC(arg);
                    // Receiver lives in slot 0.
                    const instance = vm.getSlot(*Class, 0);
                    result_handler_fn(@call(.{ .modifier = .always_inline }, call_target, .{ vm, .{ instance, vm } }));
                },
                .Static => {
                    const vm = Vm.fromC(arg);
                    result_handler_fn(@call(.{ .modifier = .always_inline }, call_target, .{ vm, .{vm} }));
                },
            }
        }
    }.thunk;
}

// One bound method: Wren-visible name, user-argument count, and the thunk.
const MethodDesc = struct { name: []const u8, argument_count: usize, method: WrappedFn };

/// Builds the binding type for one foreign class. Scans `Class`'s public
/// declarations for `initializeN`, `finalize`, instance methods
/// (`*Class, *Vm, ...`) and static methods (`*Vm, ...`), wraps each, and
/// returns a type exposing `bindForeignClass` / `bindForeignMethod`.
pub fn ForeignClass(class_name: []const u8, comptime Class: anytype) type {
    const ti = @typeInfo(Class);
    // Comptime predicates and counters over `Class`'s declarations.
    const meta = struct {
        // Matches `initializeN(self: *Class, vm: *Vm, ...N args...)`
        // returning `void` or `!void`.
        pub fn isInitialize(comptime decl: std.builtin.TypeInfo.Declaration) bool {
            comptime {
                const name_base = "initialize";
                if (!std.mem.startsWith(u8, decl.name, name_base)) return false;
                // @compileLog(name_base.len);
                // @compileLog(decl.name.len);
                if (decl.name.len <= name_base.len) return false;
                const digits = decl.name[name_base.len..];
                const num = std.fmt.parseInt(usize, digits, 10) catch |e| {
                    return false;
                };
                const fn_type_info = @typeInfo(decl.data.Fn.fn_type);
                if (fn_type_info.Fn.args.len != 2 + num) return false;
                const arg1_info = @typeInfo(fn_type_info.Fn.args[0].arg_type orelse unreachable);
                if (arg1_info != .Pointer) return false;
                if (arg1_info.Pointer.child != Class) return false;
                const arg2_info = @typeInfo(fn_type_info.Fn.args[1].arg_type orelse unreachable);
                if (arg2_info != .Pointer) return false;
                if (arg2_info.Pointer.child != Vm) return false;
                const ret_info = @typeInfo(fn_type_info.Fn.return_type orelse unreachable);
                if (ret_info != .Void and (ret_info != .ErrorUnion or ret_info.ErrorUnion.payload != void)) return false;
                return true;
            }
        }
        // Matches exactly `finalize(self: *Class) void`.
        pub fn isFinalize(comptime decl: std.builtin.TypeInfo.Declaration) bool {
            if (!std.mem.eql(u8, "finalize", decl.name)) return false;
            const fn_type_info = @typeInfo(decl.data.Fn.fn_type);
            if (fn_type_info.Fn.args.len != 1) return false;
            const arg1_info = @typeInfo(fn_type_info.Fn.args[0].arg_type orelse unreachable);
            if (arg1_info != .Pointer) return false;
            if (arg1_info.Pointer.child != Class) return false;
            const ret_info = @typeInfo(fn_type_info.Fn.return_type orelse unreachable);
            // No access to the VM, so no way to signal errors to it.
            if (ret_info != .Void) return false;
            return true;
        }
        // Matches `fn(self: *Class, vm: *Vm, ...)`.
        pub fn isInstanceMethod(comptime decl: std.builtin.TypeInfo.Declaration) bool {
            if (isInitialize(decl) or isFinalize(decl)) return false;
            const fn_type_info = @typeInfo(decl.data.Fn.fn_type);
            // At least self and vm must be present.
            if (fn_type_info.Fn.args.len < 2) return false;
            if (fn_type_info.Fn.args[0].arg_type.? != *Class) return false;
            if (fn_type_info.Fn.args[1].arg_type.? != *Vm) return false;
            return true;
        }
        // Matches `fn(vm: *Vm, ...)` — no receiver.
        pub fn isStaticMethod(comptime decl: std.builtin.TypeInfo.Declaration) bool {
            if (isInitialize(decl) or isFinalize(decl)) return false;
            const fn_type_info = @typeInfo(decl.data.Fn.fn_type);
            // At least the vm must be present.
            if (fn_type_info.Fn.args.len < 1) return false;
            if (fn_type_info.Fn.args[0].arg_type.? != *Vm) return false;
            return true;
        }
        pub fn countInitializers(comptime strct: std.builtin.TypeInfo.Struct) comptime_int {
            comptime var initializer_count = 0;
            inline for (strct.decls) |decl| {
                switch (decl.data) {
                    .Fn => {
                        if (isInitialize(decl)) {
                            initializer_count += 1;
                        }
                    },
                    else => {},
                }
            }
            return initializer_count;
        }
        pub fn calcMaxInitializerArgs(comptime strct: std.builtin.TypeInfo.Struct) comptime_int {
            comptime var max = 0;
            inline for (strct.decls) |decl| {
                switch (decl.data) {
                    .Fn => {
                        if (isInitialize(decl)) {
                            const fn_type_info = @typeInfo(decl.data.Fn.fn_type);
                            const count = fn_type_info.Fn.args.len - 2;
                            max = std.math.max(max, count);
                        }
                    },
                    else => {},
                }
            }
            return max;
        }
        pub fn countInstanceMethods(comptime strct: std.builtin.TypeInfo.Struct) comptime_int {
            comptime var method_count = 0;
            inline for (strct.decls) |decl| {
                switch (decl.data) {
                    .Fn => {
                        if (isInstanceMethod(decl)) {
                            method_count += 1;
                        }
                    },
                    else => {},
                }
            }
            return method_count;
        }
        pub fn countStaticMethods(comptime strct: std.builtin.TypeInfo.Struct) comptime_int {
            comptime var method_count = 0;
            inline for (strct.decls) |decl| {
                switch (decl.data) {
                    .Fn => {
                        if (isStaticMethod(decl)) {
                            method_count += 1;
                        }
                    },
                    else => {},
                }
            }
            return method_count;
        }
    };
    // The counts are recomputed with open-coded loops below instead of the
    // helpers above.
    // TODO: Check back when https://github.com/ziglang/zig/issues/6084 is fixed.
    comptime var max_initializer_args = 0;
    inline for (ti.Struct.decls) |decl| {
        comptime {
            switch (decl.data) {
                .Fn => {
                    if (meta.isInitialize(decl)) {
                        const fn_type_info = @typeInfo(decl.data.Fn.fn_type);
                        const count = fn_type_info.Fn.args.len - 2;
                        max_initializer_args = std.math.max(max_initializer_args, @as(comptime_int, count));
                    }
                },
                else => {},
            }
        }
    }
    // comptime const max_initializer_args = meta.calcMaxInitializerArgs(ti.Struct);
    comptime var instance_method_count = 0;
    inline for (ti.Struct.decls) |decl| {
        comptime {
            switch (decl.data) {
                .Fn => {
                    if (meta.isInstanceMethod(decl)) {
                        instance_method_count += 1;
                    }
                },
                else => {},
            }
        }
    }
    // comptime const instance_method_count = meta.countInstanceMethods(ti.Struct);
    comptime var static_method_count = 0;
    inline for (ti.Struct.decls) |decl| {
        comptime {
            switch (decl.data) {
                .Fn => {
                    if (meta.isStaticMethod(decl)) {
                        static_method_count += 1;
                    }
                },
                else => {},
            }
        }
        // comptime const static_method_count = meta.countStaticMethods(ti.Struct);
    }
    // Initializer table indexed by user-argument count.
    // One of the zero-arg initializer.
    comptime var initializers: [max_initializer_args + 1]WrappedFn = undefined;
    std.mem.set(WrappedFn, initializers[0..], null);
    comptime var finalizer: WrappedFinalizeFn = null;
    comptime var instance_methods: [instance_method_count]MethodDesc = undefined;
    comptime var instance_method_index = 0;
    comptime var static_methods: [static_method_count]MethodDesc = undefined;
    comptime var static_method_index = 0;
    // Wrap every qualifying declaration into its table.
    inline for (ti.Struct.decls) |decl| {
        comptime {
            switch (decl.data) {
                .Fn => {
                    if (meta.isInitialize(decl)) {
                        initializers[@typeInfo(decl.data.Fn.fn_type).Fn.args.len - 2] = wrap(Class, @field(Class, decl.name), .Initialize);
                    }
                    if (meta.isFinalize(decl)) {
                        finalizer = wrap(Class, @field(Class, decl.name), .Finalize);
                    }
                    if (meta.isInstanceMethod(decl)) {
                        instance_methods[instance_method_index] = MethodDesc{
                            .name = decl.name,
                            .argument_count = @typeInfo(decl.data.Fn.fn_type).Fn.args.len - 2,
                            .method = wrap(Class, @field(Class, decl.name), .Instance),
                        };
                        instance_method_index += 1;
                    }
                    if (meta.isStaticMethod(decl)) {
                        static_methods[static_method_index] = MethodDesc{
                            .name = decl.name,
                            .argument_count = @typeInfo(decl.data.Fn.fn_type).Fn.args.len - 1,
                            .method = wrap(Class, @field(Class, decl.name), .Static),
                        };
                        static_method_index += 1;
                    }
                },
                else => {},
            }
        }
    }
    return struct {
        const name: []const u8 = class_name;
        // Wren "allocate" callback: dispatches to the initializer whose arity
        // matches the number of occupied slots (slot 0 is the class itself).
        // NOTE(review): if no initializer of that arity exists the table entry
        // is null and `.?` will trap — confirm intended.
        fn allocate(cvm: ?*wren.Vm) callconv(.C) void {
            const vm = Vm.fromC(cvm);
            // There is always one slot for the class itself.
            // TODO: Check this is set.
            initializers[vm.getSlotCount() - 1].?(cvm);
        }
        pub fn bindForeignClass(cvm: ?*wren.Vm, module_name: [*c]const u8, class_name_: [*c]const u8) callconv(.C) wren.ForeignClassMethods {
            assert(std.mem.eql(u8, name, std.mem.span(class_name_)));
            return wren.ForeignClassMethods{
                .allocate = allocate,
                .finalize = finalizer,
            };
        }
        // Resolves a Wren signature like "add(_)" to a wrapped method by
        // name and underscore-derived argument count.
        // NOTE(review): if the signature contains no '(', method_name.len
        // stays 0 — getters would never match; confirm intended.
        pub fn bindForeignMethod(cvm: ?*wren.Vm, module_name: [*c]const u8, class_name_: [*c]const u8, is_static: bool, signature: [*c]const u8) callconv(.C) wren.ForeignMethodFn {
            assert(std.mem.eql(u8, name, std.mem.span(class_name_)));
            var method_name: []const u8 = undefined;
            method_name.ptr = signature;
            method_name.len = 0;
            var arg_count: usize = 0;
            for (std.mem.span(signature)) |c, i| {
                if (c == '(') method_name.len = i;
                if (c == '_') arg_count = arg_count + 1;
            }
            const methods = if (is_static) &static_methods else &instance_methods;
            for (methods) |method| {
                if (std.mem.eql(u8, method.name, std.mem.span(method_name)) and method.argument_count == arg_count) {
                    return method.method;
                }
            }
            return null;
        }
    };
}

/// Aggregates several `ForeignClass(...)` types (passed as a tuple) into a
/// single pair of bind callbacks that dispatch on the class name.
pub fn ForeignClassInterface(comptime Classes: anytype) type {
    if (@typeInfo(@TypeOf(Classes)) != .Struct or (@typeInfo(@TypeOf(Classes)) == .Struct and !@typeInfo(@TypeOf(Classes)).Struct.is_tuple)) {
        @compileError("foreign classes must be passed as a tuple");
    }
    const ClassDesc = struct {
        name: []const u8,
        bindForeignClassFn: wren.BindForeignClassFn,
        bindForeignMethodFn: wren.BindForeignMethodFn,
    };
    comptime var classes: [Classes.len]ClassDesc = undefined;
    comptime var class_index = 0;
    comptime {
        inline for (Classes) |Class| {
            classes[class_index] = ClassDesc{
                .name = Class.name,
                .bindForeignClassFn = Class.bindForeignClass,
                .bindForeignMethodFn = Class.bindForeignMethod,
            };
            class_index += 1;
        }
    }
    return struct {
        pub fn bindForeignClass(cvm: ?*wren.Vm, module_name: [*c]const u8, class_name: [*c]const u8) callconv(.C) wren.ForeignClassMethods {
            for (classes) |class| {
                if (std.mem.eql(u8, class.name, std.mem.span(class_name))) {
                    return class.bindForeignClassFn.?(cvm, module_name, class_name);
                }
            }
            // Binding an unregistered class is a programming error.
            std.debug.panic("can not bind unknown class {s}", .{class_name});
        }
        pub fn bindForeignMethod(cvm: ?*wren.Vm, module_name: [*c]const u8, class_name: [*c]const u8, is_static: bool, signature: [*c]const u8) callconv(.C) wren.ForeignMethodFn {
            for (classes) |class| {
                if (std.mem.eql(u8, class.name, std.mem.span(class_name))) {
                    return class.bindForeignMethodFn.?(cvm, module_name, class_name, is_static, signature);
                }
            }
            std.debug.panic("can not bind unknown function {s}.{s}", .{ class_name, signature });
        }
    };
}

/// Installs the aggregated bind callbacks for `Classes` on `config`.
pub fn registerForeignClasses(config: *Configuration, comptime Classes: anytype) void {
    const interface = ForeignClassInterface(Classes);
    config.bindForeignClassFn = interface.bindForeignClass;
    config.bindForeignMethodFn = interface.bindForeignMethod;
}

// --- test support below ---

const EmptyUserData = struct {};

fn vmPrintError(vm: *Vm, error_type: ErrorType, module: ?[]const u8, line: ?u32, message: []const u8) void {
    std.debug.print("error_type={}, module={}, line={}, message={}\n", .{ error_type, module, line, message });
}

fn vmPrint(vm: *Vm, msg: []const u8) void {
    std.debug.print("{}", .{msg});
}

// Flags flipped by the test fixtures so the test can observe callbacks.
var test_class_was_allocated = false;
var test_class_was_allocated_with_3_params = false;
var test_class_was_called = false;
var test_class_was_finalized = false;

const TestClass = struct {
    const Self = @This();

    // Make the struct have a size > 0.
    data: [4]u8,

    pub fn initialize0(self: *Self, vm: *Vm) !void {
        test_class_was_allocated = true;
    }

    pub fn initialize3(self: *Self, vm: *Vm, x: i32, y: i32, z: i32) !void {
        test_class_was_allocated_with_3_params = true;
    }

    pub fn finalize(self: *Self) void {
        test_class_was_finalized = true;
    }

    pub fn call(self: *Self, vm: *Vm, str: []const u8) void {
        if (std.mem.eql(u8, str, "lemon")) {
            test_class_was_called = true;
        }
    }
};

var adder_works = false;

const Adder = struct {
    const Self = @This();

    summand: u32,

    pub fn initialize1(self: *Self, vm: *Vm, summand: u32) void {
        self.summand = summand;
    }

    pub fn add(self: *Self, vm: *Vm, summand: u32) void {
        if (self.summand == 5 and summand == 3) {
            adder_works = true;
        }
    }
};

const Namespace = struct {
    pub fn multiply(vm: *Vm, a: i32, b: i32) i32 {
        if (a == 6 and b == 9) {
            return 42;
        }
        return a * b;
    }
};

test "accessing foreign classes" {
    const allocator = std.testing.allocator;
    var config = Configuration{};
    config.errorFn = vmPrintError;
    config.writeFn = vmPrint;
    config.registerForeignClasses(.{
        ForeignClass("TestClass", TestClass),
        ForeignClass("Adder", Adder),
        ForeignClass("Namespace", Namespace),
    });
    var vm: Vm = undefined;
    try config.newVmInPlace(EmptyUserData, &vm, null);
    try vm.interpret("test",
        \\foreign class TestClass {
        \\ construct new() {}
        \\ construct new(a, b, c) {}
        \\ foreign call(s)
        \\}
        \\var tc = TestClass.new()
        \\tc.call("lemon")
        \\
        \\var tc2 = TestClass.new(1, 2, 3)
        \\
        \\foreign class Adder {
        \\ construct new(summand) {}
        \\ foreign add(summand)
        \\}
        \\var adder = Adder.new(5)
        \\adder.add(3)
        \\
        \\foreign class Namespace {
        \\ foreign static multiply(a, b)
        \\}
        \\
        \\var product = Namespace.multiply(6, 9)
    );
    vm.ensureSlots(1);
    vm.getVariable("test", "product", 0);
    const product = vm.getSlot(i32, 0);
    testing.expectEqual(@as(i32, 42), product);
    vm.deinit();
    testing.expect(test_class_was_allocated);
    testing.expect(test_class_was_finalized);
    testing.expect(test_class_was_called);
    testing.expect(test_class_was_allocated_with_3_params);
    testing.expect(adder_works);
}
src/zapata/foreign.zig
//! Advent of Code 2021, day 15: lowest-total-risk path through a grid
//! (part 2 tiles the grid 5x5 with incrementing, 9->1 wrapping risk).

const std = @import("std");
const string = []const u8;
const range = @import("range").range;

const input = @embedFile("../input/day15.txt");

/// Grid coordinate. `neighbors` yields the 4-connected neighbours inside an
/// s-by-s square grid; out-of-bounds directions are null.
const Point = struct {
    x: usize,
    y: usize,

    fn neighbors(self: Point, s: usize) [4]?Point {
        return .{
            if (self.y == 0) null else .{ .x = self.x, .y = self.y - 1 },
            if (self.y == s - 1) null else .{ .x = self.x, .y = self.y + 1 },
            if (self.x == 0) null else .{ .x = self.x - 1, .y = self.y },
            if (self.x == s - 1) null else .{ .x = self.x + 1, .y = self.y },
        };
    }

    /// Pairs this point with a tentative path cost for the priority queue.
    fn toPlus(self: Point, cost: u32) PointPlus {
        return .{ .x = self.x, .y = self.y, .cost = cost };
    }
};

/// A Point plus the tentative path cost used as the priority-queue key.
const PointPlus = struct {
    x: usize,
    y: usize,
    cost: u32,

    fn toPoint(self: PointPlus) Point {
        return .{ .x = self.x, .y = self.y };
    }
};

pub fn main() !void {
    // part 1: parse the square digit grid as-is and run Dijkstra.
    {
        var iter = std.mem.split(u8, input, "\n");
        var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        defer arena.deinit();
        const alloc = &arena.allocator;
        var grid = std.ArrayList([]u32).init(alloc);
        defer grid.deinit();
        while (iter.next()) |line| {
            if (line.len == 0) continue;
            var list = std.ArrayList(u32).init(alloc);
            defer list.deinit();
            // Each character is a single-digit risk value.
            for (line) |_, i| {
                try list.append(try std.fmt.parseUnsigned(u32, line[i..][0..1], 10));
            }
            try grid.append(list.toOwnedSlice());
        }
        const graph = grid.toOwnedSlice();
        const shortest = try shortestPath(alloc, graph);
        std.debug.print("{d}\n", .{shortest});
    }
    // part 2: tile the grid 5x5; each step right/down adds 1 to every cell,
    // wrapping 9 back to 1 (x % 9 + 1 implements exactly that for 1..9).
    {
        var iter = std.mem.split(u8, input, "\n");
        var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        defer arena.deinit();
        const alloc = &arena.allocator;
        var grid = std.ArrayList([]u32).init(alloc);
        defer grid.deinit();
        var col_i: usize = 0;
        while (iter.next()) |line| {
            if (line.len == 0) continue;
            var list = std.ArrayList(u32).init(alloc);
            defer list.deinit();
            var row_i: usize = 0;
            for (line) |_| {
                try list.append(try std.fmt.parseUnsigned(u32, line[row_i..][0..1], 10));
                row_i += 1;
            }
            // Extend the row to the right: 4 more tiles, each derived from
            // the values one tile (l cells) to the left.
            const l = list.items.len;
            for (range(4)) |_| {
                for (range(l)) |_| {
                    try list.append(list.items[row_i - l] % 9 + 1);
                    row_i += 1;
                }
            }
            try grid.append(list.toOwnedSlice());
            col_i += 1;
        }
        // Extend downward: 4 more tile-rows, each derived from the row one
        // tile (l rows) above.
        {
            const l = grid.items.len;
            for (range(4)) |_| {
                for (range(l)) |_| {
                    const next = try alloc.dupe(u32, grid.items[col_i - l]);
                    for (next) |*n| {
                        n.* = n.* % 9 + 1;
                    }
                    try grid.append(next);
                    col_i += 1;
                }
            }
        }
        const graph = grid.toOwnedSlice();
        const shortest = try shortestPath(alloc, graph);
        std.debug.print("{d}\n", .{shortest});
    }
}

/// Dijkstra over an l-by-l grid where the edge weight into cell (x, y) is
/// grid[y][x]. Returns the minimal total risk from (0,0) to (l-1,l-1),
/// not counting the start cell.
// Modified https://en.wikipedia.org/wiki/Dijkstra's_algorithm
fn shortestPath(alloc: *std.mem.Allocator, grid: []const []const u32) !u32 {
    const l = grid.len;
    var dists = std.AutoHashMap(Point, u32).init(alloc);
    defer dists.deinit();
    var q = std.PriorityQueue(PointPlus, compareFn).init(alloc);
    defer q.deinit();
    for (range(l)) |_, y| {
        for (range(l)) |_, x| {
            try dists.put(Point{ .x = x, .y = y }, std.math.maxInt(u32));
        }
    }
    dists.getEntry(Point{ .x = 0, .y = 0 }).?.value_ptr.* = 0;
    try q.add(PointPlus{ .x = 0, .y = 0, .cost = 0 });
    while (q.len > 0) {
        const u = q.remove();
        // Stale queue entry (a shorter path was found since it was pushed).
        if (u.cost != dists.get(u.toPoint())) {
            continue;
        }
        if (u.x == l - 1 and u.y == l - 1) {
            break;
        }
        for (u.toPoint().neighbors(l)) |vop| {
            if (vop == null) continue;
            const v = vop.?;
            const alt = dists.get(u.toPoint()).? + grid[v.y][v.x];
            if (alt < dists.get(v).?) {
                dists.getEntry(v).?.value_ptr.* = alt;
                try q.add(v.toPlus(alt));
            }
        }
    }
    var shortest = dists.get(Point{ .x = l - 1, .y = l - 1 }).?;
    // The start distance is always 0, so this is a no-op kept for clarity
    // ("the starting position is never entered").
    shortest -= dists.get(Point{ .x = 0, .y = 0 }).?;
    return shortest;
}

/// Min-heap ordering on path cost for the priority queue.
fn compareFn(a: PointPlus, b: PointPlus) std.math.Order {
    return std.math.order(a.cost, b.cost);
}
src/day15.zig
//! Parser for Wayland protocol XML files: turns the xml.Parser event stream
//! into an owned `Protocol` tree. Every parse* function consumes events up
//! to (and including) its element's close tag and returns an owned value;
//! all heap data is freed via the corresponding `deinit`.

pub const std = @import("std");
pub const xml = @import("xml.zig");
pub const mem = std.mem;

/// Root of a parsed protocol file. Owns all nested data.
pub const Protocol = struct {
    name: []u8,
    copyright: ?Copyright,
    description: ?Description,
    interfaces: []Interface,
    allocator: mem.Allocator,

    pub fn deinit(proto: *Protocol) void {
        proto.allocator.free(proto.name);
        if (proto.copyright) |*copyright| copyright.deinit();
        if (proto.description) |*description| description.deinit();
        for (proto.interfaces) |*iface| iface.deinit();
        proto.allocator.free(proto.interfaces);
    }
};

/// Raw text of a <copyright> element.
pub const Copyright = struct {
    content: []u8,
    allocator: mem.Allocator,

    pub fn deinit(copy: *Copyright) void {
        copy.allocator.free(copy.content);
    }
};

/// An <interface>: requests, events and enums plus metadata.
pub const Interface = struct {
    name: []u8,
    version: u32,
    description: ?Description,
    requests: []Message,
    events: []Message,
    enums: []Enum,
    allocator: mem.Allocator,

    pub fn deinit(iface: *Interface) void {
        iface.allocator.free(iface.name);
        if (iface.description) |*description| description.deinit();
        for (iface.requests) |*request| request.deinit();
        iface.allocator.free(iface.requests);
        for (iface.events) |*event| event.deinit();
        iface.allocator.free(iface.events);
        for (iface.enums) |*enm| enm.deinit();
        iface.allocator.free(iface.enums);
    }
};

/// A <request> or <event>.
pub const Message = struct {
    name: []u8,
    is_destructor: bool,
    since: u32,
    description: ?Description,
    args: []Arg,
    allocator: mem.Allocator,

    pub fn deinit(msg: *Message) void {
        msg.allocator.free(msg.name);
        if (msg.description) |*description| description.deinit();
        for (msg.args) |*arg| arg.deinit();
        msg.allocator.free(msg.args);
    }
};

/// An <enum>, possibly a bitfield.
pub const Enum = struct {
    name: []u8,
    since: u32,
    bitfield: bool,
    description: ?Description,
    entries: []Entry,
    allocator: mem.Allocator,

    pub fn deinit(enm: *Enum) void {
        enm.allocator.free(enm.name);
        if (enm.description) |*description| description.deinit();
        for (enm.entries) |*entry| entry.deinit();
        enm.allocator.free(enm.entries);
    }
};

/// One <entry> of an enum.
pub const Entry = struct {
    name: []u8,
    value: u32,
    summary: ?[]u8,
    since: u32,
    description: ?Description,
    allocator: mem.Allocator,

    pub fn deinit(ent: *Entry) void {
        ent.allocator.free(ent.name);
        if (ent.summary) |s| ent.allocator.free(s);
        if (ent.description) |*desc| desc.deinit();
    }
};

/// One <arg> of a message.
pub const Arg = struct {
    name: []u8,
    kind: ArgKind,
    summary: ?[]u8,
    interface: ?[]u8,
    allow_null: bool,
    @"enum": ?[]u8,
    allocator: mem.Allocator,

    pub fn deinit(arg: *Arg) void {
        arg.allocator.free(arg.name);
        if (arg.summary) |s| arg.allocator.free(s);
        if (arg.interface) |i| arg.allocator.free(i);
        if (arg.@"enum") |e| arg.allocator.free(e);
    }
};

/// Wire types a protocol argument can have.
pub const ArgKind = enum {
    new_id,
    int,
    uint,
    fixed,
    string,
    object,
    array,
    fd,
};

/// A <description>: mandatory summary attribute plus body text.
pub const Description = struct {
    summary: []u8,
    content: []u8,
    allocator: mem.Allocator,

    pub fn deinit(desc: *Description) void {
        desc.allocator.free(desc.summary);
        desc.allocator.free(desc.content);
    }
};

/// Parses a whole protocol file. Returns error.NoProtocol if no <protocol>
/// element is found. Caller owns the result (free with `Protocol.deinit`).
pub fn parseFile(allocator: mem.Allocator, file: []const u8) !Protocol {
    var parser = xml.Parser.init(file);
    while (parser.next()) |ev| switch (ev) {
        .open_tag => |tag| {
            if (mem.eql(u8, tag, "protocol")) {
                return try parseProtocol(allocator, &parser);
            }
        },
        else => {},
    };
    return error.NoProtocol;
}

fn parseProtocol(allocator: mem.Allocator, parser: *xml.Parser) !Protocol {
    var name: ?[]u8 = null;
    errdefer if (name) |n| allocator.free(n);
    var copyright: ?Copyright = null;
    errdefer if (copyright) |*c| c.deinit();
    var description: ?Description = null;
    errdefer if (description) |*d| d.deinit();
    var interfaces = std.ArrayList(Interface).init(allocator);
    errdefer interfaces.deinit();
    while (parser.next()) |ev| switch (ev) {
        .open_tag => |tag| {
            if (mem.eql(u8, tag, "copyright")) {
                if (copyright != null) return error.DuplicateCopyright;
                copyright = try parseCopyright(allocator, parser);
            } else if (mem.eql(u8, tag, "description")) {
                if (description != null) return error.DuplicateDescription;
                description = try parseDescription(allocator, parser);
            } else if (mem.eql(u8, tag, "interface")) {
                var iface = try parseInterface(allocator, parser);
                errdefer iface.deinit();
                try interfaces.append(iface);
            }
        },
        .attribute => |attr| {
            if (mem.eql(u8, attr.name, "name")) {
                if (name != null) return error.DuplicateName;
                name = try attr.dupeValue(allocator);
            }
        },
        .close_tag => |tag| {
            if (mem.eql(u8, tag, "protocol")) {
                return Protocol{
                    .name = name orelse return error.ProtocolNameMissing,
                    .copyright = copyright,
                    .description = description,
                    .interfaces = interfaces.toOwnedSlice(),
                    .allocator = allocator,
                };
            }
        },
        else => {},
    };
    return error.UnexpectedEof;
}

fn parseCopyright(allocator: mem.Allocator, parser: *xml.Parser) !Copyright {
    var content = std.ArrayList(u8).init(allocator);
    errdefer content.deinit();
    while (parser.next()) |ev| switch (ev) {
        .character_data => |text| {
            try content.appendSlice(text);
        },
        .close_tag => |tag| {
            if (mem.eql(u8, tag, "copyright")) {
                return Copyright{
                    .content = content.toOwnedSlice(),
                    .allocator = allocator,
                };
            }
        },
        else => {},
    };
    return error.UnexpectedEof;
}

fn parseInterface(allocator: mem.Allocator, parser: *xml.Parser) !Interface {
    var name: ?[]u8 = null;
    errdefer if (name) |n| allocator.free(n);
    // Per spec, version defaults to 1 when absent.
    var version: u32 = 1;
    var description: ?Description = null;
    errdefer if (description) |*d| d.deinit();
    var requests = std.ArrayList(Message).init(allocator);
    errdefer requests.deinit();
    var events = std.ArrayList(Message).init(allocator);
    errdefer events.deinit();
    var enums = std.ArrayList(Enum).init(allocator);
    errdefer enums.deinit();
    while (parser.next()) |ev| switch (ev) {
        .open_tag => |tag| {
            if (mem.eql(u8, tag, "description")) {
                if (description != null) return error.DuplicateDescription;
                description = try parseDescription(allocator, parser);
            } else if (mem.eql(u8, tag, "request")) {
                var req = try parseMessage(allocator, parser);
                errdefer req.deinit();
                try requests.append(req);
            } else if (mem.eql(u8, tag, "event")) {
                var evt = try parseMessage(allocator, parser);
                errdefer evt.deinit();
                try events.append(evt);
            } else if (mem.eql(u8, tag, "enum")) {
                var enm = try parseEnum(allocator, parser);
                errdefer enm.deinit();
                try enums.append(enm);
            }
        },
        .attribute => |attr| {
            if (mem.eql(u8, attr.name, "name")) {
                if (name != null) return error.DuplicateName;
                name = try attr.dupeValue(allocator);
            } else if (mem.eql(u8, attr.name, "version")) {
                const value = try attr.dupeValue(allocator);
                defer allocator.free(value);
                version = try std.fmt.parseInt(u32, value, 10);
            }
        },
        .close_tag => |tag| {
            if (mem.eql(u8, tag, "interface")) {
                return Interface{
                    .name = name orelse return error.InterfaceNameMissing,
                    .version = version,
                    .description = description,
                    .requests = requests.toOwnedSlice(),
                    .events = events.toOwnedSlice(),
                    .enums = enums.toOwnedSlice(),
                    .allocator = allocator,
                };
            }
        },
        else => {},
    };
    return error.UnexpectedEof;
}

// Parses a <request> or <event> (both share the same shape and close on
// either tag).
fn parseMessage(allocator: mem.Allocator, parser: *xml.Parser) !Message {
    var name: ?[]u8 = null;
    errdefer if (name) |n| allocator.free(n);
    var is_destructor: bool = false;
    var since: u32 = 1;
    var description: ?Description = null;
    errdefer if (description) |*d| d.deinit();
    var args = std.ArrayList(Arg).init(allocator);
    errdefer args.deinit();
    while (parser.next()) |ev| switch (ev) {
        .open_tag => |tag| {
            if (mem.eql(u8, tag, "description")) {
                if (description != null) return error.DuplicateDescription;
                description = try parseDescription(allocator, parser);
            } else if (mem.eql(u8, tag, "arg")) {
                var arg = try parseArg(allocator, parser);
                errdefer arg.deinit();
                try args.append(arg);
            }
        },
        .attribute => |attr| {
            if (mem.eql(u8, attr.name, "name")) {
                if (name != null) return error.DuplicateName;
                name = try attr.dupeValue(allocator);
            } else if (mem.eql(u8, attr.name, "type")) {
                // "destructor" is the only valid message type attribute.
                if (attr.valueEql("destructor")) {
                    is_destructor = true;
                } else {
                    return error.InvalidMessageType;
                }
            } else if (mem.eql(u8, attr.name, "since")) {
                const value = try attr.dupeValue(allocator);
                // Was `errdefer`, which leaked the dupe on the success path;
                // `defer` matches the identical handling in parseInterface/
                // parseEnum/parseEntry.
                defer allocator.free(value);
                since = try std.fmt.parseInt(u32, value, 10);
            }
        },
        .close_tag => |tag| {
            if (mem.eql(u8, tag, "request") or mem.eql(u8, tag, "event")) {
                return Message{
                    .name = name orelse return error.MessageNameMissing,
                    .description = description,
                    .is_destructor = is_destructor,
                    .since = since,
                    .args = args.toOwnedSlice(),
                    .allocator = allocator,
                };
            }
        },
        else => {},
    };
    return error.UnexpectedEof;
}

fn parseEnum(allocator: mem.Allocator, parser: *xml.Parser) !Enum {
    var name: ?[]u8 = null;
    errdefer if (name) |n| allocator.free(n);
    var since: u32 = 1;
    var bitfield: bool = false;
    var description: ?Description = null;
    errdefer if (description) |*d| d.deinit();
    var entries = std.ArrayList(Entry).init(allocator);
    errdefer entries.deinit();
    while (parser.next()) |ev| switch (ev) {
        .open_tag => |tag| {
            if (mem.eql(u8, tag, "description")) {
                if (description != null) return error.DuplicateDescription;
                description = try parseDescription(allocator, parser);
            } else if (mem.eql(u8, tag, "entry")) {
                var ent = try parseEntry(allocator, parser);
                errdefer ent.deinit();
                try entries.append(ent);
            }
        },
        .attribute => |attr| {
            if (mem.eql(u8, attr.name, "name")) {
                // NOTE(review): unlike the other parsers there is no
                // DuplicateName check here; a repeated name attribute would
                // leak the first dupe — confirm intended.
                name = try attr.dupeValue(allocator);
            } else if (mem.eql(u8, attr.name, "since")) {
                const value = try attr.dupeValue(allocator);
                defer allocator.free(value);
                since = try std.fmt.parseInt(u32, value, 10);
            } else if (mem.eql(u8, attr.name, "bitfield")) {
                if (attr.valueEql("true")) {
                    bitfield = true;
                } else if (attr.valueEql("false")) {
                    bitfield = false;
                } else {
                    return error.InvalidBool;
                }
            }
        },
        .close_tag => |tag| {
            if (mem.eql(u8, tag, "enum")) {
                return Enum{
                    .name = name orelse return error.EnumNameMissing,
                    .since = since,
                    .bitfield = bitfield,
                    .description = description,
                    .entries = entries.toOwnedSlice(),
                    .allocator = allocator,
                };
            }
        },
        else => {},
    };
    return error.UnexpectedEof;
}

fn parseEntry(allocator: mem.Allocator, parser: *xml.Parser) !Entry {
    var name: ?[]u8 = null;
    errdefer if (name) |n| allocator.free(n);
    var value: u32 = 0;
    var summary: ?[]u8 = null;
    errdefer if (summary) |s| allocator.free(s);
    var since: u32 = 1;
    // NOTE(review): no open_tag handler sets this, so it is always null.
    var description: ?Description = null;
    while (parser.next()) |ev| switch (ev) {
        .attribute => |attr| {
            if (mem.eql(u8, attr.name, "name")) {
                if (name != null) return error.DuplicateName;
                name = try attr.dupeValue(allocator);
            } else if (mem.eql(u8, attr.name, "value")) {
                const attrvalue = try attr.dupeValue(allocator);
                defer allocator.free(attrvalue);
                // Values may be hex ("0x...") or decimal.
                if (attr.valueStartsWith("0x"))
                    value = try std.fmt.parseInt(u32, attrvalue[2..], 16)
                else
                    value = try std.fmt.parseInt(u32, attrvalue, 10);
            } else if (mem.eql(u8, attr.name, "summary")) {
                if (summary != null) return error.DuplicateSummary;
                summary = try attr.dupeValue(allocator);
            } else if (mem.eql(u8, attr.name, "since")) {
                const attrvalue = try attr.dupeValue(allocator);
                defer allocator.free(attrvalue);
                since = try std.fmt.parseInt(u32, attrvalue, 10);
            }
        },
        .close_tag => |tag| {
            if (mem.eql(u8, tag, "entry")) {
                return Entry{
                    .name = name orelse return error.EntryNameMissing,
                    .value = value,
                    // NOTE(review): substituting "" instead of keeping null
                    // defeats the optional and makes deinit free a literal;
                    // Arg keeps null here — confirm whether `.summary = summary`
                    // was intended. Behavior preserved.
                    .summary = summary orelse "",
                    .since = since,
                    .description = description,
                    .allocator = allocator,
                };
            }
        },
        else => {},
    };
    return error.UnexpectedEof;
}

// Maps the type attribute text to an ArgKind.
fn parseArgKind(string: []const u8) !ArgKind {
    if (mem.eql(u8, string, "new_id")) {
        return .new_id;
    } else if (mem.eql(u8, string, "int")) {
        return .int;
    } else if (mem.eql(u8, string, "uint")) {
        return .uint;
    } else if (mem.eql(u8, string, "fixed")) {
        return .fixed;
    } else if (mem.eql(u8, string, "string")) {
        return .string;
    } else if (mem.eql(u8, string, "object")) {
        return .object;
    } else if (mem.eql(u8, string, "array")) {
        return .array;
    } else if (mem.eql(u8, string, "fd")) {
        return .fd;
    } else {
        return error.InvalidArgKind;
    }
}

fn parseArg(allocator: mem.Allocator, parser: *xml.Parser) !Arg {
    var name: ?[]u8 = null;
    errdefer if (name) |n| allocator.free(n);
    var kind: ArgKind = .new_id;
    var summary: ?[]u8 = null;
    errdefer if (summary) |s| allocator.free(s);
    var interface: ?[]u8 = null;
    errdefer if (interface) |i| allocator.free(i);
    var allow_null = false;
    var @"enum": ?[]u8 = null;
    errdefer if (@"enum") |e| allocator.free(e);
    while (parser.next()) |ev| switch (ev) {
        .attribute => |attr| {
            if (mem.eql(u8, attr.name, "name")) {
                if (name != null) return error.DuplicateName;
                name = try attr.dupeValue(allocator);
            } else if (mem.eql(u8, attr.name, "type")) {
                const value = try attr.dupeValue(allocator);
                defer allocator.free(value);
                kind = try parseArgKind(value);
            } else if (mem.eql(u8, attr.name, "summary")) {
                if (summary != null) return error.DuplicateSummary;
                summary = try attr.dupeValue(allocator);
            } else if (mem.eql(u8, attr.name, "interface")) {
                if (interface != null) return error.DuplicateInterface;
                interface = try attr.dupeValue(allocator);
            } else if (mem.eql(u8, attr.name, "allow-null")) {
                if (attr.valueEql("true")) {
                    allow_null = true;
                } else if (!attr.valueEql("false")) {
                    return error.InvalidBool;
                }
            } else if (mem.eql(u8, attr.name, "enum")) {
                if (@"enum" != null) return error.DuplicateEnum;
                @"enum" = try attr.dupeValue(allocator);
            }
        },
        .close_tag => |tag| {
            if (mem.eql(u8, tag, "arg")) {
                return Arg{
                    .name = name orelse return error.ArgNameMissing,
                    .kind = kind,
                    .summary = summary,
                    .interface = interface,
                    .allow_null = allow_null,
                    .@"enum" = @"enum",
                    .allocator = allocator,
                };
            }
        },
        else => {},
    };
    return error.UnexpectedEof;
}

fn parseDescription(allocator: mem.Allocator, parser: *xml.Parser) !Description {
    var summary: ?[]u8 = null;
    errdefer if (summary) |s| allocator.free(s);
    var content = std.ArrayList(u8).init(allocator);
    errdefer content.deinit();
    while (parser.next()) |ev| switch (ev) {
        .attribute => |attr| {
            if (mem.eql(u8, attr.name, "summary")) {
                summary = try attr.dupeValue(allocator);
            }
        },
        .character_data => |text| {
            try content.appendSlice(text);
        },
        .close_tag => |tag| {
            if (mem.eql(u8, tag, "description")) {
                return Description{
                    .summary = summary orelse return error.DescriptionSummaryMissing,
                    .content = content.toOwnedSlice(),
                    .allocator = allocator,
                };
            }
        },
        else => {},
    };
    return error.UnexpectedEof;
}
scanner/wayland.zig
const comptimePrint = @import("std").fmt.comptimePrint;

/// Default feed update interval, in seconds.
pub const update_interval = 600;

/// SQL statements for the application's SQLite schema, grouped per table.
pub const Table = struct {
    pub const item = struct {
        pub const create =
            \\CREATE TABLE IF NOT EXISTS item(
            \\ id INTEGER PRIMARY KEY,
            \\ feed_id INTEGER NOT NULL,
            \\ title TEXT NOT NULL,
            \\ link TEXT DEFAULT NULL,
            \\ guid TEXT DEFAULT NULL,
            \\ pub_date TEXT DEFAULT NULL,
            \\ pub_date_utc INTEGER DEFAULT NULL,
            \\ modified_at INTEGER DEFAULT (strftime('%s', 'now')),
            \\ FOREIGN KEY(feed_id) REFERENCES feed(id) ON DELETE CASCADE,
            \\ UNIQUE(feed_id, guid),
            \\ UNIQUE(feed_id, link)
            \\);
        ;
    };

    /// Update bookkeeping for feeds read from the local filesystem.
    pub const feed_update_local = struct {
        pub const create = comptimePrint(
            \\CREATE TABLE IF NOT EXISTS feed_update_local (
            \\ feed_id INTEGER UNIQUE,
            \\ update_interval INTEGER DEFAULT {d},
            \\ last_update INTEGER DEFAULT (strftime('%s', 'now')),
            \\ last_modified_timestamp INTEGER,
            \\ FOREIGN KEY(feed_id) REFERENCES feed(id) ON DELETE CASCADE
            \\);
        , .{update_interval});
    };

    /// Update bookkeeping for feeds fetched over HTTP (caching headers).
    pub const feed_update_http = struct {
        const name = "feed_update_http";
        pub const create = comptimePrint(
            \\CREATE TABLE IF NOT EXISTS feed_update_http (
            \\ feed_id INTEGER UNIQUE,
            \\ update_countdown INTEGER DEFAULT 0,
            \\ update_interval INTEGER DEFAULT {d},
            \\ last_update INTEGER DEFAULT (strftime('%s', 'now')),
            \\ cache_control_max_age INTEGER DEFAULT NULL,
            \\ expires_utc INTEGER DEFAULT NULL,
            \\ last_modified_utc INTEGER DEFAULT NULL,
            \\ etag TEXT DEFAULT NULL,
            \\ FOREIGN KEY(feed_id) REFERENCES feed(id) ON DELETE CASCADE
            \\);
        , .{update_interval});
    };

    pub const feed = struct {
        pub const create =
            \\CREATE TABLE IF NOT EXISTS feed(
            \\ id INTEGER PRIMARY KEY,
            \\ location TEXT NOT NULL UNIQUE,
            \\ title TEXT NOT NULL,
            \\ link TEXT DEFAULT NULL,
            \\ updated_raw TEXT DEFAULT NULL,
            \\ updated_timestamp INTEGER DEFAULT NULL,
            \\ added_at INTEGER DEFAULT (strftime('%s', 'now'))
            \\);
        ;
        // `?{u64}` is a zig-sqlite typed bind marker for the feed id.
        pub const update_where_id =
            \\UPDATE feed SET
            \\ link = ?,
            \\ updated_raw = ?,
            \\ updated_timestamp = ?
            \\WHERE id = ?{u64}
        ;
    };

    pub const feed_tag = struct {
        // BUG FIX: table constraints must be comma-separated in SQLite; the
        // original was missing the comma between UNIQUE(...) and
        // FOREIGN KEY(...), which makes this CREATE TABLE a syntax error.
        pub const create =
            \\CREATE TABLE IF NOT EXISTS feed_tag (
            \\ feed_id INTEGER NOT NULL,
            \\ tag TEXT NOT NULL,
            \\ UNIQUE(feed_id, tag),
            \\ FOREIGN KEY(feed_id) REFERENCES feed(id) ON DELETE CASCADE
            \\);
            \\
        ;
    };
};
src/queries.zig
// NOTE(review): this looks like machine-generated zigwin32-style bindings;
// prefer fixing the generator over hand-editing — TODO confirm provenance.
pub const PIPE_UNLIMITED_INSTANCES = @as(u32, 255);
pub const NMPWAIT_WAIT_FOREVER = @as(u32, 4294967295);
pub const NMPWAIT_NOWAIT = @as(u32, 1);
pub const NMPWAIT_USE_DEFAULT_WAIT = @as(u32, 0);

//--------------------------------------------------------------------------------
// Section: Types (1)
//--------------------------------------------------------------------------------
// Flag set modeled as a non-exhaustive enum; several Win32 PIPE_* names
// share the same numeric value, hence the commented-out conflicting members
// and the alias constants below.
pub const NAMED_PIPE_MODE = enum(u32) {
    WAIT = 0,
    NOWAIT = 1,
    // READMODE_BYTE = 0, this enum value conflicts with WAIT
    READMODE_MESSAGE = 2,
    // CLIENT_END = 0, this enum value conflicts with WAIT
    // SERVER_END = 1, this enum value conflicts with NOWAIT
    // TYPE_BYTE = 0, this enum value conflicts with WAIT
    TYPE_MESSAGE = 4,
    // ACCEPT_REMOTE_CLIENTS = 0, this enum value conflicts with WAIT
    REJECT_REMOTE_CLIENTS = 8,
    _,
    // Builds a combined mode value by OR-ing the selected flags.
    pub fn initFlags(o: struct {
        WAIT: u1 = 0,
        NOWAIT: u1 = 0,
        READMODE_MESSAGE: u1 = 0,
        TYPE_MESSAGE: u1 = 0,
        REJECT_REMOTE_CLIENTS: u1 = 0,
    }) NAMED_PIPE_MODE {
        return @intToEnum(NAMED_PIPE_MODE, (if (o.WAIT == 1) @enumToInt(NAMED_PIPE_MODE.WAIT) else 0) | (if (o.NOWAIT == 1) @enumToInt(NAMED_PIPE_MODE.NOWAIT) else 0) | (if (o.READMODE_MESSAGE == 1) @enumToInt(NAMED_PIPE_MODE.READMODE_MESSAGE) else 0) | (if (o.TYPE_MESSAGE == 1) @enumToInt(NAMED_PIPE_MODE.TYPE_MESSAGE) else 0) | (if (o.REJECT_REMOTE_CLIENTS == 1) @enumToInt(NAMED_PIPE_MODE.REJECT_REMOTE_CLIENTS) else 0));
    }
};
// Win32 PIPE_* aliases; names whose values collide map to the same member.
pub const PIPE_WAIT = NAMED_PIPE_MODE.WAIT;
pub const PIPE_NOWAIT = NAMED_PIPE_MODE.NOWAIT;
pub const PIPE_READMODE_BYTE = NAMED_PIPE_MODE.WAIT;
pub const PIPE_READMODE_MESSAGE = NAMED_PIPE_MODE.READMODE_MESSAGE;
pub const PIPE_CLIENT_END = NAMED_PIPE_MODE.WAIT;
pub const PIPE_SERVER_END = NAMED_PIPE_MODE.NOWAIT;
pub const PIPE_TYPE_BYTE = NAMED_PIPE_MODE.WAIT;
pub const PIPE_TYPE_MESSAGE = NAMED_PIPE_MODE.TYPE_MESSAGE;
pub const PIPE_ACCEPT_REMOTE_CLIENTS = NAMED_PIPE_MODE.WAIT;
pub const PIPE_REJECT_REMOTE_CLIENTS = NAMED_PIPE_MODE.REJECT_REMOTE_CLIENTS;
//--------------------------------------------------------------------------------
// Section: Functions (22)
//--------------------------------------------------------------------------------
// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn CreatePipe(
    hReadPipe: ?*?HANDLE,
    hWritePipe: ?*?HANDLE,
    lpPipeAttributes: ?*SECURITY_ATTRIBUTES,
    nSize: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn ConnectNamedPipe(
    hNamedPipe: ?HANDLE,
    lpOverlapped: ?*OVERLAPPED,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn DisconnectNamedPipe(
    hNamedPipe: ?HANDLE,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn SetNamedPipeHandleState(
    hNamedPipe: ?HANDLE,
    lpMode: ?*NAMED_PIPE_MODE,
    lpMaxCollectionCount: ?*u32,
    lpCollectDataTimeout: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn PeekNamedPipe(
    hNamedPipe: ?HANDLE,
    // TODO: what to do with BytesParamIndex 2?
    lpBuffer: ?*anyopaque,
    nBufferSize: u32,
    lpBytesRead: ?*u32,
    lpTotalBytesAvail: ?*u32,
    lpBytesLeftThisMessage: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn TransactNamedPipe(
    hNamedPipe: ?HANDLE,
    // TODO: what to do with BytesParamIndex 2?
    lpInBuffer: ?*anyopaque,
    nInBufferSize: u32,
    // TODO: what to do with BytesParamIndex 4?
    lpOutBuffer: ?*anyopaque,
    nOutBufferSize: u32,
    lpBytesRead: ?*u32,
    lpOverlapped: ?*OVERLAPPED,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn CreateNamedPipeW(
    lpName: ?[*:0]const u16,
    dwOpenMode: FILE_FLAGS_AND_ATTRIBUTES,
    dwPipeMode: NAMED_PIPE_MODE,
    nMaxInstances: u32,
    nOutBufferSize: u32,
    nInBufferSize: u32,
    nDefaultTimeOut: u32,
    lpSecurityAttributes: ?*SECURITY_ATTRIBUTES,
) callconv(@import("std").os.windows.WINAPI) ?HANDLE;

pub extern "KERNEL32" fn WaitNamedPipeW(
    lpNamedPipeName: ?[*:0]const u16,
    nTimeOut: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn GetNamedPipeClientComputerNameW(
    Pipe: ?HANDLE,
    // TODO: what to do with BytesParamIndex 2?
    ClientComputerName: ?PWSTR,
    ClientComputerNameLength: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.1.2600'
pub extern "ADVAPI32" fn ImpersonateNamedPipeClient(
    hNamedPipe: ?HANDLE,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetNamedPipeInfo(
    hNamedPipe: ?HANDLE,
    lpFlags: ?*NAMED_PIPE_MODE,
    lpOutBufferSize: ?*u32,
    lpInBufferSize: ?*u32,
    lpMaxInstances: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn GetNamedPipeHandleStateW(
    hNamedPipe: ?HANDLE,
    lpState: ?*NAMED_PIPE_MODE,
    lpCurInstances: ?*u32,
    lpMaxCollectionCount: ?*u32,
    lpCollectDataTimeout: ?*u32,
    lpUserName: ?[*:0]u16,
    nMaxUserNameSize: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

pub extern "KERNEL32" fn CallNamedPipeW(
    lpNamedPipeName: ?[*:0]const u16,
    // TODO: what to do with BytesParamIndex 2?
    lpInBuffer: ?*anyopaque,
    nInBufferSize: u32,
    // TODO: what to do with BytesParamIndex 4?
    lpOutBuffer: ?*anyopaque,
    nOutBufferSize: u32,
    lpBytesRead: ?*u32,
    nTimeOut: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn CreateNamedPipeA(
    lpName: ?[*:0]const u8,
    dwOpenMode: FILE_FLAGS_AND_ATTRIBUTES,
    dwPipeMode: NAMED_PIPE_MODE,
    nMaxInstances: u32,
    nOutBufferSize: u32,
    nInBufferSize: u32,
    nDefaultTimeOut: u32,
    lpSecurityAttributes: ?*SECURITY_ATTRIBUTES,
) callconv(@import("std").os.windows.WINAPI) ?HANDLE;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn GetNamedPipeHandleStateA(
    hNamedPipe: ?HANDLE,
    lpState: ?*NAMED_PIPE_MODE,
    lpCurInstances: ?*u32,
    lpMaxCollectionCount: ?*u32,
    lpCollectDataTimeout: ?*u32,
    lpUserName: ?[*:0]u8,
    nMaxUserNameSize: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn CallNamedPipeA(
    lpNamedPipeName: ?[*:0]const u8,
    // TODO: what to do with BytesParamIndex 2?
    lpInBuffer: ?*anyopaque,
    nInBufferSize: u32,
    // TODO: what to do with BytesParamIndex 4?
    lpOutBuffer: ?*anyopaque,
    nOutBufferSize: u32,
    lpBytesRead: ?*u32,
    nTimeOut: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows5.0'
pub extern "KERNEL32" fn WaitNamedPipeA(
    lpNamedPipeName: ?[*:0]const u8,
    nTimeOut: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "KERNEL32" fn GetNamedPipeClientComputerNameA(
    Pipe: ?HANDLE,
    // TODO: what to do with BytesParamIndex 2?
    ClientComputerName: ?PSTR,
    ClientComputerNameLength: u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "KERNEL32" fn GetNamedPipeClientProcessId(
    Pipe: ?HANDLE,
    ClientProcessId: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "KERNEL32" fn GetNamedPipeClientSessionId(
    Pipe: ?HANDLE,
    ClientSessionId: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "KERNEL32" fn GetNamedPipeServerProcessId(
    Pipe: ?HANDLE,
    ServerProcessId: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "KERNEL32" fn GetNamedPipeServerSessionId(
    Pipe: ?HANDLE,
    ServerSessionId: ?*u32,
) callconv(@import("std").os.windows.WINAPI) BOOL;

//--------------------------------------------------------------------------------
// Section: Unicode Aliases (5)
//--------------------------------------------------------------------------------
// Selects A/W variants under the unsuffixed names based on the root module's
// UNICODE setting; unresolved use without a setting is a compile error.
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
    .ansi => struct {
        pub const CreateNamedPipe = thismodule.CreateNamedPipeA;
        pub const WaitNamedPipe = thismodule.WaitNamedPipeA;
        pub const GetNamedPipeClientComputerName = thismodule.GetNamedPipeClientComputerNameA;
        pub const GetNamedPipeHandleState = thismodule.GetNamedPipeHandleStateA;
        pub const CallNamedPipe = thismodule.CallNamedPipeA;
    },
    .wide => struct {
        pub const CreateNamedPipe = thismodule.CreateNamedPipeW;
        pub const WaitNamedPipe = thismodule.WaitNamedPipeW;
        pub const GetNamedPipeClientComputerName = thismodule.GetNamedPipeClientComputerNameW;
        pub const GetNamedPipeHandleState = thismodule.GetNamedPipeHandleStateW;
        pub const CallNamedPipe = thismodule.CallNamedPipeW;
    },
    .unspecified => if (@import("builtin").is_test) struct {
        // Placeholder opaque types so tests compile without a UNICODE choice.
        pub const CreateNamedPipe = *opaque{};
        pub const WaitNamedPipe = *opaque{};
        pub const GetNamedPipeClientComputerName = *opaque{};
        pub const GetNamedPipeHandleState = *opaque{};
        pub const CallNamedPipe = *opaque{};
    } else struct {
        pub const CreateNamedPipe = @compileError("'CreateNamedPipe' requires that UNICODE be set to true or false in the root module");
        pub const WaitNamedPipe = @compileError("'WaitNamedPipe' requires that UNICODE be set to true or false in the root module");
        pub const GetNamedPipeClientComputerName = @compileError("'GetNamedPipeClientComputerName' requires that UNICODE be set to true or false in the root module");
        pub const GetNamedPipeHandleState = @compileError("'GetNamedPipeHandleState' requires that UNICODE be set to true or false in the root module");
        pub const CallNamedPipe = @compileError("'CallNamedPipe' requires that UNICODE be set to true or false in the root module");
    },
};

//--------------------------------------------------------------------------------
// Section: Imports (7)
//--------------------------------------------------------------------------------
const BOOL = @import("../foundation.zig").BOOL;
const FILE_FLAGS_AND_ATTRIBUTES = @import("../storage/file_system.zig").FILE_FLAGS_AND_ATTRIBUTES;
const HANDLE = @import("../foundation.zig").HANDLE;
const OVERLAPPED = @import("../system/io.zig").OVERLAPPED;
const PSTR = @import("../foundation.zig").PSTR;
const PWSTR = @import("../foundation.zig").PWSTR;
const SECURITY_ATTRIBUTES = @import("../security.zig").SECURITY_ATTRIBUTES;

// Forces semantic analysis of every pub declaration when running tests.
test {
    @setEvalBranchQuota(
        @import("std").meta.declarations(@This()).len * 3
    );
    // reference all the pub declarations
    if (!@import("builtin").is_test) return;
    inline for (@import("std").meta.declarations(@This())) |decl| {
        if (decl.is_pub) {
            _ = decl;
        }
    }
}
win32/system/pipes.zig
const std = @import("std");
const c = @import("internal/c.zig");
const internal = @import("internal/internal.zig");
const log = std.log.scoped(.git);
const git = @import("git.zig");

/// A single gitattributes lookup result, wrapping libgit2's raw attribute
/// string pointer.
pub const Attribute = struct {
    z_attr: [*c]const u8,

    /// Classification of an attribute value.
    pub const AttributeValue = enum(c_uint) {
        /// The attribute has been left unspecified
        UNSPECIFIED = 0,
        /// The attribute has been set
        TRUE,
        /// The attribute has been unset
        FALSE,
        /// This attribute has a value
        STRING,
    };

    /// Asks libgit2 how the raw attribute pointer should be interpreted.
    pub fn getValue(self: Attribute) AttributeValue {
        return @intToEnum(AttributeValue, c.git_attr_value(self.z_attr));
    }

    /// True when the attribute was explicitly set ("Set" in core git
    /// parlance), e.g. `*.c foo` makes "foo" true for `xyz.c`.
    pub fn isTrue(self: Attribute) bool {
        return switch (self.getValue()) {
            .TRUE => true,
            else => false,
        };
    }

    /// True when the attribute was explicitly unset ("Unset", not to be
    /// confused with "Unspecified"), e.g. `*.h -foo` for `zyx.h`.
    pub fn isFalse(self: Attribute) bool {
        return switch (self.getValue()) {
            .FALSE => true,
            else => false,
        };
    }

    /// True when the attribute was never mentioned at all, or was explicitly
    /// reset to unspecified via the `!` operator (e.g. `onefile.c !foo`).
    pub fn isUnspecified(self: Attribute) bool {
        return switch (self.getValue()) {
            .UNSPECIFIED => true,
            else => false,
        };
    }

    /// True when the attribute carries a string value rather than a boolean
    /// state, e.g. `*.txt eol=lf` gives "eol" the value "lf".
    pub fn hasValue(self: Attribute) bool {
        return switch (self.getValue()) {
            .STRING => true,
            else => false,
        };
    }

    comptime {
        std.testing.refAllDecls(@This());
    }
};

comptime {
    std.testing.refAllDecls(@This());
}
src/attribute.zig
const std = @import("std");
const warn = std.debug.warn;

/// Solves the sudoku in `field` in place (0 = empty cell).
/// `rules` holds three 81-entry group maps (rows, columns, 3x3 blocks):
/// rules[r][i] is the group index of cell i under rule r.
/// Returns true when a complete solution was found.
fn solve_sudoku(field: *[81]u8, rules: [3][9 * 9]u8) bool {
    var set_mask = [_]u16{0} ** 81;
    return sudoku_try_set_field(0, field, rules, &set_mask);
}

/// Backtracking step: finds the next empty cell at or after `index`, tries
/// every digit not already excluded by `set_mask` (bit d set = digit d is
/// taken in some group containing that cell), and recurses.
fn sudoku_try_set_field(index: u8, field: *[81]u8, rules: [3][81]u8, set_mask: *[81]u16) bool {
    var field_no: u8 = index;
    while (true) { // search next empty field
        if (field_no >= 81) {
            return true; // no empty cell left: solved
        }
        if (field[field_no] == 0) {
            break; // found one, try digits here
        }
        field_no += 1;
    }
    var i: u8 = 1;
    while (i < 10) {
        if ((@intCast(u16, 1) << @intCast(u4, i) & set_mask[field_no]) == 0) { // don't retry digits already taken
            field[field_no] = i;
            var set_mask_next: [81]u16 = undefined;
            std.mem.copy(u16, &set_mask_next, set_mask); // make a clone
            var all_valid: bool = true;
            for (rules) |rule| {
                if (!valid(field, rule, &set_mask_next)) {
                    all_valid = false;
                    break;
                }
            }
            if (all_valid) {
                if (sudoku_try_set_field(field_no + 1, field, rules, &set_mask_next)) {
                    return true;
                }
            }
        }
        i += 1;
    }
    // Every digit failed: undo and let the caller backtrack.
    field[field_no] = 0;
    return false;
}

/// Checks one rule (group map): no digit may appear twice within a group.
/// As a side effect, ORs each group's taken-digit bitmask into `set_mask`
/// for every cell of that group.
fn valid(field: *[81]u8, rule: [81]u8, set_mask: *[81]u16) bool {
    var flags = [_]u16{0} ** 9;
    for (field) |f, i| {
        if (f == 0) {
            continue;
        }
        const mask: u16 = (@intCast(u16, 1) << @intCast(u4, f));
        const r: u8 = rule[i];
        if (flags[r] & mask != 0) {
            return false; // digit repeated within this group
        }
        flags[r] |= mask;
    }
    for (rule) |r, i| {
        set_mask[i] |= flags[r]; // mark numbers that are taken for this rule
    }
    return true;
}

/// Pretty-prints the board to stderr, with `_` for empty cells and gaps
/// between the 3x3 blocks.
fn printBoard(board: [9 * 9]u8) void {
    warn("\n", .{});
    for (board) |num, i| {
        if (num == 0) {
            warn(" _", .{});
        } else {
            warn(" {}", .{num});
        }
        if ((i % 3) == 2) {
            warn(" ", .{});
        }
        if ((i % 9) == 8) {
            warn("\n", .{});
        }
        if ((i % 27) == 26) {
            warn("\n", .{});
        }
    }
}

pub fn main() anyerror!void {
    var board = [_]u8{
        1, 0, 0, 0, 0, 7, 0, 9, 0,
        0, 3, 0, 0, 2, 0, 0, 0, 8,
        0, 0, 9, 6, 0, 0, 5, 0, 0,
        0, 0, 5, 3, 0, 0, 9, 0, 0,
        0, 1, 0, 0, 8, 0, 0, 0, 2,
        6, 0, 0, 0, 0, 4, 0, 0, 0,
        3, 0, 0, 0, 0, 0, 0, 1, 0,
        0, 4, 0, 0, 0, 0, 0, 0, 7,
        0, 0, 7, 0, 0, 0, 3, 0, 0,
    };
    const rule_rows = [_]u8{
        0, 1, 2, 3, 4, 5, 6, 7, 8,
        0, 1, 2, 3, 4, 5, 6, 7, 8,
        0, 1, 2, 3, 4, 5, 6, 7, 8,
        0, 1, 2, 3, 4, 5, 6, 7, 8,
        0, 1, 2, 3, 4, 5, 6, 7, 8,
        0, 1, 2, 3, 4, 5, 6, 7, 8,
        0, 1, 2, 3, 4, 5, 6, 7, 8,
        0, 1, 2, 3, 4, 5, 6, 7, 8,
        0, 1, 2, 3, 4, 5, 6, 7, 8,
    };
    const rule_colums = [_]u8{
        0, 0, 0, 0, 0, 0, 0, 0, 0,
        1, 1, 1, 1, 1, 1, 1, 1, 1,
        2, 2, 2, 2, 2, 2, 2, 2, 2,
        3, 3, 3, 3, 3, 3, 3, 3, 3,
        4, 4, 4, 4, 4, 4, 4, 4, 4,
        5, 5, 5, 5, 5, 5, 5, 5, 5,
        6, 6, 6, 6, 6, 6, 6, 6, 6,
        7, 7, 7, 7, 7, 7, 7, 7, 7,
        8, 8, 8, 8, 8, 8, 8, 8, 8,
    };
    const rule_basicblocks = [_]u8{
        0, 0, 0, 1, 1, 1, 2, 2, 2,
        0, 0, 0, 1, 1, 1, 2, 2, 2,
        0, 0, 0, 1, 1, 1, 2, 2, 2,
        3, 3, 3, 4, 4, 4, 5, 5, 5,
        3, 3, 3, 4, 4, 4, 5, 5, 5,
        3, 3, 3, 4, 4, 4, 5, 5, 5,
        6, 6, 6, 7, 7, 7, 8, 8, 8,
        6, 6, 6, 7, 7, 7, 8, 8, 8,
        6, 6, 6, 7, 7, 7, 8, 8, 8,
    };
    const rules = [_][9 * 9]u8{
        rule_rows,
        rule_colums,
        rule_basicblocks,
    };
    printBoard(board);
    const msBefore = std.time.milliTimestamp();
    // BUG FIX: local was misspelled `reuslt`.
    const result = solve_sudoku(&board, rules);
    const ms = std.time.milliTimestamp() - msBefore;
    warn("Time {} ms", .{ms});
    if (result) {
        printBoard(board);
    } else {
        warn("no solution found", .{});
    }
}
zig/src/main.zig
const std = @import("std");
const tools = @import("tools");

const with_trace = true;

const assert = std.debug.assert;
fn trace(comptime fmt: []const u8, args: anytype) void {
    if (with_trace) std.debug.print(fmt, args);
}

const Map = tools.Map(200, 100, false);
const Vec2 = tools.Vec2;

/// Multi-source relaxation: repeatedly sweeps the whole map, lowering each
/// open cell's distance from the tile equal to `start` until a fixed point
/// is reached.  Distances at the digit tiles '0'..'9' are written to `dists`.
fn compute_dists(start: u8, map: *const Map, dists: []u32) void {
    var dmap_max: [200 * 100]u16 = [1]u16{9999} ** (200 * 100);
    assert(map.bbox.min.x == 0 and map.bbox.min.y == 0);
    const w = @intCast(u32, map.bbox.max.x) + 1;
    const h = @intCast(u32, map.bbox.max.y) + 1;
    const stride = w;
    // NOTE(review): `w + stride * h` over-slices by one row; `stride * h`
    // would suffice, but the slack stays within dmap_max for these map sizes.
    const dmap = dmap_max[0 .. w + stride * h];
    var done = false;
    while (!done) {
        done = true;
        var p = Vec2{ .x = 0, .y = 0 };
        while (p.y < h) : (p.y += 1) {
            p.x = 0;
            while (p.x < w) : (p.x += 1) {
                const o = @intCast(u32, p.x) + stride * @intCast(u32, p.y);
                const m = map.at(p);
                if (m == '#') continue; // wall
                var d = dmap[o];
                if (m == start) {
                    d = 0;
                } else {
                    // Relax against the four orthogonal neighbours.
                    for ([_]Vec2{ .{ .x = 0, .y = 1 }, .{ .x = 0, .y = -1 }, .{ .x = 1, .y = 0 }, .{ .x = -1, .y = 0 } }) |dir| {
                        const p1 = Vec2{ .x = p.x + dir.x, .y = p.y + dir.y };
                        if (map.get(p1)) |m1| {
                            if (m1 == '#') continue;
                            const o1 = @intCast(u32, p1.x) + stride * @intCast(u32, p1.y);
                            if (d > dmap[o1] + 1) {
                                d = dmap[o1] + 1;
                            }
                        }
                    }
                }
                if (d < dmap[o]) {
                    dmap[o] = d;
                    done = false; // something improved: sweep again
                }
                if (m >= '0' and m <= '9') {
                    dists[m - '0'] = d;
                }
            }
        }
    }
}

/// n! — number of permutations enumerated by bestTour.
fn fact(n: u32) u32 {
    var f: u32 = 1;
    var i: u32 = 0;
    while (i < n) : (i += 1) f *= (i + 1);
    return f;
}

/// Brute-forces every ordering of checkpoints 1..nb_chkpts-1 starting from
/// checkpoint 0 and returns the shortest total distance.  `distances` is the
/// nb_chkpts*nb_chkpts pairwise matrix filled by compute_dists (row i =
/// distances from checkpoint i).  With `return_home` the tour must end back
/// at checkpoint 0 (part 2).  Shared by both parts to avoid the previous
/// copy-pasted permutation loop.
fn bestTour(distances: []const u32, nb_chkpts: u32, return_home: bool) u32 {
    var best_dist: u32 = 99999;
    const permuts = fact(nb_chkpts - 1);
    var p: u32 = 0;
    while (p < permuts) : (p += 1) {
        var allcheckpoints = [_]u32{ 1, 2, 3, 4, 5, 6, 7, 8, 9 };
        const order = allcheckpoints[0 .. nb_chkpts - 1];
        {
            // Decode permutation index p into a concrete ordering via a
            // sequence of factorial-base swaps.
            var mod: u32 = nb_chkpts - 1;
            var k = p;
            for (order) |*c, i| {
                const t = c.*;
                c.* = order[i + k % mod];
                order[i + k % mod] = t;
                k /= mod;
                mod -= 1;
            }
        }
        var dist: u32 = 0;
        var prev: u32 = 0;
        for (order) |c| {
            dist += distances[c * nb_chkpts + prev];
            prev = c;
        }
        if (return_home) dist += distances[0 * nb_chkpts + prev];
        if (dist < best_dist) best_dist = dist;
    }
    return best_dist;
}

pub fn main() anyerror!void {
    const stdout = std.io.getStdOut().writer();
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();

    const limit = 1 * 1024 * 1024 * 1024;
    const text = try std.fs.cwd().readFileAlloc(allocator, "day24.txt", limit);
    defer allocator.free(text);

    var map = Map{ .default_tile = 0 };
    var nb_chkpts: u32 = 0;
    {
        // Parse the ASCII maze; checkpoint count = highest digit seen + 1.
        var p = Vec2{ .x = 0, .y = 0 };
        for (text) |c| {
            if (c == '\n') {
                p.y += 1;
                p.x = 0;
                continue;
            }
            map.set(p, c);
            p.x += 1;
            if (c >= '0' and c <= '9') nb_chkpts = if (nb_chkpts < 1 + c - '0') 1 + c - '0' else nb_chkpts;
        }
        trace("nb_chkpts={}, map_size={}\n", .{ nb_chkpts, map.bbox.max });
    }

    // Pairwise checkpoint distances, stored row-major with row stride
    // nb_chkpts; only the first nb_chkpts*nb_chkpts entries are filled.
    var distances: [10 * 10]u32 = undefined;
    {
        var i: u8 = 0;
        while (i < nb_chkpts) : (i += 1) {
            compute_dists('0' + i, &map, distances[i * nb_chkpts .. i * nb_chkpts + nb_chkpts]);
            for (distances[i * nb_chkpts .. i * nb_chkpts + nb_chkpts]) |d| {
                trace("{:>2} ", .{d});
            }
            trace("\n", .{});
        }
    }

    // part 1: open tour; part 2: closed tour back to checkpoint '0'.
    try stdout.print("best dist = {}\n", .{bestTour(distances[0 .. nb_chkpts * nb_chkpts], nb_chkpts, false)});
    try stdout.print("best dist avec retour = {}\n", .{bestTour(distances[0 .. nb_chkpts * nb_chkpts], nb_chkpts, true)});
}
2016/day24.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
const List = std.ArrayList;
const Map = std.AutoHashMap;
const StrMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const Str = []const u8;
const int = i64;
const util = @import("util.zig");
const gpa = util.gpa;

const data = @embedFile("../data/day09.txt");

const Rec = struct {
    val: int,
};

/// Advent of Code day 9: part 1 sums (height+1) over all local minima of the
/// height map; part 2 multiplies the sizes of the three largest basins found
/// by flood-filling from each low point.
pub fn main() !void {
    var width: usize = 0;
    var height: usize = 0;

    // Load the map with a border of 9 values
    // (the sentinel border removes all bounds checks from the scans below).
    const height_map = blk: {
        var height_map = List(u8).init(gpa);
        errdefer height_map.deinit();
        var lines = tokenize(u8, data, "\r\n");
        while (lines.next()) |line| {
            if (line.len == 0) {
                continue;
            }
            if (width == 0) {
                // First line fixes the width; emit the top border row.
                width = line.len;
                try height_map.appendNTimes(9, width + 2);
            } else {
                assert(width == line.len);
            }
            try height_map.append(9); // left border cell
            for (line) |c| {
                try height_map.append(c - '0');
            }
            height += 1;
            try height_map.append(9); // right border cell
        }
        try height_map.appendNTimes(9, width + 2); // bottom border row
        break :blk height_map.toOwnedSlice();
    };
    defer gpa.free(height_map);

    // pitch = row stride including border; start = offset of cell (0,0).
    const pitch = width + 2;
    const start = pitch + 1;

    // basin_map: 0 = unvisited, 1 = wall (height 9), >=2 = basin id.
    var basin_map = try gpa.alloc(u8, height_map.len);
    defer gpa.free(basin_map);
    @memset(basin_map.ptr, 0, basin_map.len);
    for (height_map) |rec, i| {
        if (rec == 9) {
            basin_map[i] = 1;
        }
    }

    // Running top-3 basin sizes (big1 >= big2 >= big3).
    var big1_count: usize = 0;
    var big2_count: usize = 0;
    var big3_count: usize = 0;

    // NOTE(review): basin_id is u8 and increments once per low point, so
    // more than 253 basins would overflow — TODO confirm input stays below.
    var basin_id: u8 = 2;
    var part1: usize = 0;
    var y: usize = 0;
    while (y < height) : (y += 1) {
        var x: usize = 0;
        while (x < width) : (x += 1) {
            const idx = y * pitch + x + start;
            const left = height_map[idx - 1];
            const up = height_map[idx - pitch];
            const down = height_map[idx + pitch];
            const right = height_map[idx + 1];
            const self = height_map[idx];
            // Strict local minimum versus all four neighbours = low point.
            if (self < left and self < right and self < down and self < up) {
                part1 += self + 1;
                const count = try floodFill(basin_map, pitch, idx, basin_id);
                basin_id += 1;
                // Insert `count` into the running top-3.
                if (count > big1_count) {
                    big3_count = big2_count;
                    big2_count = big1_count;
                    big1_count = count;
                } else if (count > big2_count) {
                    big3_count = big2_count;
                    big2_count = count;
                } else if (count > big3_count) {
                    big3_count = count;
                }
            }
        }
    }

    const part2 = @intCast(int, big1_count * big2_count * big3_count);
    print("part1={}, part2={}\n", .{part1, part2});
}

/// Iterative 4-connected flood fill: marks every cell reachable from
/// `seed_idx` through zero cells of `map` with `basin_id` and returns how
/// many cells were marked.  `pitch` is the row stride of the bordered map.
fn floodFill(map: []u8, pitch: usize, seed_idx: usize, basin_id: u8) !usize {
    var frontier = std.ArrayList(usize).init(gpa);
    defer frontier.deinit();
    try frontier.append(seed_idx);
    var count: usize = 0;
    while (frontier.popOrNull()) |idx| {
        if (map[idx] != 0) continue; // already claimed (or wall)
        map[idx] = basin_id;
        count += 1;
        const left = map[idx - 1];
        const up = map[idx - pitch];
        const down = map[idx + pitch];
        const right = map[idx + 1];
        if (left == 0) try frontier.append(idx - 1);
        if (up == 0) try frontier.append(idx - pitch);
        if (down == 0) try frontier.append(idx + pitch);
        if (right == 0) try frontier.append(idx + 1);
    }
    return count;
}

// Useful stdlib functions
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;

const eql = std.mem.eql;
const parseEnum = std.meta.stringToEnum;
const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;

const min = std.math.min;
const min3 = std.math.min3;
const max = std.math.max;
const max3 = std.math.max3;

const print = std.debug.print;
const assert = std.debug.assert;

const sort = std.sort.sort;
const asc = std.sort.asc;
const desc = std.sort.desc;
src/day09.zig
const std = @import("std");
const fmt = std.fmt;
const heap = std.heap;
const mem = std.mem;
const net = std.net;
const os = std.os;
const testing = std.testing;
const time = std.time;

const Atomic = std.atomic.Atomic;
const assert = std.debug.assert;

const httpserver = @import("lib.zig");

const curl = @import("curl.zig");

// Fixed local port all harness servers bind to (tests run sequentially).
const port = 34450;

/// Spins up a real httpserver.Server on a background thread bound to
/// localhost, so tests can exercise it with actual HTTP requests via curl.
const TestHarness = struct {
    const Self = @This();

    root_allocator: mem.Allocator,
    arena: heap.ArenaAllocator,
    socket: os.socket_t,
    // Shared flag the server thread polls; cleared by deinit() to stop it.
    running: Atomic(bool) = Atomic(bool).init(true),
    server: httpserver.Server(*Self),
    thread: std.Thread,

    // Minimal custom formatter; always prints "0".
    pub fn format(self: *const Self, comptime fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
        _ = self;
        _ = fmt_string;
        _ = options;
        try writer.writeAll("0");
    }

    /// Allocates a harness, binds/listens an IPv6 TCP socket on `port`,
    /// initializes the server with `handler` and starts its worker thread.
    /// Caller must pair with deinit().
    fn create(allocator: mem.Allocator, comptime handler: httpserver.RequestHandler(*Self)) !*TestHarness {
        const socket = blk: {
            const sockfd = try os.socket(os.AF.INET6, os.SOCK.STREAM, 0);
            errdefer os.close(sockfd);
            // Best-effort: lets repeated test runs rebind the same port.
            os.setsockopt(
                sockfd,
                os.SOL.SOCKET,
                os.SO.REUSEADDR,
                &mem.toBytes(@as(c_int, 1)),
            ) catch {};
            const addr = try net.Address.parseIp6("::0", port);
            try os.bind(sockfd, &addr.any, @sizeOf(os.sockaddr.in6));
            try os.listen(sockfd, std.math.maxInt(u31));
            break :blk sockfd;
        };
        var res = try allocator.create(TestHarness);
        res.* = .{
            .root_allocator = allocator,
            .arena = heap.ArenaAllocator.init(allocator),
            .socket = socket,
            .server = undefined,
            .thread = undefined,
        };
        try res.server.init(
            allocator,
            .{},
            &res.running,
            socket,
            res,
            handler,
        );
        // Start thread
        res.thread = try std.Thread.spawn(
            .{},
            struct {
                fn worker(server: *httpserver.Server(*Self)) !void {
                    return server.run(10 * time.ns_per_ms);
                }
            }.worker,
            .{&res.server},
        );
        return res;
    }

    /// Stops the server thread, releases the socket and all harness memory.
    fn deinit(self: *TestHarness) void {
        // Wait for the server to finish
        self.running.store(false, .SeqCst);
        self.thread.join();
        // Clean up the server
        self.server.deinit();
        os.close(self.socket);
        // Clean up our own data
        self.arena.deinit();
        self.root_allocator.destroy(self);
    }

    /// Issues one HTTP request against the harness server via curl.
    /// Caller owns the returned Response (deinit it).
    fn do(self: *TestHarness, method: []const u8, path: []const u8, body_opt: ?[]const u8) !curl.Response {
        var buf: [1024]u8 = undefined;
        const url = try fmt.bufPrintZ(&buf, "http://localhost:{d}{s}", .{
            port,
            path,
        });
        return curl.do(self.root_allocator, method, url, body_opt);
    }
};

test "GET 200 OK" {
    // Try to test multiple end conditions for the serving loop
    var i: usize = 1;
    while (i < 20) : (i += 1) {
        var th = try TestHarness.create(
            testing.allocator,
            struct {
                fn handle(ctx: *TestHarness, per_request_allocator: mem.Allocator, peer: httpserver.Peer, req: httpserver.Request) anyerror!httpserver.Response {
                    _ = ctx;
                    _ = per_request_allocator;
                    _ = peer;
                    _ = req;
                    try testing.expectEqualStrings("/plaintext", req.path);
                    try testing.expectEqual(httpserver.Method.get, req.method);
                    try testing.expect(req.headers.get("Host") != null);
                    try testing.expectEqualStrings("*/*", req.headers.get("Accept").?.value);
                    try testing.expect(req.headers.get("Content-Length") == null);
                    try testing.expect(req.headers.get("Content-Type") == null);
                    try testing.expect(req.body == null);
                    return httpserver.Response{
                        .response = .{
                            .status_code = .ok,
                            .headers = &[_]httpserver.Header{},
                            .data = "Hello, World!",
                        },
                    };
                }
            }.handle,
        );
        defer th.deinit();
        // i requests per harness, so teardown happens at varying queue depths.
        var j: usize = 0;
        while (j < i) : (j += 1) {
            var resp = try th.do("GET", "/plaintext", null);
            defer resp.deinit();
            try testing.expectEqual(@as(usize, 200), resp.response_code);
            try testing.expectEqualStrings("Hello, World!", resp.data);
        }
    }
}

test "POST 200 OK" {
    const body =
        \\Perspiciatis eligendi aspernatur iste delectus et et quo repudiandae. Iusto repellat tempora nisi alias. Autem inventore rerum magnam sunt voluptatem aspernatur.
        \\Consequuntur quae non fugit dignissimos at quis. Mollitia nisi minus voluptatem voluptatem sed sunt dolore. Expedita ullam ut ex voluptatem delectus. Fuga quos asperiores consequatur similique voluptatem provident vel. Repudiandae rerum quia dolorem totam.
    ;
    var th = try TestHarness.create(
        testing.allocator,
        struct {
            fn handle(ctx: *TestHarness, per_request_allocator: mem.Allocator, peer: httpserver.Peer, req: httpserver.Request) anyerror!httpserver.Response {
                _ = ctx;
                _ = per_request_allocator;
                _ = peer;
                _ = req;
                try testing.expectEqualStrings("/foobar", req.path);
                try testing.expectEqual(httpserver.Method.post, req.method);
                try testing.expect(req.headers.get("Host") != null);
                try testing.expectEqualStrings("*/*", req.headers.get("Accept").?.value);
                try testing.expectEqualStrings("application/json", req.headers.get("Content-Type").?.value);
                try testing.expectEqual(body.len, try fmt.parseInt(usize, req.headers.get("Content-Length").?.value, 10));
                try testing.expectEqualStrings(body, req.body.?);
                return httpserver.Response{
                    .response = .{
                        .status_code = .ok,
                        .headers = &[_]httpserver.Header{},
                        .data = "Hello, World!",
                    },
                };
            }
        }.handle,
    );
    defer th.deinit();
    var i: usize = 1;
    while (i < 20) : (i += 1) {
        var j: usize = 0;
        while (j < i) : (j += 1) {
            var resp = try th.do("POST", "/foobar", body);
            defer resp.deinit();
            try testing.expectEqual(@as(usize, 200), resp.response_code);
            try testing.expectEqualStrings("Hello, World!", resp.data);
        }
    }
}

test "GET files" {
    var th = try TestHarness.create(
        testing.allocator,
        struct {
            fn handle(ctx: *TestHarness, per_request_allocator: mem.Allocator, peer: httpserver.Peer, req: httpserver.Request) anyerror!httpserver.Response {
                _ = ctx;
                _ = per_request_allocator;
                _ = peer;
                _ = req;
                try testing.expect(mem.startsWith(u8, req.path, "/static"));
                try testing.expect(req.headers.get("Host") != null);
                try testing.expectEqualStrings("*/*", req.headers.get("Accept").?.value);
                try testing.expect(req.headers.get("Content-Length") == null);
                try testing.expect(req.headers.get("Content-Type") == null);
                try testing.expect(req.body == null);
                // Strip the leading '/' to make a path relative to cwd.
                const path = req.path[1..];
                return httpserver.Response{
                    .send_file = .{
                        .status_code = .ok,
                        .headers = &[_]httpserver.Header{},
                        .path = path,
                    },
                };
            }
        }.handle,
    );
    defer th.deinit();
    const test_cases = &[_]struct {
        path: []const u8,
        exp_data: []const u8,
        exp_response_code: usize,
    }{
        .{ .path = "/static/foobar.txt", .exp_data = "foobar content\n", .exp_response_code = 200 },
        .{ .path = "/static/notfound.txt", .exp_data = "Not Found", .exp_response_code = 404 },
    };
    inline for (test_cases) |tc| {
        var i: usize = 0;
        while (i < 20) : (i += 1) {
            var resp = try th.do("GET", tc.path, null);
            defer resp.deinit();
            try testing.expectEqual(tc.exp_response_code, resp.response_code);
            try testing.expectEqualStrings(tc.exp_data, resp.data);
        }
    }
}
src/test.zig
//! Zig wrapper around CSFML's sfIpAddress (an IPv4 address stored as a
//! fixed-size C string such as "127.0.0.1").
const std = @import("std");
const sf = @import("../sfml.zig");

const IpAddress = @This();

// Const addresses
// TODO: make those comptime

/// Any address or no address
pub fn any() IpAddress {
    return IpAddress.initFromInt(0);
}
// "none" is represented by CSFML as the all-zero address, same as any().
pub const none = any;

/// Broadcast address
pub fn broadcast() IpAddress {
    return IpAddress.initFromInt(0xFFFFFFFF);
}

/// Loopback
pub fn localhost() IpAddress {
    return IpAddress.init(127, 0, 0, 1);
}

// Constructor/destructor

/// Inits an ip address from bytes
pub fn init(a: u8, b: u8, c: u8, d: u8) IpAddress {
    return .{ ._ip = sf.c.sfIpAddress_fromBytes(a, b, c, d) };
}

/// Inits an ip address from an integer
pub fn initFromInt(int: u32) IpAddress {
    return .{ ._ip = sf.c.sfIpAddress_fromInteger(int) };
}

/// Inits an ip address from a string (network name or ip)
/// NOTE(review): a name that fails to resolve presumably yields the "none"
/// address — confirm against the bound CSFML version.
pub fn initFromString(str: [*:0]const u8) IpAddress {
    return .{ ._ip = sf.c.sfIpAddress_fromString(str) };
}

// Local/global addresses

/// Gets the local address of this pc (takes no time)
pub fn getLocalAddress() IpAddress {
    return .{ ._ip = sf.c.sfIpAddress_getLocalAddress() };
}

/// Gets the public address of this pc (from outside)
/// Takes time because it needs to contact an internet server
/// Specify a timeout in case it takes too much time
/// Returns sf.Error.timeout when the lookup yields the "none" address.
pub fn getPublicAddress(timeout: ?sf.system.Time) !IpAddress {
    // No timeout supplied means "wait indefinitely" (CSFML treats 0 as no limit).
    const time = timeout orelse sf.system.Time{ .us = 0 };
    const ip = IpAddress{ ._ip = sf.c.sfIpAddress_getPublicAddress(time._toCSFML()) };
    // CSFML signals failure by returning the "none" address.
    if (ip.equals(IpAddress.none()))
        return sf.Error.timeout;
    return ip;
}

// Compare

/// Compares two ip addresses
pub fn equals(self: IpAddress, other: IpAddress) bool {
    // Byte-compares the raw CSFML storage, including any bytes past the NUL;
    // assumes CSFML zero-fills the unused tail — TODO confirm.
    return std.mem.eql(u8, self.bytes(), other.bytes());
}

/// Gets the full slice of the contents
fn bytes(self: IpAddress) []const u8 {
    // sfIpAddress is assumed to be a fixed 16-byte character array — verify
    // this matches the bound CSFML version's struct layout.
    return @ptrCast(*const [16]u8, &self._ip)[0..];
}

// Getter

/// Gets the ip as it is stored but as a slice
/// The returned slice points into self and is trimmed at the first NUL byte.
pub fn toString(self: IpAddress) []const u8 {
    var slice = self.bytes();
    for (slice) |s, i| {
        if (s == 0) {
            // Shrink the slice to exclude the NUL terminator and trailing bytes.
            slice.len = i;
            break;
        }
    }
    return slice;
}

/// Gets the ip as an int
pub fn toInt(self: IpAddress) u32 {
    // TODO: This is a workaround to avoid calling the C function directly
    // (presumably sfIpAddress_toInteger misbehaves through this binding —
    // verify and restore the direct call):
    //return sf.c.sfIpAddress_toInteger(self._ip);
    // Instead, parse the stored dotted-quad string into a big-endian u32.
    const str = self.toString();
    var iter = std.mem.split(u8, str, ".");
    const a = (std.fmt.parseInt(u32, iter.next().?, 10) catch unreachable) << 24;
    const b = (std.fmt.parseInt(u32, iter.next().?, 10) catch unreachable) << 16;
    const c = (std.fmt.parseInt(u32, iter.next().?, 10) catch unreachable) << 8;
    const d = (std.fmt.parseInt(u32, iter.next().?, 10) catch unreachable) << 0;
    // A valid IPv4 string has exactly four components.
    if (iter.next()) |_|
        unreachable;
    return a | b | c | d;
}

/// Prints this ip address as a string
pub fn format(self: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
    _ = options;
    _ = fmt;
    try writer.writeAll(self.toString());
}

/// Csfml structure
_ip: sf.c.sfIpAddress,

test "ipaddress" {
    const tst = std.testing;

    var ip = IpAddress.init(0x01, 0x23, 0x45, 0x67);
    try tst.expectEqual(@as(u32, 0x01234567), ip.toInt());
    ip = IpAddress.initFromInt(0xabababab);
    try tst.expect(ip.equals(IpAddress.init(0xab, 0xab, 0xab, 0xab)));
    ip = IpAddress.initFromString("localhost");
    try tst.expectEqualStrings("127.0.0.1", ip.toString());
    //toInt();
    _ = getLocalAddress();
    _ = getPublicAddress(sf.system.Time.microseconds(1)) catch {};
}
src/sfml/network/IpAddress.zig
//! RV64IM instruction handlers. Each handler executes one decoded instruction
//! against the CPU state, then tail-dispatches to the next cached instruction.
const std = @import("std");
const decoder = @import("decoder.zig");
const CPU = @import("cpu.zig").CPU;
const Instruction = decoder.Instruction;

pub const OpFuncError = error{
    Unimplemented,
};

/// Signature shared by every instruction handler in this file.
pub const OpFunc = fn (cpu: *CPU, inst: []Instruction, cur: usize) anyerror!void;

/// Execute next instruction.
/// Advances pc by 4 and dispatches to the cached handler for the next slot,
/// unless the execution budget is exhausted or the cache end is reached.
pub inline fn next(cpu: *CPU, inst: []Instruction, oldcur: usize) !void {
    const cur = oldcur +% 1;
    cpu.pc +%= 4;
    cpu.cache.exec_counter += 1;
    if (cpu.cache.exec_counter > cpu.cache.max_exec) return;
    if (cur == cpu.cache.inst.?.len) return;
    return @call(.{}, cpu.cache.funcs.?[cur], .{ cpu, inst, cur });
}

/// Attempt to "goto" cached instruction, otherwise return as usual.
/// If newpc lands inside the cached region (4-byte aligned), jump within the
/// cache via next(); otherwise set pc and fall back out to the caller.
pub inline fn goto(cpu: *CPU, inst: []Instruction, oldcur: usize, newpc: CPU.XLEN) !void {
    _ = oldcur;
    // TODO: support compressed instructions
    if (newpc >= cpu.cache.base_pc and newpc < cpu.cache.max_pc and (newpc % 4) == 0) {
        // next() will re-add 4, so pre-subtract here.
        cpu.pc = newpc -% 4;
        return next(cpu, inst, @truncate(usize, (newpc - cpu.cache.base_pc) / 4) -% 1);
    }
    cpu.pc = newpc;
    cpu.cache.exec_counter += 1;
    return;
}

// -- ALU functions with only registers --

/// rd = rs1 + rs2 (wrapping)
pub fn add(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].add;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) +% cpu.getReg(params.rs2));
    return next(cpu, inst, cur);
}

/// rd = rs1 - rs2 (wrapping)
pub fn sub(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].sub;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) -% cpu.getReg(params.rs2));
    return next(cpu, inst, cur);
}

/// rd = rs1 ^ rs2
pub fn xor(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].xor;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) ^ cpu.getReg(params.rs2));
    return next(cpu, inst, cur);
}

/// rd = rs1 | rs2
pub fn @"or"(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].@"or";
    cpu.setReg(params.rd, cpu.getReg(params.rs1) | cpu.getReg(params.rs2));
    return next(cpu, inst, cur);
}

/// rd = rs1 & rs2
pub fn @"and"(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].@"and";
    cpu.setReg(params.rd, cpu.getReg(params.rs1) & cpu.getReg(params.rs2));
    return next(cpu, inst, cur);
}

/// rd = rs1 << rs2[5:0]
pub fn sll(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].sll;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) << @truncate(u6, cpu.getReg(params.rs2)));
    return next(cpu, inst, cur);
}

/// rd = rs1 >> rs2[5:0] (logical: zero-fill)
pub fn srl(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].srl;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) >> @truncate(u6, cpu.getReg(params.rs2)));
    return next(cpu, inst, cur);
}

/// rd = rs1 >> rs2[5:0] (arithmetic: sign-fill)
/// BUGFIX: previously identical to srl; shift on the signed view so the sign
/// bit is replicated.
pub fn sra(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].sra;
    cpu.setReg(params.rd, @bitCast(CPU.XLEN, cpu.getRegSigned(params.rs1) >> @truncate(u6, cpu.getReg(params.rs2))));
    return next(cpu, inst, cur);
}

// -- ALU functions with an immediate value --
// Most of these functions are pretty monotonous, always following the same format
// Immediates are always sign-extended, unless otherwise specified
// at least that was my understanding of the specification.

/// rd = rs1 + sext(imm)
pub fn addi(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].addi;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) +% CPU.util.sext(params.immSigned()));
    return next(cpu, inst, cur);
}

/// rd = rs1 ^ sext(imm)
pub fn xori(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].xori;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) ^ CPU.util.sext(params.immSigned()));
    return next(cpu, inst, cur);
}

/// rd = rs1 | sext(imm)
pub fn ori(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].ori;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) | CPU.util.sext(params.immSigned()));
    return next(cpu, inst, cur);
}

/// rd = rs1 & sext(imm)
pub fn andi(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].andi;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) & CPU.util.sext(params.immSigned()));
    return next(cpu, inst, cur);
}

/// rd = rs1 << shamt. RV64 shift amounts are 6 bits.
/// BUGFIX: mask was 0b1111 (4 bits), which silently dropped shifts >= 16.
pub fn slli(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].slli;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) << @intCast(u6, params.imm & 0b111111));
    return next(cpu, inst, cur);
}

/// rd = rs1 >> shamt (logical). RV64 shift amounts are 6 bits.
/// BUGFIX: mask was 0b1111 (4 bits).
pub fn srli(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].srli;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) >> @intCast(u6, params.imm & 0b111111));
    return next(cpu, inst, cur);
}

/// rd = rs1 >> shamt (arithmetic). RV64 shift amounts are 6 bits.
/// BUGFIX: was a logical shift with a 4-bit mask; now sign-filling with a
/// 6-bit shift amount.
pub fn srai(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].srai;
    cpu.setReg(params.rd, @bitCast(CPU.XLEN, cpu.getRegSigned(params.rs1) >> @intCast(u6, params.imm & 0b111111)));
    return next(cpu, inst, cur);
}

/// rd = (rs1 <s sext(imm)) ? 1 : 0
pub fn slti(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].slti;
    // Why @bitCast() & @intCast()? RISC-V manual: "immediates are always sign extended"
    cpu.setReg(params.rd, if (cpu.getRegSigned(params.rs1) < @intCast(CPU.SXLEN, params.immSigned())) 1 else 0);
    return next(cpu, inst, cur);
}

/// rd = (rs1 <u imm) ? 1 : 0
pub fn sltiu(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].sltiu;
    // Explicitly unsigned, so only zero-extend
    // Since this isn't hardware, a simple @intCast() works
    cpu.setReg(params.rd, if (cpu.getReg(params.rs1) < @intCast(CPU.XLEN, params.imm)) 1 else 0);
    return next(cpu, inst, cur);
}

// -- Load/store --

/// rd = sext(mem[rs1 + sext(imm)][7:0])
pub fn lb(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].lb;
    cpu.setReg(params.rd, CPU.util.sext(try cpu.getMem(cpu.getReg(params.rs1) +% CPU.util.sext(params.immSigned()), i8, .none)));
    return next(cpu, inst, cur);
}

/// rd = sext(mem[rs1 + sext(imm)][15:0])
pub fn lh(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].lh;
    cpu.setReg(params.rd, CPU.util.sext(try cpu.getMem(cpu.getReg(params.rs1) +% CPU.util.sext(params.immSigned()), i16, .none)));
    return next(cpu, inst, cur);
}

/// rd = sext(mem[rs1 + sext(imm)][31:0])
pub fn lw(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].lw;
    cpu.setReg(params.rd, CPU.util.sext(try cpu.getMem(cpu.getReg(params.rs1) +% CPU.util.sext(params.immSigned()), i32, .none)));
    return next(cpu, inst, cur);
}

/// rd = mem[rs1 + sext(imm)][63:0]
pub fn ld(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].ld;
    cpu.setReg(params.rd, CPU.util.sext(try cpu.getMem(cpu.getReg(params.rs1) +% CPU.util.sext(params.immSigned()), i64, .none)));
    return next(cpu, inst, cur);
}

/// rd = zext(mem[rs1 + sext(imm)][7:0])
pub fn lbu(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].lbu;
    cpu.setReg(params.rd, @intCast(CPU.XLEN, try cpu.getMem(cpu.getReg(params.rs1) +% CPU.util.sext(params.immSigned()), u8, .none)));
    return next(cpu, inst, cur);
}

/// rd = zext(mem[rs1 + sext(imm)][15:0])
pub fn lhu(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].lhu;
    cpu.setReg(params.rd, @intCast(CPU.XLEN, try cpu.getMem(cpu.getReg(params.rs1) +% CPU.util.sext(params.immSigned()), u16, .none)));
    return next(cpu, inst, cur);
}

/// rd = zext(mem[rs1 + sext(imm)][31:0])
/// BUGFIX: previously read the union as `.lhu` (copy-paste error).
pub fn lwu(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].lwu;
    cpu.setReg(params.rd, @intCast(CPU.XLEN, try cpu.getMem(cpu.getReg(params.rs1) +% CPU.util.sext(params.immSigned()), u32, .none)));
    return next(cpu, inst, cur);
}

/// mem[rs1 + sext(imm)] = rs2[7:0]
pub fn sb(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].sb;
    try cpu.setMem(cpu.getReg(params.rs1) +% CPU.util.sext(params.immSigned()), @truncate(u8, cpu.getReg(params.rs2)));
    return next(cpu, inst, cur);
}

/// mem[rs1 + sext(imm)] = rs2[15:0]
pub fn sh(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].sh;
    try cpu.setMem(cpu.getReg(params.rs1) +% CPU.util.sext(params.immSigned()), @truncate(u16, cpu.getReg(params.rs2)));
    return next(cpu, inst, cur);
}

/// mem[rs1 + sext(imm)] = rs2[31:0]
pub fn sw(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].sw;
    try cpu.setMem(cpu.getReg(params.rs1) +% CPU.util.sext(params.immSigned()), @truncate(u32, cpu.getReg(params.rs2)));
    return next(cpu, inst, cur);
}

/// mem[rs1 + sext(imm)] = rs2[63:0]
pub fn sd(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].sd;
    try cpu.setMem(cpu.getReg(params.rs1) +% CPU.util.sext(params.immSigned()), @truncate(u64, cpu.getReg(params.rs2)));
    return next(cpu, inst, cur);
}

// -- Branch functions --
// NOTE(review): branch targets use `@intCast(CPU.XLEN, params.imm)`, which
// will trap if the decoder stores a negative (backward) offset in a signed
// field — verify the decoder pre-encodes branch immediates as unsigned XLEN.

/// Branch if rs1 == rs2
pub fn beq(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].beq;
    if (cpu.getReg(params.rs1) == cpu.getReg(params.rs2)) return goto(cpu, inst, cur, cpu.pc +% @intCast(CPU.XLEN, params.imm));
    return next(cpu, inst, cur);
}

/// Branch if rs1 != rs2
pub fn bne(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].bne;
    if (cpu.getReg(params.rs1) != cpu.getReg(params.rs2)) return goto(cpu, inst, cur, cpu.pc +% @intCast(CPU.XLEN, params.imm));
    return next(cpu, inst, cur);
}

/// Branch if rs1 <s rs2 (signed)
pub fn blt(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].blt;
    if (cpu.getRegSigned(params.rs1) < cpu.getRegSigned(params.rs2)) return goto(cpu, inst, cur, cpu.pc +% @intCast(CPU.XLEN, params.imm));
    return next(cpu, inst, cur);
}

/// Branch if rs1 >=s rs2 (signed).
/// BUGFIX: used `>` — BGE is greater-than-OR-EQUAL per the RISC-V spec.
pub fn bge(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].bge;
    if (cpu.getRegSigned(params.rs1) >= cpu.getRegSigned(params.rs2)) return goto(cpu, inst, cur, cpu.pc +% @intCast(CPU.XLEN, params.imm));
    return next(cpu, inst, cur);
}

/// Branch if rs1 <u rs2 (unsigned)
pub fn bltu(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].bltu;
    if (cpu.getReg(params.rs1) < cpu.getReg(params.rs2)) return goto(cpu, inst, cur, cpu.pc +% @intCast(CPU.XLEN, params.imm));
    return next(cpu, inst, cur);
}

/// Branch if rs1 >=u rs2 (unsigned).
/// BUGFIX: used `>` — BGEU is greater-than-OR-EQUAL per the RISC-V spec.
pub fn bgeu(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].bgeu;
    if (cpu.getReg(params.rs1) >= cpu.getReg(params.rs2)) return goto(cpu, inst, cur, cpu.pc +% @intCast(CPU.XLEN, params.imm));
    return next(cpu, inst, cur);
}

// -- Jump functions --

/// rd = pc + 4; pc += sext(imm)
pub fn jal(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].jal;
    // Wrapping add keeps the link computation consistent with next()'s pc math.
    cpu.setReg(params.rd, cpu.pc +% 4);
    return goto(cpu, inst, cur, cpu.pc +% @intCast(CPU.XLEN, params.imm));
}

/// rd = pc + 4; pc = rs1 + sext(imm)
pub fn jalr(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].jalr;
    cpu.setReg(params.rd, cpu.pc +% 4);
    return goto(cpu, inst, cur, cpu.getReg(params.rs1) +% @intCast(CPU.XLEN, params.imm));
}

// -- Misc functions --

/// rd = sext(imm) (imm already shifted into the upper bits by the decoder)
pub fn lui(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].lui;
    cpu.setReg(params.rd, CPU.util.sext(params.imm));
    return next(cpu, inst, cur);
}

/// rd = pc + sext(imm)
pub fn auipc(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].auipc;
    cpu.setReg(params.rd, cpu.pc +% CPU.util.sext(params.imm));
    return next(cpu, inst, cur);
}

// -- Specifically 32-bit ALU functions --

/// rd = sext32(rs1[31:0] + rs2[31:0])
pub fn addw(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].addw;
    cpu.setReg(params.rd, CPU.util.sext(@truncate(u32, cpu.getReg(params.rs1)) +% @truncate(u32, cpu.getReg(params.rs2))));
    return next(cpu, inst, cur);
}

/// rd = sext32(rs1[31:0] - rs2[31:0])
pub fn subw(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].subw;
    cpu.setReg(params.rd, CPU.util.sext(@truncate(u32, cpu.getReg(params.rs1)) -% @truncate(u32, cpu.getReg(params.rs2))));
    return next(cpu, inst, cur);
}

/// rd = sext32(rs1[31:0] << rs2[4:0])
pub fn sllw(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].sllw;
    cpu.setReg(params.rd, CPU.util.sext(@truncate(u32, cpu.getReg(params.rs1)) << @truncate(u5, cpu.getReg(params.rs2))));
    return next(cpu, inst, cur);
}

/// rd = sext32(rs1[31:0] >> rs2[4:0]) (logical)
pub fn srlw(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].srlw;
    cpu.setReg(params.rd, CPU.util.sext(@truncate(u32, cpu.getReg(params.rs1)) >> @truncate(u5, cpu.getReg(params.rs2))));
    return next(cpu, inst, cur);
}

/// rd = sext32(rs1[31:0] >> rs2[4:0]) (arithmetic)
/// BUGFIX: previously identical to srlw; shift on the signed 32-bit view.
pub fn sraw(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].sraw;
    const shifted = @bitCast(i32, @truncate(u32, cpu.getReg(params.rs1))) >> @truncate(u5, cpu.getReg(params.rs2));
    cpu.setReg(params.rd, CPU.util.sext(shifted));
    return next(cpu, inst, cur);
}

// -- 32-bit ALU functions with an immediate value --

/// rd = sext32(rs1[31:0] + sext(imm))
pub fn addiw(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].addiw;
    cpu.setReg(params.rd, CPU.util.sext(@truncate(u32, cpu.getReg(params.rs1)) +% @bitCast(u32, @intCast(i32, params.immSigned()))));
    return next(cpu, inst, cur);
}

/// rd = sext32(rs1[31:0] << shamt)
pub fn slliw(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].slliw;
    cpu.setReg(params.rd, CPU.util.sext(@truncate(u32, cpu.getReg(params.rs1)) << @truncate(u5, params.shamt)));
    return next(cpu, inst, cur);
}

/// rd = sext32(rs1[31:0] >> shamt) (logical)
pub fn srliw(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].srliw;
    cpu.setReg(params.rd, CPU.util.sext(@truncate(u32, cpu.getReg(params.rs1)) >> @truncate(u5, params.shamt)));
    return next(cpu, inst, cur);
}

/// rd = sext32(rs1[31:0] >> shamt) (arithmetic)
/// BUGFIX: previously identical to srliw; shift on the signed 32-bit view.
pub fn sraiw(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].sraiw;
    const shifted = @bitCast(i32, @truncate(u32, cpu.getReg(params.rs1))) >> @truncate(u5, params.shamt);
    cpu.setReg(params.rd, CPU.util.sext(shifted));
    return next(cpu, inst, cur);
}

// -- Multiplication (M) extension functions --

/// rd = low XLEN bits of rs1 * rs2.
/// BUGFIX: previously truncated the product to 32 bits; MUL keeps the low
/// 64 bits on RV64.
pub fn mul(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].mul;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) *% cpu.getReg(params.rs2));
    return next(cpu, inst, cur);
}

/// rd = high 64 bits of the signed 128-bit product rs1 * rs2.
/// BUGFIX: previously returned bits 32..63 of a wrapped 64-bit product.
pub fn mulh(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].mulh;
    const prod = @as(i128, cpu.getRegSigned(params.rs1)) * @as(i128, cpu.getRegSigned(params.rs2));
    cpu.setReg(params.rd, @truncate(CPU.XLEN, @bitCast(u128, prod >> 64)));
    return next(cpu, inst, cur);
}

/// MULHSU: rd = high 64 bits of signed(rs1) * unsigned(rs2).
/// BUGFIX: previously an unsigned low-product computation.
pub fn mulsu(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].mulsu;
    const prod = @as(i128, cpu.getRegSigned(params.rs1)) * @as(i128, cpu.getReg(params.rs2));
    cpu.setReg(params.rd, @truncate(CPU.XLEN, @bitCast(u128, prod >> 64)));
    return next(cpu, inst, cur);
}

/// MULHU: rd = high 64 bits of unsigned(rs1) * unsigned(rs2).
/// BUGFIX: previously returned bits 32..63 of a wrapped 64-bit product.
pub fn mulu(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].mulu;
    const prod = @as(u128, cpu.getReg(params.rs1)) * @as(u128, cpu.getReg(params.rs2));
    cpu.setReg(params.rd, @truncate(CPU.XLEN, prod >> 64));
    return next(cpu, inst, cur);
}

/// Signed division. RISC-V DIV rounds toward zero.
/// BUGFIX: divFloor rounds toward -inf; divTrunc matches the spec.
/// Division by zero still propagates as an error (pre-existing behavior).
pub fn div(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].div;
    cpu.setReg(params.rd, @bitCast(CPU.XLEN, try std.math.divTrunc(i64, cpu.getRegSigned(params.rs1), cpu.getRegSigned(params.rs2))));
    return next(cpu, inst, cur);
}

/// Unsigned division.
/// NOTE(review): division by zero panics in safe builds here, whereas the
/// spec says DIVU returns all-ones — confirm desired behavior.
pub fn divu(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].divu;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) / cpu.getReg(params.rs2));
    return next(cpu, inst, cur);
}

/// Signed remainder (sign follows the dividend).
pub fn rem(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].rem;
    cpu.setReg(params.rd, @bitCast(CPU.XLEN, try std.math.rem(i64, cpu.getRegSigned(params.rs1), cpu.getRegSigned(params.rs2))));
    return next(cpu, inst, cur);
}

/// Unsigned remainder.
pub fn remu(cpu: *CPU, inst: []Instruction, cur: usize) !void {
    const params = inst[cur].remu;
    cpu.setReg(params.rd, cpu.getReg(params.rs1) % cpu.getReg(params.rs2));
    return next(cpu, inst, cur);
}

// -- Utilities --

/// Get the function corresponding to an instruction.
pub fn fromInstruction(inst: Instruction) !OpFunc {
    return switch (inst) {
        .add => add,
        .sub => sub,
        .xor => xor,
        .@"or" => @"or",
        .@"and" => @"and",
        .sll => sll,
        .srl => srl,
        .sra => sra,
        .addi => addi,
        .xori => xori,
        .ori => ori,
        .andi => andi,
        .slli => slli,
        .srli => srli,
        .srai => srai,
        .slti => slti,
        .sltiu => sltiu,
        .lb => lb,
        .lh => lh,
        .lw => lw,
        .ld => ld,
        .lbu => lbu,
        .lhu => lhu,
        .lwu => lwu,
        .sb => sb,
        .sh => sh,
        .sw => sw,
        .sd => sd,
        .beq => beq,
        .bne => bne,
        .blt => blt,
        .bge => bge,
        .bltu => bltu,
        .bgeu => bgeu,
        .jal => jal,
        .jalr => jalr,
        .lui => lui,
        .auipc => auipc,
        .addw => addw,
        .subw => subw,
        .sllw => sllw,
        .srlw => srlw,
        .sraw => sraw,
        .addiw => addiw,
        .slliw => slliw,
        .srliw => srliw,
        .sraiw => sraiw,
        .mul => mul,
        .mulh => mulh,
        .mulsu => mulsu,
        .mulu => mulu,
        .div => div,
        .divu => divu,
        .rem => rem,
        .remu => remu,
        else => OpFuncError.Unimplemented,
    };
}
opfunc.zig
const std = @import("std"); const mem = std.mem; const Allocator = std.mem.Allocator; const ArrayListUnmanaged = std.ArrayListUnmanaged; const assert = std.debug.assert; const log = std.log.scoped(.module); const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const Target = std.Target; const ast = std.zig.ast; const Module = @This(); const Compilation = @import("Compilation.zig"); const Cache = @import("Cache.zig"); const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const TypedValue = @import("TypedValue.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); const Air = @import("Air.zig"); const Zir = @import("Zir.zig"); const trace = @import("tracy.zig").trace; const AstGen = @import("AstGen.zig"); const Sema = @import("Sema.zig"); const target_util = @import("target.zig"); const build_options = @import("build_options"); /// General-purpose allocator. Used for both temporary and long-term storage. gpa: *Allocator, comp: *Compilation, /// Where our incremental compilation metadata serialization will go. zig_cache_artifact_directory: Compilation.Directory, /// Pointer to externally managed resource. root_pkg: *Package, /// Normally, `main_pkg` and `root_pkg` are the same. The exception is `zig test`, in which /// `root_pkg` is the test runner, and `main_pkg` is the user's source file which has the tests. main_pkg: *Package, /// Used by AstGen worker to load and store ZIR cache. global_zir_cache: Compilation.Directory, /// Used by AstGen worker to load and store ZIR cache. local_zir_cache: Compilation.Directory, /// It's rare for a decl to be exported, so we save memory by having a sparse /// map of Decl pointers to details about them being exported. /// The Export memory is owned by the `export_owners` table; the slice itself /// is owned by this table. The slice is guaranteed to not be empty. 
decl_exports: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{}, /// This models the Decls that perform exports, so that `decl_exports` can be updated when a Decl /// is modified. Note that the key of this table is not the Decl being exported, but the Decl that /// is performing the export of another Decl. /// This table owns the Export memory. export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{}, /// The set of all the files in the Module. We keep track of this in order to iterate /// over it and check which source files have been modified on the file system when /// an update is requested, as well as to cache `@import` results. /// Keys are fully resolved file paths. This table owns the keys and values. import_table: std.StringArrayHashMapUnmanaged(*Scope.File) = .{}, /// The set of all the generic function instantiations. This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch /// to the same function. monomorphed_funcs: MonomorphedFuncsSet = .{}, /// The set of all comptime function calls that have been cached so that future calls /// with the same parameters will get the same return value. memoized_calls: MemoizedCallSet = .{}, /// We optimize memory usage for a compilation with no compile errors by storing the /// error messages and mapping outside of `Decl`. /// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator. /// Note that a Decl can succeed but the Fn it represents can fail. In this case, /// a Decl can have a failed_decls entry but have analysis status of success. failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{}, /// Keep track of one `@compileLog` callsite per owner Decl. /// The value is the AST node index offset from the Decl. compile_log_decls: std.AutoArrayHashMapUnmanaged(*Decl, i32) = .{}, /// Using a map here for consistency with the other fields here. 
/// The ErrorMsg memory is owned by the `Scope.File`, using Module's general purpose allocator. failed_files: std.AutoArrayHashMapUnmanaged(*Scope.File, ?*ErrorMsg) = .{}, /// Using a map here for consistency with the other fields here. /// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator. failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{}, next_anon_name_index: usize = 0, /// Candidates for deletion. After a semantic analysis update completes, this list /// contains Decls that need to be deleted if they end up having no references to them. deletion_set: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{}, /// Error tags and their values, tag names are duped with mod.gpa. /// Corresponds with `error_name_list`. global_error_set: std.StringHashMapUnmanaged(ErrorInt) = .{}, /// ErrorInt -> []const u8 for fast lookups for @intToError at comptime /// Corresponds with `global_error_set`. error_name_list: ArrayListUnmanaged([]const u8) = .{}, /// Incrementing integer used to compare against the corresponding Decl /// field to determine whether a Decl's status applies to an ongoing update, or a /// previous analysis. generation: u32 = 0, stage1_flags: packed struct { have_winmain: bool = false, have_wwinmain: bool = false, have_winmain_crt_startup: bool = false, have_wwinmain_crt_startup: bool = false, have_dllmain_crt_startup: bool = false, have_c_main: bool = false, reserved: u2 = 0, } = .{}, job_queued_update_builtin_zig: bool = true, compile_log_text: ArrayListUnmanaged(u8) = .{}, emit_h: ?*GlobalEmitH, test_functions: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{}, const MonomorphedFuncsSet = std.HashMapUnmanaged( *Fn, void, MonomorphedFuncsContext, std.hash_map.default_max_load_percentage, ); const MonomorphedFuncsContext = struct { pub fn eql(ctx: @This(), a: *Fn, b: *Fn) bool { _ = ctx; return a == b; } /// Must match `Sema.GenericCallAdapter.hash`. 
pub fn hash(ctx: @This(), key: *Fn) u64 { _ = ctx; var hasher = std.hash.Wyhash.init(0); // The generic function Decl is guaranteed to be the first dependency // of each of its instantiations. const generic_owner_decl = key.owner_decl.dependencies.keys()[0]; const generic_func = generic_owner_decl.val.castTag(.function).?.data; std.hash.autoHash(&hasher, @ptrToInt(generic_func)); // This logic must be kept in sync with the logic in `analyzeCall` that // computes the hash. const comptime_args = key.comptime_args.?; const generic_ty_info = generic_owner_decl.ty.fnInfo(); for (generic_ty_info.param_types) |param_ty, i| { if (generic_ty_info.paramIsComptime(i) and param_ty.tag() != .generic_poison) { comptime_args[i].val.hash(param_ty, &hasher); } } return hasher.final(); } }; pub const MemoizedCallSet = std.HashMapUnmanaged( MemoizedCall.Key, MemoizedCall.Result, MemoizedCall, std.hash_map.default_max_load_percentage, ); pub const MemoizedCall = struct { pub const Key = struct { func: *Fn, args: []TypedValue, }; pub const Result = struct { val: Value, arena: std.heap.ArenaAllocator.State, }; pub fn eql(ctx: @This(), a: Key, b: Key) bool { _ = ctx; if (a.func != b.func) return false; assert(a.args.len == b.args.len); for (a.args) |a_arg, arg_i| { const b_arg = b.args[arg_i]; if (!a_arg.eql(b_arg)) { return false; } } return true; } /// Must match `Sema.GenericCallAdapter.hash`. pub fn hash(ctx: @This(), key: Key) u64 { _ = ctx; var hasher = std.hash.Wyhash.init(0); // The generic function Decl is guaranteed to be the first dependency // of each of its instantiations. std.hash.autoHash(&hasher, @ptrToInt(key.func)); // This logic must be kept in sync with the logic in `analyzeCall` that // computes the hash. for (key.args) |arg| { arg.hash(&hasher); } return hasher.final(); } }; /// A `Module` has zero or one of these depending on whether `-femit-h` is enabled. pub const GlobalEmitH = struct { /// Where to put the output. 
loc: Compilation.EmitLoc, /// When emit_h is non-null, each Decl gets one more compile error slot for /// emit-h failing for that Decl. This table is also how we tell if a Decl has /// failed emit-h or succeeded. failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{}, /// Tracks all decls in order to iterate over them and emit .h code for them. decl_table: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{}, }; pub const ErrorInt = u32; pub const Export = struct { options: std.builtin.ExportOptions, src: LazySrcLoc, /// Represents the position of the export, if any, in the output file. link: link.File.Export, /// The Decl that performs the export. Note that this is *not* the Decl being exported. owner_decl: *Decl, /// The Decl being exported. Note this is *not* the Decl performing the export. exported_decl: *Decl, status: enum { in_progress, failed, /// Indicates that the failure was due to a temporary issue, such as an I/O error /// when writing to the output file. Retrying the export may succeed. failed_retryable, complete, }, pub fn getSrcLoc(exp: Export) SrcLoc { return .{ .file_scope = exp.owner_decl.namespace.file_scope, .parent_decl_node = exp.owner_decl.src_node, .lazy = exp.src, }; } }; /// When Module emit_h field is non-null, each Decl is allocated via this struct, so that /// there can be EmitH state attached to each Decl. pub const DeclPlusEmitH = struct { decl: Decl, emit_h: EmitH, }; pub const Decl = struct { /// Allocated with Module's allocator; outlives the ZIR code. name: [*:0]const u8, /// The most recent Type of the Decl after a successful semantic analysis. /// Populated when `has_tv`. ty: Type, /// The most recent Value of the Decl after a successful semantic analysis. /// Populated when `has_tv`. val: Value, /// Populated when `has_tv`. align_val: Value, /// Populated when `has_tv`. linksection_val: Value, /// The memory for ty, val, align_val, linksection_val. /// If this is `null` then there is no memory management needed. 
value_arena: ?*std.heap.ArenaAllocator.State = null,
/// The direct parent namespace of the Decl.
/// Reference to externally owned memory.
/// In the case of the Decl corresponding to a file, this is
/// the namespace of the struct, since there is no parent.
namespace: *Scope.Namespace,
/// An integer that can be checked against the corresponding incrementing
/// generation field of Module. This is used to determine whether `complete` status
/// represents pre- or post- re-analysis.
generation: u32,
/// The AST node index of this declaration.
/// Must be recomputed when the corresponding source file is modified.
src_node: ast.Node.Index,
/// Line number corresponding to `src_node`. Stored separately so that source files
/// do not need to be loaded into memory in order to compute debug line numbers.
src_line: u32,
/// Index to ZIR `extra` array to the entry in the parent's decl structure
/// (the part that says "for every decls_len"). The first item at this index is
/// the contents hash, followed by line, name, etc.
/// For anonymous decls and also the root Decl for a File, this is 0.
zir_decl_index: Zir.Inst.Index,
/// Represents the "shallow" analysis status. For example, for decls that are functions,
/// the function type is analyzed with this set to `in_progress`, however, the semantic
/// analysis of the function body is performed with this value set to `success`. Functions
/// have their own analysis status field.
analysis: enum {
    /// This Decl corresponds to an AST Node that has not been referenced yet, and therefore
    /// because of Zig's lazy declaration analysis, it will remain unanalyzed until referenced.
    unreferenced,
    /// Semantic analysis for this Decl is running right now.
    /// This state detects dependency loops.
    in_progress,
    /// The file corresponding to this Decl had a parse error or ZIR error.
    /// There will be a corresponding ErrorMsg in Module.failed_files.
    file_failure,
    /// This Decl might be OK but it depends on another one which did not successfully complete
    /// semantic analysis.
    dependency_failure,
    /// Semantic analysis failure.
    /// There will be a corresponding ErrorMsg in Module.failed_decls.
    sema_failure,
    /// There will be a corresponding ErrorMsg in Module.failed_decls.
    /// This indicates the failure was something like running out of disk space,
    /// and attempting semantic analysis again may succeed.
    sema_failure_retryable,
    /// There will be a corresponding ErrorMsg in Module.failed_decls.
    codegen_failure,
    /// There will be a corresponding ErrorMsg in Module.failed_decls.
    /// This indicates the failure was something like running out of disk space,
    /// and attempting codegen again may succeed.
    codegen_failure_retryable,
    /// Everything is done. During an update, this Decl may be out of date, depending
    /// on its dependencies. The `generation` field can be used to determine if this
    /// completion status occurred before or after a given update.
    complete,
    /// A Module update is in progress, and this Decl has been flagged as being known
    /// to require re-analysis.
    outdated,
},
/// Whether `typed_value`, `align_val`, and `linksection_val` are populated.
has_tv: bool,
/// If `true` it means the `Decl` is the resource owner of the type/value associated
/// with it. That means when `Decl` is destroyed, the cleanup code should additionally
/// check if the value owns a `Namespace`, and destroy that too.
owns_tv: bool,
/// This flag is set when this Decl is added to `Module.deletion_set`, and cleared
/// when removed.
deletion_flag: bool,
/// Whether the corresponding AST decl has a `pub` keyword.
is_pub: bool,
/// Whether the corresponding AST decl has an `export` keyword.
is_exported: bool,
/// Whether the ZIR code provides an align instruction.
has_align: bool,
/// Whether the ZIR code provides a linksection instruction.
has_linksection: bool,
/// Flag used by garbage collection to mark and sweep.
/// Decls which correspond to an AST node always have this field set to `true`.
/// Anonymous Decls are initialized with this field set to `false` and then it
/// is the responsibility of machine code backends to mark it `true` whenever
/// a `decl_ref` Value is encountered that points to this Decl.
/// When the `codegen_decl` job is encountered in the main work queue, if the
/// Decl is marked alive, then it sends the Decl to the linker. Otherwise it
/// deletes the Decl on the spot.
alive: bool,
/// Represents the position of the code in the output file.
/// This is populated regardless of semantic analysis and code generation.
link: link.File.LinkBlock,
/// Represents the function in the linked output file, if the `Decl` is a function.
/// This is stored here and not in `Fn` because `Decl` survives across updates but
/// `Fn` does not.
/// TODO Look into making `Fn` a longer lived structure and moving this field there
/// to save on memory usage.
fn_link: link.File.LinkFn,
/// The shallow set of other decls whose typed_value could possibly change if this Decl's
/// typed_value is modified.
dependants: DepsTable = .{},
/// The shallow set of other decls whose typed_value changing indicates that this Decl's
/// typed_value may need to be regenerated.
dependencies: DepsTable = .{},

/// Set of dependency edges between Decls. Only the keys are meaningful (the
/// value type is `void`); the array-hash-map preserves insertion order.
pub const DepsTable = std.AutoArrayHashMapUnmanaged(*Decl, void);

/// Frees the heap-allocated, null-terminated `name` string and leaves the
/// field undefined.
pub fn clearName(decl: *Decl, gpa: *Allocator) void {
    gpa.free(mem.spanZ(decl.name));
    decl.name = undefined;
}

/// Fully tears down this Decl and frees its backing allocation.
/// Removes it from `Module.test_functions` and (if flagged) `Module.deletion_set`,
/// destroys any inner namespace's decls before clearing the owned value, frees
/// the dependency tables and name, and finally frees either the surrounding
/// `DeclPlusEmitH` (when emit-h is enabled) or the bare `Decl` allocation.
pub fn destroy(decl: *Decl, module: *Module) void {
    const gpa = module.gpa;
    log.debug("destroy {*} ({s})", .{ decl, decl.name });
    _ = module.test_functions.swapRemove(decl);
    if (decl.deletion_flag) {
        assert(module.deletion_set.swapRemove(decl));
    }
    if (decl.has_tv) {
        if (decl.getInnerNamespace()) |namespace| {
            // The owned value may own a Namespace (see `owns_tv` docs); its
            // decls must be destroyed before `clearValues` frees the value arena.
            namespace.destroyDecls(module);
        }
        decl.clearValues(gpa);
    }
    decl.dependants.deinit(gpa);
    decl.dependencies.deinit(gpa);
    decl.clearName(gpa);
    if (module.emit_h != null) {
        // With emit-h enabled, every Decl is embedded inside a DeclPlusEmitH
        // allocation, so the containing struct must be freed, not the Decl alone.
        const decl_plus_emit_h = @fieldParentPtr(DeclPlusEmitH, "decl", decl);
        decl_plus_emit_h.emit_h.fwd_decl.deinit(gpa);
        gpa.destroy(decl_plus_emit_h);
    } else {
        gpa.destroy(decl);
    }
}

/// Releases the resources owned via the typed value: the function or variable
/// payload (when owned) and the value arena. Resets `has_tv`/`owns_tv`.
pub fn clearValues(decl: *Decl, gpa: *Allocator) void {
    if (decl.getFunction()) |func| {
        func.deinit(gpa);
        gpa.destroy(func);
    }
    if (decl.getVariable()) |variable| {
        gpa.destroy(variable);
    }
    if (decl.value_arena) |arena_state| {
        // The arena state itself lives inside the arena; promoting and
        // deinit-ing frees everything including the state.
        arena_state.promote(gpa).deinit();
        decl.value_arena = null;
        decl.has_tv = false;
        decl.owns_tv = false;
    }
}

/// Takes ownership of `arena` by copying its State into memory allocated from
/// the arena itself. Asserts the Decl does not already own a value arena.
pub fn finalizeNewArena(decl: *Decl, arena: *std.heap.ArenaAllocator) !void {
    assert(decl.value_arena == null);
    const arena_state = try arena.allocator.create(std.heap.ArenaAllocator.State);
    arena_state.* = arena.state;
    decl.value_arena = arena_state;
}

/// This name is relative to the containing namespace of the decl.
/// The memory is owned by the containing File ZIR.
pub fn getName(decl: Decl) ?[:0]const u8 {
    const zir = decl.namespace.file_scope.zir;
    return decl.getNameZir(zir);
}

/// Reads the decl name from the given ZIR.
/// Layout of the parent's decl entry at `zir_decl_index` (see the field's
/// doc comment): [0..4] contents hash, [+4] line, [+5] name,
/// [+6] body block index, then optional align and linksection operands.
pub fn getNameZir(decl: Decl, zir: Zir) ?[:0]const u8 {
    assert(decl.zir_decl_index != 0);
    const name_index = zir.extra[decl.zir_decl_index + 5];
    // Values 0 and 1 do not refer to an entry in the string table.
    if (name_index <= 1) return null;
    return zir.nullTerminatedString(name_index);
}

pub fn contentsHash(decl: Decl) std.zig.SrcHash {
    const zir = decl.namespace.file_scope.zir;
    return decl.contentsHashZir(zir);
}

/// The contents hash occupies the first 4 u32s of the decl entry.
pub fn contentsHashZir(decl: Decl, zir: Zir) std.zig.SrcHash {
    assert(decl.zir_decl_index != 0);
    const hash_u32s = zir.extra[decl.zir_decl_index..][0..4];
    const contents_hash = @bitCast(std.zig.SrcHash, hash_u32s.*);
    return contents_hash;
}

/// Returns the ZIR instruction index of this decl's body block.
pub fn zirBlockIndex(decl: *const Decl) Zir.Inst.Index {
    assert(decl.zir_decl_index != 0);
    const zir = decl.namespace.file_scope.zir;
    return zir.extra[decl.zir_decl_index + 6];
}

/// Returns the align operand, or `.none` when no align was provided.
pub fn zirAlignRef(decl: Decl) Zir.Inst.Ref {
    if (!decl.has_align) return .none;
    assert(decl.zir_decl_index != 0);
    const zir = decl.namespace.file_scope.zir;
    // Fixed offset: was `+ 6`, but that slot is the body block index (see
    // `zirBlockIndex` above); the optional align operand follows it at +7.
    return @intToEnum(Zir.Inst.Ref, zir.extra[decl.zir_decl_index + 7]);
}

/// Returns the linksection operand, or `.none` when none was provided.
pub fn zirLinksectionRef(decl: Decl) Zir.Inst.Ref {
    if (!decl.has_linksection) return .none;
    assert(decl.zir_decl_index != 0);
    const zir = decl.namespace.file_scope.zir;
    // Fixed offset: the optional operands start at +7 (after hash, line, name,
    // and block index), and linksection comes after align when align exists.
    const extra_index = decl.zir_decl_index + 7 + @boolToInt(decl.has_align);
    return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
}

/// Returns true if and only if the Decl is the top level struct associated with a File.
/// Returns true only for the root Decl of a File: no parent namespace, and the
/// Decl owns its namespace's type.
pub fn isRoot(decl: *const Decl) bool {
    if (decl.namespace.parent != null) return false;
    return decl == decl.namespace.ty.getOwnerDecl();
}

/// Converts a line offset relative to this Decl into an absolute line number.
pub fn relativeToLine(decl: Decl, offset: u32) u32 {
    return decl.src_line + offset;
}

/// Converts a signed node offset relative to this Decl into an absolute
/// AST node index. Signed arithmetic is done via bit-casts on the u32 index.
pub fn relativeToNodeIndex(decl: Decl, offset: i32) ast.Node.Index {
    return @bitCast(ast.Node.Index, offset + @bitCast(i32, decl.src_node));
}

/// Inverse of `relativeToNodeIndex`: absolute node index -> signed offset.
pub fn nodeIndexToRelative(decl: Decl, node_index: ast.Node.Index) i32 {
    return @bitCast(i32, node_index) - @bitCast(i32, decl.src_node);
}

/// Builds a token-offset LazySrcLoc relative to this Decl's first token.
pub fn tokSrcLoc(decl: Decl, token_index: ast.TokenIndex) LazySrcLoc {
    return .{ .token_offset = token_index - decl.srcToken() };
}

/// Builds a node-offset LazySrcLoc relative to this Decl's node.
pub fn nodeSrcLoc(decl: Decl, node_index: ast.Node.Index) LazySrcLoc {
    return .{ .node_offset = decl.nodeIndexToRelative(node_index) };
}

/// Source location of the Decl's own node.
pub fn srcLoc(decl: Decl) SrcLoc {
    return decl.nodeOffsetSrcLoc(0);
}

pub fn nodeOffsetSrcLoc(decl: Decl, node_offset: i32) SrcLoc {
    return .{
        .file_scope = decl.getFileScope(),
        .parent_decl_node = decl.src_node,
        .lazy = .{ .node_offset = node_offset },
    };
}

/// First token of this Decl's AST node. Requires the tree to be loaded.
pub fn srcToken(decl: Decl) ast.TokenIndex {
    const tree = &decl.namespace.file_scope.tree;
    return tree.firstToken(decl.src_node);
}

/// Byte offset of this Decl's first token within the source file.
pub fn srcByteOffset(decl: Decl) u32 {
    const tree = &decl.namespace.file_scope.tree;
    return tree.tokens.items(.start)[decl.srcToken()];
}

/// Writes e.g. "std.fs.Dir.OpenOptions" for this Decl to `writer`.
pub fn renderFullyQualifiedName(decl: Decl, writer: anytype) !void {
    const unqualified_name = mem.spanZ(decl.name);
    return decl.namespace.renderFullyQualifiedName(unqualified_name, writer);
}

/// Caller owns the returned memory, allocated with `gpa`.
pub fn getFullyQualifiedName(decl: Decl, gpa: *Allocator) ![]u8 {
    var buffer = std.ArrayList(u8).init(gpa);
    defer buffer.deinit();
    try decl.renderFullyQualifiedName(buffer.writer());
    return buffer.toOwnedSlice();
}

/// Returns the analyzed type/value, or error.AnalysisFail when analysis has
/// not (successfully) populated them.
pub fn typedValue(decl: Decl) error{AnalysisFail}!TypedValue {
    if (!decl.has_tv) return error.AnalysisFail;
    return TypedValue{
        .ty = decl.ty,
        .val = decl.val,
    };
}

pub fn value(decl: *Decl) error{AnalysisFail}!Value {
    return (try decl.typedValue()).val;
}

/// Whether the analyzed type of this Decl is a function type.
pub fn isFunction(decl: *Decl) !bool {
    const tv = try decl.typedValue();
    return tv.ty.zigTypeTag() == .Fn;
}

/// If the Decl has a value and it is a struct, return it,
/// otherwise null.
pub fn getStruct(decl: *Decl) ?*Struct {
    if (!decl.owns_tv) return null;
    const ty = (decl.val.castTag(.ty) orelse return null).data;
    const struct_obj = (ty.castTag(.@"struct") orelse return null).data;
    assert(struct_obj.owner_decl == decl);
    return struct_obj;
}

/// If the Decl has a value and it is a union, return it,
/// otherwise null.
pub fn getUnion(decl: *Decl) ?*Union {
    if (!decl.owns_tv) return null;
    const ty = (decl.val.castTag(.ty) orelse return null).data;
    const union_obj = (ty.cast(Type.Payload.Union) orelse return null).data;
    assert(union_obj.owner_decl == decl);
    return union_obj;
}

/// If the Decl has a value and it is a function, return it,
/// otherwise null.
pub fn getFunction(decl: *Decl) ?*Fn {
    if (!decl.owns_tv) return null;
    const func = (decl.val.castTag(.function) orelse return null).data;
    assert(func.owner_decl == decl);
    return func;
}

/// If the Decl has a value and it is a variable, return it, otherwise null.
pub fn getVariable(decl: *Decl) ?*Var {
    if (!decl.owns_tv) return null;
    const variable = (decl.val.castTag(.variable) orelse return null).data;
    assert(variable.owner_decl == decl);
    return variable;
}

/// Gets the namespace that this Decl creates by being a struct, union,
/// enum, or opaque.
/// Only returns it if the Decl is the owner.
pub fn getInnerNamespace(decl: *Decl) ?*Scope.Namespace {
    if (!decl.owns_tv) return null;
    const ty = (decl.val.castTag(.ty) orelse return null).data;
    switch (ty.tag()) {
        .@"struct" => {
            const struct_obj = ty.castTag(.@"struct").?.data;
            assert(struct_obj.owner_decl == decl);
            return &struct_obj.namespace;
        },
        .enum_full, .enum_nonexhaustive => {
            const enum_obj = ty.cast(Type.Payload.EnumFull).?.data;
            assert(enum_obj.owner_decl == decl);
            return &enum_obj.namespace;
        },
        .empty_struct => {
            return ty.castTag(.empty_struct).?.data;
        },
        .@"opaque" => {
            @panic("TODO opaque types");
        },
        .@"union", .union_tagged => {
            const union_obj = ty.cast(Type.Payload.Union).?.data;
            assert(union_obj.owner_decl == decl);
            return &union_obj.namespace;
        },
        else => return null,
    }
}

/// Debug helper that prints this Decl's location, name, and analysis status
/// to stderr.
/// NOTE(review): references `decl.scope` and `decl.src`, which are not fields
/// of this struct as visible here — this looks like stale debug code; confirm
/// it still compiles/is used before relying on it.
pub fn dump(decl: *Decl) void {
    const loc = std.zig.findLineColumn(decl.scope.source.bytes, decl.src);
    std.debug.print("{s}:{d}:{d} name={s} status={s}", .{
        decl.scope.sub_file_path,
        loc.line + 1,
        loc.column + 1,
        mem.spanZ(decl.name),
        @tagName(decl.analysis),
    });
    if (decl.has_tv) {
        std.debug.print(" ty={} val={}", .{ decl.ty, decl.val });
    }
    std.debug.print("\n", .{});
}

pub fn getFileScope(decl: Decl) *Scope.File {
    return decl.namespace.file_scope;
}

/// Recovers the EmitH state stored alongside this Decl.
/// Asserts emit-h mode is enabled (otherwise no DeclPlusEmitH exists).
pub fn getEmitH(decl: *Decl, module: *Module) *EmitH {
    assert(module.emit_h != null);
    const decl_plus_emit_h = @fieldParentPtr(DeclPlusEmitH, "decl", decl);
    return &decl_plus_emit_h.emit_h;
}

/// Asserts `other` was present in the dependants set.
fn removeDependant(decl: *Decl, other: *Decl) void {
    assert(decl.dependants.swapRemove(other));
}

/// Asserts `other` was present in the dependencies set.
fn removeDependency(decl: *Decl, other: *Decl) void {
    assert(decl.dependencies.swapRemove(other));
}
};

/// This state is attached to every Decl when Module emit_h is non-null.
pub const EmitH = struct {
    fwd_decl: ArrayListUnmanaged(u8) = .{},
};

/// Represents the data that an explicit error set syntax provides.
pub const ErrorSet = struct {
    /// The Decl that corresponds to the error set itself.
    owner_decl: *Decl,
    /// Offset from Decl node index, points to the error set AST node.
    node_offset: i32,
    /// Number of error names in this set.
    names_len: u32,
    /// The string bytes are stored in the owner Decl arena.
    /// They are in the same order they appear in the AST.
    /// The length is given by `names_len`.
    names_ptr: [*]const []const u8,

    /// Source location of the error set AST node.
    pub fn srcLoc(self: ErrorSet) SrcLoc {
        return .{
            .file_scope = self.owner_decl.getFileScope(),
            .parent_decl_node = self.owner_decl.src_node,
            .lazy = .{ .node_offset = self.node_offset },
        };
    }
};

/// Represents the data that a struct declaration provides.
pub const Struct = struct {
    /// The Decl that corresponds to the struct itself.
    owner_decl: *Decl,
    /// Set of field names in declaration order.
    fields: std.StringArrayHashMapUnmanaged(Field),
    /// Represents the declarations inside this struct.
    namespace: Scope.Namespace,
    /// Offset from `owner_decl`, points to the struct AST node.
    node_offset: i32,
    /// Index of the struct_decl ZIR instruction.
    zir_index: Zir.Inst.Index,
    layout: std.builtin.TypeInfo.ContainerLayout,
    /// Tracks progressive resolution of this struct's fields and layout.
    status: enum {
        none,
        field_types_wip,
        have_field_types,
        layout_wip,
        have_layout,
    },
    /// If true, definitely nonzero size at runtime. If false, resolving the fields
    /// is necessary to determine whether it has bits at runtime.
    known_has_bits: bool,

    /// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl.
    pub const Field = struct {
        /// Uses `noreturn` to indicate `anytype`.
        /// undefined until `status` is `have_field_types` or `have_layout`.
        ty: Type,
        abi_align: Value,
        /// Uses `unreachable_value` to indicate no default.
        default_val: Value,
        /// undefined until `status` is `have_layout`.
        offset: u32,
        is_comptime: bool,
    };

    pub fn getFullyQualifiedName(s: *Struct, gpa: *Allocator) ![]u8 {
        return s.owner_decl.getFullyQualifiedName(gpa);
    }

    /// Source location of the struct AST node.
    pub fn srcLoc(s: Struct) SrcLoc {
        return .{
            .file_scope = s.owner_decl.getFileScope(),
            .parent_decl_node = s.owner_decl.src_node,
            .lazy = .{ .node_offset = s.node_offset },
        };
    }

    /// True once field type resolution has completed (layout may still be pending).
    pub fn haveFieldTypes(s: Struct) bool {
        return switch (s.status) {
            .none,
            .field_types_wip,
            => false,
            .have_field_types,
            .layout_wip,
            .have_layout,
            => true,
        };
    }
};

/// Represents the data that an enum declaration provides, when the fields
/// are auto-numbered, and there are no declarations. The integer tag type
/// is inferred to be the smallest power of two unsigned int that fits
/// the number of fields.
pub const EnumSimple = struct {
    /// The Decl that corresponds to the enum itself.
    owner_decl: *Decl,
    /// Set of field names in declaration order.
    fields: std.StringArrayHashMapUnmanaged(void),
    /// Offset from `owner_decl`, points to the enum decl AST node.
    node_offset: i32,

    /// Source location of the enum decl AST node.
    pub fn srcLoc(self: EnumSimple) SrcLoc {
        return .{
            .file_scope = self.owner_decl.getFileScope(),
            .parent_decl_node = self.owner_decl.src_node,
            .lazy = .{ .node_offset = self.node_offset },
        };
    }
};

/// Represents the data that an enum declaration provides, when there is
/// at least one tag value explicitly specified, or at least one declaration.
pub const EnumFull = struct {
    /// The Decl that corresponds to the enum itself.
    owner_decl: *Decl,
    /// An integer type which is used for the numerical value of the enum.
    /// Whether zig chooses this type or the user specifies it, it is stored here.
    tag_ty: Type,
    /// Set of field names in declaration order.
    fields: std.StringArrayHashMapUnmanaged(void),
    /// Maps integer tag value to field index.
    /// Entries are in declaration order, same as `fields`.
    /// If this hash map is empty, it means the enum tags are auto-numbered.
    values: ValueMap,
    /// Represents the declarations inside this struct.
    namespace: Scope.Namespace,
    /// Offset from `owner_decl`, points to the enum decl AST node.
    node_offset: i32,

    pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false);

    /// Source location of the enum decl AST node.
    pub fn srcLoc(self: EnumFull) SrcLoc {
        return .{
            .file_scope = self.owner_decl.getFileScope(),
            .parent_decl_node = self.owner_decl.src_node,
            .lazy = .{ .node_offset = self.node_offset },
        };
    }
};

/// Represents the data that a union declaration provides.
pub const Union = struct {
    /// The Decl that corresponds to the union itself.
    owner_decl: *Decl,
    /// An enum type which is used for the tag of the union.
    /// This type is created even for untagged unions, even when the memory
    /// layout does not store the tag.
    /// Whether zig chooses this type or the user specifies it, it is stored here.
    /// This will be set to the null type until status is `have_field_types`.
    tag_ty: Type,
    /// Set of field names in declaration order.
    fields: std.StringArrayHashMapUnmanaged(Field),
    /// Represents the declarations inside this union.
    namespace: Scope.Namespace,
    /// Offset from `owner_decl`, points to the union decl AST node.
    node_offset: i32,
    /// Index of the union_decl ZIR instruction.
    zir_index: Zir.Inst.Index,
    layout: std.builtin.TypeInfo.ContainerLayout,
    /// Tracks progressive resolution of this union's fields and layout.
    status: enum {
        none,
        field_types_wip,
        have_field_types,
        layout_wip,
        have_layout,
    },

    pub const Field = struct {
        /// undefined until `status` is `have_field_types` or `have_layout`.
        ty: Type,
        abi_align: Value,
    };

    pub fn getFullyQualifiedName(s: *Union, gpa: *Allocator) ![]u8 {
        return s.owner_decl.getFullyQualifiedName(gpa);
    }

    /// Source location of the union decl AST node.
    pub fn srcLoc(self: Union) SrcLoc {
        return .{
            .file_scope = self.owner_decl.getFileScope(),
            .parent_decl_node = self.owner_decl.src_node,
            .lazy = .{ .node_offset = self.node_offset },
        };
    }
};

/// Some Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator.
/// Extern functions do not have this data structure; they are represented by
/// the `Decl` only, with a `Value` tag of `extern_fn`.
pub const Fn = struct {
    /// The Decl that corresponds to the function itself.
    owner_decl: *Decl,
    /// If this is not null, this function is a generic function instantiation, and
    /// there is a `TypedValue` here for each parameter of the function.
    /// Non-comptime parameters are marked with a `generic_poison` for the value.
    /// Non-anytype parameters are marked with a `generic_poison` for the type.
    comptime_args: ?[*]TypedValue = null,
    /// The ZIR instruction that is a function instruction. Use this to find
    /// the body. We store this rather than the body directly so that when ZIR
    /// is regenerated on update(), we can map this to the new corresponding
    /// ZIR instruction.
    zir_body_inst: Zir.Inst.Index,
    /// Relative to owner Decl.
    lbrace_line: u32,
    /// Relative to owner Decl.
    rbrace_line: u32,
    lbrace_column: u16,
    rbrace_column: u16,
    state: Analysis,
    is_cold: bool = false,

    /// Analysis status of the function *body* (the shallow type analysis
    /// status lives on the owner Decl).
    pub const Analysis = enum {
        queued,
        /// This function intentionally only has ZIR generated because it is marked
        /// inline, which means no runtime version of the function will be generated.
        inline_only,
        in_progress,
        /// There will be a corresponding ErrorMsg in Module.failed_decls
        sema_failure,
        /// This Fn might be OK but it depends on another Decl which did not
        /// successfully complete semantic analysis.
        dependency_failure,
        success,
    };

    /// Frees the inferred error set map, if present. The Fn memory itself is
    /// owned by the Decl's value arena and is not freed here.
    pub fn deinit(func: *Fn, gpa: *Allocator) void {
        if (func.getInferredErrorSet()) |map| {
            map.deinit(gpa);
        }
    }

    /// Returns the mutable name-set of this function's inferred error set,
    /// or null when the return type is a generic poison or is not an error
    /// union with an inferred error set.
    pub fn getInferredErrorSet(func: *Fn) ?*std.StringHashMapUnmanaged(void) {
        const ret_ty = func.owner_decl.ty.fnReturnType();
        if (ret_ty.tag() == .generic_poison) {
            return null;
        }
        if (ret_ty.zigTypeTag() == .ErrorUnion) {
            if (ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| {
                return &payload.data.map;
            }
        }
        return null;
    }
};

/// Represents a global variable, owned by a Decl.
pub const Var = struct {
    /// if is_extern == true this is undefined
    init: Value,
    owner_decl: *Decl,
    is_extern: bool,
    is_mutable: bool,
    is_threadlocal: bool,
};

/// Tagged base type for the three scope kinds (file, namespace, block).
/// Concrete scopes embed this as their `base` field and are recovered
/// via `@fieldParentPtr`.
pub const Scope = struct {
    tag: Tag,

    /// Downcasts to the concrete scope type `T`, or null when `tag` does not
    /// match `T.base_tag`.
    pub fn cast(base: *Scope, comptime T: type) ?*T {
        if (base.tag != T.base_tag) return null;
        return @fieldParentPtr(T, "base", base);
    }

    /// The Decl under semantic analysis (only meaningful for block scopes).
    pub fn ownerDecl(scope: *Scope) ?*Decl {
        return switch (scope.tag) {
            .block => scope.cast(Block).?.sema.owner_decl,
            .file => null,
            .namespace => null,
        };
    }

    /// The Decl the source code syntactically belongs to, which may differ
    /// from `ownerDecl` during inline/comptime calls.
    pub fn srcDecl(scope: *Scope) ?*Decl {
        return switch (scope.tag) {
            .block => scope.cast(Block).?.src_decl,
            .file => null,
            .namespace => scope.cast(Namespace).?.getDecl(),
        };
    }

    /// Asserts the scope has a parent which is a Namespace and returns it.
    pub fn namespace(scope: *Scope) *Namespace {
        switch (scope.tag) {
            .block => return scope.cast(Block).?.sema.owner_decl.namespace,
            .file => return scope.cast(File).?.root_decl.?.namespace,
            .namespace => return scope.cast(Namespace).?,
        }
    }

    /// Asserts the scope has a parent which is a Namespace or File and
    /// returns the sub_file_path field.
    pub fn subFilePath(base: *Scope) []const u8 {
        switch (base.tag) {
            .namespace => return @fieldParentPtr(Namespace, "base", base).file_scope.sub_file_path,
            .file => return @fieldParentPtr(File, "base", base).sub_file_path,
            .block => unreachable,
        }
    }

    /// When called from inside a Block Scope, chases the src_decl, not the owner_decl.
pub fn getFileScope(base: *Scope) *Scope.File {
    var cur = base;
    // Every branch returns, so the loop body runs at most once; the loop
    // shape is kept for future scope kinds that would need to walk upward.
    while (true) {
        cur = switch (cur.tag) {
            .namespace => return @fieldParentPtr(Namespace, "base", cur).file_scope,
            .file => return @fieldParentPtr(File, "base", cur),
            .block => return @fieldParentPtr(Block, "base", cur).src_decl.namespace.file_scope,
        };
    }
}

pub const Tag = enum {
    /// .zig source code.
    file,
    /// Namespace owned by structs, enums, unions, and opaques for decls.
    namespace,
    block,
};

/// The container that structs, enums, unions, and opaques have.
pub const Namespace = struct {
    pub const base_tag: Tag = .namespace;
    base: Scope = Scope{ .tag = base_tag },

    parent: ?*Namespace,
    file_scope: *Scope.File,
    /// Will be a struct, enum, union, or opaque.
    ty: Type,
    /// Direct children of the namespace. Used during an update to detect
    /// which decls have been added/removed from source.
    /// Declaration order is preserved via entry order.
    /// Key memory is owned by `decl.name`.
    /// TODO save memory with https://github.com/ziglang/zig/issues/8619.
    /// Anonymous decls are not stored here; they are kept in `anon_decls` instead.
    decls: std.StringArrayHashMapUnmanaged(*Decl) = .{},
    anon_decls: std.AutoArrayHashMapUnmanaged(*Decl, void) = .{},

    pub fn deinit(ns: *Namespace, mod: *Module) void {
        ns.destroyDecls(mod);
        ns.* = undefined;
    }

    /// Destroys all named and anonymous child decls and frees both tables.
    /// The tables are moved out of the namespace before iterating —
    /// presumably to guard against re-entrant modification while decls are
    /// being destroyed; confirm before changing.
    pub fn destroyDecls(ns: *Namespace, mod: *Module) void {
        const gpa = mod.gpa;
        log.debug("destroyDecls {*}", .{ns});
        var decls = ns.decls;
        ns.decls = .{};
        var anon_decls = ns.anon_decls;
        ns.anon_decls = .{};
        for (decls.values()) |value| {
            value.destroy(mod);
        }
        decls.deinit(gpa);
        for (anon_decls.keys()) |key| {
            key.destroy(mod);
        }
        anon_decls.deinit(gpa);
    }

    /// Like `destroyDecls`, but runs `Module.clearDecl` on each child first,
    /// collecting outdated decls into `outdated_decls` when provided.
    pub fn deleteAllDecls(
        ns: *Namespace,
        mod: *Module,
        outdated_decls: ?*std.AutoArrayHashMap(*Decl, void),
    ) !void {
        const gpa = mod.gpa;
        log.debug("deleteAllDecls {*}", .{ns});
        var decls = ns.decls;
        ns.decls = .{};
        var anon_decls = ns.anon_decls;
        ns.anon_decls = .{};
        // TODO rework this code to not panic on OOM.
        // (might want to coordinate with the clearDecl function)
        for (decls.values()) |child_decl| {
            mod.clearDecl(child_decl, outdated_decls) catch @panic("out of memory");
            child_decl.destroy(mod);
        }
        decls.deinit(gpa);
        for (anon_decls.keys()) |child_decl| {
            mod.clearDecl(child_decl, outdated_decls) catch @panic("out of memory");
            child_decl.destroy(mod);
        }
        anon_decls.deinit(gpa);
    }

    // This renders e.g. "std.fs.Dir.OpenOptions"
    pub fn renderFullyQualifiedName(
        ns: Namespace,
        name: []const u8,
        writer: anytype,
    ) @TypeOf(writer).Error!void {
        if (ns.parent) |parent| {
            const decl = ns.getDecl();
            try parent.renderFullyQualifiedName(mem.spanZ(decl.name), writer);
        } else {
            try ns.file_scope.renderFullyQualifiedName(writer);
        }
        if (name.len != 0) {
            try writer.writeAll(".");
            try writer.writeAll(name);
        }
    }

    /// The Decl that owns this namespace's type.
    pub fn getDecl(ns: Namespace) *Decl {
        return ns.ty.getOwnerDecl();
    }
};

/// Scope corresponding to a .zig source file.
pub const File = struct {
    pub const base_tag: Tag = .file;
    base: Scope = Scope{ .tag = base_tag },

    status: enum {
        never_loaded,
        retryable_failure,
        parse_failure,
        astgen_failure,
        success_zir,
    },
    source_loaded: bool,
    tree_loaded: bool,
    zir_loaded: bool,
    /// Relative to the owning package's root_src_dir.
    /// Memory is stored in gpa, owned by File.
    sub_file_path: []const u8,
    /// Whether this is populated depends on `source_loaded`.
    source: [:0]const u8,
    /// Whether this is populated depends on `status`.
    stat_size: u64,
    /// Whether this is populated depends on `status`.
    stat_inode: std.fs.File.INode,
    /// Whether this is populated depends on `status`.
    stat_mtime: i128,
    /// Whether this is populated or not depends on `tree_loaded`.
    tree: ast.Tree,
    /// Whether this is populated or not depends on `zir_loaded`.
    zir: Zir,
    /// Package that this file is a part of, managed externally.
    pkg: *Package,
    /// The Decl of the struct that represents this File.
    root_decl: ?*Decl,
    /// Used by change detection algorithm, after astgen, contains the
    /// set of decls that existed in the previous ZIR but not in the new one.
    deleted_decls: std.ArrayListUnmanaged(*Decl) = .{},
    /// Used by change detection algorithm, after astgen, contains the
    /// set of decls that existed both in the previous ZIR and in the new one,
    /// but their source code has been modified.
    outdated_decls: std.ArrayListUnmanaged(*Decl) = .{},

    /// The most recent successful ZIR for this file, with no errors.
    /// This is only populated when a previously successful ZIR
    /// newly introduces compile errors during an update. When ZIR is
    /// successful, this field is unloaded.
    prev_zir: ?*Zir = null,

    /// Unloads tree, source, and ZIR (each only if currently loaded).
    pub fn unload(file: *File, gpa: *Allocator) void {
        file.unloadTree(gpa);
        file.unloadSource(gpa);
        file.unloadZir(gpa);
    }

    pub fn unloadTree(file: *File, gpa: *Allocator) void {
        if (file.tree_loaded) {
            file.tree_loaded = false;
            file.tree.deinit(gpa);
        }
    }

    pub fn unloadSource(file: *File, gpa: *Allocator) void {
        if (file.source_loaded) {
            file.source_loaded = false;
            gpa.free(file.source);
        }
    }

    pub fn unloadZir(file: *File, gpa: *Allocator) void {
        if (file.zir_loaded) {
            file.zir_loaded = false;
            file.zir.deinit(gpa);
        }
    }

    /// Frees everything owned by this File, including the root Decl and any
    /// saved previous ZIR. Does not free the File struct itself (see `destroy`).
    pub fn deinit(file: *File, mod: *Module) void {
        const gpa = mod.gpa;
        log.debug("deinit File {s}", .{file.sub_file_path});
        file.deleted_decls.deinit(gpa);
        file.outdated_decls.deinit(gpa);
        if (file.root_decl) |root_decl| {
            root_decl.destroy(mod);
        }
        gpa.free(file.sub_file_path);
        file.unload(gpa);
        if (file.prev_zir) |prev_zir| {
            prev_zir.deinit(gpa);
            gpa.destroy(prev_zir);
        }
        file.* = undefined;
    }

    /// Returns the source bytes, loading them from disk if not cached.
    /// The returned slice is owned by the File (freed on unload).
    pub fn getSource(file: *File, gpa: *Allocator) ![:0]const u8 {
        if (file.source_loaded) return file.source;

        const root_dir_path = file.pkg.root_src_directory.path orelse ".";
        log.debug("File.getSource, not cached. pkgdir={s} sub_file_path={s}", .{
            root_dir_path, file.sub_file_path,
        });

        // Keep track of inode, file size, mtime, hash so we can detect which files
        // have been modified when an incremental update is requested.
        var f = try file.pkg.root_src_directory.handle.openFile(file.sub_file_path, .{});
        defer f.close();

        const stat = try f.stat();

        if (stat.size > std.math.maxInt(u32))
            return error.FileTooBig;

        const source = try gpa.allocSentinel(u8, @intCast(usize, stat.size), 0);
        // Free on any early error return below; once source_loaded is set,
        // ownership transfers to the File.
        defer if (!file.source_loaded) gpa.free(source);
        const amt = try f.readAll(source);
        if (amt != stat.size)
            return error.UnexpectedEndOfFile;

        // Here we do not modify stat fields because this function is the one
        // used for error reporting. We need to keep the stat fields stale so that
        // astGenFile can know to regenerate ZIR.

        file.source = source;
        file.source_loaded = true;
        return source;
    }

    /// Returns the parsed tree, parsing the source first if needed.
    pub fn getTree(file: *File, gpa: *Allocator) !*const ast.Tree {
        if (file.tree_loaded) return &file.tree;

        const source = try file.getSource(gpa);
        file.tree = try std.zig.parse(gpa, source);
        file.tree_loaded = true;
        return &file.tree;
    }

    /// `deinit` plus freeing the File allocation itself.
    pub fn destroy(file: *File, mod: *Module) void {
        const gpa = mod.gpa;
        file.deinit(mod);
        gpa.destroy(file);
    }

    pub fn renderFullyQualifiedName(file: File, writer: anytype) !void {
        // Convert all the slashes into dots and truncate the extension.
        const ext = std.fs.path.extension(file.sub_file_path);
        const noext = file.sub_file_path[0 .. file.sub_file_path.len - ext.len];
        for (noext) |byte| switch (byte) {
            '/', '\\' => try writer.writeByte('.'),
            else => try writer.writeByte(byte),
        };
    }

    /// Caller owns the returned null-terminated memory, allocated with `gpa`.
    pub fn fullyQualifiedNameZ(file: File, gpa: *Allocator) ![:0]u8 {
        var buf = std.ArrayList(u8).init(gpa);
        defer buf.deinit();
        try file.renderFullyQualifiedName(buf.writer());
        return buf.toOwnedSliceSentinel(0);
    }

    /// Returns the full path to this file relative to its package.
pub fn fullPath(file: File, ally: *Allocator) ![]u8 {
    return file.pkg.root_src_directory.join(ally, &[_][]const u8{file.sub_file_path});
}

/// Debug helper that prints a source location to stderr.
/// NOTE(review): `file.source.bytes` — `source` is a `[:0]const u8` slice with
/// no `bytes` field as declared here; this looks like stale debug code.
/// Confirm before relying on it.
pub fn dumpSrc(file: *File, src: LazySrcLoc) void {
    const loc = std.zig.findLineColumn(file.source.bytes, src);
    std.debug.print("{s}:{d}:{d}\n", .{ file.sub_file_path, loc.line + 1, loc.column + 1 });
}

/// Whether errors for this file should be reported; parse/astgen failures
/// already produced their own error messages.
pub fn okToReportErrors(file: File) bool {
    return switch (file.status) {
        .parse_failure, .astgen_failure => false,
        else => true,
    };
}
};

/// This is the context needed to semantically analyze ZIR instructions and
/// produce AIR instructions.
/// This is a temporary structure stored on the stack; references to it are valid only
/// during semantic analysis of the block.
pub const Block = struct {
    pub const base_tag: Tag = .block;

    base: Scope = Scope{ .tag = base_tag },
    parent: ?*Block,
    /// Shared among all child blocks.
    sema: *Sema,
    /// This Decl is the Decl according to the Zig source code corresponding to this Block.
    /// This can vary during inline or comptime function calls. See `Sema.owner_decl`
    /// for the one that will be the same for all Block instances.
    src_decl: *Decl,
    instructions: ArrayListUnmanaged(Air.Inst.Index),
    // `param` instructions are collected here to be used by the `func` instruction.
    params: std.ArrayListUnmanaged(Param) = .{},

    label: ?*Label = null,
    inlining: ?*Inlining,
    /// If runtime_index is not 0 then one of these is guaranteed to be non null.
    runtime_cond: ?LazySrcLoc = null,
    runtime_loop: ?LazySrcLoc = null,
    /// Non zero if a non-inline loop or a runtime conditional have been encountered.
    /// Stores to to comptime variables are only allowed when var.runtime_index <= runtime_index.
    runtime_index: u32 = 0,

    is_comptime: bool,

    /// when null, it is determined by build mode, changed by @setRuntimeSafety
    want_safety: ?bool = null,

    const Param = struct {
        /// `noreturn` means `anytype`.
        ty: Type,
        is_comptime: bool,
    };

    /// This `Block` maps a block ZIR instruction to the corresponding
    /// AIR instruction for break instruction analysis.
    pub const Label = struct {
        zir_block: Zir.Inst.Index,
        merges: Merges,
    };

    /// This `Block` indicates that an inline function call is happening
    /// and return instructions should be analyzed as a break instruction
    /// to this AIR block instruction.
    /// It is shared among all the blocks in an inline or comptime called
    /// function.
    pub const Inlining = struct {
        merges: Merges,
    };

    pub const Merges = struct {
        block_inst: Air.Inst.Index,
        /// Separate array list from break_inst_list so that it can be passed directly
        /// to resolvePeerTypes.
        results: ArrayListUnmanaged(Air.Inst.Ref),
        /// Keeps track of the break instructions so that the operand can be replaced
        /// if we need to add type coercion at the end of block analysis.
        /// Same indexes, capacity, length as `results`.
        br_list: ArrayListUnmanaged(Air.Inst.Index),
    };

    /// For debugging purposes.
pub fn dump(block: *Block, mod: Module) void {
    Zir.dumpBlock(mod, block);
}

/// Creates a child block that inherits this block's analysis context.
/// The child starts with an empty instruction list and no label.
pub fn makeSubBlock(parent: *Block) Block {
    return .{
        .parent = parent,
        .sema = parent.sema,
        .src_decl = parent.src_decl,
        .instructions = .{},
        .label = null,
        .inlining = parent.inlining,
        .is_comptime = parent.is_comptime,
        .runtime_cond = parent.runtime_cond,
        .runtime_loop = parent.runtime_loop,
        .runtime_index = parent.runtime_index,
        .want_safety = parent.want_safety,
    };
}

/// Whether runtime safety checks should be emitted: an explicit
/// @setRuntimeSafety override if present, otherwise derived from build mode.
pub fn wantSafety(block: *const Block) bool {
    return block.want_safety orelse switch (block.sema.mod.optimizeMode()) {
        .Debug => true,
        .ReleaseSafe => true,
        .ReleaseFast => false,
        .ReleaseSmall => false,
    };
}

pub fn getFileScope(block: *Block) *Scope.File {
    return block.src_decl.namespace.file_scope;
}

// The add* helpers below each append one AIR instruction of the given tag
// with the corresponding data payload and return its Ref.

pub fn addTy(
    block: *Block,
    tag: Air.Inst.Tag,
    ty: Type,
) error{OutOfMemory}!Air.Inst.Ref {
    return block.addInst(.{
        .tag = tag,
        .data = .{ .ty = ty },
    });
}

pub fn addTyOp(
    block: *Block,
    tag: Air.Inst.Tag,
    ty: Type,
    operand: Air.Inst.Ref,
) error{OutOfMemory}!Air.Inst.Ref {
    return block.addInst(.{
        .tag = tag,
        .data = .{ .ty_op = .{
            .ty = try block.sema.addType(ty),
            .operand = operand,
        } },
    });
}

pub fn addNoOp(block: *Block, tag: Air.Inst.Tag) error{OutOfMemory}!Air.Inst.Ref {
    return block.addInst(.{
        .tag = tag,
        .data = .{ .no_op = {} },
    });
}

pub fn addUnOp(
    block: *Block,
    tag: Air.Inst.Tag,
    operand: Air.Inst.Ref,
) error{OutOfMemory}!Air.Inst.Ref {
    return block.addInst(.{
        .tag = tag,
        .data = .{ .un_op = operand },
    });
}

pub fn addBr(
    block: *Block,
    target_block: Air.Inst.Index,
    operand: Air.Inst.Ref,
) error{OutOfMemory}!Air.Inst.Ref {
    return block.addInst(.{
        .tag = .br,
        .data = .{ .br = .{
            .block_inst = target_block,
            .operand = operand,
        } },
    });
}

pub fn addBinOp(
    block: *Block,
    tag: Air.Inst.Tag,
    lhs: Air.Inst.Ref,
    rhs: Air.Inst.Ref,
) error{OutOfMemory}!Air.Inst.Ref {
    return block.addInst(.{
        .tag = tag,
        .data = .{ .bin_op = .{
            .lhs = lhs,
            .rhs = rhs,
        } },
    });
}

pub fn addArg(block: *Block, ty: Type, name: u32) error{OutOfMemory}!Air.Inst.Ref {
    return block.addInst(.{
        .tag = .arg,
        .data = .{ .ty_str = .{
            .ty = try block.sema.addType(ty),
            .str = name,
        } },
    });
}

pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref {
    return Air.indexToRef(try block.addInstAsIndex(inst));
}

/// Appends `inst` to both the shared AIR instruction list and this block's
/// instruction list, returning its index. Capacity for both is reserved
/// before either append so the two lists cannot get out of sync on OOM.
pub fn addInstAsIndex(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index {
    const sema = block.sema;
    const gpa = sema.gpa;

    try sema.air_instructions.ensureUnusedCapacity(gpa, 1);
    try block.instructions.ensureUnusedCapacity(gpa, 1);

    const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len);
    sema.air_instructions.appendAssumeCapacity(inst);
    block.instructions.appendAssumeCapacity(result_index);
    return result_index;
}

/// Begins construction of an anonymous Decl backed by a fresh arena.
/// Call `finish` to commit or `deinit` to abandon.
pub fn startAnonDecl(block: *Block) !WipAnonDecl {
    return WipAnonDecl{
        .block = block,
        .new_decl_arena = std.heap.ArenaAllocator.init(block.sema.gpa),
        .finished = false,
    };
}

/// Work-in-progress anonymous Decl: owns its arena until `finish`
/// transfers ownership to the newly created Decl.
pub const WipAnonDecl = struct {
    block: *Scope.Block,
    new_decl_arena: std.heap.ArenaAllocator,
    finished: bool,

    pub fn arena(wad: *WipAnonDecl) *Allocator {
        return &wad.new_decl_arena.allocator;
    }

    pub fn deinit(wad: *WipAnonDecl) void {
        if (!wad.finished) {
            wad.new_decl_arena.deinit();
        }
        wad.* = undefined;
    }

    /// Creates the anonymous Decl and hands the arena over to it.
    pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value) !*Decl {
        const new_decl = try wad.block.sema.mod.createAnonymousDecl(&wad.block.base, .{
            .ty = ty,
            .val = val,
        });
        errdefer wad.block.sema.mod.deleteAnonDecl(&wad.block.base, new_decl);
        try new_decl.finalizeNewArena(&wad.new_decl_arena);
        wad.finished = true;
        return new_decl;
    }
};
};
};

/// This struct holds data necessary to construct API-facing `AllErrors.Message`.
/// Its memory is managed with the general purpose allocator so that they
/// can be created and destroyed in response to incremental updates.
/// In some cases, the Scope.File could have been inferred from where the ErrorMsg
/// is stored.
/// For example, if it is stored in Module.failed_decls, then the Scope.File
/// would be determined by the Decl Scope. However, the data structure contains the field
/// anyway so that `ErrorMsg` can be reused for error notes, which may be in a different
/// file than the parent error message. It also simplifies processing of error messages.
pub const ErrorMsg = struct {
    src_loc: SrcLoc,
    msg: []const u8,
    notes: []ErrorMsg = &.{},

    /// Heap-allocate and initialize an ErrorMsg. Caller owns the result
    /// and must release it with `destroy`.
    pub fn create(
        gpa: *Allocator,
        src_loc: SrcLoc,
        comptime format: []const u8,
        args: anytype,
    ) !*ErrorMsg {
        const err_msg = try gpa.create(ErrorMsg);
        errdefer gpa.destroy(err_msg);
        err_msg.* = try init(gpa, src_loc, format, args);
        return err_msg;
    }

    /// Assumes the ErrorMsg struct and msg were both allocated with `gpa`,
    /// as well as all notes.
    pub fn destroy(err_msg: *ErrorMsg, gpa: *Allocator) void {
        err_msg.deinit(gpa);
        gpa.destroy(err_msg);
    }

    /// Initialize in place; `msg` is formatted with `gpa` and owned by the result.
    pub fn init(
        gpa: *Allocator,
        src_loc: SrcLoc,
        comptime format: []const u8,
        args: anytype,
    ) !ErrorMsg {
        return ErrorMsg{
            .src_loc = src_loc,
            .msg = try std.fmt.allocPrint(gpa, format, args),
        };
    }

    /// Free the message text, all notes (recursively), and the notes array.
    pub fn deinit(err_msg: *ErrorMsg, gpa: *Allocator) void {
        for (err_msg.notes) |*note| {
            note.deinit(gpa);
        }
        gpa.free(err_msg.notes);
        gpa.free(err_msg.msg);
        err_msg.* = undefined;
    }
};

/// Canonical reference to a position within a source file.
pub const SrcLoc = struct {
    file_scope: *Scope.File,
    /// Might be 0 depending on tag of `lazy`.
    parent_decl_node: ast.Node.Index,
    /// Relative to `parent_decl_node`.
    lazy: LazySrcLoc,

    /// First token of the Decl containing this source location.
    /// Assumes the tree is already loaded.
    pub fn declSrcToken(src_loc: SrcLoc) ast.TokenIndex {
        const tree = src_loc.file_scope.tree;
        return tree.firstToken(src_loc.parent_decl_node);
    }

    /// Resolve a Decl-relative node offset to an absolute AST node index.
    /// Return type corrected from `ast.TokenIndex` to `ast.Node.Index`: the
    /// computed value is a node index (note the `@bitCast` target) and every
    /// use below indexes node arrays with it. Both alias `u32`, so callers
    /// are unaffected.
    pub fn declRelativeToNodeIndex(src_loc: SrcLoc, offset: i32) ast.Node.Index {
        return @bitCast(ast.Node.Index, offset + @bitCast(i32, src_loc.parent_decl_node));
    }

    /// Resolve this source location to a byte offset into the source file,
    /// lazily loading and parsing the file as required by the `lazy` tag.
    pub fn byteOffset(src_loc: SrcLoc, gpa: *Allocator) !u32 {
        switch (src_loc.lazy) {
            .unneeded => unreachable,
            .entire_file => return 0,

            .byte_abs => |byte_index| return byte_index,

            .token_abs => |tok_index| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_abs => |node| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const token_starts = tree.tokens.items(.start);
                const tok_index = tree.firstToken(node);
                return token_starts[tok_index];
            },
            .byte_offset => |byte_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const token_starts = tree.tokens.items(.start);
                return token_starts[src_loc.declSrcToken()] + byte_off;
            },
            .token_offset => |tok_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const tok_index = src_loc.declSrcToken() + tok_off;
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset, .node_offset_bin_op => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                assert(src_loc.file_scope.tree_loaded);
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[node];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_back2tok => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const tok_index = tree.firstToken(node) - 2;
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_var_decl_ty => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const node_tags = tree.nodes.items(.tag);
                const full = switch (node_tags[node]) {
                    .global_var_decl => tree.globalVarDecl(node),
                    .local_var_decl => tree.localVarDecl(node),
                    .simple_var_decl => tree.simpleVarDecl(node),
                    .aligned_var_decl => tree.alignedVarDecl(node),
                    else => unreachable,
                };
                // Point at the explicit type expression when present;
                // otherwise fall back to the variable's name token.
                const tok_index = if (full.ast.type_node != 0) blk: {
                    const main_tokens = tree.nodes.items(.main_token);
                    break :blk main_tokens[full.ast.type_node];
                } else blk: {
                    break :blk full.ast.mut_token + 1; // the name token
                };
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_builtin_call_arg0 => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node_datas = tree.nodes.items(.data);
                const node_tags = tree.nodes.items(.tag);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const param = switch (node_tags[node]) {
                    .builtin_call_two, .builtin_call_two_comma => node_datas[node].lhs,
                    .builtin_call, .builtin_call_comma => tree.extra_data[node_datas[node].lhs],
                    else => unreachable,
                };
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[param];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_builtin_call_arg1 => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node_datas = tree.nodes.items(.data);
                const node_tags = tree.nodes.items(.tag);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const param = switch (node_tags[node]) {
                    .builtin_call_two, .builtin_call_two_comma => node_datas[node].rhs,
                    .builtin_call, .builtin_call_comma => tree.extra_data[node_datas[node].lhs + 1],
                    else => unreachable,
                };
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[param];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_array_access_index => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node_datas = tree.nodes.items(.data);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[node_datas[node].rhs];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_slice_sentinel => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node_tags = tree.nodes.items(.tag);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const full = switch (node_tags[node]) {
                    .slice_open => tree.sliceOpen(node),
                    .slice => tree.slice(node),
                    .slice_sentinel => tree.sliceSentinel(node),
                    else => unreachable,
                };
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[full.ast.sentinel];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_call_func => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node_tags = tree.nodes.items(.tag);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                var params: [1]ast.Node.Index = undefined;
                const full = switch (node_tags[node]) {
                    .call_one,
                    .call_one_comma,
                    .async_call_one,
                    .async_call_one_comma,
                    => tree.callOne(&params, node),
                    .call,
                    .call_comma,
                    .async_call,
                    .async_call_comma,
                    => tree.callFull(node),
                    else => unreachable,
                };
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[full.ast.fn_expr];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_field_name => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node_datas = tree.nodes.items(.data);
                const node_tags = tree.nodes.items(.tag);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const tok_index = switch (node_tags[node]) {
                    .field_access => node_datas[node].rhs,
                    else => tree.firstToken(node) - 2,
                };
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_deref_ptr => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node_datas = tree.nodes.items(.data);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const tok_index = node_datas[node].lhs;
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_asm_source => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node_tags = tree.nodes.items(.tag);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const full = switch (node_tags[node]) {
                    .asm_simple => tree.asmSimple(node),
                    .@"asm" => tree.asmFull(node),
                    else => unreachable,
                };
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[full.ast.template];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_asm_ret_ty => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node_tags = tree.nodes.items(.tag);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const full = switch (node_tags[node]) {
                    .asm_simple => tree.asmSimple(node),
                    .@"asm" => tree.asmFull(node),
                    else => unreachable,
                };
                // The return type lives on the first output operand.
                const asm_output = full.outputs[0];
                const node_datas = tree.nodes.items(.data);
                const ret_ty_node = node_datas[asm_output].lhs;
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[ret_ty_node];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_for_cond, .node_offset_if_cond => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const node_tags = tree.nodes.items(.tag);
                const src_node = switch (node_tags[node]) {
                    .if_simple => tree.ifSimple(node).ast.cond_expr,
                    .@"if" => tree.ifFull(node).ast.cond_expr,
                    .while_simple => tree.whileSimple(node).ast.cond_expr,
                    .while_cont => tree.whileCont(node).ast.cond_expr,
                    .@"while" => tree.whileFull(node).ast.cond_expr,
                    .for_simple => tree.forSimple(node).ast.cond_expr,
                    .@"for" => tree.forFull(node).ast.cond_expr,
                    else => unreachable,
                };
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[src_node];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_bin_lhs => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const node_datas = tree.nodes.items(.data);
                const src_node = node_datas[node].lhs;
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[src_node];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_bin_rhs => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const node_datas = tree.nodes.items(.data);
                const src_node = node_datas[node].rhs;
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[src_node];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_switch_operand => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                const node_datas = tree.nodes.items(.data);
                const src_node = node_datas[node].lhs;
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[src_node];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_switch_special_prong => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const switch_node = src_loc.declRelativeToNodeIndex(node_off);
                const node_datas = tree.nodes.items(.data);
                const node_tags = tree.nodes.items(.tag);
                const main_tokens = tree.nodes.items(.main_token);
                const extra = tree.extraData(node_datas[switch_node].rhs, ast.Node.SubRange);
                const case_nodes = tree.extra_data[extra.start..extra.end];
                // Scan for the `else` or `_` prong; caller guarantees one exists.
                for (case_nodes) |case_node| {
                    const case = switch (node_tags[case_node]) {
                        .switch_case_one => tree.switchCaseOne(case_node),
                        .switch_case => tree.switchCase(case_node),
                        else => unreachable,
                    };
                    const is_special = (case.ast.values.len == 0) or
                        (case.ast.values.len == 1 and
                        node_tags[case.ast.values[0]] == .identifier and
                        mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_"));
                    if (!is_special) continue;

                    const tok_index = main_tokens[case_node];
                    const token_starts = tree.tokens.items(.start);
                    return token_starts[tok_index];
                } else unreachable;
            },
            .node_offset_switch_range => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const switch_node = src_loc.declRelativeToNodeIndex(node_off);
                const node_datas = tree.nodes.items(.data);
                const node_tags = tree.nodes.items(.tag);
                const main_tokens = tree.nodes.items(.main_token);
                const extra = tree.extraData(node_datas[switch_node].rhs, ast.Node.SubRange);
                const case_nodes = tree.extra_data[extra.start..extra.end];
                // Scan non-special prongs for the first range item;
                // caller guarantees one exists.
                for (case_nodes) |case_node| {
                    const case = switch (node_tags[case_node]) {
                        .switch_case_one => tree.switchCaseOne(case_node),
                        .switch_case => tree.switchCase(case_node),
                        else => unreachable,
                    };
                    const is_special = (case.ast.values.len == 0) or
                        (case.ast.values.len == 1 and
                        node_tags[case.ast.values[0]] == .identifier and
                        mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_"));
                    if (is_special) continue;

                    for (case.ast.values) |item_node| {
                        if (node_tags[item_node] == .switch_range) {
                            const tok_index = main_tokens[item_node];
                            const token_starts = tree.tokens.items(.start);
                            return token_starts[tok_index];
                        }
                    }
                } else unreachable;
            },
            .node_offset_fn_type_cc => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node_datas = tree.nodes.items(.data);
                const node_tags = tree.nodes.items(.tag);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                var params: [1]ast.Node.Index = undefined;
                const full = switch (node_tags[node]) {
                    .fn_proto_simple => tree.fnProtoSimple(&params, node),
                    .fn_proto_multi => tree.fnProtoMulti(node),
                    .fn_proto_one => tree.fnProtoOne(&params, node),
                    .fn_proto => tree.fnProto(node),
                    // For a fn decl, look through to its prototype node.
                    .fn_decl => switch (node_tags[node_datas[node].lhs]) {
                        .fn_proto_simple => tree.fnProtoSimple(&params, node_datas[node].lhs),
                        .fn_proto_multi => tree.fnProtoMulti(node_datas[node].lhs),
                        .fn_proto_one => tree.fnProtoOne(&params, node_datas[node].lhs),
                        .fn_proto => tree.fnProto(node_datas[node].lhs),
                        else => unreachable,
                    },
                    else => unreachable,
                };
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[full.ast.callconv_expr];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_fn_type_ret_ty => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node_tags = tree.nodes.items(.tag);
                const node = src_loc.declRelativeToNodeIndex(node_off);
                var params: [1]ast.Node.Index = undefined;
                const full = switch (node_tags[node]) {
                    .fn_proto_simple => tree.fnProtoSimple(&params, node),
                    .fn_proto_multi => tree.fnProtoMulti(node),
                    .fn_proto_one => tree.fnProtoOne(&params, node),
                    .fn_proto => tree.fnProto(node),
                    else => unreachable,
                };
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[full.ast.return_type];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_anyframe_type => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node_datas = tree.nodes.items(.data);
                const parent_node = src_loc.declRelativeToNodeIndex(node_off);
                const node = node_datas[parent_node].rhs;
                const main_tokens = tree.nodes.items(.main_token);
                const tok_index = main_tokens[node];
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
            .node_offset_lib_name => |node_off| {
                const tree = try src_loc.file_scope.getTree(gpa);
                const node_datas = tree.nodes.items(.data);
                const node_tags = tree.nodes.items(.tag);
                const parent_node = src_loc.declRelativeToNodeIndex(node_off);
                var params: [1]ast.Node.Index = undefined;
                const full = switch (node_tags[parent_node]) {
                    .fn_proto_simple => tree.fnProtoSimple(&params, parent_node),
                    .fn_proto_multi => tree.fnProtoMulti(parent_node),
                    .fn_proto_one => tree.fnProtoOne(&params, parent_node),
                    .fn_proto => tree.fnProto(parent_node),
                    .fn_decl => blk: {
                        const fn_proto = node_datas[parent_node].lhs;
                        break :blk switch (node_tags[fn_proto]) {
                            .fn_proto_simple => tree.fnProtoSimple(&params, fn_proto),
                            .fn_proto_multi => tree.fnProtoMulti(fn_proto),
                            .fn_proto_one => tree.fnProtoOne(&params, fn_proto),
                            .fn_proto => tree.fnProto(fn_proto),
                            else => unreachable,
                        };
                    },
                    else => unreachable,
                };
                const tok_index = full.lib_name.?;
                const token_starts = tree.tokens.items(.start);
                return token_starts[tok_index];
            },
        }
    }
};

/// Resolving a source location into a byte offset may require doing work
/// that we would rather not do unless the error actually occurs.
/// Therefore we need a data structure that contains the information necessary
/// to lazily produce a `SrcLoc` as required.
/// Most of the offsets in this data structure are relative to the containing Decl.
/// This makes the source location resolve properly even when a Decl gets
/// shifted up or down in the file, as long as the Decl's contents itself
/// do not change.
pub const LazySrcLoc = union(enum) {
    /// When this tag is set, the code that constructed this `LazySrcLoc` is asserting
    /// that all code paths which would need to resolve the source location are
    /// unreachable. If you are debugging this tag incorrectly being this value,
    /// look into using reverse-continue with a memory watchpoint to see where the
    /// value is being set to this tag.
    unneeded,
    /// Means the source location points to an entire file; not any particular
    /// location within the file. `file_scope` union field will be active.
    entire_file,
    /// The source location points to a byte offset within a source file,
    /// offset from 0. The source file is determined contextually.
    /// Inside a `SrcLoc`, the `file_scope` union field will be active.
byte_abs: u32,
/// The source location points to a token within a source file,
/// offset from 0. The source file is determined contextually.
/// Inside a `SrcLoc`, the `file_scope` union field will be active.
token_abs: u32,
/// The source location points to an AST node within a source file,
/// offset from 0. The source file is determined contextually.
/// Inside a `SrcLoc`, the `file_scope` union field will be active.
node_abs: u32,
/// The source location points to a byte offset within a source file,
/// offset from the byte offset of the Decl within the file.
/// The Decl is determined contextually.
byte_offset: u32,
/// This data is the offset into the token list from the Decl token.
/// The Decl is determined contextually.
token_offset: u32,
/// The source location points to an AST node, which is this value offset
/// from its containing Decl node AST index.
/// The Decl is determined contextually.
node_offset: i32,
/// The source location points to two tokens left of the first token of an AST node,
/// which is this value offset from its containing Decl node AST index.
/// The Decl is determined contextually.
node_offset_back2tok: i32,
/// The source location points to a variable declaration type expression,
/// found by taking this AST node index offset from the containing
/// Decl AST node, which points to a variable declaration AST node. Next, navigate
/// to the type expression.
/// The Decl is determined contextually.
node_offset_var_decl_ty: i32,
/// The source location points to a for loop condition expression,
/// found by taking this AST node index offset from the containing
/// Decl AST node, which points to a for loop AST node. Next, navigate
/// to the condition expression.
/// The Decl is determined contextually.
node_offset_for_cond: i32,
/// The source location points to the first parameter of a builtin
/// function call, found by taking this AST node index offset from the containing
/// Decl AST node, which points to a builtin call AST node. Next, navigate
/// to the first parameter.
/// The Decl is determined contextually.
node_offset_builtin_call_arg0: i32,
/// Same as `node_offset_builtin_call_arg0` except arg index 1.
node_offset_builtin_call_arg1: i32,
/// The source location points to the index expression of an array access
/// expression, found by taking this AST node index offset from the containing
/// Decl AST node, which points to an array access AST node. Next, navigate
/// to the index expression.
/// The Decl is determined contextually.
node_offset_array_access_index: i32,
/// The source location points to the sentinel expression of a slice
/// expression, found by taking this AST node index offset from the containing
/// Decl AST node, which points to a slice AST node. Next, navigate
/// to the sentinel expression.
/// The Decl is determined contextually.
node_offset_slice_sentinel: i32,
/// The source location points to the callee expression of a function
/// call expression, found by taking this AST node index offset from the containing
/// Decl AST node, which points to a function call AST node. Next, navigate
/// to the callee expression.
/// The Decl is determined contextually.
node_offset_call_func: i32,
/// The payload is offset from the containing Decl AST node.
/// The source location points to the field name of:
///  * a field access expression (`a.b`), or
///  * the operand ("b" node) of a field initialization expression (`.a = b`)
/// The Decl is determined contextually.
node_offset_field_name: i32,
/// The source location points to the pointer of a pointer deref expression,
/// found by taking this AST node index offset from the containing
/// Decl AST node, which points to a pointer deref AST node. Next, navigate
/// to the pointer expression.
/// The Decl is determined contextually.
node_offset_deref_ptr: i32,
/// The source location points to the assembly source code of an inline assembly
/// expression, found by taking this AST node index offset from the containing
/// Decl AST node, which points to inline assembly AST node. Next, navigate
/// to the asm template source code.
/// The Decl is determined contextually.
node_offset_asm_source: i32,
/// The source location points to the return type of an inline assembly
/// expression, found by taking this AST node index offset from the containing
/// Decl AST node, which points to inline assembly AST node. Next, navigate
/// to the return type expression.
/// The Decl is determined contextually.
node_offset_asm_ret_ty: i32,
/// The source location points to the condition expression of an if
/// expression, found by taking this AST node index offset from the containing
/// Decl AST node, which points to an if expression AST node. Next, navigate
/// to the condition expression.
/// The Decl is determined contextually.
node_offset_if_cond: i32,
/// The source location points to a binary expression, such as `a + b`, found
/// by taking this AST node index offset from the containing Decl AST node.
/// The Decl is determined contextually.
node_offset_bin_op: i32,
/// The source location points to the LHS of a binary expression, found
/// by taking this AST node index offset from the containing Decl AST node,
/// which points to a binary expression AST node. Next, navigate to the LHS.
/// The Decl is determined contextually.
node_offset_bin_lhs: i32,
/// The source location points to the RHS of a binary expression, found
/// by taking this AST node index offset from the containing Decl AST node,
/// which points to a binary expression AST node. Next, navigate to the RHS.
/// The Decl is determined contextually.
node_offset_bin_rhs: i32,
/// The source location points to the operand of a switch expression, found
/// by taking this AST node index offset from the containing Decl AST node,
/// which points to a switch expression AST node. Next, navigate to the operand.
/// The Decl is determined contextually.
node_offset_switch_operand: i32,
/// The source location points to the else/`_` prong of a switch expression, found
/// by taking this AST node index offset from the containing Decl AST node,
/// which points to a switch expression AST node. Next, navigate to the else/`_` prong.
/// The Decl is determined contextually.
node_offset_switch_special_prong: i32,
/// The source location points to all the ranges of a switch expression, found
/// by taking this AST node index offset from the containing Decl AST node,
/// which points to a switch expression AST node. Next, navigate to any of the
/// range nodes. The error applies to all of them.
/// The Decl is determined contextually.
node_offset_switch_range: i32,
/// The source location points to the calling convention of a function type
/// expression, found by taking this AST node index offset from the containing
/// Decl AST node, which points to a function type AST node. Next, navigate to
/// the calling convention node.
/// The Decl is determined contextually.
node_offset_fn_type_cc: i32,
/// The source location points to the return type of a function type
/// expression, found by taking this AST node index offset from the containing
/// Decl AST node, which points to a function type AST node. Next, navigate to
/// the return type node.
/// The Decl is determined contextually.
node_offset_fn_type_ret_ty: i32,
/// The source location points to the type expression of an `anyframe->T`
/// expression, found by taking this AST node index offset from the containing
/// Decl AST node, which points to a `anyframe->T` expression AST node. Next, navigate
/// to the type expression.
/// The Decl is determined contextually.
node_offset_anyframe_type: i32,
/// The source location points to the string literal of `extern "foo"`, found
/// by taking this AST node index offset from the containing
/// Decl AST node, which points to a function prototype or variable declaration
/// expression AST node. Next, navigate to the string literal of the `extern "foo"`.
/// The Decl is determined contextually.
node_offset_lib_name: i32,

/// Upgrade to a `SrcLoc` based on the `Decl` or file in the provided scope.
/// Absolute tags need no parent Decl node; Decl-relative tags pull it from
/// the scope's source Decl.
pub fn toSrcLoc(lazy: LazySrcLoc, scope: *Scope) SrcLoc {
    return switch (lazy) {
        .unneeded,
        .entire_file,
        .byte_abs,
        .token_abs,
        .node_abs,
        => .{
            .file_scope = scope.getFileScope(),
            .parent_decl_node = 0,
            .lazy = lazy,
        },
        .byte_offset,
        .token_offset,
        .node_offset,
        .node_offset_back2tok,
        .node_offset_var_decl_ty,
        .node_offset_for_cond,
        .node_offset_builtin_call_arg0,
        .node_offset_builtin_call_arg1,
        .node_offset_array_access_index,
        .node_offset_slice_sentinel,
        .node_offset_call_func,
        .node_offset_field_name,
        .node_offset_deref_ptr,
        .node_offset_asm_source,
        .node_offset_asm_ret_ty,
        .node_offset_if_cond,
        .node_offset_bin_op,
        .node_offset_bin_lhs,
        .node_offset_bin_rhs,
        .node_offset_switch_operand,
        .node_offset_switch_special_prong,
        .node_offset_switch_range,
        .node_offset_fn_type_cc,
        .node_offset_fn_type_ret_ty,
        .node_offset_anyframe_type,
        .node_offset_lib_name,
        => .{
            .file_scope = scope.getFileScope(),
            .parent_decl_node = scope.srcDecl().?.src_node,
            .lazy = lazy,
        },
    };
}

/// Upgrade to a `SrcLoc` based on the `Decl` provided.
/// Same logic as `toSrcLoc`, but the Decl is given directly rather than
/// looked up through a scope.
pub fn toSrcLocWithDecl(lazy: LazySrcLoc, decl: *Decl) SrcLoc {
    return switch (lazy) {
        .unneeded,
        .entire_file,
        .byte_abs,
        .token_abs,
        .node_abs,
        => .{
            .file_scope = decl.getFileScope(),
            .parent_decl_node = 0,
            .lazy = lazy,
        },
        .byte_offset,
        .token_offset,
        .node_offset,
        .node_offset_back2tok,
        .node_offset_var_decl_ty,
        .node_offset_for_cond,
        .node_offset_builtin_call_arg0,
        .node_offset_builtin_call_arg1,
        .node_offset_array_access_index,
        .node_offset_slice_sentinel,
        .node_offset_call_func,
        .node_offset_field_name,
        .node_offset_deref_ptr,
        .node_offset_asm_source,
        .node_offset_asm_ret_ty,
        .node_offset_if_cond,
        .node_offset_bin_op,
        .node_offset_bin_lhs,
        .node_offset_bin_rhs,
        .node_offset_switch_operand,
        .node_offset_switch_special_prong,
        .node_offset_switch_range,
        .node_offset_fn_type_cc,
        .node_offset_fn_type_ret_ty,
        .node_offset_anyframe_type,
        .node_offset_lib_name,
        => .{
            .file_scope = decl.getFileScope(),
            .parent_decl_node = decl.src_node,
            .lazy = lazy,
        },
    };
}
};

pub const SemaError = error{ OutOfMemory, AnalysisFail };

pub const CompileError = error{
    OutOfMemory,
    /// When this is returned, the compile error for the failure has already been recorded.
    AnalysisFail,
    /// Returned when a compile error needed to be reported but a provided LazySrcLoc was set
    /// to the `unneeded` tag. The source location was, in fact, needed. It is expected that
    /// somewhere up the call stack, the operation will be retried after doing expensive work
    /// to compute a source location.
    NeededSourceLocation,
    /// A Type or Value was needed to be used during semantic analysis, but it was not available
    /// because the function is generic. This is only seen when analyzing the body of a param
    /// instruction.
GenericPoison,
};

/// Tear down the Module, freeing everything it owns.
/// Deallocation must not fail, so this returns void throughout.
pub fn deinit(mod: *Module) void {
    const gpa = mod.gpa;

    // Import table owns both its keys (paths) and values (file scopes).
    for (mod.import_table.keys()) |key| {
        gpa.free(key);
    }
    for (mod.import_table.values()) |value| {
        value.destroy(mod);
    }
    mod.import_table.deinit(gpa);

    mod.deletion_set.deinit(gpa);

    // The callsite of `Compilation.create` owns the `main_pkg`, however
    // Module owns the builtin and std packages that it adds.
    if (mod.main_pkg.table.fetchRemove("builtin")) |kv| {
        gpa.free(kv.key);
        kv.value.destroy(gpa);
    }
    if (mod.main_pkg.table.fetchRemove("std")) |kv| {
        gpa.free(kv.key);
        kv.value.destroy(gpa);
    }
    // "root" maps back to a package owned elsewhere; only the key is freed.
    if (mod.main_pkg.table.fetchRemove("root")) |kv| {
        gpa.free(kv.key);
    }
    if (mod.root_pkg != mod.main_pkg) {
        mod.root_pkg.destroy(gpa);
    }

    mod.compile_log_text.deinit(gpa);

    mod.zig_cache_artifact_directory.handle.close();
    mod.local_zir_cache.handle.close();
    mod.global_zir_cache.handle.close();

    for (mod.failed_decls.values()) |value| {
        value.destroy(gpa);
    }
    mod.failed_decls.deinit(gpa);

    if (mod.emit_h) |emit_h| {
        for (emit_h.failed_decls.values()) |value| {
            value.destroy(gpa);
        }
        emit_h.failed_decls.deinit(gpa);
        emit_h.decl_table.deinit(gpa);
        gpa.destroy(emit_h);
    }

    // failed_files values are optional error messages.
    for (mod.failed_files.values()) |value| {
        if (value) |msg| msg.destroy(gpa);
    }
    mod.failed_files.deinit(gpa);

    for (mod.failed_exports.values()) |value| {
        value.destroy(gpa);
    }
    mod.failed_exports.deinit(gpa);

    mod.compile_log_decls.deinit(gpa);

    for (mod.decl_exports.values()) |export_list| {
        gpa.free(export_list);
    }
    mod.decl_exports.deinit(gpa);

    for (mod.export_owners.values()) |value| {
        freeExportList(gpa, value);
    }
    mod.export_owners.deinit(gpa);

    {
        // Error-set names are heap-allocated copies.
        var it = mod.global_error_set.keyIterator();
        while (it.next()) |key| {
            gpa.free(key.*);
        }
        mod.global_error_set.deinit(gpa);
    }

    mod.error_name_list.deinit(gpa);
    mod.test_functions.deinit(gpa);
    mod.monomorphed_funcs.deinit(gpa);

    {
        // Each memoized call owns its argument array and a promoted arena.
        var it = mod.memoized_calls.iterator();
        while (it.next()) |entry| {
            gpa.free(entry.key_ptr.args);
            entry.value_ptr.arena.promote(gpa).deinit();
        }
        mod.memoized_calls.deinit(gpa);
    }
}

/// Free an export list and everything each export owns.
fn freeExportList(gpa: *Allocator, export_list: []*Export) void {
    for (export_list) |exp| {
        gpa.free(exp.options.name);
        gpa.destroy(exp);
    }
    gpa.free(export_list);
}

// True when Zir.Inst.Data carries a debug safety tag (union is larger than
// its 8 payload bytes).
const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8;
// TODO This is taking advantage of matching stage1 debug union layout.
// We need a better language feature for initializing a union with
// a runtime known tag.
const Stage1DataLayout = extern struct {
    data: [8]u8 align(8),
    safety_tag: u8,
};
comptime {
    if (data_has_safety_tag) {
        assert(@sizeOf(Stage1DataLayout) == @sizeOf(Zir.Inst.Data));
    }
}

/// Run (or load from cache) AstGen for a single file, producing ZIR.
/// NOTE(review): function continues beyond this chunk of the file.
pub fn astGenFile(mod: *Module, file: *Scope.File) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const comp = mod.comp;
    const gpa = mod.gpa;

    // In any case we need to examine the stat of the file to determine the course of action.
    var source_file = try file.pkg.root_src_directory.handle.openFile(file.sub_file_path, .{});
    defer source_file.close();

    const stat = try source_file.stat();

    const want_local_cache = file.pkg == mod.main_pkg;
    // Cache key: compiler version + (for global cache) package root path + sub path.
    const digest = hash: {
        var path_hash: Cache.HashHelper = .{};
        path_hash.addBytes(build_options.version);
        if (!want_local_cache) {
            path_hash.addOptionalBytes(file.pkg.root_src_directory.path);
        }
        path_hash.addBytes(file.sub_file_path);
        break :hash path_hash.final();
    };
    const cache_directory = if (want_local_cache) mod.local_zir_cache else mod.global_zir_cache;
    const zir_dir = cache_directory.handle;

    var cache_file: ?std.fs.File = null;
    defer if (cache_file) |f| f.close();

    // Determine whether we need to reload the file from disk and redo parsing and AstGen.
    switch (file.status) {
        .never_loaded, .retryable_failure => cached: {
            // First, load the cached ZIR code, if any.
            log.debug("AstGen checking cache: {s} (local={}, digest={s})", .{
                file.sub_file_path, want_local_cache, &digest,
            });

            // We ask for a lock in order to coordinate with other zig processes.
            // If another process is already working on this file, we will get the cached
            // version.
Likewise if we're working on AstGen and another process asks for // the cached file, they'll get it. cache_file = zir_dir.openFile(&digest, .{ .lock = .Shared }) catch |err| switch (err) { error.PathAlreadyExists => unreachable, // opening for reading error.NoSpaceLeft => unreachable, // opening for reading error.NotDir => unreachable, // no dir components error.InvalidUtf8 => unreachable, // it's a hex encoded name error.BadPathName => unreachable, // it's a hex encoded name error.NameTooLong => unreachable, // it's a fixed size name error.PipeBusy => unreachable, // it's not a pipe error.WouldBlock => unreachable, // not asking for non-blocking I/O error.SymLinkLoop, error.FileNotFound, error.Unexpected, => break :cached, else => |e| return e, // Retryable errors are handled at callsite. }; // First we read the header to determine the lengths of arrays. const header = cache_file.?.reader().readStruct(Zir.Header) catch |err| switch (err) { // This can happen if Zig bails out of this function between creating // the cached file and writing it. 
error.EndOfStream => break :cached,
                else => |e| return e,
            };

            // The header's stat info is compared against the file on disk to
            // decide whether the cached ZIR is still valid.
            const unchanged_metadata =
                stat.size == header.stat_size and
                stat.mtime == header.stat_mtime and
                stat.inode == header.stat_inode;

            if (!unchanged_metadata) {
                log.debug("AstGen cache stale: {s}", .{file.sub_file_path});
                break :cached;
            }
            log.debug("AstGen cache hit: {s} instructions_len={d}", .{
                file.sub_file_path, header.instructions_len,
            });

            var instructions: std.MultiArrayList(Zir.Inst) = .{};
            defer instructions.deinit(gpa);

            try instructions.setCapacity(gpa, header.instructions_len);
            instructions.len = header.instructions_len;

            var zir: Zir = .{
                .instructions = instructions.toOwnedSlice(),
                .string_bytes = &.{},
                .extra = &.{},
            };
            var keep_zir = false;
            defer if (!keep_zir) zir.deinit(gpa);

            zir.string_bytes = try gpa.alloc(u8, header.string_bytes_len);
            zir.extra = try gpa.alloc(u32, header.extra_len);

            // With a safety tag, the file's 8-byte data elements are read into a
            // staging buffer and expanded into the tagged in-memory layout below.
            const safety_buffer = if (data_has_safety_tag)
                try gpa.alloc([8]u8, header.instructions_len)
            else
                undefined;
            defer if (data_has_safety_tag) gpa.free(safety_buffer);

            const data_ptr = if (data_has_safety_tag)
                @ptrCast([*]u8, safety_buffer.ptr)
            else
                @ptrCast([*]u8, zir.instructions.items(.data).ptr);

            // Vectored read of all four arrays at once: tags (1 byte each),
            // data (8 bytes each), string bytes, extra (4 bytes each).
            var iovecs = [_]std.os.iovec{
                .{
                    .iov_base = @ptrCast([*]u8, zir.instructions.items(.tag).ptr),
                    .iov_len = header.instructions_len,
                },
                .{
                    .iov_base = data_ptr,
                    .iov_len = header.instructions_len * 8,
                },
                .{
                    .iov_base = zir.string_bytes.ptr,
                    .iov_len = header.string_bytes_len,
                },
                .{
                    .iov_base = @ptrCast([*]u8, zir.extra.ptr),
                    .iov_len = header.extra_len * 4,
                },
            };
            const amt_read = try cache_file.?.readvAll(&iovecs);
            // 9 = 1 tag byte + 8 data bytes per instruction.
            const amt_expected = zir.instructions.len * 9 +
                zir.string_bytes.len +
                zir.extra.len * 4;
            if (amt_read != amt_expected) {
                // Short read: treat as a missing/corrupt cache entry and regenerate.
                log.warn("unexpected EOF reading cached ZIR for {s}", .{file.sub_file_path});
                zir.deinit(gpa);
                break :cached;
            }
            if (data_has_safety_tag) {
                const tags = zir.instructions.items(.tag);
                for (zir.instructions.items(.data)) |*data, i| {
                    const union_tag = Zir.Inst.Tag.data_tags[@enumToInt(tags[i])];
                    const as_struct = @ptrCast(*Stage1DataLayout, data);
                    as_struct.* = .{
                        .safety_tag = @enumToInt(union_tag),
                        .data = safety_buffer[i],
                    };
                }
            }

            keep_zir = true;
            file.zir = zir;
            file.zir_loaded = true;
            file.stat_size = header.stat_size;
            file.stat_inode = header.stat_inode;
            file.stat_mtime = header.stat_mtime;
            file.status = .success_zir;
            log.debug("AstGen cached success: {s}", .{file.sub_file_path});

            // TODO don't report compile errors until Sema @importFile
            if (file.zir.hasCompileErrors()) {
                {
                    const lock = comp.mutex.acquire();
                    defer lock.release();
                    try mod.failed_files.putNoClobber(gpa, file, null);
                }
                file.status = .astgen_failure;
                return error.AnalysisFail;
            }
            return;
        },
        .parse_failure, .astgen_failure, .success_zir => {
            const unchanged_metadata =
                stat.size == file.stat_size and
                stat.mtime == file.stat_mtime and
                stat.inode == file.stat_inode;

            if (unchanged_metadata) {
                log.debug("unmodified metadata of file: {s}", .{file.sub_file_path});
                return;
            }

            log.debug("metadata changed: {s}", .{file.sub_file_path});
        },
    }
    // Re-open the cache entry for writing; the shared read handle (if any)
    // is closed first.
    if (cache_file) |f| {
        f.close();
        cache_file = null;
    }
    cache_file = zir_dir.createFile(&digest, .{ .lock = .Exclusive }) catch |err| switch (err) {
        error.NotDir => unreachable, // no dir components
        error.InvalidUtf8 => unreachable, // it's a hex encoded name
        error.BadPathName => unreachable, // it's a hex encoded name
        error.NameTooLong => unreachable, // it's a fixed size name
        error.PipeBusy => unreachable, // it's not a pipe
        error.WouldBlock => unreachable, // not asking for non-blocking I/O
        error.FileNotFound => unreachable, // no dir components

        else => |e| {
            // Failure to save the cache is not fatal; compilation proceeds
            // without a cache entry.
            const pkg_path = file.pkg.root_src_directory.path orelse ".";
            const cache_path = cache_directory.path orelse ".";
            log.warn("unable to save cached ZIR code for {s}/{s} to {s}/{s}: {s}", .{
                pkg_path, file.sub_file_path, cache_path, &digest, @errorName(e),
            });
            return;
        },
    };

    mod.lockAndClearFileCompileError(file);

    // If the previous ZIR does not have compile errors, keep it around
    // in case parsing or new ZIR fails. In case of successful ZIR update
    // at the end of this function we will free it.
    // We keep the previous ZIR loaded so that we can use it
    // for the update next time it does not have any compile errors. This avoids
    // needlessly tossing out semantic analysis work when an error is
    // temporarily introduced.
    if (file.zir_loaded and !file.zir.hasCompileErrors()) {
        assert(file.prev_zir == null);
        const prev_zir_ptr = try gpa.create(Zir);
        file.prev_zir = prev_zir_ptr;
        prev_zir_ptr.* = file.zir;
        file.zir = undefined;
        file.zir_loaded = false;
    }
    file.unload(gpa);

    if (stat.size > std.math.maxInt(u32))
        return error.FileTooBig;

    // Sentinel-terminated source buffer, as required by the parser.
    const source = try gpa.allocSentinel(u8, @intCast(usize, stat.size), 0);
    defer if (!file.source_loaded) gpa.free(source);
    const amt = try source_file.readAll(source);
    if (amt != stat.size)
        return error.UnexpectedEndOfFile;

    file.stat_size = stat.size;
    file.stat_inode = stat.inode;
    file.stat_mtime = stat.mtime;
    file.source = source;
    file.source_loaded = true;

    file.tree = try std.zig.parse(gpa, source);
    defer if (!file.tree_loaded) file.tree.deinit(gpa);

    if (file.tree.errors.len != 0) {
        // Only the first parse error is reported.
        const parse_err = file.tree.errors[0];

        var msg = std.ArrayList(u8).init(gpa);
        defer msg.deinit();

        const token_starts = file.tree.tokens.items(.start);
        const token_tags = file.tree.tokens.items(.tag);

        try file.tree.renderError(parse_err, msg.writer());
        const err_msg = try gpa.create(ErrorMsg);
        err_msg.* = .{
            .src_loc = .{
                .file_scope = file,
                .parent_decl_node = 0,
                .lazy = .{ .byte_abs = token_starts[parse_err.token] },
            },
            .msg = msg.toOwnedSlice(),
        };
        if (token_tags[parse_err.token] == .invalid) {
            // For an invalid token, attach a note pointing at the offending byte.
            const bad_off = @intCast(u32, file.tree.tokenSlice(parse_err.token).len);
            const byte_abs = token_starts[parse_err.token] + bad_off;
            try mod.errNoteNonLazy(.{
                .file_scope = file,
                .parent_decl_node = 0,
                .lazy = .{ .byte_abs = byte_abs },
            }, err_msg, "invalid byte: '{'}'", .{std.zig.fmtEscapes(source[byte_abs..][0..1])});
        }

        {
            const
lock = comp.mutex.acquire();
            defer lock.release();
            try mod.failed_files.putNoClobber(gpa, file, err_msg);
        }
        file.status = .parse_failure;
        return error.AnalysisFail;
    }
    file.tree_loaded = true;

    // Fresh AstGen: produce new ZIR from the parsed tree.
    file.zir = try AstGen.generate(gpa, file.tree);
    file.zir_loaded = true;
    file.status = .success_zir;
    log.debug("AstGen fresh success: {s}", .{file.sub_file_path});

    // Mirror of the read path: when the in-memory layout has a safety tag,
    // strip it into a staging buffer before writing the 8-byte file format.
    const safety_buffer = if (data_has_safety_tag)
        try gpa.alloc([8]u8, file.zir.instructions.len)
    else
        undefined;
    defer if (data_has_safety_tag) gpa.free(safety_buffer);
    const data_ptr = if (data_has_safety_tag)
        if (file.zir.instructions.len == 0)
            @as([*]const u8, undefined)
        else
            @ptrCast([*]const u8, safety_buffer.ptr)
    else
        @ptrCast([*]const u8, file.zir.instructions.items(.data).ptr);
    if (data_has_safety_tag) {
        // The `Data` union has a safety tag but in the file format we store it without.
        for (file.zir.instructions.items(.data)) |*data, i| {
            const as_struct = @ptrCast(*const Stage1DataLayout, data);
            safety_buffer[i] = as_struct.data;
        }
    }

    const header: Zir.Header = .{
        .instructions_len = @intCast(u32, file.zir.instructions.len),
        .string_bytes_len = @intCast(u32, file.zir.string_bytes.len),
        .extra_len = @intCast(u32, file.zir.extra.len),

        .stat_size = stat.size,
        .stat_inode = stat.inode,
        .stat_mtime = stat.mtime,
    };
    var iovecs = [_]std.os.iovec_const{
        .{
            .iov_base = @ptrCast([*]const u8, &header),
            .iov_len = @sizeOf(Zir.Header),
        },
        .{
            .iov_base = @ptrCast([*]const u8, file.zir.instructions.items(.tag).ptr),
            .iov_len = file.zir.instructions.len,
        },
        .{
            .iov_base = data_ptr,
            .iov_len = file.zir.instructions.len * 8,
        },
        .{
            .iov_base = file.zir.string_bytes.ptr,
            .iov_len = file.zir.string_bytes.len,
        },
        .{
            .iov_base = @ptrCast([*]const u8, file.zir.extra.ptr),
            .iov_len = file.zir.extra.len * 4,
        },
    };
    // Cache-write failures are non-fatal; the fresh ZIR is already in memory.
    cache_file.?.writevAll(&iovecs) catch |err| {
        const pkg_path = file.pkg.root_src_directory.path orelse ".";
        const cache_path = cache_directory.path orelse ".";
        log.warn("unable to write cached ZIR code for {s}/{s} to {s}/{s}: {s}", .{
            pkg_path, file.sub_file_path, cache_path, &digest, @errorName(err),
        });
    };

    if (file.zir.hasCompileErrors()) {
        {
            const lock = comp.mutex.acquire();
            defer lock.release();
            try mod.failed_files.putNoClobber(gpa, file, null);
        }
        file.status = .astgen_failure;
        return error.AnalysisFail;
    }

    if (file.prev_zir) |prev_zir| {
        // Iterate over all Namespace objects contained within this File, looking at the
        // previous and new ZIR together and update the references to point
        // to the new one. For example, Decl name, Decl zir_decl_index, and Namespace
        // decl_table keys need to get updated to point to the new memory, even if the
        // underlying source code is unchanged.
        // We do not need to hold any locks at this time because all the Decl and Namespace
        // objects being touched are specific to this File, and the only other concurrent
        // tasks are touching other File objects.
        try updateZirRefs(gpa, file, prev_zir.*);
        // At this point, `file.outdated_decls` and `file.deleted_decls` are populated,
        // and semantic analysis will deal with them properly.
        // No need to keep previous ZIR.
        prev_zir.deinit(gpa);
        gpa.destroy(prev_zir);
        file.prev_zir = null;
    } else if (file.root_decl) |root_decl| {
        // This is an update, but it is the first time the File has succeeded
        // ZIR. We must mark it outdated since we have already tried to
        // semantically analyze it.
        try file.outdated_decls.resize(gpa, 1);
        file.outdated_decls.items[0] = root_decl;
    }
}

/// Patch ups:
/// * Struct.zir_index
/// * Decl.zir_index
/// * Fn.zir_body_inst
/// * Decl.zir_decl_index
fn updateZirRefs(gpa: *Allocator, file: *Scope.File, old_zir: Zir) !void {
    const new_zir = file.zir;

    // Maps from old ZIR to new ZIR, struct_decl, enum_decl, etc. Any instruction which
    // creates a namespace, gets mapped from old to new here.
    var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{};
    defer inst_map.deinit(gpa);

    // Maps from old ZIR to new ZIR, the extra data index for the sub-decl item.
    // e.g. the thing that Decl.zir_decl_index points to.
    var extra_map: std.AutoHashMapUnmanaged(u32, u32) = .{};
    defer extra_map.deinit(gpa);

    try mapOldZirToNew(gpa, old_zir, new_zir, &inst_map, &extra_map);

    // Walk the Decl graph, updating ZIR indexes, strings, and populating
    // the deleted and outdated lists.
    var decl_stack: std.ArrayListUnmanaged(*Decl) = .{};
    defer decl_stack.deinit(gpa);

    const root_decl = file.root_decl.?;
    try decl_stack.append(gpa, root_decl);

    file.deleted_decls.clearRetainingCapacity();
    file.outdated_decls.clearRetainingCapacity();

    // The root decl is always outdated; otherwise we would not have had
    // to re-generate ZIR for the File.
    try file.outdated_decls.append(gpa, root_decl);

    while (decl_stack.popOrNull()) |decl| {
        // Anonymous decls and the root decl have this set to 0. We still need
        // to walk them but we do not need to modify this value.
        // Anonymous decls should not be marked outdated. They will be re-generated
        // if their owner decl is marked outdated.
        if (decl.zir_decl_index != 0) {
            const old_zir_decl_index = decl.zir_decl_index;
            // A decl with no counterpart in the new ZIR has been deleted.
            const new_zir_decl_index = extra_map.get(old_zir_decl_index) orelse {
                log.debug("updateZirRefs {s}: delete {*} ({s})", .{
                    file.sub_file_path, decl, decl.name,
                });
                try file.deleted_decls.append(gpa, decl);
                continue;
            };
            // Compare source hashes under old vs new index to decide outdatedness.
            const old_hash = decl.contentsHashZir(old_zir);
            decl.zir_decl_index = new_zir_decl_index;
            const new_hash = decl.contentsHashZir(new_zir);
            if (!std.zig.srcHashEql(old_hash, new_hash)) {
                log.debug("updateZirRefs {s}: outdated {*} ({s}) {d} => {d}", .{
                    file.sub_file_path, decl, decl.name,
                    old_zir_decl_index, new_zir_decl_index,
                });
                try file.outdated_decls.append(gpa, decl);
            } else {
                log.debug("updateZirRefs {s}: unchanged {*} ({s}) {d} => {d}", .{
                    file.sub_file_path, decl, decl.name,
                    old_zir_decl_index, new_zir_decl_index,
                });
            }
        }

        if (!decl.owns_tv) continue;

        if (decl.getStruct()) |struct_obj| {
            struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse {
                try file.deleted_decls.append(gpa, decl);
continue;
            };
        }

        // Unions and functions likewise carry ZIR indexes that must be
        // remapped; a missing mapping means the decl was deleted.
        if (decl.getUnion()) |union_obj| {
            union_obj.zir_index = inst_map.get(union_obj.zir_index) orelse {
                try file.deleted_decls.append(gpa, decl);
                continue;
            };
        }
        if (decl.getFunction()) |func| {
            func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse {
                try file.deleted_decls.append(gpa, decl);
                continue;
            };
        }

        // Recurse into child namespaces by pushing their decls on the stack.
        if (decl.getInnerNamespace()) |namespace| {
            for (namespace.decls.values()) |sub_decl| {
                try decl_stack.append(gpa, sub_decl);
            }
            for (namespace.anon_decls.keys()) |sub_decl| {
                try decl_stack.append(gpa, sub_decl);
            }
        }
    }
}

/// Matches namespace-creating instructions and decl sub-items between an old
/// and a new ZIR of the same file, filling `inst_map` and `extra_map`.
/// Matching is done by decl name within each matched namespace.
pub fn mapOldZirToNew(
    gpa: *Allocator,
    old_zir: Zir,
    new_zir: Zir,
    inst_map: *std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index),
    extra_map: *std.AutoHashMapUnmanaged(u32, u32),
) Allocator.Error!void {
    // Contain ZIR indexes of declaration instructions.
    const MatchedZirDecl = struct {
        old_inst: Zir.Inst.Index,
        new_inst: Zir.Inst.Index,
    };
    var match_stack: std.ArrayListUnmanaged(MatchedZirDecl) = .{};
    defer match_stack.deinit(gpa);

    // Seed the traversal with the root struct of each ZIR.
    const old_main_struct_inst = old_zir.getMainStruct();
    const new_main_struct_inst = new_zir.getMainStruct();
    try match_stack.append(gpa, .{
        .old_inst = old_main_struct_inst,
        .new_inst = new_main_struct_inst,
    });

    // Scratch lists reused across iterations by findDecls.
    var old_decls = std.ArrayList(Zir.Inst.Index).init(gpa);
    defer old_decls.deinit();
    var new_decls = std.ArrayList(Zir.Inst.Index).init(gpa);
    defer new_decls.deinit();

    while (match_stack.popOrNull()) |match_item| {
        try inst_map.put(gpa, match_item.old_inst, match_item.new_inst);

        // Maps name to extra index of decl sub item.
var decl_map: std.StringHashMapUnmanaged(u32) = .{};
        defer decl_map.deinit(gpa);
        {
            // Index all decls of the old namespace by name.
            var old_decl_it = old_zir.declIterator(match_item.old_inst);
            while (old_decl_it.next()) |old_decl| {
                try decl_map.put(gpa, old_decl.name, old_decl.sub_index);
            }
        }

        // For each decl of the new namespace, pair it with the same-named old
        // decl (if any) and queue up any nested namespaces they declare.
        var new_decl_it = new_zir.declIterator(match_item.new_inst);
        while (new_decl_it.next()) |new_decl| {
            const old_extra_index = decl_map.get(new_decl.name) orelse continue;
            const new_extra_index = new_decl.sub_index;
            try extra_map.put(gpa, old_extra_index, new_extra_index);

            try old_zir.findDecls(&old_decls, old_extra_index);
            try new_zir.findDecls(&new_decls, new_extra_index);
            // Pair up nested namespace instructions positionally, stopping at
            // the shorter of the two lists.
            var i: usize = 0;
            while (true) : (i += 1) {
                if (i >= old_decls.items.len) break;
                if (i >= new_decls.items.len) break;
                try match_stack.append(gpa, .{
                    .old_inst = old_decls.items[i],
                    .new_inst = new_decls.items[i],
                });
            }
        }
    }
}

/// Ensures `decl` has completed semantic analysis, (re-)running it when the
/// decl is unreferenced or outdated. Returns `error.AnalysisFail` when the
/// decl (or one of its dependencies) failed analysis.
pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const subsequent_analysis = switch (decl.analysis) {
        .in_progress => unreachable,

        .file_failure,
        .sema_failure,
        .sema_failure_retryable,
        .codegen_failure,
        .dependency_failure,
        .codegen_failure_retryable,
        => return error.AnalysisFail,

        .complete => return,

        .outdated => blk: {
            log.debug("re-analyzing {*} ({s})", .{ decl, decl.name });

            // The exports this Decl performs will be re-discovered, so we remove them here
            // prior to re-analysis.
            mod.deleteDeclExports(decl);
            // Dependencies will be re-discovered, so we remove them here prior to re-analysis.
            for (decl.dependencies.keys()) |dep| {
                dep.removeDependant(decl);
                if (dep.dependants.count() == 0 and !dep.deletion_flag) {
                    log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{
                        decl, decl.name, dep, dep.name,
                    });
                    // We don't perform a deletion here, because this Decl or another one
                    // may end up referencing it before the update is complete.
dep.deletion_flag = true;
                    try mod.deletion_set.put(mod.gpa, dep, {});
                }
            }
            decl.dependencies.clearRetainingCapacity();

            break :blk true;
        },

        .unreferenced => false,
    };

    const type_changed = mod.semaDecl(decl) catch |err| switch (err) {
        error.AnalysisFail => {
            if (decl.analysis == .in_progress) {
                // If this decl caused the compile error, the analysis field would
                // be changed to indicate it was this Decl's fault. Because this
                // did not happen, we infer here that it was a dependency failure.
                decl.analysis = .dependency_failure;
            }
            return error.AnalysisFail;
        },
        error.NeededSourceLocation => unreachable,
        error.GenericPoison => unreachable,
        else => |e| {
            // Unexpected error (e.g. OutOfMemory): record a retryable failure
            // with a diagnostic attached to the decl.
            decl.analysis = .sema_failure_retryable;
            try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1);
            mod.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
                mod.gpa,
                decl.srcLoc(),
                "unable to analyze: {s}",
                .{@errorName(e)},
            ));
            return error.AnalysisFail;
        },
    };

    if (subsequent_analysis) {
        // We may need to chase the dependants and re-analyze them.
        // However, if the decl is a function, and the type is the same, we do not need to.
        if (type_changed or decl.ty.zigTypeTag() != .Fn) {
            for (decl.dependants.keys()) |dep| {
                switch (dep.analysis) {
                    .unreferenced => unreachable,
                    .in_progress => continue, // already doing analysis, ok
                    .outdated => continue, // already queued for update

                    .file_failure,
                    .dependency_failure,
                    .sema_failure,
                    .sema_failure_retryable,
                    .codegen_failure,
                    .codegen_failure_retryable,
                    .complete,
                    => if (dep.generation != mod.generation) {
                        try mod.markOutdatedDecl(dep);
                    },
                }
            }
        }
    }
}

/// Ensures the root Decl of a package's root source file is semantically analyzed.
pub fn semaPkg(mod: *Module, pkg: *Package) !void {
    const file = (try mod.importPkg(pkg)).file;
    return mod.semaFile(file);
}

/// Regardless of the file status, will create a `Decl` so that we
/// can track dependencies and re-analyze when the file becomes outdated.
pub fn semaFile(mod: *Module, file: *Scope.File) SemaError!void {
    const tracy = trace(@src());
    defer tracy.end();

    // Idempotent: a File gets exactly one root Decl.
    if (file.root_decl != null) return;

    const gpa = mod.gpa;
    // The root struct, its type/value, and the Decl's data all live in one
    // arena owned by the new Decl (finalized at the end of this function).
    var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
    errdefer new_decl_arena.deinit();

    const struct_obj = try new_decl_arena.allocator.create(Module.Struct);
    const struct_ty = try Type.Tag.@"struct".create(&new_decl_arena.allocator, struct_obj);
    const struct_val = try Value.Tag.ty.create(&new_decl_arena.allocator, struct_ty);
    struct_obj.* = .{
        .owner_decl = undefined, // set below
        .fields = .{},
        .node_offset = 0, // it's the struct for the root file
        .zir_index = undefined, // set below
        .layout = .Auto,
        .status = .none,
        .known_has_bits = undefined,
        .namespace = .{
            .parent = null,
            .ty = struct_ty,
            .file_scope = file,
        },
    };
    const new_decl = try mod.allocateNewDecl(&struct_obj.namespace, 0);
    file.root_decl = new_decl;
    struct_obj.owner_decl = new_decl;
    new_decl.src_line = 0;
    new_decl.name = try file.fullyQualifiedNameZ(gpa);
    new_decl.is_pub = true;
    new_decl.is_exported = false;
    new_decl.has_align = false;
    new_decl.has_linksection = false;
    new_decl.ty = struct_ty;
    new_decl.val = struct_val;
    new_decl.has_tv = true;
    new_decl.owns_tv = true;
    new_decl.alive = true; // This Decl corresponds to a File and is therefore always alive.
new_decl.analysis = .in_progress;
    new_decl.generation = mod.generation;

    if (file.status == .success_zir) {
        assert(file.zir_loaded);
        const main_struct_inst = file.zir.getMainStruct();
        struct_obj.zir_index = main_struct_inst;

        // Scratch arena for this Sema pass only; freed before returning.
        var sema_arena = std.heap.ArenaAllocator.init(gpa);
        defer sema_arena.deinit();

        var sema: Sema = .{
            .mod = mod,
            .gpa = gpa,
            .arena = &sema_arena.allocator,
            .code = file.zir,
            .owner_decl = new_decl,
            .namespace = &struct_obj.namespace,
            .func = null,
            .fn_ret_ty = Type.initTag(.void),
            .owner_func = null,
        };
        defer sema.deinit();
        var block_scope: Scope.Block = .{
            .parent = null,
            .sema = &sema,
            .src_decl = new_decl,
            .instructions = .{},
            .inlining = null,
            .is_comptime = true,
        };
        defer block_scope.instructions.deinit(gpa);

        if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_obj)) |_| {
            new_decl.analysis = .complete;
        } else |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
            // AnalysisFail is swallowed here; the decl stays .in_progress and
            // the error was recorded elsewhere.
            error.AnalysisFail => {},
        }
    } else {
        new_decl.analysis = .file_failure;
    }

    try new_decl.finalizeNewArena(&new_decl_arena);
}

/// Returns `true` if the Decl type changed.
/// Returns `true` if this is the first time analyzing the Decl.
/// Returns `false` otherwise.
fn semaDecl(mod: *Module, decl: *Decl) !bool {
    const tracy = trace(@src());
    defer tracy.end();

    if (decl.namespace.file_scope.status != .success_zir) {
        return error.AnalysisFail;
    }

    const gpa = mod.gpa;
    const zir = decl.namespace.file_scope.zir;
    const zir_datas = zir.instructions.items(.data);

    decl.analysis = .in_progress;

    // Scratch arena for analysis; results that must outlive it are copied
    // into the decl's own arena further below.
    var analysis_arena = std.heap.ArenaAllocator.init(gpa);
    defer analysis_arena.deinit();

    var sema: Sema = .{
        .mod = mod,
        .gpa = gpa,
        .arena = &analysis_arena.allocator,
        .code = zir,
        .owner_decl = decl,
        .namespace = decl.namespace,
        .func = null,
        .fn_ret_ty = Type.initTag(.void),
        .owner_func = null,
    };
    defer sema.deinit();

    if (decl.isRoot()) {
        log.debug("semaDecl root {*} ({s})", .{ decl, decl.name });
        const main_struct_inst = zir.getMainStruct();
        const struct_obj = decl.getStruct().?;
        // This might not have gotten set in `semaFile` if the first time had
        // a ZIR failure, so we set it here in case.
        struct_obj.zir_index = main_struct_inst;
        try sema.analyzeStructDecl(decl, main_struct_inst, struct_obj);
        decl.analysis = .complete;
        decl.generation = mod.generation;
        return false;
    }
    log.debug("semaDecl {*} ({s})", .{ decl, decl.name });

    var block_scope: Scope.Block = .{
        .parent = null,
        .sema = &sema,
        .src_decl = decl,
        .instructions = .{},
        .inlining = null,
        .is_comptime = true,
    };
    defer {
        block_scope.instructions.deinit(gpa);
        block_scope.params.deinit(gpa);
    }

    // A decl's value is computed by analyzing its ZIR block and taking the
    // operand of the final `break`.
    const zir_block_index = decl.zirBlockIndex();
    const inst_data = zir_datas[zir_block_index].pl_node;
    const extra = zir.extraData(Zir.Inst.Block, inst_data.payload_index);
    const body = zir.extra[extra.end..][0..extra.data.body_len];
    const break_index = try sema.analyzeBody(&block_scope, body);
    const result_ref = zir_datas[break_index].@"break".operand;
    const src: LazySrcLoc = .{ .node_offset = 0 };
    const decl_tv = try sema.resolveInstValue(&block_scope, src, result_ref);
    // align/linksection default to null_value when not specified in ZIR.
    const align_val = blk: {
        const align_ref = decl.zirAlignRef();
        if (align_ref == .none) break :blk Value.initTag(.null_value);
        break :blk (try sema.resolveInstConst(&block_scope, src, align_ref)).val;
    };
    const linksection_val = blk: {
        const linksection_ref = decl.zirLinksectionRef();
        if (linksection_ref == .none) break :blk Value.initTag(.null_value);
        break :blk (try sema.resolveInstConst(&block_scope, src, linksection_ref)).val;
    };

    // Note this resolves the type of the Decl, not the value; if this Decl
    // is a struct, for example, this resolves `type` (which needs no resolution),
    // not the struct itself.
    try sema.resolveTypeLayout(&block_scope, src, decl_tv.ty);

    // We need the memory for the Type to go into the arena for the Decl
    var decl_arena = std.heap.ArenaAllocator.init(gpa);
    errdefer decl_arena.deinit();
    const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);

    if (decl_tv.val.castTag(.function)) |fn_payload| {
        const func = fn_payload.data;
        const owns_tv = func.owner_decl == decl;
        if (owns_tv) {
            var prev_type_has_bits = false;
            var prev_is_inline = false;
            var type_changed = true;

            if (decl.has_tv) {
                prev_type_has_bits = decl.ty.hasCodeGenBits();
                type_changed = !decl.ty.eql(decl_tv.ty);
                if (decl.getFunction()) |prev_func| {
                    prev_is_inline = prev_func.state == .inline_only;
                }
                decl.clearValues(gpa);
            }

            // Copy the analyzed type/value out of the scratch arena into the
            // decl-owned arena.
            decl.ty = try decl_tv.ty.copy(&decl_arena.allocator);
            decl.val = try decl_tv.val.copy(&decl_arena.allocator);
            decl.align_val = try align_val.copy(&decl_arena.allocator);
            decl.linksection_val = try linksection_val.copy(&decl_arena.allocator);
            decl.has_tv = true;
            decl.owns_tv = owns_tv;
            decl_arena_state.* = decl_arena.state;
            decl.value_arena = decl_arena_state;
            decl.analysis = .complete;
            decl.generation = mod.generation;

            const is_inline = decl_tv.ty.fnCallingConvention() == .Inline;
            if (!is_inline and decl_tv.ty.hasCodeGenBits()) {
                // We don't fully codegen the decl until later, but we do need to reserve a global
                // offset table index for it. This allows us to codegen decls out of dependency
                // order, increasing how many computations can be done in parallel.
try mod.comp.bin_file.allocateDeclIndexes(decl);
                try mod.comp.work_queue.writeItem(.{ .codegen_func = func });
                if (type_changed and mod.emit_h != null) {
                    try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl });
                }
            } else if (!prev_is_inline and prev_type_has_bits) {
                // Previously had code, now inline/zero-bit: release linker resources.
                mod.comp.bin_file.freeDecl(decl);
            }

            if (decl.is_exported) {
                const export_src = src; // TODO make this point at `export` token
                if (is_inline) {
                    return mod.fail(&block_scope.base, export_src, "export of inline function", .{});
                }
                // The scope needs to have the decl in it.
                try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl);
            }
            return type_changed or is_inline != prev_is_inline;
        }
    }

    // Non-function (or non-owning) path.
    var type_changed = true;
    if (decl.has_tv) {
        type_changed = !decl.ty.eql(decl_tv.ty);
        decl.clearValues(gpa);
    }

    decl.owns_tv = false;
    var queue_linker_work = false;
    if (decl_tv.val.castTag(.variable)) |payload| {
        const variable = payload.data;
        if (variable.owner_decl == decl) {
            decl.owns_tv = true;
            queue_linker_work = true;

            const copied_init = try variable.init.copy(&decl_arena.allocator);
            variable.init = copied_init;
        }
    } else if (decl_tv.val.castTag(.extern_fn)) |payload| {
        const owner_decl = payload.data;
        if (decl == owner_decl) {
            decl.owns_tv = true;
            queue_linker_work = true;
        }
    }

    decl.ty = try decl_tv.ty.copy(&decl_arena.allocator);
    decl.val = try decl_tv.val.copy(&decl_arena.allocator);
    decl.align_val = try align_val.copy(&decl_arena.allocator);
    decl.linksection_val = try linksection_val.copy(&decl_arena.allocator);
    decl.has_tv = true;
    decl_arena_state.* = decl_arena.state;
    decl.value_arena = decl_arena_state;
    decl.analysis = .complete;
    decl.generation = mod.generation;

    if (queue_linker_work and decl.ty.hasCodeGenBits()) {
        try mod.comp.bin_file.allocateDeclIndexes(decl);
        try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl });

        if (type_changed and mod.emit_h != null) {
            try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl });
        }
    } else if (decl_tv.ty.zigTypeTag() == .Type) {
        // In case this Decl is a struct or union, we need to resolve the fields
        // while we still have the `Sema` in scope, so that the field type expressions
        // can use the resolved AIR instructions that they possibly reference.
        // We do this after the decl is populated and set to `complete` so that a `Decl`
        // may reference itself.
        var buffer: Value.ToTypeBuffer = undefined;
        const ty = decl.val.toType(&buffer);
        try sema.resolveDeclFields(&block_scope, src, ty);
    }

    if (decl.is_exported) {
        const export_src = src; // TODO point to the export token
        // The scope needs to have the decl in it.
        try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl);
    }

    return type_changed;
}

/// Returns the depender's index of the dependee.
pub fn declareDeclDependency(mod: *Module, depender: *Decl, dependee: *Decl) !void {
    if (depender == dependee) return;

    log.debug("{*} ({s}) depends on {*} ({s})", .{
        depender, depender.name, dependee, dependee.name,
    });

    // Reserve capacity in both maps up front so the two puts below cannot fail,
    // keeping the two sides of the edge in sync.
    try depender.dependencies.ensureUnusedCapacity(mod.gpa, 1);
    try dependee.dependants.ensureUnusedCapacity(mod.gpa, 1);

    if (dependee.deletion_flag) {
        // A new reference rescues the dependee from pending deletion.
        dependee.deletion_flag = false;
        assert(mod.deletion_set.swapRemove(dependee));
    }

    dependee.dependants.putAssumeCapacity(depender, {});
    depender.dependencies.putAssumeCapacity(dependee, {});
}

pub const ImportFileResult = struct {
    file: *Scope.File,
    // True when the File object was created by this call (caller should
    // schedule AstGen for it); false when it already existed in import_table.
    is_new: bool,
};

/// Looks up (or creates) the File for a package's root source file.
pub fn importPkg(mod: *Module, pkg: *Package) !ImportFileResult {
    const gpa = mod.gpa;

    // The resolved path is used as the key in the import table, to detect if
    // an import refers to the same as another, despite different relative paths
    // or differently mapped package names.
const resolved_path = try std.fs.path.resolve(gpa, &[_][]const u8{
        pkg.root_src_directory.path orelse ".", pkg.root_src_path,
    });
    var keep_resolved_path = false;
    defer if (!keep_resolved_path) gpa.free(resolved_path);

    const gop = try mod.import_table.getOrPut(gpa, resolved_path);
    if (gop.found_existing) return ImportFileResult{
        .file = gop.value_ptr.*,
        .is_new = false,
    };
    keep_resolved_path = true; // It's now owned by import_table.

    const sub_file_path = try gpa.dupe(u8, pkg.root_src_path);
    errdefer gpa.free(sub_file_path);

    const new_file = try gpa.create(Scope.File);
    errdefer gpa.destroy(new_file);

    gop.value_ptr.* = new_file;
    new_file.* = .{
        .sub_file_path = sub_file_path,
        .source = undefined,
        .source_loaded = false,
        .tree_loaded = false,
        .zir_loaded = false,
        .stat_size = undefined,
        .stat_inode = undefined,
        .stat_mtime = undefined,
        .tree = undefined,
        .zir = undefined,
        .status = .never_loaded,
        .pkg = pkg,
        .root_decl = null,
    };
    return ImportFileResult{
        .file = new_file,
        .is_new = true,
    };
}

/// Resolves an `@import` string relative to `cur_file`, returning the
/// (possibly newly created) File. Package names are checked first; otherwise
/// the string must end in ".zig" and stay within the package path.
pub fn importFile(
    mod: *Module,
    cur_file: *Scope.File,
    import_string: []const u8,
) !ImportFileResult {
    if (cur_file.pkg.table.get(import_string)) |pkg| {
        return mod.importPkg(pkg);
    }
    if (!mem.endsWith(u8, import_string, ".zig")) {
        return error.PackageNotFound;
    }
    const gpa = mod.gpa;

    // The resolved path is used as the key in the import table, to detect if
    // an import refers to the same as another, despite different relative paths
    // or differently mapped package names.
    const cur_pkg_dir_path = cur_file.pkg.root_src_directory.path orelse ".";
    const resolved_path = try std.fs.path.resolve(gpa, &[_][]const u8{
        cur_pkg_dir_path, cur_file.sub_file_path, "..", import_string,
    });
    var keep_resolved_path = false;
    defer if (!keep_resolved_path) gpa.free(resolved_path);

    const gop = try mod.import_table.getOrPut(gpa, resolved_path);
    if (gop.found_existing) return ImportFileResult{
        .file = gop.value_ptr.*,
        .is_new = false,
    };
    keep_resolved_path = true; // It's now owned by import_table.
const new_file = try gpa.create(Scope.File); errdefer gpa.destroy(new_file); const resolved_root_path = try std.fs.path.resolve(gpa, &[_][]const u8{cur_pkg_dir_path}); defer gpa.free(resolved_root_path); if (!mem.startsWith(u8, resolved_path, resolved_root_path)) { return error.ImportOutsidePkgPath; } // +1 for the directory separator here. const sub_file_path = try gpa.dupe(u8, resolved_path[resolved_root_path.len + 1 ..]); errdefer gpa.free(sub_file_path); log.debug("new importFile. resolved_root_path={s}, resolved_path={s}, sub_file_path={s}, import_string={s}", .{ resolved_root_path, resolved_path, sub_file_path, import_string, }); gop.value_ptr.* = new_file; new_file.* = .{ .sub_file_path = sub_file_path, .source = undefined, .source_loaded = false, .tree_loaded = false, .zir_loaded = false, .stat_size = undefined, .stat_inode = undefined, .stat_mtime = undefined, .tree = undefined, .zir = undefined, .status = .never_loaded, .pkg = cur_file.pkg, .root_decl = null, }; return ImportFileResult{ .file = new_file, .is_new = true, }; } pub fn scanNamespace( mod: *Module, namespace: *Scope.Namespace, extra_start: usize, decls_len: u32, parent_decl: *Decl, ) SemaError!usize { const tracy = trace(@src()); defer tracy.end(); const gpa = mod.gpa; const zir = namespace.file_scope.zir; try mod.comp.work_queue.ensureUnusedCapacity(decls_len); try namespace.decls.ensureCapacity(gpa, decls_len); const bit_bags_count = std.math.divCeil(usize, decls_len, 8) catch unreachable; var extra_index = extra_start + bit_bags_count; var bit_bag_index: usize = extra_start; var cur_bit_bag: u32 = undefined; var decl_i: u32 = 0; var scan_decl_iter: ScanDeclIter = .{ .module = mod, .namespace = namespace, .parent_decl = parent_decl, }; while (decl_i < decls_len) : (decl_i += 1) { if (decl_i % 8 == 0) { cur_bit_bag = zir.extra[bit_bag_index]; bit_bag_index += 1; } const flags = @truncate(u4, cur_bit_bag); cur_bit_bag >>= 4; const decl_sub_index = extra_index; extra_index += 7; // src_hash(4) 
+ line(1) + name(1) + value(1) extra_index += @truncate(u1, flags >> 2); extra_index += @truncate(u1, flags >> 3); try scanDecl(&scan_decl_iter, decl_sub_index, flags); } return extra_index; } const ScanDeclIter = struct { module: *Module, namespace: *Scope.Namespace, parent_decl: *Decl, usingnamespace_index: usize = 0, comptime_index: usize = 0, unnamed_test_index: usize = 0, }; fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!void { const tracy = trace(@src()); defer tracy.end(); const mod = iter.module; const namespace = iter.namespace; const gpa = mod.gpa; const zir = namespace.file_scope.zir; // zig fmt: off const is_pub = (flags & 0b0001) != 0; const is_exported = (flags & 0b0010) != 0; const has_align = (flags & 0b0100) != 0; const has_linksection = (flags & 0b1000) != 0; // zig fmt: on const line = iter.parent_decl.relativeToLine(zir.extra[decl_sub_index + 4]); const decl_name_index = zir.extra[decl_sub_index + 5]; const decl_index = zir.extra[decl_sub_index + 6]; const decl_block_inst_data = zir.instructions.items(.data)[decl_index].pl_node; const decl_node = iter.parent_decl.relativeToNodeIndex(decl_block_inst_data.src_node); // Every Decl needs a name. 
var is_named_test = false; const decl_name: [:0]const u8 = switch (decl_name_index) { 0 => name: { if (is_exported) { const i = iter.usingnamespace_index; iter.usingnamespace_index += 1; break :name try std.fmt.allocPrintZ(gpa, "usingnamespace_{d}", .{i}); } else { const i = iter.comptime_index; iter.comptime_index += 1; break :name try std.fmt.allocPrintZ(gpa, "comptime_{d}", .{i}); } }, 1 => name: { const i = iter.unnamed_test_index; iter.unnamed_test_index += 1; break :name try std.fmt.allocPrintZ(gpa, "test_{d}", .{i}); }, else => name: { const raw_name = zir.nullTerminatedString(decl_name_index); if (raw_name.len == 0) { is_named_test = true; const test_name = zir.nullTerminatedString(decl_name_index + 1); break :name try std.fmt.allocPrintZ(gpa, "test.{s}", .{test_name}); } else { break :name try gpa.dupeZ(u8, raw_name); } }, }; // We create a Decl for it regardless of analysis status. const gop = try namespace.decls.getOrPut(gpa, decl_name); if (!gop.found_existing) { const new_decl = try mod.allocateNewDecl(namespace, decl_node); log.debug("scan new {*} ({s}) into {*}", .{ new_decl, decl_name, namespace }); new_decl.src_line = line; new_decl.name = decl_name; gop.value_ptr.* = new_decl; // Exported decls, comptime decls, usingnamespace decls, and // test decls if in test mode, get analyzed. const decl_pkg = namespace.file_scope.pkg; const want_analysis = is_exported or switch (decl_name_index) { 0 => true, // comptime decl 1 => blk: { // test decl with no name. Skip the part where we check against // the test name filter. 
if (!mod.comp.bin_file.options.is_test) break :blk false; if (decl_pkg != mod.main_pkg) break :blk false; try mod.test_functions.put(gpa, new_decl, {}); break :blk true; }, else => blk: { if (!is_named_test) break :blk false; if (!mod.comp.bin_file.options.is_test) break :blk false; if (decl_pkg != mod.main_pkg) break :blk false; // TODO check the name against --test-filter try mod.test_functions.put(gpa, new_decl, {}); break :blk true; }, }; if (want_analysis) { mod.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); } new_decl.is_pub = is_pub; new_decl.is_exported = is_exported; new_decl.has_align = has_align; new_decl.has_linksection = has_linksection; new_decl.zir_decl_index = @intCast(u32, decl_sub_index); new_decl.alive = true; // This Decl corresponds to an AST node and therefore always alive. return; } gpa.free(decl_name); const decl = gop.value_ptr.*; log.debug("scan existing {*} ({s}) of {*}", .{ decl, decl.name, namespace }); // Update the AST node of the decl; even if its contents are unchanged, it may // have been re-ordered. decl.src_node = decl_node; decl.src_line = line; decl.is_pub = is_pub; decl.is_exported = is_exported; decl.has_align = has_align; decl.has_linksection = has_linksection; decl.zir_decl_index = @intCast(u32, decl_sub_index); if (decl.getFunction()) |_| { switch (mod.comp.bin_file.tag) { .coff => { // TODO Implement for COFF }, .elf => if (decl.fn_link.elf.len != 0) { // TODO Look into detecting when this would be unnecessary by storing enough state // in `Decl` to notice that the line number did not change. mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl }); }, .macho => if (decl.fn_link.macho.len != 0) { // TODO Look into detecting when this would be unnecessary by storing enough state // in `Decl` to notice that the line number did not change. 
mod.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl }); }, .plan9 => { // TODO implement for plan9 }, .c, .wasm, .spirv => {}, } } } /// Make it as if the semantic analysis for this Decl never happened. pub fn clearDecl( mod: *Module, decl: *Decl, outdated_decls: ?*std.AutoArrayHashMap(*Decl, void), ) Allocator.Error!void { const tracy = trace(@src()); defer tracy.end(); log.debug("clearing {*} ({s})", .{ decl, decl.name }); const gpa = mod.gpa; try mod.deletion_set.ensureUnusedCapacity(gpa, decl.dependencies.count()); if (outdated_decls) |map| { _ = map.swapRemove(decl); try map.ensureUnusedCapacity(decl.dependants.count()); } // Remove itself from its dependencies. for (decl.dependencies.keys()) |dep| { dep.removeDependant(decl); if (dep.dependants.count() == 0 and !dep.deletion_flag) { log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{ decl, decl.name, dep, dep.name, }); // We don't recursively perform a deletion here, because during the update, // another reference to it may turn up. dep.deletion_flag = true; mod.deletion_set.putAssumeCapacity(dep, {}); } } decl.dependencies.clearRetainingCapacity(); // Anything that depends on this deleted decl needs to be re-analyzed. for (decl.dependants.keys()) |dep| { dep.removeDependency(decl); if (outdated_decls) |map| { map.putAssumeCapacity(dep, {}); } else if (std.debug.runtime_safety) { // If `outdated_decls` is `null`, it means we're being called from // `Compilation` after `performAllTheWork` and we cannot queue up any // more work. `dep` must necessarily be another Decl that is no longer // being referenced, and will be in the `deletion_set`. Otherwise, // something has gone wrong. 
assert(mod.deletion_set.contains(dep)); } } decl.dependants.clearRetainingCapacity(); if (mod.failed_decls.fetchSwapRemove(decl)) |kv| { kv.value.destroy(gpa); } if (mod.emit_h) |emit_h| { if (emit_h.failed_decls.fetchSwapRemove(decl)) |kv| { kv.value.destroy(gpa); } assert(emit_h.decl_table.swapRemove(decl)); } _ = mod.compile_log_decls.swapRemove(decl); mod.deleteDeclExports(decl); if (decl.has_tv) { if (decl.ty.hasCodeGenBits()) { mod.comp.bin_file.freeDecl(decl); // TODO instead of a union, put this memory trailing Decl objects, // and allow it to be variably sized. decl.link = switch (mod.comp.bin_file.tag) { .coff => .{ .coff = link.File.Coff.TextBlock.empty }, .elf => .{ .elf = link.File.Elf.TextBlock.empty }, .macho => .{ .macho = link.File.MachO.TextBlock.empty }, .plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty }, .c => .{ .c = link.File.C.DeclBlock.empty }, .wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty }, .spirv => .{ .spirv = {} }, }; decl.fn_link = switch (mod.comp.bin_file.tag) { .coff => .{ .coff = {} }, .elf => .{ .elf = link.File.Elf.SrcFn.empty }, .macho => .{ .macho = link.File.MachO.SrcFn.empty }, .plan9 => .{ .plan9 = {} }, .c => .{ .c = link.File.C.FnBlock.empty }, .wasm => .{ .wasm = link.File.Wasm.FnData.empty }, .spirv => .{ .spirv = .{} }, }; } if (decl.getInnerNamespace()) |namespace| { try namespace.deleteAllDecls(mod, outdated_decls); } decl.clearValues(gpa); } if (decl.deletion_flag) { decl.deletion_flag = false; assert(mod.deletion_set.swapRemove(decl)); } decl.analysis = .unreferenced; } pub fn deleteUnusedDecl(mod: *Module, decl: *Decl) void { log.debug("deleteUnusedDecl {*} ({s})", .{ decl, decl.name }); // TODO: remove `allocateDeclIndexes` and make the API that the linker backends // are required to notice the first time `updateDecl` happens and keep track // of it themselves. However they can rely on getting a `freeDecl` call if any // `updateDecl` or `updateFunc` calls happen. 
This will allow us to avoid any call // into the linker backend here, since the linker backend will never have been told // about the Decl in the first place. // Until then, we did call `allocateDeclIndexes` on this anonymous Decl and so we // must call `freeDecl` in the linker backend now. if (decl.has_tv) { if (decl.ty.hasCodeGenBits()) { mod.comp.bin_file.freeDecl(decl); } } const dependants = decl.dependants.keys(); assert(dependants[0].namespace.anon_decls.swapRemove(decl)); for (dependants) |dep| { dep.removeDependency(decl); } for (decl.dependencies.keys()) |dep| { dep.removeDependant(decl); } decl.destroy(mod); } pub fn deleteAnonDecl(mod: *Module, scope: *Scope, decl: *Decl) void { log.debug("deleteAnonDecl {*} ({s})", .{ decl, decl.name }); const scope_decl = scope.ownerDecl().?; assert(scope_decl.namespace.anon_decls.swapRemove(decl)); decl.destroy(mod); } /// Delete all the Export objects that are caused by this Decl. Re-analysis of /// this Decl will cause them to be re-created (or not). fn deleteDeclExports(mod: *Module, decl: *Decl) void { const kv = mod.export_owners.fetchSwapRemove(decl) orelse return; for (kv.value) |exp| { if (mod.decl_exports.getPtr(exp.exported_decl)) |value_ptr| { // Remove exports with owner_decl matching the regenerating decl. const list = value_ptr.*; var i: usize = 0; var new_len = list.len; while (i < new_len) { if (list[i].owner_decl == decl) { mem.copyBackwards(*Export, list[i..], list[i + 1 .. 
new_len]); new_len -= 1; } else { i += 1; } } value_ptr.* = mod.gpa.shrink(list, new_len); if (new_len == 0) { assert(mod.decl_exports.swapRemove(exp.exported_decl)); } } if (mod.comp.bin_file.cast(link.File.Elf)) |elf| { elf.deleteExport(exp.link.elf); } if (mod.comp.bin_file.cast(link.File.MachO)) |macho| { macho.deleteExport(exp.link.macho); } if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| { failed_kv.value.destroy(mod.gpa); } mod.gpa.free(exp.options.name); mod.gpa.destroy(exp); } mod.gpa.free(kv.value); } pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); const gpa = mod.gpa; // Use the Decl's arena for function memory. var arena = decl.value_arena.?.promote(gpa); defer decl.value_arena.?.* = arena.state; const fn_ty = decl.ty; var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = &arena.allocator, .code = decl.namespace.file_scope.zir, .owner_decl = decl, .namespace = decl.namespace, .func = func, .fn_ret_ty = func.owner_decl.ty.fnReturnType(), .owner_func = func, }; defer sema.deinit(); // First few indexes of extra are reserved and set at the end. const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len; try sema.air_extra.ensureTotalCapacity(gpa, reserved_count); sema.air_extra.items.len += reserved_count; var inner_block: Scope.Block = .{ .parent = null, .sema = &sema, .src_decl = decl, .instructions = .{}, .inlining = null, .is_comptime = false, }; defer inner_block.instructions.deinit(gpa); const fn_info = sema.code.getFnInfo(func.zir_body_inst); const zir_tags = sema.code.instructions.items(.tag); // Here we are performing "runtime semantic analysis" for a function body, which means // we must map the parameter ZIR instructions to `arg` AIR instructions. // AIR requires the `arg` parameters to be the first N instructions. 
// This could be a generic function instantiation, however, in which case we need to // map the comptime parameters to constant values and only emit arg AIR instructions // for the runtime ones. const runtime_params_len = @intCast(u32, fn_ty.fnParamLen()); try inner_block.instructions.ensureTotalCapacity(gpa, runtime_params_len); try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType` try sema.inst_map.ensureUnusedCapacity(gpa, fn_info.total_params_len); var runtime_param_index: usize = 0; var total_param_index: usize = 0; for (fn_info.param_body) |inst| { const name = switch (zir_tags[inst]) { .param, .param_comptime => blk: { const inst_data = sema.code.instructions.items(.data)[inst].pl_tok; const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index).data; break :blk extra.name; }, .param_anytype, .param_anytype_comptime => blk: { const str_tok = sema.code.instructions.items(.data)[inst].str_tok; break :blk str_tok.start; }, else => continue, }; if (func.comptime_args) |comptime_args| { const arg_tv = comptime_args[total_param_index]; if (arg_tv.val.tag() != .generic_poison) { // We have a comptime value for this parameter. 
const arg = try sema.addConstant(arg_tv.ty, arg_tv.val); sema.inst_map.putAssumeCapacityNoClobber(inst, arg); total_param_index += 1; continue; } } const param_type = fn_ty.fnParamType(runtime_param_index); const ty_ref = try sema.addType(param_type); const arg_index = @intCast(u32, sema.air_instructions.len); inner_block.instructions.appendAssumeCapacity(arg_index); sema.air_instructions.appendAssumeCapacity(.{ .tag = .arg, .data = .{ .ty_str = .{ .ty = ty_ref, .str = name, } }, }); sema.inst_map.putAssumeCapacityNoClobber(inst, Air.indexToRef(arg_index)); total_param_index += 1; runtime_param_index += 1; } func.state = .in_progress; log.debug("set {s} to in_progress", .{decl.name}); _ = sema.analyzeBody(&inner_block, fn_info.body) catch |err| switch (err) { error.NeededSourceLocation => @panic("zig compiler bug: NeededSourceLocation"), error.GenericPoison => @panic("zig compiler bug: GenericPoison"), else => |e| return e, }; // Copy the block into place and mark that as the main block. 
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + inner_block.instructions.items.len); const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ .body_len = @intCast(u32, inner_block.instructions.items.len), }); sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items); sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = main_block_index; func.state = .success; log.debug("set {s} to success", .{decl.name}); return Air{ .instructions = sema.air_instructions.toOwnedSlice(), .extra = sema.air_extra.toOwnedSlice(gpa), .values = sema.air_values.toOwnedSlice(gpa), }; } fn markOutdatedDecl(mod: *Module, decl: *Decl) !void { log.debug("mark outdated {*} ({s})", .{ decl, decl.name }); try mod.comp.work_queue.writeItem(.{ .analyze_decl = decl }); if (mod.failed_decls.fetchSwapRemove(decl)) |kv| { kv.value.destroy(mod.gpa); } if (mod.emit_h) |emit_h| { if (emit_h.failed_decls.fetchSwapRemove(decl)) |kv| { kv.value.destroy(mod.gpa); } } _ = mod.compile_log_decls.swapRemove(decl); decl.analysis = .outdated; } pub fn allocateNewDecl(mod: *Module, namespace: *Scope.Namespace, src_node: ast.Node.Index) !*Decl { // If we have emit-h then we must allocate a bigger structure to store the emit-h state. 
const new_decl: *Decl = if (mod.emit_h != null) blk: { const parent_struct = try mod.gpa.create(DeclPlusEmitH); parent_struct.* = .{ .emit_h = .{}, .decl = undefined, }; break :blk &parent_struct.decl; } else try mod.gpa.create(Decl); new_decl.* = .{ .name = "", .namespace = namespace, .src_node = src_node, .src_line = undefined, .has_tv = false, .owns_tv = false, .ty = undefined, .val = undefined, .align_val = undefined, .linksection_val = undefined, .analysis = .unreferenced, .deletion_flag = false, .zir_decl_index = 0, .link = switch (mod.comp.bin_file.tag) { .coff => .{ .coff = link.File.Coff.TextBlock.empty }, .elf => .{ .elf = link.File.Elf.TextBlock.empty }, .macho => .{ .macho = link.File.MachO.TextBlock.empty }, .plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty }, .c => .{ .c = link.File.C.DeclBlock.empty }, .wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty }, .spirv => .{ .spirv = {} }, }, .fn_link = switch (mod.comp.bin_file.tag) { .coff => .{ .coff = {} }, .elf => .{ .elf = link.File.Elf.SrcFn.empty }, .macho => .{ .macho = link.File.MachO.SrcFn.empty }, .plan9 => .{ .plan9 = {} }, .c => .{ .c = link.File.C.FnBlock.empty }, .wasm => .{ .wasm = link.File.Wasm.FnData.empty }, .spirv => .{ .spirv = .{} }, }, .generation = 0, .is_pub = false, .is_exported = false, .has_linksection = false, .has_align = false, .alive = false, }; return new_decl; } /// Get error value for error tag `name`. 
pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged(ErrorInt).KV {
    // Intern `name` into the global error set; reuse the existing entry if present.
    const gop = try mod.global_error_set.getOrPut(mod.gpa, name);
    if (gop.found_existing) {
        return std.StringHashMapUnmanaged(ErrorInt).KV{
            .key = gop.key_ptr.*,
            .value = gop.value_ptr.*,
        };
    }

    // New entry: roll back the map insertion if any allocation below fails.
    errdefer assert(mod.global_error_set.remove(name));
    try mod.error_name_list.ensureCapacity(mod.gpa, mod.error_name_list.items.len + 1);
    // The map key must outlive the caller's `name`; duplicate it.
    gop.key_ptr.* = try mod.gpa.dupe(u8, name);
    gop.value_ptr.* = @intCast(ErrorInt, mod.error_name_list.items.len);
    mod.error_name_list.appendAssumeCapacity(gop.key_ptr.*);
    return std.StringHashMapUnmanaged(ErrorInt).KV{
        .key = gop.key_ptr.*,
        .value = gop.value_ptr.*,
    };
}

/// Create an `Export` of `exported_decl` under `borrowed_symbol_name` (which
/// is duplicated; the caller retains ownership of the argument). Only function
/// Decls can currently be exported; anything else is a compile error.
pub fn analyzeExport(
    mod: *Module,
    scope: *Scope,
    src: LazySrcLoc,
    borrowed_symbol_name: []const u8,
    exported_decl: *Decl,
) !void {
    try mod.ensureDeclAnalyzed(exported_decl);
    switch (exported_decl.ty.zigTypeTag()) {
        .Fn => {},
        else => return mod.fail(scope, src, "unable to export type '{}'", .{exported_decl.ty}),
    }

    // Reserve so the getOrPutAssumeCapacity calls below cannot fail.
    try mod.decl_exports.ensureUnusedCapacity(mod.gpa, 1);
    try mod.export_owners.ensureUnusedCapacity(mod.gpa, 1);

    const new_export = try mod.gpa.create(Export);
    errdefer mod.gpa.destroy(new_export);

    const symbol_name = try mod.gpa.dupe(u8, borrowed_symbol_name);
    errdefer mod.gpa.free(symbol_name);

    const owner_decl = scope.ownerDecl().?;

    log.debug("exporting Decl '{s}' as symbol '{s}' from Decl '{s}'", .{
        exported_decl.name, borrowed_symbol_name, owner_decl.name,
    });

    new_export.* = .{
        .options = .{ .name = symbol_name },
        .src = src,
        .link = switch (mod.comp.bin_file.tag) {
            .coff => .{ .coff = {} },
            .elf => .{ .elf = link.File.Elf.Export{} },
            .macho => .{ .macho = link.File.MachO.Export{} },
            .plan9 => .{ .plan9 = null },
            .c => .{ .c = {} },
            .wasm => .{ .wasm = {} },
            .spirv => .{ .spirv = {} },
        },
        .owner_decl = owner_decl,
        .exported_decl = exported_decl,
        .status = .in_progress,
    };

    // Add to export_owners table.
    const eo_gop = mod.export_owners.getOrPutAssumeCapacity(owner_decl);
    if (!eo_gop.found_existing) {
        eo_gop.value_ptr.* = &[0]*Export{};
    }
    eo_gop.value_ptr.* = try mod.gpa.realloc(eo_gop.value_ptr.*, eo_gop.value_ptr.len + 1);
    eo_gop.value_ptr.*[eo_gop.value_ptr.len - 1] = new_export;
    errdefer eo_gop.value_ptr.* = mod.gpa.shrink(eo_gop.value_ptr.*, eo_gop.value_ptr.len - 1);

    // Add to exported_decl table.
    const de_gop = mod.decl_exports.getOrPutAssumeCapacity(exported_decl);
    if (!de_gop.found_existing) {
        de_gop.value_ptr.* = &[0]*Export{};
    }
    de_gop.value_ptr.* = try mod.gpa.realloc(de_gop.value_ptr.*, de_gop.value_ptr.len + 1);
    de_gop.value_ptr.*[de_gop.value_ptr.len - 1] = new_export;
    errdefer de_gop.value_ptr.* = mod.gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1);
}

/// Takes ownership of `name` even if it returns an error.
pub fn createAnonymousDeclNamed(
    mod: *Module,
    scope: *Scope,
    typed_value: TypedValue,
    name: [:0]u8,
) !*Decl {
    return mod.createAnonymousDeclFromDeclNamed(scope.ownerDecl().?, typed_value, name);
}

/// Create an anonymous Decl owned by `scope`'s owner Decl; a unique name is
/// generated automatically.
pub fn createAnonymousDecl(mod: *Module, scope: *Scope, typed_value: TypedValue) !*Decl {
    return mod.createAnonymousDeclFromDecl(scope.ownerDecl().?, typed_value);
}

/// Create an anonymous Decl owned by `owner_decl`, named
/// "<owner>__anon_<n>" with a process-unique counter value.
pub fn createAnonymousDeclFromDecl(mod: *Module, owner_decl: *Decl, tv: TypedValue) !*Decl {
    const name_index = mod.getNextAnonNameIndex();
    const name = try std.fmt.allocPrintZ(mod.gpa, "{s}__anon_{d}", .{
        owner_decl.name, name_index,
    });
    return mod.createAnonymousDeclFromDeclNamed(owner_decl, tv, name);
}

/// Takes ownership of `name` even if it returns an error.
pub fn createAnonymousDeclFromDeclNamed( mod: *Module, owner_decl: *Decl, typed_value: TypedValue, name: [:0]u8, ) !*Decl { errdefer mod.gpa.free(name); const namespace = owner_decl.namespace; try namespace.anon_decls.ensureUnusedCapacity(mod.gpa, 1); const new_decl = try mod.allocateNewDecl(namespace, owner_decl.src_node); new_decl.name = name; new_decl.src_line = owner_decl.src_line; new_decl.ty = typed_value.ty; new_decl.val = typed_value.val; new_decl.has_tv = true; new_decl.analysis = .complete; new_decl.generation = mod.generation; namespace.anon_decls.putAssumeCapacityNoClobber(new_decl, {}); // TODO: This generates the Decl into the machine code file if it is of a // type that is non-zero size. We should be able to further improve the // compiler to omit Decls which are only referenced at compile-time and not runtime. if (typed_value.ty.hasCodeGenBits()) { try mod.comp.bin_file.allocateDeclIndexes(new_decl); try mod.comp.work_queue.writeItem(.{ .codegen_decl = new_decl }); } return new_decl; } pub fn getNextAnonNameIndex(mod: *Module) usize { return @atomicRmw(usize, &mod.next_anon_name_index, .Add, 1, .Monotonic); } pub fn makeIntType(arena: *Allocator, signedness: std.builtin.Signedness, bits: u16) !Type { const int_payload = try arena.create(Type.Payload.Bits); int_payload.* = .{ .base = .{ .tag = switch (signedness) { .signed => .int_signed, .unsigned => .int_unsigned, }, }, .data = bits, }; return Type.initPayload(&int_payload.base); } /// We don't return a pointer to the new error note because the pointer /// becomes invalid when you add another one. 
pub fn errNote( mod: *Module, scope: *Scope, src: LazySrcLoc, parent: *ErrorMsg, comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { return mod.errNoteNonLazy(src.toSrcLoc(scope), parent, format, args); } pub fn errNoteNonLazy( mod: *Module, src_loc: SrcLoc, parent: *ErrorMsg, comptime format: []const u8, args: anytype, ) error{OutOfMemory}!void { const msg = try std.fmt.allocPrint(mod.gpa, format, args); errdefer mod.gpa.free(msg); parent.notes = try mod.gpa.realloc(parent.notes, parent.notes.len + 1); parent.notes[parent.notes.len - 1] = .{ .src_loc = src_loc, .msg = msg, }; } pub fn errMsg( mod: *Module, scope: *Scope, src: LazySrcLoc, comptime format: []const u8, args: anytype, ) error{OutOfMemory}!*ErrorMsg { return ErrorMsg.create(mod.gpa, src.toSrcLoc(scope), format, args); } pub fn fail( mod: *Module, scope: *Scope, src: LazySrcLoc, comptime format: []const u8, args: anytype, ) CompileError { const err_msg = try mod.errMsg(scope, src, format, args); return mod.failWithOwnedErrorMsg(scope, err_msg); } /// Same as `fail`, except given a token index, and the function sets up the `LazySrcLoc` /// for pointing at it relatively by subtracting from the containing `Decl`. pub fn failTok( mod: *Module, scope: *Scope, token_index: ast.TokenIndex, comptime format: []const u8, args: anytype, ) CompileError { const src = scope.srcDecl().?.tokSrcLoc(token_index); return mod.fail(scope, src, format, args); } /// Same as `fail`, except given an AST node index, and the function sets up the `LazySrcLoc` /// for pointing at it relatively by subtracting from the containing `Decl`. 
pub fn failNode(
    mod: *Module,
    scope: *Scope,
    node_index: ast.Node.Index,
    comptime format: []const u8,
    args: anytype,
) CompileError {
    const src = scope.srcDecl().?.nodeSrcLoc(node_index);
    return mod.fail(scope, src, format, args);
}

/// Take ownership of `err_msg`, attribute the failure to the owner func/decl of
/// `scope`, and return `error.AnalysisFail` (or `error.NeededSourceLocation`
/// when the message lacks a usable source location).
pub fn failWithOwnedErrorMsg(mod: *Module, scope: *Scope, err_msg: *ErrorMsg) CompileError {
    @setCold(true);
    {
        // Until the message is stored in failed_decls, we still own it.
        errdefer err_msg.destroy(mod.gpa);
        if (err_msg.src_loc.lazy == .unneeded) {
            return error.NeededSourceLocation;
        }
        try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1);
        try mod.failed_files.ensureUnusedCapacity(mod.gpa, 1);
    }
    switch (scope.tag) {
        .block => {
            const block = scope.cast(Scope.Block).?;
            if (block.sema.owner_func) |func| {
                func.state = .sema_failure;
            } else {
                block.sema.owner_decl.analysis = .sema_failure;
                block.sema.owner_decl.generation = mod.generation;
            }
            mod.failed_decls.putAssumeCapacityNoClobber(block.sema.owner_decl, err_msg);
        },
        .file => unreachable,
        .namespace => unreachable,
    }
    return error.AnalysisFail;
}

/// Construct a pointer type described only by element type, mutability, and
/// size class; `*const [:0]u8`-style extras are not representable here.
/// `[]const u8` is special-cased to the interned `.const_slice_u8` tag.
pub fn simplePtrType(
    arena: *Allocator,
    elem_ty: Type,
    mutable: bool,
    size: std.builtin.TypeInfo.Pointer.Size,
) Allocator.Error!Type {
    if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) {
        return Type.initTag(.const_slice_u8);
    }
    // TODO stage1 type inference bug
    const T = Type.Tag;
    const type_payload = try arena.create(Type.Payload.ElemType);
    type_payload.* = .{
        .base = .{
            .tag = switch (size) {
                .One => if (mutable) T.single_mut_pointer else T.single_const_pointer,
                .Many => if (mutable) T.many_mut_pointer else T.many_const_pointer,
                .C => if (mutable) T.c_mut_pointer else T.c_const_pointer,
                .Slice => if (mutable) T.mut_slice else T.const_slice,
            },
        },
        .data = elem_ty,
    };
    return Type.initPayload(&type_payload.base);
}

/// Construct a fully-general pointer type (sentinel, alignment, bit-pointer
/// host size/offset, allowzero, volatile). Asserts bit_offset fits in host_size.
pub fn ptrType(
    arena: *Allocator,
    elem_ty: Type,
    sentinel: ?Value,
    @"align": u32,
    bit_offset: u16,
    host_size: u16,
    mutable: bool,
    @"allowzero": bool,
    @"volatile": bool,
    size: std.builtin.TypeInfo.Pointer.Size,
) Allocator.Error!Type {
    assert(host_size == 0 or bit_offset < host_size * 8);

    // TODO check if type can be represented by simplePtrType
    return Type.Tag.pointer.create(arena, .{
        .pointee_type = elem_ty,
        .sentinel = sentinel,
        .@"align" = @"align",
        .bit_offset = bit_offset,
        .host_size = host_size,
        .@"allowzero" = @"allowzero",
        .mutable = mutable,
        .@"volatile" = @"volatile",
        .size = size,
    });
}

/// Construct `?child_type`, using the compact optional-pointer representations
/// when the child is a single-item pointer.
pub fn optionalType(arena: *Allocator, child_type: Type) Allocator.Error!Type {
    switch (child_type.tag()) {
        .single_const_pointer => return Type.Tag.optional_single_const_pointer.create(
            arena,
            child_type.elemType(),
        ),
        .single_mut_pointer => return Type.Tag.optional_single_mut_pointer.create(
            arena,
            child_type.elemType(),
        ),
        else => return Type.Tag.optional.create(arena, child_type),
    }
}

/// Construct `[len]elem_type` (optionally with a sentinel). `[N]u8` and
/// `[N:0]u8` use compact dedicated representations.
pub fn arrayType(
    arena: *Allocator,
    len: u64,
    sentinel: ?Value,
    elem_type: Type,
) Allocator.Error!Type {
    if (elem_type.eql(Type.initTag(.u8))) {
        if (sentinel) |some| {
            if (some.eql(Value.initTag(.zero), elem_type)) {
                return Type.Tag.array_u8_sentinel_0.create(arena, len);
            }
        } else {
            return Type.Tag.array_u8.create(arena, len);
        }
    }
    if (sentinel) |some| {
        return Type.Tag.array_sentinel.create(arena, .{
            .len = len,
            .sentinel = some,
            .elem_type = elem_type,
        });
    }
    return Type.Tag.array.create(arena, .{
        .len = len,
        .elem_type = elem_type,
    });
}

/// Construct `error_set!payload`. `anyerror!void` uses an interned tag.
/// Asserts `error_set` really is an error set type.
pub fn errorUnionType(
    arena: *Allocator,
    error_set: Type,
    payload: Type,
) Allocator.Error!Type {
    assert(error_set.zigTypeTag() == .ErrorSet);
    if (error_set.eql(Type.initTag(.anyerror)) and payload.eql(Type.initTag(.void))) {
        return Type.initTag(.anyerror_void_error_union);
    }
    return Type.Tag.error_union.create(arena, .{
        .error_set = error_set,
        .payload = payload,
    });
}

/// The compilation target, as configured on the linker file.
pub fn getTarget(mod: Module) Target {
    return mod.comp.bin_file.options.target;
}

/// The optimization mode, as configured on the linker file.
pub fn optimizeMode(mod: Module) std.builtin.Mode {
    return mod.comp.bin_file.options.optimize_mode;
}

/// Under the Compilation mutex, drop any stored compile error for `file` when
/// its status indicates the error is (or may be) stale.
fn lockAndClearFileCompileError(mod: *Module, file: *Scope.File) void {
    switch (file.status) {
        .success_zir, .retryable_failure => {},
        .never_loaded, .parse_failure, .astgen_failure => {
            const lock = mod.comp.mutex.acquire();
            defer lock.release();
            if (mod.failed_files.fetchSwapRemove(file)) |kv| {
                if (kv.value) |msg| msg.destroy(mod.gpa); // Delete previous error message.
            }
        },
    }
}

/// Identifies one prong (or one item/range within a multi-prong) of a switch
/// expression, for lazily computing a precise error source location.
pub const SwitchProngSrc = union(enum) {
    scalar: u32,
    multi: Multi,
    range: Multi,

    pub const Multi = struct {
        prong: u32,
        item: u32,
    };

    /// Whether to point at a whole range, or just its first/last bound.
    pub const RangeExpand = enum { none, first, last };

    /// This function is intended to be called only when it is certain that we need
    /// the LazySrcLoc in order to emit a compile error.
    pub fn resolve(
        prong_src: SwitchProngSrc,
        gpa: *Allocator,
        decl: *Decl,
        switch_node_offset: i32,
        range_expand: RangeExpand,
    ) LazySrcLoc {
        @setCold(true);
        const tree = decl.namespace.file_scope.getTree(gpa) catch |err| {
            // In this case we emit a warning + a less precise source location.
            log.warn("unable to load {s}: {s}", .{
                decl.namespace.file_scope.sub_file_path, @errorName(err),
            });
            return LazySrcLoc{ .node_offset = 0 };
        };
        const switch_node = decl.relativeToNodeIndex(switch_node_offset);
        const main_tokens = tree.nodes.items(.main_token);
        const node_datas = tree.nodes.items(.data);
        const node_tags = tree.nodes.items(.tag);
        const extra = tree.extraData(node_datas[switch_node].rhs, ast.Node.SubRange);
        const case_nodes = tree.extra_data[extra.start..extra.end];

        // Re-walk the cases, counting scalar and multi prongs separately until
        // the requested prong is reached.
        var multi_i: u32 = 0;
        var scalar_i: u32 = 0;
        for (case_nodes) |case_node| {
            const case = switch (node_tags[case_node]) {
                .switch_case_one => tree.switchCaseOne(case_node),
                .switch_case => tree.switchCase(case_node),
                else => unreachable,
            };
            // `else` prong has no values.
            if (case.ast.values.len == 0) continue;
            // `_` prong does not participate in the counts either.
            if (case.ast.values.len == 1 and
                node_tags[case.ast.values[0]] == .identifier and
                mem.eql(u8, tree.tokenSlice(main_tokens[case.ast.values[0]]), "_"))
            {
                continue;
            }
            const is_multi = case.ast.values.len != 1 or
                node_tags[case.ast.values[0]] == .switch_range;

            switch (prong_src) {
                .scalar => |i| if (!is_multi and i == scalar_i) return LazySrcLoc{
                    .node_offset = decl.nodeIndexToRelative(case.ast.values[0]),
                },
                .multi => |s| if (is_multi and s.prong == multi_i) {
                    var item_i: u32 = 0;
                    for (case.ast.values) |item_node| {
                        if (node_tags[item_node] == .switch_range) continue;
                        if (item_i == s.item) return LazySrcLoc{
                            .node_offset = decl.nodeIndexToRelative(item_node),
                        };
                        item_i += 1;
                    } else unreachable;
                },
                .range => |s| if (is_multi and s.prong == multi_i) {
                    var range_i: u32 = 0;
                    for (case.ast.values) |range| {
                        if (node_tags[range] != .switch_range) continue;
                        if (range_i == s.item) switch (range_expand) {
                            .none => return LazySrcLoc{
                                .node_offset = decl.nodeIndexToRelative(range),
                            },
                            .first => return LazySrcLoc{
                                .node_offset = decl.nodeIndexToRelative(node_datas[range].lhs),
                            },
                            .last => return LazySrcLoc{
                                .node_offset = decl.nodeIndexToRelative(node_datas[range].rhs),
                            },
                        };
                        range_i += 1;
                    } else unreachable;
                },
            }
            if (is_multi) {
                multi_i += 1;
            } else {
                scalar_i += 1;
            }
        } else unreachable;
    }
};

/// Describes where peer-type-resolution candidates come from, for attaching
/// error notes to the right source locations.
pub const PeerTypeCandidateSrc = union(enum) {
    /// Do not print out error notes for candidate sources
    none: void,
    /// When we want to know the the src of candidate i, look up at
    /// index i in this slice
    override: []LazySrcLoc,
    /// resolvePeerTypes originates from a @TypeOf(...) call
    typeof_builtin_call_node_offset: i32,

    /// Resolve the source location of candidate `candidate_i` out of
    /// `candidates`, or null when notes are suppressed.
    pub fn resolve(
        self: PeerTypeCandidateSrc,
        gpa: *Allocator,
        decl: *Decl,
        candidates: usize,
        candidate_i: usize,
    ) ?LazySrcLoc {
        @setCold(true);
        switch (self) {
            .none => {
                return null;
            },
            .override => |candidate_srcs| {
                return candidate_srcs[candidate_i];
            },
            .typeof_builtin_call_node_offset => |node_offset| {
                // Up to two arguments have dedicated lazy location tags; more
                // require loading the tree to find the argument node.
                if (candidates <= 2) {
                    switch (candidate_i) {
                        0 => return LazySrcLoc{ .node_offset_builtin_call_arg0 = node_offset },
                        1 => return LazySrcLoc{ .node_offset_builtin_call_arg1 = node_offset },
                        else => unreachable,
                    }
                }

                const tree = decl.namespace.file_scope.getTree(gpa) catch |err| {
                    // In this case we emit a warning + a less precise source location.
                    log.warn("unable to load {s}: {s}", .{
                        decl.namespace.file_scope.sub_file_path, @errorName(err),
                    });
                    return LazySrcLoc{ .node_offset = 0 };
                };
                const node = decl.relativeToNodeIndex(node_offset);
                const node_datas = tree.nodes.items(.data);
                const params = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs];

                return LazySrcLoc{ .node_abs = params[candidate_i] };
            },
        }
    }
};

/// Called from `performAllTheWork`, after all AstGen workers have finished,
/// and before the main semantic analysis loop begins.
pub fn processOutdatedAndDeletedDecls(mod: *Module) !void {
    // Ultimately, the goal is to queue up `analyze_decl` tasks in the work queue
    // for the outdated decls, but we cannot queue up the tasks until after
    // we find out which ones have been deleted, otherwise there would be
    // deleted Decl pointers in the work queue.
    var outdated_decls = std.AutoArrayHashMap(*Decl, void).init(mod.gpa);
    defer outdated_decls.deinit();
    for (mod.import_table.values()) |file| {
        try outdated_decls.ensureUnusedCapacity(file.outdated_decls.items.len);
        for (file.outdated_decls.items) |decl| {
            outdated_decls.putAssumeCapacity(decl, {});
        }
        file.outdated_decls.clearRetainingCapacity();

        // Handle explicitly deleted decls from the source code. This is one of two
        // places that Decl deletions happen. The other is in `Compilation`, after
        // `performAllTheWork`, where we iterate over `Module.deletion_set` and
        // delete Decls which are no longer referenced.
        // If a Decl is explicitly deleted from source, and also no longer referenced,
        // it may be both in this `deleted_decls` set, as well as in the
        // `Module.deletion_set`. To avoid deleting it twice, we remove it from the
        // deletion set at this time.
        for (file.deleted_decls.items) |decl| {
            log.debug("deleted from source: {*} ({s})", .{ decl, decl.name });

            // Remove from the namespace it resides in, preserving declaration order.
            assert(decl.zir_decl_index != 0);
            _ = decl.namespace.decls.orderedRemove(mem.spanZ(decl.name));

            try mod.clearDecl(decl, &outdated_decls);
            decl.destroy(mod);
        }
        file.deleted_decls.clearRetainingCapacity();
    }
    // Finally we can queue up re-analysis tasks after we have processed
    // the deleted decls.
    for (outdated_decls.keys()) |key| {
        try mod.markOutdatedDecl(key);
    }
}

/// Called from `Compilation.update`, after everything is done, just before
/// reporting compile errors. In this function we emit exported symbol collision
/// errors and communicate exported symbols to the linker backend.
/// NOTE(review): this function continues past the end of this chunk; the tail
/// below is intentionally left mid-statement.
pub fn processExports(mod: *Module) !void {
    const gpa = mod.gpa;
    // Map symbol names to `Export` for name collision detection.
    var symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{};
    defer symbol_exports.deinit(gpa);
    var it = mod.decl_exports.iterator();
    while (it.next()) |entry| {
        const exported_decl = entry.key_ptr.*;
        const exports = entry.value_ptr.*;
        for (exports) |new_export| {
            const gop = try symbol_exports.getOrPut(gpa, new_export.options.name);
            if (gop.found_existing) {
                // Collision: record an error on this export, with a note
                // pointing at the first export that claimed the symbol.
                new_export.status = .failed_retryable;
                try mod.failed_exports.ensureUnusedCapacity(gpa, 1);
                const src_loc = new_export.getSrcLoc();
                const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {s}", .{
                    new_export.options.name,
                });
                errdefer msg.destroy(gpa);
                const other_export = gop.value_ptr.*;
                const other_src_loc = other_export.getSrcLoc();
                try mod.errNoteNonLazy(other_src_loc, msg, "other symbol here", .{});
                mod.failed_exports.putAssumeCapacityNoClobber(new_export, msg);
                new_export.status = .failed;
            } else {
                gop.value_ptr.* = new_export;
            }
        }
        mod.comp.bin_file.updateDeclExports(mod, exported_decl, exports) catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
            else => {
                const new_export = exports[0];
                new_export.status = .failed_retryable;
                try mod.failed_exports.ensureUnusedCapacity(gpa, 1);
                const src_loc = new_export.getSrcLoc();
                const msg = try ErrorMsg.create(gpa,
src_loc, "unable to export: {s}", .{ @errorName(err), }); mod.failed_exports.putAssumeCapacityNoClobber(new_export, msg); }, }; } } pub fn populateTestFunctions(mod: *Module) !void { const gpa = mod.gpa; const builtin_pkg = mod.main_pkg.table.get("builtin").?; const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file; const builtin_namespace = builtin_file.root_decl.?.namespace; const decl = builtin_namespace.decls.get("test_functions").?; var buf: Type.Payload.ElemType = undefined; const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).elemType(); const array_decl = d: { // Add mod.test_functions to an array decl then make the test_functions // decl reference it as a slice. var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); const arena = &new_decl_arena.allocator; const test_fn_vals = try arena.alloc(Value, mod.test_functions.count()); const array_decl = try mod.createAnonymousDeclFromDecl(decl, .{ .ty = try Type.Tag.array.create(arena, .{ .len = test_fn_vals.len, .elem_type = try tmp_test_fn_ty.copy(arena), }), .val = try Value.Tag.array.create(arena, test_fn_vals), }); for (mod.test_functions.keys()) |test_decl, i| { const test_name_slice = mem.sliceTo(test_decl.name, 0); const test_name_decl = n: { var name_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer name_decl_arena.deinit(); const bytes = try name_decl_arena.allocator.dupe(u8, test_name_slice); const test_name_decl = try mod.createAnonymousDeclFromDecl(array_decl, .{ .ty = try Type.Tag.array_u8.create(&name_decl_arena.allocator, bytes.len), .val = try Value.Tag.bytes.create(&name_decl_arena.allocator, bytes), }); try test_name_decl.finalizeNewArena(&name_decl_arena); break :n test_name_decl; }; try mod.linkerUpdateDecl(test_name_decl); const field_vals = try arena.create([3]Value); field_vals.* = .{ try Value.Tag.slice.create(arena, .{ .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl), .len = try Value.Tag.int_u64.create(arena, 
test_name_slice.len), }), // name try Value.Tag.decl_ref.create(arena, test_decl), // func Value.initTag(.null_value), // async_frame_size }; test_fn_vals[i] = try Value.Tag.@"struct".create(arena, field_vals); } try array_decl.finalizeNewArena(&new_decl_arena); break :d array_decl; }; try mod.linkerUpdateDecl(array_decl); { var arena_instance = decl.value_arena.?.promote(gpa); defer decl.value_arena.?.* = arena_instance.state; const arena = &arena_instance.allocator; decl.ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena)); decl.val = try Value.Tag.slice.create(arena, .{ .ptr = try Value.Tag.decl_ref.create(arena, array_decl), .len = try Value.Tag.int_u64.create(arena, mod.test_functions.count()), }); } try mod.linkerUpdateDecl(decl); } pub fn linkerUpdateDecl(mod: *Module, decl: *Decl) !void { mod.comp.bin_file.updateDecl(mod, decl) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { decl.analysis = .codegen_failure; return; }, else => { const gpa = mod.gpa; try mod.failed_decls.ensureUnusedCapacity(gpa, 1); mod.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create( gpa, decl.srcLoc(), "unable to codegen: {s}", .{@errorName(err)}, )); decl.analysis = .codegen_failure_retryable; return; }, }; }
// src/Module.zig — end of this file's excerpt in the concatenation.
// NOTE(review): this file is written in a pre-0.4 Zig dialect (`*c_void`,
// two-arg `@ptrCast`, allocator vtable fields). Comments only; code unchanged.
const std = @import("index.zig");
const debug = std.debug;
const assert = debug.assert;
const mem = std.mem;
const os = std.os;
const builtin = @import("builtin");
const Os = builtin.Os;
const c = std.c;
const maxInt = std.math.maxInt;
const Allocator = mem.Allocator;

/// Allocator backed by libc malloc/realloc/free.
/// Only meaningful when linking libc (see the "c_allocator" test below).
pub const c_allocator = &c_allocator_state;
var c_allocator_state = Allocator{
    .allocFn = cAlloc,
    .reallocFn = cRealloc,
    .freeFn = cFree,
};

fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 {
    // malloc only guarantees alignment suitable for basic types; anything
    // stricter than c_longdouble cannot be honored here.
    assert(alignment <= @alignOf(c_longdouble));
    return if (c.malloc(n)) |buf| @ptrCast([*]u8, buf)[0..n] else error.OutOfMemory;
}

fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
    const old_ptr = @ptrCast(*c_void, old_mem.ptr);
    if (c.realloc(old_ptr, new_size)) |buf| {
        return @ptrCast([*]u8, buf)[0..new_size];
    } else if (new_size <= old_mem.len) {
        // realloc failed, but we are shrinking: the old block is still valid.
        return old_mem[0..new_size];
    } else {
        return error.OutOfMemory;
    }
}

fn cFree(self: *Allocator, old_mem: []u8) void {
    const old_ptr = @ptrCast(*c_void, old_mem.ptr);
    c.free(old_ptr);
}

/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const DirectAllocator = struct {
    allocator: Allocator,
    // Lazily-created Windows private heap; void on POSIX targets.
    heap_handle: ?HeapHandle,

    const HeapHandle = if (builtin.os == Os.windows) os.windows.HANDLE else void;

    pub fn init() DirectAllocator {
        return DirectAllocator{
            .allocator = Allocator{
                .allocFn = alloc,
                .reallocFn = realloc,
                .freeFn = free,
            },
            .heap_handle = if (builtin.os == Os.windows) null else {},
        };
    }

    pub fn deinit(self: *DirectAllocator) void {
        switch (builtin.os) {
            Os.windows => if (self.heap_handle) |heap_handle| {
                _ = os.windows.HeapDestroy(heap_handle);
            },
            else => {},
        }
    }

    fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
        const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
        switch (builtin.os) {
            Os.linux, Os.macosx, Os.ios, Os.freebsd => {
                const p = os.posix;
                // For alignment stricter than a page, over-map by `alignment`
                // and trim the misaligned prefix below.
                const alloc_size = if (alignment <= os.page_size) n else n + alignment;
                const addr = p.mmap(null, alloc_size, p.PROT_READ | p.PROT_WRITE, p.MAP_PRIVATE | p.MAP_ANONYMOUS, -1, 0);
                if (addr == p.MAP_FAILED) return error.OutOfMemory;
                if (alloc_size == n) return @intToPtr([*]u8, addr)[0..n];
                const aligned_addr = (addr & ~usize(alignment - 1)) + alignment;

                // We can unmap the unused portions of our mmap, but we must only
                // pass munmap bytes that exist outside our allocated pages or it
                // will happily eat us too.

                // Since alignment > page_size, we are by definition on a page boundary.
                const unused_start = addr;
                const unused_len = aligned_addr - 1 - unused_start;
                // NOTE(review): the `- 1` makes the unmapped prefix one byte short
                // of the aligned address — presumably relying on munmap's page
                // rounding; confirm this does not leak the last prefix page.
                const err = p.munmap(unused_start, unused_len);
                assert(p.getErrno(err) == 0);

                // It is impossible that there is an unoccupied page at the top of our
                // mmap.

                return @intToPtr([*]u8, aligned_addr)[0..n];
            },
            Os.windows => {
                // Over-allocate for worst-case alignment plus a usize record
                // that stores the original HeapAlloc base address.
                const amt = n + alignment + @sizeOf(usize);
                const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, builtin.AtomicOrder.SeqCst);
                const heap_handle = optional_heap_handle orelse blk: {
                    // Race to create the heap; the cmpxchg loser destroys its own.
                    const hh = os.windows.HeapCreate(0, amt, 0) orelse return error.OutOfMemory;
                    const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse break :blk hh;
                    _ = os.windows.HeapDestroy(hh);
                    break :blk other_hh.?; // can't be null because of the cmpxchg
                };
                const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
                const root_addr = @ptrToInt(ptr);
                const rem = @rem(root_addr, alignment);
                const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
                const adjusted_addr = root_addr + march_forward_bytes;
                // The record lives immediately after the user bytes so that
                // realloc/free can recover the HeapAlloc base address.
                const record_addr = adjusted_addr + n;
                @intToPtr(*align(1) usize, record_addr).* = root_addr;
                return @intToPtr([*]u8, adjusted_addr)[0..n];
            },
            else => @compileError("Unsupported OS"),
        }
    }

    fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
        const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
        switch (builtin.os) {
            Os.linux, Os.macosx, Os.ios, Os.freebsd => {
                if (new_size <= old_mem.len) {
                    // Shrink in place; release whole pages past the new end.
                    const base_addr = @ptrToInt(old_mem.ptr);
                    const old_addr_end = base_addr + old_mem.len;
                    const new_addr_end = base_addr + new_size;
                    const rem = @rem(new_addr_end, os.page_size);
                    const new_addr_end_rounded = new_addr_end + if (rem == 0) 0 else (os.page_size - rem);
                    if (old_addr_end > new_addr_end_rounded) {
                        _ = os.posix.munmap(new_addr_end_rounded, old_addr_end - new_addr_end_rounded);
                    }
                    return old_mem[0..new_size];
                }
                // Grow: fresh mapping plus copy; no mremap here.
                const result = try alloc(allocator, new_size, alignment);
                mem.copy(u8, result, old_mem);
                return result;
            },
            Os.windows => {
                const old_adjusted_addr = @ptrToInt(old_mem.ptr);
                const old_record_addr = old_adjusted_addr + old_mem.len;
                const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
                const old_ptr = @intToPtr(*c_void, root_addr);
                const amt = new_size + alignment + @sizeOf(usize);
                const new_ptr = os.windows.HeapReAlloc(self.heap_handle.?, 0, old_ptr, amt) orelse blk: {
                    if (new_size > old_mem.len) return error.OutOfMemory;
                    // Shrink fallback: keep the old block, just relocate the record.
                    const new_record_addr = old_record_addr - new_size + old_mem.len;
                    @intToPtr(*align(1) usize, new_record_addr).* = root_addr;
                    return old_mem[0..new_size];
                };
                const offset = old_adjusted_addr - root_addr;
                const new_root_addr = @ptrToInt(new_ptr);
                const new_adjusted_addr = new_root_addr + offset;
                // NOTE(review): assumes HeapReAlloc preserves the base address's
                // residue mod `alignment` when the block moves — confirm; a moved
                // block could trip this assert.
                assert(new_adjusted_addr % alignment == 0);
                const new_record_addr = new_adjusted_addr + new_size;
                @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
                return @intToPtr([*]u8, new_adjusted_addr)[0..new_size];
            },
            else => @compileError("Unsupported OS"),
        }
    }

    fn free(allocator: *Allocator, bytes: []u8) void {
        const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
        switch (builtin.os) {
            Os.linux, Os.macosx, Os.ios, Os.freebsd => {
                _ = os.posix.munmap(@ptrToInt(bytes.ptr), bytes.len);
            },
            Os.windows => {
                // Recover the original HeapAlloc base from the trailing record.
                const record_addr = @ptrToInt(bytes.ptr) + bytes.len;
                const root_addr = @intToPtr(*align(1) usize, record_addr).*;
                const ptr = @intToPtr(*c_void, root_addr);
                _ = os.windows.HeapFree(self.heap_handle.?, 0, ptr);
            },
            else => @compileError("Unsupported OS"),
        }
    }
};

/// This allocator takes an existing allocator, wraps it, and provides an interface
/// where you can allocate without freeing, and then free it all together.
pub const ArenaAllocator = struct {
    pub allocator: Allocator,

    child_allocator: *Allocator,
    // List of buffers obtained from child_allocator; each buffer's first
    // @sizeOf(BufNode) bytes hold the list node itself.
    buffer_list: std.LinkedList([]u8),
    end_index: usize,

    const BufNode = std.LinkedList([]u8).Node;

    pub fn init(child_allocator: *Allocator) ArenaAllocator {
        return ArenaAllocator{
            .allocator = Allocator{
                .allocFn = alloc,
                .reallocFn = realloc,
                .freeFn = free,
            },
            .child_allocator = child_allocator,
            .buffer_list = std.LinkedList([]u8).init(),
            .end_index = 0,
        };
    }

    pub fn deinit(self: *ArenaAllocator) void {
        var it = self.buffer_list.first;
        while (it) |node| {
            // this has to occur before the free because the free frees node
            it = node.next;
            self.child_allocator.free(node.data);
        }
    }

    /// Appends a new buffer at least `minimum_size` bytes (plus node header),
    /// growing geometrically from `prev_len` and rounding up to a page multiple.
    fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) !*BufNode {
        const actual_min_size = minimum_size + @sizeOf(BufNode);
        var len = prev_len;
        while (true) {
            len += len / 2;
            len += os.page_size - @rem(len, os.page_size);
            if (len >= actual_min_size) break;
        }
        const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
        const buf_node_slice = @bytesToSlice(BufNode, buf[0..@sizeOf(BufNode)]);
        const buf_node = &buf_node_slice[0];
        buf_node.* = BufNode{
            .data = buf,
            .prev = null,
            .next = null,
        };
        self.buffer_list.append(buf_node);
        // Bump pointer restarts at the top of the new buffer.
        self.end_index = 0;
        return buf_node;
    }

    fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
        const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
        var cur_node = if (self.buffer_list.last) |last_node| last_node else try self.createNode(0, n + alignment);
        while (true) {
            // Usable region starts after the embedded BufNode header.
            const cur_buf = cur_node.data[@sizeOf(BufNode)..];
            const addr = @ptrToInt(cur_buf.ptr) + self.end_index;
            const rem = @rem(addr, alignment);
            const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
            const adjusted_index = self.end_index + march_forward_bytes;
            const new_end_index = adjusted_index + n;
            if (new_end_index > cur_buf.len) {
                // Current buffer exhausted; chain a bigger one and retry.
                cur_node = try self.createNode(cur_buf.len, n + alignment);
                continue;
            }
            const result = cur_buf[adjusted_index..new_end_index];
            self.end_index = new_end_index;
            return result;
        }
    }

    fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
        if (new_size <= old_mem.len) {
            return old_mem[0..new_size];
        } else {
            const result = try alloc(allocator, new_size, alignment);
            mem.copy(u8, result, old_mem);
            return result;
        }
    }

    // Individual frees are no-ops; memory is reclaimed only by deinit().
    fn free(allocator: *Allocator, bytes: []u8) void {}
};

/// Bump allocator over a caller-provided fixed buffer. Not thread-safe.
pub const FixedBufferAllocator = struct {
    allocator: Allocator,
    end_index: usize,
    buffer: []u8,

    pub fn init(buffer: []u8) FixedBufferAllocator {
        return FixedBufferAllocator{
            .allocator = Allocator{
                .allocFn = alloc,
                .reallocFn = realloc,
                .freeFn = free,
            },
            .buffer = buffer,
            .end_index = 0,
        };
    }

    fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
        const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
        const rem = @rem(addr, alignment);
        const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
        const adjusted_index = self.end_index + march_forward_bytes;
        const new_end_index = adjusted_index + n;
        if (new_end_index > self.buffer.len) {
            return error.OutOfMemory;
        }
        const result = self.buffer[adjusted_index..new_end_index];
        self.end_index = new_end_index;
        return result;
    }

    fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
        assert(old_mem.len <= self.end_index);
        if (new_size <= old_mem.len) {
            return old_mem[0..new_size];
        } else if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len) {
            // Growing the most recent allocation: extend it in place.
            const start_index = self.end_index - old_mem.len;
            const new_end_index = start_index + new_size;
            if (new_end_index > self.buffer.len) return error.OutOfMemory;
            const result = self.buffer[start_index..new_end_index];
            self.end_index = new_end_index;
            return result;
        } else {
            const result = try alloc(allocator, new_size, alignment);
            mem.copy(u8, result, old_mem);
            return result;
        }
    }

    // Individual frees are no-ops; the buffer is owned by the caller.
    fn free(allocator: *Allocator, bytes: []u8) void {}
};

/// lock free
pub const ThreadSafeFixedBufferAllocator = struct {
    allocator: Allocator,
    end_index: usize,
    buffer: []u8,

    pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
        return ThreadSafeFixedBufferAllocator{
            .allocator = Allocator{
                .allocFn = alloc,
                .reallocFn = realloc,
                .freeFn = free,
            },
            .buffer = buffer,
            .end_index = 0,
        };
    }

    fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
        const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
        // CAS loop: recompute alignment from the freshly observed end_index
        // each iteration until the bump succeeds.
        var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
        while (true) {
            const addr = @ptrToInt(self.buffer.ptr) + end_index;
            const rem = @rem(addr, alignment);
            const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
            const adjusted_index = end_index + march_forward_bytes;
            const new_end_index = adjusted_index + n;
            if (new_end_index > self.buffer.len) {
                return error.OutOfMemory;
            }
            end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
        }
    }

    fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
        // NOTE(review): unlike FixedBufferAllocator there is no in-place grow
        // path here — presumably because end_index may have moved concurrently.
        if (new_size <= old_mem.len) {
            return old_mem[0..new_size];
        } else {
            const result = try alloc(allocator, new_size, alignment);
            mem.copy(u8, result, old_mem);
            return result;
        }
    }

    fn free(allocator: *Allocator, bytes: []u8) void {}
};

/// Returns an allocator that serves the first `size` bytes from the stack and
/// falls back to `fallback_allocator` beyond that. Call `get()` before use.
pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) StackFallbackAllocator(size) {
    return StackFallbackAllocator(size){
        .buffer = undefined,
        .fallback_allocator = fallback_allocator,
        .fixed_buffer_allocator = undefined,
        .allocator = Allocator{
            .allocFn = StackFallbackAllocator(size).alloc,
            .reallocFn = StackFallbackAllocator(size).realloc,
            .freeFn = StackFallbackAllocator(size).free,
        },
    };
}

pub fn StackFallbackAllocator(comptime size: usize) type {
    return struct {
        const Self = @This();

        buffer: [size]u8,
        allocator: Allocator,
        fallback_allocator: *Allocator,
        fixed_buffer_allocator: FixedBufferAllocator,

        pub fn get(self: *Self) *Allocator {
            self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
            return &self.allocator;
        }

        fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
            const self = @fieldParentPtr(Self, "allocator", allocator);
            return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator.allocator, n, alignment) catch
                self.fallback_allocator.allocFn(self.fallback_allocator, n, alignment);
        }

        fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
            const self = @fieldParentPtr(Self, "allocator", allocator);
            // A pointer inside `buffer` belongs to the fixed allocator;
            // anything else belongs to the fallback.
            const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
                @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
            if (in_buffer) {
                return FixedBufferAllocator.realloc(
                    &self.fixed_buffer_allocator.allocator,
                    old_mem,
                    new_size,
                    alignment,
                ) catch {
                    // Migrate from the stack buffer to the fallback allocator.
                    const result = try self.fallback_allocator.allocFn(
                        self.fallback_allocator,
                        new_size,
                        alignment,
                    );
                    mem.copy(u8, result, old_mem);
                    return result;
                };
            }
            return self.fallback_allocator.reallocFn(self.fallback_allocator, old_mem, new_size, alignment);
        }

        fn free(allocator: *Allocator, bytes: []u8) void {
            const self = @fieldParentPtr(Self, "allocator", allocator);
            const in_buffer = @ptrToInt(bytes.ptr) >= @ptrToInt(&self.buffer) and
                @ptrToInt(bytes.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
            if (!in_buffer) {
                return self.fallback_allocator.freeFn(self.fallback_allocator, bytes);
            }
        }
    };
}

test "c_allocator" {
    if (builtin.link_libc) {
        var slice = c_allocator.alloc(u8, 50) catch return;
        defer c_allocator.free(slice);
        slice = c_allocator.realloc(u8, slice, 100) catch return;
    }
}

test "DirectAllocator" {
    var direct_allocator = DirectAllocator.init();
    defer direct_allocator.deinit();

    const allocator = &direct_allocator.allocator;
    try testAllocator(allocator);
    try testAllocatorAligned(allocator, 16);
    try testAllocatorLargeAlignment(allocator);
}

test "ArenaAllocator" {
    var direct_allocator = DirectAllocator.init();
    defer direct_allocator.deinit();

    var arena_allocator = ArenaAllocator.init(&direct_allocator.allocator);
    defer arena_allocator.deinit();

    try testAllocator(&arena_allocator.allocator);
    try testAllocatorAligned(&arena_allocator.allocator, 16);
    try testAllocatorLargeAlignment(&arena_allocator.allocator);
}

var test_fixed_buffer_allocator_memory: [30000 * @sizeOf(usize)]u8 = undefined;
test "FixedBufferAllocator" {
    var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);

    try testAllocator(&fixed_buffer_allocator.allocator);
    try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
    try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
}

test "FixedBufferAllocator Reuse memory on realloc" {
    var small_fixed_buffer: [10]u8 = undefined;
    // check if we re-use the memory
    {
        var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);

        var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5);
        assert(slice0.len == 5);
        var slice1 = try fixed_buffer_allocator.allocator.realloc(u8, slice0, 10);
        assert(slice1.ptr == slice0.ptr);
        assert(slice1.len == 10);
        debug.assertError(fixed_buffer_allocator.allocator.realloc(u8, slice1, 11), error.OutOfMemory);
    }
    // check that we don't re-use the memory if it's not the most recent block
    {
        var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);

        var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
        slice0[0] = 1;
        slice0[1] = 2;
        var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
        var slice2 = try fixed_buffer_allocator.allocator.realloc(u8, slice0, 4);
        assert(slice0.ptr != slice2.ptr);
        assert(slice1.ptr != slice2.ptr);
        assert(slice2[0] == 1);
        assert(slice2[1] == 2);
    }
}

test "ThreadSafeFixedBufferAllocator" {
    var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);

    try testAllocator(&fixed_buffer_allocator.allocator);
    try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
    try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
}

/// Generic smoke test: alloc, grow, shrink to zero, regrow, free.
fn testAllocator(allocator: *mem.Allocator) !void {
    var slice = try allocator.alloc(*i32, 100);
    assert(slice.len == 100);
    for (slice) |*item, i| {
        item.* = try allocator.create(@intCast(i32, i));
    }

    slice = try allocator.realloc(*i32, slice, 20000);
    assert(slice.len == 20000);

    for (slice[0..100]) |item, i| {
        assert(item.* == @intCast(i32, i));
        allocator.destroy(item);
    }

    slice = try allocator.realloc(*i32, slice, 50);
    assert(slice.len == 50);
    slice = try allocator.realloc(*i32, slice, 25);
    assert(slice.len == 25);
    slice = try allocator.realloc(*i32, slice, 0);
    assert(slice.len == 0);
    slice = try allocator.realloc(*i32, slice, 10);
    assert(slice.len == 10);

    allocator.free(slice);
}

/// Exercises aligned alloc/realloc/shrink round trips at `alignment`.
fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !void {
    // initial
    var slice = try allocator.alignedAlloc(u8, alignment, 10);
    assert(slice.len == 10);
    // grow
    slice = try allocator.alignedRealloc(u8, alignment, slice, 100);
    assert(slice.len == 100);
    // shrink
    slice = try allocator.alignedRealloc(u8, alignment, slice, 10);
    assert(slice.len == 10);
    // go to zero
    slice = try allocator.alignedRealloc(u8, alignment, slice, 0);
    assert(slice.len == 0);
    // realloc from zero
    slice = try allocator.alignedRealloc(u8, alignment, slice, 100);
    assert(slice.len == 100);
    // shrink with shrink
    slice = allocator.alignedShrink(u8, alignment, slice, 10);
    assert(slice.len == 10);
    // shrink to zero
    slice = allocator.alignedShrink(u8, alignment, slice, 0);
    assert(slice.len == 0);
}

/// Verifies alignment stricter than a page (4 * page_size) is honored
/// through the raw allocFn/reallocFn interface.
fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
    // Maybe a platform's page_size is actually the same as or very near usize?
    if (os.page_size << 2 > maxInt(usize)) return;

    const USizeShift = @IntType(false, std.math.log2(usize.bit_count));
    const large_align = u29(os.page_size << 2);

    var align_mask: usize = undefined;
    _ = @shlWithOverflow(usize, ~usize(0), USizeShift(@ctz(large_align)), &align_mask);

    var slice = try allocator.allocFn(allocator, 500, large_align);
    debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = try allocator.reallocFn(allocator, slice, 100, large_align);
    debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = try allocator.reallocFn(allocator, slice, 5000, large_align);
    debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = try allocator.reallocFn(allocator, slice, 10, large_align);
    debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = try allocator.reallocFn(allocator, slice, 20000, large_align);
    debug.assert(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    allocator.free(slice);
}
// std/heap.zig — end of this file's excerpt in the concatenation.
const std = @import("../../../std.zig"); const pid_t = linux.pid_t; const uid_t = linux.uid_t; const clock_t = linux.clock_t; const stack_t = linux.stack_t; const sigset_t = linux.sigset_t; const linux = std.os.linux; const sockaddr = linux.sockaddr; const socklen_t = linux.socklen_t; const iovec = linux.iovec; const iovec_const = linux.iovec_const; pub const mode_t = usize; pub const SYS = extern enum(usize) { read = 0, write = 1, open = 2, close = 3, stat = 4, fstat = 5, lstat = 6, poll = 7, lseek = 8, mmap = 9, mprotect = 10, munmap = 11, brk = 12, rt_sigaction = 13, rt_sigprocmask = 14, rt_sigreturn = 15, ioctl = 16, pread = 17, pwrite = 18, readv = 19, writev = 20, access = 21, pipe = 22, select = 23, sched_yield = 24, mremap = 25, msync = 26, mincore = 27, madvise = 28, shmget = 29, shmat = 30, shmctl = 31, dup = 32, dup2 = 33, pause = 34, nanosleep = 35, getitimer = 36, alarm = 37, setitimer = 38, getpid = 39, sendfile = 40, socket = 41, connect = 42, accept = 43, sendto = 44, recvfrom = 45, sendmsg = 46, recvmsg = 47, shutdown = 48, bind = 49, listen = 50, getsockname = 51, getpeername = 52, socketpair = 53, setsockopt = 54, getsockopt = 55, clone = 56, fork = 57, vfork = 58, execve = 59, exit = 60, wait4 = 61, kill = 62, uname = 63, semget = 64, semop = 65, semctl = 66, shmdt = 67, msgget = 68, msgsnd = 69, msgrcv = 70, msgctl = 71, fcntl = 72, flock = 73, fsync = 74, fdatasync = 75, truncate = 76, ftruncate = 77, getdents = 78, getcwd = 79, chdir = 80, fchdir = 81, rename = 82, mkdir = 83, rmdir = 84, creat = 85, link = 86, unlink = 87, symlink = 88, readlink = 89, chmod = 90, fchmod = 91, chown = 92, fchown = 93, lchown = 94, umask = 95, gettimeofday = 96, getrlimit = 97, getrusage = 98, sysinfo = 99, times = 100, ptrace = 101, getuid = 102, syslog = 103, getgid = 104, setuid = 105, setgid = 106, geteuid = 107, getegid = 108, setpgid = 109, getppid = 110, getpgrp = 111, setsid = 112, setreuid = 113, setregid = 114, getgroups = 115, setgroups = 116, 
setresuid = 117, getresuid = 118, setresgid = 119, getresgid = 120, getpgid = 121, setfsuid = 122, setfsgid = 123, getsid = 124, capget = 125, capset = 126, rt_sigpending = 127, rt_sigtimedwait = 128, rt_sigqueueinfo = 129, rt_sigsuspend = 130, sigaltstack = 131, utime = 132, mknod = 133, uselib = 134, personality = 135, ustat = 136, statfs = 137, fstatfs = 138, sysfs = 139, getpriority = 140, setpriority = 141, sched_setparam = 142, sched_getparam = 143, sched_setscheduler = 144, sched_getscheduler = 145, sched_get_priority_max = 146, sched_get_priority_min = 147, sched_rr_get_interval = 148, mlock = 149, munlock = 150, mlockall = 151, munlockall = 152, vhangup = 153, modify_ldt = 154, pivot_root = 155, _sysctl = 156, prctl = 157, arch_prctl = 158, adjtimex = 159, setrlimit = 160, chroot = 161, sync = 162, acct = 163, settimeofday = 164, mount = 165, umount2 = 166, swapon = 167, swapoff = 168, reboot = 169, sethostname = 170, setdomainname = 171, iopl = 172, ioperm = 173, create_module = 174, init_module = 175, delete_module = 176, get_kernel_syms = 177, query_module = 178, quotactl = 179, nfsservctl = 180, getpmsg = 181, putpmsg = 182, afs_syscall = 183, tuxcall = 184, security = 185, gettid = 186, readahead = 187, setxattr = 188, lsetxattr = 189, fsetxattr = 190, getxattr = 191, lgetxattr = 192, fgetxattr = 193, listxattr = 194, llistxattr = 195, flistxattr = 196, removexattr = 197, lremovexattr = 198, fremovexattr = 199, tkill = 200, time = 201, futex = 202, sched_setaffinity = 203, sched_getaffinity = 204, set_thread_area = 205, io_setup = 206, io_destroy = 207, io_getevents = 208, io_submit = 209, io_cancel = 210, get_thread_area = 211, lookup_dcookie = 212, epoll_create = 213, epoll_ctl_old = 214, epoll_wait_old = 215, remap_file_pages = 216, getdents64 = 217, set_tid_address = 218, restart_syscall = 219, semtimedop = 220, fadvise64 = 221, timer_create = 222, timer_settime = 223, timer_gettime = 224, timer_getoverrun = 225, timer_delete = 226, clock_settime 
= 227, clock_gettime = 228, clock_getres = 229, clock_nanosleep = 230, exit_group = 231, epoll_wait = 232, epoll_ctl = 233, tgkill = 234, utimes = 235, vserver = 236, mbind = 237, set_mempolicy = 238, get_mempolicy = 239, mq_open = 240, mq_unlink = 241, mq_timedsend = 242, mq_timedreceive = 243, mq_notify = 244, mq_getsetattr = 245, kexec_load = 246, waitid = 247, add_key = 248, request_key = 249, keyctl = 250, ioprio_set = 251, ioprio_get = 252, inotify_init = 253, inotify_add_watch = 254, inotify_rm_watch = 255, migrate_pages = 256, openat = 257, mkdirat = 258, mknodat = 259, fchownat = 260, futimesat = 261, newfstatat = 262, fstatat = 262, unlinkat = 263, renameat = 264, linkat = 265, symlinkat = 266, readlinkat = 267, fchmodat = 268, faccessat = 269, pselect6 = 270, ppoll = 271, unshare = 272, set_robust_list = 273, get_robust_list = 274, splice = 275, tee = 276, sync_file_range = 277, vmsplice = 278, move_pages = 279, utimensat = 280, epoll_pwait = 281, signalfd = 282, timerfd_create = 283, eventfd = 284, fallocate = 285, timerfd_settime = 286, timerfd_gettime = 287, accept4 = 288, signalfd4 = 289, eventfd2 = 290, epoll_create1 = 291, dup3 = 292, pipe2 = 293, inotify_init1 = 294, preadv = 295, pwritev = 296, rt_tgsigqueueinfo = 297, perf_event_open = 298, recvmmsg = 299, fanotify_init = 300, fanotify_mark = 301, prlimit64 = 302, name_to_handle_at = 303, open_by_handle_at = 304, clock_adjtime = 305, syncfs = 306, sendmmsg = 307, setns = 308, getcpu = 309, process_vm_readv = 310, process_vm_writev = 311, kcmp = 312, finit_module = 313, sched_setattr = 314, sched_getattr = 315, renameat2 = 316, seccomp = 317, getrandom = 318, memfd_create = 319, kexec_file_load = 320, bpf = 321, execveat = 322, userfaultfd = 323, membarrier = 324, mlock2 = 325, copy_file_range = 326, preadv2 = 327, pwritev2 = 328, pkey_mprotect = 329, pkey_alloc = 330, pkey_free = 331, statx = 332, io_pgetevents = 333, rseq = 334, pidfd_send_signal = 424, io_uring_setup = 425, io_uring_enter = 
426, io_uring_register = 427, open_tree = 428, move_mount = 429, fsopen = 430, fsconfig = 431, fsmount = 432, fspick = 433, pidfd_open = 434, clone3 = 435, openat2 = 437, pidfd_getfd = 438, _, }; pub const O_CREAT = 0o100; pub const O_EXCL = 0o200; pub const O_NOCTTY = 0o400; pub const O_TRUNC = 0o1000; pub const O_APPEND = 0o2000; pub const O_NONBLOCK = 0o4000; pub const O_DSYNC = 0o10000; pub const O_SYNC = 0o4010000; pub const O_RSYNC = 0o4010000; pub const O_DIRECTORY = 0o200000; pub const O_NOFOLLOW = 0o400000; pub const O_CLOEXEC = 0o2000000; pub const O_ASYNC = 0o20000; pub const O_DIRECT = 0o40000; pub const O_LARGEFILE = 0; pub const O_NOATIME = 0o1000000; pub const O_PATH = 0o10000000; pub const O_TMPFILE = 0o20200000; pub const O_NDELAY = O_NONBLOCK; pub const F_DUPFD = 0; pub const F_GETFD = 1; pub const F_SETFD = 2; pub const F_GETFL = 3; pub const F_SETFL = 4; pub const F_SETOWN = 8; pub const F_GETOWN = 9; pub const F_SETSIG = 10; pub const F_GETSIG = 11; pub const F_GETLK = 5; pub const F_SETLK = 6; pub const F_SETLKW = 7; pub const F_SETOWN_EX = 15; pub const F_GETOWN_EX = 16; pub const F_GETOWNER_UIDS = 17; /// only give out 32bit addresses pub const MAP_32BIT = 0x40; /// stack-like segment pub const MAP_GROWSDOWN = 0x0100; /// ETXTBSY pub const MAP_DENYWRITE = 0x0800; /// mark it as an executable pub const MAP_EXECUTABLE = 0x1000; /// pages are locked pub const MAP_LOCKED = 0x2000; /// don't check for reservations pub const MAP_NORESERVE = 0x4000; pub const VDSO_CGT_SYM = "__vdso_clock_gettime"; pub const VDSO_CGT_VER = "LINUX_2.6"; pub const VDSO_GETCPU_SYM = "__vdso_getcpu"; pub const VDSO_GETCPU_VER = "LINUX_2.6"; pub const ARCH_SET_GS = 0x1001; pub const ARCH_SET_FS = 0x1002; pub const ARCH_GET_FS = 0x1003; pub const ARCH_GET_GS = 0x1004; pub const REG_R8 = 0; pub const REG_R9 = 1; pub const REG_R10 = 2; pub const REG_R11 = 3; pub const REG_R12 = 4; pub const REG_R13 = 5; pub const REG_R14 = 6; pub const REG_R15 = 7; pub const REG_RDI = 8; 
// Indices (0-22) of general-purpose registers within `gregset_t`
// (continues the REG_R8..REG_RDI sequence started above).
pub const REG_RSI = 9;
pub const REG_RBP = 10;
pub const REG_RBX = 11;
pub const REG_RDX = 12;
pub const REG_RAX = 13;
pub const REG_RCX = 14;
pub const REG_RSP = 15;
pub const REG_RIP = 16;
pub const REG_EFL = 17;
pub const REG_CSGSFS = 18;
pub const REG_ERR = 19;
pub const REG_TRAPNO = 20;
pub const REG_OLDMASK = 21;
pub const REG_CR2 = 22;

/// Message header for sendmsg/recvmsg. `extern struct` gives C ABI layout;
/// the `__pad` fields account for the kernel's 64-bit field widths on x86_64.
pub const msghdr = extern struct {
    msg_name: ?*sockaddr,
    msg_namelen: socklen_t,
    msg_iov: [*]iovec,
    msg_iovlen: i32,
    __pad1: i32,
    msg_control: ?*c_void,
    msg_controllen: socklen_t,
    __pad2: socklen_t,
    msg_flags: i32,
};

/// Const variant of `msghdr` for the send path (iovecs are read-only).
pub const msghdr_const = extern struct {
    msg_name: ?*const sockaddr,
    msg_namelen: socklen_t,
    msg_iov: [*]iovec_const,
    msg_iovlen: i32,
    __pad1: i32,
    msg_control: ?*c_void,
    msg_controllen: socklen_t,
    __pad2: socklen_t,
    msg_flags: i32,
};

pub const off_t = i64;
pub const ino_t = u64;

/// Renamed to Stat to not conflict with the stat function.
/// atime, mtime, and ctime have functions to return `timespec`,
/// because although this is a POSIX API, the layout and names of
/// the structs are inconsistent across operating systems, and
/// in C, macros are used to hide the differences. Here we use
/// methods to accomplish this.
pub const Stat = extern struct {
    dev: u64,
    ino: ino_t,
    nlink: usize,
    mode: u32,
    uid: u32,
    gid: u32,
    __pad0: u32,
    rdev: u64,
    size: off_t,
    blksize: isize,
    blocks: i64,
    atim: timespec,
    mtim: timespec,
    ctim: timespec,
    __unused: [3]isize,

    /// Last access time as a `timespec`.
    pub fn atime(self: Stat) timespec {
        return self.atim;
    }

    /// Last modification time as a `timespec`.
    pub fn mtime(self: Stat) timespec {
        return self.mtim;
    }

    /// Last status-change time as a `timespec`.
    pub fn ctime(self: Stat) timespec {
        return self.ctim;
    }
};

pub const timespec = extern struct {
    tv_sec: isize,
    tv_nsec: isize,
};

pub const timeval = extern struct {
    tv_sec: isize,
    tv_usec: isize,
};

pub const timezone = extern struct {
    tz_minuteswest: i32,
    tz_dsttime: i32,
};

pub const Elf_Symndx = u32;

pub const greg_t = usize;
// 23 slots, addressed by the REG_* indices above (REG_R8 = 0 .. REG_CR2 = 22).
pub const gregset_t = [23]greg_t;

/// FPU/SSE state as saved in a signal frame. NOTE(review): layout appears to
/// mirror the kernel's `_fpstate` for x86_64 — confirm against kernel headers
/// before relying on individual field offsets.
pub const fpstate = extern struct {
    cwd: u16,
    swd: u16,
    ftw: u16,
    fop: u16,
    rip: usize,
    rdp: usize,
    mxcsr: u32,
    mxcr_mask: u32,
    // Eight 80-bit x87 registers, each padded to 16 bytes.
    st: [8]extern struct {
        significand: [4]u16,
        exponent: u16,
        padding: [3]u16 = undefined,
    },
    xmm: [16]extern struct {
        element: [4]u32,
    },
    padding: [24]u32 = undefined,
};
pub const fpregset_t = *fpstate;

/// CPU state at the time a signal was delivered.
pub const sigcontext = extern struct {
    r8: usize,
    r9: usize,
    r10: usize,
    r11: usize,
    r12: usize,
    r13: usize,
    r14: usize,
    r15: usize,
    rdi: usize,
    rsi: usize,
    rbp: usize,
    rbx: usize,
    rdx: usize,
    rax: usize,
    rcx: usize,
    rsp: usize,
    rip: usize,
    eflags: usize,
    cs: u16,
    gs: u16,
    fs: u16,
    pad0: u16 = undefined,
    err: usize,
    trapno: usize,
    oldmask: usize,
    cr2: usize,
    fpstate: *fpstate,
    reserved1: [8]usize = undefined,
};

pub const mcontext_t = extern struct {
    gregs: gregset_t,
    fpregs: fpregset_t,
    reserved1: [8]usize = undefined,
};

/// User context for signal handlers installed with SA_SIGINFO.
pub const ucontext_t = extern struct {
    flags: usize,
    link: *ucontext_t,
    stack: stack_t,
    mcontext: mcontext_t,
    sigmask: sigset_t,
    fpregs_mem: [64]usize,
};
lib/std/os/bits/linux/x86_64.zig
const std = @import("std");

pub usingnamespace @import("../common.zig");

// This is only used on buffers with an LF delimiter, so only CR needs to be checked
// LF is a valid line ending (RFC 7230, Section 3.5)
/// Strips a single trailing CR from `buffer` (i.e. turns a CRLF-delimited line
/// that was split on LF back into its bare content). Returns the buffer
/// unchanged when it is empty or does not end in CR.
pub fn normalizeLineEnding(buffer: []const u8) callconv(.Inline) []const u8 {
    // Guard against an empty line (e.g. the blank line that terminates an
    // HTTP header block after splitting on LF): without this check,
    // `buffer.len - 1` underflows and panics.
    if (buffer.len == 0) return buffer;
    if (buffer[buffer.len - 1] == '\r') return buffer[0 .. buffer.len - 1];
    return buffer;
}

/// Phase of the HTTP message currently being parsed.
pub const ParserState = enum {
    start_line,
    header,
    body,
};

/// Compares two of any type for equality. Containers are compared on a field-by-field basis,
/// where possible. Pointers are not followed. Slices are compared by contents.
pub fn reworkedMetaEql(a: anytype, b: @TypeOf(a)) bool {
    const T = @TypeOf(a);
    switch (@typeInfo(T)) {
        .Struct => |info| {
            // Recurse into every field; short-circuit on the first mismatch.
            inline for (info.fields) |field_info| {
                if (!reworkedMetaEql(@field(a, field_info.name), @field(b, field_info.name))) return false;
            }
            return true;
        },
        .ErrorUnion => {
            // Equal iff both are payloads that compare equal, or both are the
            // same error value.
            if (a) |a_p| {
                if (b) |b_p| return reworkedMetaEql(a_p, b_p) else |_| return false;
            } else |a_e| {
                if (b) |_| return false else |b_e| return a_e == b_e;
            }
        },
        .Union => |info| {
            if (info.tag_type) |Tag| {
                // Tags must match first; then compare the single active payload.
                const tag_a = std.meta.activeTag(a);
                const tag_b = std.meta.activeTag(b);
                if (tag_a != tag_b) return false;
                inline for (info.fields) |field_info| {
                    if (@field(Tag, field_info.name) == tag_a) {
                        return reworkedMetaEql(@field(a, field_info.name), @field(b, field_info.name));
                    }
                }
                return false;
            }
            @compileError("cannot compare untagged union type " ++ @typeName(T));
        },
        .Array => {
            if (a.len != b.len) return false;
            for (a) |e, i| if (!reworkedMetaEql(e, b[i])) return false;
            return true;
        },
        .Vector => |info| {
            var i: usize = 0;
            while (i < info.len) : (i += 1) {
                if (!reworkedMetaEql(a[i], b[i])) return false;
            }
            return true;
        },
        .Pointer => |info| {
            return switch (info.size) {
                // Non-slice pointers compare by address; they are not followed.
                .One, .Many, .C => a == b,
                // Slices compare by contents (unlike std.meta.eql's ptr+len).
                .Slice => std.mem.eql(info.child, a, b),
            };
        },
        .Optional => {
            if (a == null and b == null) return true;
            if (a == null or b == null) return false;
            return reworkedMetaEql(a.?, b.?);
        },
        else => return a == b,
    }
}
src/parser/common.zig
const __fixsfdi = @import("fixsfdi.zig").__fixsfdi;
const std = @import("std");
const math = std.math;
const testing = std.testing;
const warn = std.debug.warn;

/// Assert that converting the f32 `a` to i64 via __fixsfdi yields `expected`.
fn test__fixsfdi(a: f32, expected: i64) void {
    const x = __fixsfdi(a);
    //warn("a={}:{x} x={}:{x} expected={}:{x}:@as(u32, {x})\n", a, @bitCast(u32, a), x, x, expected, expected, @bitCast(u64, expected));
    testing.expect(x == expected);
}

test "fixsfdi" {
    //warn("\n");
    // Values at or below -2^63 saturate to minInt(i64).
    test__fixsfdi(-math.f32_max, math.minInt(i64));
    test__fixsfdi(-0x1.FFFFFFFFFFFFFp+1023, math.minInt(i64));
    test__fixsfdi(-0x1.FFFFFFFFFFFFFp+1023, -0x8000000000000000);
    test__fixsfdi(-0x1.0000000000000p+127, -0x8000000000000000);
    test__fixsfdi(-0x1.FFFFFFFFFFFFFp+126, -0x8000000000000000);
    test__fixsfdi(-0x1.FFFFFFFFFFFFEp+126, -0x8000000000000000);
    test__fixsfdi(-0x1.0000000000001p+63, -0x8000000000000000);
    test__fixsfdi(-0x1.0000000000000p+63, -0x8000000000000000);
    test__fixsfdi(-0x1.FFFFFFFFFFFFFp+62, -0x8000000000000000);
    test__fixsfdi(-0x1.FFFFFFFFFFFFEp+62, -0x8000000000000000);
    test__fixsfdi(-0x1.FFFFFFp+62, -0x8000000000000000);
    // Largest magnitudes exactly representable in f32 below the bound.
    test__fixsfdi(-0x1.FFFFFEp+62, -0x7fffff8000000000);
    test__fixsfdi(-0x1.FFFFFCp+62, -0x7fffff0000000000);
    // Conversion truncates toward zero.
    test__fixsfdi(-2.01, -2);
    test__fixsfdi(-2.0, -2);
    test__fixsfdi(-1.99, -1);
    test__fixsfdi(-1.0, -1);
    test__fixsfdi(-0.99, 0);
    test__fixsfdi(-0.5, 0);
    test__fixsfdi(-math.f32_min, 0);
    test__fixsfdi(0.0, 0);
    test__fixsfdi(math.f32_min, 0);
    test__fixsfdi(0.5, 0);
    test__fixsfdi(0.99, 0);
    test__fixsfdi(1.0, 1);
    test__fixsfdi(1.5, 1);
    test__fixsfdi(1.99, 1);
    test__fixsfdi(2.0, 2);
    test__fixsfdi(2.01, 2);
    test__fixsfdi(0x1.FFFFFCp+62, 0x7FFFFF0000000000);
    test__fixsfdi(0x1.FFFFFEp+62, 0x7FFFFF8000000000);
    // Values at or above 2^63 saturate to maxInt(i64).
    test__fixsfdi(0x1.FFFFFFp+62, 0x7FFFFFFFFFFFFFFF);
    test__fixsfdi(0x1.FFFFFFFFFFFFEp+62, 0x7FFFFFFFFFFFFFFF);
    test__fixsfdi(0x1.FFFFFFFFFFFFFp+62, 0x7FFFFFFFFFFFFFFF);
    test__fixsfdi(0x1.0000000000000p+63, 0x7FFFFFFFFFFFFFFF);
    test__fixsfdi(0x1.0000000000001p+63, 0x7FFFFFFFFFFFFFFF);
    test__fixsfdi(0x1.FFFFFFFFFFFFEp+126, 0x7FFFFFFFFFFFFFFF);
    test__fixsfdi(0x1.FFFFFFFFFFFFFp+126, 0x7FFFFFFFFFFFFFFF);
    test__fixsfdi(0x1.0000000000000p+127, 0x7FFFFFFFFFFFFFFF);
    test__fixsfdi(0x1.FFFFFFFFFFFFFp+1023, 0x7FFFFFFFFFFFFFFF);
    test__fixsfdi(0x1.FFFFFFFFFFFFFp+1023, math.maxInt(i64));
    // This is an f32 test, so the positive extreme must mirror the
    // `-math.f32_max` case at the top (f64_max is not representable in f32).
    test__fixsfdi(math.f32_max, math.maxInt(i64));
}
lib/std/special/compiler_rt/fixsfdi_test.zig
const std = @import("std");
const builtin = @import("builtin");
const fs = std.fs;
const io = std.io;
const mem = std.mem;
const process = std.process;

const Archive = @import("archive/Archive.zig");

const overview =
    \\Zig Archiver
    \\
    \\Usage: zar [options] [-]<operation>[modifiers] [relpos] [count] <archive> [files]
    \\
    \\Options:
    \\ --format=<type>
    \\ Can be default, gnu, darwin or bsd. This determines the format used to serialise an archive, this is ignored when parsing archives as type there is always inferred. When creating an archive the host machine is used to infer <type> if one is not specified.
    \\ --version
    \\ Print program version details and exit.
    \\ -h, --help
    \\ Print (this) help text and exit.
    \\
    \\Ignored for compatability:
    \\ --plugin=<string>
    \\
    \\Operations:
    \\ r - replace/insert [files] in <archive>, create archive if it does not exist.
    \\ d - delete [files] from <archive>.
    \\ m - move [files] in <archive>.
    \\ p - print contents of files in <archive>.
    \\ q - quick append [files] to <archive>.
    \\ s - act as ranlib.
    \\ t - display filenames in <archive>.
    \\ x - extract [files] from <archive>.
    \\ S - show symbols in the <archive>.
    \\
    \\Modifiers:
    \\ c - Disable archive creation warning if inserting files to new archive.
    \\ u - Only update archive contents if [files] have more recent timestamps than it.
    \\ D - Use zero for timestamps, GIDs and UIDs in archived files (enabled by default).
    \\ U - Use real timestamps, GIDS and UIDs for archived files.
    \\ v - Print verbose output, depending on opertion:
    \\ S: show file names that symbols belong to.
    \\
    \\Note, in the case of conflicting modifiers, the last one listed always takes precedence.
    \\
;

const version = "0.0.0";

const version_details =
    \\zar (https://github.com/moosichu/zar):
    \\ zar version {s}
    \\ {s} build
    \\ default archive type: {s}
    \\ host: {s}-{s}-{s}
    \\
;

/// Print `errorString` prefixed with "error: " followed by the usage overview.
fn printError(stderr: anytype, comptime errorString: []const u8) !void {
    try stderr.print("error: " ++ errorString ++ "\n", .{});
    try stderr.print(overview, .{});
}

/// Returns false (after printing a usage error) if `index` is past the end of
/// `args`, i.e. a required positional argument is missing.
fn checkArgsBounds(stderr: anytype, args: anytype, index: u32) !bool {
    if (index >= args.len) {
        try printError(stderr, "an archive must be specified");
        return false;
    }
    return true;
}

/// Open `archive_path` read/write, creating it (with an optional warning) when
/// it does not exist yet. Caller owns the returned file handle.
fn openOrCreateFile(archive_path: []u8, stderr: fs.File.Writer, print_creation_warning: bool) !fs.File {
    const open_file_handle = fs.cwd().openFile(archive_path, .{ .write = true }) catch |err| switch (err) {
        error.FileNotFound => {
            if (print_creation_warning) {
                try stderr.print("Warning: creating new archive as none exists at path provided\n", .{});
            }
            const create_file_handle = try fs.cwd().createFile(archive_path, .{ .read = true });
            return create_file_handle;
        },
        else => return err,
    };
    return open_file_handle;
}

pub fn main() anyerror!void {
    // Arena-backed allocation for the whole run; everything is freed at exit.
    // (page_allocator rather than std.testing.allocator — the testing
    // allocator is only meant for test blocks.)
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();

    var allocator = &arena.allocator;
    const args = try process.argsAlloc(allocator);

    const stdout = io.getStdOut().writer();
    const stderr = io.getStdErr().writer();

    // Index 1: skip the executable name.
    var arg_index: u32 = 1;

    var archive_type = Archive.ArchiveType.ambiguous;

    // Process Options First
    var keep_processing_current_option = true;
    while (keep_processing_current_option) {
        if (!try checkArgsBounds(stderr, args, arg_index)) {
            return;
        }
        keep_processing_current_option = false;
        var current_arg = args[arg_index];

        {
            // TODO: Make sure an arg doesn't show up twice!
            const format_string_prefix = "--format=";
            const plugin_string_prefix = "--plugin=";
            const help_string = "--help";
            const help_shortcut = "-h";
            const version_string = "--version";
            if (mem.startsWith(u8, current_arg, format_string_prefix)) {
                // TODO: Handle format option!
                keep_processing_current_option = true;
                const format_string = current_arg[format_string_prefix.len..];
                if (mem.eql(u8, format_string, "default")) {
                    // do nothing
                } else if (mem.eql(u8, format_string, "bsd")) {
                    archive_type = .bsd;
                } else if (mem.eql(u8, format_string, "darwin")) {
                    archive_type = .bsd;
                } else if (mem.eql(u8, format_string, "gnu")) {
                    archive_type = .gnu;
                } else {
                    // TODO: do an actual error here!
                    return error.TODO;
                }
                arg_index = arg_index + 1;
                continue;
            } else if (mem.startsWith(u8, current_arg, plugin_string_prefix)) {
                // --plugin is accepted (for ar compatibility) but ignored.
                keep_processing_current_option = true;
                arg_index = arg_index + 1;
                continue;
            } else if (mem.eql(u8, current_arg, help_string) or mem.eql(u8, current_arg, help_shortcut)) {
                try stdout.print(overview, .{});
                return;
            } else if (mem.eql(u8, current_arg, version_string)) {
                // TODO: calculate build, archive type & host!
                const target = builtin.target;
                const default_archive_type = @tagName(Archive.getDefaultArchiveTypeFromHost());
                try stdout.print(version_details, .{ version, @tagName(builtin.mode), default_archive_type, @tagName(target.cpu.arch), @tagName(target.os.tag), @tagName(target.abi) });
                return;
            }
        }
    }

    if (!try checkArgsBounds(stderr, args, arg_index)) {
        return;
    }

    const operation_slice = slice: {
        // the operation may start with a hyphen - so slice it!
        var arg_slice = args[arg_index][0..args[arg_index].len];
        if (arg_slice[0] == '-') {
            if (arg_slice.len == 1) {
                try printError(stderr, "a valid operation must be provided - only hyphen found");
                return;
            }
            arg_slice = arg_slice[1..arg_slice.len];
        }
        break :slice arg_slice;
    };

    // Process Operation: the first character selects the operation; the
    // letters match the help text above (and classic ar).
    const operation = operation: {
        switch (operation_slice[0]) {
            'r' => break :operation Archive.Operation.insert,
            'd' => break :operation Archive.Operation.delete,
            'm' => break :operation Archive.Operation.move,
            'p' => break :operation Archive.Operation.print_contents,
            // BUGFIX: quick append is documented (and spelled in ar) as 'q',
            // not 'w'.
            'q' => break :operation Archive.Operation.quick_append,
            's' => break :operation Archive.Operation.ranlib,
            't' => break :operation Archive.Operation.print_names,
            'x' => break :operation Archive.Operation.extract,
            'S' => break :operation Archive.Operation.print_symbols,
            else => {
                try printError(stderr, "a valid operation must be provided");
                return;
            },
        }
    };

    // Remaining characters of the operation word are modifiers; the last
    // occurrence of conflicting modifiers wins (e.g. 'U' after 'D').
    var modifiers: Archive.Modifiers = .{};
    if (operation_slice.len > 1) {
        const modifier_slice = operation_slice[1..];
        for (modifier_slice) |modifier_char| {
            switch (modifier_char) {
                'c' => modifiers.create = true,
                'u' => modifiers.update_only = true,
                'U' => modifiers.use_real_timestamps_and_ids = true,
                'D' => modifiers.use_real_timestamps_and_ids = false,
                'v' => modifiers.verbose = true,
                // TODO: should we print warning with unknown modifier?
                else => {},
            }
        }
    }

    arg_index = arg_index + 1;

    if (!try checkArgsBounds(stderr, args, arg_index)) {
        return;
    }

    // TODO: Process [relpos]
    // TODO: Process [count]

    const archive_path = args[arg_index];
    arg_index = arg_index + 1;

    // Everything after the archive path is the file list (possibly empty).
    const files = file_result: {
        if (args.len > arg_index) {
            break :file_result args[arg_index..args.len];
        }
        const empty = [_][:0]u8{};
        break :file_result &empty;
    };

    switch (operation) {
        .insert => {
            const file = try openOrCreateFile(archive_path, stderr, !modifiers.create);
            defer file.close();

            var archive = try Archive.create(file, archive_path, archive_type, modifiers);
            if (archive.parse(allocator, stderr)) {
                try archive.insertFiles(allocator, files);
                try archive.finalize(allocator);
            } else |err| switch (err) {
                // These are errors we know how to handle
                error.NotArchive => {
                    // archive.parse prints appropriate errors for these messages
                    return;
                },
                else => return err,
            }
        },
        .delete => {
            const file = try openOrCreateFile(archive_path, stderr, !modifiers.create);
            defer file.close();

            var archive = try Archive.create(file, archive_path, archive_type, modifiers);
            // NOTE(review): unlike the other operations this propagates
            // error.NotArchive instead of returning cleanly — confirm whether
            // that asymmetry is intentional.
            if (archive.parse(allocator, stderr)) {
                try archive.deleteFiles(files);
                try archive.finalize(allocator);
            } else |err| return err;
        },
        .print_names => {
            const file = try fs.cwd().openFile(archive_path, .{});
            defer file.close();

            var archive = try Archive.create(file, archive_path, archive_type, modifiers);
            if (archive.parse(allocator, stderr)) {
                for (archive.files.items) |parsed_file| {
                    try stdout.print("{s}\n", .{parsed_file.name});
                }
            } else |err| switch (err) {
                // These are errors we know how to handle
                error.NotArchive => {
                    // archive.parse prints appropriate errors for these messages
                    return;
                },
                else => return err,
            }
        },
        .print_contents => {
            const file = try fs.cwd().openFile(archive_path, .{});
            defer file.close();

            var archive = try Archive.create(file, archive_path, archive_type, modifiers);
            if (archive.parse(allocator, stderr)) {
                for (archive.files.items) |parsed_file| {
                    try parsed_file.contents.write(stdout, stderr);
                }
            } else |err| switch (err) {
                // These are errors we know how to handle
                error.NotArchive => {
                    // archive.parse prints appropriate errors for these messages
                    return;
                },
                else => return err,
            }
        },
        .print_symbols => {
            const file = try fs.cwd().openFile(archive_path, .{});
            defer file.close();

            var archive = try Archive.create(file, archive_path, archive_type, modifiers);
            if (archive.parse(allocator, stderr)) {
                for (archive.symbols.items) |symbol| {
                    if (modifiers.verbose) {
                        // Verbose mode prefixes each symbol with the member
                        // file it belongs to ("?" when the offset is unknown).
                        if (archive.file_offset_to_index.get(symbol.file_offset)) |file_index| {
                            try stdout.print("{s}: {s}\n", .{ archive.files.items[file_index].name, symbol.name });
                        } else {
                            try stdout.print("?: {s}\n", .{symbol.name});
                        }
                    } else {
                        try stdout.print("{s}\n", .{symbol.name});
                    }
                }
            } else |err| switch (err) {
                // These are errors we know how to handle
                error.NotArchive => {
                    // archive.parse prints appropriate errors for these messages
                    return;
                },
                else => return err,
            }
        },
        else => {
            std.debug.warn("Operation {} still needs to be implemented!\n", .{operation});
            return error.TODO;
        },
    }
}
src/main.zig
const MachO = @This(); const std = @import("std"); const build_options = @import("build_options"); const builtin = @import("builtin"); const assert = std.debug.assert; const fmt = std.fmt; const fs = std.fs; const log = std.log.scoped(.link); const macho = std.macho; const math = std.math; const mem = std.mem; const meta = std.meta; const aarch64 = @import("../codegen/aarch64.zig"); const bind = @import("MachO/bind.zig"); const codegen = @import("../codegen.zig"); const commands = @import("MachO/commands.zig"); const link = @import("../link.zig"); const llvm_backend = @import("../codegen/llvm.zig"); const target_util = @import("../target.zig"); const trace = @import("../tracy.zig").trace; const Air = @import("../Air.zig"); const Allocator = mem.Allocator; const Archive = @import("MachO/Archive.zig"); const Cache = @import("../Cache.zig"); const CodeSignature = @import("MachO/CodeSignature.zig"); const Compilation = @import("../Compilation.zig"); const DebugSymbols = @import("MachO/DebugSymbols.zig"); const Dylib = @import("MachO/Dylib.zig"); const File = link.File; const Object = @import("MachO/Object.zig"); const LibStub = @import("tapi.zig").LibStub; const Liveness = @import("../Liveness.zig"); const LlvmObject = @import("../codegen/llvm.zig").Object; const LoadCommand = commands.LoadCommand; const Module = @import("../Module.zig"); const SegmentCommand = commands.SegmentCommand; const StringIndexAdapter = std.hash_map.StringIndexAdapter; const StringIndexContext = std.hash_map.StringIndexContext; pub const TextBlock = @import("MachO/TextBlock.zig"); const Trie = @import("MachO/Trie.zig"); pub const base_tag: File.Tag = File.Tag.macho; base: File, /// If this is not null, an object file is created by LLVM and linked with LLD afterwards. llvm_object: ?*LlvmObject = null, /// Debug symbols bundle (or dSym). d_sym: ?DebugSymbols = null, /// Page size is dependent on the target cpu architecture. /// For x86_64 that's 4KB, whereas for aarch64, that's 16KB. 
page_size: u16, /// TODO Should we figure out embedding code signatures for other Apple platforms as part of the linker? /// Or should this be a separate tool? /// https://github.com/ziglang/zig/issues/9567 requires_adhoc_codesig: bool, /// We commit 0x1000 = 4096 bytes of space to the header and /// the table of load commands. This should be plenty for any /// potential future extensions. header_pad: u16 = 0x1000, /// The absolute address of the entry point. entry_addr: ?u64 = null, objects: std.ArrayListUnmanaged(Object) = .{}, archives: std.ArrayListUnmanaged(Archive) = .{}, dylibs: std.ArrayListUnmanaged(Dylib) = .{}, dylibs_map: std.StringHashMapUnmanaged(u16) = .{}, referenced_dylibs: std.AutoArrayHashMapUnmanaged(u16, void) = .{}, load_commands: std.ArrayListUnmanaged(LoadCommand) = .{}, pagezero_segment_cmd_index: ?u16 = null, text_segment_cmd_index: ?u16 = null, data_const_segment_cmd_index: ?u16 = null, data_segment_cmd_index: ?u16 = null, linkedit_segment_cmd_index: ?u16 = null, dyld_info_cmd_index: ?u16 = null, symtab_cmd_index: ?u16 = null, dysymtab_cmd_index: ?u16 = null, dylinker_cmd_index: ?u16 = null, data_in_code_cmd_index: ?u16 = null, function_starts_cmd_index: ?u16 = null, main_cmd_index: ?u16 = null, dylib_id_cmd_index: ?u16 = null, source_version_cmd_index: ?u16 = null, build_version_cmd_index: ?u16 = null, uuid_cmd_index: ?u16 = null, code_signature_cmd_index: ?u16 = null, /// Path to libSystem /// TODO this is obsolete, remove it. 
libsystem_cmd_index: ?u16 = null, // __TEXT segment sections text_section_index: ?u16 = null, stubs_section_index: ?u16 = null, stub_helper_section_index: ?u16 = null, text_const_section_index: ?u16 = null, cstring_section_index: ?u16 = null, ustring_section_index: ?u16 = null, gcc_except_tab_section_index: ?u16 = null, unwind_info_section_index: ?u16 = null, eh_frame_section_index: ?u16 = null, objc_methlist_section_index: ?u16 = null, objc_methname_section_index: ?u16 = null, objc_methtype_section_index: ?u16 = null, objc_classname_section_index: ?u16 = null, // __DATA_CONST segment sections got_section_index: ?u16 = null, mod_init_func_section_index: ?u16 = null, mod_term_func_section_index: ?u16 = null, data_const_section_index: ?u16 = null, objc_cfstring_section_index: ?u16 = null, objc_classlist_section_index: ?u16 = null, objc_imageinfo_section_index: ?u16 = null, // __DATA segment sections tlv_section_index: ?u16 = null, tlv_data_section_index: ?u16 = null, tlv_bss_section_index: ?u16 = null, la_symbol_ptr_section_index: ?u16 = null, data_section_index: ?u16 = null, bss_section_index: ?u16 = null, common_section_index: ?u16 = null, objc_const_section_index: ?u16 = null, objc_selrefs_section_index: ?u16 = null, objc_classrefs_section_index: ?u16 = null, objc_data_section_index: ?u16 = null, locals: std.ArrayListUnmanaged(macho.nlist_64) = .{}, globals: std.ArrayListUnmanaged(macho.nlist_64) = .{}, undefs: std.ArrayListUnmanaged(macho.nlist_64) = .{}, symbol_resolver: std.AutoHashMapUnmanaged(u32, SymbolWithLoc) = .{}, locals_free_list: std.ArrayListUnmanaged(u32) = .{}, globals_free_list: std.ArrayListUnmanaged(u32) = .{}, stub_helper_stubs_start_off: ?u64 = null, strtab: std.ArrayListUnmanaged(u8) = .{}, strtab_dir: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{}, got_entries: std.ArrayListUnmanaged(GotIndirectionKey) = .{}, got_entries_map: std.AutoHashMapUnmanaged(GotIndirectionKey, u32) = .{}, 
got_entries_free_list: std.ArrayListUnmanaged(u32) = .{}, stubs: std.ArrayListUnmanaged(u32) = .{}, stubs_map: std.AutoHashMapUnmanaged(u32, u32) = .{}, error_flags: File.ErrorFlags = File.ErrorFlags{}, got_entries_count_dirty: bool = false, load_commands_dirty: bool = false, rebase_info_dirty: bool = false, binding_info_dirty: bool = false, lazy_binding_info_dirty: bool = false, export_info_dirty: bool = false, strtab_dirty: bool = false, strtab_needs_relocation: bool = false, has_dices: bool = false, has_stabs: bool = false, section_ordinals: std.AutoArrayHashMapUnmanaged(MatchingSection, void) = .{}, pending_updates: std.ArrayListUnmanaged(struct { kind: enum { got, stub, }, index: u32, }) = .{}, /// A list of text blocks that have surplus capacity. This list can have false /// positives, as functions grow and shrink over time, only sometimes being added /// or removed from the freelist. /// /// A text block has surplus capacity when its overcapacity value is greater than /// padToIdeal(minimum_text_block_size). That is, when it has so /// much extra capacity, that we could fit a small new symbol in it, itself with /// ideal_capacity or more. /// /// Ideal capacity is defined by size + (size / ideal_factor). /// /// Overcapacity is measured by actual_capacity - ideal_capacity. Note that /// overcapacity can be negative. A simple way to have negative overcapacity is to /// allocate a fresh text block, which will have ideal capacity, and then grow it /// by 1 byte. It will then have -1 overcapacity. text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = .{}, /// Pointer to the last allocated text block last_text_block: ?*TextBlock = null, /// List of TextBlocks that are owned directly by the linker. /// Currently these are only TextBlocks that are the result of linking /// object files. TextBlock which take part in incremental linking are /// at present owned by Module.Decl. /// TODO consolidate this. 
managed_blocks: std.ArrayListUnmanaged(*TextBlock) = .{}, blocks: std.AutoHashMapUnmanaged(MatchingSection, *TextBlock) = .{}, /// Table of Decls that are currently alive. /// We store them here so that we can properly dispose of any allocated /// memory within the TextBlock in the incremental linker. /// TODO consolidate this. decls: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{}, /// Currently active Module.Decl. /// TODO this might not be necessary if we figure out how to pass Module.Decl instance /// to codegen.genSetReg() or alterntively move PIE displacement for MCValue{ .memory = x } /// somewhere else in the codegen. active_decl: ?*Module.Decl = null, const SymbolWithLoc = struct { // Table where the symbol can be found. where: enum { global, undef, }, where_index: u32, local_sym_index: u32 = 0, file: u16 = 0, }; pub const GotIndirectionKey = struct { where: enum { local, undef, }, where_index: u32, }; /// When allocating, the ideal_capacity is calculated by /// actual_capacity + (actual_capacity / ideal_factor) const ideal_factor = 2; /// Default path to dyld /// TODO instead of hardcoding it, we should probably look through some env vars and search paths /// instead but this will do for now. const DEFAULT_DYLD_PATH: [*:0]const u8 = "/usr/lib/dyld"; /// Default lib search path /// TODO instead of hardcoding it, we should probably look through some env vars and search paths /// instead but this will do for now. const DEFAULT_LIB_SEARCH_PATH: []const u8 = "/usr/lib"; const LIB_SYSTEM_NAME: [*:0]const u8 = "System"; /// TODO we should search for libSystem and fail if it doesn't exist, instead of hardcoding it const LIB_SYSTEM_PATH: [*:0]const u8 = DEFAULT_LIB_SEARCH_PATH ++ "/libSystem.B.dylib"; /// In order for a slice of bytes to be considered eligible to keep metadata pointing at /// it as a possible place to put new symbols, it must have enough room for this many bytes /// (plus extra for reserved capacity). 
const minimum_text_block_size = 64;
pub const min_text_capacity = padToIdeal(minimum_text_block_size);

/// Per-Decl export bookkeeping; `sym_index` is null until a symbol is assigned.
pub const Export = struct {
    sym_index: ?u32 = null,
};

/// Tracks the portion of the DWARF line number program that belongs to a single
/// function, as a doubly linked list ordered by offset in `.debug_line`.
pub const SrcFn = struct {
    /// Offset from the beginning of the Debug Line Program header that contains this function.
    off: u32,
    /// Size of the line number program component belonging to this function, not
    /// including padding.
    len: u32,

    /// Points to the previous and next neighbors, based on the offset from .debug_line.
    /// This can be used to find, for example, the capacity of this `SrcFn`.
    prev: ?*SrcFn,
    next: ?*SrcFn,

    pub const empty: SrcFn = .{
        .off = 0,
        .len = 0,
        .prev = null,
        .next = null,
    };
};

/// Creates the output file at `sub_path` inside the emit directory (without
/// truncating an existing file, so incremental updates can reuse it) and returns
/// a heap-allocated `MachO` instance that owns the file handle.
/// When LLVM is available and requested, no output file is opened here; an
/// `LlvmObject` is created instead. For non-static-lib outputs this also seeds
/// the null symbol at index 0 and, unless stripped, creates the dSYM bundle.
pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*MachO {
    assert(options.object_format == .macho);

    if (build_options.have_llvm and options.use_llvm) {
        // LLVM backend: defer all file emission to the LlvmObject.
        const self = try createEmpty(allocator, options);
        errdefer self.base.destroy();

        self.llvm_object = try LlvmObject.create(allocator, options);
        return self;
    }

    const file = try options.emit.?.directory.handle.createFile(sub_path, .{
        .truncate = false,
        .read = true,
        .mode = link.determineMode(options),
    });
    errdefer file.close();

    const self = try createEmpty(allocator, options);
    errdefer {
        // Detach the file first so destroy() does not double-close it; the
        // outer errdefer owns closing `file`.
        self.base.file = null;
        self.base.destroy();
    }

    self.base.file = file;

    if (options.output_mode == .Lib and options.link_mode == .Static) {
        return self;
    }

    if (!options.strip and options.module != null) {
        // Create dSYM bundle.
        const dir = options.module.?.zig_cache_artifact_directory;
        log.debug("creating {s}.dSYM bundle in {s}", .{ sub_path, dir.path });

        const d_sym_path = try fmt.allocPrint(
            allocator,
            "{s}.dSYM" ++ fs.path.sep_str ++ "Contents" ++ fs.path.sep_str ++ "Resources" ++ fs.path.sep_str ++ "DWARF",
            .{sub_path},
        );
        defer allocator.free(d_sym_path);

        var d_sym_bundle = try dir.handle.makeOpenPath(d_sym_path, .{});
        defer d_sym_bundle.close();

        const d_sym_file = try d_sym_bundle.createFile(sub_path, .{
            .truncate = false,
            .read = true,
        });

        self.d_sym = .{
            .base = self,
            .file = d_sym_file,
        };
    }

    // Index 0 is always a null symbol.
    try self.locals.append(allocator, .{
        .n_strx = 0,
        .n_type = 0,
        .n_sect = 0,
        .n_desc = 0,
        .n_value = 0,
    });

    try self.populateMissingMetadata();
    try self.writeLocalSymbol(0);

    if (self.d_sym) |*ds| {
        // Mirror the null symbol into the debug-symbols bundle.
        try ds.populateMissingMetadata(allocator);
        try ds.writeLocalSymbol(0);
    }

    return self;
}

/// Allocates and initializes a `MachO` with no file attached. The page size is
/// chosen from the target CPU arch (16 KiB for aarch64, 4 KiB otherwise).
pub fn createEmpty(gpa: *Allocator, options: link.Options) !*MachO {
    const self = try gpa.create(MachO);
    const cpu_arch = options.target.cpu.arch;
    const os_tag = options.target.os.tag;
    const abi = options.target.abi;
    const page_size: u16 = if (cpu_arch == .aarch64) 0x4000 else 0x1000;
    // Adhoc code signature is required when targeting aarch64-macos either directly or indirectly via the simulator
    // ABI such as aarch64-ios-simulator, etc.
const requires_adhoc_codesig = cpu_arch == .aarch64 and (os_tag == .macos or abi == .simulator);

    self.* = .{
        .base = .{
            .tag = .macho,
            .options = options,
            .allocator = gpa,
            .file = null,
        },
        .page_size = page_size,
        .requires_adhoc_codesig = requires_adhoc_codesig,
    };

    return self;
}

/// Top-level flush entry point. Static libraries are delegated to the LLVM
/// archiver (when available); stage1 builds go through the one-shot zld path;
/// everything else falls through to the incremental `flushModule`.
pub fn flush(self: *MachO, comp: *Compilation) !void {
    if (self.base.options.output_mode == .Lib and self.base.options.link_mode == .Static) {
        if (build_options.have_llvm) {
            return self.base.linkAsArchive(comp);
        } else {
            log.err("TODO: non-LLVM archiver for MachO object files", .{});
            return error.TODOImplementWritingStaticLibFiles;
        }
    }
    const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
    if (use_stage1) {
        return self.linkWithZld(comp);
    } else {
        switch (self.base.options.effectiveOutputMode()) {
            .Exe, .Obj => {},
            .Lib => return error.TODOImplementWritingLibFiles,
        }
        return self.flushModule(comp);
    }
}

/// Incremental flush: for executables, rewrites every dirty piece of linker
/// state (LC_MAIN entry offset, rebase/bind/lazy-bind/export info, symbol and
/// string tables, __LINKEDIT sizes), then the load commands and header, and
/// finally — when required — the ad-hoc code signature, which must come last
/// because it hashes the file contents written before it.
pub fn flushModule(self: *MachO, comp: *Compilation) !void {
    _ = comp;
    const tracy = trace(@src());
    defer tracy.end();

    const output_mode = self.base.options.output_mode;

    switch (output_mode) {
        .Exe => {
            if (self.entry_addr) |addr| {
                // Update LC_MAIN with entry offset.
                const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
                const main_cmd = &self.load_commands.items[self.main_cmd_index.?].Main;
                main_cmd.entryoff = addr - text_segment.inner.vmaddr;
                main_cmd.stacksize = self.base.options.stack_size_override orelse 0;
                self.load_commands_dirty = true;
            }
            try self.writeRebaseInfoTable();
            try self.writeBindInfoTable();
            try self.writeLazyBindInfoTable();
            try self.writeExportInfo();
            try self.writeAllGlobalAndUndefSymbols();
            try self.writeIndirectSymbolTable();
            try self.writeStringTable();
            try self.updateLinkeditSegmentSizes();

            if (self.d_sym) |*ds| {
                // Flush debug symbols bundle.
                try ds.flushModule(self.base.allocator, self.base.options);
            }

            if (self.requires_adhoc_codesig) {
                // Preallocate space for the code signature.
                // We need to do this at this stage so that we have the load commands with proper values
                // written out to the file.
                // The most important here is to have the correct vm and filesize of the __LINKEDIT segment
                // where the code signature goes into.
                try self.writeCodeSignaturePadding();
            }
        },
        .Obj => {},
        .Lib => return error.TODOImplementWritingLibFiles,
    }

    try self.writeLoadCommands();
    try self.writeHeader();

    if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
        log.debug("flushing. no_entry_point_found = true", .{});
        self.error_flags.no_entry_point_found = true;
    } else {
        log.debug("flushing. no_entry_point_found = false", .{});
        self.error_flags.no_entry_point_found = false;
    }

    // By this point every piece of on-disk state must have been rewritten.
    assert(!self.got_entries_count_dirty);
    assert(!self.load_commands_dirty);
    assert(!self.rebase_info_dirty);
    assert(!self.binding_info_dirty);
    assert(!self.lazy_binding_info_dirty);
    assert(!self.export_info_dirty);
    assert(!self.strtab_dirty);
    assert(!self.strtab_needs_relocation);

    if (self.requires_adhoc_codesig) {
        try self.writeCodeSignature(); // code signing always comes last
    }
}

/// Resolves a linker search directory against an optional sysroot.
/// Returns the first candidate (sysroot-prefixed first, then `dir` itself)
/// that exists on disk, or null when none does.
fn resolveSearchDir(
    arena: *Allocator,
    dir: []const u8,
    syslibroot: ?[]const u8,
) !?[]const u8 {
    var candidates = std.ArrayList([]const u8).init(arena);

    if (fs.path.isAbsolute(dir)) {
        if (syslibroot) |root| {
            const common_dir = if (std.Target.current.os.tag == .windows) blk: {
                // We need to check for disk designator and strip it out from dir path so
                // that we can concat dir with syslibroot.
// TODO we should backport this mechanism to 'MachO.Dylib.parseDependentLibs()' const disk_designator = fs.path.diskDesignatorWindows(dir); if (mem.indexOf(u8, dir, disk_designator)) |where| { break :blk dir[where + disk_designator.len ..]; } break :blk dir; } else dir; const full_path = try fs.path.join(arena, &[_][]const u8{ root, common_dir }); try candidates.append(full_path); } } try candidates.append(dir); for (candidates.items) |candidate| { // Verify that search path actually exists var tmp = fs.cwd().openDir(candidate, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| return e, }; defer tmp.close(); return candidate; } return null; } fn resolveLib( arena: *Allocator, search_dirs: []const []const u8, name: []const u8, ext: []const u8, ) !?[]const u8 { const search_name = try std.fmt.allocPrint(arena, "lib{s}{s}", .{ name, ext }); for (search_dirs) |dir| { const full_path = try fs.path.join(arena, &[_][]const u8{ dir, search_name }); // Check if the file exists. const tmp = fs.cwd().openFile(full_path, .{}) catch |err| switch (err) { error.FileNotFound => continue, else => |e| return e, }; defer tmp.close(); return full_path; } return null; } fn resolveFramework( arena: *Allocator, search_dirs: []const []const u8, name: []const u8, ext: []const u8, ) !?[]const u8 { const search_name = try std.fmt.allocPrint(arena, "{s}{s}", .{ name, ext }); const prefix_path = try std.fmt.allocPrint(arena, "{s}.framework", .{name}); for (search_dirs) |dir| { const full_path = try fs.path.join(arena, &[_][]const u8{ dir, prefix_path, search_name }); // Check if the file exists. 
const tmp = fs.cwd().openFile(full_path, .{}) catch |err| switch (err) {
            error.FileNotFound => continue,
            else => |e| return e,
        };
        defer tmp.close();

        return full_path;
    }

    return null;
}

/// One-shot (non-incremental) link using the self-hosted zld path: gathers all
/// positional objects, libraries and frameworks, resolves symbols, allocates
/// segments/sections, and writes the final binary. Results are memoized via the
/// compiler cache manifest keyed on inputs and linker options (unless
/// `disable_lld_caching` is set).
fn linkWithZld(self: *MachO, comp: *Compilation) !void {
    const tracy = trace(@src());
    defer tracy.end();

    var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
    defer arena_allocator.deinit();
    const arena = &arena_allocator.allocator;

    const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.

    // If there is no Zig code to compile, then we should skip flushing the output file because it
    // will not be part of the linker line anyway.
    const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: {
        const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
        if (use_stage1) {
            const obj_basename = try std.zig.binNameAlloc(arena, .{
                .root_name = self.base.options.root_name,
                .target = self.base.options.target,
                .output_mode = .Obj,
            });
            const o_directory = module.zig_cache_artifact_directory;
            const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
            break :blk full_obj_path;
        }

        try self.flushModule(comp);
        const obj_basename = self.base.intermediary_basename.?;
        const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
        break :blk full_obj_path;
    } else null;

    const is_lib = self.base.options.output_mode == .Lib;
    const is_dyn_lib = self.base.options.link_mode == .Dynamic and is_lib;
    const is_exe_or_dyn_lib = is_dyn_lib or self.base.options.output_mode == .Exe;
    const stack_size = self.base.options.stack_size_override orelse 0;

    // Basename of the small file that records the digest of the last link.
    const id_symlink_basename = "zld.id";

    var man: Cache.Manifest = undefined;
    defer if (!self.base.options.disable_lld_caching) man.deinit();

    var digest: [Cache.hex_digest_len]u8 = undefined;

    if (!self.base.options.disable_lld_caching) {
        man = comp.cache_parent.obtain();

        // We are about to obtain this lock, so here we give other processes a chance first.
        self.base.releaseLock();

        // Hash every input that can change the produced binary.
        try man.addListOfFiles(self.base.options.objects);
        for (comp.c_object_table.keys()) |key| {
            _ = try man.addFile(key.status.success.object_path, null);
        }
        try man.addOptionalFile(module_obj_path);
        // We can skip hashing libc and libc++ components that we are in charge of building from Zig
        // installation sources because they are always a product of the compiler version + target information.
        man.hash.add(stack_size);
        man.hash.addListOfBytes(self.base.options.lib_dirs);
        man.hash.addListOfBytes(self.base.options.framework_dirs);
        man.hash.addListOfBytes(self.base.options.frameworks);
        man.hash.addListOfBytes(self.base.options.rpath_list);
        if (is_dyn_lib) {
            man.hash.addOptional(self.base.options.version);
        }
        man.hash.addStringSet(self.base.options.system_libs);
        man.hash.addOptionalBytes(self.base.options.sysroot);

        // We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
        _ = try man.hit();
        digest = man.final();

        var prev_digest_buf: [digest.len]u8 = undefined;
        const prev_digest: []u8 = Cache.readSmallFile(
            directory.handle,
            id_symlink_basename,
            &prev_digest_buf,
        ) catch |err| blk: {
            log.debug("MachO Zld new_digest={s} error: {s}", .{ std.fmt.fmtSliceHexLower(&digest), @errorName(err) });
            // Handle this as a cache miss.
            break :blk prev_digest_buf[0..0];
        };
        if (mem.eql(u8, prev_digest, &digest)) {
            log.debug("MachO Zld digest={s} match - skipping invocation", .{std.fmt.fmtSliceHexLower(&digest)});
            // Hot diggity dog! The output binary is already there.
            self.base.lock = man.toOwnedLock();
            return;
        }
        log.debug("MachO Zld prev_digest={s} new_digest={s}", .{ std.fmt.fmtSliceHexLower(prev_digest), std.fmt.fmtSliceHexLower(&digest) });

        // We are about to change the output file to be different, so we invalidate the build hash now.
        directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
            error.FileNotFound => {},
            else => |e| return e,
        };
    }

    const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});

    if (self.base.options.output_mode == .Obj) {
        // LLD's MachO driver does not support the equvialent of `-r` so we do a simple file copy
        // here. TODO: think carefully about how we can avoid this redundant operation when doing
        // build-obj. See also the corresponding TODO in linkAsArchive.
        const the_object_path = blk: {
            if (self.base.options.objects.len != 0)
                break :blk self.base.options.objects[0];

            if (comp.c_object_table.count() != 0)
                break :blk comp.c_object_table.keys()[0].status.success.object_path;

            if (module_obj_path) |p|
                break :blk p;

            // TODO I think this is unreachable. Audit this situation when solving the above TODO
            // regarding eliding redundant object -> object transformations.
            return error.NoObjectsToLink;
        };
        // This can happen when using --enable-cache and using the stage1 backend. In this case
        // we can skip the file copy.
        if (!mem.eql(u8, the_object_path, full_out_path)) {
            try fs.cwd().copyFile(the_object_path, fs.cwd(), full_out_path, .{});
        }
    } else {
        // Positional arguments to the linker such as object files and static archives.
        var positionals = std.ArrayList([]const u8).init(arena);
        try positionals.appendSlice(self.base.options.objects);

        for (comp.c_object_table.keys()) |key| {
            try positionals.append(key.status.success.object_path);
        }

        if (module_obj_path) |p| {
            try positionals.append(p);
        }

        try positionals.append(comp.compiler_rt_static_lib.?.full_object_path);

        // libc++ dep
        if (self.base.options.link_libcpp) {
            try positionals.append(comp.libcxxabi_static_lib.?.full_object_path);
            try positionals.append(comp.libcxx_static_lib.?.full_object_path);
        }

        // Shared and static libraries passed via `-l` flag.
        var search_lib_names = std.ArrayList([]const u8).init(arena);

        const system_libs = self.base.options.system_libs.keys();
        for (system_libs) |link_lib| {
            // By this time, we depend on these libs being dynamically linked libraries and not static libraries
            // (the check for that needs to be earlier), but they could be full paths to .dylib files, in which
            // case we want to avoid prepending "-l".
            if (Compilation.classifyFileExt(link_lib) == .shared_library) {
                try positionals.append(link_lib);
                continue;
            }

            try search_lib_names.append(link_lib);
        }

        var lib_dirs = std.ArrayList([]const u8).init(arena);
        for (self.base.options.lib_dirs) |dir| {
            if (try resolveSearchDir(arena, dir, self.base.options.sysroot)) |search_dir| {
                try lib_dirs.append(search_dir);
            } else {
                log.warn("directory not found for '-L{s}'", .{dir});
            }
        }

        var libs = std.ArrayList([]const u8).init(arena);
        var lib_not_found = false;
        for (search_lib_names.items) |lib_name| {
            // Assume ld64 default: -search_paths_first
            // Look in each directory for a dylib (stub first), and then for archive
            // TODO implement alternative: -search_dylibs_first
            for (&[_][]const u8{ ".tbd", ".dylib", ".a" }) |ext| {
                if (try resolveLib(arena, lib_dirs.items, lib_name, ext)) |full_path| {
                    try libs.append(full_path);
                    break;
                }
            } else {
                log.warn("library not found for '-l{s}'", .{lib_name});
                lib_not_found = true;
            }
        }

        if (lib_not_found) {
            log.warn("Library search paths:", .{});
            for (lib_dirs.items) |dir| {
                log.warn(" {s}", .{dir});
            }
        }

        // If we were given the sysroot, try to look there first for libSystem.B.{dylib, tbd}.
        var libsystem_available = false;
        if (self.base.options.sysroot != null) blk: {
            // Try stub file first. If we hit it, then we're done as the stub file
            // re-exports every single symbol definition.
            if (try resolveLib(arena, lib_dirs.items, "System", ".tbd")) |full_path| {
                try libs.append(full_path);
                libsystem_available = true;
                break :blk;
            }
            // If we didn't hit the stub file, try .dylib next. However, libSystem.dylib
            // doesn't export libc.dylib which we'll need to resolve subsequently also.
            if (try resolveLib(arena, lib_dirs.items, "System", ".dylib")) |libsystem_path| {
                if (try resolveLib(arena, lib_dirs.items, "c", ".dylib")) |libc_path| {
                    try libs.append(libsystem_path);
                    try libs.append(libc_path);
                    libsystem_available = true;
                    break :blk;
                }
            }
        }
        if (!libsystem_available) {
            // Fall back on the libSystem stub shipped with the Zig installation.
            const full_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
                "libc", "darwin", "libSystem.B.tbd",
            });
            try libs.append(full_path);
        }

        // frameworks
        var framework_dirs = std.ArrayList([]const u8).init(arena);
        for (self.base.options.framework_dirs) |dir| {
            if (try resolveSearchDir(arena, dir, self.base.options.sysroot)) |search_dir| {
                try framework_dirs.append(search_dir);
            } else {
                log.warn("directory not found for '-F{s}'", .{dir});
            }
        }

        var framework_not_found = false;
        for (self.base.options.frameworks) |framework| {
            for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
                if (try resolveFramework(arena, framework_dirs.items, framework, ext)) |full_path| {
                    try libs.append(full_path);
                    break;
                }
            } else {
                log.warn("framework not found for '-framework {s}'", .{framework});
                framework_not_found = true;
            }
        }

        if (framework_not_found) {
            log.warn("Framework search paths:", .{});
            for (framework_dirs.items) |dir| {
                log.warn(" {s}", .{dir});
            }
        }

        // rpaths: deduplicate while preserving first-seen order.
        var rpath_table = std.StringArrayHashMap(void).init(arena);
        for (self.base.options.rpath_list) |rpath| {
            if (rpath_table.contains(rpath)) continue;
            try rpath_table.putNoClobber(rpath, {});
        }
        var rpaths = std.ArrayList([]const u8).init(arena);
        try rpaths.ensureCapacity(rpath_table.count());
        for (rpath_table.keys()) |*key| {
            rpaths.appendAssumeCapacity(key.*);
        }

        if (self.base.options.verbose_link) {
            // Reconstruct and print the equivalent linker command line.
            var argv = std.ArrayList([]const u8).init(arena);

            try argv.append("zig");
            try argv.append("ld");

            if (is_exe_or_dyn_lib) {
                try argv.append("-dynamic");
            }

            if (is_dyn_lib) {
                try argv.append("-dylib");

                const install_name = try std.fmt.allocPrint(arena, "@rpath/{s}", .{
                    self.base.options.emit.?.sub_path,
                });
                try argv.append("-install_name");
                try argv.append(install_name);
            }

            if (self.base.options.sysroot) |syslibroot| {
                try argv.append("-syslibroot");
                try argv.append(syslibroot);
            }

            for (rpaths.items) |rpath| {
                try argv.append("-rpath");
                try argv.append(rpath);
            }

            try argv.appendSlice(positionals.items);

            try argv.append("-o");
            try argv.append(full_out_path);

            try argv.append("-lSystem");
            try argv.append("-lc");

            for (search_lib_names.items) |l_name| {
                try argv.append(try std.fmt.allocPrint(arena, "-l{s}", .{l_name}));
            }

            for (self.base.options.lib_dirs) |lib_dir| {
                try argv.append(try std.fmt.allocPrint(arena, "-L{s}", .{lib_dir}));
            }

            for (self.base.options.frameworks) |framework| {
                try argv.append(try std.fmt.allocPrint(arena, "-framework {s}", .{framework}));
            }

            for (self.base.options.framework_dirs) |framework_dir| {
                try argv.append(try std.fmt.allocPrint(arena, "-F{s}", .{framework_dir}));
            }

            Compilation.dump_argv(argv.items);
        }

        const sub_path = self.base.options.emit.?.sub_path;
        self.base.file = try directory.handle.createFile(sub_path, .{
            .truncate = true,
            .read = true,
            .mode = link.determineMode(self.base.options),
        });
        // TODO mimicking insertion of null symbol from incremental linker.
        // This will need to moved.
        try self.locals.append(self.base.allocator, .{
            .n_strx = 0,
            .n_type = macho.N_UNDF,
            .n_sect = 0,
            .n_desc = 0,
            .n_value = 0,
        });
        try self.strtab.append(self.base.allocator, 0);

        // Parse all inputs, resolve symbols, then lay out and emit the binary.
        try self.populateMetadata();
        try self.addRpathLCs(rpaths.items);
        try self.parseInputFiles(positionals.items, self.base.options.sysroot);
        try self.parseLibs(libs.items, self.base.options.sysroot);
        try self.resolveSymbols();
        try self.parseTextBlocks();
        try self.addLoadDylibLCs();
        try self.addDataInCodeLC();
        try self.addCodeSignatureLC();

        {
            // Add dyld_stub_binder as the final GOT entry.
            const n_strx = self.strtab_dir.getKeyAdapted(@as([]const u8, "dyld_stub_binder"), StringIndexAdapter{
                .bytes = &self.strtab,
            }) orelse unreachable;
            const resolv = self.symbol_resolver.get(n_strx) orelse unreachable;
            const got_index = @intCast(u32, self.got_entries.items.len);
            const got_entry = GotIndirectionKey{
                .where = .undef,
                .where_index = resolv.where_index,
            };
            try self.got_entries.append(self.base.allocator, got_entry);
            try self.got_entries_map.putNoClobber(self.base.allocator, got_entry, got_index);
        }

        try self.sortSections();
        try self.allocateTextSegment();
        try self.allocateDataConstSegment();
        try self.allocateDataSegment();
        self.allocateLinkeditSegment();
        try self.allocateTextBlocks();
        try self.flushZld();
    }

    if (!self.base.options.disable_lld_caching) {
        // Update the file with the digest. If it fails we can continue; it only
        // means that the next invocation will have an unnecessary cache miss.
        Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
            log.warn("failed to save linking hash digest file: {s}", .{@errorName(err)});
        };
        // Again failure here only means an unnecessary cache miss.
        man.writeManifest() catch |err| {
            log.warn("failed to write cache manifest when linking: {s}", .{@errorName(err)});
        };
        // We hang on to this lock so that the output file path can be used without
        // other processes clobbering it.
self.base.lock = man.toOwnedLock(); } } fn parseObject(self: *MachO, path: []const u8) !bool { const file = fs.cwd().openFile(path, .{}) catch |err| switch (err) { error.FileNotFound => return false, else => |e| return e, }; errdefer file.close(); const name = try self.base.allocator.dupe(u8, path); errdefer self.base.allocator.free(name); var object = Object{ .name = name, .file = file, }; object.parse(self.base.allocator, self.base.options.target) catch |err| switch (err) { error.EndOfStream, error.NotObject => { object.deinit(self.base.allocator); return false; }, else => |e| return e, }; try self.objects.append(self.base.allocator, object); return true; } fn parseArchive(self: *MachO, path: []const u8) !bool { const file = fs.cwd().openFile(path, .{}) catch |err| switch (err) { error.FileNotFound => return false, else => |e| return e, }; errdefer file.close(); const name = try self.base.allocator.dupe(u8, path); errdefer self.base.allocator.free(name); var archive = Archive{ .name = name, .file = file, }; archive.parse(self.base.allocator, self.base.options.target) catch |err| switch (err) { error.EndOfStream, error.NotArchive => { archive.deinit(self.base.allocator); return false; }, else => |e| return e, }; try self.archives.append(self.base.allocator, archive); return true; } const ParseDylibError = error{ OutOfMemory, EmptyStubFile, MismatchedCpuArchitecture, UnsupportedCpuArchitecture, } || fs.File.OpenError || std.os.PReadError || Dylib.Id.ParseError; const DylibCreateOpts = struct { syslibroot: ?[]const u8 = null, id: ?Dylib.Id = null, is_dependent: bool = false, }; pub fn parseDylib(self: *MachO, path: []const u8, opts: DylibCreateOpts) ParseDylibError!bool { const file = fs.cwd().openFile(path, .{}) catch |err| switch (err) { error.FileNotFound => return false, else => |e| return e, }; errdefer file.close(); const name = try self.base.allocator.dupe(u8, path); errdefer self.base.allocator.free(name); var dylib = Dylib{ .name = name, .file = file, }; 
dylib.parse(self.base.allocator, self.base.options.target) catch |err| switch (err) { error.EndOfStream, error.NotDylib => { try file.seekTo(0); var lib_stub = LibStub.loadFromFile(self.base.allocator, file) catch { dylib.deinit(self.base.allocator); return false; }; defer lib_stub.deinit(); try dylib.parseFromStub(self.base.allocator, self.base.options.target, lib_stub); }, else => |e| return e, }; if (opts.id) |id| { if (dylib.id.?.current_version < id.compatibility_version) { log.warn("found dylib is incompatible with the required minimum version", .{}); log.warn(" dylib: {s}", .{id.name}); log.warn(" required minimum version: {}", .{id.compatibility_version}); log.warn(" dylib version: {}", .{dylib.id.?.current_version}); // TODO maybe this should be an error and facilitate auto-cleanup? dylib.deinit(self.base.allocator); return false; } } const dylib_id = @intCast(u16, self.dylibs.items.len); try self.dylibs.append(self.base.allocator, dylib); try self.dylibs_map.putNoClobber(self.base.allocator, dylib.id.?.name, dylib_id); if (!(opts.is_dependent or self.referenced_dylibs.contains(dylib_id))) { try self.referenced_dylibs.putNoClobber(self.base.allocator, dylib_id, {}); } // TODO this should not be performed if the user specifies `-flat_namespace` flag. // See ld64 manpages. 
try dylib.parseDependentLibs(self, opts.syslibroot);

    return true;
}

/// Parses each positional input file, trying object, then archive, then dylib
/// format in order; paths are canonicalized via realpath first. Unknown file
/// types only produce a warning.
fn parseInputFiles(self: *MachO, files: []const []const u8, syslibroot: ?[]const u8) !void {
    for (files) |file_name| {
        const full_path = full_path: {
            var buffer: [fs.MAX_PATH_BYTES]u8 = undefined;
            const path = try std.fs.realpath(file_name, &buffer);
            break :full_path try self.base.allocator.dupe(u8, path);
        };
        defer self.base.allocator.free(full_path);

        if (try self.parseObject(full_path)) continue;
        if (try self.parseArchive(full_path)) continue;
        if (try self.parseDylib(full_path, .{
            .syslibroot = syslibroot,
        })) continue;

        log.warn("unknown filetype for positional input file: '{s}'", .{file_name});
    }
}

/// Parses each library path, trying dylib first and then archive. Unknown file
/// types only produce a warning.
fn parseLibs(self: *MachO, libs: []const []const u8, syslibroot: ?[]const u8) !void {
    for (libs) |lib| {
        if (try self.parseDylib(lib, .{
            .syslibroot = syslibroot,
        })) continue;
        if (try self.parseArchive(lib)) continue;

        log.warn("unknown filetype for a library: '{s}'", .{lib});
    }
}

/// Identifies an output section by its load-command (segment) index and the
/// section index within that segment.
pub const MatchingSection = struct {
    seg: u16,
    sect: u16,
};

/// Maps an input section (from an object file) to its output (segment, section)
/// pair, lazily creating the output section on first use. Dispatches on the
/// section type flags first, then falls back on segment/section names.
/// Returns null for sections that are intentionally dropped (e.g. debug info).
pub fn getMatchingSection(self: *MachO, sect: macho.section_64) !?MatchingSection {
    const text_seg = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const data_const_seg = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
    const data_seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
    const segname = commands.segmentName(sect);
    const sectname = commands.sectionName(sect);

    const res: ?MatchingSection = blk: {
        switch (commands.sectionType(sect)) {
            macho.S_4BYTE_LITERALS, macho.S_8BYTE_LITERALS, macho.S_16BYTE_LITERALS => {
                if (self.text_const_section_index == null) {
                    self.text_const_section_index = @intCast(u16, text_seg.sections.items.len);
                    try text_seg.addSection(self.base.allocator, "__const", .{});
                }

                break :blk .{
                    .seg = self.text_segment_cmd_index.?,
                    .sect = self.text_const_section_index.?,
                };
            },
            macho.S_CSTRING_LITERALS => {
                if (mem.eql(u8, sectname, "__objc_methname")) {
                    // TODO it seems the common values within the sections in objects are deduplicated/merged
                    // on merging the sections' contents.
                    if (self.objc_methname_section_index == null) {
                        self.objc_methname_section_index = @intCast(u16, text_seg.sections.items.len);
                        try text_seg.addSection(self.base.allocator, "__objc_methname", .{
                            .flags = macho.S_CSTRING_LITERALS,
                        });
                    }

                    break :blk .{
                        .seg = self.text_segment_cmd_index.?,
                        .sect = self.objc_methname_section_index.?,
                    };
                } else if (mem.eql(u8, sectname, "__objc_methtype")) {
                    if (self.objc_methtype_section_index == null) {
                        self.objc_methtype_section_index = @intCast(u16, text_seg.sections.items.len);
                        try text_seg.addSection(self.base.allocator, "__objc_methtype", .{
                            .flags = macho.S_CSTRING_LITERALS,
                        });
                    }

                    break :blk .{
                        .seg = self.text_segment_cmd_index.?,
                        .sect = self.objc_methtype_section_index.?,
                    };
                } else if (mem.eql(u8, sectname, "__objc_classname")) {
                    if (self.objc_classname_section_index == null) {
                        self.objc_classname_section_index = @intCast(u16, text_seg.sections.items.len);
                        try text_seg.addSection(self.base.allocator, "__objc_classname", .{});
                    }

                    break :blk .{
                        .seg = self.text_segment_cmd_index.?,
                        .sect = self.objc_classname_section_index.?,
                    };
                }

                if (self.cstring_section_index == null) {
                    self.cstring_section_index = @intCast(u16, text_seg.sections.items.len);
                    try text_seg.addSection(self.base.allocator, "__cstring", .{
                        .flags = macho.S_CSTRING_LITERALS,
                    });
                }

                break :blk .{
                    .seg = self.text_segment_cmd_index.?,
                    .sect = self.cstring_section_index.?,
                };
            },
            macho.S_LITERAL_POINTERS => {
                if (mem.eql(u8, segname, "__DATA") and mem.eql(u8, sectname, "__objc_selrefs")) {
                    if (self.objc_selrefs_section_index == null) {
                        self.objc_selrefs_section_index = @intCast(u16, data_seg.sections.items.len);
                        try data_seg.addSection(self.base.allocator, "__objc_selrefs", .{
                            .flags = macho.S_LITERAL_POINTERS,
                        });
                    }

                    break :blk .{
                        .seg = self.data_segment_cmd_index.?,
                        .sect = self.objc_selrefs_section_index.?,
                    };
                }

                // TODO investigate
                break :blk null;
            },
            macho.S_MOD_INIT_FUNC_POINTERS => {
                if (self.mod_init_func_section_index == null) {
                    self.mod_init_func_section_index = @intCast(u16, data_const_seg.sections.items.len);
                    try data_const_seg.addSection(self.base.allocator, "__mod_init_func", .{
                        .flags = macho.S_MOD_INIT_FUNC_POINTERS,
                    });
                }

                break :blk .{
                    .seg = self.data_const_segment_cmd_index.?,
                    .sect = self.mod_init_func_section_index.?,
                };
            },
            macho.S_MOD_TERM_FUNC_POINTERS => {
                if (self.mod_term_func_section_index == null) {
                    self.mod_term_func_section_index = @intCast(u16, data_const_seg.sections.items.len);
                    try data_const_seg.addSection(self.base.allocator, "__mod_term_func", .{
                        .flags = macho.S_MOD_TERM_FUNC_POINTERS,
                    });
                }

                break :blk .{
                    .seg = self.data_const_segment_cmd_index.?,
                    .sect = self.mod_term_func_section_index.?,
                };
            },
            macho.S_ZEROFILL => {
                if (mem.eql(u8, sectname, "__common")) {
                    if (self.common_section_index == null) {
                        self.common_section_index = @intCast(u16, data_seg.sections.items.len);
                        try data_seg.addSection(self.base.allocator, "__common", .{
                            .flags = macho.S_ZEROFILL,
                        });
                    }

                    break :blk .{
                        .seg = self.data_segment_cmd_index.?,
                        .sect = self.common_section_index.?,
                    };
                } else {
                    if (self.bss_section_index == null) {
                        self.bss_section_index = @intCast(u16, data_seg.sections.items.len);
                        try data_seg.addSection(self.base.allocator, "__bss", .{
                            .flags = macho.S_ZEROFILL,
                        });
                    }

                    break :blk .{
                        .seg = self.data_segment_cmd_index.?,
                        .sect = self.bss_section_index.?,
                    };
                }
            },
            macho.S_THREAD_LOCAL_VARIABLES => {
                if (self.tlv_section_index == null) {
                    self.tlv_section_index = @intCast(u16, data_seg.sections.items.len);
                    try data_seg.addSection(self.base.allocator, "__thread_vars", .{
                        .flags = macho.S_THREAD_LOCAL_VARIABLES,
                    });
                }

                break :blk .{
                    .seg = self.data_segment_cmd_index.?,
                    .sect = self.tlv_section_index.?,
                };
            },
            macho.S_THREAD_LOCAL_REGULAR => {
                if (self.tlv_data_section_index == null) {
                    self.tlv_data_section_index = @intCast(u16, data_seg.sections.items.len);
                    try data_seg.addSection(self.base.allocator, "__thread_data", .{
                        .flags = macho.S_THREAD_LOCAL_REGULAR,
                    });
                }

                break :blk .{
                    .seg = self.data_segment_cmd_index.?,
                    .sect = self.tlv_data_section_index.?,
                };
            },
            macho.S_THREAD_LOCAL_ZEROFILL => {
                if (self.tlv_bss_section_index == null) {
                    self.tlv_bss_section_index = @intCast(u16, data_seg.sections.items.len);
                    try data_seg.addSection(self.base.allocator, "__thread_bss", .{
                        .flags = macho.S_THREAD_LOCAL_ZEROFILL,
                    });
                }

                break :blk .{
                    .seg = self.data_segment_cmd_index.?,
                    .sect = self.tlv_bss_section_index.?,
                };
            },
            macho.S_COALESCED => {
                if (mem.eql(u8, "__TEXT", segname) and mem.eql(u8, "__eh_frame", sectname)) {
                    // TODO I believe __eh_frame is currently part of __unwind_info section
                    // in the latest ld64 output.
                    if (self.eh_frame_section_index == null) {
                        self.eh_frame_section_index = @intCast(u16, text_seg.sections.items.len);
                        try text_seg.addSection(self.base.allocator, "__eh_frame", .{});
                    }

                    break :blk .{
                        .seg = self.text_segment_cmd_index.?,
                        .sect = self.eh_frame_section_index.?,
                    };
                }

                // TODO audit this: is this the right mapping?
                if (self.data_const_section_index == null) {
                    self.data_const_section_index = @intCast(u16, data_const_seg.sections.items.len);
                    try data_const_seg.addSection(self.base.allocator, "__const", .{});
                }

                break :blk .{
                    .seg = self.data_const_segment_cmd_index.?,
                    .sect = self.data_const_section_index.?,
                };
            },
            macho.S_REGULAR => {
                if (commands.sectionIsCode(sect)) {
                    if (self.text_section_index == null) {
                        self.text_section_index = @intCast(u16, text_seg.sections.items.len);
                        try text_seg.addSection(self.base.allocator, "__text", .{
                            .flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
                        });
                    }

                    break :blk .{
                        .seg = self.text_segment_cmd_index.?,
                        .sect = self.text_section_index.?,
                    };
                }
                if (commands.sectionIsDebug(sect)) {
                    // Debug sections are dropped from the output.
                    // TODO debug attributes
                    if (mem.eql(u8, "__LD", segname) and mem.eql(u8, "__compact_unwind", sectname)) {
                        log.debug("TODO compact unwind section: type 0x{x}, name '{s},{s}'", .{
                            sect.flags, segname, sectname,
                        });
                    }
                    break :blk null;
                }

                if (mem.eql(u8, segname, "__TEXT")) {
                    if (mem.eql(u8, sectname, "__ustring")) {
                        if (self.ustring_section_index == null) {
                            self.ustring_section_index = @intCast(u16, text_seg.sections.items.len);
                            try text_seg.addSection(self.base.allocator, "__ustring", .{});
                        }

                        break :blk .{
                            .seg = self.text_segment_cmd_index.?,
                            .sect = self.ustring_section_index.?,
                        };
                    } else if (mem.eql(u8, sectname, "__gcc_except_tab")) {
                        if (self.gcc_except_tab_section_index == null) {
                            self.gcc_except_tab_section_index = @intCast(u16, text_seg.sections.items.len);
                            try text_seg.addSection(self.base.allocator, "__gcc_except_tab", .{});
                        }

                        break :blk .{
                            .seg = self.text_segment_cmd_index.?,
                            .sect = self.gcc_except_tab_section_index.?,
                        };
                    } else if (mem.eql(u8, sectname, "__objc_methlist")) {
                        if (self.objc_methlist_section_index == null) {
                            self.objc_methlist_section_index = @intCast(u16, text_seg.sections.items.len);
                            try text_seg.addSection(self.base.allocator, "__objc_methlist", .{});
                        }

                        break :blk .{
                            .seg = self.text_segment_cmd_index.?,
                            .sect = self.objc_methlist_section_index.?,
                        };
                    } else if (mem.eql(u8, sectname, "__rodata") or
                        mem.eql(u8, sectname, "__typelink") or
                        mem.eql(u8, sectname, "__itablink") or
                        mem.eql(u8, sectname, "__gosymtab") or
                        mem.eql(u8, sectname, "__gopclntab"))
                    {
                        // Go-runtime read-only sections go into __DATA_CONST.
                        if (self.data_const_section_index == null) {
                            self.data_const_section_index = @intCast(u16, data_const_seg.sections.items.len);
                            try data_const_seg.addSection(self.base.allocator, "__const", .{});
                        }

                        break :blk .{
                            .seg = self.data_const_segment_cmd_index.?,
                            .sect = self.data_const_section_index.?,
                        };
                    } else {
                        if (self.text_const_section_index == null) {
                            self.text_const_section_index = @intCast(u16, text_seg.sections.items.len);
                            try text_seg.addSection(self.base.allocator, "__const", .{});
                        }

                        break :blk .{
                            .seg = self.text_segment_cmd_index.?,
                            .sect = self.text_const_section_index.?,
                        };
                    }
                }

                if (mem.eql(u8, segname, "__DATA_CONST")) {
                    if (self.data_const_section_index == null) {
                        self.data_const_section_index = @intCast(u16, data_const_seg.sections.items.len);
                        try data_const_seg.addSection(self.base.allocator, "__const", .{});
                    }

                    break :blk .{
                        .seg = self.data_const_segment_cmd_index.?,
                        .sect = self.data_const_section_index.?,
                    };
                }

                if (mem.eql(u8, segname, "__DATA")) {
                    if (mem.eql(u8, sectname, "__const")) {
                        if (self.data_const_section_index == null) {
                            self.data_const_section_index = @intCast(u16, data_const_seg.sections.items.len);
                            try data_const_seg.addSection(self.base.allocator, "__const", .{});
                        }

                        break :blk .{
                            .seg = self.data_const_segment_cmd_index.?,
                            .sect = self.data_const_section_index.?,
                        };
                    } else if (mem.eql(u8, sectname, "__cfstring")) {
                        if (self.objc_cfstring_section_index == null) {
                            self.objc_cfstring_section_index = @intCast(u16, data_const_seg.sections.items.len);
                            try data_const_seg.addSection(self.base.allocator, "__cfstring", .{});
                        }

                        break :blk .{
                            .seg = self.data_const_segment_cmd_index.?,
                            .sect = self.objc_cfstring_section_index.?,
                        };
                    } else if (mem.eql(u8, sectname, "__objc_classlist")) {
                        if (self.objc_classlist_section_index == null) {
                            self.objc_classlist_section_index = @intCast(u16, data_const_seg.sections.items.len);
                            try data_const_seg.addSection(self.base.allocator, "__objc_classlist", .{});
                        }

                        break :blk .{
                            .seg = self.data_const_segment_cmd_index.?,
                            .sect = self.objc_classlist_section_index.?,
                        };
                    } else if (mem.eql(u8, sectname, "__objc_imageinfo")) {
                        if (self.objc_imageinfo_section_index == null) {
                            self.objc_imageinfo_section_index = @intCast(u16, data_const_seg.sections.items.len);
                            try data_const_seg.addSection(self.base.allocator, "__objc_imageinfo", .{});
                        }

                        break :blk .{
                            .seg = self.data_const_segment_cmd_index.?,
                            .sect = self.objc_imageinfo_section_index.?,
                        };
                    } else if (mem.eql(u8, sectname, "__objc_const")) {
                        if (self.objc_const_section_index == null) {
                            self.objc_const_section_index = @intCast(u16, data_seg.sections.items.len);
                            try data_seg.addSection(self.base.allocator, "__objc_const", .{});
                        }

                        break :blk .{
                            .seg = self.data_segment_cmd_index.?,
                            .sect = self.objc_const_section_index.?,
                        };
                    } else if (mem.eql(u8, sectname, "__objc_classrefs")) {
                        if (self.objc_classrefs_section_index == null) {
                            self.objc_classrefs_section_index = @intCast(u16, data_seg.sections.items.len);
                            try data_seg.addSection(self.base.allocator, "__objc_classrefs", .{});
                        }

                        break :blk .{
                            .seg = self.data_segment_cmd_index.?,
                            .sect = self.objc_classrefs_section_index.?,
                        };
                    } else if (mem.eql(u8, sectname, "__objc_data")) {
                        if (self.objc_data_section_index == null) {
                            self.objc_data_section_index = @intCast(u16, data_seg.sections.items.len);
                            try data_seg.addSection(self.base.allocator, "__objc_data", .{});
                        }

                        break :blk .{
                            .seg = self.data_segment_cmd_index.?,
                            .sect = self.objc_data_section_index.?,
                        };
                    } else {
                        if (self.data_section_index == null) {
                            self.data_section_index = @intCast(u16, data_seg.sections.items.len);
                            try data_seg.addSection(self.base.allocator, "__data", .{});
                        }

                        break :blk .{
                            .seg = self.data_segment_cmd_index.?,
                            .sect =
self.data_section_index.?,
                    };
                }
            }

            // Known-but-unhandled section: log and fall through to "no match".
            if (mem.eql(u8, "__LLVM", segname) and mem.eql(u8, "__asm", sectname)) {
                log.debug("TODO LLVM asm section: type 0x{x}, name '{s},{s}'", .{
                    sect.flags, segname, sectname,
                });
            }

            break :blk null;
        },
        else => break :blk null,
    }
};

// Record the matched (segment, section) pair so it participates in the
// section-ordinal numbering used for nlist n_sect values.
if (res) |match| {
    _ = try self.section_ordinals.getOrPut(self.base.allocator, match);
}

return res;
}

/// Reorders the sections of the __TEXT, __DATA_CONST and __DATA segments into
/// the fixed canonical orders given by the `indices` lists below, updating:
/// * the cached `*_section_index` fields on `self`,
/// * the keys of `self.blocks` (via the per-segment old->new index mappings),
/// * `self.section_ordinals`, which is rebuilt from scratch.
fn sortSections(self: *MachO) !void {
    // Old-index -> new-index mappings, one per segment; consumed below when
    // rewriting the keys of `self.blocks`.
    var text_index_mapping = std.AutoHashMap(u16, u16).init(self.base.allocator);
    defer text_index_mapping.deinit();
    var data_const_index_mapping = std.AutoHashMap(u16, u16).init(self.base.allocator);
    defer data_const_index_mapping.deinit();
    var data_index_mapping = std.AutoHashMap(u16, u16).init(self.base.allocator);
    defer data_index_mapping.deinit();

    {
        // __TEXT segment
        const seg = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
        // Take ownership of the current section list, then re-append the
        // sections in canonical order.
        var sections = seg.sections.toOwnedSlice(self.base.allocator);
        defer self.base.allocator.free(sections);
        try seg.sections.ensureCapacity(self.base.allocator, sections.len);

        // Canonical order of sections within __TEXT.
        const indices = &[_]*?u16{
            &self.text_section_index,
            &self.stubs_section_index,
            &self.stub_helper_section_index,
            &self.gcc_except_tab_section_index,
            &self.cstring_section_index,
            &self.ustring_section_index,
            &self.text_const_section_index,
            &self.objc_methlist_section_index,
            &self.objc_methname_section_index,
            &self.objc_methtype_section_index,
            &self.objc_classname_section_index,
            &self.eh_frame_section_index,
        };
        for (indices) |maybe_index| {
            const new_index: u16 = if (maybe_index.*) |index| blk: {
                const idx = @intCast(u16, seg.sections.items.len);
                seg.sections.appendAssumeCapacity(sections[index]);
                try text_index_mapping.putNoClobber(index, idx);
                break :blk idx;
            } else continue;
            maybe_index.* = new_index;
        }
    }

    {
        // __DATA_CONST segment
        const seg = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
        var sections = seg.sections.toOwnedSlice(self.base.allocator);
        defer self.base.allocator.free(sections);
        try seg.sections.ensureCapacity(self.base.allocator, sections.len);

        // Canonical order of sections within __DATA_CONST.
        const indices = &[_]*?u16{
            &self.got_section_index,
            &self.mod_init_func_section_index,
            &self.mod_term_func_section_index,
            &self.data_const_section_index,
            &self.objc_cfstring_section_index,
            &self.objc_classlist_section_index,
            &self.objc_imageinfo_section_index,
        };
        for (indices) |maybe_index| {
            const new_index: u16 = if (maybe_index.*) |index| blk: {
                const idx = @intCast(u16, seg.sections.items.len);
                seg.sections.appendAssumeCapacity(sections[index]);
                try data_const_index_mapping.putNoClobber(index, idx);
                break :blk idx;
            } else continue;
            maybe_index.* = new_index;
        }
    }

    {
        // __DATA segment
        const seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
        var sections = seg.sections.toOwnedSlice(self.base.allocator);
        defer self.base.allocator.free(sections);
        try seg.sections.ensureCapacity(self.base.allocator, sections.len);

        // Canonical order of sections within __DATA.
        const indices = &[_]*?u16{
            &self.la_symbol_ptr_section_index,
            &self.objc_const_section_index,
            &self.objc_selrefs_section_index,
            &self.objc_classrefs_section_index,
            &self.objc_data_section_index,
            &self.data_section_index,
            &self.tlv_section_index,
            &self.tlv_data_section_index,
            &self.tlv_bss_section_index,
            &self.bss_section_index,
            &self.common_section_index,
        };
        for (indices) |maybe_index| {
            const new_index: u16 = if (maybe_index.*) |index| blk: {
                const idx = @intCast(u16, seg.sections.items.len);
                seg.sections.appendAssumeCapacity(sections[index]);
                try data_index_mapping.putNoClobber(index, idx);
                break :blk idx;
            } else continue;
            maybe_index.* = new_index;
        }
    }

    {
        // Rewrite the keys of `self.blocks` to use the new section indices.
        var transient: std.AutoHashMapUnmanaged(MatchingSection, *TextBlock) = .{};
        try transient.ensureCapacity(self.base.allocator, self.blocks.count());

        var it = self.blocks.iterator();
        while (it.next()) |entry| {
            const old = entry.key_ptr.*;
            const sect = if (old.seg == self.text_segment_cmd_index.?)
                text_index_mapping.get(old.sect).?
            else if (old.seg == self.data_const_segment_cmd_index.?)
                data_const_index_mapping.get(old.sect).?
            else
                data_index_mapping.get(old.sect).?;
            transient.putAssumeCapacityNoClobber(.{
                .seg = old.seg,
                .sect = sect,
            }, entry.value_ptr.*);
        }

        self.blocks.clearAndFree(self.base.allocator);
        self.blocks.deinit(self.base.allocator);
        self.blocks = transient;
    }

    {
        // Create new section ordinals.
        self.section_ordinals.clearRetainingCapacity();
        const text_seg = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
        for (text_seg.sections.items) |_, sect_id| {
            const res = self.section_ordinals.getOrPutAssumeCapacity(.{
                .seg = self.text_segment_cmd_index.?,
                .sect = @intCast(u16, sect_id),
            });
            assert(!res.found_existing);
        }
        const data_const_seg = self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
        for (data_const_seg.sections.items) |_, sect_id| {
            const res = self.section_ordinals.getOrPutAssumeCapacity(.{
                .seg = self.data_const_segment_cmd_index.?,
                .sect = @intCast(u16, sect_id),
            });
            assert(!res.found_existing);
        }
        const data_seg = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
        for (data_seg.sections.items) |_, sect_id| {
            const res = self.section_ordinals.getOrPutAssumeCapacity(.{
                .seg = self.data_segment_cmd_index.?,
                .sect = @intCast(u16, sect_id),
            });
            assert(!res.found_existing);
        }
    }
}

/// Assigns the file offset and vm address of the __TEXT segment (placed right
/// after __PAGEZERO), grows the stubs and stub_helper sections to fit all stub
/// entries, then allocates the segment's sections and shifts them towards the
/// end of the page-aligned segment.
fn allocateTextSegment(self: *MachO) !void {
    const seg = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const nstubs = @intCast(u32, self.stubs.items.len);

    // __TEXT starts at file offset 0; its vm address follows __PAGEZERO.
    const base_vmaddr = self.load_commands.items[self.pagezero_segment_cmd_index.?].Segment.inner.vmsize;
    seg.inner.fileoff = 0;
    seg.inner.vmaddr = base_vmaddr;

    // Set stubs and stub_helper sizes
    const stubs = &seg.sections.items[self.stubs_section_index.?];
    const stub_helper = &seg.sections.items[self.stub_helper_section_index.?];
    stubs.size += nstubs * stubs.reserved2;

    // Per-entry stub_helper size in bytes for the target architecture.
    const stub_size: u4 = switch (self.base.options.target.cpu.arch) {
        .x86_64 => 10,
        .aarch64 => 3 * @sizeOf(u32),
        else => unreachable,
    };
    stub_helper.size += nstubs * stub_size;

    // Sections start after the mach header and all load commands.
    var sizeofcmds: u64 = 0;
    for (self.load_commands.items) |lc| {
        sizeofcmds += lc.cmdsize();
    }

    try self.allocateSegment(self.text_segment_cmd_index.?, @sizeOf(macho.mach_header_64) + sizeofcmds);

    // Shift all sections to the back to minimize jump size between __TEXT and __DATA segments.
    var min_alignment: u32 = 0;
    for (seg.sections.items) |sect| {
        const alignment = try math.powi(u32, 2, sect.@"align");
        min_alignment = math.max(min_alignment, alignment);
    }

    assert(min_alignment > 0);
    const last_sect_idx = seg.sections.items.len - 1;
    const last_sect = seg.sections.items[last_sect_idx];
    const shift: u32 = blk: {
        // Largest multiple of the alignment that fits in the tail slack.
        const diff = seg.inner.filesize - last_sect.offset - last_sect.size;
        const factor = @divTrunc(diff, min_alignment);
        break :blk @intCast(u32, factor * min_alignment);
    };

    if (shift > 0) {
        for (seg.sections.items) |*sect| {
            sect.offset += shift;
            sect.addr += shift;
        }
    }
}

/// Places the __DATA_CONST segment directly after __TEXT, grows the GOT
/// section to fit all GOT entries, and allocates the segment's sections.
fn allocateDataConstSegment(self: *MachO) !void {
    const seg = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
    const nentries = @intCast(u32, self.got_entries.items.len);

    const text_seg = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    seg.inner.fileoff = text_seg.inner.fileoff + text_seg.inner.filesize;
    seg.inner.vmaddr = text_seg.inner.vmaddr + text_seg.inner.vmsize;

    // Set got size
    const got = &seg.sections.items[self.got_section_index.?];
    got.size += nentries * @sizeOf(u64);

    try self.allocateSegment(self.data_const_segment_cmd_index.?, 0);
}

/// Places the __DATA segment directly after __DATA_CONST, grows the
/// la_symbol_ptr section to fit one lazy pointer per stub and reserves one
/// word in __data for the dyld_stub_binder address, then allocates sections.
fn allocateDataSegment(self: *MachO) !void {
    const seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
    const nstubs = @intCast(u32, self.stubs.items.len);

    const data_const_seg = self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
    seg.inner.fileoff = data_const_seg.inner.fileoff + data_const_seg.inner.filesize;
    seg.inner.vmaddr = data_const_seg.inner.vmaddr + data_const_seg.inner.vmsize;

    // Set la_symbol_ptr and data size
    const la_symbol_ptr = &seg.sections.items[self.la_symbol_ptr_section_index.?];
    const data = &seg.sections.items[self.data_section_index.?];
    la_symbol_ptr.size += nstubs * @sizeOf(u64);
    data.size += @sizeOf(u64); // We need at least 8 bytes for address of dyld_stub_binder

    try self.allocateSegment(self.data_segment_cmd_index.?, 0);
}

/// Places the __LINKEDIT segment directly after __DATA. Its size is filled in
/// later, once the linkedit data has been written.
fn allocateLinkeditSegment(self: *MachO) void {
    const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
    const data_seg = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
    seg.inner.fileoff = data_seg.inner.fileoff + data_seg.inner.filesize;
    seg.inner.vmaddr = data_seg.inner.vmaddr + data_seg.inner.vmsize;
}

/// Lays out each section of the segment at load command `index` sequentially,
/// honoring each section's power-of-two alignment. `offset` is the distance
/// from the start of the segment at which the first section may begin. The
/// segment's file size and vm size are then rounded up to the page size.
fn allocateSegment(self: *MachO, index: u16, offset: u64) !void {
    const seg = &self.load_commands.items[index].Segment;

    // Allocate the sections according to their alignment at the beginning of the segment.
    var start: u64 = offset;
    for (seg.sections.items) |*sect| {
        const alignment = try math.powi(u32, 2, sect.@"align");
        const start_aligned = mem.alignForwardGeneric(u64, start, alignment);
        const end_aligned = mem.alignForwardGeneric(u64, start_aligned + sect.size, alignment);
        sect.offset = @intCast(u32, seg.inner.fileoff + start_aligned);
        sect.addr = seg.inner.vmaddr + start_aligned;
        start = end_aligned;
    }

    const seg_size_aligned = mem.alignForwardGeneric(u64, start, self.page_size);
    seg.inner.filesize = seg_size_aligned;
    seg.inner.vmsize = seg_size_aligned;
}

/// Walks every chain of text blocks per matched section and assigns each block
/// (and its aliased/contained symbols) a final vm address and section ordinal;
/// afterwards, global symbols are synced from their resolved local symbols.
fn allocateTextBlocks(self: *MachO) !void {
    var it = self.blocks.iterator();
    while (it.next()) |entry| {
        const match = entry.key_ptr.*;
        var block: *TextBlock = entry.value_ptr.*;

        // Find the first block
        while (block.prev) |prev| {
            block = prev;
        }

        const seg = self.load_commands.items[match.seg].Segment;
        const sect = seg.sections.items[match.sect];
        var base_addr: u64 = sect.addr;
        // nlist section ordinals are 1-based.
        const n_sect = @intCast(u8, self.section_ordinals.getIndex(match).?
+ 1);

        log.debug(" within section {s},{s}", .{ commands.segmentName(sect), commands.sectionName(sect) });
        log.debug(" {}", .{sect});

        // Walk the chain, aligning each block and stamping its address into
        // the corresponding local symbol.
        while (true) {
            const block_alignment = try math.powi(u32, 2, block.alignment);
            base_addr = mem.alignForwardGeneric(u64, base_addr, block_alignment);

            const sym = &self.locals.items[block.local_sym_index];
            sym.n_value = base_addr;
            sym.n_sect = n_sect;

            log.debug(" {s}: start=0x{x}, end=0x{x}, size={}, align={}", .{
                self.getString(sym.n_strx),
                base_addr,
                base_addr + block.size,
                block.size,
                block.alignment,
            });

            // Update each alias (if any)
            for (block.aliases.items) |index| {
                const alias_sym = &self.locals.items[index];
                alias_sym.n_value = base_addr;
                alias_sym.n_sect = n_sect;
            }

            // Update each symbol contained within the TextBlock
            for (block.contained.items) |sym_at_off| {
                const contained_sym = &self.locals.items[sym_at_off.local_sym_index];
                contained_sym.n_value = base_addr + sym_at_off.offset;
                contained_sym.n_sect = n_sect;
            }

            base_addr += block.size;

            if (block.next) |next| {
                block = next;
            } else break;
        }
    }

    // Update globals
    {
        var sym_it = self.symbol_resolver.valueIterator();
        while (sym_it.next()) |resolv| {
            if (resolv.where != .global) continue;
            assert(resolv.local_sym_index != 0);

            // Globals mirror the address/section of the local they resolve to.
            const local_sym = self.locals.items[resolv.local_sym_index];
            const sym = &self.globals.items[resolv.where_index];
            sym.n_value = local_sym.n_value;
            sym.n_sect = local_sym.n_sect;
        }
    }
}

/// Writes the contents of every text-block chain to the output file, one
/// section at a time. Zerofill-type sections are written as zeroes; for all
/// other sections each block's relocations are resolved and its code copied
/// into an in-memory buffer, with inter-block padding zeroed, before a single
/// pwrite per section.
fn writeTextBlocks(self: *MachO) !void {
    var it = self.blocks.iterator();
    while (it.next()) |entry| {
        const match = entry.key_ptr.*;
        var block: *TextBlock = entry.value_ptr.*;

        // Rewind to the first block in the chain.
        while (block.prev) |prev| {
            block = prev;
        }

        const seg = self.load_commands.items[match.seg].Segment;
        const sect = seg.sections.items[match.sect];
        const sect_type = commands.sectionType(sect);

        log.debug(" for section {s},{s}", .{ commands.segmentName(sect), commands.sectionName(sect) });
        log.debug(" {}", .{sect});

        // Staging buffer for the whole section's contents.
        var code = try self.base.allocator.alloc(u8, sect.size);
        defer self.base.allocator.free(code);

        if (sect_type == macho.S_ZEROFILL or sect_type == macho.S_THREAD_LOCAL_ZEROFILL) {
            mem.set(u8, code, 0);
        } else {
            var base_off: u64 = 0;

            while (true) {
                const block_alignment = try math.powi(u32, 2, block.alignment);
                const aligned_base_off = mem.alignForwardGeneric(u64, base_off, block_alignment);

                const sym = self.locals.items[block.local_sym_index];
                log.debug(" {s}: start=0x{x}, end=0x{x}, size={}, align={}", .{
                    self.getString(sym.n_strx),
                    aligned_base_off,
                    aligned_base_off + block.size,
                    block.size,
                    block.alignment,
                });

                try block.resolveRelocs(self);
                mem.copy(u8, code[aligned_base_off..][0..block.size], block.code.items);

                // TODO NOP for machine code instead of just zeroing out
                const padding_len = aligned_base_off - base_off;
                mem.set(u8, code[base_off..][0..padding_len], 0);

                base_off = aligned_base_off + block.size;

                if (block.next) |next| {
                    block = next;
                } else break;
            }

            // Zero any slack after the last block.
            mem.set(u8, code[base_off..], 0);
        }

        try self.base.file.?.pwriteAll(code, sect.offset);
    }
}

/// Emits the shared stub-helper preamble at the start of the __stub_helper
/// section: it loads the address of the dyld-private word in __data, pushes
/// it, and jumps through the GOT slot for dyld_stub_binder. Records the file
/// offset where the per-stub helper entries begin in
/// `self.stub_helper_stubs_start_off`, then writes the per-stub lazy pointer,
/// stub and stub-helper entries.
fn writeStubHelperCommon(self: *MachO) !void {
    const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const stub_helper = &text_segment.sections.items[self.stub_helper_section_index.?];
    const data_const_segment = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
    const got = &data_const_segment.sections.items[self.got_section_index.?];
    const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
    const data = &data_segment.sections.items[self.data_section_index.?];

    self.stub_helper_stubs_start_off = blk: {
        switch (self.base.options.target.cpu.arch) {
            .x86_64 => {
                const code_size = 15;
                var code: [code_size]u8 = undefined;
                // lea %r11, [rip + disp]
                code[0] = 0x4c;
                code[1] = 0x8d;
                code[2] = 0x1d;
                {
                    // Target is the reserved dyld_stub_binder word at the end of __data.
                    const target_addr = data.addr + data.size - @sizeOf(u64);
                    const displacement = try math.cast(u32, target_addr - stub_helper.addr - 7);
                    mem.writeIntLittle(u32, code[3..7], displacement);
                }
                // push %r11
                code[7] = 0x41;
                code[8] = 0x53;
                // jmp [rip + disp]
                code[9] = 0xff;
                code[10] = 0x25;
                {
                    // Locate the GOT entry for dyld_stub_binder via the resolver.
                    const n_strx = self.strtab_dir.getKeyAdapted(@as([]const u8, "dyld_stub_binder"), StringIndexAdapter{
                        .bytes = &self.strtab,
                    }) orelse unreachable;
                    const resolv = self.symbol_resolver.get(n_strx) orelse unreachable;
                    const got_index = self.got_entries_map.get(.{
                        .where = .undef,
                        .where_index = resolv.where_index,
                    }) orelse unreachable;
                    const addr = got.addr + got_index * @sizeOf(u64);
                    const displacement = try math.cast(u32, addr - stub_helper.addr - code_size);
                    mem.writeIntLittle(u32, code[11..], displacement);
                }
                try self.base.file.?.pwriteAll(&code, stub_helper.offset);
                break :blk stub_helper.offset + code_size;
            },
            .aarch64 => {
                var code: [6 * @sizeOf(u32)]u8 = undefined;
                data_blk_outer: {
                    const this_addr = stub_helper.addr;
                    const target_addr = data.addr + data.size - @sizeOf(u64);
                    data_blk: {
                        // Try a direct adr first (range-limited to +/- 1MiB).
                        const displacement = math.cast(i21, target_addr - this_addr) catch break :data_blk;
                        // adr x17, disp
                        mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.adr(.x17, displacement).toU32());
                        // nop
                        mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.nop().toU32());
                        break :data_blk_outer;
                    }
                    data_blk: {
                        // Retry with the adr in the second slot.
                        const new_this_addr = this_addr + @sizeOf(u32);
                        const displacement = math.cast(i21, target_addr - new_this_addr) catch break :data_blk;
                        // nop
                        mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.nop().toU32());
                        // adr x17, disp
                        mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.adr(.x17, displacement).toU32());
                        break :data_blk_outer;
                    }
                    // Jump is too big, replace adr with adrp and add.
                    const this_page = @intCast(i32, this_addr >> 12);
                    const target_page = @intCast(i32, target_addr >> 12);
                    const pages = @intCast(i21, target_page - this_page);
                    mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.adrp(.x17, pages).toU32());
                    const narrowed = @truncate(u12, target_addr);
                    mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.add(.x17, .x17, narrowed, false).toU32());
                }
                // stp x16, x17, [sp, #-16]!
code[8] = 0xf0;
                code[9] = 0x47;
                code[10] = 0xbf;
                code[11] = 0xa9;

                binder_blk_outer: {
                    // Locate the GOT entry for dyld_stub_binder via the resolver.
                    const n_strx = self.strtab_dir.getKeyAdapted(@as([]const u8, "dyld_stub_binder"), StringIndexAdapter{
                        .bytes = &self.strtab,
                    }) orelse unreachable;
                    const resolv = self.symbol_resolver.get(n_strx) orelse unreachable;
                    const got_index = self.got_entries_map.get(.{
                        .where = .undef,
                        .where_index = resolv.where_index,
                    }) orelse unreachable;
                    const this_addr = stub_helper.addr + 3 * @sizeOf(u32);
                    const target_addr = got.addr + got_index * @sizeOf(u64);
                    binder_blk: {
                        // Try a pc-relative literal load first (word-scaled offset).
                        const displacement = math.divExact(u64, target_addr - this_addr, 4) catch break :binder_blk;
                        const literal = math.cast(u18, displacement) catch break :binder_blk;
                        // ldr x16, label
                        mem.writeIntLittle(u32, code[12..16], aarch64.Instruction.ldr(.x16, .{
                            .literal = literal,
                        }).toU32());
                        // nop
                        mem.writeIntLittle(u32, code[16..20], aarch64.Instruction.nop().toU32());
                        break :binder_blk_outer;
                    }
                    binder_blk: {
                        const new_this_addr = this_addr + @sizeOf(u32);
                        const displacement = math.divExact(u64, target_addr - new_this_addr, 4) catch break :binder_blk;
                        const literal = math.cast(u18, displacement) catch break :binder_blk;
                        // Pad with nop to please division.
                        // nop
                        mem.writeIntLittle(u32, code[12..16], aarch64.Instruction.nop().toU32());
                        // ldr x16, label
                        mem.writeIntLittle(u32, code[16..20], aarch64.Instruction.ldr(.x16, .{
                            .literal = literal,
                        }).toU32());
                        break :binder_blk_outer;
                    }
                    // Use adrp followed by ldr(immediate).
                    const this_page = @intCast(i32, this_addr >> 12);
                    const target_page = @intCast(i32, target_addr >> 12);
                    const pages = @intCast(i21, target_page - this_page);
                    mem.writeIntLittle(u32, code[12..16], aarch64.Instruction.adrp(.x16, pages).toU32());
                    const narrowed = @truncate(u12, target_addr);
                    const offset = try math.divExact(u12, narrowed, 8);
                    mem.writeIntLittle(u32, code[16..20], aarch64.Instruction.ldr(.x16, .{
                        .register = .{
                            .rn = .x16,
                            .offset = aarch64.Instruction.LoadStoreOffset.imm(offset),
                        },
                    }).toU32());
                }
                // br x16
                code[20] = 0x00;
                code[21] = 0x02;
                code[22] = 0x1f;
                code[23] = 0xd6;
                try self.base.file.?.pwriteAll(&code, stub_helper.offset);
                break :blk stub_helper.offset + 6 * @sizeOf(u32);
            },
            else => unreachable,
        }
    };

    // Write one lazy symbol pointer, stub, and stub-helper entry per stub.
    for (self.stubs.items) |_, i| {
        const index = @intCast(u32, i);
        // TODO weak bound pointers
        try self.writeLazySymbolPointer(index);
        try self.writeStub(index);
        try self.writeStubInStubHelper(index);
    }
}

/// Resolves the symbols of one input object file into the linker's global
/// symbol tables. Defined (sect) symbols are appended to `self.locals` and, if
/// external, merged into `self.globals` via `self.symbol_resolver`; tentative
/// definitions are tracked in `tentatives`; undefined symbols are appended to
/// `self.undefs` and tracked in `unresolved`. Stab, indirect and absolute
/// symbols are rejected with `error.UnhandledSymbolType`.
fn resolveSymbolsInObject(
    self: *MachO,
    object_id: u16,
    tentatives: *std.AutoArrayHashMap(u32, void),
    unresolved: *std.AutoArrayHashMap(u32, void),
) !void {
    const object = &self.objects.items[object_id];

    log.debug("resolving symbols in '{s}'", .{object.name});

    for (object.symtab.items) |sym, id| {
        const sym_id = @intCast(u32, id);
        const sym_name = object.getString(sym.n_strx);

        if (symbolIsStab(sym)) {
            log.err("unhandled symbol type: stab", .{});
            log.err(" symbol '{s}'", .{sym_name});
            log.err(" first definition in '{s}'", .{object.name});
            return error.UnhandledSymbolType;
        }

        if (symbolIsIndr(sym)) {
            log.err("unhandled symbol type: indirect", .{});
            log.err(" symbol '{s}'", .{sym_name});
            log.err(" first definition in '{s}'", .{object.name});
            return error.UnhandledSymbolType;
        }

        if (symbolIsAbs(sym)) {
            log.err("unhandled symbol type: absolute", .{});
            log.err(" symbol '{s}'", .{sym_name});
            log.err(" first definition in '{s}'", .{object.name});
            return error.UnhandledSymbolType;
        }

        const n_strx = try self.makeString(sym_name);
        if (symbolIsSect(sym)) {
            // Defined symbol regardless of scope lands in the locals symbol table.
            const local_sym_index = @intCast(u32, self.locals.items.len);
            try self.locals.append(self.base.allocator, .{
                .n_strx = n_strx,
                .n_type = macho.N_SECT,
                .n_sect = 0,
                .n_desc = 0,
                .n_value = sym.n_value,
            });
            try object.symbol_mapping.putNoClobber(self.base.allocator, sym_id, local_sym_index);
            try object.reverse_symbol_mapping.putNoClobber(self.base.allocator, local_sym_index, sym_id);

            // If the symbol's scope is not local aka translation unit, then we need to work out
            // if we should save the symbol as a global, or potentially flag the error.
            if (!symbolIsExt(sym)) continue;

            const local = self.locals.items[local_sym_index];
            const resolv = self.symbol_resolver.getPtr(n_strx) orelse {
                // First sighting of this external symbol: create the global.
                const global_sym_index = @intCast(u32, self.globals.items.len);
                try self.globals.append(self.base.allocator, .{
                    .n_strx = n_strx,
                    .n_type = sym.n_type,
                    .n_sect = 0,
                    .n_desc = sym.n_desc,
                    .n_value = sym.n_value,
                });
                try self.symbol_resolver.putNoClobber(self.base.allocator, n_strx, .{
                    .where = .global,
                    .where_index = global_sym_index,
                    .local_sym_index = local_sym_index,
                    .file = object_id,
                });
                continue;
            };

            switch (resolv.where) {
                .global => {
                    const global = &self.globals.items[resolv.where_index];

                    if (symbolIsTentative(global.*)) {
                        // A real definition supersedes a tentative one.
                        _ = tentatives.fetchSwapRemove(resolv.where_index);
                    } else if (!(symbolIsWeakDef(sym) or symbolIsPext(sym)) and
                        !(symbolIsWeakDef(global.*) or symbolIsPext(global.*)))
                    {
                        // Two strong definitions: hard error.
                        log.err("symbol '{s}' defined multiple times", .{sym_name});
                        log.err(" first definition in '{s}'", .{self.objects.items[resolv.file].name});
                        log.err(" next definition in '{s}'", .{object.name});
                        return error.MultipleSymbolDefinitions;
                    } else if (symbolIsWeakDef(sym) or symbolIsPext(sym)) continue; // Current symbol is weak, so skip it.

                    // Otherwise, update the resolver and the global symbol.
global.n_type = sym.n_type; resolv.local_sym_index = local_sym_index; resolv.file = object_id; continue; }, .undef => { const undef = &self.undefs.items[resolv.where_index]; undef.* = .{ .n_strx = 0, .n_type = macho.N_UNDF, .n_sect = 0, .n_desc = 0, .n_value = 0, }; _ = unresolved.fetchSwapRemove(resolv.where_index); }, } const global_sym_index = @intCast(u32, self.globals.items.len); try self.globals.append(self.base.allocator, .{ .n_strx = local.n_strx, .n_type = sym.n_type, .n_sect = 0, .n_desc = sym.n_desc, .n_value = sym.n_value, }); resolv.* = .{ .where = .global, .where_index = global_sym_index, .local_sym_index = local_sym_index, .file = object_id, }; } else if (symbolIsTentative(sym)) { // Symbol is a tentative definition. const resolv = self.symbol_resolver.getPtr(n_strx) orelse { const global_sym_index = @intCast(u32, self.globals.items.len); try self.globals.append(self.base.allocator, .{ .n_strx = try self.makeString(sym_name), .n_type = sym.n_type, .n_sect = 0, .n_desc = sym.n_desc, .n_value = sym.n_value, }); try self.symbol_resolver.putNoClobber(self.base.allocator, n_strx, .{ .where = .global, .where_index = global_sym_index, .file = object_id, }); _ = try tentatives.getOrPut(global_sym_index); continue; }; switch (resolv.where) { .global => { const global = &self.globals.items[resolv.where_index]; if (!symbolIsTentative(global.*)) continue; if (global.n_value >= sym.n_value) continue; global.n_desc = sym.n_desc; global.n_value = sym.n_value; resolv.file = object_id; }, .undef => { const undef = &self.undefs.items[resolv.where_index]; const global_sym_index = @intCast(u32, self.globals.items.len); try self.globals.append(self.base.allocator, .{ .n_strx = undef.n_strx, .n_type = sym.n_type, .n_sect = 0, .n_desc = sym.n_desc, .n_value = sym.n_value, }); _ = try tentatives.getOrPut(global_sym_index); resolv.* = .{ .where = .global, .where_index = global_sym_index, .file = object_id, }; undef.* = .{ .n_strx = 0, .n_type = macho.N_UNDF, .n_sect = 0, 
.n_desc = 0, .n_value = 0, }; _ = unresolved.fetchSwapRemove(resolv.where_index); }, } } else { // Symbol is undefined. if (self.symbol_resolver.contains(n_strx)) continue; const undef_sym_index = @intCast(u32, self.undefs.items.len); try self.undefs.append(self.base.allocator, .{ .n_strx = try self.makeString(sym_name), .n_type = macho.N_UNDF, .n_sect = 0, .n_desc = 0, .n_value = 0, }); try self.symbol_resolver.putNoClobber(self.base.allocator, n_strx, .{ .where = .undef, .where_index = undef_sym_index, .file = object_id, }); _ = try unresolved.getOrPut(undef_sym_index); } } } fn resolveSymbols(self: *MachO) !void { var tentatives = std.AutoArrayHashMap(u32, void).init(self.base.allocator); defer tentatives.deinit(); var unresolved = std.AutoArrayHashMap(u32, void).init(self.base.allocator); defer unresolved.deinit(); // First pass, resolve symbols in provided objects. for (self.objects.items) |_, object_id| { try self.resolveSymbolsInObject(@intCast(u16, object_id), &tentatives, &unresolved); } // Second pass, resolve symbols in static libraries. var next_sym: usize = 0; loop: while (next_sym < unresolved.count()) { const sym = self.undefs.items[unresolved.keys()[next_sym]]; const sym_name = self.getString(sym.n_strx); for (self.archives.items) |archive| { // Check if the entry exists in a static archive. const offsets = archive.toc.get(sym_name) orelse { // No hit. continue; }; assert(offsets.items.len > 0); const object_id = @intCast(u16, self.objects.items.len); const object = try self.objects.addOne(self.base.allocator); object.* = try archive.parseObject(self.base.allocator, self.base.options.target, offsets.items[0]); try self.resolveSymbolsInObject(object_id, &tentatives, &unresolved); continue :loop; } next_sym += 1; } // Convert any tentative definition into a regular symbol and allocate // text blocks for each tentative defintion. 
while (tentatives.popOrNull()) |entry| { const sym = &self.globals.items[entry.key]; const match: MatchingSection = blk: { if (self.common_section_index == null) { const data_seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment; self.common_section_index = @intCast(u16, data_seg.sections.items.len); try data_seg.addSection(self.base.allocator, "__common", .{ .flags = macho.S_ZEROFILL, }); } break :blk .{ .seg = self.data_segment_cmd_index.?, .sect = self.common_section_index.?, }; }; _ = try self.section_ordinals.getOrPut(self.base.allocator, match); const size = sym.n_value; const code = try self.base.allocator.alloc(u8, size); defer self.base.allocator.free(code); mem.set(u8, code, 0); const alignment = (sym.n_desc >> 8) & 0x0f; sym.n_value = 0; sym.n_desc = 0; sym.n_sect = @intCast(u8, self.section_ordinals.getIndex(match).? + 1); var local_sym = sym.*; local_sym.n_type = macho.N_SECT; const local_sym_index = @intCast(u32, self.locals.items.len); try self.locals.append(self.base.allocator, local_sym); const resolv = self.symbol_resolver.getPtr(sym.n_strx) orelse unreachable; resolv.local_sym_index = local_sym_index; const block = try self.base.allocator.create(TextBlock); block.* = TextBlock.empty; block.local_sym_index = local_sym_index; block.size = size; block.alignment = alignment; try self.managed_blocks.append(self.base.allocator, block); try block.code.appendSlice(self.base.allocator, code); // Update target section's metadata // TODO should we update segment's size here too? // How does it tie with incremental space allocs? 
    // Grow the target section to fit the new block, aligned to the
    // block's (power-of-two exponent) alignment.
    const tseg = &self.load_commands.items[match.seg].Segment;
    const tsect = &tseg.sections.items[match.sect];
    const new_alignment = math.max(tsect.@"align", block.alignment);
    const new_alignment_pow_2 = try math.powi(u32, 2, new_alignment);
    const new_size = mem.alignForwardGeneric(u64, tsect.size, new_alignment_pow_2) + block.size;
    tsect.size = new_size;
    tsect.@"align" = new_alignment;

    // Append the block to the section's block list (the map stores the
    // last block; blocks are chained via prev/next).
    if (self.blocks.getPtr(match)) |last| {
        last.*.next = block;
        block.prev = last.*;
        last.* = block;
    } else {
        try self.blocks.putNoClobber(self.base.allocator, match, block);
    }
}

// Third pass, resolve symbols in dynamic libraries.
{
    // Put dyld_stub_binder as an undefined special symbol.
    const n_strx = try self.makeString("dyld_stub_binder");
    const undef_sym_index = @intCast(u32, self.undefs.items.len);
    try self.undefs.append(self.base.allocator, .{
        .n_strx = n_strx,
        .n_type = macho.N_UNDF,
        .n_sect = 0,
        .n_desc = 0,
        .n_value = 0,
    });
    try self.symbol_resolver.putNoClobber(self.base.allocator, n_strx, .{
        .where = .undef,
        .where_index = undef_sym_index,
    });
    _ = try unresolved.getOrPut(undef_sym_index);
}

next_sym = 0;
loop: while (next_sym < unresolved.count()) {
    const sym = self.undefs.items[unresolved.keys()[next_sym]];
    const sym_name = self.getString(sym.n_strx);

    for (self.dylibs.items) |dylib, id| {
        if (!dylib.symbols.contains(sym_name)) continue;

        // Record the dylib as referenced; its index position within
        // referenced_dylibs serves as the (0-based) dylib ordinal.
        const dylib_id = @intCast(u16, id);
        if (!self.referenced_dylibs.contains(dylib_id)) {
            try self.referenced_dylibs.putNoClobber(self.base.allocator, dylib_id, {});
        }
        const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable;

        // Mark the undef as an extern import; the 1-based ordinal is
        // stashed in n_desc scaled by N_SYMBOL_RESOLVER (recovered later
        // via @divExact in the bind-info writers).
        const resolv = self.symbol_resolver.getPtr(sym.n_strx) orelse unreachable;
        const undef = &self.undefs.items[resolv.where_index];
        undef.n_type |= macho.N_EXT;
        undef.n_desc = @intCast(u16, ordinal + 1) * macho.N_SYMBOL_RESOLVER;

        _ = unresolved.fetchSwapRemove(resolv.where_index);

        continue :loop;
    }

    next_sym += 1;
}

// Fourth pass, handle synthetic symbols and flag any undefined references.
// If ___dso_handle is referenced but still undefined, synthesize it as a
// weak global pinned at offset 0 of __TEXT,__text.
if (self.strtab_dir.getKeyAdapted(@as([]const u8, "___dso_handle"), StringIndexAdapter{
    .bytes = &self.strtab,
})) |n_strx| blk: {
    const resolv = self.symbol_resolver.getPtr(n_strx) orelse break :blk;
    if (resolv.where != .undef) break :blk;

    const undef = &self.undefs.items[resolv.where_index];
    const match: MatchingSection = .{
        .seg = self.text_segment_cmd_index.?,
        .sect = self.text_section_index.?,
    };
    const local_sym_index = @intCast(u32, self.locals.items.len);
    var nlist = macho.nlist_64{
        .n_strx = undef.n_strx,
        .n_type = macho.N_SECT,
        .n_sect = @intCast(u8, self.section_ordinals.getIndex(match).? + 1),
        .n_desc = 0,
        .n_value = 0,
    };
    try self.locals.append(self.base.allocator, nlist);
    const global_sym_index = @intCast(u32, self.globals.items.len);
    nlist.n_type |= macho.N_EXT;
    nlist.n_desc = macho.N_WEAK_DEF;
    try self.globals.append(self.base.allocator, nlist);

    _ = unresolved.fetchSwapRemove(resolv.where_index);

    // Scrub the now-dead undef entry and repoint the resolver at the
    // freshly created global/local pair.
    undef.* = .{
        .n_strx = 0,
        .n_type = macho.N_UNDF,
        .n_sect = 0,
        .n_desc = 0,
        .n_value = 0,
    };
    resolv.* = .{
        .where = .global,
        .where_index = global_sym_index,
        .local_sym_index = local_sym_index,
    };

    // We create an empty atom for this symbol.
    // TODO perhaps we should special-case special symbols? Create a separate
    // linked list of atoms?
    // Zero-sized placeholder atom backing the synthesized symbol.
    const block = try self.base.allocator.create(TextBlock);
    block.* = TextBlock.empty;
    block.local_sym_index = local_sym_index;
    block.size = 0;
    block.alignment = 0;
    try self.managed_blocks.append(self.base.allocator, block);

    if (self.blocks.getPtr(match)) |last| {
        last.*.next = block;
        block.prev = last.*;
        last.* = block;
    } else {
        try self.blocks.putNoClobber(self.base.allocator, match, block);
    }
}

// Anything still unresolved at this point is a genuine undefined
// reference: report each one, then fail.
for (unresolved.keys()) |index| {
    const sym = self.undefs.items[index];
    const sym_name = self.getString(sym.n_strx);
    const resolv = self.symbol_resolver.get(sym.n_strx) orelse unreachable;

    log.err("undefined reference to symbol '{s}'", .{sym_name});
    log.err(" first referenced in '{s}'", .{self.objects.items[resolv.file].name});
}
if (unresolved.count() > 0) return error.UndefinedSymbolReference;
}

/// Parses text blocks (atoms) out of every input object file.
fn parseTextBlocks(self: *MachO) !void {
    for (self.objects.items) |*object, object_id| {
        try object.parseTextBlocks(self.base.allocator, @intCast(u16, object_id), self);
    }
}

/// Lazily creates the standard set of segments, sections and load
/// commands; each is only added if its index is still null.
fn populateMetadata(self: *MachO) !void {
    if (self.pagezero_segment_cmd_index == null) {
        self.pagezero_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
        try self.load_commands.append(self.base.allocator, .{
            .Segment = SegmentCommand.empty("__PAGEZERO", .{
                .vmsize = 0x100000000, // size always set to 4GB
            }),
        });
    }
    if (self.text_segment_cmd_index == null) {
        self.text_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
        try self.load_commands.append(self.base.allocator, .{
            .Segment = SegmentCommand.empty("__TEXT", .{
                .vmaddr = 0x100000000, // always starts at 4GB
                .maxprot = macho.VM_PROT_READ | macho.VM_PROT_EXECUTE,
                .initprot = macho.VM_PROT_READ | macho.VM_PROT_EXECUTE,
            }),
        });
    }
    if (self.text_section_index == null) {
        const text_seg = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
        self.text_section_index = @intCast(u16, text_seg.sections.items.len);
        const alignment: u2 = switch (self.base.options.target.cpu.arch) {
            .x86_64 => 0,
            .aarch64 => 2,
            else => unreachable, //
unhandled architecture type
        };
        try text_seg.addSection(self.base.allocator, "__text", .{
            .@"align" = alignment,
            .flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
        });
        _ = try self.section_ordinals.getOrPut(self.base.allocator, .{
            .seg = self.text_segment_cmd_index.?,
            .sect = self.text_section_index.?,
        });
    }
    if (self.stubs_section_index == null) {
        const text_seg = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
        self.stubs_section_index = @intCast(u16, text_seg.sections.items.len);
        const alignment: u2 = switch (self.base.options.target.cpu.arch) {
            .x86_64 => 0,
            .aarch64 => 2,
            else => unreachable, // unhandled architecture type
        };
        // Per-arch size in bytes of a single stub entry.
        const stub_size: u4 = switch (self.base.options.target.cpu.arch) {
            .x86_64 => 6,
            .aarch64 => 3 * @sizeOf(u32),
            else => unreachable, // unhandled architecture type
        };
        try text_seg.addSection(self.base.allocator, "__stubs", .{
            .@"align" = alignment,
            .flags = macho.S_SYMBOL_STUBS | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
            // For S_SYMBOL_STUBS sections reserved2 holds the stub size.
            .reserved2 = stub_size,
        });
        _ = try self.section_ordinals.getOrPut(self.base.allocator, .{
            .seg = self.text_segment_cmd_index.?,
            .sect = self.stubs_section_index.?,
        });
    }
    if (self.stub_helper_section_index == null) {
        const text_seg = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
        self.stub_helper_section_index = @intCast(u16, text_seg.sections.items.len);
        const alignment: u2 = switch (self.base.options.target.cpu.arch) {
            .x86_64 => 0,
            .aarch64 => 2,
            else => unreachable, // unhandled architecture type
        };
        // Per-arch size in bytes of the stub helper preamble.
        const stub_helper_size: u6 = switch (self.base.options.target.cpu.arch) {
            .x86_64 => 15,
            .aarch64 => 6 * @sizeOf(u32),
            else => unreachable,
        };
        try text_seg.addSection(self.base.allocator, "__stub_helper", .{
            .size = stub_helper_size,
            .@"align" = alignment,
            .flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
        });
        _ = try self.section_ordinals.getOrPut(self.base.allocator, .{
            .seg =
self.text_segment_cmd_index.?,
            .sect = self.stub_helper_section_index.?,
        });
    }
    if (self.data_const_segment_cmd_index == null) {
        self.data_const_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
        try self.load_commands.append(self.base.allocator, .{
            .Segment = SegmentCommand.empty("__DATA_CONST", .{
                .maxprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE,
                .initprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE,
            }),
        });
    }
    if (self.got_section_index == null) {
        const data_const_seg = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
        self.got_section_index = @intCast(u16, data_const_seg.sections.items.len);
        try data_const_seg.addSection(self.base.allocator, "__got", .{
            .@"align" = 3, // 2^3 = @sizeOf(u64)
            .flags = macho.S_NON_LAZY_SYMBOL_POINTERS,
        });
        _ = try self.section_ordinals.getOrPut(self.base.allocator, .{
            .seg = self.data_const_segment_cmd_index.?,
            .sect = self.got_section_index.?,
        });
    }
    if (self.data_segment_cmd_index == null) {
        self.data_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
        try self.load_commands.append(self.base.allocator, .{
            .Segment = SegmentCommand.empty("__DATA", .{
                .maxprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE,
                .initprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE,
            }),
        });
    }
    if (self.la_symbol_ptr_section_index == null) {
        const data_seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
        self.la_symbol_ptr_section_index = @intCast(u16, data_seg.sections.items.len);
        try data_seg.addSection(self.base.allocator, "__la_symbol_ptr", .{
            .@"align" = 3, // 2^3 = @sizeOf(u64)
            .flags = macho.S_LAZY_SYMBOL_POINTERS,
        });
        _ = try self.section_ordinals.getOrPut(self.base.allocator, .{
            .seg = self.data_segment_cmd_index.?,
            .sect = self.la_symbol_ptr_section_index.?,
        });
    }
    if (self.data_section_index == null) {
        const data_seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
        self.data_section_index = @intCast(u16, data_seg.sections.items.len);
        try
data_seg.addSection(self.base.allocator, "__data", .{
            .@"align" = 3, // 2^3 = @sizeOf(u64)
        });
        _ = try self.section_ordinals.getOrPut(self.base.allocator, .{
            .seg = self.data_segment_cmd_index.?,
            .sect = self.data_section_index.?,
        });
    }
    if (self.linkedit_segment_cmd_index == null) {
        self.linkedit_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
        try self.load_commands.append(self.base.allocator, .{
            .Segment = SegmentCommand.empty("__LINKEDIT", .{
                .maxprot = macho.VM_PROT_READ,
                .initprot = macho.VM_PROT_READ,
            }),
        });
    }
    if (self.dyld_info_cmd_index == null) {
        self.dyld_info_cmd_index = @intCast(u16, self.load_commands.items.len);
        // All offsets/sizes start out zeroed; the write* routines fill
        // them in as the __LINKEDIT payloads are emitted.
        try self.load_commands.append(self.base.allocator, .{
            .DyldInfoOnly = .{
                .cmd = macho.LC_DYLD_INFO_ONLY,
                .cmdsize = @sizeOf(macho.dyld_info_command),
                .rebase_off = 0,
                .rebase_size = 0,
                .bind_off = 0,
                .bind_size = 0,
                .weak_bind_off = 0,
                .weak_bind_size = 0,
                .lazy_bind_off = 0,
                .lazy_bind_size = 0,
                .export_off = 0,
                .export_size = 0,
            },
        });
    }
    if (self.symtab_cmd_index == null) {
        self.symtab_cmd_index = @intCast(u16, self.load_commands.items.len);
        try self.load_commands.append(self.base.allocator, .{
            .Symtab = .{
                .cmd = macho.LC_SYMTAB,
                .cmdsize = @sizeOf(macho.symtab_command),
                .symoff = 0,
                .nsyms = 0,
                .stroff = 0,
                .strsize = 0,
            },
        });
    }
    if (self.dysymtab_cmd_index == null) {
        self.dysymtab_cmd_index = @intCast(u16, self.load_commands.items.len);
        try self.load_commands.append(self.base.allocator, .{
            .Dysymtab = .{
                .cmd = macho.LC_DYSYMTAB,
                .cmdsize = @sizeOf(macho.dysymtab_command),
                .ilocalsym = 0,
                .nlocalsym = 0,
                .iextdefsym = 0,
                .nextdefsym = 0,
                .iundefsym = 0,
                .nundefsym = 0,
                .tocoff = 0,
                .ntoc = 0,
                .modtaboff = 0,
                .nmodtab = 0,
                .extrefsymoff = 0,
                .nextrefsyms = 0,
                .indirectsymoff = 0,
                .nindirectsyms = 0,
                .extreloff = 0,
                .nextrel = 0,
                .locreloff = 0,
                .nlocrel = 0,
            },
        });
    }
    if (self.dylinker_cmd_index == null) {
        self.dylinker_cmd_index = @intCast(u16, self.load_commands.items.len);
        // Command size is 8-byte aligned and includes the dyld path bytes.
        const cmdsize = @intCast(u32,
mem.alignForwardGeneric(
            u64,
            @sizeOf(macho.dylinker_command) + mem.lenZ(DEFAULT_DYLD_PATH),
            @sizeOf(u64),
        ));
        var dylinker_cmd = commands.emptyGenericCommandWithData(macho.dylinker_command{
            .cmd = macho.LC_LOAD_DYLINKER,
            .cmdsize = cmdsize,
            // The path string is stored right after the fixed-size header.
            .name = @sizeOf(macho.dylinker_command),
        });
        dylinker_cmd.data = try self.base.allocator.alloc(u8, cmdsize - dylinker_cmd.inner.name);
        mem.set(u8, dylinker_cmd.data, 0);
        mem.copy(u8, dylinker_cmd.data, mem.spanZ(DEFAULT_DYLD_PATH));
        try self.load_commands.append(self.base.allocator, .{ .Dylinker = dylinker_cmd });
    }
    if (self.main_cmd_index == null and self.base.options.output_mode == .Exe) {
        self.main_cmd_index = @intCast(u16, self.load_commands.items.len);
        // entryoff is patched later by setEntryPoint.
        try self.load_commands.append(self.base.allocator, .{
            .Main = .{
                .cmd = macho.LC_MAIN,
                .cmdsize = @sizeOf(macho.entry_point_command),
                .entryoff = 0x0,
                .stacksize = 0,
            },
        });
    }
    if (self.dylib_id_cmd_index == null and self.base.options.output_mode == .Lib) {
        self.dylib_id_cmd_index = @intCast(u16, self.load_commands.items.len);
        const install_name = try std.fmt.allocPrint(self.base.allocator, "@rpath/{s}", .{
            self.base.options.emit.?.sub_path,
        });
        defer self.base.allocator.free(install_name);
        var dylib_cmd = try commands.createLoadDylibCommand(
            self.base.allocator,
            install_name,
            2,
            0x10000, // TODO forward user-provided versions
            0x10000,
        );
        errdefer dylib_cmd.deinit(self.base.allocator);
        // Reuse the load-dylib builder but tag the command as LC_ID_DYLIB.
        dylib_cmd.inner.cmd = macho.LC_ID_DYLIB;
        try self.load_commands.append(self.base.allocator, .{ .Dylib = dylib_cmd });
    }
    if (self.source_version_cmd_index == null) {
        self.source_version_cmd_index = @intCast(u16, self.load_commands.items.len);
        try self.load_commands.append(self.base.allocator, .{
            .SourceVersion = .{
                .cmd = macho.LC_SOURCE_VERSION,
                .cmdsize = @sizeOf(macho.source_version_command),
                .version = 0x0,
            },
        });
    }
    if (self.build_version_cmd_index == null) {
        self.build_version_cmd_index = @intCast(u16, self.load_commands.items.len);
        const cmdsize = @intCast(u32, mem.alignForwardGeneric(
u64,
            @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version),
            @sizeOf(u64),
        ));
        const ver = self.base.options.target.os.version_range.semver.min;
        // Pack major.minor.patch into a single 32-bit value.
        const version = ver.major << 16 | ver.minor << 8 | ver.patch;
        const is_simulator_abi = self.base.options.target.abi == .simulator;
        var cmd = commands.emptyGenericCommandWithData(macho.build_version_command{
            .cmd = macho.LC_BUILD_VERSION,
            .cmdsize = cmdsize,
            .platform = switch (self.base.options.target.os.tag) {
                .macos => macho.PLATFORM_MACOS,
                .ios => if (is_simulator_abi) macho.PLATFORM_IOSSIMULATOR else macho.PLATFORM_IOS,
                .watchos => if (is_simulator_abi) macho.PLATFORM_WATCHOSSIMULATOR else macho.PLATFORM_WATCHOS,
                .tvos => if (is_simulator_abi) macho.PLATFORM_TVOSSIMULATOR else macho.PLATFORM_TVOS,
                else => unreachable,
            },
            .minos = version,
            .sdk = version,
            .ntools = 1,
        });
        // The single trailing tool entry identifies the linker.
        const ld_ver = macho.build_tool_version{
            .tool = macho.TOOL_LD,
            .version = 0x0,
        };
        cmd.data = try self.base.allocator.alloc(u8, cmdsize - @sizeOf(macho.build_version_command));
        mem.set(u8, cmd.data, 0);
        mem.copy(u8, cmd.data, mem.asBytes(&ld_ver));
        try self.load_commands.append(self.base.allocator, .{ .BuildVersion = cmd });
    }
    if (self.uuid_cmd_index == null) {
        self.uuid_cmd_index = @intCast(u16, self.load_commands.items.len);
        var uuid_cmd: macho.uuid_command = .{
            .cmd = macho.LC_UUID,
            .cmdsize = @sizeOf(macho.uuid_command),
            .uuid = undefined,
        };
        // Random (not content-hashed) UUID.
        std.crypto.random.bytes(&uuid_cmd.uuid);
        try self.load_commands.append(self.base.allocator, .{ .Uuid = uuid_cmd });
    }
}

/// Appends an LC_DATA_IN_CODE load command (offsets/sizes zeroed; filled
/// in when the data-in-code entries are written).
fn addDataInCodeLC(self: *MachO) !void {
    if (self.data_in_code_cmd_index == null) {
        self.data_in_code_cmd_index = @intCast(u16, self.load_commands.items.len);
        try self.load_commands.append(self.base.allocator, .{
            .LinkeditData = .{
                .cmd = macho.LC_DATA_IN_CODE,
                .cmdsize = @sizeOf(macho.linkedit_data_command),
                .dataoff = 0,
                .datasize = 0,
            },
        });
    }
}

/// Appends an LC_CODE_SIGNATURE load command, but only when ad-hoc code
/// signing is required for this target.
fn addCodeSignatureLC(self: *MachO) !void {
    if (self.code_signature_cmd_index == null and self.requires_adhoc_codesig) {
self.code_signature_cmd_index = @intCast(u16, self.load_commands.items.len);
        try self.load_commands.append(self.base.allocator, .{
            .LinkeditData = .{
                .cmd = macho.LC_CODE_SIGNATURE,
                .cmdsize = @sizeOf(macho.linkedit_data_command),
                .dataoff = 0,
                .datasize = 0,
            },
        });
    }
}

/// Appends one LC_RPATH load command per requested runtime search path.
fn addRpathLCs(self: *MachO, rpaths: []const []const u8) !void {
    for (rpaths) |rpath| {
        // Command size is 8-byte aligned and accounts for the NUL terminator.
        const cmdsize = @intCast(u32, mem.alignForwardGeneric(
            u64,
            @sizeOf(macho.rpath_command) + rpath.len + 1,
            @sizeOf(u64),
        ));
        var rpath_cmd = commands.emptyGenericCommandWithData(macho.rpath_command{
            .cmd = macho.LC_RPATH,
            .cmdsize = cmdsize,
            // The path string is stored right after the fixed-size header.
            .path = @sizeOf(macho.rpath_command),
        });
        rpath_cmd.data = try self.base.allocator.alloc(u8, cmdsize - rpath_cmd.inner.path);
        mem.set(u8, rpath_cmd.data, 0);
        mem.copy(u8, rpath_cmd.data, rpath);
        try self.load_commands.append(self.base.allocator, .{ .Rpath = rpath_cmd });
    }
}

/// Appends an LC_LOAD_DYLIB command for every dylib that was actually
/// referenced during symbol resolution.
fn addLoadDylibLCs(self: *MachO) !void {
    for (self.referenced_dylibs.keys()) |id| {
        const dylib = self.dylibs.items[id];
        const dylib_id = dylib.id orelse unreachable;
        var dylib_cmd = try commands.createLoadDylibCommand(
            self.base.allocator,
            dylib_id.name,
            dylib_id.timestamp,
            dylib_id.current_version,
            dylib_id.compatibility_version,
        );
        errdefer dylib_cmd.deinit(self.base.allocator);
        try self.load_commands.append(self.base.allocator, .{ .Dylib = dylib_cmd });
    }
}

/// Final flush for the one-shot (zld) linking mode: writes all text
/// blocks, the __LINKEDIT payloads, load commands, header and — when
/// required — the ad-hoc code signature.
fn flushZld(self: *MachO) !void {
    self.load_commands_dirty = true;

    try self.writeTextBlocks();
    try self.writeStubHelperCommon();

    // Zero out the file offsets of the zerofill-style sections
    // (__common / __bss / thread-local bss) — they carry no file data.
    if (self.common_section_index) |index| {
        const seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
        const sect = &seg.sections.items[index];
        sect.offset = 0;
    }

    if (self.bss_section_index) |index| {
        const seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
        const sect = &seg.sections.items[index];
        sect.offset = 0;
    }

    if (self.tlv_bss_section_index) |index| {
        const seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
        const sect = &seg.sections.items[index];
        sect.offset
= 0;
    }

    try self.writeGotEntries();
    try self.setEntryPoint();
    try self.writeRebaseInfoTableZld();
    try self.writeBindInfoTableZld();
    try self.writeLazyBindInfoTableZld();
    try self.writeExportInfoZld();
    try self.writeDices();

    {
        // The symbol table goes right after whatever __LINKEDIT holds so far.
        const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
        const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
        symtab.symoff = @intCast(u32, seg.inner.fileoff + seg.inner.filesize);
    }

    try self.writeSymbolTable();
    try self.writeStringTableZld();

    {
        // Seal __LINKEDIT size
        const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
        seg.inner.vmsize = mem.alignForwardGeneric(u64, seg.inner.filesize, self.page_size);
    }

    if (self.requires_adhoc_codesig) {
        try self.writeCodeSignaturePadding();
    }

    try self.writeLoadCommands();
    try self.writeHeader();

    if (self.requires_adhoc_codesig) {
        try self.writeCodeSignature();
    }
}

/// Writes the __got section contents: one little-endian u64 slot per GOT
/// entry — the symbol's address for locally-resolved entries, 0 for
/// undefined (dylib-bound) ones.
fn writeGotEntries(self: *MachO) !void {
    const seg = self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
    const sect = seg.sections.items[self.got_section_index.?];

    var buffer = try self.base.allocator.alloc(u8, self.got_entries.items.len * @sizeOf(u64));
    defer self.base.allocator.free(buffer);

    var stream = std.io.fixedBufferStream(buffer);
    var writer = stream.writer();

    for (self.got_entries.items) |key| {
        const address: u64 = switch (key.where) {
            .local => self.locals.items[key.where_index].n_value,
            .undef => 0,
        };
        try writer.writeIntLittle(u64, address);
    }

    log.debug("writing GOT pointers at 0x{x} to 0x{x}", .{ sect.offset, sect.offset + buffer.len });

    try self.base.file.?.pwriteAll(buffer, sect.offset);
}

/// Fills in the LC_MAIN command from the resolved `_main` global.
/// Executables only; errors if `_main` was never exported.
fn setEntryPoint(self: *MachO) !void {
    if (self.base.options.output_mode != .Exe) return;

    // TODO we should respect the -entry flag passed in by the user to set a custom
    // entrypoint. For now, assume default of `_main`.
const seg = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const n_strx = self.strtab_dir.getKeyAdapted(@as([]const u8, "_main"), StringIndexAdapter{
        .bytes = &self.strtab,
    }) orelse {
        log.err("'_main' export not found", .{});
        return error.MissingMainEntrypoint;
    };
    const resolv = self.symbol_resolver.get(n_strx) orelse unreachable;
    assert(resolv.where == .global);
    const sym = self.globals.items[resolv.where_index];
    const ec = &self.load_commands.items[self.main_cmd_index.?].Main;
    // LC_MAIN's entryoff is relative to the start of the __TEXT segment.
    ec.entryoff = @intCast(u32, sym.n_value - seg.inner.vmaddr);
    ec.stacksize = self.base.options.stack_size_override orelse 0;
}

/// Collects every pointer that dyld must rebase (block rebase entries,
/// locally-resolved GOT slots and all lazy pointers) and writes the
/// rebase info into __LINKEDIT.
fn writeRebaseInfoTableZld(self: *MachO) !void {
    var pointers = std.ArrayList(bind.Pointer).init(self.base.allocator);
    defer pointers.deinit();

    {
        var it = self.blocks.iterator();
        while (it.next()) |entry| {
            const match = entry.key_ptr.*;
            var block: *TextBlock = entry.value_ptr.*;

            if (match.seg == self.text_segment_cmd_index.?) continue; // __TEXT is non-writable

            const seg = self.load_commands.items[match.seg].Segment;

            // Walk the block chain backwards (the map stores the last
            // block) collecting each block's rebase offsets.
            while (true) {
                const sym = self.locals.items[block.local_sym_index];
                const base_offset = sym.n_value - seg.inner.vmaddr;

                for (block.rebases.items) |offset| {
                    try pointers.append(.{
                        .offset = base_offset + offset,
                        .segment_id = match.seg,
                    });
                }

                if (block.prev) |prev| {
                    block = prev;
                } else break;
            }
        }
    }

    if (self.got_section_index) |idx| {
        const seg = self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
        const sect = seg.sections.items[idx];
        const base_offset = sect.addr - seg.inner.vmaddr;
        const segment_id = @intCast(u16, self.data_const_segment_cmd_index.?);

        // Only GOT slots that hold local addresses are rebased; undef
        // slots are written as 0 and bound by dyld instead.
        for (self.got_entries.items) |entry, i| {
            if (entry.where == .undef) continue;
            try pointers.append(.{
                .offset = base_offset + i * @sizeOf(u64),
                .segment_id = segment_id,
            });
        }
    }

    if (self.la_symbol_ptr_section_index) |idx| {
        const seg = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
        const sect = seg.sections.items[idx];
        const base_offset =
sect.addr - seg.inner.vmaddr;
        const segment_id = @intCast(u16, self.data_segment_cmd_index.?);

        // Every __la_symbol_ptr slot (one per stub) gets rebased.
        try pointers.ensureUnusedCapacity(self.stubs.items.len);

        for (self.stubs.items) |_, i| {
            pointers.appendAssumeCapacity(.{
                .offset = base_offset + i * @sizeOf(u64),
                .segment_id = segment_id,
            });
        }
    }

    std.sort.sort(bind.Pointer, pointers.items, {}, bind.pointerCmp);

    const size = try bind.rebaseInfoSize(pointers.items);
    var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size));
    defer self.base.allocator.free(buffer);

    var stream = std.io.fixedBufferStream(buffer);
    try bind.writeRebaseInfo(pointers.items, stream.writer());

    // Rebase info is the first payload in __LINKEDIT; grow the segment.
    const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
    const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly;
    dyld_info.rebase_off = @intCast(u32, seg.inner.fileoff);
    dyld_info.rebase_size = @intCast(u32, mem.alignForwardGeneric(u64, buffer.len, @sizeOf(u64)));
    seg.inner.filesize += dyld_info.rebase_size;

    log.debug("writing rebase info from 0x{x} to 0x{x}", .{ dyld_info.rebase_off, dyld_info.rebase_off + dyld_info.rebase_size });

    try self.base.file.?.pwriteAll(buffer, dyld_info.rebase_off);
}

/// Collects all non-lazy bindings (undef GOT slots plus per-block binding
/// entries) and writes the bind info table into __LINKEDIT.
fn writeBindInfoTableZld(self: *MachO) !void {
    var pointers = std.ArrayList(bind.Pointer).init(self.base.allocator);
    defer pointers.deinit();

    if (self.got_section_index) |idx| {
        const seg = self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
        const sect = seg.sections.items[idx];
        const base_offset = sect.addr - seg.inner.vmaddr;
        const segment_id = @intCast(u16, self.data_const_segment_cmd_index.?);

        for (self.got_entries.items) |entry, i| {
            if (entry.where == .local) continue;

            const sym = self.undefs.items[entry.where_index];
            try pointers.append(.{
                .offset = base_offset + i * @sizeOf(u64),
                .segment_id = segment_id,
                // n_desc was set to ordinal * N_SYMBOL_RESOLVER during
                // dylib symbol resolution, so this division is exact.
                .dylib_ordinal = @divExact(sym.n_desc, macho.N_SYMBOL_RESOLVER),
                .name = self.getString(sym.n_strx),
            });
        }
    }

    {
        var it = self.blocks.iterator();
        while (it.next()) |entry| {
const match = entry.key_ptr.*;
            var block: *TextBlock = entry.value_ptr.*;

            if (match.seg == self.text_segment_cmd_index.?) continue; // __TEXT is non-writable

            const seg = self.load_commands.items[match.seg].Segment;

            // Walk the block chain backwards, emitting one bind entry per
            // recorded binding in each block.
            while (true) {
                const sym = self.locals.items[block.local_sym_index];
                const base_offset = sym.n_value - seg.inner.vmaddr;

                for (block.bindings.items) |binding| {
                    const bind_sym = self.undefs.items[binding.local_sym_index];
                    try pointers.append(.{
                        .offset = binding.offset + base_offset,
                        .segment_id = match.seg,
                        .dylib_ordinal = @divExact(bind_sym.n_desc, macho.N_SYMBOL_RESOLVER),
                        .name = self.getString(bind_sym.n_strx),
                    });
                }

                if (block.prev) |prev| {
                    block = prev;
                } else break;
            }
        }
    }

    const size = try bind.bindInfoSize(pointers.items);
    var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size));
    defer self.base.allocator.free(buffer);

    var stream = std.io.fixedBufferStream(buffer);
    try bind.writeBindInfo(pointers.items, stream.writer());

    // Append the bind info after the data already in __LINKEDIT.
    const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
    const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly;
    dyld_info.bind_off = @intCast(u32, seg.inner.fileoff + seg.inner.filesize);
    dyld_info.bind_size = @intCast(u32, mem.alignForwardGeneric(u64, buffer.len, @alignOf(u64)));
    seg.inner.filesize += dyld_info.bind_size;

    log.debug("writing binding info from 0x{x} to 0x{x}", .{ dyld_info.bind_off, dyld_info.bind_off + dyld_info.bind_size });

    try self.base.file.?.pwriteAll(buffer, dyld_info.bind_off);
}

/// Collects lazy bindings — one per stub / __la_symbol_ptr slot — and
/// writes the lazy bind info table into __LINKEDIT.
fn writeLazyBindInfoTableZld(self: *MachO) !void {
    var pointers = std.ArrayList(bind.Pointer).init(self.base.allocator);
    defer pointers.deinit();

    if (self.la_symbol_ptr_section_index) |idx| {
        const seg = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
        const sect = seg.sections.items[idx];
        const base_offset = sect.addr - seg.inner.vmaddr;
        const segment_id = @intCast(u16, self.data_segment_cmd_index.?);

        try
pointers.ensureUnusedCapacity(self.stubs.items.len);

        for (self.stubs.items) |import_id, i| {
            const sym = self.undefs.items[import_id];
            pointers.appendAssumeCapacity(.{
                .offset = base_offset + i * @sizeOf(u64),
                .segment_id = segment_id,
                .dylib_ordinal = @divExact(sym.n_desc, macho.N_SYMBOL_RESOLVER),
                .name = self.getString(sym.n_strx),
            });
        }
    }

    const size = try bind.lazyBindInfoSize(pointers.items);
    var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size));
    defer self.base.allocator.free(buffer);

    var stream = std.io.fixedBufferStream(buffer);
    try bind.writeLazyBindInfo(pointers.items, stream.writer());

    // Append the lazy bind info after the data already in __LINKEDIT.
    const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
    const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly;
    dyld_info.lazy_bind_off = @intCast(u32, seg.inner.fileoff + seg.inner.filesize);
    dyld_info.lazy_bind_size = @intCast(u32, mem.alignForwardGeneric(u64, buffer.len, @alignOf(u64)));
    seg.inner.filesize += dyld_info.lazy_bind_size;

    log.debug("writing lazy binding info from 0x{x} to 0x{x}", .{ dyld_info.lazy_bind_off, dyld_info.lazy_bind_off + dyld_info.lazy_bind_size });

    try self.base.file.?.pwriteAll(buffer, dyld_info.lazy_bind_off);
    // Feed the freshly written blob back so per-symbol offsets into it
    // can be populated in the stub helper.
    try self.populateLazyBindOffsetsInStubHelper(buffer);
}

/// Builds the export trie from all global symbols and writes it into
/// __LINKEDIT; export addresses are offsets from __TEXT's vmaddr.
fn writeExportInfoZld(self: *MachO) !void {
    var trie: Trie = .{};
    defer trie.deinit(self.base.allocator);

    const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const base_address = text_segment.inner.vmaddr;

    // TODO handle macho.EXPORT_SYMBOL_FLAGS_REEXPORT and macho.EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER.
log.debug("writing export trie", .{});

    for (self.globals.items) |sym| {
        const sym_name = self.getString(sym.n_strx);
        log.debug(" | putting '{s}' defined at 0x{x}", .{ sym_name, sym.n_value });

        try trie.put(self.base.allocator, .{
            .name = sym_name,
            .vmaddr_offset = sym.n_value - base_address,
            .export_flags = macho.EXPORT_SYMBOL_FLAGS_KIND_REGULAR,
        });
    }

    try trie.finalize(self.base.allocator);

    var buffer = try self.base.allocator.alloc(u8, @intCast(usize, trie.size));
    defer self.base.allocator.free(buffer);

    var stream = std.io.fixedBufferStream(buffer);
    const nwritten = try trie.write(stream.writer());
    assert(nwritten == trie.size);

    // Append the export info after the data already in __LINKEDIT.
    const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
    const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly;
    dyld_info.export_off = @intCast(u32, seg.inner.fileoff + seg.inner.filesize);
    dyld_info.export_size = @intCast(u32, mem.alignForwardGeneric(u64, buffer.len, @alignOf(u64)));
    seg.inner.filesize += dyld_info.export_size;

    log.debug("writing export info from 0x{x} to 0x{x}", .{ dyld_info.export_off, dyld_info.export_off + dyld_info.export_size });

    try self.base.file.?.pwriteAll(buffer, dyld_info.export_off);
}

/// Writes the symbol table into __LINKEDIT — locals (including debug
/// stab entries when present), then exports, then undefs — and updates
/// LC_SYMTAB / LC_DYSYMTAB accordingly.
fn writeSymbolTable(self: *MachO) !void {
    const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
    const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;

    var locals = std.ArrayList(macho.nlist_64).init(self.base.allocator);
    defer locals.deinit();
    try locals.appendSlice(self.locals.items);

    if (self.has_stabs) {
        for (self.objects.items) |object| {
            if (object.debug_info == null) continue;

            // Open scope
            // Per-object stab prologue: compile dir, TU name, object file.
            try locals.ensureUnusedCapacity(3);
            locals.appendAssumeCapacity(.{
                .n_strx = try self.makeString(object.tu_comp_dir.?),
                .n_type = macho.N_SO,
                .n_sect = 0,
                .n_desc = 0,
                .n_value = 0,
            });
            locals.appendAssumeCapacity(.{
                .n_strx = try self.makeString(object.tu_name.?),
                .n_type = macho.N_SO,
                .n_sect = 0,
                .n_desc = 0,
                .n_value = 0,
            });
locals.appendAssumeCapacity(.{
                .n_strx = try self.makeString(object.name),
                .n_type = macho.N_OSO,
                .n_sect = 0,
                .n_desc = 1,
                .n_value = object.mtime orelse 0,
            });

            for (object.text_blocks.items) |block| {
                if (block.stab) |stab| {
                    const nlists = try stab.asNlists(block.local_sym_index, self);
                    defer self.base.allocator.free(nlists);
                    try locals.appendSlice(nlists);
                } else {
                    // A block without its own stab may still contain
                    // nested symbols that carry stabs.
                    for (block.contained.items) |sym_at_off| {
                        const stab = sym_at_off.stab orelse continue;
                        const nlists = try stab.asNlists(sym_at_off.local_sym_index, self);
                        defer self.base.allocator.free(nlists);
                        try locals.appendSlice(nlists);
                    }
                }
            }

            // Close scope
            try locals.append(.{
                .n_strx = 0,
                .n_type = macho.N_SO,
                .n_sect = 0,
                .n_desc = 0,
                .n_value = 0,
            });
        }
    }

    const nlocals = locals.items.len;
    const nexports = self.globals.items.len;
    const nundefs = self.undefs.items.len;

    // The table layout is fixed: locals, then exports, then undefs.
    const locals_off = symtab.symoff + symtab.nsyms * @sizeOf(macho.nlist_64);
    const locals_size = nlocals * @sizeOf(macho.nlist_64);
    log.debug("writing local symbols from 0x{x} to 0x{x}", .{ locals_off, locals_size + locals_off });
    try self.base.file.?.pwriteAll(mem.sliceAsBytes(locals.items), locals_off);

    const exports_off = locals_off + locals_size;
    const exports_size = nexports * @sizeOf(macho.nlist_64);
    log.debug("writing exported symbols from 0x{x} to 0x{x}", .{ exports_off, exports_size + exports_off });
    try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.globals.items), exports_off);

    const undefs_off = exports_off + exports_size;
    const undefs_size = nundefs * @sizeOf(macho.nlist_64);
    log.debug("writing undefined symbols from 0x{x} to 0x{x}", .{ undefs_off, undefs_size + undefs_off });
    try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.undefs.items), undefs_off);

    symtab.nsyms += @intCast(u32, nlocals + nexports + nundefs);
    seg.inner.filesize += locals_size + exports_size + undefs_size;

    // Update dynamic symbol table.
const dysymtab = &self.load_commands.items[self.dysymtab_cmd_index.?].Dysymtab; dysymtab.nlocalsym += @intCast(u32, nlocals); dysymtab.iextdefsym = dysymtab.nlocalsym; dysymtab.nextdefsym = @intCast(u32, nexports); dysymtab.iundefsym = dysymtab.nlocalsym + dysymtab.nextdefsym; dysymtab.nundefsym = @intCast(u32, nundefs); const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment; const stubs = &text_segment.sections.items[self.stubs_section_index.?]; const data_const_segment = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment; const got = &data_const_segment.sections.items[self.got_section_index.?]; const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment; const la_symbol_ptr = &data_segment.sections.items[self.la_symbol_ptr_section_index.?]; const nstubs = @intCast(u32, self.stubs.items.len); const ngot_entries = @intCast(u32, self.got_entries.items.len); dysymtab.indirectsymoff = @intCast(u32, seg.inner.fileoff + seg.inner.filesize); dysymtab.nindirectsyms = nstubs * 2 + ngot_entries; const needed_size = dysymtab.nindirectsyms * @sizeOf(u32); seg.inner.filesize += needed_size; log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ dysymtab.indirectsymoff, dysymtab.indirectsymoff + needed_size, }); var buf = try self.base.allocator.alloc(u8, needed_size); defer self.base.allocator.free(buf); var stream = std.io.fixedBufferStream(buf); var writer = stream.writer(); stubs.reserved1 = 0; for (self.stubs.items) |id| { try writer.writeIntLittle(u32, dysymtab.iundefsym + id); } got.reserved1 = nstubs; for (self.got_entries.items) |entry| { switch (entry.where) { .undef => { try writer.writeIntLittle(u32, dysymtab.iundefsym + entry.where_index); }, .local => { try writer.writeIntLittle(u32, macho.INDIRECT_SYMBOL_LOCAL); }, } } la_symbol_ptr.reserved1 = got.reserved1 + ngot_entries; for (self.stubs.items) |id| { try writer.writeIntLittle(u32, dysymtab.iundefsym + id); } try 
self.base.file.?.pwriteAll(buf, dysymtab.indirectsymoff); } pub fn deinit(self: *MachO) void { if (build_options.have_llvm) { if (self.llvm_object) |llvm_object| llvm_object.destroy(self.base.allocator); } if (self.d_sym) |*ds| { ds.deinit(self.base.allocator); } self.section_ordinals.deinit(self.base.allocator); self.pending_updates.deinit(self.base.allocator); self.got_entries.deinit(self.base.allocator); self.got_entries_map.deinit(self.base.allocator); self.got_entries_free_list.deinit(self.base.allocator); self.stubs.deinit(self.base.allocator); self.stubs_map.deinit(self.base.allocator); self.strtab_dir.deinit(self.base.allocator); self.strtab.deinit(self.base.allocator); self.undefs.deinit(self.base.allocator); self.globals.deinit(self.base.allocator); self.globals_free_list.deinit(self.base.allocator); self.locals.deinit(self.base.allocator); self.locals_free_list.deinit(self.base.allocator); self.symbol_resolver.deinit(self.base.allocator); for (self.objects.items) |*object| { object.deinit(self.base.allocator); } self.objects.deinit(self.base.allocator); for (self.archives.items) |*archive| { archive.deinit(self.base.allocator); } self.archives.deinit(self.base.allocator); for (self.dylibs.items) |*dylib| { dylib.deinit(self.base.allocator); } self.dylibs.deinit(self.base.allocator); self.dylibs_map.deinit(self.base.allocator); self.referenced_dylibs.deinit(self.base.allocator); for (self.load_commands.items) |*lc| { lc.deinit(self.base.allocator); } self.load_commands.deinit(self.base.allocator); for (self.managed_blocks.items) |block| { block.deinit(self.base.allocator); self.base.allocator.destroy(block); } self.managed_blocks.deinit(self.base.allocator); self.blocks.deinit(self.base.allocator); self.text_block_free_list.deinit(self.base.allocator); for (self.decls.keys()) |decl| { decl.link.macho.deinit(self.base.allocator); } self.decls.deinit(self.base.allocator); } pub fn closeFiles(self: MachO) void { for (self.objects.items) |object| { 
        object.file.close();
    }
    for (self.archives.items) |archive| {
        archive.file.close();
    }
    for (self.dylibs.items) |dylib| {
        dylib.file.close();
    }
}

/// Unlinks a text block from the allocation chain and the debug-info chain,
/// releasing its allocations and maintaining the capacity free list.
fn freeTextBlock(self: *MachO, text_block: *TextBlock) void {
    log.debug("freeTextBlock {*}", .{text_block});
    text_block.deinit(self.base.allocator);
    var already_have_free_list_node = false;
    {
        var i: usize = 0;
        // TODO turn text_block_free_list into a hash map
        while (i < self.text_block_free_list.items.len) {
            if (self.text_block_free_list.items[i] == text_block) {
                // swapRemove moves the last element into slot i, so do not
                // advance i — re-examine the swapped-in element.
                _ = self.text_block_free_list.swapRemove(i);
                continue;
            }
            if (self.text_block_free_list.items[i] == text_block.prev) {
                already_have_free_list_node = true;
            }
            i += 1;
        }
    }
    // TODO process free list for dbg info just like we do above for vaddrs
    if (self.last_text_block == text_block) {
        // TODO shrink the __text section size here
        self.last_text_block = text_block.prev;
    }
    if (self.d_sym) |*ds| {
        if (ds.dbg_info_decl_first == text_block) {
            ds.dbg_info_decl_first = text_block.dbg_info_next;
        }
        if (ds.dbg_info_decl_last == text_block) {
            // TODO shrink the .debug_info section size here
            ds.dbg_info_decl_last = text_block.dbg_info_prev;
        }
    }
    if (text_block.prev) |prev| {
        prev.next = text_block.next;
        // Removing this block may give prev enough trailing capacity to be
        // worth tracking for reuse.
        if (!already_have_free_list_node and prev.freeListEligible(self.*)) {
            // The free list is heuristics, it doesn't have to be perfect, so we can ignore
            // the OOM here.
            self.text_block_free_list.append(self.base.allocator, prev) catch {};
        }
    } else {
        text_block.prev = null;
    }
    if (text_block.next) |next| {
        next.prev = text_block.prev;
    } else {
        text_block.next = null;
    }
    if (text_block.dbg_info_prev) |prev| {
        prev.dbg_info_next = text_block.dbg_info_next;
        // TODO the free list logic like we do for text blocks above
    } else {
        text_block.dbg_info_prev = null;
    }
    if (text_block.dbg_info_next) |next| {
        next.dbg_info_prev = text_block.dbg_info_prev;
    } else {
        text_block.dbg_info_next = null;
    }
}

/// Shrinking is currently a no-op; see the TODO below.
fn shrinkTextBlock(self: *MachO, text_block: *TextBlock, new_block_size: u64) void {
    _ = self;
    _ = text_block;
    _ = new_block_size;
    // TODO check the new capacity, and if it crosses the size threshold into a big enough
    // capacity, insert a free list node for it.
}

/// Returns the (possibly new) vmaddr for a block that must grow to
/// new_block_size. Reallocates only when the current placement is misaligned
/// or too small; otherwise the existing n_value is kept.
fn growTextBlock(self: *MachO, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
    const sym = self.locals.items[text_block.local_sym_index];
    const align_ok = mem.alignBackwardGeneric(u64, sym.n_value, alignment) == sym.n_value;
    const need_realloc = !align_ok or new_block_size > text_block.capacity(self.*);
    if (!need_realloc) return sym.n_value;
    return self.allocateTextBlock(text_block, new_block_size, alignment);
}

/// Reserves a local symbol index and a GOT entry for a decl, reusing indices
/// from the free lists when available. Idempotent: returns early if the decl
/// already has a local symbol. Must be called before placeDecl.
pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
    if (decl.link.macho.local_sym_index != 0) return;

    // Reserve capacity up front so the assume-capacity appends below cannot fail.
    try self.locals.ensureUnusedCapacity(self.base.allocator, 1);
    try self.got_entries.ensureUnusedCapacity(self.base.allocator, 1);
    try self.decls.putNoClobber(self.base.allocator, decl, {});

    if (self.locals_free_list.popOrNull()) |i| {
        log.debug("reusing symbol index {d} for {s}", .{ i, decl.name });
        decl.link.macho.local_sym_index = i;
    } else {
        log.debug("allocating symbol index {d} for {s}", .{ self.locals.items.len, decl.name });
        decl.link.macho.local_sym_index = @intCast(u32, self.locals.items.len);
        _ = self.locals.addOneAssumeCapacity();
    }

    const got_index: u32 = blk: {
        if (self.got_entries_free_list.popOrNull()) |i| {
            log.debug("reusing GOT entry index {d} for {s}", .{ i, decl.name });
            break :blk i;
        } else {
            const got_index = @intCast(u32, self.got_entries.items.len);
            log.debug("allocating GOT entry index {d} for {s}", .{ got_index, decl.name });
            _ = self.got_entries.addOneAssumeCapacity();
            self.got_entries_count_dirty = true;
            self.rebase_info_dirty = true;
            break :blk got_index;
        }
    };

    // Zero-initialize the symbol; placeDecl fills in the real values later.
    self.locals.items[decl.link.macho.local_sym_index] = .{
        .n_strx = 0,
        .n_type = 0,
        .n_sect = 0,
        .n_desc = 0,
        .n_value = 0,
    };
    const got_entry = GotIndirectionKey{
        .where = .local,
        .where_index = decl.link.macho.local_sym_index,
    };
    self.got_entries.items[got_index] = got_entry;
    try self.got_entries_map.putNoClobber(self.base.allocator, got_entry, got_index);
}

/// Code-generates a function, places its machine code in the __text section,
/// writes it to the output file, and refreshes debug info and export symbols.
pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
    if (build_options.skip_non_native and builtin.object_format != .macho) {
        @panic("Attempted to compile for object format that was disabled by build configuration");
    }
    if (build_options.have_llvm) {
        // When the LLVM backend is active, delegate entirely.
        if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness);
    }
    const tracy = trace(@src());
    defer tracy.end();

    const decl = func.owner_decl;

    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
    defer code_buffer.deinit();

    // Debug buffers exist only when a dSYM bundle is being produced.
    var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined;
    const debug_buffers = if (self.d_sym) |*ds| blk: {
        debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl);
        break :blk &debug_buffers_buf;
    } else null;
    defer {
        if (debug_buffers) |dbg| {
            dbg.dbg_line_buffer.deinit();
            dbg.dbg_info_buffer.deinit();
            var it = dbg.dbg_info_type_relocs.valueIterator();
            while (it.next()) |value| {
                value.relocs.deinit(self.base.allocator);
            }
            dbg.dbg_info_type_relocs.deinit(self.base.allocator);
        }
    }

    self.active_decl = decl;

    const res = if (debug_buffers) |dbg|
        try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ .dwarf = .{ .dbg_line =
    &dbg.dbg_line_buffer,
            .dbg_info = &dbg.dbg_info_buffer,
            .dbg_info_type_relocs = &dbg.dbg_info_type_relocs,
        }, })
    else
        try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);

    switch (res) {
        .appended => {
            // TODO clearing the code and relocs buffer should probably be orchestrated
            // in a different, smarter, more automatic way somewhere else, in a more centralised
            // way than this.
            // If we don't clear the buffers here, we are up for some nasty surprises when
            // this TextBlock is reused later on and was not freed by freeTextBlock().
            decl.link.macho.code.clearAndFree(self.base.allocator);
            try decl.link.macho.code.appendSlice(self.base.allocator, code_buffer.items);
        },
        .fail => |em| {
            // Record the codegen error against the decl and bail without linking.
            decl.analysis = .codegen_failure;
            try module.failed_decls.put(module.gpa, decl, em);
            return;
        },
    }

    const symbol = try self.placeDecl(decl, decl.link.macho.code.items.len);
    try self.writeCode(symbol, decl.link.macho.code.items);

    if (debug_buffers) |db| {
        try self.d_sym.?.commitDeclDebugInfo(
            self.base.allocator,
            module,
            decl,
            db,
            self.base.options.target,
        );
    }

    // Since we updated the vaddr and the size, each corresponding export symbol also
    // needs to be updated.
    const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
    try self.updateDeclExports(module, decl, decl_exports);
}

/// Code-generates a non-function decl (data symbol), places it, writes it to
/// the output file, and refreshes the decl's export symbols.
pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
    if (build_options.skip_non_native and builtin.object_format != .macho) {
        @panic("Attempted to compile for object format that was disabled by build configuration");
    }
    if (build_options.have_llvm) {
        // When the LLVM backend is active, delegate entirely.
        if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
    }
    const tracy = trace(@src());
    defer tracy.end();

    if (decl.val.tag() == .extern_fn) {
        return; // TODO Should we do more when front-end analyzed extern decl?
    }

    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
    defer code_buffer.deinit();

    // Debug buffers exist only when a dSYM bundle is being produced.
    var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined;
    const debug_buffers = if (self.d_sym) |*ds| blk: {
        debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl);
        break :blk &debug_buffers_buf;
    } else null;
    defer {
        if (debug_buffers) |dbg| {
            dbg.dbg_line_buffer.deinit();
            dbg.dbg_info_buffer.deinit();
            var it = dbg.dbg_info_type_relocs.valueIterator();
            while (it.next()) |value| {
                value.relocs.deinit(self.base.allocator);
            }
            dbg.dbg_info_type_relocs.deinit(self.base.allocator);
        }
    }

    self.active_decl = decl;

    const res = if (debug_buffers) |dbg|
        try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
            .ty = decl.ty,
            .val = decl.val,
        }, &code_buffer, .{ .dwarf = .{
            .dbg_line = &dbg.dbg_line_buffer,
            .dbg_info = &dbg.dbg_info_buffer,
            .dbg_info_type_relocs = &dbg.dbg_info_type_relocs,
        }, })
    else
        try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
            .ty = decl.ty,
            .val = decl.val,
        }, &code_buffer, .none);

    const code = blk: {
        switch (res) {
            // Backend may hand back a slice it owns; use it directly.
            .externally_managed => |x| break :blk x,
            .appended => {
                // TODO clearing the code and relocs buffer should probably be orchestrated
                // in a different, smarter, more automatic way somewhere else, in a more centralised
                // way than this.
                // If we don't clear the buffers here, we are up for some nasty surprises when
                // this TextBlock is reused later on and was not freed by freeTextBlock().
                decl.link.macho.code.clearAndFree(self.base.allocator);
                try decl.link.macho.code.appendSlice(self.base.allocator, code_buffer.items);
                break :blk decl.link.macho.code.items;
            },
            .fail => |em| {
                // Record the codegen error against the decl and bail without linking.
                decl.analysis = .codegen_failure;
                try module.failed_decls.put(module.gpa, decl, em);
                return;
            },
        }
    };

    const symbol = try self.placeDecl(decl, code.len);
    try self.writeCode(symbol, code);

    // Since we updated the vaddr and the size, each corresponding export symbol also
    // needs to be updated.
    const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
    try self.updateDeclExports(module, decl, decl_exports);
}

/// Assigns (or reassigns) a virtual address and symbol-table entry for a
/// decl's machine code of length code_len, growing/shrinking its text block
/// as needed, rewriting the GOT entry when the address moves, resolving
/// relocations, and draining any pending stub updates. Returns a pointer to
/// the decl's local nlist_64 entry.
fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64 {
    const required_alignment = decl.ty.abiAlignment(self.base.options.target);
    assert(decl.link.macho.local_sym_index != 0); // Caller forgot to call allocateDeclIndexes()
    const symbol = &self.locals.items[decl.link.macho.local_sym_index];

    if (decl.link.macho.size != 0) {
        // Decl already placed: grow, shrink, or keep in place.
        const capacity = decl.link.macho.capacity(self.*);
        const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment);
        if (need_realloc) {
            const vaddr = try self.growTextBlock(&decl.link.macho, code_len, required_alignment);
            log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ decl.name, symbol.n_value, vaddr });
            if (vaddr != symbol.n_value) {
                // The decl moved, so its GOT slot must point at the new address.
                log.debug(" (writing new GOT entry)", .{});
                const got_index = self.got_entries_map.get(.{
                    .where = .local,
                    .where_index = decl.link.macho.local_sym_index,
                }) orelse unreachable;
                try self.writeGotEntry(got_index);
            }
            symbol.n_value = vaddr;
        } else if (code_len < decl.link.macho.size) {
            self.shrinkTextBlock(&decl.link.macho, code_len);
        }
        decl.link.macho.size = code_len;

        // Mach-O convention: C-visible names get a leading underscore.
        const new_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{mem.spanZ(decl.name)});
        defer self.base.allocator.free(new_name);

        symbol.n_strx = try self.makeString(new_name);
        symbol.n_type = macho.N_SECT;
        symbol.n_sect = @intCast(u8, self.text_section_index.?)
    + 1; // section ordinals are 1-based
        symbol.n_desc = 0;

        try self.writeLocalSymbol(decl.link.macho.local_sym_index);
        if (self.d_sym) |*ds|
            try ds.writeLocalSymbol(decl.link.macho.local_sym_index);
    } else {
        // First placement of this decl: allocate a fresh text block.
        const decl_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{mem.spanZ(decl.name)});
        defer self.base.allocator.free(decl_name);

        const name_str_index = try self.makeString(decl_name);
        const addr = try self.allocateTextBlock(&decl.link.macho, code_len, required_alignment);
        log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, addr });
        errdefer self.freeTextBlock(&decl.link.macho);

        symbol.* = .{
            .n_strx = name_str_index,
            .n_type = macho.N_SECT,
            .n_sect = @intCast(u8, self.text_section_index.?) + 1,
            .n_desc = 0,
            .n_value = addr,
        };
        const got_index = self.got_entries_map.get(.{
            .where = .local,
            .where_index = decl.link.macho.local_sym_index,
        }) orelse unreachable;
        try self.writeGotEntry(got_index);

        try self.writeLocalSymbol(decl.link.macho.local_sym_index);
        if (self.d_sym) |*ds|
            try ds.writeLocalSymbol(decl.link.macho.local_sym_index);
    }

    // Resolve relocations
    try decl.link.macho.resolveRelocs(self);
    // TODO this requires further investigation: should we dispose of resolved relocs, or keep them
    // so that we can reapply them when moving/growing sections?
    decl.link.macho.relocs.clearAndFree(self.base.allocator);

    // Apply pending updates
    while (self.pending_updates.popOrNull()) |update| {
        switch (update.kind) {
            .got => unreachable,
            .stub => {
                try self.writeStub(update.index);
                try self.writeStubInStubHelper(update.index);
                try self.writeLazySymbolPointer(update.index);
                self.rebase_info_dirty = true;
                self.lazy_binding_info_dirty = true;
            },
        }
    }

    return symbol;
}

/// Writes a symbol's machine code to the output file at the file offset
/// corresponding to its virtual address within the __text section.
fn writeCode(self: *MachO, symbol: *macho.nlist_64, code: []const u8) !void {
    const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const text_section = text_segment.sections.items[self.text_section_index.?];
    const section_offset = symbol.n_value - text_section.addr;
    const file_offset = text_section.offset + section_offset;
    log.debug("writing code for symbol {s} at file offset 0x{x}", .{ self.getString(symbol.n_strx), file_offset });
    try self.base.file.?.pwriteAll(code, file_offset);
}

/// Forwards a decl's source line-number change to the dSYM writer, if any.
pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {
    if (self.d_sym) |*ds| {
        try ds.updateDeclLineNumber(module, decl);
    }
}

/// Creates or refreshes global (exported) symbols for a decl, translating
/// export linkage options into nlist n_type/n_desc flags. Records module
/// errors for unsupported options rather than failing the link.
pub fn updateDeclExports(
    self: *MachO,
    module: *Module,
    decl: *Module.Decl,
    exports: []const *Module.Export,
) !void {
    if (build_options.skip_non_native and builtin.object_format != .macho) {
        @panic("Attempted to compile for object format that was disabled by build configuration");
    }
    if (build_options.have_llvm) {
        if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl, exports);
    }
    const tracy = trace(@src());
    defer tracy.end();

    try self.globals.ensureCapacity(self.base.allocator, self.globals.items.len + exports.len);
    // Decl not yet placed (allocateDeclIndexes not called) — nothing to export.
    if (decl.link.macho.local_sym_index == 0) return;
    const decl_sym = &self.locals.items[decl.link.macho.local_sym_index];

    for (exports) |exp| {
        // Mach-O convention: C-visible names get a leading underscore.
        const exp_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{exp.options.name});
        defer self.base.allocator.free(exp_name);

        if (exp.options.section) |section_name| {
            // Only exporting into __text is supported so far.
            if (!mem.eql(u8, section_name, "__text")) {
                try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
                module.failed_exports.putAssumeCapacityNoClobber(
                    exp,
                    try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}),
                );
                continue;
            }
        }

        var n_type: u8 = macho.N_SECT | macho.N_EXT;
        var n_desc: u16 = 0;

        switch (exp.options.linkage) {
            .Internal => {
                // Symbol should be hidden, or in MachO lingo, private extern.
                // We should also mark the symbol as Weak: n_desc == N_WEAK_DEF.
                // TODO work out when to add N_WEAK_REF.
                n_type |= macho.N_PEXT;
                n_desc |= macho.N_WEAK_DEF;
            },
            .Strong => {
                // Check if the export is _main and, if so, record its address
                // as the program entry point.
                // Otherwise, don't do anything since we already have all the flags
                // set that we need for global (strong) linkage.
                // n_type == N_SECT | N_EXT
                if (mem.eql(u8, exp_name, "_main")) {
                    self.entry_addr = decl_sym.n_value;
                }
            },
            .Weak => {
                // Weak linkage is specified as part of n_desc field.
                // Symbol's n_type is like for a symbol with strong linkage.
                n_desc |= macho.N_WEAK_DEF;
            },
            .LinkOnce => {
                try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.count() + 1);
                module.failed_exports.putAssumeCapacityNoClobber(
                    exp,
                    try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: GlobalLinkage.LinkOnce", .{}),
                );
                continue;
            },
        }

        if (exp.link.macho.sym_index) |i| {
            // Export already has a global slot — update it in place,
            // keeping the existing string-table index.
            const sym = &self.globals.items[i];
            sym.* = .{
                .n_strx = sym.n_strx,
                .n_type = n_type,
                .n_sect = @intCast(u8, self.text_section_index.?) + 1,
                .n_desc = n_desc,
                .n_value = decl_sym.n_value,
            };
        } else {
            // New export: reuse a freed global slot if possible, else append.
            const name_str_index = try self.makeString(exp_name);
            const i = if (self.globals_free_list.popOrNull()) |i| i else blk: {
                _ = self.globals.addOneAssumeCapacity();
                self.export_info_dirty = true;
                break :blk @intCast(u32, self.globals.items.len - 1);
            };
            self.globals.items[i] = .{
                .n_strx = name_str_index,
                .n_type = n_type,
                .n_sect = @intCast(u8, self.text_section_index.?) + 1,
                .n_desc = n_desc,
                .n_value = decl_sym.n_value,
            };
            const resolv = try self.symbol_resolver.getOrPut(self.base.allocator, name_str_index);
            resolv.value_ptr.* = .{
                .where = .global,
                .where_index = i,
                .local_sym_index = decl.link.macho.local_sym_index,
            };
            exp.link.macho.sym_index = @intCast(u32, i);
        }
    }
}

/// Retires an export's global symbol: zeroes its n_type and returns its slot
/// to the free list (best-effort; OOM on the free list is ignored).
pub fn deleteExport(self: *MachO, exp: Export) void {
    const sym_index = exp.sym_index orelse return;
    self.globals_free_list.append(self.base.allocator, sym_index) catch {};
    self.globals.items[sym_index].n_type = 0;
}

/// Releases all linker state for a decl: its text block, local symbol index,
/// GOT entry, and (if present) its debug line-number function entry.
pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {
    log.debug("freeDecl {*}", .{decl});
    _ = self.decls.swapRemove(decl);
    // Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
    self.freeTextBlock(&decl.link.macho);
    if (decl.link.macho.local_sym_index != 0) {
        self.locals_free_list.append(self.base.allocator, decl.link.macho.local_sym_index) catch {};

        // Recycle the decl's GOT slot as well.
        const got_key = GotIndirectionKey{
            .where = .local,
            .where_index = decl.link.macho.local_sym_index,
        };
        const got_index = self.got_entries_map.get(got_key) orelse unreachable;
        _ = self.got_entries_map.remove(got_key);
        self.got_entries_free_list.append(self.base.allocator, got_index) catch {};

        self.locals.items[decl.link.macho.local_sym_index].n_type = 0;
        decl.link.macho.local_sym_index = 0;
    }
    if (self.d_sym) |*ds| {
        // TODO make this logic match freeTextBlock. Maybe abstract the logic
        // out since the same thing is desired for both.
    _ = ds.dbg_line_fn_free_list.remove(&decl.fn_link.macho);
        // Unlink the decl's dbg-line function node from the doubly-linked
        // list, fixing up first/last pointers as needed.
        if (decl.fn_link.macho.prev) |prev| {
            ds.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {};
            prev.next = decl.fn_link.macho.next;
            if (decl.fn_link.macho.next) |next| {
                next.prev = prev;
            } else {
                ds.dbg_line_fn_last = prev;
            }
        } else if (decl.fn_link.macho.next) |next| {
            ds.dbg_line_fn_first = next;
            next.prev = null;
        }
        if (ds.dbg_line_fn_first == &decl.fn_link.macho) {
            ds.dbg_line_fn_first = decl.fn_link.macho.next;
        }
        if (ds.dbg_line_fn_last == &decl.fn_link.macho) {
            ds.dbg_line_fn_last = decl.fn_link.macho.prev;
        }
    }
}

/// Returns the virtual address assigned to a decl's symbol.
/// Asserts that allocateDeclIndexes has been called for the decl.
pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl) u64 {
    assert(decl.link.macho.local_sym_index != 0);
    return self.locals.items[decl.link.macho.local_sym_index].n_value;
}

/// Lazily creates every segment, section, and load command that the output
/// executable requires, preallocating file space for each. Each `if (… == null)`
/// guard makes this safe to call repeatedly; only missing pieces are added.
/// (Continues past the end of this chunk.)
pub fn populateMissingMetadata(self: *MachO) !void {
    switch (self.base.options.output_mode) {
        .Exe => {},
        .Obj => return error.TODOImplementWritingObjFiles,
        .Lib => return error.TODOImplementWritingLibFiles,
    }
    if (self.pagezero_segment_cmd_index == null) {
        self.pagezero_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
        try self.load_commands.append(self.base.allocator, .{
            .Segment = SegmentCommand.empty("__PAGEZERO", .{
                .vmsize = 0x100000000, // size always set to 4GB
            }),
        });
        self.load_commands_dirty = true;
    }
    if (self.text_segment_cmd_index == null) {
        self.text_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
        const maxprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE | macho.VM_PROT_EXECUTE;
        const initprot = macho.VM_PROT_READ | macho.VM_PROT_EXECUTE;
        const program_code_size_hint = self.base.options.program_code_size_hint;
        const got_size_hint = @sizeOf(u64) * self.base.options.symbol_count_hint;
        const ideal_size = self.header_pad + program_code_size_hint + 3 * got_size_hint;
        const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size);
        log.debug("found __TEXT segment free space 0x{x} to 0x{x}", .{ 0, needed_size });
        try
self.load_commands.append(self.base.allocator, .{ .Segment = SegmentCommand.empty("__TEXT", .{ .vmaddr = 0x100000000, // always starts at 4GB .vmsize = needed_size, .filesize = needed_size, .maxprot = maxprot, .initprot = initprot, }), }); self.load_commands_dirty = true; } if (self.text_section_index == null) { const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment; self.text_section_index = @intCast(u16, text_segment.sections.items.len); const alignment: u2 = switch (self.base.options.target.cpu.arch) { .x86_64 => 0, .aarch64 => 2, else => unreachable, // unhandled architecture type }; const flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS; const needed_size = self.base.options.program_code_size_hint; const off = text_segment.findFreeSpace(needed_size, @as(u16, 1) << alignment, self.header_pad); log.debug("found __text section free space 0x{x} to 0x{x}", .{ off, off + needed_size }); try text_segment.addSection(self.base.allocator, "__text", .{ .addr = text_segment.inner.vmaddr + off, .size = @intCast(u32, needed_size), .offset = @intCast(u32, off), .@"align" = alignment, .flags = flags, }); self.load_commands_dirty = true; } if (self.stubs_section_index == null) { const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment; self.stubs_section_index = @intCast(u16, text_segment.sections.items.len); const alignment: u2 = switch (self.base.options.target.cpu.arch) { .x86_64 => 0, .aarch64 => 2, else => unreachable, // unhandled architecture type }; const stub_size: u4 = switch (self.base.options.target.cpu.arch) { .x86_64 => 6, .aarch64 => 3 * @sizeOf(u32), else => unreachable, // unhandled architecture type }; const flags = macho.S_SYMBOL_STUBS | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS; const needed_size = @sizeOf(u64) * self.base.options.symbol_count_hint; const off = text_segment.findFreeSpace(needed_size, @alignOf(u64), self.header_pad); 
assert(off + needed_size <= text_segment.inner.fileoff + text_segment.inner.filesize); // TODO Must expand __TEXT segment. log.debug("found __stubs section free space 0x{x} to 0x{x}", .{ off, off + needed_size }); try text_segment.addSection(self.base.allocator, "__stubs", .{ .addr = text_segment.inner.vmaddr + off, .size = needed_size, .offset = @intCast(u32, off), .@"align" = alignment, .flags = flags, .reserved2 = stub_size, }); self.load_commands_dirty = true; } if (self.stub_helper_section_index == null) { const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment; self.stub_helper_section_index = @intCast(u16, text_segment.sections.items.len); const alignment: u2 = switch (self.base.options.target.cpu.arch) { .x86_64 => 0, .aarch64 => 2, else => unreachable, // unhandled architecture type }; const flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS; const needed_size = @sizeOf(u64) * self.base.options.symbol_count_hint; const off = text_segment.findFreeSpace(needed_size, @alignOf(u64), self.header_pad); assert(off + needed_size <= text_segment.inner.fileoff + text_segment.inner.filesize); // TODO Must expand __TEXT segment. 
log.debug("found __stub_helper section free space 0x{x} to 0x{x}", .{ off, off + needed_size }); try text_segment.addSection(self.base.allocator, "__stub_helper", .{ .addr = text_segment.inner.vmaddr + off, .size = needed_size, .offset = @intCast(u32, off), .@"align" = alignment, .flags = flags, }); self.load_commands_dirty = true; } if (self.data_const_segment_cmd_index == null) { self.data_const_segment_cmd_index = @intCast(u16, self.load_commands.items.len); const maxprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE | macho.VM_PROT_EXECUTE; const initprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE; const address_and_offset = self.nextSegmentAddressAndOffset(); const ideal_size = @sizeOf(u64) * self.base.options.symbol_count_hint; const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size); log.debug("found __DATA_CONST segment free space 0x{x} to 0x{x}", .{ address_and_offset.offset, address_and_offset.offset + needed_size }); try self.load_commands.append(self.base.allocator, .{ .Segment = SegmentCommand.empty("__DATA_CONST", .{ .vmaddr = address_and_offset.address, .vmsize = needed_size, .fileoff = address_and_offset.offset, .filesize = needed_size, .maxprot = maxprot, .initprot = initprot, }), }); self.load_commands_dirty = true; } if (self.got_section_index == null) { const dc_segment = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment; self.got_section_index = @intCast(u16, dc_segment.sections.items.len); const flags = macho.S_NON_LAZY_SYMBOL_POINTERS; const needed_size = @sizeOf(u64) * self.base.options.symbol_count_hint; const off = dc_segment.findFreeSpace(needed_size, @alignOf(u64), null); assert(off + needed_size <= dc_segment.inner.fileoff + dc_segment.inner.filesize); // TODO Must expand __DATA_CONST segment. 
log.debug("found __got section free space 0x{x} to 0x{x}", .{ off, off + needed_size }); try dc_segment.addSection(self.base.allocator, "__got", .{ .addr = dc_segment.inner.vmaddr + off - dc_segment.inner.fileoff, .size = needed_size, .offset = @intCast(u32, off), .@"align" = 3, // 2^3 = @sizeOf(u64) .flags = flags, }); self.load_commands_dirty = true; } if (self.data_segment_cmd_index == null) { self.data_segment_cmd_index = @intCast(u16, self.load_commands.items.len); const maxprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE | macho.VM_PROT_EXECUTE; const initprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE; const address_and_offset = self.nextSegmentAddressAndOffset(); const ideal_size = 2 * @sizeOf(u64) * self.base.options.symbol_count_hint; const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size); log.debug("found __DATA segment free space 0x{x} to 0x{x}", .{ address_and_offset.offset, address_and_offset.offset + needed_size }); try self.load_commands.append(self.base.allocator, .{ .Segment = SegmentCommand.empty("__DATA", .{ .vmaddr = address_and_offset.address, .vmsize = needed_size, .fileoff = address_and_offset.offset, .filesize = needed_size, .maxprot = maxprot, .initprot = initprot, }), }); self.load_commands_dirty = true; } if (self.la_symbol_ptr_section_index == null) { const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment; self.la_symbol_ptr_section_index = @intCast(u16, data_segment.sections.items.len); const flags = macho.S_LAZY_SYMBOL_POINTERS; const needed_size = @sizeOf(u64) * self.base.options.symbol_count_hint; const off = data_segment.findFreeSpace(needed_size, @alignOf(u64), null); assert(off + needed_size <= data_segment.inner.fileoff + data_segment.inner.filesize); // TODO Must expand __DATA segment. 
log.debug("found __la_symbol_ptr section free space 0x{x} to 0x{x}", .{ off, off + needed_size }); try data_segment.addSection(self.base.allocator, "__la_symbol_ptr", .{ .addr = data_segment.inner.vmaddr + off - data_segment.inner.fileoff, .size = needed_size, .offset = @intCast(u32, off), .@"align" = 3, // 2^3 = @sizeOf(u64) .flags = flags, }); self.load_commands_dirty = true; } if (self.data_section_index == null) { const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment; self.data_section_index = @intCast(u16, data_segment.sections.items.len); const needed_size = @sizeOf(u64) * self.base.options.symbol_count_hint; const off = data_segment.findFreeSpace(needed_size, @alignOf(u64), null); assert(off + needed_size <= data_segment.inner.fileoff + data_segment.inner.filesize); // TODO Must expand __DATA segment. log.debug("found __data section free space 0x{x} to 0x{x}", .{ off, off + needed_size }); try data_segment.addSection(self.base.allocator, "__data", .{ .addr = data_segment.inner.vmaddr + off - data_segment.inner.fileoff, .size = needed_size, .offset = @intCast(u32, off), .@"align" = 3, // 2^3 = @sizeOf(u64) }); self.load_commands_dirty = true; } if (self.linkedit_segment_cmd_index == null) { self.linkedit_segment_cmd_index = @intCast(u16, self.load_commands.items.len); const maxprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE | macho.VM_PROT_EXECUTE; const initprot = macho.VM_PROT_READ; const address_and_offset = self.nextSegmentAddressAndOffset(); log.debug("found __LINKEDIT segment free space at 0x{x}", .{address_and_offset.offset}); try self.load_commands.append(self.base.allocator, .{ .Segment = SegmentCommand.empty("__LINKEDIT", .{ .vmaddr = address_and_offset.address, .fileoff = address_and_offset.offset, .maxprot = maxprot, .initprot = initprot, }), }); self.load_commands_dirty = true; } if (self.dyld_info_cmd_index == null) { self.dyld_info_cmd_index = @intCast(u16, self.load_commands.items.len); try 
self.load_commands.append(self.base.allocator, .{ .DyldInfoOnly = .{ .cmd = macho.LC_DYLD_INFO_ONLY, .cmdsize = @sizeOf(macho.dyld_info_command), .rebase_off = 0, .rebase_size = 0, .bind_off = 0, .bind_size = 0, .weak_bind_off = 0, .weak_bind_size = 0, .lazy_bind_off = 0, .lazy_bind_size = 0, .export_off = 0, .export_size = 0, }, }); const dyld = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly; // Preallocate rebase, binding, lazy binding info, and export info. const expected_size = 48; // TODO This is totally random. const rebase_off = self.findFreeSpaceLinkedit(expected_size, 1, null); log.debug("found rebase info free space 0x{x} to 0x{x}", .{ rebase_off, rebase_off + expected_size }); dyld.rebase_off = @intCast(u32, rebase_off); dyld.rebase_size = expected_size; const bind_off = self.findFreeSpaceLinkedit(expected_size, 1, null); log.debug("found binding info free space 0x{x} to 0x{x}", .{ bind_off, bind_off + expected_size }); dyld.bind_off = @intCast(u32, bind_off); dyld.bind_size = expected_size; const lazy_bind_off = self.findFreeSpaceLinkedit(expected_size, 1, null); log.debug("found lazy binding info free space 0x{x} to 0x{x}", .{ lazy_bind_off, lazy_bind_off + expected_size }); dyld.lazy_bind_off = @intCast(u32, lazy_bind_off); dyld.lazy_bind_size = expected_size; const export_off = self.findFreeSpaceLinkedit(expected_size, 1, null); log.debug("found export info free space 0x{x} to 0x{x}", .{ export_off, export_off + expected_size }); dyld.export_off = @intCast(u32, export_off); dyld.export_size = expected_size; self.load_commands_dirty = true; } if (self.symtab_cmd_index == null) { self.symtab_cmd_index = @intCast(u16, self.load_commands.items.len); try self.load_commands.append(self.base.allocator, .{ .Symtab = .{ .cmd = macho.LC_SYMTAB, .cmdsize = @sizeOf(macho.symtab_command), .symoff = 0, .nsyms = 0, .stroff = 0, .strsize = 0, }, }); const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab; const symtab_size = 
self.base.options.symbol_count_hint * @sizeOf(macho.nlist_64); const symtab_off = self.findFreeSpaceLinkedit(symtab_size, @sizeOf(macho.nlist_64), null); log.debug("found symbol table free space 0x{x} to 0x{x}", .{ symtab_off, symtab_off + symtab_size }); symtab.symoff = @intCast(u32, symtab_off); symtab.nsyms = @intCast(u32, self.base.options.symbol_count_hint); try self.strtab.append(self.base.allocator, 0); const strtab_size = self.strtab.items.len; const strtab_off = self.findFreeSpaceLinkedit(strtab_size, 1, symtab_off); log.debug("found string table free space 0x{x} to 0x{x}", .{ strtab_off, strtab_off + strtab_size }); symtab.stroff = @intCast(u32, strtab_off); symtab.strsize = @intCast(u32, strtab_size); self.load_commands_dirty = true; self.strtab_dirty = true; } if (self.dysymtab_cmd_index == null) { self.dysymtab_cmd_index = @intCast(u16, self.load_commands.items.len); // Preallocate space for indirect symbol table. const indsymtab_size = self.base.options.symbol_count_hint * @sizeOf(u64); // Each entry is just a u64. 
const indsymtab_off = self.findFreeSpaceLinkedit(indsymtab_size, @sizeOf(u64), null);
log.debug("found indirect symbol table free space 0x{x} to 0x{x}", .{ indsymtab_off, indsymtab_off + indsymtab_size });

try self.load_commands.append(self.base.allocator, .{
    .Dysymtab = .{
        .cmd = macho.LC_DYSYMTAB,
        .cmdsize = @sizeOf(macho.dysymtab_command),
        .ilocalsym = 0,
        .nlocalsym = 0,
        .iextdefsym = 0,
        .nextdefsym = 0,
        .iundefsym = 0,
        .nundefsym = 0,
        .tocoff = 0,
        .ntoc = 0,
        .modtaboff = 0,
        .nmodtab = 0,
        .extrefsymoff = 0,
        .nextrefsyms = 0,
        .indirectsymoff = @intCast(u32, indsymtab_off),
        .nindirectsyms = @intCast(u32, self.base.options.symbol_count_hint),
        .extreloff = 0,
        .nextrel = 0,
        .locreloff = 0,
        .nlocrel = 0,
    },
});
self.load_commands_dirty = true;
}
if (self.dylinker_cmd_index == null) {
    // LC_LOAD_DYLINKER: path to the dynamic linker, padded to an 8-byte-aligned cmdsize.
    self.dylinker_cmd_index = @intCast(u16, self.load_commands.items.len);
    const cmdsize = @intCast(u32, mem.alignForwardGeneric(
        u64,
        @sizeOf(macho.dylinker_command) + mem.lenZ(DEFAULT_DYLD_PATH),
        @sizeOf(u64),
    ));
    var dylinker_cmd = commands.emptyGenericCommandWithData(macho.dylinker_command{
        .cmd = macho.LC_LOAD_DYLINKER,
        .cmdsize = cmdsize,
        .name = @sizeOf(macho.dylinker_command), // path string starts right after the fixed header
    });
    dylinker_cmd.data = try self.base.allocator.alloc(u8, cmdsize - dylinker_cmd.inner.name);
    mem.set(u8, dylinker_cmd.data, 0); // zero-fill so the alignment padding is deterministic
    mem.copy(u8, dylinker_cmd.data, mem.spanZ(DEFAULT_DYLD_PATH));
    try self.load_commands.append(self.base.allocator, .{ .Dylinker = dylinker_cmd });
    self.load_commands_dirty = true;
}
if (self.libsystem_cmd_index == null) {
    // LC_LOAD_DYLIB for libSystem; ordinal/version arguments (2, 0, 0) are
    // whatever createLoadDylibCommand expects — see its definition elsewhere in the project.
    self.libsystem_cmd_index = @intCast(u16, self.load_commands.items.len);
    var dylib_cmd = try commands.createLoadDylibCommand(self.base.allocator, mem.spanZ(LIB_SYSTEM_PATH), 2, 0, 0);
    errdefer dylib_cmd.deinit(self.base.allocator);
    try self.load_commands.append(self.base.allocator, .{ .Dylib = dylib_cmd });
    self.load_commands_dirty = true;
}
if (self.main_cmd_index == null) {
    // LC_MAIN: entry point offset is patched later (starts at 0).
    self.main_cmd_index = @intCast(u16, self.load_commands.items.len);
    try self.load_commands.append(self.base.allocator, .{
        .Main = .{
            .cmd = macho.LC_MAIN,
            .cmdsize = @sizeOf(macho.entry_point_command),
            .entryoff = 0x0,
            .stacksize = 0,
        },
    });
    self.load_commands_dirty = true;
}
if (self.build_version_cmd_index == null) {
    // LC_BUILD_VERSION: platform + minos/sdk packed as major<<16 | minor<<8 | patch,
    // followed by a single build_tool_version record for the linker.
    self.build_version_cmd_index = @intCast(u16, self.load_commands.items.len);
    const cmdsize = @intCast(u32, mem.alignForwardGeneric(
        u64,
        @sizeOf(macho.build_version_command) + @sizeOf(macho.build_tool_version),
        @sizeOf(u64),
    ));
    const ver = self.base.options.target.os.version_range.semver.min;
    const version = ver.major << 16 | ver.minor << 8 | ver.patch;
    const is_simulator_abi = self.base.options.target.abi == .simulator;
    var cmd = commands.emptyGenericCommandWithData(macho.build_version_command{
        .cmd = macho.LC_BUILD_VERSION,
        .cmdsize = cmdsize,
        .platform = switch (self.base.options.target.os.tag) {
            .macos => macho.PLATFORM_MACOS,
            .ios => if (is_simulator_abi) macho.PLATFORM_IOSSIMULATOR else macho.PLATFORM_IOS,
            .watchos => if (is_simulator_abi) macho.PLATFORM_WATCHOSSIMULATOR else macho.PLATFORM_WATCHOS,
            .tvos => if (is_simulator_abi) macho.PLATFORM_TVOSSIMULATOR else macho.PLATFORM_TVOS,
            else => unreachable,
        },
        .minos = version,
        .sdk = version,
        .ntools = 1,
    });
    const ld_ver = macho.build_tool_version{
        .tool = macho.TOOL_LD,
        .version = 0x0,
    };
    cmd.data = try self.base.allocator.alloc(u8, cmdsize - @sizeOf(macho.build_version_command));
    mem.set(u8, cmd.data, 0);
    mem.copy(u8, cmd.data, mem.asBytes(&ld_ver));
    try self.load_commands.append(self.base.allocator, .{ .BuildVersion = cmd });
    // NOTE(review): unlike the sibling branches, this one does not set
    // self.load_commands_dirty — confirm whether that is intentional.
}
if (self.source_version_cmd_index == null) {
    self.source_version_cmd_index = @intCast(u16, self.load_commands.items.len);
    try self.load_commands.append(self.base.allocator, .{
        .SourceVersion = .{
            .cmd = macho.LC_SOURCE_VERSION,
            .cmdsize = @sizeOf(macho.source_version_command),
            .version = 0x0,
        },
    });
    self.load_commands_dirty = true;
}
if (self.uuid_cmd_index == null) {
    // LC_UUID: a fresh random UUID per link.
    self.uuid_cmd_index = @intCast(u16, self.load_commands.items.len);
    var uuid_cmd: macho.uuid_command = .{
        .cmd = macho.LC_UUID,
        .cmdsize = @sizeOf(macho.uuid_command),
        .uuid = undefined,
    };
    std.crypto.random.bytes(&uuid_cmd.uuid);
    try self.load_commands.append(self.base.allocator, .{ .Uuid = uuid_cmd });
    self.load_commands_dirty = true;
}
if (self.code_signature_cmd_index == null and self.requires_adhoc_codesig) {
    // LC_CODE_SIGNATURE placeholder; dataoff/datasize are filled in when the
    // signature is actually written.
    self.code_signature_cmd_index = @intCast(u16, self.load_commands.items.len);
    try self.load_commands.append(self.base.allocator, .{
        .LinkeditData = .{
            .cmd = macho.LC_CODE_SIGNATURE,
            .cmdsize = @sizeOf(macho.linkedit_data_command),
            .dataoff = 0,
            .datasize = 0,
        },
    });
    self.load_commands_dirty = true;
}
if (!self.strtab_dir.containsAdapted(@as([]const u8, "dyld_stub_binder"), StringIndexAdapter{
    .bytes = &self.strtab,
})) {
    // Ensure the dyld_stub_binder undefined symbol exists exactly once, register it
    // with the symbol resolver, and give it a GOT entry (written immediately).
    const import_sym_index = @intCast(u32, self.undefs.items.len);
    const n_strx = try self.makeString("dyld_stub_binder");
    try self.undefs.append(self.base.allocator, .{
        .n_strx = n_strx,
        .n_type = macho.N_UNDF | macho.N_EXT,
        .n_sect = 0,
        .n_desc = @intCast(u8, 1) * macho.N_SYMBOL_RESOLVER, // dylib ordinal 1 in the n_desc field
        .n_value = 0,
    });
    try self.symbol_resolver.putNoClobber(self.base.allocator, n_strx, .{
        .where = .undef,
        .where_index = import_sym_index,
    });
    const got_key = GotIndirectionKey{
        .where = .undef,
        .where_index = import_sym_index,
    };
    const got_index = @intCast(u32, self.got_entries.items.len);
    try self.got_entries.append(self.base.allocator, got_key);
    try self.got_entries_map.putNoClobber(self.base.allocator, got_key, got_index);
    try self.writeGotEntry(got_index);
    self.binding_info_dirty = true;
}
if (self.stub_helper_stubs_start_off == null) {
    try self.writeStubHelperPreamble();
}
}

/// Allocates virtual address space for `text_block` (of `new_block_size` bytes,
/// aligned to `alignment`) in the __TEXT,__text section, reusing a free-list slot
/// when possible. Returns the chosen vaddr. Also relinks the block into the
/// text-block doubly-linked list and may grow the section.
fn allocateTextBlock(self: *MachO, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
    const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const text_section = &text_segment.sections.items[self.text_section_index.?];
    const new_block_ideal_capacity = padToIdeal(new_block_size);

    // We use these to
// indicate our intention to update metadata, placing the new block,
// and possibly removing a free list node.
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
var block_placement: ?*TextBlock = null;
var free_list_removal: ?usize = null;

// First we look for an appropriately sized free list node.
// The list is unordered. We'll just take the first thing that works.
const vaddr = blk: {
    var i: usize = 0;
    while (i < self.text_block_free_list.items.len) {
        const big_block = self.text_block_free_list.items[i];
        // We now have a pointer to a live text block that has too much capacity.
        // Is it enough that we could fit this new text block?
        const sym = self.locals.items[big_block.local_sym_index];
        const capacity = big_block.capacity(self.*);
        const ideal_capacity = padToIdeal(capacity);
        const ideal_capacity_end_vaddr = sym.n_value + ideal_capacity;
        const capacity_end_vaddr = sym.n_value + capacity;
        // Place the new block at the END of the surplus capacity, aligned downward.
        const new_start_vaddr_unaligned = capacity_end_vaddr - new_block_ideal_capacity;
        const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment);
        if (new_start_vaddr < ideal_capacity_end_vaddr) {
            // Additional bookkeeping here to notice if this free list node
            // should be deleted because the block that it points to has grown to take up
            // more of the extra capacity.
            if (!big_block.freeListEligible(self.*)) {
                const bl = self.text_block_free_list.swapRemove(i);
                bl.deinit(self.base.allocator);
            } else {
                i += 1;
            }
            continue;
        }
        // At this point we know that we will place the new block here. But the
        // remaining question is whether there is still yet enough capacity left
        // over for there to still be a free list node.
        const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
        const keep_free_list_node = remaining_capacity >= min_text_capacity;

        // Set up the metadata to be updated, after errors are no longer possible.
        block_placement = big_block;
        if (!keep_free_list_node) {
            free_list_removal = i;
        }
        break :blk new_start_vaddr;
    } else if (self.last_text_block) |last| {
        // No usable free-list node: append after the last allocated block.
        const last_symbol = self.locals.items[last.local_sym_index];
        // TODO We should pad out the excess capacity with NOPs. For executables,
        // no padding seems to be OK, but it will probably not be for objects.
        const ideal_capacity = padToIdeal(last.size);
        const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity;
        const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
        block_placement = last;
        break :blk new_start_vaddr;
    } else {
        // Empty section: place at the section base address.
        break :blk text_section.addr;
    }
};

// The section only needs to grow when the block was appended at the end
// (no placement found, or placed after the current tail).
const expand_text_section = block_placement == null or block_placement.?.next == null;
if (expand_text_section) {
    const needed_size = (vaddr + new_block_size) - text_section.addr;
    assert(needed_size <= text_segment.inner.filesize); // TODO must move the entire text section.

    self.last_text_block = text_block;
    text_section.size = needed_size;
    self.load_commands_dirty = true; // TODO Make more granular.

    if (self.d_sym) |*ds| {
        // Mirror the new section size into the debug-symbols bundle.
        const debug_text_seg = &ds.load_commands.items[ds.text_segment_cmd_index.?].Segment;
        const debug_text_sect = &debug_text_seg.sections.items[ds.text_section_index.?];
        debug_text_sect.size = needed_size;
        ds.load_commands_dirty = true;
    }
}
text_block.size = new_block_size;

// Unlink the block from its old position...
if (text_block.prev) |prev| {
    prev.next = text_block.next;
}
if (text_block.next) |next| {
    next.prev = text_block.prev;
}
// ...and splice it in after its placement block (or make it standalone).
if (block_placement) |big_block| {
    text_block.prev = big_block;
    text_block.next = big_block.next;
    big_block.next = text_block;
} else {
    text_block.prev = null;
    text_block.next = null;
}

if (free_list_removal) |i| {
    _ = self.text_block_free_list.swapRemove(i);
}
return vaddr;
}

/// Registers an external function `name` (mangled with a leading underscore) as an
/// undefined import with dylib ordinal 1, creating a stub slot for it. Idempotent:
/// if the mangled name already exists in the string table, returns the existing
/// undef symbol index. Returns the index into `self.undefs`.
pub fn addExternFn(self: *MachO, name: []const u8) !u32 {
    const sym_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{name});
    defer self.base.allocator.free(sym_name);

    if (self.strtab_dir.getKeyAdapted(@as([]const u8, sym_name), StringIndexAdapter{
        .bytes = &self.strtab,
    })) |n_strx| {
        // Already known: the resolver must have an entry for any interned name.
        const resolv = self.symbol_resolver.get(n_strx) orelse unreachable;
        return resolv.where_index;
    }

    log.debug("adding new extern function '{s}' with dylib ordinal 1", .{sym_name});
    const import_sym_index = @intCast(u32, self.undefs.items.len);
    const n_strx = try self.makeString(sym_name);
    try self.undefs.append(self.base.allocator, .{
        .n_strx = n_strx,
        .n_type = macho.N_UNDF | macho.N_EXT,
        .n_sect = 0,
        .n_desc = @intCast(u8, 1) * macho.N_SYMBOL_RESOLVER, // dylib ordinal 1 in n_desc
        .n_value = 0,
    });
    try self.symbol_resolver.putNoClobber(self.base.allocator, n_strx, .{
        .where = .undef,
        .where_index = import_sym_index,
    });

    const stubs_index = @intCast(u32, self.stubs.items.len);
    try self.stubs.append(self.base.allocator, import_sym_index);
    try self.stubs_map.putNoClobber(self.base.allocator, import_sym_index, stubs_index);

    // TODO discuss this. The caller context expects codegen.InnerError{ OutOfMemory, CodegenFail },
    // which obviously doesn't include file writing op errors.
// So instead of trying to write the stub entry right here and now, queue it up
// and dispose of when updating decl.
try self.pending_updates.append(self.base.allocator, .{
    .kind = .stub,
    .index = stubs_index,
});

return import_sym_index;
}

/// Result pair for `nextSegmentAddressAndOffset`: the first virtual address and
/// file offset past the end of the last segment.
const NextSegmentAddressAndOffset = struct {
    address: u64,
    offset: u64,
};

/// Computes where the next segment would start, based on the LAST Segment load
/// command currently present (vmaddr+vmsize for the address, fileoff+filesize
/// for the file offset).
fn nextSegmentAddressAndOffset(self: *MachO) NextSegmentAddressAndOffset {
    var prev_segment_idx: ?usize = null; // We use optional here for safety.
    // Scan all load commands; the last .Segment wins.
    for (self.load_commands.items) |cmd, i| {
        if (cmd == .Segment) {
            prev_segment_idx = i;
        }
    }
    const prev_segment = self.load_commands.items[prev_segment_idx.?].Segment;
    const address = prev_segment.inner.vmaddr + prev_segment.inner.vmsize;
    const offset = prev_segment.inner.fileoff + prev_segment.inner.filesize;
    return .{
        .address = address,
        .offset = offset,
    };
}

/// Returns how many bytes are free in __LINKEDIT starting at file offset `start`,
/// i.e. the distance from `start` to the nearest allocated blob that begins after
/// it (maxInt(u64) - start if nothing follows).
fn allocatedSizeLinkedit(self: *MachO, start: u64) u64 {
    assert(start > 0);
    var min_pos: u64 = std.math.maxInt(u64);

    // __LINKEDIT is a weird segment where sections get their own load commands so we
    // special-case it.
    if (self.dyld_info_cmd_index) |idx| {
        const dyld_info = self.load_commands.items[idx].DyldInfoOnly;
        if (dyld_info.rebase_off > start and dyld_info.rebase_off < min_pos) min_pos = dyld_info.rebase_off;
        if (dyld_info.bind_off > start and dyld_info.bind_off < min_pos) min_pos = dyld_info.bind_off;
        if (dyld_info.weak_bind_off > start and dyld_info.weak_bind_off < min_pos) min_pos = dyld_info.weak_bind_off;
        if (dyld_info.lazy_bind_off > start and dyld_info.lazy_bind_off < min_pos) min_pos = dyld_info.lazy_bind_off;
        if (dyld_info.export_off > start and dyld_info.export_off < min_pos) min_pos = dyld_info.export_off;
    }

    if (self.function_starts_cmd_index) |idx| {
        const fstart = self.load_commands.items[idx].LinkeditData;
        if (fstart.dataoff > start and fstart.dataoff < min_pos) min_pos = fstart.dataoff;
    }

    if (self.data_in_code_cmd_index) |idx| {
        const dic = self.load_commands.items[idx].LinkeditData;
        if (dic.dataoff > start and dic.dataoff < min_pos) min_pos = dic.dataoff;
    }

    if (self.dysymtab_cmd_index) |idx| {
        const dysymtab = self.load_commands.items[idx].Dysymtab;
        if (dysymtab.indirectsymoff > start and dysymtab.indirectsymoff < min_pos) min_pos = dysymtab.indirectsymoff;
        // TODO Handle more dynamic symbol table sections.
    }

    if (self.symtab_cmd_index) |idx| {
        const symtab = self.load_commands.items[idx].Symtab;
        if (symtab.symoff > start and symtab.symoff < min_pos) min_pos = symtab.symoff;
        if (symtab.stroff > start and symtab.stroff < min_pos) min_pos = symtab.stroff;
    }

    return min_pos - start;
}

/// Tests whether [start, end) overlaps the blob at [off, off + padToIdeal(size)).
/// Returns the padded end of the colliding blob (a candidate next start) on
/// overlap, null otherwise.
inline fn checkForCollision(start: u64, end: u64, off: u64, size: u64) ?u64 {
    const increased_size = padToIdeal(size);
    const test_end = off + increased_size;
    if (end > off and start < test_end) {
        return test_end;
    }
    return null;
}

/// Checks whether a blob of `size` bytes placed at file offset `start` in
/// __LINKEDIT would collide with any existing blob. Returns the padded end of
/// the first colliding blob (so the caller can retry past it), or null if free.
fn detectAllocCollisionLinkedit(self: *MachO, start: u64, size: u64) ?u64 {
    const end = start + padToIdeal(size);

    // __LINKEDIT is a weird segment where sections get their own load commands so we
    // special-case it.
// Each branch first guards against a stale index (command not appended yet):
// if the recorded index equals the current list length, the command does not
// exist and the check is skipped via `break :outer`.
if (self.dyld_info_cmd_index) |idx| outer: {
    if (self.load_commands.items.len == idx) break :outer;
    const dyld_info = self.load_commands.items[idx].DyldInfoOnly;

    // Rebase info
    if (checkForCollision(start, end, dyld_info.rebase_off, dyld_info.rebase_size)) |pos| {
        return pos;
    }

    // Binding info
    if (checkForCollision(start, end, dyld_info.bind_off, dyld_info.bind_size)) |pos| {
        return pos;
    }

    // Weak binding info
    if (checkForCollision(start, end, dyld_info.weak_bind_off, dyld_info.weak_bind_size)) |pos| {
        return pos;
    }

    // Lazy binding info
    if (checkForCollision(start, end, dyld_info.lazy_bind_off, dyld_info.lazy_bind_size)) |pos| {
        return pos;
    }

    // Export info
    if (checkForCollision(start, end, dyld_info.export_off, dyld_info.export_size)) |pos| {
        return pos;
    }
}

if (self.function_starts_cmd_index) |idx| outer: {
    if (self.load_commands.items.len == idx) break :outer;
    const fstart = self.load_commands.items[idx].LinkeditData;
    if (checkForCollision(start, end, fstart.dataoff, fstart.datasize)) |pos| {
        return pos;
    }
}

if (self.data_in_code_cmd_index) |idx| outer: {
    if (self.load_commands.items.len == idx) break :outer;
    const dic = self.load_commands.items[idx].LinkeditData;
    if (checkForCollision(start, end, dic.dataoff, dic.datasize)) |pos| {
        return pos;
    }
}

if (self.dysymtab_cmd_index) |idx| outer: {
    if (self.load_commands.items.len == idx) break :outer;
    const dysymtab = self.load_commands.items[idx].Dysymtab;

    // Indirect symbol table
    const nindirectsize = dysymtab.nindirectsyms * @sizeOf(u32);
    if (checkForCollision(start, end, dysymtab.indirectsymoff, nindirectsize)) |pos| {
        return pos;
    }

    // TODO Handle more dynamic symbol table sections.
}

if (self.symtab_cmd_index) |idx| outer: {
    if (self.load_commands.items.len == idx) break :outer;
    const symtab = self.load_commands.items[idx].Symtab;

    // Symbol table
    const symsize = symtab.nsyms * @sizeOf(macho.nlist_64);
    if (checkForCollision(start, end, symtab.symoff, symsize)) |pos| {
        return pos;
    }

    // String table
    if (checkForCollision(start, end, symtab.stroff, symtab.strsize)) |pos| {
        return pos;
    }
}

return null;
}

/// First-fit scan for `object_size` free bytes inside __LINKEDIT, starting at
/// `start` (or the segment's fileoff when null), bumping past each collision and
/// re-aligning to `min_alignment`. Returns the found file offset.
fn findFreeSpaceLinkedit(self: *MachO, object_size: u64, min_alignment: u16, start: ?u64) u64 {
    const linkedit = self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
    var st: u64 = start orelse linkedit.inner.fileoff;
    while (self.detectAllocCollisionLinkedit(st, object_size)) |item_end| {
        st = mem.alignForwardGeneric(u64, item_end, min_alignment);
    }
    return st;
}

/// Writes the GOT entry at `index` (the target symbol's n_value, 8 bytes LE)
/// into the __got section of __DATA_CONST at the matching file offset.
fn writeGotEntry(self: *MachO, index: usize) !void {
    const seg = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
    const sect = &seg.sections.items[self.got_section_index.?];
    const off = sect.offset + @sizeOf(u64) * index;

    if (self.got_entries_count_dirty) {
        // TODO relocate.
self.got_entries_count_dirty = false;
}

const got_entry = self.got_entries.items[index];
// Resolve the entry to a symbol record depending on where it lives.
const sym = switch (got_entry.where) {
    .local => self.locals.items[got_entry.where_index],
    .undef => self.undefs.items[got_entry.where_index],
};
log.debug("writing offset table entry [ 0x{x} => 0x{x} ({s}) ]", .{
    off,
    sym.n_value,
    self.getString(sym.n_strx),
});
try self.base.file.?.pwriteAll(mem.asBytes(&sym.n_value), off);
}

/// Writes the la_symbol_ptr entry at `index`: an 8-byte LE virtual address
/// pointing at this import's stub inside the __stub_helper section, so that the
/// first call routes through the lazy binder.
fn writeLazySymbolPointer(self: *MachO, index: u32) !void {
    const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const stub_helper = text_segment.sections.items[self.stub_helper_section_index.?];
    const data_segment = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
    const la_symbol_ptr = data_segment.sections.items[self.la_symbol_ptr_section_index.?];

    // Per-arch size of one stub-helper stub (must match writeStubInStubHelper).
    const stub_size: u4 = switch (self.base.options.target.cpu.arch) {
        .x86_64 => 10,
        .aarch64 => 3 * @sizeOf(u32),
        else => unreachable,
    };
    const stub_off = self.stub_helper_stubs_start_off.? + index * stub_size;
    // Convert the stub's file offset to its virtual address.
    const end = stub_helper.addr + stub_off - stub_helper.offset;
    var buf: [@sizeOf(u64)]u8 = undefined;
    mem.writeIntLittle(u64, &buf, end);
    const off = la_symbol_ptr.offset + index * @sizeOf(u64);
    log.debug("writing lazy symbol pointer entry 0x{x} at 0x{x}", .{ end, off });
    try self.base.file.?.pwriteAll(&buf, off);
}

/// Emits the stub-helper preamble at the start of the __stub_helper section:
/// machine code that pushes an ImageLoader cache pointer (from __DATA,__data)
/// and jumps through the GOT to dyld_stub_binder. On success records
/// `stub_helper_stubs_start_off`, the file offset where per-import stubs begin.
fn writeStubHelperPreamble(self: *MachO) !void {
    const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const stub_helper = &text_segment.sections.items[self.stub_helper_section_index.?];
    const data_const_segment = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
    const got = &data_const_segment.sections.items[self.got_section_index.?];
    const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
    const data = &data_segment.sections.items[self.data_section_index.?];

    switch (self.base.options.target.cpu.arch) {
        .x86_64 => {
            const code_size = 15;
            var code: [code_size]u8 = undefined;
            // lea %r11, [rip + disp]
            code[0] = 0x4c;
            code[1] = 0x8d;
            code[2] = 0x1d;
            {
                // RIP-relative: displacement is measured from the end of this
                // 7-byte instruction.
                const target_addr = data.addr;
                const displacement = try math.cast(u32, target_addr - stub_helper.addr - 7);
                mem.writeIntLittle(u32, code[3..7], displacement);
            }
            // push %r11
            code[7] = 0x41;
            code[8] = 0x53;
            // jmp [rip + disp]
            code[9] = 0xff;
            code[10] = 0x25;
            {
                const displacement = try math.cast(u32, got.addr - stub_helper.addr - code_size);
                mem.writeIntLittle(u32, code[11..], displacement);
            }
            try self.base.file.?.pwriteAll(&code, stub_helper.offset);
            self.stub_helper_stubs_start_off = stub_helper.offset + code_size;
        },
        .aarch64 => {
            var code: [6 * @sizeOf(u32)]u8 = undefined;
            // Materialize the __data address into x17, choosing the shortest
            // encoding that reaches: adr+nop, nop+adr, or adrp+add.
            data_blk_outer: {
                const this_addr = stub_helper.addr;
                const target_addr = data.addr;
                data_blk: {
                    const displacement = math.cast(i21, target_addr - this_addr) catch break :data_blk;
                    // adr x17, disp
                    mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.adr(.x17, displacement).toU32());
                    // nop
                    mem.writeIntLittle(u32, code[4..8],
// (second word of the adr/nop pair — argument of the writeIntLittle call
// opened on the previous line)
aarch64.Instruction.nop().toU32());
break :data_blk_outer;
}
data_blk: {
    // Retry with adr in the second slot (displacement measured from word 1).
    const new_this_addr = this_addr + @sizeOf(u32);
    const displacement = math.cast(i21, target_addr - new_this_addr) catch break :data_blk;
    // nop
    mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.nop().toU32());
    // adr x17, disp
    mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.adr(.x17, displacement).toU32());
    break :data_blk_outer;
}
// Jump is too big, replace adr with adrp and add.
const this_page = @intCast(i32, this_addr >> 12);
const target_page = @intCast(i32, target_addr >> 12);
const pages = @intCast(i21, target_page - this_page);
// adrp x17, pages
mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.adrp(.x17, pages).toU32());
const narrowed = @truncate(u12, target_addr);
mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.add(.x17, .x17, narrowed, false).toU32());
}
// stp x16, x17, [sp, #-16]!
mem.writeIntLittle(u32, code[8..12], aarch64.Instruction.stp(
    .x16,
    .x17,
    aarch64.Register.sp,
    aarch64.Instruction.LoadStorePairOffset.pre_index(-16),
).toU32());
// Load the binder address from the GOT into x16, again picking the shortest
// reachable encoding: ldr(literal)+nop, nop+ldr(literal), or adrp+ldr(register).
binder_blk_outer: {
    const this_addr = stub_helper.addr + 3 * @sizeOf(u32);
    const target_addr = got.addr;
    binder_blk: {
        const displacement = math.divExact(u64, target_addr - this_addr, 4) catch break :binder_blk;
        const literal = math.cast(u18, displacement) catch break :binder_blk;
        // ldr x16, label
        mem.writeIntLittle(u32, code[12..16], aarch64.Instruction.ldr(.x16, .{
            .literal = literal,
        }).toU32());
        // nop
        mem.writeIntLittle(u32, code[16..20], aarch64.Instruction.nop().toU32());
        break :binder_blk_outer;
    }
    binder_blk: {
        const new_this_addr = this_addr + @sizeOf(u32);
        const displacement = math.divExact(u64, target_addr - new_this_addr, 4) catch break :binder_blk;
        const literal = math.cast(u18, displacement) catch break :binder_blk;
        // nop
        mem.writeIntLittle(u32, code[12..16], aarch64.Instruction.nop().toU32());
        // ldr x16, label
        mem.writeIntLittle(u32, code[16..20], aarch64.Instruction.ldr(.x16, .{
            .literal = literal,
        }).toU32());
        break :binder_blk_outer;
    }
    // Jump is too big, replace ldr with adrp and ldr(register).
    const this_page = @intCast(i32, this_addr >> 12);
    const target_page = @intCast(i32, target_addr >> 12);
    const pages = @intCast(i21, target_page - this_page);
    // adrp x16, pages
    mem.writeIntLittle(u32, code[12..16], aarch64.Instruction.adrp(.x16, pages).toU32());
    const narrowed = @truncate(u12, target_addr);
    const offset = try math.divExact(u12, narrowed, 8);
    // ldr x16, x16, offset
    mem.writeIntLittle(u32, code[16..20], aarch64.Instruction.ldr(.x16, .{
        .register = .{
            .rn = .x16,
            .offset = aarch64.Instruction.LoadStoreOffset.imm(offset),
        },
    }).toU32());
}
// br x16
mem.writeIntLittle(u32, code[20..24], aarch64.Instruction.br(.x16).toU32());
try self.base.file.?.pwriteAll(&code, stub_helper.offset);
self.stub_helper_stubs_start_off = stub_helper.offset + code.len;
},
else => unreachable,
}
}

/// Writes the __stubs entry for import `index`: a jump through the matching
/// la_symbol_ptr slot. Entry size comes from the section's reserved2 field.
fn writeStub(self: *MachO, index: u32) !void {
    const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const stubs = text_segment.sections.items[self.stubs_section_index.?];
    const data_segment = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
    const la_symbol_ptr = data_segment.sections.items[self.la_symbol_ptr_section_index.?];

    const stub_off = stubs.offset + index * stubs.reserved2;
    const stub_addr = stubs.addr + index * stubs.reserved2;
    const la_ptr_addr = la_symbol_ptr.addr + index * @sizeOf(u64);
    log.debug("writing stub at 0x{x}", .{stub_off});
    var code = try self.base.allocator.alloc(u8, stubs.reserved2);
    defer self.base.allocator.free(code);
    switch (self.base.options.target.cpu.arch) {
        .x86_64 => {
            assert(la_ptr_addr >= stub_addr + stubs.reserved2);
            const displacement = try math.cast(u32, la_ptr_addr - stub_addr - stubs.reserved2);
            // jmp
            code[0] = 0xff;
            code[1] = 0x25;
            mem.writeIntLittle(u32, code[2..][0..4], displacement);
        },
        .aarch64 => {
            assert(la_ptr_addr >= stub_addr);
            outer: {
                const this_addr = stub_addr;
                const // (declaration continues on the next line)
target_addr = la_ptr_addr;
// Same shortest-encoding selection as the stub-helper preamble:
// ldr(literal)+nop, nop+ldr(literal), else adrp+ldr(register).
inner: {
    const displacement = math.divExact(u64, target_addr - this_addr, 4) catch break :inner;
    const literal = math.cast(u18, displacement) catch break :inner;
    // ldr x16, literal
    mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.ldr(.x16, .{
        .literal = literal,
    }).toU32());
    // nop
    mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.nop().toU32());
    break :outer;
}
inner: {
    const new_this_addr = this_addr + @sizeOf(u32);
    const displacement = math.divExact(u64, target_addr - new_this_addr, 4) catch break :inner;
    const literal = math.cast(u18, displacement) catch break :inner;
    // nop
    mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.nop().toU32());
    // ldr x16, literal
    mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.ldr(.x16, .{
        .literal = literal,
    }).toU32());
    break :outer;
}
// Use adrp followed by ldr(register).
const this_page = @intCast(i32, this_addr >> 12);
const target_page = @intCast(i32, target_addr >> 12);
const pages = @intCast(i21, target_page - this_page);
// adrp x16, pages
mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.adrp(.x16, pages).toU32());
const narrowed = @truncate(u12, target_addr);
const offset = try math.divExact(u12, narrowed, 8);
// ldr x16, x16, offset
mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.ldr(.x16, .{
    .register = .{
        .rn = .x16,
        .offset = aarch64.Instruction.LoadStoreOffset.imm(offset),
    },
}).toU32());
}
// br x16
mem.writeIntLittle(u32, code[8..12], aarch64.Instruction.br(.x16).toU32());
},
else => unreachable,
}
try self.base.file.?.pwriteAll(code, stub_off);
}

/// Writes the per-import stub in the __stub_helper section at `index`: pushes a
/// lazy-bind-offset placeholder and branches back to the preamble. The
/// placeholder is patched later by `populateLazyBindOffsetsInStubHelper`.
fn writeStubInStubHelper(self: *MachO, index: u32) !void {
    const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const stub_helper = text_segment.sections.items[self.stub_helper_section_index.?];

    // Per-arch size of one stub (must match writeLazySymbolPointer).
    const stub_size: u4 = switch (self.base.options.target.cpu.arch) {
        .x86_64 => 10,
        .aarch64 => 3 * @sizeOf(u32),
        else => unreachable,
    };
    const stub_off = self.stub_helper_stubs_start_off.? + index * stub_size;
    var code = try self.base.allocator.alloc(u8, stub_size);
    defer self.base.allocator.free(code);
    switch (self.base.options.target.cpu.arch) {
        .x86_64 => {
            // Backward displacement to the preamble at the section start.
            const displacement = try math.cast(
                i32,
                @intCast(i64, stub_helper.offset) - @intCast(i64, stub_off) - stub_size,
            );
            // pushq
            code[0] = 0x68;
            mem.writeIntLittle(u32, code[1..][0..4], 0x0); // Just a placeholder populated in `populateLazyBindOffsetsInStubHelper`.
            // jmpq
            code[5] = 0xe9;
            mem.writeIntLittle(u32, code[6..][0..4], @bitCast(u32, displacement));
        },
        .aarch64 => {
            // Literal load targets the placeholder word at the end of this stub.
            const literal = blk: {
                const div_res = try math.divExact(u64, stub_size - @sizeOf(u32), 4);
                break :blk try math.cast(u18, div_res);
            };
            // ldr w16, literal
            mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.ldr(.w16, .{
                .literal = literal,
            }).toU32());
            const displacement = try math.cast(i28, @intCast(i64, stub_helper.offset) - @intCast(i64, stub_off) - 4);
            // b disp
            mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.b(displacement).toU32());
            // Just a placeholder populated in `populateLazyBindOffsetsInStubHelper`.
            mem.writeIntLittle(u32, code[8..12], 0x0);
        },
        else => unreachable,
    }
    try self.base.file.?.pwriteAll(code, stub_off);
}

/// Grows the symbol table to fit all locals + globals + undefs. If the current
/// __LINKEDIT slot is too small, copies the existing table to a fresh slot and
/// flags the string table for relocation. No-op when capacity already suffices.
fn relocateSymbolTable(self: *MachO) !void {
    const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
    const nlocals = self.locals.items.len;
    const nglobals = self.globals.items.len;
    const nundefs = self.undefs.items.len;
    const nsyms = nlocals + nglobals + nundefs;

    if (symtab.nsyms < nsyms) {
        const needed_size = nsyms * @sizeOf(macho.nlist_64);
        if (needed_size > self.allocatedSizeLinkedit(symtab.symoff)) {
            // Move the entire symbol table to a new location
            const new_symoff = self.findFreeSpaceLinkedit(needed_size, @alignOf(macho.nlist_64), null);
            const existing_size = symtab.nsyms * @sizeOf(macho.nlist_64);
            log.debug("relocating symbol table from 0x{x}-0x{x} to 0x{x}-0x{x}", .{
                symtab.symoff,
                symtab.symoff + existing_size,
                new_symoff,
                new_symoff + existing_size,
            });
            const amt = try self.base.file.?.copyRangeAll(symtab.symoff, self.base.file.?, new_symoff, existing_size);
            if (amt != existing_size) return error.InputOutput;
            symtab.symoff = @intCast(u32, new_symoff);
            self.strtab_needs_relocation = true;
        }
        symtab.nsyms = @intCast(u32, nsyms);
        self.load_commands_dirty = true;
    }
}

/// Writes the single local symbol at `index` into the symbol table on disk,
/// growing/relocating the table first if necessary.
fn writeLocalSymbol(self: *MachO, index: usize) !void {
    const tracy = trace(@src());
    defer tracy.end();
    try self.relocateSymbolTable();
    const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
    const off = symtab.symoff + @sizeOf(macho.nlist_64) * index;
    log.debug("writing local symbol {} at 0x{x}", .{ index, off });
    try self.base.file.?.pwriteAll(mem.asBytes(&self.locals.items[index]), off);
}

/// Writes all global (externally defined) and undefined symbols after the
/// locals in the on-disk symbol table, then updates the LC_DYSYMTAB group
/// indices/counts to match the locals/globals/undefs layout.
fn writeAllGlobalAndUndefSymbols(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();
    try self.relocateSymbolTable();
    const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
    const nlocals = self.locals.items.len;
    const nglobals = self.globals.items.len;
    const nundefs = self.undefs.items.len;
    const locals_off = // (expression continues on the next line)
symtab.symoff; const locals_size = nlocals * @sizeOf(macho.nlist_64); const globals_off = locals_off + locals_size; const globals_size = nglobals * @sizeOf(macho.nlist_64); log.debug("writing global symbols from 0x{x} to 0x{x}", .{ globals_off, globals_size + globals_off }); try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.globals.items), globals_off); const undefs_off = globals_off + globals_size; const undefs_size = nundefs * @sizeOf(macho.nlist_64); log.debug("writing extern symbols from 0x{x} to 0x{x}", .{ undefs_off, undefs_size + undefs_off }); try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.undefs.items), undefs_off); // Update dynamic symbol table. const dysymtab = &self.load_commands.items[self.dysymtab_cmd_index.?].Dysymtab; dysymtab.nlocalsym = @intCast(u32, nlocals); dysymtab.iextdefsym = @intCast(u32, nlocals); dysymtab.nextdefsym = @intCast(u32, nglobals); dysymtab.iundefsym = @intCast(u32, nlocals + nglobals); dysymtab.nundefsym = @intCast(u32, nundefs); self.load_commands_dirty = true; } fn writeIndirectSymbolTable(self: *MachO) !void { // TODO figure out a way not to rewrite the table every time if // no new undefs are not added. 
    const tracy = trace(@src());
    defer tracy.end();

    const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const stubs = &text_segment.sections.items[self.stubs_section_index.?];
    const data_const_seg = &self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
    const got = &data_const_seg.sections.items[self.got_section_index.?];
    const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
    const la_symbol_ptr = &data_segment.sections.items[self.la_symbol_ptr_section_index.?];
    const dysymtab = &self.load_commands.items[self.dysymtab_cmd_index.?].Dysymtab;

    const nstubs = @intCast(u32, self.stubs.items.len);
    const ngot_entries = @intCast(u32, self.got_entries.items.len);
    const allocated_size = self.allocatedSizeLinkedit(dysymtab.indirectsymoff);
    // Each stub gets two indirect entries (one for __stubs, one for __la_symbol_ptr),
    // plus one entry per GOT entry.
    const nindirectsyms = nstubs * 2 + ngot_entries;
    const needed_size = @intCast(u32, nindirectsyms * @sizeOf(u32));

    if (needed_size > allocated_size) {
        // Clear the count before moving so the old range is considered free.
        dysymtab.nindirectsyms = 0;
        dysymtab.indirectsymoff = @intCast(u32, self.findFreeSpaceLinkedit(needed_size, @sizeOf(u32), null));
    }
    dysymtab.nindirectsyms = nindirectsyms;

    log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{
        dysymtab.indirectsymoff,
        dysymtab.indirectsymoff + needed_size,
    });

    var buf = try self.base.allocator.alloc(u8, needed_size);
    defer self.base.allocator.free(buf);
    var stream = std.io.fixedBufferStream(buf);
    var writer = stream.writer();

    // __stubs entries: symbol-table indices of the undef each stub binds to.
    stubs.reserved1 = 0;
    for (self.stubs.items) |id| {
        try writer.writeIntLittle(u32, dysymtab.iundefsym + id);
    }

    // __got entries start after the stubs' entries (reserved1 is the offset
    // into the indirect symbol table for this section).
    got.reserved1 = nstubs;
    for (self.got_entries.items) |entry| {
        switch (entry.where) {
            .undef => {
                try writer.writeIntLittle(u32, dysymtab.iundefsym + entry.where_index);
            },
            .local => {
                try writer.writeIntLittle(u32, macho.INDIRECT_SYMBOL_LOCAL);
            },
        }
    }

    // __la_symbol_ptr entries mirror the stubs' entries.
    la_symbol_ptr.reserved1 = got.reserved1 + ngot_entries;
    for (self.stubs.items) |id| {
        try writer.writeIntLittle(u32, dysymtab.iundefsym + id);
    }

    try self.base.file.?.pwriteAll(buf, dysymtab.indirectsymoff);
    self.load_commands_dirty = true;
}

/// Writes the data-in-code entries (LC_DATA_IN_CODE) at the end of __LINKEDIT,
/// rebasing each entry's offset against the __TEXT,__text section file offset.
fn writeDices(self: *MachO) !void {
    if (!self.has_dices) return;

    const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
    const dice_cmd = &self.load_commands.items[self.data_in_code_cmd_index.?].LinkeditData;
    const fileoff = seg.inner.fileoff + seg.inner.filesize;

    var buf = std.ArrayList(u8).init(self.base.allocator);
    defer buf.deinit();

    var block: *TextBlock = self.blocks.get(.{
        .seg = self.text_segment_cmd_index orelse return,
        .sect = self.text_section_index orelse return,
    }) orelse return;

    // Rewind to the first block in the chain, then walk forward.
    while (block.prev) |prev| {
        block = prev;
    }

    const text_seg = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const text_sect = text_seg.sections.items[self.text_section_index.?];

    while (true) {
        if (block.dices.items.len > 0) {
            const sym = self.locals.items[block.local_sym_index];
            // File offset of this block's code within __text.
            const base_off = try math.cast(u32, sym.n_value - text_sect.addr + text_sect.offset);

            try buf.ensureUnusedCapacity(block.dices.items.len * @sizeOf(macho.data_in_code_entry));
            for (block.dices.items) |dice| {
                const rebased_dice = macho.data_in_code_entry{
                    .offset = base_off + dice.offset,
                    .length = dice.length,
                    .kind = dice.kind,
                };
                buf.appendSliceAssumeCapacity(mem.asBytes(&rebased_dice));
            }
        }

        if (block.next) |next| {
            block = next;
        } else break;
    }

    const datasize = @intCast(u32, buf.items.len);

    dice_cmd.dataoff = @intCast(u32, fileoff);
    dice_cmd.datasize = datasize;
    seg.inner.filesize += datasize;

    log.debug("writing data-in-code from 0x{x} to 0x{x}", .{ fileoff, fileoff + datasize });

    try self.base.file.?.pwriteAll(buf.items, fileoff);
}

fn writeCodeSignaturePadding(self: *MachO) !void {
    // TODO figure out how not to rewrite padding every single time.
    const tracy = trace(@src());
    defer tracy.end();

    const linkedit_segment = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
    const code_sig_cmd = &self.load_commands.items[self.code_signature_cmd_index.?].LinkeditData;
    const fileoff = linkedit_segment.inner.fileoff + linkedit_segment.inner.filesize;
    const needed_size = CodeSignature.calcCodeSignaturePaddingSize(
        self.base.options.emit.?.sub_path,
        fileoff,
        self.page_size,
    );
    code_sig_cmd.dataoff = @intCast(u32, fileoff);
    code_sig_cmd.datasize = needed_size;

    // Advance size of __LINKEDIT segment
    linkedit_segment.inner.filesize += needed_size;
    if (linkedit_segment.inner.vmsize < linkedit_segment.inner.filesize) {
        linkedit_segment.inner.vmsize = mem.alignForwardGeneric(u64, linkedit_segment.inner.filesize, self.page_size);
    }
    log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ fileoff, fileoff + needed_size });
    // Pad out the space. We need to do this to calculate valid hashes for everything in the file
    // except for code signature data.
    try self.base.file.?.pwriteAll(&[_]u8{0}, fileoff + needed_size - 1);
    self.load_commands_dirty = true;
}

/// Computes and writes the ad-hoc code signature into the padding reserved by
/// writeCodeSignaturePadding. Must run after everything else has been written,
/// since the signature hashes the file contents preceding it.
fn writeCodeSignature(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    const code_sig_cmd = self.load_commands.items[self.code_signature_cmd_index.?].LinkeditData;

    var code_sig: CodeSignature = .{};
    defer code_sig.deinit(self.base.allocator);
    try code_sig.calcAdhocSignature(
        self.base.allocator,
        self.base.file.?,
        self.base.options.emit.?.sub_path,
        text_segment.inner,
        code_sig_cmd,
        self.base.options.output_mode,
        self.page_size,
    );

    var buffer = try self.base.allocator.alloc(u8, code_sig.size());
    defer self.base.allocator.free(buffer);
    var stream = std.io.fixedBufferStream(buffer);
    try code_sig.write(stream.writer());

    log.debug("writing code signature from 0x{x} to 0x{x}", .{ code_sig_cmd.dataoff, code_sig_cmd.dataoff + buffer.len });

    try self.base.file.?.pwriteAll(buffer, code_sig_cmd.dataoff);
}

/// Serializes the export trie (LC_DYLD_INFO_ONLY export section) covering all
/// global symbols, relocating the on-disk range if it has outgrown its space.
fn writeExportInfo(self: *MachO) !void {
    if (!self.export_info_dirty) return;
    if (self.globals.items.len == 0) return;

    const tracy = trace(@src());
    defer tracy.end();

    var trie: Trie = .{};
    defer trie.deinit(self.base.allocator);

    const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
    // Export trie offsets are relative to __TEXT's vmaddr.
    const base_address = text_segment.inner.vmaddr;

    // TODO handle macho.EXPORT_SYMBOL_FLAGS_REEXPORT and macho.EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER.
    log.debug("writing export trie", .{});

    for (self.globals.items) |sym| {
        const sym_name = self.getString(sym.n_strx);
        log.debug(" | putting '{s}' defined at 0x{x}", .{ sym_name, sym.n_value });
        try trie.put(self.base.allocator, .{
            .name = sym_name,
            .vmaddr_offset = sym.n_value - base_address,
            .export_flags = macho.EXPORT_SYMBOL_FLAGS_KIND_REGULAR,
        });
    }

    try trie.finalize(self.base.allocator);
    var buffer = try self.base.allocator.alloc(u8, @intCast(usize, trie.size));
    defer self.base.allocator.free(buffer);
    var stream = std.io.fixedBufferStream(buffer);
    const nwritten = try trie.write(stream.writer());
    assert(nwritten == trie.size);

    const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly;
    const allocated_size = self.allocatedSizeLinkedit(dyld_info.export_off);
    const needed_size = mem.alignForwardGeneric(u64, buffer.len, @alignOf(u64));

    if (needed_size > allocated_size) {
        // Clear the offset first so the old range is treated as free space.
        dyld_info.export_off = 0;
        dyld_info.export_off = @intCast(u32, self.findFreeSpaceLinkedit(needed_size, 1, null));
        // TODO this might require relocating all following LC_DYLD_INFO_ONLY sections too.
    }
    dyld_info.export_size = @intCast(u32, needed_size);
    log.debug("writing export info from 0x{x} to 0x{x}", .{ dyld_info.export_off, dyld_info.export_off + dyld_info.export_size });

    try self.base.file.?.pwriteAll(buffer, dyld_info.export_off);
    self.load_commands_dirty = true;
    self.export_info_dirty = false;
}

/// Collects every pointer that dyld must rebase (slide) at load time — text
/// block rebases in writable segments, non-undef GOT entries, and all lazy
/// symbol pointers — then encodes and writes the rebase opcode stream.
fn writeRebaseInfoTable(self: *MachO) !void {
    if (!self.rebase_info_dirty) return;

    const tracy = trace(@src());
    defer tracy.end();

    var pointers = std.ArrayList(bind.Pointer).init(self.base.allocator);
    defer pointers.deinit();

    {
        var it = self.blocks.iterator();
        while (it.next()) |entry| {
            const match = entry.key_ptr.*;
            var block: *TextBlock = entry.value_ptr.*;

            if (match.seg == self.text_segment_cmd_index.?)
                continue; // __TEXT is non-writable

            const seg = self.load_commands.items[match.seg].Segment;

            // Walk the block chain backwards, collecting rebase offsets.
            while (true) {
                const sym = self.locals.items[block.local_sym_index];
                const base_offset = sym.n_value - seg.inner.vmaddr;

                for (block.rebases.items) |offset| {
                    try pointers.append(.{
                        .offset = base_offset + offset,
                        .segment_id = match.seg,
                    });
                }

                if (block.prev) |prev| {
                    block = prev;
                } else break;
            }
        }
    }

    if (self.got_section_index) |idx| {
        const seg = self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
        const sect = seg.sections.items[idx];
        const base_offset = sect.addr - seg.inner.vmaddr;
        const segment_id = @intCast(u16, self.data_const_segment_cmd_index.?);

        for (self.got_entries.items) |entry, i| {
            // Undef GOT entries are bound by dyld, not rebased.
            if (entry.where == .undef) continue;
            try pointers.append(.{
                .offset = base_offset + i * @sizeOf(u64),
                .segment_id = segment_id,
            });
        }
    }

    if (self.la_symbol_ptr_section_index) |idx| {
        const seg = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
        const sect = seg.sections.items[idx];
        const base_offset = sect.addr - seg.inner.vmaddr;
        const segment_id = @intCast(u16, self.data_segment_cmd_index.?);

        try pointers.ensureUnusedCapacity(self.stubs.items.len);
        for (self.stubs.items) |_, i| {
            pointers.appendAssumeCapacity(.{
                .offset = base_offset + i * @sizeOf(u64),
                .segment_id = segment_id,
            });
        }
    }

    // Rebase opcodes require the pointers sorted by segment/offset.
    std.sort.sort(bind.Pointer, pointers.items, {}, bind.pointerCmp);

    const size = try bind.rebaseInfoSize(pointers.items);
    var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size));
    defer self.base.allocator.free(buffer);
    var stream = std.io.fixedBufferStream(buffer);
    try bind.writeRebaseInfo(pointers.items, stream.writer());

    const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly;
    const allocated_size = self.allocatedSizeLinkedit(dyld_info.rebase_off);
    const needed_size = mem.alignForwardGeneric(u64, buffer.len, @alignOf(u64));

    if (needed_size > allocated_size) {
        dyld_info.rebase_off = 0;
        dyld_info.rebase_off = @intCast(u32,
            self.findFreeSpaceLinkedit(needed_size, 1, null));
        // TODO this might require relocating all following LC_DYLD_INFO_ONLY sections too.
    }
    dyld_info.rebase_size = @intCast(u32, needed_size);
    log.debug("writing rebase info from 0x{x} to 0x{x}", .{ dyld_info.rebase_off, dyld_info.rebase_off + dyld_info.rebase_size });

    try self.base.file.?.pwriteAll(buffer, dyld_info.rebase_off);
    self.load_commands_dirty = true;
    self.rebase_info_dirty = false;
}

/// Collects non-lazy binding pointers (undef GOT entries plus text-block
/// bindings in writable segments) and writes the dyld bind opcode stream.
fn writeBindInfoTable(self: *MachO) !void {
    if (!self.binding_info_dirty) return;

    const tracy = trace(@src());
    defer tracy.end();

    var pointers = std.ArrayList(bind.Pointer).init(self.base.allocator);
    defer pointers.deinit();

    if (self.got_section_index) |idx| {
        const seg = self.load_commands.items[self.data_const_segment_cmd_index.?].Segment;
        const sect = seg.sections.items[idx];
        const base_offset = sect.addr - seg.inner.vmaddr;
        const segment_id = @intCast(u16, self.data_const_segment_cmd_index.?);

        for (self.got_entries.items) |entry, i| {
            // Local GOT entries are rebased, not bound.
            if (entry.where == .local) continue;

            const sym = self.undefs.items[entry.where_index];
            try pointers.append(.{
                .offset = base_offset + i * @sizeOf(u64),
                .segment_id = segment_id,
                .dylib_ordinal = @divExact(sym.n_desc, macho.N_SYMBOL_RESOLVER),
                .name = self.getString(sym.n_strx),
            });
        }
    }

    {
        var it = self.blocks.iterator();
        while (it.next()) |entry| {
            const match = entry.key_ptr.*;
            var block: *TextBlock = entry.value_ptr.*;

            if (match.seg == self.text_segment_cmd_index.?)
                continue; // __TEXT is non-writable

            const seg = self.load_commands.items[match.seg].Segment;

            // Walk the block chain backwards, collecting binding records.
            while (true) {
                const sym = self.locals.items[block.local_sym_index];
                const base_offset = sym.n_value - seg.inner.vmaddr;

                for (block.bindings.items) |binding| {
                    const bind_sym = self.undefs.items[binding.local_sym_index];
                    try pointers.append(.{
                        .offset = binding.offset + base_offset,
                        .segment_id = match.seg,
                        .dylib_ordinal = @divExact(bind_sym.n_desc, macho.N_SYMBOL_RESOLVER),
                        .name = self.getString(bind_sym.n_strx),
                    });
                }

                if (block.prev) |prev| {
                    block = prev;
                } else break;
            }
        }
    }

    const size = try bind.bindInfoSize(pointers.items);
    var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size));
    defer self.base.allocator.free(buffer);
    var stream = std.io.fixedBufferStream(buffer);
    try bind.writeBindInfo(pointers.items, stream.writer());

    const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly;
    const allocated_size = self.allocatedSizeLinkedit(dyld_info.bind_off);
    const needed_size = mem.alignForwardGeneric(u64, buffer.len, @alignOf(u64));

    if (needed_size > allocated_size) {
        // Clear the offset first so the old range is treated as free space.
        dyld_info.bind_off = 0;
        dyld_info.bind_off = @intCast(u32, self.findFreeSpaceLinkedit(needed_size, 1, null));
        // TODO this might require relocating all following LC_DYLD_INFO_ONLY sections too.
    }
    dyld_info.bind_size = @intCast(u32, needed_size);
    log.debug("writing binding info from 0x{x} to 0x{x}", .{ dyld_info.bind_off, dyld_info.bind_off + dyld_info.bind_size });

    try self.base.file.?.pwriteAll(buffer, dyld_info.bind_off);
    self.load_commands_dirty = true;
    self.binding_info_dirty = false;
}

/// Collects lazy binding pointers (one per stub, targeting __la_symbol_ptr),
/// writes the dyld lazy-bind opcode stream, then back-patches the stub helper
/// entries with the offsets of their opcodes within that stream.
fn writeLazyBindInfoTable(self: *MachO) !void {
    if (!self.lazy_binding_info_dirty) return;

    const tracy = trace(@src());
    defer tracy.end();

    var pointers = std.ArrayList(bind.Pointer).init(self.base.allocator);
    defer pointers.deinit();

    if (self.la_symbol_ptr_section_index) |idx| {
        const seg = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
        const sect = seg.sections.items[idx];
        const base_offset = sect.addr - seg.inner.vmaddr;
        const segment_id = @intCast(u16, self.data_segment_cmd_index.?);

        try pointers.ensureUnusedCapacity(self.stubs.items.len);
        for (self.stubs.items) |import_id, i| {
            const sym = self.undefs.items[import_id];
            pointers.appendAssumeCapacity(.{
                .offset = base_offset + i * @sizeOf(u64),
                .segment_id = segment_id,
                .dylib_ordinal = @divExact(sym.n_desc, macho.N_SYMBOL_RESOLVER),
                .name = self.getString(sym.n_strx),
            });
        }
    }

    const size = try bind.lazyBindInfoSize(pointers.items);
    var buffer = try self.base.allocator.alloc(u8, @intCast(usize, size));
    defer self.base.allocator.free(buffer);
    var stream = std.io.fixedBufferStream(buffer);
    try bind.writeLazyBindInfo(pointers.items, stream.writer());

    const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfoOnly;
    const allocated_size = self.allocatedSizeLinkedit(dyld_info.lazy_bind_off);
    const needed_size = mem.alignForwardGeneric(u64, buffer.len, @alignOf(u64));

    if (needed_size > allocated_size) {
        // Clear the offset first so the old range is treated as free space.
        dyld_info.lazy_bind_off = 0;
        dyld_info.lazy_bind_off = @intCast(u32, self.findFreeSpaceLinkedit(needed_size, 1, null));
        // TODO this might require relocating all following LC_DYLD_INFO_ONLY sections too.
    }
    dyld_info.lazy_bind_size = @intCast(u32, needed_size);
    log.debug("writing lazy binding info from 0x{x} to 0x{x}", .{ dyld_info.lazy_bind_off, dyld_info.lazy_bind_off + dyld_info.lazy_bind_size });

    try self.base.file.?.pwriteAll(buffer, dyld_info.lazy_bind_off);
    try self.populateLazyBindOffsetsInStubHelper(buffer);
    self.load_commands_dirty = true;
    self.lazy_binding_info_dirty = false;
}

/// Scans the just-written lazy bind opcode stream in `buffer` to recover the
/// byte offset of each symbol's bind opcodes, then patches those offsets into
/// the immediate operands of the corresponding stub helper entries on disk.
fn populateLazyBindOffsetsInStubHelper(self: *MachO, buffer: []const u8) !void {
    if (self.stubs.items.len == 0) return;

    var stream = std.io.fixedBufferStream(buffer);
    var reader = stream.reader();
    var offsets = std.ArrayList(u32).init(self.base.allocator);
    try offsets.append(0);
    defer offsets.deinit();
    var valid_block = false;

    while (true) {
        const inst = reader.readByte() catch |err| switch (err) {
            error.EndOfStream => break,
            else => return err,
        };
        const opcode: u8 = inst & macho.BIND_OPCODE_MASK;

        switch (opcode) {
            macho.BIND_OPCODE_DO_BIND => {
                valid_block = true;
            },
            macho.BIND_OPCODE_DONE => {
                // A DONE following a DO_BIND terminates one symbol's opcode
                // block; the next block begins at the current stream position.
                if (valid_block) {
                    const offset = try stream.getPos();
                    try offsets.append(@intCast(u32, offset));
                }
                valid_block = false;
            },
            macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM => {
                // Skip the null-terminated symbol name payload.
                var next = try reader.readByte();
                while (next != @as(u8, 0)) {
                    next = try reader.readByte();
                }
            },
            macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB => {
                _ = try std.leb.readULEB128(u64, reader);
            },
            macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB => {
                _ = try std.leb.readULEB128(u64, reader);
            },
            macho.BIND_OPCODE_SET_ADDEND_SLEB => {
                _ = try std.leb.readILEB128(i64, reader);
            },
            else => {},
        }
    }
    assert(self.stubs.items.len <= offsets.items.len);

    // Per-arch stub helper entry size and the byte offset of the 32-bit
    // immediate to patch within each entry.
    const stub_size: u4 = switch (self.base.options.target.cpu.arch) {
        .x86_64 => 10,
        .aarch64 => 3 * @sizeOf(u32),
        else => unreachable,
    };
    const off: u4 = switch (self.base.options.target.cpu.arch) {
        .x86_64 => 1,
        .aarch64 => 2 * @sizeOf(u32),
        else => unreachable,
    };
    var buf: [@sizeOf(u32)]u8 = undefined;
    for (self.stubs.items) |_, index| {
        const placeholder_off =
            self.stub_helper_stubs_start_off.? + index * stub_size + off;
        mem.writeIntLittle(u32, &buf, offsets.items[index]);
        try self.base.file.?.pwriteAll(&buf, placeholder_off);
    }
}

/// Writes the string table for the incremental linker, relocating it within
/// __LINKEDIT when it has outgrown its space or the symbol table moved.
fn writeStringTable(self: *MachO) !void {
    if (!self.strtab_dirty) return;

    const tracy = trace(@src());
    defer tracy.end();

    const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
    const allocated_size = self.allocatedSizeLinkedit(symtab.stroff);
    const needed_size = mem.alignForwardGeneric(u64, self.strtab.items.len, @alignOf(u64));

    if (needed_size > allocated_size or self.strtab_needs_relocation) {
        // Clear the size first so the old range is treated as free space; the
        // string table must stay after the symbol table (hint = symtab.symoff).
        symtab.strsize = 0;
        symtab.stroff = @intCast(u32, self.findFreeSpaceLinkedit(needed_size, 1, symtab.symoff));
        self.strtab_needs_relocation = false;
    }
    symtab.strsize = @intCast(u32, needed_size);
    log.debug("writing string table from 0x{x} to 0x{x}", .{ symtab.stroff, symtab.stroff + symtab.strsize });

    try self.base.file.?.pwriteAll(self.strtab.items, symtab.stroff);
    self.load_commands_dirty = true;
    self.strtab_dirty = false;
}

/// Writes the string table for the traditional (one-shot zld) linking path,
/// appending it at the current end of the __LINKEDIT segment.
fn writeStringTableZld(self: *MachO) !void {
    const seg = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
    const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
    symtab.stroff = @intCast(u32, seg.inner.fileoff + seg.inner.filesize);
    symtab.strsize = @intCast(u32, mem.alignForwardGeneric(u64, self.strtab.items.len, @alignOf(u64)));
    seg.inner.filesize += symtab.strsize;

    log.debug("writing string table from 0x{x} to 0x{x}", .{ symtab.stroff, symtab.stroff + symtab.strsize });

    try self.base.file.?.pwriteAll(self.strtab.items, symtab.stroff);

    if (symtab.strsize > self.strtab.items.len) {
        // This is potentially the last section, so we need to pad it out.
        try self.base.file.?.pwriteAll(&[_]u8{0}, seg.inner.fileoff + seg.inner.filesize - 1);
    }
}

/// Recomputes the __LINKEDIT segment's file size from the furthest end offset
/// of every table stored in it (dyld info, function starts, data-in-code,
/// dynamic and static symbol tables, string table).
fn updateLinkeditSegmentSizes(self: *MachO) !void {
    if (!self.load_commands_dirty) return;

    const tracy = trace(@src());
    defer tracy.end();

    // Now, we are in position to update __LINKEDIT segment sizes.
    // TODO Add checkpointing so that we don't have to do this every single time.
    const linkedit_segment = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
    var final_offset = linkedit_segment.inner.fileoff;

    if (self.dyld_info_cmd_index) |idx| {
        const dyld_info = self.load_commands.items[idx].DyldInfoOnly;
        final_offset = std.math.max(final_offset, dyld_info.rebase_off + dyld_info.rebase_size);
        final_offset = std.math.max(final_offset, dyld_info.bind_off + dyld_info.bind_size);
        final_offset = std.math.max(final_offset, dyld_info.weak_bind_off + dyld_info.weak_bind_size);
        final_offset = std.math.max(final_offset, dyld_info.lazy_bind_off + dyld_info.lazy_bind_size);
        final_offset = std.math.max(final_offset, dyld_info.export_off + dyld_info.export_size);
    }
    if (self.function_starts_cmd_index) |idx| {
        const fstart = self.load_commands.items[idx].LinkeditData;
        final_offset = std.math.max(final_offset, fstart.dataoff + fstart.datasize);
    }
    if (self.data_in_code_cmd_index) |idx| {
        const dic = self.load_commands.items[idx].LinkeditData;
        final_offset = std.math.max(final_offset, dic.dataoff + dic.datasize);
    }
    if (self.dysymtab_cmd_index) |idx| {
        const dysymtab = self.load_commands.items[idx].Dysymtab;
        const nindirectsize = dysymtab.nindirectsyms * @sizeOf(u32);
        final_offset = std.math.max(final_offset, dysymtab.indirectsymoff + nindirectsize);
        // TODO Handle more dynamic symbol table sections.
    }
    if (self.symtab_cmd_index) |idx| {
        const symtab = self.load_commands.items[idx].Symtab;
        const symsize = symtab.nsyms * @sizeOf(macho.nlist_64);
        final_offset = std.math.max(final_offset, symtab.symoff + symsize);
        final_offset = std.math.max(final_offset, symtab.stroff + symtab.strsize);
    }

    const filesize = final_offset - linkedit_segment.inner.fileoff;
    linkedit_segment.inner.filesize = filesize;
    linkedit_segment.inner.vmsize = mem.alignForwardGeneric(u64, filesize, self.page_size);
    try self.base.file.?.pwriteAll(&[_]u8{0}, final_offset);
    self.load_commands_dirty = true;
}

/// Writes all load commands and section headers.
fn writeLoadCommands(self: *MachO) !void {
    if (!self.load_commands_dirty) return;

    var sizeofcmds: u32 = 0;
    for (self.load_commands.items) |lc| {
        sizeofcmds += lc.cmdsize();
    }

    var buffer = try self.base.allocator.alloc(u8, sizeofcmds);
    defer self.base.allocator.free(buffer);
    var writer = std.io.fixedBufferStream(buffer).writer();
    for (self.load_commands.items) |lc| {
        try lc.write(writer);
    }

    // Load commands immediately follow the Mach-O header.
    const off = @sizeOf(macho.mach_header_64);
    log.debug("writing {} load commands from 0x{x} to 0x{x}", .{ self.load_commands.items.len, off, off + sizeofcmds });
    try self.base.file.?.pwriteAll(buffer, off);
    self.load_commands_dirty = false;
}

/// Writes Mach-O file header.
fn writeHeader(self: *MachO) !void {
    var header = commands.emptyHeader(.{
        .flags = macho.MH_NOUNDEFS | macho.MH_DYLDLINK | macho.MH_PIE | macho.MH_TWOLEVEL,
    });

    switch (self.base.options.target.cpu.arch) {
        .aarch64 => {
            header.cputype = macho.CPU_TYPE_ARM64;
            header.cpusubtype = macho.CPU_SUBTYPE_ARM_ALL;
        },
        .x86_64 => {
            header.cputype = macho.CPU_TYPE_X86_64;
            header.cpusubtype = macho.CPU_SUBTYPE_X86_64_ALL;
        },
        else => return error.UnsupportedCpuArchitecture,
    }

    switch (self.base.options.output_mode) {
        .Exe => {
            header.filetype = macho.MH_EXECUTE;
        },
        .Lib => {
            // By this point, it can only be a dylib.
            header.filetype = macho.MH_DYLIB;
            header.flags |= macho.MH_NO_REEXPORTED_DYLIBS;
        },
        else => unreachable,
    }

    if (self.tlv_section_index) |_| {
        header.flags |= macho.MH_HAS_TLV_DESCRIPTORS;
    }

    header.ncmds = @intCast(u32, self.load_commands.items.len);
    header.sizeofcmds = 0;
    for (self.load_commands.items) |cmd| {
        header.sizeofcmds += cmd.cmdsize();
    }

    log.debug("writing Mach-O header {}", .{header});

    try self.base.file.?.pwriteAll(mem.asBytes(&header), 0);
}

/// Saturating over-allocation factor: grows `actual_size` by 1/ideal_factor,
/// clamping at the type's maximum instead of overflowing.
pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
    // TODO https://github.com/ziglang/zig/issues/1284
    return std.math.add(@TypeOf(actual_size), actual_size, actual_size / ideal_factor) catch std.math.maxInt(@TypeOf(actual_size));
}

/// Interns `string` in the string table, returning its byte offset. Reuses the
/// existing offset when an identical string was interned before.
pub fn makeString(self: *MachO, string: []const u8) !u32 {
    const gop = try self.strtab_dir.getOrPutContextAdapted(self.base.allocator, @as([]const u8, string), StringIndexAdapter{
        .bytes = &self.strtab,
    }, StringIndexContext{
        .bytes = &self.strtab,
    });
    if (gop.found_existing) {
        const off = gop.key_ptr.*;
        log.debug("reusing string '{s}' at offset 0x{x}", .{ string, off });
        return off;
    }

    try self.strtab.ensureUnusedCapacity(self.base.allocator, string.len + 1);
    const new_off = @intCast(u32, self.strtab.items.len);

    log.debug("writing new string '{s}' at offset 0x{x}", .{ string, new_off });

    // Strings are stored null-terminated.
    self.strtab.appendSliceAssumeCapacity(string);
    self.strtab.appendAssumeCapacity(0);

    gop.key_ptr.* = new_off;

    return new_off;
}

/// Returns the null-terminated string stored at offset `off` in the string table.
pub fn getString(self: *MachO, off: u32) []const u8 {
    assert(off < self.strtab.items.len);
    return mem.spanZ(@ptrCast([*:0]const u8, self.strtab.items.ptr + off));
}

/// True if the symbol is a debug (stab) entry.
pub fn symbolIsStab(sym: macho.nlist_64) bool {
    return (macho.N_STAB & sym.n_type) != 0;
}

/// True if the symbol is private-external.
pub fn symbolIsPext(sym: macho.nlist_64) bool {
    return (macho.N_PEXT & sym.n_type) != 0;
}

/// True if the symbol is external.
pub fn symbolIsExt(sym: macho.nlist_64) bool {
    return (macho.N_EXT & sym.n_type) != 0;
}

/// True if the symbol is defined in a section.
pub fn symbolIsSect(sym: macho.nlist_64) bool {
    const type_ = macho.N_TYPE & sym.n_type;
    return type_ == macho.N_SECT;
}

/// True if the symbol is undefined.
pub fn symbolIsUndf(sym: macho.nlist_64) bool {
    const type_ = macho.N_TYPE & sym.n_type;
    return type_ == macho.N_UNDF;
}

/// True if the symbol is indirect.
pub fn symbolIsIndr(sym: macho.nlist_64) bool {
    const type_ = macho.N_TYPE & sym.n_type;
    return type_ == macho.N_INDR;
}

/// True if the symbol is absolute.
pub fn symbolIsAbs(sym: macho.nlist_64) bool {
    const type_ = macho.N_TYPE & sym.n_type;
    return type_ == macho.N_ABS;
}

/// True if the symbol is a weak definition.
pub fn symbolIsWeakDef(sym: macho.nlist_64) bool {
    return (sym.n_desc & macho.N_WEAK_DEF) != 0;
}

/// True if the symbol is a weak reference.
pub fn symbolIsWeakRef(sym: macho.nlist_64) bool {
    return (sym.n_desc & macho.N_WEAK_REF) != 0;
}

/// True for tentative definitions: undefined symbols with a nonzero value
/// (common symbols awaiting allocation).
pub fn symbolIsTentative(sym: macho.nlist_64) bool {
    if (!symbolIsUndf(sym)) return false;
    return sym.n_value != 0;
}

/// True for temporary (assembler-local) labels: section-defined, non-external
/// symbols whose name begins with 'l' or 'L'.
pub fn symbolIsTemp(sym: macho.nlist_64, sym_name: []const u8) bool {
    if (!symbolIsSect(sym)) return false;
    if (symbolIsExt(sym)) return false;
    return mem.startsWith(u8, sym_name, "l") or mem.startsWith(u8, sym_name, "L");
}

/// Linear search: returns the index of the first element at or after `start`
/// matching `predicate.predicate`, or `haystack.len` when none matches.
pub fn findFirst(comptime T: type, haystack: []T, start: usize, predicate: anytype) usize {
    if (!@hasDecl(@TypeOf(predicate), "predicate"))
        @compileError("Predicate is required to define fn predicate(@This(), T) bool");

    if (start == haystack.len) return start;

    var i = start;
    while (i < haystack.len) : (i += 1) {
        if (predicate.predicate(haystack[i])) break;
    }
    return i;
}
src/link/MachO.zig
const std = @import("std");
const TestContext = @import("../../src-self-hosted/test.zig").TestContext;

// self-hosted does not yet support PE executable files / COFF object files
// or mach-o files. So we do the ZIR transform test cases cross compiling for
// x86_64-linux.
const linux_x64 = std.zig.CrossTarget{
    .cpu_arch = .x86_64,
    .os_tag = .linux,
};

/// Registers the ZIR transform / multi-update / compare-output test cases with
/// the shared test context. Execution-based cases are only added when the host
/// itself is x86_64-linux.
pub fn addCases(ctx: *TestContext) void {
    ctx.addZIRTransform("referencing decls which appear later in the file", linux_x64,
        \\@void = primitive(void)
        \\@fnty = fntype([], @void, cc=C)
        \\
        \\@9 = str("entry")
        \\@10 = ref(@9)
        \\@11 = export(@10, @entry)
        \\
        \\@entry = fn(@fnty, {
        \\ %11 = return()
        \\})
    ,
        \\@void = primitive(void)
        \\@fnty = fntype([], @void, cc=C)
        \\@9 = str("entry")
        \\@10 = ref(@9)
        \\@unnamed$6 = str("entry")
        \\@unnamed$7 = ref(@unnamed$6)
        \\@unnamed$8 = export(@unnamed$7, @entry)
        \\@unnamed$10 = fntype([], @void, cc=C)
        \\@entry = fn(@unnamed$10, {
        \\ %0 = return()
        \\})
        \\
    );

    ctx.addZIRTransform("elemptr, add, cmp, condbr, return, breakpoint", linux_x64,
        \\@void = primitive(void)
        \\@usize = primitive(usize)
        \\@fnty = fntype([], @void, cc=C)
        \\@0 = int(0)
        \\@1 = int(1)
        \\@2 = int(2)
        \\@3 = int(3)
        \\
        \\@entry = fn(@fnty, {
        \\ %a = str("\x32\x08\x01\x0a")
        \\ %aref = ref(%a)
        \\ %eptr0 = elemptr(%aref, @0)
        \\ %eptr1 = elemptr(%aref, @1)
        \\ %eptr2 = elemptr(%aref, @2)
        \\ %eptr3 = elemptr(%aref, @3)
        \\ %v0 = deref(%eptr0)
        \\ %v1 = deref(%eptr1)
        \\ %v2 = deref(%eptr2)
        \\ %v3 = deref(%eptr3)
        \\ %x0 = add(%v0, %v1)
        \\ %x1 = add(%v2, %v3)
        \\ %result = add(%x0, %x1)
        \\
        \\ %expected = int(69)
        \\ %ok = cmp(%result, eq, %expected)
        \\ %10 = condbr(%ok, {
        \\ %11 = return()
        \\ }, {
        \\ %12 = breakpoint()
        \\ })
        \\})
        \\
        \\@9 = str("entry")
        \\@10 = ref(@9)
        \\@11 = export(@10, @entry)
    ,
        \\@void = primitive(void)
        \\@fnty = fntype([], @void, cc=C)
        \\@0 = int(0)
        \\@1 = int(1)
        \\@2 = int(2)
        \\@3 = int(3)
        \\@unnamed$7 = fntype([], @void, cc=C)
        \\@entry = fn(@unnamed$7, {
        \\ %0 = return()
        \\})
        \\@a = str("2\x08\x01\n")
        \\@9 = str("entry")
        \\@10 = ref(@9)
        \\@unnamed$14 = str("entry")
        \\@unnamed$15 = ref(@unnamed$14)
        \\@unnamed$16 = export(@unnamed$15, @entry)
        \\
    );

    {
        var case = ctx.addZIRMulti("reference cycle with compile error in the cycle", linux_x64);
        case.addZIR(
            \\@void = primitive(void)
            \\@fnty = fntype([], @void, cc=C)
            \\
            \\@9 = str("entry")
            \\@10 = ref(@9)
            \\@11 = export(@10, @entry)
            \\
            \\@entry = fn(@fnty, {
            \\ %0 = call(@a, [])
            \\ %1 = return()
            \\})
            \\
            \\@a = fn(@fnty, {
            \\ %0 = call(@b, [])
            \\ %1 = return()
            \\})
            \\
            \\@b = fn(@fnty, {
            \\ %0 = call(@a, [])
            \\ %1 = return()
            \\})
        ,
            \\@void = primitive(void)
            \\@fnty = fntype([], @void, cc=C)
            \\@9 = str("entry")
            \\@10 = ref(@9)
            \\@unnamed$6 = str("entry")
            \\@unnamed$7 = ref(@unnamed$6)
            \\@unnamed$8 = export(@unnamed$7, @entry)
            \\@unnamed$12 = fntype([], @void, cc=C)
            \\@entry = fn(@unnamed$12, {
            \\ %0 = call(@a, [], modifier=auto)
            \\ %1 = return()
            \\})
            \\@unnamed$17 = fntype([], @void, cc=C)
            \\@a = fn(@unnamed$17, {
            \\ %0 = call(@b, [], modifier=auto)
            \\ %1 = return()
            \\})
            \\@unnamed$22 = fntype([], @void, cc=C)
            \\@b = fn(@unnamed$22, {
            \\ %0 = call(@a, [], modifier=auto)
            \\ %1 = return()
            \\})
            \\
        );
        // Now we introduce a compile error
        case.addError(
            \\@void = primitive(void)
            \\@fnty = fntype([], @void, cc=C)
            \\
            \\@9 = str("entry")
            \\@10 = ref(@9)
            \\@11 = export(@10, @entry)
            \\
            \\@entry = fn(@fnty, {
            \\ %0 = call(@a, [])
            \\ %1 = return()
            \\})
            \\
            \\@a = fn(@fnty, {
            \\ %0 = call(@b, [])
            \\ %1 = return()
            \\})
            \\
            \\@b = fn(@fnty, {
            \\ %9 = compileerror("message")
            \\ %0 = call(@a, [])
            \\ %1 = return()
            \\})
        ,
            &[_][]const u8{
                ":19:21: error: message",
            },
        );
        // Now we remove the call to `a`. `a` and `b` form a cycle, but no entry points are
        // referencing either of them. This tests that the cycle is detected, and the error
        // goes away.
        case.addZIR(
            \\@void = primitive(void)
            \\@fnty = fntype([], @void, cc=C)
            \\
            \\@9 = str("entry")
            \\@10 = ref(@9)
            \\@11 = export(@10, @entry)
            \\
            \\@entry = fn(@fnty, {
            \\ %1 = return()
            \\})
            \\
            \\@a = fn(@fnty, {
            \\ %0 = call(@b, [])
            \\ %1 = return()
            \\})
            \\
            \\@b = fn(@fnty, {
            \\ %9 = compileerror("message")
            \\ %0 = call(@a, [])
            \\ %1 = return()
            \\})
        ,
            \\@void = primitive(void)
            \\@fnty = fntype([], @void, cc=C)
            \\@9 = str("entry")
            \\@10 = ref(@9)
            \\@unnamed$6 = str("entry")
            \\@unnamed$7 = ref(@unnamed$6)
            \\@unnamed$8 = export(@unnamed$7, @entry)
            \\@unnamed$10 = fntype([], @void, cc=C)
            \\@entry = fn(@unnamed$10, {
            \\ %0 = return()
            \\})
            \\
        );
    }

    if (std.Target.current.os.tag != .linux or std.Target.current.cpu.arch != .x86_64) {
        // TODO implement self-hosted PE (.exe file) linking
        // TODO implement more ZIR so we don't depend on x86_64-linux
        return;
    }

    ctx.addZIRCompareOutput(
        "hello world ZIR, update msg",
        &[_][]const u8{
            \\@noreturn = primitive(noreturn)
            \\@void = primitive(void)
            \\@usize = primitive(usize)
            \\@0 = int(0)
            \\@1 = int(1)
            \\@2 = int(2)
            \\@3 = int(3)
            \\
            \\@syscall_array = str("syscall")
            \\@sysoutreg_array = str("={rax}")
            \\@rax_array = str("{rax}")
            \\@rdi_array = str("{rdi}")
            \\@rcx_array = str("rcx")
            \\@r11_array = str("r11")
            \\@rdx_array = str("{rdx}")
            \\@rsi_array = str("{rsi}")
            \\@memory_array = str("memory")
            \\@len_array = str("len")
            \\
            \\@msg = str("Hello, world!\n")
            \\
            \\@start_fnty = fntype([], @noreturn, cc=Naked)
            \\@start = fn(@start_fnty, {
            \\ %SYS_exit_group = int(231)
            \\ %exit_code = as(@usize, @0)
            \\
            \\ %syscall = ref(@syscall_array)
            \\ %sysoutreg = ref(@sysoutreg_array)
            \\ %rax = ref(@rax_array)
            \\ %rdi = ref(@rdi_array)
            \\ %rcx = ref(@rcx_array)
            \\ %rdx = ref(@rdx_array)
            \\ %rsi = ref(@rsi_array)
            \\ %r11 = ref(@r11_array)
            \\ %memory = ref(@memory_array)
            \\
            \\ %SYS_write = as(@usize, @1)
            \\ %STDOUT_FILENO = as(@usize, @1)
            \\
            \\ %msg_ptr = ref(@msg)
            \\ %msg_addr = ptrtoint(%msg_ptr)
            \\
            \\ %len_name = ref(@len_array)
            \\ %msg_len_ptr = fieldptr(%msg_ptr, %len_name)
            \\ %msg_len = deref(%msg_len_ptr)
            \\ %rc_write = asm(%syscall, @usize,
            \\ volatile=1,
            \\ output=%sysoutreg,
            \\ inputs=[%rax, %rdi, %rsi, %rdx],
            \\ clobbers=[%rcx, %r11, %memory],
            \\ args=[%SYS_write, %STDOUT_FILENO, %msg_addr, %msg_len])
            \\
            \\ %rc_exit = asm(%syscall, @usize,
            \\ volatile=1,
            \\ output=%sysoutreg,
            \\ inputs=[%rax, %rdi],
            \\ clobbers=[%rcx, %r11, %memory],
            \\ args=[%SYS_exit_group, %exit_code])
            \\
            \\ %99 = unreachable()
            \\});
            \\
            \\@9 = str("_start")
            \\@10 = ref(@9)
            \\@11 = export(@10, @start)
        ,
            \\@noreturn = primitive(noreturn)
            \\@void = primitive(void)
            \\@usize = primitive(usize)
            \\@0 = int(0)
            \\@1 = int(1)
            \\@2 = int(2)
            \\@3 = int(3)
            \\
            \\@syscall_array = str("syscall")
            \\@sysoutreg_array = str("={rax}")
            \\@rax_array = str("{rax}")
            \\@rdi_array = str("{rdi}")
            \\@rcx_array = str("rcx")
            \\@r11_array = str("r11")
            \\@rdx_array = str("{rdx}")
            \\@rsi_array = str("{rsi}")
            \\@memory_array = str("memory")
            \\@len_array = str("len")
            \\
            \\@msg = str("Hello, world!\n")
            \\@msg2 = str("HELL WORLD\n")
            \\
            \\@start_fnty = fntype([], @noreturn, cc=Naked)
            \\@start = fn(@start_fnty, {
            \\ %SYS_exit_group = int(231)
            \\ %exit_code = as(@usize, @0)
            \\
            \\ %syscall = ref(@syscall_array)
            \\ %sysoutreg = ref(@sysoutreg_array)
            \\ %rax = ref(@rax_array)
            \\ %rdi = ref(@rdi_array)
            \\ %rcx = ref(@rcx_array)
            \\ %rdx = ref(@rdx_array)
            \\ %rsi = ref(@rsi_array)
            \\ %r11 = ref(@r11_array)
            \\ %memory = ref(@memory_array)
            \\
            \\ %SYS_write = as(@usize, @1)
            \\ %STDOUT_FILENO = as(@usize, @1)
            \\
            \\ %msg_ptr = ref(@msg2)
            \\ %msg_addr = ptrtoint(%msg_ptr)
            \\
            \\ %len_name = ref(@len_array)
            \\ %msg_len_ptr = fieldptr(%msg_ptr, %len_name)
            \\ %msg_len = deref(%msg_len_ptr)
            \\ %rc_write = asm(%syscall, @usize,
            \\ volatile=1,
            \\ output=%sysoutreg,
            \\ inputs=[%rax, %rdi, %rsi, %rdx],
            \\ clobbers=[%rcx, %r11, %memory],
            \\ args=[%SYS_write, %STDOUT_FILENO, %msg_addr, %msg_len])
            \\
            \\ %rc_exit = asm(%syscall, @usize,
            \\ volatile=1,
            \\ output=%sysoutreg,
            \\ inputs=[%rax, %rdi],
            \\ clobbers=[%rcx, %r11, %memory],
            \\ args=[%SYS_exit_group, %exit_code])
            \\
            \\ %99 = unreachable()
            \\});
            \\
            \\@9 = str("_start")
            \\@10 = ref(@9)
            \\@11 = export(@10, @start)
        ,
            \\@noreturn = primitive(noreturn)
            \\@void = primitive(void)
            \\@usize = primitive(usize)
            \\@0 = int(0)
            \\@1 = int(1)
            \\@2 = int(2)
            \\@3 = int(3)
            \\
            \\@syscall_array = str("syscall")
            \\@sysoutreg_array = str("={rax}")
            \\@rax_array = str("{rax}")
            \\@rdi_array = str("{rdi}")
            \\@rcx_array = str("rcx")
            \\@r11_array = str("r11")
            \\@rdx_array = str("{rdx}")
            \\@rsi_array = str("{rsi}")
            \\@memory_array = str("memory")
            \\@len_array = str("len")
            \\
            \\@msg = str("Hello, world!\n")
            \\@msg2 = str("Editing the same msg2 decl but this time with a much longer message which will\ncause the data to need to be relocated in virtual address space.\n")
            \\
            \\@start_fnty = fntype([], @noreturn, cc=Naked)
            \\@start = fn(@start_fnty, {
            \\ %SYS_exit_group = int(231)
            \\ %exit_code = as(@usize, @0)
            \\
            \\ %syscall = ref(@syscall_array)
            \\ %sysoutreg = ref(@sysoutreg_array)
            \\ %rax = ref(@rax_array)
            \\ %rdi = ref(@rdi_array)
            \\ %rcx = ref(@rcx_array)
            \\ %rdx = ref(@rdx_array)
            \\ %rsi = ref(@rsi_array)
            \\ %r11 = ref(@r11_array)
            \\ %memory = ref(@memory_array)
            \\
            \\ %SYS_write = as(@usize, @1)
            \\ %STDOUT_FILENO = as(@usize, @1)
            \\
            \\ %msg_ptr = ref(@msg2)
            \\ %msg_addr = ptrtoint(%msg_ptr)
            \\
            \\ %len_name = ref(@len_array)
            \\ %msg_len_ptr = fieldptr(%msg_ptr, %len_name)
            \\ %msg_len = deref(%msg_len_ptr)
            \\ %rc_write = asm(%syscall, @usize,
            \\ volatile=1,
            \\ output=%sysoutreg,
            \\ inputs=[%rax, %rdi, %rsi, %rdx],
            \\ clobbers=[%rcx, %r11, %memory],
            \\ args=[%SYS_write, %STDOUT_FILENO, %msg_addr, %msg_len])
            \\
            \\ %rc_exit = asm(%syscall, @usize,
            \\ volatile=1,
            \\ output=%sysoutreg,
            \\ inputs=[%rax, %rdi],
            \\ clobbers=[%rcx, %r11, %memory],
            \\ args=[%SYS_exit_group, %exit_code])
            \\
            \\ %99 = unreachable()
            \\});
            \\
            \\@9 = str("_start")
            \\@10 = ref(@9)
            \\@11 = export(@10, @start)
        },
        &[_][]const u8{
            \\Hello, world!
            \\
        ,
            \\HELL WORLD
            \\
        ,
            \\Editing the same msg2 decl but this time with a much longer message which will
            \\cause the data to need to be relocated in virtual address space.
            \\
        },
    );

    ctx.addZIRCompareOutput(
        "function call with no args no return value",
        &[_][]const u8{
            \\@noreturn = primitive(noreturn)
            \\@void = primitive(void)
            \\@usize = primitive(usize)
            \\@0 = int(0)
            \\@1 = int(1)
            \\@2 = int(2)
            \\@3 = int(3)
            \\
            \\@syscall_array = str("syscall")
            \\@sysoutreg_array = str("={rax}")
            \\@rax_array = str("{rax}")
            \\@rdi_array = str("{rdi}")
            \\@rcx_array = str("rcx")
            \\@r11_array = str("r11")
            \\@memory_array = str("memory")
            \\
            \\@exit0_fnty = fntype([], @noreturn)
            \\@exit0 = fn(@exit0_fnty, {
            \\ %SYS_exit_group = int(231)
            \\ %exit_code = as(@usize, @0)
            \\
            \\ %syscall = ref(@syscall_array)
            \\ %sysoutreg = ref(@sysoutreg_array)
            \\ %rax = ref(@rax_array)
            \\ %rdi = ref(@rdi_array)
            \\ %rcx = ref(@rcx_array)
            \\ %r11 = ref(@r11_array)
            \\ %memory = ref(@memory_array)
            \\
            \\ %rc = asm(%syscall, @usize,
            \\ volatile=1,
            \\ output=%sysoutreg,
            \\ inputs=[%rax, %rdi],
            \\ clobbers=[%rcx, %r11, %memory],
            \\ args=[%SYS_exit_group, %exit_code])
            \\
            \\ %99 = unreachable()
            \\});
            \\
            \\@start_fnty = fntype([], @noreturn, cc=Naked)
            \\@start = fn(@start_fnty, {
            \\ %0 = call(@exit0, [])
            \\})
            \\@9 = str("_start")
            \\@10 = ref(@9)
            \\@11 = export(@10, @start)
        },
        &[_][]const u8{""},
    );
}
test/stage2/zir.zig
// Build script: compiles SDL2 (and optionally SDL2_gfx) as a static library
// with the Zig build system, and builds an example demo executable.
const std = @import("std");
const builtin = @import("builtin");
const Builder = std.build.Builder;
const path = std.fs.path;
const sep_str = path.sep_str;
const Target = std.build.Target;

/// Top-level build entry point: builds the SDL2 static library, the
/// `sdl-zig-demo` example executable, and a `run` step for the demo.
pub fn build(b: *Builder) void {
    const mode = b.standardReleaseOptions();
    // Only targets with pre-generated SDL config headers under zig-prebuilt/
    // are whitelisted.
    const target = b.standardTargetOptions(.{
        .whitelist = &[_]Target{
            .{
                .cpu_arch = .x86_64,
                .os_tag = .linux,
                .abi = .musl,
            },
            .{
                .cpu_arch = .x86_64,
                .os_tag = .windows,
                .abi = .gnu,
            },
        },
    });

    const lib = getLibrary(b, mode, target, ".");
    lib.install();

    const exe = b.addExecutable("sdl-zig-demo", "example/main.zig");
    exe.addIncludeDir("include");
    exe.setBuildMode(mode);
    exe.setTarget(target);
    exe.linkLibrary(lib);
    b.default_step.dependOn(&exe.step);
    exe.install();

    const run = b.step("run", "Run the demo");
    const run_cmd = exe.run();
    run.dependOn(&run_cmd.step);
}

/// Options for downstream projects that link SDL2 into their own artifact
/// via `linkArtifact`.
pub const Options = struct {
    // The executable/library to link SDL2 into.
    artifact: *std.build.LibExeObjStep,
    // Path prefix to this SDL checkout, relative to the caller's build root.
    prefix: []const u8,
    // Also build and link SDL2_gfx.
    gfx: bool = false,
    // Build SDL in a different mode than the artifact (e.g. always release).
    override_mode: ?std.builtin.Mode = null,
};

/// Builds SDL2 (and optionally SDL2_gfx) and links it into
/// `options.artifact`, adding the prebuilt include directories.
pub fn linkArtifact(b: *Builder, options: Options) void {
    const mode = options.override_mode orelse options.artifact.build_mode;
    const lib = getLibrary(b, mode, options.artifact.target, options.prefix);
    options.artifact.addIncludeDir(b.fmt("{s}/zig-prebuilt/include", .{options.prefix}));
    options.artifact.addIncludeDir(b.fmt("{s}/zig-prebuilt/include/SDL2", .{options.prefix}));
    options.artifact.linkLibrary(lib);
    if (options.gfx) {
        const gfx_lib = getLibGfx(b, mode, options.artifact.target, options.prefix);
        options.artifact.addIncludeDir(b.fmt("{s}/include", .{options.prefix}));
        options.artifact.addIncludeDir(b.fmt("{s}/extra/gfx/zig-prebuilt/include", .{options.prefix}));
        options.artifact.linkLibrary(gfx_lib);
    }
}

/// Builds the SDL2_gfx helper library from the sources vendored under
/// `<prefix>/extra/gfx`.
pub fn getLibGfx(
    b: *Builder,
    mode: std.builtin.Mode,
    target: std.build.Target,
    prefix: []const u8,
) *std.build.LibExeObjStep {
    const lib_cflags = &[_][]const u8{"-std=c99"};
    const lib = b.addStaticLibrary("SDL2_gfx", null);
    lib.setBuildMode(mode);
    lib.setTarget(target);
    lib.linkSystemLibrary("c");
    lib.addIncludeDir(b.fmt("{s}/zig-prebuilt/include/SDL2", .{prefix}));
    for (generic_gfx_src_files) |src_file| {
        const full_src_path = path.join(b.allocator, &[_][]const u8{ prefix, "extra", "gfx", src_file }) catch unreachable;
        lib.addCSourceFile(full_src_path, lib_cflags);
    }
    return lib;
}

/// Builds SDL2 itself as a static library: the generic sources always, plus
/// the Windows or Darwin platform sources and system libraries/frameworks
/// when targeting those OSes.
pub fn getLibrary(
    b: *Builder,
    mode: std.builtin.Mode,
    target: std.build.Target,
    prefix: []const u8,
) *std.build.LibExeObjStep {
    // Directory containing the pre-generated SDL_config.h for this triple.
    const conf_dir = b.fmt("{s}/zig-prebuilt/{s}-{s}-{s}", .{
        prefix,
        @tagName(target.getCpuArch()),
        @tagName(target.getOsTag()),
        @tagName(target.getAbi()),
    });
    const lib_cflags = &[_][]const u8{};
    const lib = b.addStaticLibrary("SDL2", null);
    lib.setBuildMode(mode);
    lib.setTarget(target);
    lib.linkSystemLibrary("c");
    if (target.isWindows()) {
        lib.linkSystemLibrary("setupapi");
        lib.linkSystemLibrary("winmm");
        lib.linkSystemLibrary("gdi32");
        lib.linkSystemLibrary("imm32");
        lib.linkSystemLibrary("version");
        lib.linkSystemLibrary("oleaut32");
        lib.linkSystemLibrary("ole32");
    } else if (target.isDarwin()) {
        lib.linkFramework("OpenGL");
        lib.linkFramework("Metal");
        lib.linkFramework("CoreVideo");
        lib.linkFramework("Cocoa");
        lib.linkFramework("IOKit");
        lib.linkFramework("ForceFeedback");
        lib.linkFramework("Carbon");
        lib.linkFramework("CoreAudio");
        lib.linkFramework("AudioToolbox");
        lib.linkFramework("AVFoundation");
        lib.linkFramework("Foundation");
    }
    lib.addIncludeDir(conf_dir);
    lib.addIncludeDir(b.fmt("{s}/include", .{prefix}));
    lib.addIncludeDir(b.fmt("{s}/src/video/khronos", .{prefix}));
    for (generic_src_files) |src_file| {
        const full_src_path = path.join(b.allocator, &[_][]const u8{ prefix, "src", src_file }) catch unreachable;
        lib.addCSourceFile(full_src_path, lib_cflags);
    }
    if (target.isWindows()) {
        for (windows_src_files) |src_file| {
            const full_src_path = path.join(b.allocator, &[_][]const u8{ prefix, "src", src_file }) catch unreachable;
            lib.addCSourceFile(full_src_path, lib_cflags);
        }
    } else if (target.isDarwin()) {
        for (darwin_src_files) |src_file| {
            const full_src_path = path.join(b.allocator, &[_][]const u8{ prefix, "src", src_file }) catch unreachable;
            lib.addCSourceFile(full_src_path, lib_cflags);
        }
    }
    // NOTE(review): linux_src_files is never referenced, so a Linux target is
    // built from the generic (largely dummy-backend) sources only — confirm
    // this is intentional.
    return lib;
}

// Platform-independent SDL sources, compiled for every target. Entries
// commented out are backends deliberately excluded from this build.
const generic_src_files = [_][]const u8{
    "SDL_log.c",
    "sensor/dummy/SDL_dummysensor.c",
    "sensor/SDL_sensor.c",
    "atomic/SDL_atomic.c",
    "atomic/SDL_spinlock.c",
    "audio/SDL_audiocvt.c",
    "audio/SDL_audio.c",
    "audio/dsp/SDL_dspaudio.c",
    "audio/sndio/SDL_sndioaudio.c",
    "audio/disk/SDL_diskaudio.c",
    "audio/dummy/SDL_dummyaudio.c",
    "audio/sun/SDL_sunaudio.c",
    "audio/paudio/SDL_paudio.c",
    "audio/directsound/SDL_directsound.c",
    "audio/SDL_mixer.c",
    "audio/SDL_wave.c",
    "audio/SDL_audiotypecvt.c",
    "audio/wasapi/SDL_wasapi_win32.c",
    "audio/wasapi/SDL_wasapi.c",
    "audio/SDL_audiodev.c",
    "audio/netbsd/SDL_netbsdaudio.c",
    "audio/nas/SDL_nasaudio.c",
    "audio/winmm/SDL_winmm.c",
    "audio/esd/SDL_esdaudio.c",
    "audio/fusionsound/SDL_fsaudio.c",
    "audio/qsa/SDL_qsa_audio.c",
    "audio/arts/SDL_artsaudio.c",
    "timer/SDL_timer.c",
    "timer/dummy/SDL_systimer.c",
    //"timer/haiku/SDL_systimer.c",
    //"loadso/dummy/SDL_sysloadso.c",
    "loadso/dlopen/SDL_sysloadso.c",
    //"power/haiku/SDL_syspower.c",
    "power/SDL_power.c",
    "cpuinfo/SDL_cpuinfo.c",
    "SDL_assert.c",
    "thread/generic/SDL_syscond.c",
    //"thread/generic/SDL_systls.c",
    //"thread/generic/SDL_sysmutex.c",
    //"thread/generic/SDL_systhread.c",
    //"thread/generic/SDL_syssem.c",
    "thread/SDL_thread.c",
    "SDL_error.c",
    "file/SDL_rwops.c",
    "haptic/SDL_haptic.c",
    "haptic/dummy/SDL_syshaptic.c",
    //"filesystem/dummy/SDL_sysfilesystem.c",
    "events/SDL_events.c",
    "events/SDL_touch.c",
    "events/SDL_clipboardevents.c",
    "events/SDL_displayevents.c",
    "events/SDL_mouse.c",
    "events/SDL_dropevents.c",
    "events/SDL_quit.c",
    "events/SDL_gesture.c",
    "events/SDL_keyboard.c",
    "events/SDL_windowevents.c",
    "render/opengles2/SDL_render_gles2.c",
    "render/opengles2/SDL_shaders_gles2.c",
    "render/opengl/SDL_render_gl.c",
    "render/opengl/SDL_shaders_gl.c",
    "render/opengles/SDL_render_gles.c",
    "render/SDL_yuv_sw.c",
    "render/software/SDL_rotate.c",
    "render/software/SDL_blendline.c",
    "render/software/SDL_blendfillrect.c",
    "render/software/SDL_drawpoint.c",
    "render/software/SDL_render_sw.c",
    "render/software/SDL_drawline.c",
    "render/software/SDL_blendpoint.c",
    "render/SDL_render.c",
    "render/SDL_d3dmath.c",
    "SDL_hints.c",
    "SDL.c",
    "SDL_dataqueue.c",
    //"dynapi/SDL_dynapi.c",
    "libm/e_log.c",
    "libm/s_copysign.c",
    "libm/e_fmod.c",
    "libm/e_sqrt.c",
    "libm/s_atan.c",
    "libm/s_tan.c",
    "libm/k_sin.c",
    "libm/k_cos.c",
    "libm/e_exp.c",
    "libm/k_rem_pio2.c",
    "libm/s_fabs.c",
    "libm/e_pow.c",
    "libm/s_scalbn.c",
    "libm/s_cos.c",
    "libm/k_tan.c",
    "libm/e_rem_pio2.c",
    "libm/s_sin.c",
    "libm/e_atan2.c",
    "libm/s_floor.c",
    "libm/e_log10.c",
    "joystick/hidapi/SDL_hidapi_xbox360.c",
    "joystick/hidapi/SDL_hidapi_xboxone.c",
    "joystick/hidapi/SDL_hidapi_ps4.c",
    "joystick/hidapi/SDL_hidapi_switch.c",
    "joystick/hidapi/SDL_hidapijoystick.c",
    "joystick/SDL_joystick.c",
    "joystick/dummy/SDL_sysjoystick.c",
    "joystick/SDL_gamecontroller.c",
    "joystick/steam/SDL_steamcontroller.c",
    "joystick/virtual/SDL_virtualjoystick.c",
    "video/yuv2rgb/yuv_rgb.c",
    "video/SDL_vulkan_utils.c",
    "video/SDL_blit_1.c",
    "video/SDL_pixels.c",
    "video/SDL_blit_0.c",
    "video/raspberry/SDL_rpimouse.c",
    "video/raspberry/SDL_rpievents.c",
    "video/raspberry/SDL_rpiopengles.c",
    "video/raspberry/SDL_rpivideo.c",
    "video/SDL_rect.c",
    "video/dummy/SDL_nullframebuffer.c",
    "video/dummy/SDL_nullvideo.c",
    "video/dummy/SDL_nullevents.c",
    "video/SDL_yuv.c",
    "video/SDL_video.c",
    "video/SDL_stretch.c",
    "video/SDL_blit_slow.c",
    "video/pandora/SDL_pandora_events.c",
    "video/pandora/SDL_pandora.c",
    "video/SDL_fillrect.c",
    "video/SDL_blit_copy.c",
    "video/SDL_RLEaccel.c",
    "video/SDL_bmp.c",
    "video/SDL_blit_auto.c",
    "video/directfb/SDL_DirectFB_video.c",
    "video/directfb/SDL_DirectFB_opengl.c",
    "video/directfb/SDL_DirectFB_window.c",
    "video/directfb/SDL_DirectFB_render.c",
    "video/directfb/SDL_DirectFB_modes.c",
    "video/directfb/SDL_DirectFB_shape.c",
    "video/directfb/SDL_DirectFB_WM.c",
    "video/directfb/SDL_DirectFB_mouse.c",
    "video/directfb/SDL_DirectFB_dyn.c",
    "video/directfb/SDL_DirectFB_events.c",
    "video/SDL_blit_N.c",
    "video/SDL_egl.c",
    "video/SDL_surface.c",
    "video/SDL_blit.c",
    "video/kmsdrm/SDL_kmsdrmopengles.c",
    "video/kmsdrm/SDL_kmsdrmevents.c",
    "video/kmsdrm/SDL_kmsdrmmouse.c",
    "video/kmsdrm/SDL_kmsdrmdyn.c",
    "video/kmsdrm/SDL_kmsdrmvideo.c",
    "video/SDL_blit_A.c",
    "video/SDL_shape.c",
    "video/SDL_clipboard.c",
    "stdlib/SDL_iconv.c",
    "stdlib/SDL_malloc.c",
    "stdlib/SDL_string.c",
    "stdlib/SDL_stdlib.c",
    "stdlib/SDL_getenv.c",
    "stdlib/SDL_qsort.c",
    "stdlib/SDL_crc32.c",
};

// Linux/Unix (plus Android/Emscripten/NaCl/PSP grab-bag) sources.
// NOTE(review): currently unused by getLibrary — see note there.
const linux_src_files = [_][]const u8{
    "thread/pthread/SDL_syscond.c",
    "thread/pthread/SDL_systls.c",
    "thread/pthread/SDL_sysmutex.c",
    "thread/pthread/SDL_systhread.c",
    "thread/pthread/SDL_syssem.c",
    "hidapi/libusb/hid.c",
    "timer/unix/SDL_systimer.c",
    "haptic/linux/SDL_syshaptic.c",
    "haptic/darwin/SDL_syshaptic.c",
    "core/unix/SDL_poll.c",
    "power/macosx/SDL_syspower.c",
    "hidapi/mac/hid.c",
    "power/linux/SDL_syspower.c",
    "hidapi/linux/hid.c",
    "core/linux/SDL_ime.c",
    "core/linux/SDL_fcitx.c",
    "core/linux/SDL_udev.c",
    "core/linux/SDL_dbus.c",
    "core/linux/SDL_ibus.c",
    "core/linux/SDL_evdev_kbd.c",
    "core/linux/SDL_evdev.c",
    "filesystem/unix/SDL_sysfilesystem.c",
    "audio/pulseaudio/SDL_pulseaudio.c",
    "audio/jack/SDL_jackaudio.c",
    "audio/alsa/SDL_alsa_audio.c",
    "video/qnx/gl.c",
    "video/qnx/keyboard.c",
    "video/qnx/video.c",
    "power/psp/SDL_syspower.c",
    "audio/psp/SDL_pspaudio.c",
    "timer/psp/SDL_systimer.c",
    "thread/psp/SDL_syscond.c",
    "thread/psp/SDL_sysmutex.c",
    "thread/psp/SDL_systhread.c",
    "thread/psp/SDL_syssem.c",
    "render/psp/SDL_render_psp.c",
    "joystick/psp/SDL_sysjoystick.c",
    "video/psp/SDL_pspmouse.c",
    "video/psp/SDL_pspgl.c",
    "video/psp/SDL_pspevents.c",
    "video/psp/SDL_pspvideo.c",
    "joystick/linux/SDL_sysjoystick.c",
    "joystick/bsd/SDL_sysjoystick.c",
    "joystick/darwin/SDL_sysjoystick.c",
    "joystick/android/SDL_sysjoystick.c",
    "filesystem/android/SDL_sysfilesystem.c",
    "haptic/android/SDL_syshaptic.c",
    "core/android/SDL_android.c",
    "power/android/SDL_syspower.c",
    "sensor/android/SDL_androidsensor.c",
    "audio/android/SDL_androidaudio.c",
    "video/android/SDL_androidkeyboard.c",
    "video/android/SDL_androidvulkan.c",
    "video/android/SDL_androidtouch.c",
    "video/android/SDL_androidmouse.c",
    "video/android/SDL_androidevents.c",
    "video/android/SDL_androidvideo.c",
    "video/android/SDL_androidclipboard.c",
    "video/android/SDL_androidwindow.c",
    "video/android/SDL_androidmessagebox.c",
    "video/android/SDL_androidgl.c",
    "power/emscripten/SDL_syspower.c",
    "audio/emscripten/SDL_emscriptenaudio.c",
    "filesystem/emscripten/SDL_sysfilesystem.c",
    "joystick/emscripten/SDL_sysjoystick.c",
    "video/emscripten/SDL_emscriptenframebuffer.c",
    "video/emscripten/SDL_emscriptenevents.c",
    "video/emscripten/SDL_emscriptenmouse.c",
    "video/emscripten/SDL_emscriptenopengles.c",
    "video/emscripten/SDL_emscriptenvideo.c",
    "audio/nacl/SDL_naclaudio.c",
    "filesystem/nacl/SDL_sysfilesystem.c",
    "video/nacl/SDL_naclwindow.c",
    "video/nacl/SDL_naclvideo.c",
    "video/nacl/SDL_naclglue.c",
    "video/nacl/SDL_naclopengles.c",
    "video/nacl/SDL_naclevents.c",
    "video/mir/SDL_mirvulkan.c",
    "video/mir/SDL_mirevents.c",
    "video/mir/SDL_mirmouse.c",
    "video/mir/SDL_mirwindow.c",
    "video/mir/SDL_mirdyn.c",
    "video/mir/SDL_mirframebuffer.c",
    "video/mir/SDL_mirvideo.c",
    "video/mir/SDL_miropengl.c",
    "video/x11/SDL_x11shape.c",
    "video/x11/imKStoUCS.c",
    "video/x11/SDL_x11framebuffer.c",
    "video/x11/SDL_x11events.c",
    "video/x11/SDL_x11opengles.c",
    "video/x11/SDL_x11touch.c",
    "video/x11/SDL_x11video.c",
    "video/x11/edid-parse.c",
    "video/x11/SDL_x11mouse.c",
    "video/x11/SDL_x11dyn.c",
    "video/x11/SDL_x11modes.c",
    "video/x11/SDL_x11messagebox.c",
    "video/x11/SDL_x11window.c",
    "video/x11/SDL_x11vulkan.c",
    "video/x11/SDL_x11keyboard.c",
    "video/x11/SDL_x11clipboard.c",
    "video/x11/SDL_x11opengl.c",
    "video/x11/SDL_x11xinput2.c",
    "video/vivante/SDL_vivantevideo.c",
    "video/vivante/SDL_vivanteplatform.c",
    "video/vivante/SDL_vivanteopengles.c",
    "video/wayland/SDL_waylandvideo.c",
    "video/wayland/SDL_waylandclipboard.c",
    "video/wayland/SDL_waylandvulkan.c",
    "video/wayland/SDL_waylandtouch.c",
    "video/wayland/SDL_waylandmouse.c",
    "video/wayland/SDL_waylandevents.c",
    "video/wayland/SDL_waylandwindow.c",
    "video/wayland/SDL_waylandopengles.c",
    "video/wayland/SDL_waylanddyn.c",
    "video/wayland/SDL_waylanddatamanager.c",
};

// Windows backend sources, compiled only for Windows targets.
const windows_src_files = [_][]const u8{
    "render/direct3d11/SDL_shaders_d3d11.c",
    "render/direct3d11/SDL_render_d3d11.c",
    "render/direct3d/SDL_shaders_d3d.c",
    "render/direct3d/SDL_render_d3d.c",
    "hidapi/windows/hid.c",
    "timer/windows/SDL_systimer.c",
    "loadso/windows/SDL_sysloadso.c",
    "power/windows/SDL_syspower.c",
    "core/windows/SDL_windows.c",
    "core/windows/SDL_xinput.c",
    "thread/windows/SDL_systls.c",
    "thread/windows/SDL_sysmutex.c",
    "thread/windows/SDL_systhread.c",
    "thread/windows/SDL_syssem.c",
    "haptic/windows/SDL_dinputhaptic.c",
    "haptic/windows/SDL_xinputhaptic.c",
    "haptic/windows/SDL_windowshaptic.c",
    "filesystem/windows/SDL_sysfilesystem.c",
    "joystick/windows/SDL_windowsjoystick.c",
    "joystick/windows/SDL_xinputjoystick.c",
    "joystick/windows/SDL_dinputjoystick.c",
    "joystick/windows/SDL_mmjoystick.c",
    "video/windows/SDL_windowsclipboard.c",
    "video/windows/SDL_windowsmessagebox.c",
    "video/windows/SDL_windowsmouse.c",
    "video/windows/SDL_windowsvideo.c",
    "video/windows/SDL_windowsopengl.c",
    "video/windows/SDL_windowsvulkan.c",
    "video/windows/SDL_windowsshape.c",
    "video/windows/SDL_windowskeyboard.c",
    "video/windows/SDL_windowswindow.c",
    "video/windows/SDL_windowsmodes.c",
    "video/windows/SDL_windowsopengles.c",
    "video/windows/SDL_windowsframebuffer.c",
    "video/windows/SDL_windowsevents.c",
};

// macOS backend sources (.m files are Objective-C), compiled only for
// Darwin targets.
const darwin_src_files = [_][]const u8{
    "file/cocoa/SDL_rwopsbundlesupport.m",
    "misc/macosx/SDL_sysurl.m",
    //"audio/coreaudio/SDL_coreaudio.m",
    //"hidapi/SDL_hidapi.c",
    "joystick/darwin/SDL_iokitjoystick.c",
    //"joystick/iphoneos/SDL_mfijoystick.m",
    //"joystick/hidapi/SDL_hidapi_gamecube.c",
    //"joystick/hidapi/SDL_hidapi_luna.c",
    //"joystick/hidapi/SDL_hidapi_ps4.c",
    //"joystick/hidapi/SDL_hidapi_ps5.c",
    //"joystick/hidapi/SDL_hidapi_rumble.c",
    //"joystick/hidapi/SDL_hidapi_stadia.c",
    //"joystick/hidapi/SDL_hidapi_steam.c",
    //"joystick/hidapi/SDL_hidapi_switch.c",
    //"joystick/hidapi/SDL_hidapi_xbox360.c",
    //"joystick/hidapi/SDL_hidapi_xbox360w.c",
    //"joystick/hidapi/SDL_hidapi_xboxone.c",
    //"joystick/hidapi/SDL_hidapijoystick.c",
    "haptic/darwin/SDL_syshaptic.c",
    "power/macosx/SDL_syspower.c",
    "locale/macosx/SDL_syslocale.m",
    "timer/unix/SDL_systimer.c",
    "filesystem/cocoa/SDL_sysfilesystem.m",
    "thread/pthread/SDL_systhread.c",
    "thread/pthread/SDL_sysmutex.c",
    "thread/pthread/SDL_syscond.c",
    "thread/pthread/SDL_syssem.c",
    "thread/pthread/SDL_systls.c",
    "render/metal/SDL_render_metal.m",
    "video/cocoa/SDL_cocoaclipboard.m",
    "video/cocoa/SDL_cocoaevents.m",
    "video/cocoa/SDL_cocoakeyboard.m",
    "video/cocoa/SDL_cocoamessagebox.m",
    "video/cocoa/SDL_cocoametalview.m",
    "video/cocoa/SDL_cocoamodes.m",
    "video/cocoa/SDL_cocoamouse.m",
    "video/cocoa/SDL_cocoaopengl.m",
    "video/cocoa/SDL_cocoaopengles.m",
    "video/cocoa/SDL_cocoashape.m",
    "video/cocoa/SDL_cocoavideo.m",
    "video/cocoa/SDL_cocoavulkan.m",
    "video/cocoa/SDL_cocoawindow.m",
};

// SDL2_gfx sources built by getLibGfx.
const generic_gfx_src_files = [_][]const u8{
    "SDL2_imageFilter.c",
    "SDL2_framerate.c",
    "SDL2_gfxPrimitives.c",
    "SDL2_rotozoom.c",
};
build.zig
//! Merkle-AVL tree link: a reference from a parent node to a child, in one
//! of three states — Pruned (only hash/key/heights retained), Modified
//! (in-memory tree with a not-yet-computed hash), or Stored (in-memory tree
//! with a known hash).
const std = @import("std");
const testing = std.testing;
const Tree = @import("tree.zig").Tree;
const KV = @import("kv.zig").KV;
const Hash = @import("hash.zig").H;

/// Discriminant for the Link union.
pub const LinkTag = enum(u2) {
    Pruned,
    Modified,
    Stored,
};

/// A tagged reference to a child subtree.
pub const Link = union(LinkTag) {
    Pruned: Pruned,
    Modified: Modified,
    Stored: Stored,

    /// Returns the child's key. For Pruned links the key is stored inline;
    /// otherwise it is read from the referenced tree.
    pub fn key(self: Link) []const u8 {
        return switch (self) {
            .Pruned => self.Pruned.key,
            .Modified => self.Modified.tree.key(),
            .Stored => self.Stored.tree.key(),
        };
    }

    /// Returns the in-memory child tree, or null for a Pruned link (the
    /// tree has been evicted).
    pub fn tree(self: Link) ?*Tree {
        return switch (self) {
            .Pruned => null,
            .Modified => self.Modified.tree,
            .Stored => self.Stored.tree,
        };
    }

    /// Returns the child's hash, or null for a Modified link (hash not yet
    /// recomputed after modification).
    pub fn hash(self: Link) ?Hash {
        return switch (self) {
            .Pruned => self.Pruned.hash,
            .Modified => null,
            .Stored => self.Stored.hash,
        };
    }

    /// Height of the child subtree: 1 + the taller of its two children.
    pub fn height(self: Link) u8 {
        return switch (self) {
            .Pruned => 1 + std.mem.max(u8, self.Pruned.child_heights[0..]),
            .Modified => 1 + std.mem.max(u8, self.Modified.child_heights[0..]),
            .Stored => 1 + std.mem.max(u8, self.Stored.child_heights[0..]),
        };
    }

    /// Heights of the child's own [left, right] children.
    pub fn childHeights(self: Link) [2]u8 {
        return switch (self) {
            .Pruned => self.Pruned.child_heights,
            .Modified => self.Modified.child_heights,
            .Stored => self.Stored.child_heights,
        };
    }

    /// AVL balance factor: right height minus left height (negative means
    /// left-heavy). Widened to i16 so the subtraction cannot overflow u8.
    pub fn balanceFactor(self: Link) i16 {
        return switch (self) {
            .Pruned => @as(i16, self.Pruned.child_heights[1]) - @as(i16, self.Pruned.child_heights[0]),
            .Modified => @as(i16, self.Modified.child_heights[1]) - @as(i16, self.Modified.child_heights[0]),
            .Stored => @as(i16, self.Stored.child_heights[1]) - @as(i16, self.Stored.child_heights[0]),
        };
    }

    /// Wraps a freshly modified in-memory tree as a Modified link.
    pub fn fromModifiedTree(t: *Tree) Link {
        return Link{ .Modified = Modified{ .child_heights = t.childHeights(), .tree = t } };
    }

    /// Reconstructs a Pruned link from serialized hash and key bytes.
    /// Child heights are initialized to zero.
    /// NOTE(review): caller must keep key_buf alive — the slice is not
    /// copied, only the hash bytes are.
    pub fn fromMarshal(hash_buf: []const u8, key_buf: []const u8) Link {
        var l = Link{ .Pruned = Pruned{ .hash = undefined, .child_heights = [2]u8{ 0, 0 }, .key = key_buf } };
        std.mem.copy(u8, &l.Pruned.hash.inner, hash_buf);
        return l;
    }

    /// Converts a Pruned or Modified link into a Stored link backed by `t`
    /// (for Modified, the tree pointer and hash come from the link itself).
    /// Panics if the link is already Stored.
    pub fn intoStored(self: Link, t: *Tree) Link {
        return switch (self) {
            .Pruned => Link{
                .Stored = Stored{
                    .hash = self.hash().?,
                    .tree = t,
                    .child_heights = t.childHeights(),
                },
            },
            .Modified => Link{
                .Stored = Stored{
                    .hash = self.tree().?.hash(),
                    .tree = self.tree().?,
                    .child_heights = self.childHeights(),
                },
            },
            .Stored => @panic("should be modified link"),
        };
    }

    /// Converts a Stored link into a Pruned link, dropping the in-memory
    /// tree pointer. Panics if the link is not Stored.
    pub fn intoPruned(self: Link) Link {
        return switch (self) {
            .Pruned => @panic("should be stored link"),
            .Modified => @panic("should be stored link"),
            .Stored => Link{ .Pruned = Pruned{ .hash = self.hash().?, .child_heights = self.childHeights(), .key = self.key() } },
        };
    }
};

/// Child evicted from memory: only its hash, key, and child heights remain.
pub const Pruned = struct {
    hash: Hash,
    child_heights: [2]u8, // [left, right]
    key: []const u8,
};

/// In-memory child that has been modified; its hash is stale/unknown.
pub const Modified = struct {
    child_heights: [2]u8, // [left, right]
    tree: *Tree,
};

/// In-memory child whose hash is known and current.
pub const Stored = struct {
    hash: Hash,
    child_heights: [2]u8, // [left, right]
    tree: *Tree,
};

test "check size" {
    std.debug.print("size of link: {}\n", .{@sizeOf(Link)});
}

test "key" {
    var tree: Tree = Tree{ .kv = KV.init("key", "value"), .left = null, .right = null };
    const l: Link = Link{ .Modified = Modified{ .child_heights = .{ 0, 2 }, .tree = &tree } };
    testing.expectEqualSlices(u8, l.key(), "key");
}

test "tree" {
    var tree: Tree = Tree{ .kv = KV.init("key", "value"), .left = null, .right = null };
    const l: Link = Link{ .Modified = Modified{ .child_heights = .{ 0, 2 }, .tree = &tree } };
    var linkedTree = l.tree().?;
    testing.expectEqual(&tree, linkedTree);
}

test "hash" {
    const kvHash = KV.kvHash("key", "value");
    const l: Link = Link{ .Pruned = Pruned{ .hash = kvHash, .child_heights = .{ 0, 2 }, .key = undefined } };
    testing.expectEqualSlices(u8, l.hash().?.inner[0..], kvHash.inner[0..]);
}

test "height" {
    // height = 1 + max(0, 2) = 3
    const l: Link = Link{ .Modified = Modified{ .child_heights = .{ 0, 2 }, .tree = undefined } };
    testing.expectEqual(l.height(), 3);
}

test "balanceFactor" {
    // right(0) - left(2) = -2: left-heavy
    const l: Link = Link{ .Modified = Modified{ .child_heights = .{ 2, 0 }, .tree = undefined } };
    testing.expectEqual(l.balanceFactor(), -2);
}
src/link.zig
const std = @import("std");
const builtin = std.builtin;
const TypeInfo = builtin.TypeInfo;
const alignForward = std.mem.alignForward;
const pi = std.math.pi;

/// Fixed-point math utilities for the GBA (no FPU): a generic fixed-point
/// type generator, a 512-entry sine lookup table, and trig helpers that use
/// the GBA angle convention (a full circle is 1 << 16).
pub const Math = struct {
    /// Returns a fixed-point number type with `integral` integer bits and
    /// `fractional` fraction bits, signed if `isSigned`. The raw value is a
    /// packed (integral + fractional)-bit integer scaled by 2^fractional.
    pub fn FixedPoint(comptime isSigned: bool, comptime integral: comptime_int, comptime fractional: comptime_int) type {
        return packed struct {
            raw: RawType = undefined,

            const SignedRawType = @Type(TypeInfo{ .Int = TypeInfo.Int{ .is_signed = true, .bits = integral + fractional } });
            const UnsignedRawType = @Type(TypeInfo{ .Int = TypeInfo.Int{ .is_signed = false, .bits = integral + fractional } });
            const RawType = if (isSigned) SignedRawType else UnsignedRawType;
            // Raw width rounded up to a whole number of bytes, for
            // intermediate arithmetic.
            const SignedAlignIntegerType = @Type(TypeInfo{ .Int = TypeInfo.Int{ .is_signed = true, .bits = alignForward(integral + fractional, 8) } });
            const UnsignedAlignIntegerType = @Type(TypeInfo{ .Int = TypeInfo.Int{ .is_signed = false, .bits = alignForward(integral + fractional, 8) } });
            const AlignIntegerType = if (isSigned) SignedAlignIntegerType else UnsignedAlignIntegerType;
            // Integer type that exactly holds the integral part on input.
            const InputIntegerType = @Type(TypeInfo{ .Int = TypeInfo.Int{ .is_signed = isSigned, .bits = integral } });
            // 32-bit type used as the widened intermediate for mul/div.
            const MaxIntegerType = if (isSigned) i32 else u32;

            pub const Shift = fractional;
            pub const Scale = 1 << fractional;
            pub const IntegralMask = (1 << integral) - 1;
            pub const FractionalMask = (1 << fractional) - 1;

            const Self = @This();

            /// Converts an integer to fixed-point (value << Shift).
            pub inline fn fromInt(value: InputIntegerType) Self {
                return Self{
                    .raw = @truncate(RawType, @intCast(AlignIntegerType, value) << Shift),
                };
            }

            /// Converts a comptime float to fixed-point (value * Scale,
            /// truncated toward zero).
            pub inline fn fromF32(comptime value: f32) Self {
                return Self{
                    .raw = @floatToInt(RawType, value * Scale),
                };
            }

            /// Overwrites self with the fixed-point form of `value`.
            pub inline fn setInt(self: *Self, value: InputIntegerType) void {
                self.raw = fromInt(value).raw;
            }

            /// Overwrites self with the fixed-point form of a comptime float.
            pub inline fn setF32(self: *Self, comptime value: f32) void {
                self.raw = fromF32(value).raw;
            }

            /// Extracts the integral bit pattern (no sign extension).
            pub inline fn integral(self: Self) UnsignedAlignIntegerType {
                return @intCast(UnsignedAlignIntegerType, (@bitCast(UnsignedRawType, self.raw) >> Shift) & (IntegralMask));
            }

            /// Extracts the fractional bit pattern.
            pub inline fn fractional(self: Self) UnsignedAlignIntegerType {
                return @intCast(UnsignedAlignIntegerType, @bitCast(UnsignedRawType, self.raw) & FractionalMask);
            }

            /// Converts to f32 (for tooling/tests; avoid on-device floats).
            pub inline fn toF32(self: Self) f32 {
                return @intToFloat(f32, self.raw) / Scale;
            }

            /// Fixed-point addition: raw values add directly.
            pub inline fn add(left: Self, right: Self) Self {
                return Self{
                    .raw = left.raw + right.raw,
                };
            }

            pub inline fn addSelf(left: *Self, right: Self) void {
                left.raw = add(left, right).raw;
            }

            /// Fixed-point subtraction: raw values subtract directly.
            pub inline fn sub(left: Self, right: Self) Self {
                return Self{
                    .raw = left.raw - right.raw,
                };
            }

            pub inline fn subSelf(left: *Self, right: Self) void {
                left.raw = sub(left, right).raw;
            }

            /// Fixed-point multiply: widen to 32 bits, multiply, then shift
            /// the extra Scale factor back out.
            pub inline fn mul(left: Self, right: Self) Self {
                return Self{
                    .raw = @truncate(RawType, (@intCast(MaxIntegerType, left.raw) * @intCast(MaxIntegerType, right.raw)) >> Shift),
                };
            }

            pub inline fn mulSelf(left: *Self, right: Self) void {
                left.raw = mul(left, right).raw;
            }

            /// Fixed-point divide: pre-scale the dividend so the quotient
            /// keeps the fixed-point scale.
            pub inline fn div(left: Self, right: Self) Self {
                return Self{
                    .raw = @truncate(RawType, @divTrunc(@intCast(MaxIntegerType, left.raw) * Scale, @intCast(MaxIntegerType, right.raw))),
                };
            }

            pub inline fn divSelf(left: *Self, right: Self) void {
                left.raw = div(left, right).raw;
            }

            // toInt rounds toward negative infinity for signed values
            // (arithmetic shift semantics via @divFloor), plain shift for
            // unsigned.
            pub const toInt = comptime if (isSigned) toIntSigned else toIntUnsigned;

            fn toIntUnsigned(self: Self) AlignIntegerType {
                return self.raw >> Shift;
            }

            fn toIntSigned(self: Self) AlignIntegerType {
                return @divFloor(self.raw, Scale);
            }
        };
    }

    pub const FixedI8_8 = FixedPoint(true, 8, 8);
    pub const FixedU8_8 = FixedPoint(false, 8, 8);
    pub const FixedI4_12 = FixedPoint(true, 4, 12);
    pub const FixedU4_12 = FixedPoint(false, 4, 12);
    pub const FixedI19_8 = FixedPoint(true, 19, 8);
    pub const FixedU19_8 = FixedPoint(false, 19, 8);

    /// 512-entry sine table in 4.12 fixed-point raw form, built at comptime:
    /// entry i holds sin(i * 2*pi / 512).
    pub const sin_lut: [512]i16 = comptime blk: {
        @setEvalBranchQuota(10000);
        var result: [512]i16 = undefined;
        var i: usize = 0;
        while (i < result.len) : (i += 1) {
            const sinValue = std.math.sin(@intToFloat(f32, i) * 2.0 * pi / 512.0);
            const fixedValue = FixedI4_12.fromF32(sinValue);
            result[i] = fixedValue.raw;
        }
        break :blk result;
    };

    /// Sine of a GBA angle (full circle = 1 << 16). theta >> 7 maps the
    /// 16-bit angle onto the 512-entry table; & 0x1FF wraps it.
    pub fn sin(theta: i32) FixedI4_12 {
        return FixedI4_12{
            .raw = sin_lut[@bitCast(u32, (theta >> 7) & 0x1FF)],
        };
    }

    /// Cosine of a GBA angle: sine shifted a quarter turn (128 of 512
    /// table entries).
    pub fn cos(theta: i32) FixedI4_12 {
        return FixedI4_12{
            .raw = sin_lut[@bitCast(u32, ((theta >> 7) + 128) & 0x1FF)],
        };
    }

    /// Converts degrees to the GBA angle unit (360 degrees -> 1 << 16).
    pub inline fn degreeToGbaAngle(comptime input: i32) i32 {
        return @floatToInt(i32, @intToFloat(f32, input) * ((1 << 16) / 360.0));
    }

    /// Converts radians to the GBA angle unit (tau radians -> 1 << 16).
    pub inline fn radianToGbaAngle(comptime input: f32) i32 {
        return @floatToInt(i32, input * ((1 << 16) / (std.math.tau)));
    }
};
GBA/math.zig
const Builder = @import("std").build.Builder;
usingnamespace @import("GBA/builder.zig");

/// Builds every example GBA ROM in the repository. Examples whose build step
/// needs no further configuration are discarded with `_ =` (keeping them as
/// named constants left unused locals, which is a compile error in current
/// Zig).
pub fn build(b: *Builder) void {
    _ = addGBAExecutable(b, "first", "examples/first/first.zig");
    _ = addGBAExecutable(b, "mode3draw", "examples/mode3draw/mode3draw.zig");
    _ = addGBAExecutable(b, "debugPrint", "examples/debugPrint/debugPrint.zig");

    // Mode 4 Flip: converts its two BMP assets to GBA image data at build time.
    const mode4flip = addGBAExecutable(b, "mode4flip", "examples/mode4flip/mode4flip.zig");
    convertMode4Images(mode4flip, &[_]ImageSourceTarget{
        .{
            .source = "examples/mode4flip/front.bmp",
            .target = "examples/mode4flip/front.agi",
        },
        .{
            .source = "examples/mode4flip/back.bmp",
            .target = "examples/mode4flip/back.agi",
        },
    }, "examples/mode4flip/mode4flip.agp");

    // Key demo, TODO: Use image created by the build system once we support indexed image
    const keydemo = addGBAExecutable(b, "keydemo", "examples/keydemo/keydemo.zig");
    keydemo.addCSourceFile("examples/keydemo/gba_pic.c", &[_][]const u8{"-std=c99"});

    // Simple OBJ demo, TODO: Use tile and palette data created by the build system
    const objDemo = addGBAExecutable(b, "objDemo", "examples/objDemo/objDemo.zig");
    objDemo.addCSourceFile("examples/objDemo/metroid_sprite_data.c", &[_][]const u8{"-std=c99"});

    // tileDemo, TODO: Use tileset, tile and palette created by the build system
    const tileDemo = addGBAExecutable(b, "tileDemo", "examples/tileDemo/tileDemo.zig");
    tileDemo.addCSourceFile("examples/tileDemo/brin.c", &[_][]const u8{"-std=c99"});

    // screenBlock
    _ = addGBAExecutable(b, "screenBlock", "examples/screenBlock/screenBlock.zig");

    // charBlock
    const charBlock = addGBAExecutable(b, "charBlock", "examples/charBlock/charBlock.zig");
    charBlock.addCSourceFile("examples/charBlock/cbb_ids.c", &[_][]const u8{"-std=c99"});

    // objAffine
    const objAffine = addGBAExecutable(b, "objAffine", "examples/objAffine/objAffine.zig");
    objAffine.addCSourceFile("examples/objAffine/metr.c", &[_][]const u8{"-std=c99"});
}
build.zig
// Central Vulkan dispatch tables. The wrapper generators (BaseWrapper,
// InstanceWrapper, DeviceWrapper) come from the "vulkan" package —
// presumably generated vulkan-zig bindings; each listed enum literal selects
// one Vulkan entry point to load into the corresponding dispatch table.
usingnamespace @import("vulkan");

const VK_API_VERSION_1_2 = makeApiVersion(0, 1, 2, 0);

// Global dispatch tables; populated during renderer initialization.
pub var vkb: BaseDispatch = undefined;
pub var vki: InstanceDispatch = undefined;
pub var vkd: DeviceDispatch = undefined;

// Loader-level commands (available before an instance exists).
pub const BaseDispatch = BaseWrapper(.{
    .CreateInstance,
});

// Instance-level commands: physical-device queries, surface queries, and
// debug-utils messenger management.
pub const InstanceDispatch = InstanceWrapper(.{
    .DestroyInstance,
    .CreateDevice,
    .DestroySurfaceKHR,
    .EnumeratePhysicalDevices,
    .GetPhysicalDeviceProperties,
    .EnumerateDeviceExtensionProperties,
    .GetPhysicalDeviceSurfaceFormatsKHR,
    .GetPhysicalDeviceSurfacePresentModesKHR,
    .GetPhysicalDeviceSurfaceCapabilitiesKHR,
    .GetPhysicalDeviceQueueFamilyProperties,
    .GetPhysicalDeviceSurfaceSupportKHR,
    .GetPhysicalDeviceMemoryProperties,
    .GetDeviceProcAddr,
    .CreateDebugUtilsMessengerEXT,
    .DestroyDebugUtilsMessengerEXT,
});

// Device-level commands: swapchain, synchronization, descriptors, pipelines,
// buffers/images, and command recording.
// TODO: Split wrappers by extension maybe?
pub const DeviceDispatch = DeviceWrapper(.{
    .DestroyDevice,
    .GetDeviceQueue,
    .CreateSemaphore,
    .CreateFence,
    .CreateImageView,
    .DestroyImageView,
    .DestroySemaphore,
    .DestroyFence,
    .GetSwapchainImagesKHR,
    .CreateSwapchainKHR,
    .DestroySwapchainKHR,
    .AcquireNextImageKHR,
    .DeviceWaitIdle,
    .WaitForFences,
    .ResetFences,
    .QueueSubmit,
    .QueuePresentKHR,
    .CreateCommandPool,
    .DestroyCommandPool,
    .AllocateCommandBuffers,
    .FreeCommandBuffers,
    .QueueWaitIdle,
    .CreateDescriptorSetLayout,
    .DestroyDescriptorSetLayout,
    .CreateDescriptorPool,
    .DestroyDescriptorPool,
    .AllocateDescriptorSets,
    .CreateShaderModule,
    .DestroyShaderModule,
    .CreatePipelineLayout,
    .DestroyPipelineLayout,
    .CreateRenderPass,
    .DestroyRenderPass,
    .CreateGraphicsPipelines,
    .DestroyPipeline,
    .CreateFramebuffer,
    .DestroyFramebuffer,
    .BeginCommandBuffer,
    .EndCommandBuffer,
    .AllocateMemory,
    .FreeMemory,
    .CreateBuffer,
    .DestroyBuffer,
    .GetBufferMemoryRequirements,
    .MapMemory,
    .UnmapMemory,
    .BindBufferMemory,
    .BindImageMemory,
    .CmdBeginRenderPass,
    .CmdEndRenderPass,
    .CmdBindPipeline,
    .CmdDraw,
    .CmdSetViewport,
    .CmdSetScissor,
    .CmdBindVertexBuffers,
    .CmdBindIndexBuffer,
    .CmdCopyBuffer,
    .CmdPipelineBarrier,
    .CmdBindDescriptorSets,
    .CmdPushConstants,
    .CmdDrawIndexed,
    .CreateImage,
    .DestroyImage,
    .GetImageMemoryRequirements,
});
src/vulkan/vk.zig
const std = @import("std");

/// Represents the Redis command `SMOVE source destination member`,
/// serialized as the four-element array ("SMOVE", source, destination, member).
pub const SMOVE = struct {
    source: []const u8,
    destination: []const u8,
    member: []const u8,

    /// Instantiates a new SMOVE command.
    pub fn init(source: []const u8, destination: []const u8, member: []const u8) SMOVE {
        // TODO: support std.hashmap used as a set!
        return SMOVE{
            .source = source,
            .destination = destination,
            .member = member,
        };
    }

    /// Validates if the command is syntactically correct.
    pub fn validate(self: SMOVE) !void {
        // TODO: maybe this check is dumb and we shouldn't have it
        const same_key = std.mem.eql(u8, self.source, self.destination);
        if (same_key) return error.SameSourceAndDestination;
    }

    pub const RedisCommand = struct {
        pub fn serialize(self: SMOVE, comptime rootSerializer: type, msg: anytype) !void {
            return rootSerializer.serializeCommand(msg, .{
                "SMOVE",
                self.source,
                self.destination,
                self.member,
            });
        }
    };
};

test "basic usage" {
    // A command with distinct keys passes validation.
    const valid = SMOVE.init("source", "destination", "element");
    try valid.validate();

    // Identical source and destination keys are rejected.
    const invalid = SMOVE.init("source", "source", "element");
    std.testing.expectError(error.SameSourceAndDestination, invalid.validate());
}

test "serializer" {
    const serializer = @import("../../serializer.zig").CommandSerializer;

    var expected_buf: [1000]u8 = undefined;
    var expected_msg = std.io.fixedBufferStream(expected_buf[0..]);

    var actual_buf: [1000]u8 = undefined;
    var actual_msg = std.io.fixedBufferStream(actual_buf[0..]);

    {
        expected_msg.reset();
        actual_msg.reset();

        // Serializing the struct must match serializing the equivalent
        // literal argument tuple.
        try serializer.serializeCommand(
            actual_msg.outStream(),
            SMOVE.init("s", "d", "m"),
        );
        try serializer.serializeCommand(
            expected_msg.outStream(),
            .{ "SMOVE", "s", "d", "m" },
        );

        std.testing.expectEqualSlices(u8, expected_msg.getWritten(), actual_msg.getWritten());
    }
}
src/commands/sets/smove.zig
//! Tests for the git.zig libgit2 wrapper.
//! Each test operates in its own directory under ./zig-cache/test_repos so
//! on-disk state can never leak between tests. Fixture directory names must
//! therefore be unique per test (three duplicates fixed below).
const std = @import("std");
const git = @import("git.zig");

test "simple repository init" {
    const repo_path = "./zig-cache/test_repos/simple_repository_init";
    std.fs.cwd().deleteTree(repo_path) catch {};
    defer std.fs.cwd().deleteTree(repo_path) catch {};

    const handle = try git.init();
    defer handle.deinit();

    const repo = try handle.repositoryInit(repo_path, false);
    defer repo.deinit();
}

test "extended repository init" {
    const repo_path = "./zig-cache/test_repos/extended_repository_init/very/nested/repo";
    std.fs.cwd().deleteTree("./zig-cache/test_repos/extended_repository_init") catch {};
    defer std.fs.cwd().deleteTree("./zig-cache/test_repos/extended_repository_init") catch {};

    const handle = try git.init();
    defer handle.deinit();

    // mkdir/mkpath create the nested parent directories for the repo.
    const repo = try handle.repositoryInitExtended(repo_path, .{ .flags = .{ .mkdir = true, .mkpath = true } });
    defer repo.deinit();
}

test "repository discover" {
    var test_handle = try TestHandle.init("repository_discover");
    defer test_handle.deinit();

    var buf = try test_handle.handle.repositoryDiscover(test_handle.repo_path, false, null);
    defer buf.deinit();
}

test "fresh repo has head reference to unborn branch" {
    var test_handle = try TestHandle.init("head_reference_unborn_error");
    defer test_handle.deinit();

    try std.testing.expectError(git.GitError.UnbornBranch, test_handle.repo.head());
}

test "fresh repo has unborn head reference" {
    var test_handle = try TestHandle.init("head_reference_unborn");
    defer test_handle.deinit();

    try std.testing.expect(try test_handle.repo.isHeadUnborn());
}

test "fresh repo is empty" {
    var test_handle = try TestHandle.init("fresh_repo_is_empty");
    defer test_handle.deinit();

    try std.testing.expect(try test_handle.repo.isEmpty());
}

test "fresh repo index version is 2" {
    var test_handle = try TestHandle.init("fresh_repo_index_version");
    defer test_handle.deinit();

    const index = try test_handle.repo.indexGet();
    defer index.deinit();

    try std.testing.expectEqual(git.Index.IndexVersion.@"2", try index.versionGet());
}

test "fresh repo index is empty" {
    // Fixed: previously reused the "fresh_repo_index_version" fixture
    // directory of the test above.
    var test_handle = try TestHandle.init("fresh_repo_index_is_empty");
    defer test_handle.deinit();

    const index = try test_handle.repo.indexGet();
    defer index.deinit();

    try std.testing.expectEqual(@as(usize, 0), index.entryCount());
}

test "fresh repo read index" {
    var test_handle = try TestHandle.init("fresh_repo_read_index");
    defer test_handle.deinit();

    const index = try test_handle.repo.indexGet();
    defer index.deinit();

    try index.readIndexFromDisk(false);
}

test "fresh repo iterate index" {
    var test_handle = try TestHandle.init("fresh_repo_iterate_index");
    defer test_handle.deinit();

    const index = try test_handle.repo.indexGet();
    defer index.deinit();

    var iter = try index.iterate();
    defer iter.deinit();

    var count: usize = 0;
    while (try iter.next()) |_| {
        count += 1;
    }
    try std.testing.expectEqual(@as(usize, 0), count);
}

test "fresh repo index entry by index is null" {
    var test_handle = try TestHandle.init("fresh_repo_read_index_by_index");
    defer test_handle.deinit();

    const index = try test_handle.repo.indexGet();
    defer index.deinit();

    try std.testing.expect(index.entryByIndex(0) == null);
}

test "fresh repo index path is not null" {
    // Fixed: previously reused the "fresh_repo_read_index" fixture directory.
    var test_handle = try TestHandle.init("fresh_repo_index_path_not_null");
    defer test_handle.deinit();

    const index = try test_handle.repo.indexGet();
    defer index.deinit();

    try std.testing.expect(index.pathGet() != null);
}

test "fresh repo read then write index" {
    var test_handle = try TestHandle.init("fresh_repo_read_write_index");
    defer test_handle.deinit();

    const index = try test_handle.repo.indexGet();
    defer index.deinit();

    try index.readIndexFromDisk(false);
    try index.writeToDisk();
}

test "fresh repo index clear" {
    // Fixed: previously reused the "fresh_repo_read_write_index" fixture
    // directory.
    var test_handle = try TestHandle.init("fresh_repo_index_clear");
    defer test_handle.deinit();

    const index = try test_handle.repo.indexGet();
    defer index.deinit();

    try index.clear();
}

test "change fresh repo index version" {
    var test_handle = try TestHandle.init("change_fresh_repo_index_version");
    defer test_handle.deinit();

    const index = try test_handle.repo.indexGet();
    defer index.deinit();

    try std.testing.expectEqual(git.Index.IndexVersion.@"2", try index.versionGet());
    try index.versionSet(.@"3");
    try std.testing.expectEqual(git.Index.IndexVersion.@"3", try index.versionGet());
}

test "bare repo is bare" {
    const repo_path = "./zig-cache/test_repos/bare_repo_is_bare";
    std.fs.cwd().deleteTree(repo_path) catch {};
    defer std.fs.cwd().deleteTree(repo_path) catch {};

    const handle = try git.init();
    defer handle.deinit();

    const repo = try handle.repositoryInitExtended(repo_path, .{ .flags = .{ .mkdir = true, .mkpath = true, .bare = true } });
    defer repo.deinit();

    try std.testing.expect(repo.isBare());
}

test "item paths" {
    var test_handle = try TestHandle.init("item_paths");
    defer test_handle.deinit();

    var buf = try test_handle.repo.itemPath(.CONFIG);
    defer buf.deinit();

    // repo_path starts with "./"; skip the leading '.' so the expected value
    // can be matched as a suffix of the path returned by itemPath.
    const expected = try std.fmt.allocPrintZ(std.testing.allocator, "{s}/.git/config", .{test_handle.repo_path[1..]});
    defer std.testing.allocator.free(expected);

    try std.testing.expectStringEndsWith(buf.toSlice(), expected);
}

test "get config" {
    var test_handle = try TestHandle.init("get_config");
    defer test_handle.deinit();

    const config = try test_handle.repo.configGet();
    defer config.deinit();
}

test "get config snapshot" {
    var test_handle = try TestHandle.init("get_config_snapshot");
    defer test_handle.deinit();

    const config = try test_handle.repo.configSnapshot();
    defer config.deinit();
}

test "get odb" {
    var test_handle = try TestHandle.init("get_odb");
    defer test_handle.deinit();

    const odb = try test_handle.repo.odbGet();
    defer odb.deinit();
}

test "get ref db" {
    var test_handle = try TestHandle.init("get_refdb");
    defer test_handle.deinit();

    const ref_db = try test_handle.repo.refDb();
    defer ref_db.deinit();
}

test "get index" {
    var test_handle = try TestHandle.init("get_index");
    defer test_handle.deinit();

    const index = try test_handle.repo.indexGet();
    defer index.deinit();
}

/// No-op fetch-head callback used only to exercise the foreach API.
fn dummyFetchHeadCallback(ref_name: [:0]const u8, remote_url: [:0]const u8, oid: *const git.Oid, is_merge: bool) c_int {
    _ = ref_name;
    _ = remote_url;
    _ = oid;
    _ = is_merge;
    return 0;
}

test "foreach fetch head fails on fresh repository" {
    var test_handle = try TestHandle.init("foreach_fetch_head_fails_on_fresh");
    defer test_handle.deinit();

    try std.testing.expectError(git.GitError.NotFound, test_handle.repo.fetchHeadForeach(dummyFetchHeadCallback));
}

/// No-op merge-head callback used only to exercise the foreach API.
fn dummyFetchMergeCallback(oid: *const git.Oid) c_int {
    _ = oid;
    return 0;
}

test "foreach file status on fresh repository" {
    var test_handle = try TestHandle.init("foreach_file_status_on_fresh");
    defer test_handle.deinit();

    var count: usize = 0;
    _ = try test_handle.repo.fileStatusForeachWithUserData(&count, dummyFileStatusCallback);
    try std.testing.expectEqual(@as(usize, 0), count);
}

test "foreach file status extended on fresh repository" {
    var test_handle = try TestHandle.init("foreach_file_status_extended_on_fresh");
    defer test_handle.deinit();

    var count: usize = 0;
    _ = try test_handle.repo.fileStatusForeachExtendedWithUserData(.{}, &count, dummyFileStatusCallback);
    try std.testing.expectEqual(@as(usize, 0), count);
}

/// Counts how many status entries the foreach visits.
fn dummyFileStatusCallback(path: [:0]const u8, status: git.FileStatus, count: *usize) c_int {
    _ = path;
    _ = status;
    count.* += 1;
    return 0;
}

test "foreach merge head fails on fresh repository" {
    var test_handle = try TestHandle.init("foreach_merge_head_fails_on_fresh");
    defer test_handle.deinit();

    try std.testing.expectError(git.GitError.NotFound, test_handle.repo.mergeHeadForeach(dummyFetchMergeCallback));
}

test "fresh repo state is none" {
    var test_handle = try TestHandle.init("fresh_repo_state_none");
    defer test_handle.deinit();

    try std.testing.expectEqual(git.Repository.RepositoryState.NONE, test_handle.repo.state());
}

test "fresh repo has no namespace" {
    var test_handle = try TestHandle.init("fresh_repo_no_namespace");
    defer test_handle.deinit();

    try std.testing.expect((try test_handle.repo.namespaceGet()) == null);
}

test "set namespace" {
    var test_handle = try TestHandle.init("set_namespace");
    defer test_handle.deinit();

    try std.testing.expect((try test_handle.repo.namespaceGet()) == null);

    try test_handle.repo.namespaceSet("namespace");

    const result = try test_handle.repo.namespaceGet();
    try std.testing.expect(result != null);
    try std.testing.expectEqualStrings("namespace", result.?);
}

test "fresh repo is not a shallow clone" {
    var test_handle = try TestHandle.init("fresh_not_shallow");
    defer test_handle.deinit();

    try std.testing.expect(!test_handle.repo.isShallow());
}

test "fresh repo has no identity set" {
    var test_handle = try TestHandle.init("fresh_no_identity");
    defer test_handle.deinit();

    const ident = try test_handle.repo.identityGet();
    try std.testing.expect(ident.name == null);
    try std.testing.expect(ident.email == null);
}

test "set identity" {
    var test_handle = try TestHandle.init("set_identity");
    defer test_handle.deinit();

    {
        const ident = try test_handle.repo.identityGet();
        try std.testing.expect(ident.name == null);
        try std.testing.expect(ident.email == null);
    }

    // NOTE(review): "<NAME>" looks like a scrubbed placeholder; any non-empty
    // string works for this set/get round-trip.
    const name = "<NAME>";
    const email = "test email";

    {
        const ident = git.Repository.Identity{
            .name = name,
            .email = email,
        };
        try test_handle.repo.identitySet(ident);
    }

    {
        const ident = try test_handle.repo.identityGet();
        try std.testing.expectEqualStrings(name, ident.name.?);
        try std.testing.expectEqualStrings(email, ident.email.?);
    }
}

test "fresh repo status list" {
    var test_handle = try TestHandle.init("status_list");
    defer test_handle.deinit();

    const status_list = try test_handle.repo.statusList(.{});
    defer status_list.deinit();

    try std.testing.expectEqual(@as(usize, 0), status_list.entryCount());
}

test "bare index" {
    var handle = try git.init();
    defer handle.deinit();

    const path = "./zig-cache/test_repos/bare_index";
    defer std.fs.cwd().deleteTree(path) catch {};

    const index = try handle.indexOpen(path);
    defer index.deinit();
}

test "new index" {
    var handle = try git.init();
    defer handle.deinit();

    const index = try handle.indexNew();
    defer index.deinit();
}

test "new index has no repository" {
    var handle = try git.init();
    defer handle.deinit();

    const index = try handle.indexNew();
    defer index.deinit();

    try std.testing.expect(index.repositoryGet() == null);
}

test "new index capabilities" {
    var handle = try git.init();
    defer handle.deinit();

    const index = try handle.indexNew();
    defer index.deinit();

    const cap = index.indexCapabilities();
    try std.testing.expect(!cap.isGetCapabilitiesFromOwner());
    try std.testing.expect(!cap.IGNORE_CASE);
    try std.testing.expect(!cap.NO_FILEMODE);
    try std.testing.expect(!cap.NO_SYMLINKS);
}

test "set maximum mmap window size" {
    var handle = try git.init();
    defer handle.deinit();

    try handle.optionSetMaximumMmapWindowSize(1024);
    try std.testing.expectEqual(@as(usize, 1024), try handle.optionGetMaximumMmapWindowSize());
}

/// Shared test fixture: a fresh repository in its own directory under
/// ./zig-cache/test_repos, removed again (best-effort) on deinit.
const TestHandle = struct {
    handle: git.Handle,
    repo_path: [:0]const u8,
    repo: *git.Repository,

    /// `test_name` must be unique per test so fixtures never share a directory.
    pub fn init(test_name: []const u8) !TestHandle {
        const repo_path = try std.fmt.allocPrintZ(std.testing.allocator, "./zig-cache/test_repos/{s}", .{test_name});
        errdefer std.testing.allocator.free(repo_path);

        // Remove any leftovers from a previous (crashed) run.
        std.fs.cwd().deleteTree(repo_path) catch {};

        const handle = try git.init();
        errdefer handle.deinit();

        const repo = try handle.repositoryInitExtended(repo_path, .{ .flags = .{ .mkdir = true, .mkpath = true } });

        return TestHandle{
            .handle = handle,
            .repo_path = repo_path,
            .repo = repo,
        };
    }

    pub fn deinit(self: *TestHandle) void {
        self.repo.deinit();
        self.handle.deinit();
        std.fs.cwd().deleteTree(self.repo_path) catch {};
        std.testing.allocator.free(self.repo_path);
    }
};

comptime {
    std.testing.refAllDecls(@This());
}
src/tests.zig
const std = @import("std"); const mem = std.mem; const c = @cImport({ @cInclude("expat.h"); }); const gpa = std.heap.c_allocator; const Context = struct { const Self = @This(); protocol: Protocol, interface: *Interface, message: *Message, enumeration: *Enum, character_data: std.ArrayList(u8) = std.ArrayList(u8).init(gpa), fn deinit(self: Self) void { self.character_data.deinit(); } }; const Target = enum { client, server, }; const Protocol = struct { name: []const u8, namespace: []const u8, interfaces: std.ArrayList(Interface) = std.ArrayList(Interface).init(gpa), fn emitClient(protocol: Protocol, writer: anytype) !void { try writer.writeAll( \\const os = @import("std").os; \\const client = @import("wayland.zig").client; \\const common = @import("common.zig"); ); for (protocol.interfaces.items) |interface| try interface.emit(.client, protocol.namespace, writer); } fn emitServer(protocol: Protocol, writer: anytype) !void { try writer.writeAll( \\const os = @import("std").os; \\const server = @import("wayland.zig").server; \\const common = @import("common.zig"); ); for (protocol.interfaces.items) |interface| try interface.emit(.server, protocol.namespace, writer); } fn emitCommon(protocol: Protocol, writer: anytype) !void { try writer.writeAll( \\const common = @import("common.zig"); ); for (protocol.interfaces.items) |interface| try interface.emitCommon(writer); } }; const Interface = struct { name: []const u8, version: u32, requests: std.ArrayList(Message) = std.ArrayList(Message).init(gpa), events: std.ArrayList(Message) = std.ArrayList(Message).init(gpa), enums: std.ArrayList(Enum) = std.ArrayList(Enum).init(gpa), fn emit(interface: Interface, target: Target, namespace: []const u8, writer: anytype) !void { var buf: [1024]u8 = undefined; var fbs = std.io.fixedBufferStream(&buf); try printIdentifier(fbs.writer(), case(.title, trimPrefix(interface.name))); const title_case = fbs.getWritten(); try printIdentifier(fbs.writer(), trimPrefix(interface.name)); const 
snake_case = fbs.getWritten()[title_case.len..]; try writer.print( \\pub const {} = opaque {{ \\ pub const interface = common.{}.{}.interface; , .{ title_case, namespace, snake_case }); for (interface.enums.items) |e| { try writer.writeAll("pub const "); try printIdentifier(writer, case(.title, e.name)); try writer.print(" = common.{}.{}.", .{ namespace, snake_case }); try printIdentifier(writer, case(.title, e.name)); try writer.writeAll(";\n"); } if (target == .client) { if (interface.events.items.len > 0) { try writer.writeAll("pub const Event = union(enum) {"); for (interface.events.items) |event| try event.emitField(.client, writer); try writer.writeAll("};\n"); try writer.print( \\pub fn setListener( \\ {}: *{}, \\ comptime T: type, \\ listener: fn ({}: *{}, event: Event, data: T) void, \\ data: T, \\) !void {{ \\ const proxy = @ptrCast(*client.wl.Proxy, {}); \\ try proxy.addDispatcher(common.Dispatcher({}, T).dispatcher, listener, data); \\}} , .{ snake_case, title_case, snake_case, title_case, snake_case, title_case }); } for (interface.requests.items) |request, opcode| try request.emitFn(target, writer, interface, opcode); if (mem.eql(u8, interface.name, "wl_display")) try writer.writeAll(@embedFile("src/client_display_functions.zig")); } else { if (interface.requests.items.len > 0) { try writer.writeAll("pub const Request = union(enum) {"); for (interface.requests.items) |request| try request.emitField(.server, writer); try writer.writeAll("};\n"); @setEvalBranchQuota(1300); try writer.print( \\pub fn setHandler( \\ {}: *{}, \\ comptime T: type, \\ handler: fn ({}: *{}, request: Request, data: T) void, \\ data: T, \\ destroy: fn ({}: *{}) callconv(.C) void, \\) void {{ \\ const resource = @ptrCast(*server.wl.Resource, {}); \\ resource.setDispatcher( \\ common.Dispatcher({}, T).dispatcher, \\ handler, \\ data, \\ @ptrCast(resource.DestroyFn, destroy), \\ ); \\}} , .{ snake_case, title_case, snake_case, title_case, snake_case, title_case, snake_case, 
title_case }); } for (interface.events.items) |event, opcode| try event.emitFn(target, writer, interface, opcode); } try writer.writeAll("};\n"); } fn emitCommon(interface: Interface, writer: anytype) !void { try writer.writeAll("pub const "); try printIdentifier(writer, trimPrefix(interface.name)); // TODO: stop linking libwayland generated interface structs when // https://github.com/ziglang/zig/issues/131 is implemented // //try writer.print( // \\ = struct {{ // \\ pub const interface = common.Interface{{ // \\ .name = "{}", // \\ .version = {}, // \\ .method_count = {}, //, .{ // interface.name, // interface.version, // interface.requests.items.len, //}); //if (interface.requests.items.len > 0) { // try writer.writeAll(".methods = &[_]common.Message{"); // for (interface.requests.items) |request| try request.emitMessage(writer); // try writer.writeAll("},"); //} else { // try writer.writeAll(".methods = null,"); //} //try writer.print(".event_count = {},", .{interface.events.items.len}); //if (interface.events.items.len > 0) { // try writer.writeAll(".events = &[_]common.Message{"); // for (interface.events.items) |event| try event.emitMessage(writer); // try writer.writeAll("},"); //} else { // try writer.writeAll(".events = null,"); //} //try writer.writeAll("};"); try writer.print( \\ = struct {{ \\ extern const {}_interface: common.Interface; \\ pub inline fn interface() *const common.Interface {{ \\ return &{}_interface; \\ }} , .{ interface.name, interface.name }); for (interface.enums.items) |e| try e.emit(writer); try writer.writeAll("};"); } }; const Message = struct { name: []const u8, since: u32, args: std.ArrayList(Arg) = std.ArrayList(Arg).init(gpa), kind: union(enum) { normal: void, constructor: ?[]const u8, destructor: void, }, fn emitMessage(message: Message, writer: anytype) !void { try writer.print(".{{ .name = \"{}\", .signature = \"", .{message.name}); for (message.args.items) |arg| try arg.emitSignature(writer); try writer.writeAll("\", 
.types = "); if (message.args.items.len > 0) { try writer.writeAll("&[_]?*const common.Interface{"); for (message.args.items) |arg| { switch (arg.kind) { .object, .new_id => |interface| if (interface) |i| try writer.print("&common.{}.{}.interface,", .{ prefix(i), trimPrefix(i) }) else try writer.writeAll("null,"), else => try writer.writeAll("null,"), } } try writer.writeAll("},"); } else { try writer.writeAll("null,"); } try writer.writeAll("},"); } fn emitField(message: Message, target: Target, writer: anytype) !void { try printIdentifier(writer, message.name); try writer.writeAll(": struct {"); for (message.args.items) |arg| { if (target == .server and arg.kind == .new_id and arg.kind.new_id == null) { try writer.writeAll("interface: [*:0]const u8, version: u32,"); try printIdentifier(writer, arg.name); try writer.writeAll(": u32"); } else if (target == .client and arg.kind == .new_id) { try printIdentifier(writer, arg.name); try writer.writeAll(": *"); try printAbsolute(.client, writer, arg.kind.new_id.?); std.debug.assert(!arg.allow_null); } else { try printIdentifier(writer, arg.name); try writer.writeByte(':'); // See notes on NULL in doc comment for wl_message in wayland-util.h if (target == .client and arg.kind == .object and !arg.allow_null) try writer.writeByte('?'); try arg.emitType(target, writer); } try writer.writeByte(','); } try writer.writeAll("},\n"); } fn emitFn(message: Message, target: Target, writer: anytype, interface: Interface, opcode: usize) !void { try writer.writeAll("pub fn "); if (target == .server) { try writer.writeAll("send"); try printIdentifier(writer, case(.title, message.name)); } else { try printIdentifier(writer, case(.camel, message.name)); } try writer.writeByte('('); try printIdentifier(writer, trimPrefix(interface.name)); try writer.writeAll(": *"); try printIdentifier(writer, case(.title, trimPrefix(interface.name))); for (message.args.items) |arg| { if (target == .server and arg.kind == .new_id) { try 
writer.writeByte(','); try printIdentifier(writer, arg.name); try writer.writeByte(':'); if (arg.allow_null) try writer.writeByte('?'); try writer.writeByte('*'); if (arg.kind.new_id) |iface| try printIdentifier(writer, case(.title, trimPrefix(iface))) else try writer.writeAll("server.wl.Resource"); } else if (target == .client and arg.kind == .new_id) { if (arg.kind.new_id == null) try writer.writeAll(", comptime T: type, version: u32"); } else { try writer.writeByte(','); try printIdentifier(writer, arg.name); try writer.writeByte(':'); try arg.emitType(target, writer); } } if (target == .server or message.kind != .constructor) { try writer.writeAll(") void {"); } else if (message.kind.constructor) |new_iface| { try writer.writeAll(") !*"); try printIdentifier(writer, case(.title, trimPrefix(new_iface))); try writer.writeAll("{"); } else { try writer.writeAll(") !*T {"); } if (target == .server) try writer.writeAll("const resource = @ptrCast(*server.wl.Resource,") else try writer.writeAll("const proxy = @ptrCast(*client.wl.Proxy,"); try printIdentifier(writer, trimPrefix(interface.name)); try writer.writeAll(");"); if (message.args.items.len > 0) { try writer.writeAll("var args = [_]common.Argument{"); for (message.args.items) |arg| { switch (arg.kind) { .int, .uint, .fixed, .string, .array, .fd => { try writer.writeAll(".{ ."); try arg.emitSignature(writer); try writer.writeAll(" = "); if (arg.enum_name != null) { try writer.writeAll("switch (@typeInfo("); try arg.emitType(target, writer); // TODO We know the type of the enum at scanning time, but it's // currently a bit difficult to access it. 
const c_type = if (arg.kind == .uint) "u32" else "i32"; try writer.print( \\ )) {{ \\ .Enum => @intCast({}, @enumToInt({})), \\ .Struct => @bitCast(u32, {}), \\ else => unreachable, \\ }} , .{ c_type, arg.name, arg.name }); } else { try printIdentifier(writer, arg.name); } try writer.writeAll("},"); }, .object, .new_id => |new_iface| { if (arg.kind == .object or target == .server) { if (arg.allow_null) { try writer.writeAll(".{ .o = if ("); try printIdentifier(writer, arg.name); try writer.writeAll(") |o| @ptrCast(*common.Object, o) else null },"); } else { try writer.writeAll(".{ .o = @ptrCast(*common.Object, "); try printIdentifier(writer, arg.name); try writer.writeAll(") },"); } } else { if (new_iface == null) { try writer.writeAll( \\.{ .s = T.interface().name }, \\.{ .u = version }, ); } try writer.writeAll(".{ .o = null },"); } }, } } try writer.writeAll("};\n"); } const args = if (message.args.items.len > 0) "&args" else "null"; if (target == .server) { try writer.print("resource.postEvent({}, {});", .{ opcode, args }); } else switch (message.kind) { .normal, .destructor => { try writer.print("proxy.marshal({}, {});", .{ opcode, args }); if (message.kind == .destructor) try writer.writeAll("proxy.destroy();"); }, .constructor => |new_iface| { if (new_iface) |i| { try writer.writeAll("return @ptrCast(*"); try printIdentifier(writer, case(.title, trimPrefix(i))); try writer.print(", try proxy.marshalConstructor({}, &args, ", .{opcode}); try printIdentifier(writer, case(.title, trimPrefix(i))); try writer.writeAll(".interface()));"); } else { try writer.print("return @ptrCast(*T, try proxy.marshalConstructorVersioned({}, &args, T.interface(), version));", .{opcode}); } }, } try writer.writeAll("}\n"); } }; const Arg = struct { const Type = union(enum) { int, uint, fixed, string, new_id: ?[]const u8, object: ?[]const u8, array, fd, }; name: []const u8, kind: Type, allow_null: bool, enum_name: ?[]const u8, fn emitSignature(arg: Arg, writer: anytype) !void { 
switch (arg.kind) { .int => try writer.writeByte('i'), .uint => try writer.writeByte('u'), .fixed => try writer.writeByte('f'), .string => try writer.writeByte('s'), .new_id => |interface| if (interface == null) try writer.writeAll("sun") else try writer.writeByte('n'), .object => try writer.writeByte('o'), .array => try writer.writeByte('a'), .fd => try writer.writeByte('h'), } } fn emitType(arg: Arg, target: Target, writer: anytype) !void { switch (arg.kind) { .int, .uint => { if (arg.enum_name) |name| { if (mem.indexOfScalar(u8, name, '.')) |dot_index| { // Turn a reference like wl_shm.format into common.wl.shm.Format try writer.writeAll("common."); const us_index = mem.indexOfScalar(u8, name, '_') orelse 0; try writer.writeAll(name[0..us_index]); try writer.writeAll("."); try writer.writeAll(name[us_index + 1 .. dot_index + 1]); try writer.writeAll(case(.title, name[dot_index + 1 ..])); } else { try writer.writeAll(case(.title, name)); } } else if (arg.kind == .int) { try writer.writeAll("i32"); } else { try writer.writeAll("u32"); } }, .new_id => try writer.writeAll("u32"), .fixed => try writer.writeAll("common.Fixed"), .string => { if (arg.allow_null) try writer.writeByte('?'); try writer.writeAll("[*:0]const u8"); }, .object => |interface| if (interface) |i| { if (arg.allow_null) try writer.writeAll("?*") else try writer.writeByte('*'); try printAbsolute(target, writer, i); } else { if (arg.allow_null) try writer.writeByte('?'); try writer.writeAll("*common.Object"); }, .array => { if (arg.allow_null) try writer.writeByte('?'); try writer.writeAll("*common.Array"); }, .fd => try writer.writeAll("os.fd_t"), } } }; const Enum = struct { name: []const u8, since: u32, entries: std.ArrayList(Entry) = std.ArrayList(Entry).init(gpa), bitfield: bool, fn emit(e: Enum, writer: anytype) !void { try writer.writeAll("pub const "); try printIdentifier(writer, case(.title, e.name)); if (e.bitfield) { var entriesEmitted: u8 = 0; try writer.writeAll(" = packed struct {"); 
for (e.entries.items) |entry| { const value = entry.intValue(); if (value != 0 and std.math.isPowerOfTwo(value)) { entriesEmitted += 1; try printIdentifier(writer, entry.name); try writer.writeAll(": bool = false,"); } } // Pad to 32 bits try writer.print("_: u{} = 0,}};\n", .{32 - entriesEmitted}); } else { try writer.writeAll(" = extern enum(c_int) {"); for (e.entries.items) |entry| { try printIdentifier(writer, entry.name); try writer.print("= {},", .{entry.value}); } // Always generate non-exhaustive enums to ensure forward compatability try writer.writeAll("_,};\n"); } } }; const Entry = struct { name: []const u8, since: u32, value: []const u8, // Return numeric value of enum entry. Can be base 10 and hexadecimal notation. fn intValue(e: Entry) u32 { return std.fmt.parseInt(u32, e.value, 10) catch blk: { const index = mem.indexOfScalar(u8, e.value, 'x').?; break :blk std.fmt.parseInt(u32, e.value[index + 1 ..], 16) catch @panic("Can't parse enum entry."); }; } }; const Scanner = struct { /// Map from namespace to list of generated files const Map = std.hash_map.StringHashMap(std.ArrayListUnmanaged([]const u8)); client: Map = Map.init(gpa), server: Map = Map.init(gpa), common: Map = Map.init(gpa), fn scanProtocol(scanner: *Scanner, xml_filename: []const u8) !void { const xml_file = try std.fs.cwd().openFile(xml_filename, .{}); defer xml_file.close(); const parser = c.XML_ParserCreate(null) orelse return error.ParserCreateFailed; defer c.XML_ParserFree(parser); var ctx = Context{ .protocol = undefined, .interface = undefined, .message = undefined, .enumeration = undefined, }; defer ctx.deinit(); c.XML_SetUserData(parser, &ctx); c.XML_SetElementHandler(parser, start, end); c.XML_SetCharacterDataHandler(parser, characterData); while (true) { var buf: [4096]u8 = undefined; const read = try xml_file.readAll(&buf); const is_final = read < buf.len; if (c.XML_Parse(parser, &buf, @intCast(i32, read), if (is_final) 1 else 0) == .XML_STATUS_ERROR) return 
// Tail of Scanner.scanProtocol (its beginning is above this chunk): after expat finishes,
// derive protocol_name from the XML basename (strip the ".xml" suffix, map '-' to '_'),
// then emit <name>_client.zig, <name>_server.zig and <name>_common.zig and record each
// generated filename under the protocol's namespace in the scanner's client/server/common
// maps. main() scans every XML file passed on argv, then writes an aggregating client.zig
// containing one "pub const <ns> = struct { ... }" per namespace (the "wl" namespace also
// pulls in wayland_client_core.zig).
error.ParserError; if (is_final) break; } const xml_basename = std.fs.path.basename(xml_filename); const protocol_name = try mem.dupe(gpa, u8, xml_basename[0 .. xml_basename.len - 4]); for (protocol_name) |*ch| { if (ch.* == '-') ch.* = '_'; } const client_filename = try mem.concat(gpa, u8, &[_][]const u8{ protocol_name, "_client.zig" }); const client_file = try std.fs.cwd().createFile(client_filename, .{}); defer client_file.close(); try ctx.protocol.emitClient(client_file.writer()); try (try scanner.client.getOrPutValue(ctx.protocol.namespace, .{})).value.append(gpa, try mem.dupe(gpa, u8, client_filename)); const server_filename = try mem.concat(gpa, u8, &[_][]const u8{ protocol_name, "_server.zig" }); const server_file = try std.fs.cwd().createFile(server_filename, .{}); defer server_file.close(); try ctx.protocol.emitServer(server_file.writer()); try (try scanner.server.getOrPutValue(ctx.protocol.namespace, .{})).value.append(gpa, try mem.dupe(gpa, u8, server_filename)); const common_filename = try mem.concat(gpa, u8, &[_][]const u8{ protocol_name, "_common.zig" }); const common_file = try std.fs.cwd().createFile(common_filename, .{}); defer common_file.close(); try ctx.protocol.emitCommon(common_file.writer()); try (try scanner.common.getOrPutValue(ctx.protocol.namespace, .{})).value.append(gpa, try mem.dupe(gpa, u8, common_filename)); } }; pub fn main() !void { var scanner = Scanner{}; const argv = std.os.argv; for (argv[1..]) |xml_filename| try scanner.scanProtocol(mem.span(xml_filename)); { const client_file = try std.fs.cwd().createFile("client.zig", .{}); defer client_file.close(); const writer = client_file.writer(); var iter = scanner.client.iterator(); while (iter.next()) |entry| { try writer.print("pub const {} = struct {{", .{entry.key}); if (mem.eql(u8, entry.key, "wl")) try writer.writeAll("pub usingnamespace @import(\"wayland_client_core.zig\");\n"); for (entry.value.items) |generated_file| try writer.print("pub usingnamespace @import(\"{}\");", 
// main() continued: server.zig and common.zig are produced the same way (one namespace
// struct per protocol prefix, each pub-usingnamespace'ing the generated per-protocol
// files; common.zig additionally embeds src/common_core.zig verbatim at the top and the
// "wl" server namespace pulls in wayland_server_core.zig). start() is the expat
// start-element callback: it recovers the *Context from the opaque user_data pointer and
// delegates to handleStart, printing the error and exiting the process on failure
// (callconv(.C) callbacks cannot propagate Zig errors).
.{generated_file}); try writer.writeAll("};\n"); } } { const server_file = try std.fs.cwd().createFile("server.zig", .{}); defer server_file.close(); const writer = server_file.writer(); var iter = scanner.server.iterator(); while (iter.next()) |entry| { try writer.print("pub const {} = struct {{", .{entry.key}); if (mem.eql(u8, entry.key, "wl")) try writer.writeAll("pub usingnamespace @import(\"wayland_server_core.zig\");\n"); for (entry.value.items) |generated_file| try writer.print("pub usingnamespace @import(\"{}\");", .{generated_file}); try writer.writeAll("};\n"); } } { const common_file = try std.fs.cwd().createFile("common.zig", .{}); defer common_file.close(); const writer = common_file.writer(); try writer.writeAll(@embedFile("src/common_core.zig")); var iter = scanner.common.iterator(); while (iter.next()) |entry| { try writer.print("pub const {} = struct {{", .{entry.key}); for (entry.value.items) |generated_file| try writer.print("pub usingnamespace @import(\"{}\");", .{generated_file}); try writer.writeAll("};\n"); } } } fn start(user_data: ?*c_void, name: ?[*:0]const u8, atts: ?[*:null]?[*:0]const u8) callconv(.C) void { const ctx = @intToPtr(*Context, @ptrToInt(user_data)); handleStart(ctx, mem.span(name.?), atts.?) 
// handleStart: the null-terminated key/value attribute array is decoded into a typed
// anonymous struct via an inline for over its fields ("allow-null" in the XML maps to the
// allow_null field; ?u32 fields are parsed with parseInt, ?bool fields accept only
// "true"/"false", everything else is kept as a string slice). It then dispatches on the
// element name: <protocol> picks namespace "wl" for the core "wayland" protocol and the
// name's underscore-prefix otherwise; <interface> appends a new Interface; <event> and
// <request> append a Message (kind .destructor when type="destructor", since defaults to 1).
catch |err| { std.debug.warn("error reading xml: {}", .{err}); std.os.exit(1); }; } fn handleStart(ctx: *Context, name: []const u8, raw_atts: [*:null]?[*:0]const u8) !void { var atts = struct { name: ?[]const u8 = null, interface: ?[]const u8 = null, version: ?u32 = null, since: ?u32 = null, @"type": ?[]const u8 = null, value: ?[]const u8 = null, summary: ?[]const u8 = null, allow_null: ?bool = null, @"enum": ?[]const u8 = null, bitfield: ?bool = null, }{}; var i: usize = 0; while (raw_atts[i]) |att| : (i += 2) { inline for (@typeInfo(@TypeOf(atts)).Struct.fields) |field| { if (mem.eql(u8, field.name, mem.span(att)) or (mem.eql(u8, field.name, "allow_null") and mem.eql(u8, mem.span(att), "allow-null"))) { const val = mem.span(raw_atts[i + 1].?); if (field.field_type == ?u32) { @field(atts, field.name) = try std.fmt.parseInt(u32, val, 10); } else if (field.field_type == ?bool) { // TODO: error if != true and != false if (mem.eql(u8, val, "true")) @field(atts, field.name) = true else if (mem.eql(u8, val, "false")) @field(atts, field.name) = false; } else { @field(atts, field.name) = val; } } } } if (mem.eql(u8, name, "protocol")) { const protocol_name = try mem.dupe(gpa, u8, atts.name.?); const namespace = if (mem.eql(u8, protocol_name, "wayland")) "wl" else prefix(protocol_name); ctx.protocol = Protocol{ .name = protocol_name, .namespace = namespace }; } else if (mem.eql(u8, name, "interface")) { ctx.interface = try ctx.protocol.interfaces.addOne(); ctx.interface.* = .{ .name = try mem.dupe(gpa, u8, atts.name.?), .version = atts.version.?, }; } else if (mem.eql(u8, name, "event") or mem.eql(u8, name, "request")) { const list = if (mem.eql(u8, name, "event")) &ctx.interface.events else &ctx.interface.requests; ctx.message = try list.addOne(); ctx.message.* = .{ .name = try mem.dupe(gpa, u8, atts.name.?), .kind = if (atts.@"type" != null and mem.eql(u8, atts.@"type".?, "destructor")) .destructor else .normal, .since = atts.since orelse 1, }; } else if (mem.eql(u8, 
// handleStart continued: <arg> maps the type attribute to an Arg.Type tag via
// std.meta.stringToEnum (a new_id arg also turns the enclosing message into a
// .constructor carrying the optional interface name); <enum> and <entry> append to the
// current interface/enumeration. end() is the expat end-element callback and only clears
// the accumulated character data; characterData appends expat text chunks to
// ctx.character_data (exiting the process on OOM, since callbacks cannot return errors).
// prefix/trimPrefix split an identifier at its first '_' (e.g. "wl_display" ->
// "wl" / "display"); both assume an underscore is present (.? asserts it).
name, "arg")) { const kind = std.meta.stringToEnum(@TagType(Arg.Type), mem.span(atts.@"type".?)) orelse return error.InvalidType; if (kind == .new_id) ctx.message.kind = .{ .constructor = if (atts.interface) |f| try mem.dupe(gpa, u8, f) else null }; try ctx.message.args.append(.{ .name = try mem.dupe(gpa, u8, atts.name.?), .kind = switch (kind) { .object => .{ .object = if (atts.interface) |f| try mem.dupe(gpa, u8, f) else null }, .new_id => .{ .new_id = if (atts.interface) |f| try mem.dupe(gpa, u8, f) else null }, .int => .int, .uint => .uint, .fixed => .fixed, .string => .string, .array => .array, .fd => .fd, }, // TODO: enforce != false -> error, require if object/string/array .allow_null = if (atts.allow_null) |a_n| a_n else false, .enum_name = if (atts.@"enum") |e| try mem.dupe(gpa, u8, e) else null, }); } else if (mem.eql(u8, name, "enum")) { ctx.enumeration = try ctx.interface.enums.addOne(); ctx.enumeration.* = .{ .name = try mem.dupe(gpa, u8, atts.name.?), .since = atts.since orelse 1, .bitfield = if (atts.bitfield) |b| b else false, }; } else if (mem.eql(u8, name, "entry")) { try ctx.enumeration.entries.append(.{ .name = try mem.dupe(gpa, u8, atts.name.?), .since = atts.since orelse 1, .value = try mem.dupe(gpa, u8, atts.value.?), }); } } fn end(user_data: ?*c_void, name: ?[*:0]const u8) callconv(.C) void { const ctx = @intToPtr(*Context, @ptrToInt(user_data)); defer ctx.character_data.items.len = 0; } fn characterData(user_data: ?*c_void, s: ?[*]const u8, len: i32) callconv(.C) void { const ctx = @intToPtr(*Context, @ptrToInt(user_data)); ctx.character_data.appendSlice(s.?[0..@intCast(usize, len)]) catch std.os.exit(1); } fn prefix(s: []const u8) []const u8 { return s[0..mem.indexOfScalar(u8, s, '_').?]; } fn trimPrefix(s: []const u8) []const u8 { return s[mem.indexOfScalar(u8, s, '_').? 
// case() converts snake_case to TitleCase or camelCase into a single shared 512-byte
// static buffer — NOT reentrant; the returned slice is only valid until the next call.
// printAbsolute/printIdentifier emit fully-qualified Zig identifiers, escaping names that
// are keywords or contain non-identifier characters with the @"..." syntax (checked by
// isValidIdentifier). The "parsing" test feeds protocol/wayland.xml through expat with
// the callbacks above and asserts on the resulting in-memory protocol description.
+ 1 ..]; } var case_buf: [512]u8 = undefined; fn case(out_case: enum { title, camel }, snake_case: []const u8) []const u8 { var i: usize = 0; var upper = out_case == .title; for (snake_case) |ch| { if (ch == '_') { upper = true; continue; } case_buf[i] = if (upper) std.ascii.toUpper(ch) else ch; i += 1; upper = false; } return case_buf[0..i]; } fn printAbsolute(target: Target, writer: anytype, interface: []const u8) !void { try writer.writeAll(@tagName(target)); try writer.writeByte('.'); try printIdentifier(writer, prefix(interface)); try writer.writeByte('.'); try printIdentifier(writer, case(.title, trimPrefix(interface))); } fn printIdentifier(writer: anytype, identifier: []const u8) !void { if (isValidIdentifier(identifier)) try writer.writeAll(identifier) else try writer.print("@\"{}\"", .{identifier}); } fn isValidIdentifier(identifier: []const u8) bool { // !keyword [A-Za-z_] [A-Za-z0-9_]* if (identifier.len == 0) return false; for (identifier) |ch, i| switch (ch) { 'A'...'Z', 'a'...'z', '_' => {}, '0'...'9' => if (i == 0) return false, else => return false, }; return std.zig.Token.getKeyword(identifier) == null; } test "parsing" { const testing = std.testing; const parser = c.XML_ParserCreate(null) orelse return error.ParserCreateFailed; defer c.XML_ParserFree(parser); var ctx = Context{ .protocol = undefined, .interface = undefined, .message = undefined, .enumeration = undefined, }; defer ctx.deinit(); c.XML_SetUserData(parser, &ctx); c.XML_SetElementHandler(parser, start, end); c.XML_SetCharacterDataHandler(parser, characterData); const sample = @embedFile("protocol/wayland.xml"); if (c.XML_Parse(parser, sample, sample.len, 1) == .XML_STATUS_ERROR) return error.ParserError; testing.expectEqualSlices(u8, "wayland", ctx.protocol.name); testing.expectEqual(@as(usize, 22), ctx.protocol.interfaces.items.len); { const wl_display = ctx.protocol.interfaces.items[0]; testing.expectEqualSlices(u8, "wl_display", wl_display.name); testing.expectEqual(@as(u32, 1), 
// "parsing" test continued: wl_display's sync request must be a constructor taking a
// non-null new_id arg bound to wl_callback, and its error event carries the three args
// object_id (untyped object), code (uint) and message (string), none allow-null.
wl_display.version); testing.expectEqual(@as(usize, 2), wl_display.requests.items.len); testing.expectEqual(@as(usize, 2), wl_display.events.items.len); testing.expectEqual(@as(usize, 1), wl_display.enums.items.len); { const sync = wl_display.requests.items[0]; testing.expectEqualSlices(u8, "sync", sync.name); testing.expectEqual(@as(u32, 1), sync.since); testing.expectEqual(@as(usize, 1), sync.args.items.len); { const callback = sync.args.items[0]; testing.expectEqualSlices(u8, "callback", callback.name); testing.expect(callback.kind == .new_id); testing.expectEqualSlices(u8, "wl_callback", callback.kind.new_id.?); testing.expectEqual(false, callback.allow_null); testing.expectEqual(@as(?[]const u8, null), callback.enum_name); } testing.expect(sync.kind == .constructor); } { const error_event = wl_display.events.items[0]; testing.expectEqualSlices(u8, "error", error_event.name); testing.expectEqual(@as(u32, 1), error_event.since); testing.expectEqual(@as(usize, 3), error_event.args.items.len); { const object_id = error_event.args.items[0]; testing.expectEqualSlices(u8, "object_id", object_id.name); testing.expectEqual(Arg.Type{ .object = null }, object_id.kind); testing.expectEqual(false, object_id.allow_null); testing.expectEqual(@as(?[]const u8, null), object_id.enum_name); } { const code = error_event.args.items[1]; testing.expectEqualSlices(u8, "code", code.name); testing.expectEqual(Arg.Type.uint, code.kind); testing.expectEqual(false, code.allow_null); testing.expectEqual(@as(?[]const u8, null), code.enum_name); } { const message = error_event.args.items[2]; testing.expectEqualSlices(u8, "message", message.name); testing.expectEqual(Arg.Type.string, message.kind); testing.expectEqual(false, message.allow_null); testing.expectEqual(@as(?[]const u8, null), message.enum_name); } } { const error_enum = wl_display.enums.items[0]; testing.expectEqualSlices(u8, "error", error_enum.name); testing.expectEqual(@as(u32, 1), error_enum.since); 
// "parsing" test continued: wl_display's error enum has the four entries invalid_object,
// invalid_method, no_memory and implementation with string values "0".."3" and is not a
// bitfield; wl_data_offer (interface index 7) is version 3 with 5 requests, 3 events and
// 1 enum.
testing.expectEqual(@as(usize, 4), error_enum.entries.items.len); { const invalid_object = error_enum.entries.items[0]; testing.expectEqualSlices(u8, "invalid_object", invalid_object.name); testing.expectEqual(@as(u32, 1), invalid_object.since); testing.expectEqualSlices(u8, "0", invalid_object.value); } { const invalid_method = error_enum.entries.items[1]; testing.expectEqualSlices(u8, "invalid_method", invalid_method.name); testing.expectEqual(@as(u32, 1), invalid_method.since); testing.expectEqualSlices(u8, "1", invalid_method.value); } { const no_memory = error_enum.entries.items[2]; testing.expectEqualSlices(u8, "no_memory", no_memory.name); testing.expectEqual(@as(u32, 1), no_memory.since); testing.expectEqualSlices(u8, "2", no_memory.value); } { const implementation = error_enum.entries.items[3]; testing.expectEqualSlices(u8, "implementation", implementation.name); testing.expectEqual(@as(u32, 1), implementation.since); testing.expectEqualSlices(u8, "3", implementation.value); } testing.expectEqual(false, error_enum.bitfield); } } { const wl_data_offer = ctx.protocol.interfaces.items[7]; testing.expectEqualSlices(u8, "wl_data_offer", wl_data_offer.name); testing.expectEqual(@as(u32, 3), wl_data_offer.version); testing.expectEqual(@as(usize, 5), wl_data_offer.requests.items.len); testing.expectEqual(@as(usize, 3), wl_data_offer.events.items.len); testing.expectEqual(@as(usize, 1), wl_data_offer.enums.items.len); { const accept = wl_data_offer.requests.items[0]; testing.expectEqualSlices(u8, "accept", accept.name); testing.expectEqual(@as(u32, 1), accept.since); testing.expectEqual(@as(usize, 2), accept.args.items.len); { const serial = accept.args.items[0]; testing.expectEqualSlices(u8, "serial", serial.name); testing.expectEqual(Arg.Type.uint, serial.kind); testing.expectEqual(false, serial.allow_null); } { const mime_type = accept.args.items[1]; testing.expectEqualSlices(u8, "mime_type", mime_type.name); testing.expectEqual(Arg.Type.string, mime_type.kind); 
// "parsing" test end: wl_data_offer.accept's mime_type string arg is allow-null.
testing.expectEqual(true, mime_type.allow_null); } } } }
scanner.zig
// VirtualXT build script, written against the Zig 0.9 build API (see assert_version(0,
// expected_zig_version) below). c_options are the compiler flags shared by all C targets.
// find_test scans a C source buffer from `start` for the next "\nTEST(" marker and
// returns the test's name plus the offset to resume scanning from, or null when no more
// are found. create_test_files (re)creates test/test.h and test/test.zig with their
// boilerplate headers. parse_file reads a C source file (up to 1 MiB) and, for every
// TEST(...) found, appends an extern declaration to test/test.h and a matching Zig
// `test` wrapper to test/test.zig.
const std = @import("std"); const builtin = @import("builtin"); const Builder = std.build.Builder; const print = std.debug.print; const expected_zig_version: u32 = 9; const c_options = &[_][]const u8{ "-Wall", "-Wextra", "-Werror", // Looks like there is some kind of bug that prevents you from only using 'unsigned-integer-overflow'. // Normally 'signed-integer-overflow' should be sanitized. "-fno-sanitize=signed-integer-overflow", "-fno-sanitize=unsigned-integer-overflow", }; const CTest = struct { name: []const u8, offset: usize }; fn find_test(buffer: []const u8, start: usize) ?CTest { const prefix = std.mem.indexOfPos(u8, buffer, start, "\nTEST(") orelse return null; const sufix = std.mem.indexOfPos(u8, buffer, prefix, ",") orelse return null; return CTest{.name = buffer[prefix+6..sufix], .offset = sufix}; } fn create_test_files() anyerror!void { const cwd = std.fs.cwd(); try cwd.makePath("test"); var h_out = try cwd.createFile("test/test.h", .{.truncate = true}); defer h_out.close(); try h_out.writer().print("#include <testing.h>\n\n", .{}); var zig_out = try cwd.createFile("test/test.zig", .{.truncate = true}); defer zig_out.close(); try zig_out.writer().print("const std = @import(\"std\");\nconst c = @cImport({{\n    @cInclude(\"testing.h\");\n    @cInclude(\"test.h\");\n}});\n\n", .{}); } fn parse_file(name: []const u8) anyerror!void { var gp = std.heap.GeneralPurposeAllocator(.{.safety = true}){}; defer _ = gp.deinit(); const allocator = gp.allocator(); const cwd = std.fs.cwd(); var file = try cwd.openFile(name, .{}); defer file.close(); const megabyte = 1024*1024; const file_buffer = try file.readToEndAlloc(allocator, megabyte); defer allocator.free(file_buffer); var h_out = try cwd.openFile("test/test.h", .{.write = true}); defer h_out.close(); try h_out.seekFromEnd(0); var zig_out = try cwd.openFile("test/test.zig", .{.write = true}); defer zig_out.close(); try zig_out.seekFromEnd(0); var start: usize = 0; while (true) { const test_name = 
// parse_file continued (append loop). build_libvxt compiles lib/vxt into a static
// library named "vxt.<os>.<arch>"; the 286 and V20 CPU macros are mutually exclusive
// (`testing` forces VXT_CPU_286), and test builds additionally define TESTING, link
// libc, force ReleaseSafe and regenerate the test scaffolding via create_test_files.
// Non-test, non-wasm builds are installed to build/lib.
find_test(file_buffer, start) orelse break; try h_out.writer().print("extern int test_{s}(struct Test T);\n", .{test_name.name}); try zig_out.writer().print("test \"{s}\" {{\n    try std.testing.expect(c.run_test(c.test_{s}));\n}}\n", .{test_name.name, test_name.name}); start = test_name.offset; } } fn build_libvxt(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget, cpu286: bool, cpuV20: bool, testing: bool) *std.build.LibExeObjStep { const libname = b.fmt("vxt.{s}.{s}", .{@tagName(target.getOsTag()), @tagName(target.getCpuArch())}); const lib = b.addStaticLibrary(libname, null); lib.addIncludeDir("lib/vxt/include"); lib.setTarget(target); if (cpu286 or testing) { lib.defineCMacroRaw("VXT_CPU_286"); } else if (cpuV20) { lib.defineCMacroRaw("VXT_CPU_V20"); } if (testing) { lib.defineCMacroRaw("TESTING"); lib.linkLibC(); lib.setBuildMode(.ReleaseSafe); create_test_files() catch unreachable; } else { lib.setBuildMode(mode); if (!target.toTarget().isWasm()) { lib.setOutputDir("build/lib"); lib.install(); } } const opt = c_options ++ &[_][]const u8{ "-Wstrict-prototypes", "-Wcast-align", "-Wno-unused-function", // Needed for unit tests. 
// build_libvxt continued: the libvxt C sources (testsuit.c only in test builds); each
// file is also run through parse_file when testing so its TEST() cases get Zig wrappers.
// assert_version panics unless builtin.zig_version matches the expected major.minor.
// build() declares the user-facing options (validator/pcap/sdl-path/at/v20) and then
// configures the "lib" step plus the termbox and inih static libraries.
"-std=c11", "-pedantic", }; const files = &[_][]const u8{ "lib/vxt/system.c", "lib/vxt/dummy.c", "lib/vxt/memory.c", "lib/vxt/cpu.c", "lib/vxt/debugger.c", }; for (files) |file| { lib.addCSourceFile(file, opt); if (testing) { parse_file(file) catch unreachable; } } if (testing) { const file = "lib/vxt/testsuit.c"; lib.addCSourceFile(file, opt); parse_file(file) catch unreachable; } return lib; } fn assert_version(major: u32, minor: u32) void { const a = builtin.zig_version.major; const b = builtin.zig_version.minor; if (a != major or b != minor) { std.debug.panic("invalid Zig version, expected {}.{} but got {}.{}", .{ major, minor, a, b }); } } pub fn build(b: *Builder) void { assert_version(0, expected_zig_version); //const textmode = b.option(bool, "textmode", "Build for textmode only") orelse false; const validator = b.option(bool, "validator", "Enable PI8088 hardware validator") orelse false; const network = b.option(bool, "pcap", "Link with libpcap") orelse false; const sdl_path = b.option([]const u8, "sdl-path", "Path to SDL2 headers and libs") orelse null; const cpu286 = b.option(bool, "at", "Enable Intel 286 and IBM AT support") orelse false; const cpuV20 = b.option(bool, "v20", "Enable NEC V20 CPU support") orelse false; const mode = b.standardReleaseOptions(); const target = b.standardTargetOptions(.{}); //const wasm = target.toTarget().isWasm(); // -------- libvxt -------- const libvxt = build_libvxt(b, mode, target, cpu286, cpuV20, false); b.step("lib", "Build libvxt").dependOn(&libvxt.step); // -------- termbox -------- const termbox = b.addStaticLibrary("termbox", null); termbox.setBuildMode(mode); termbox.setTarget(target); termbox.linkLibC(); termbox.addIncludeDir("lib/termbox/src"); termbox.addCSourceFile("lib/termbox/src/termbox.c", c_options); termbox.addCSourceFile("lib/termbox/src/utf8.c", c_options); // -------- ini -------- const ini = b.addStaticLibrary("ini", "lib/inih/ini.c"); ini.setBuildMode(mode); ini.setTarget(target); ini.linkLibC(); 
// build() continued: nuked-opl3, microui and the vxtp peripheral static library. vxtp
// compiles every peripheral C file with -std=c11 -pedantic, optionally defines the
// PI8088 validator and CPU macros, gates network.c behind the pcap option (linking
// libpcap), and always links the Nuked OPL3 core with VXTP_NUKED_OPL3 defined.
// -------- nuked-opl3 -------- const opl3 = b.addStaticLibrary("nuked-opl3", "lib/nuked-opl3/opl3.c"); opl3.setBuildMode(mode); opl3.setTarget(target); opl3.linkLibC(); // -------- microui -------- const microui = b.addStaticLibrary("microui", null); microui.setBuildMode(mode); microui.setTarget(target); microui.linkLibC(); microui.addIncludeDir("lib/microui/src"); microui.addCSourceFile("lib/microui/src/microui.c", c_options ++ &[_][]const u8{"-std=c11", "-pedantic"}); // -------- pirepheral -------- const pirepheral = b.addStaticLibrary("vxtp", null); { pirepheral.setBuildMode(mode); pirepheral.setTarget(target); pirepheral.linkLibC(); pirepheral.addIncludeDir("lib/vxtp"); pirepheral.addIncludeDir("lib/vxt/include"); if (validator) { pirepheral.defineCMacroRaw("PI8088"); } if (cpu286) { pirepheral.defineCMacroRaw("VXT_CPU_286"); } else if (cpuV20) { pirepheral.defineCMacroRaw("VXT_CPU_V20"); } const opt = c_options ++ &[_][]const u8{"-std=c11", "-pedantic"}; pirepheral.addCSourceFile("lib/vxtp/disk.c", opt); pirepheral.addCSourceFile("lib/vxtp/pic.c", opt); pirepheral.addCSourceFile("lib/vxtp/ppi.c", opt); pirepheral.addCSourceFile("lib/vxtp/pit.c", opt); pirepheral.addCSourceFile("lib/vxtp/dma.c", opt); pirepheral.addCSourceFile("lib/vxtp/mda.c", opt); pirepheral.addCSourceFile("lib/vxtp/cga.c", opt); pirepheral.addCSourceFile("lib/vxtp/vga.c", opt); //pirepheral.addCSourceFile("lib/vxtp/ioext.c", opt); pirepheral.addCSourceFile("lib/vxtp/mouse.c", opt); pirepheral.addCSourceFile("lib/vxtp/joystick.c", opt); pirepheral.addCSourceFile("lib/vxtp/post.c", opt); pirepheral.addCSourceFile("lib/vxtp/rtc.c", opt); pirepheral.addCSourceFile("lib/vxtp/adlib.c", opt); pirepheral.addCSourceFile("lib/vxtp/fdc.c", opt); if (network) { pirepheral.defineCMacroRaw("VXTP_NETWORK"); pirepheral.addCSourceFile("lib/vxtp/network.c", opt); pirepheral.linkSystemLibrary("pcap"); } pirepheral.linkLibrary(opl3); pirepheral.defineCMacroRaw("VXTP_NUKED_OPL3"); 
// build() continued: the SDL2 front-end executable. It links libvxt, vxtp, inih and
// opl3, honors -Dsdl-path for SDL2 header/library locations (using lib/x64 on
// windows-gnu targets), optionally links gpiod and builds the PI8088 validator sources,
// and defines the same CPU/network macros as the libraries. A libretro core shared
// library ("vxt-libretro.<os>.<arch>") is also configured with its own libvxt build.
pirepheral.addIncludeDir("lib/nuked-opl3"); } // -------- virtualxt sdl -------- const exe_sdl = b.addExecutable("virtualxt", "front/sdl/main.zig"); exe_sdl.addIncludeDir("front/sdl"); exe_sdl.defineCMacroRaw("ENTRY=c_main"); exe_sdl.setBuildMode(mode); exe_sdl.setTarget(target); exe_sdl.setOutputDir("build/bin"); exe_sdl.defineCMacroRaw(b.fmt("PLATFORM={s}", .{@tagName(target.getOsTag())})); if (sdl_path != null) { const p = .{sdl_path}; print("SDL2 location: {s}\n", p); exe_sdl.addIncludeDir(b.fmt("{s}/include", p)); if (target.isWindows() and target.getAbi() == .gnu) { exe_sdl.addLibPath(b.fmt("{s}/lib/x64", p)); } else { exe_sdl.addLibPath(b.fmt("{s}/lib", p)); } } exe_sdl.linkSystemLibrary("SDL2"); exe_sdl.linkLibrary(libvxt); exe_sdl.addIncludeDir("lib/vxt/include"); exe_sdl.linkLibrary(pirepheral); exe_sdl.addIncludeDir("lib/vxtp"); exe_sdl.linkLibrary(ini); exe_sdl.addIncludeDir("lib/inih"); exe_sdl.linkLibrary(opl3); // Part of vxtp? exe_sdl.linkLibC(); const opt = c_options ++ &[_][]const u8{"-std=c11", "-pedantic"}; exe_sdl.addCSourceFile("front/sdl/main.c", opt); exe_sdl.addCSourceFile("front/sdl/docopt.c", &[_][]const u8{"-std=c11", "-Wno-unused-variable", "-Wno-unused-parameter"}); if (cpu286) { exe_sdl.defineCMacroRaw("VXT_CPU_286"); } else if (cpuV20) { exe_sdl.defineCMacroRaw("VXT_CPU_V20"); } if (validator) { exe_sdl.linkSystemLibrary("gpiod"); exe_sdl.defineCMacroRaw("PI8088"); exe_sdl.addCSourceFile("tools/validator/pi8088/pi8088.c", opt); } if (network) { exe_sdl.defineCMacroRaw("VXTP_NETWORK"); } // -------- virtualxt libretro -------- { const libname = b.fmt("vxt-libretro.{s}.{s}", .{@tagName(target.getOsTag()), @tagName(target.getCpuArch())}); const libretro = b.addSharedLibrary(libname, null, .unversioned); libretro.setBuildMode(mode); libretro.setTarget(target); libretro.setOutputDir("build/lib"); libretro.linkLibrary(build_libvxt(b, mode, target, cpu286, cpuV20, false)); libretro.addIncludeDir("lib/vxt/include"); 
// build() continued: libretro core sources and its "libretro" step; the "scrambler"
// RaspberryPi tool (always links gpiod and the validator sources); the "test" step,
// which builds test/test.zig against a TESTING libvxt and chains a cppcheck "check"
// step; and the "doc" step, which runs doxygen from lib/vxt/include.
libretro.addIncludeDir("lib/vxt"); libretro.addIncludeDir("lib/libretro"); libretro.addCSourceFile("front/libretro/core.c", c_options ++ &[_][]const u8{"-std=c11", "-pedantic"}); b.step("libretro", "Build libretro core").dependOn(&libretro.step); } // -------- scrambler -------- { const scrambler = b.addExecutable("scrambler", null); scrambler.setBuildMode(mode); scrambler.setTarget(target); scrambler.setOutputDir("build/bin"); scrambler.linkLibC(); scrambler.linkSystemLibrary("gpiod"); scrambler.defineCMacroRaw("PI8088"); scrambler.linkLibrary(build_libvxt(b, mode, target, cpu286, cpuV20, false)); scrambler.addIncludeDir("lib/vxt/include"); scrambler.addIncludeDir("lib/vxt"); scrambler.addCSourceFile("tools/validator/pi8088/scrambler.c", opt); scrambler.addCSourceFile("tools/validator/pi8088/pi8088.c", opt); b.step("scrambler", "Build scrambler for RaspberryPi").dependOn(&scrambler.step); } // -------- test -------- { const tests = b.addTest("test/test.zig"); tests.setBuildMode(mode); tests.setTarget(target); tests.linkLibC(); tests.defineCMacroRaw("TESTING"); tests.defineCMacroRaw("VXT_CPU_286"); tests.addIncludeDir("test"); const lib_vxt = build_libvxt(b, mode, target, cpu286, cpuV20, true); if (validator) { lib_vxt.linkSystemLibrary("gpiod"); lib_vxt.defineCMacroRaw("PI8088"); lib_vxt.addCSourceFile("tools/validator/pi8088/pi8088.c", opt); } tests.linkLibrary(lib_vxt); tests.addIncludeDir("lib/vxt/include"); tests.addIncludeDir("lib/vxt"); b.step("test", "Run all libvxt tests").dependOn(&tests.step); // -------- check -------- { const check = b.addSystemCommand(&[_][]const u8{"cppcheck", "--enable=style", "lib/vxt"}); check.step.dependOn(&tests.step); b.step("check", "Run CppCheck on libvxt").dependOn(&check.step); } } // -------- doc -------- { const doc = b.addSystemCommand(&[_][]const u8{"doxygen", ".doxygen"}); doc.cwd = "lib/vxt/include"; b.step("doc", "Generate libvxt API documentation").dependOn(&doc.step); } // -------- run -------- const run_cmd = 
// build() end: the "run" step executes the SDL front end, forwarding any `--` args from
// the zig build command line; the default step builds and installs the executable.
exe_sdl.run(); run_cmd.step.dependOn(&exe_sdl.step); if (b.args) |args| { run_cmd.addArgs(args); } b.step("run", "Run VirtualXT").dependOn(&run_cmd.step); // -------- artifact -------- b.default_step.dependOn(&exe_sdl.step); b.installArtifact(exe_sdl); }
build.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
const List = std.ArrayList;
const Map = std.AutoHashMap;
const StrMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const Str = []const u8;

const util = @import("util.zig");
const gpa = util.gpa;

const data = @embedFile("../data/puzzle/day06.txt");

/// Advent of Code 2021 day 6 (lanternfish). The population is tracked as a
/// histogram of fish-per-timer-value rather than as individual fish, so one
/// simulated day costs O(fish_life) regardless of population size.
/// T is the counter type, fish_life the number of timer slots (9 for the
/// puzzle: timers 0..8).
fn FishWorld(comptime T: type, comptime fish_life: usize) type {
    return struct {
        day: u64 = 1,
        // counts[i] = number of fish whose spawn timer currently reads i.
        counts: [fish_life]T = [_]T{0} ** fish_life,

        const Self = @This();

        /// Advance the world by one day: every timer decreases by one; fish
        /// that were at 0 reset to fish_life - 3 and each spawns a new fish
        /// with timer fish_life - 1 (i.e. 6 and 8 in the standard 9-slot
        /// world). May overflow T for very long simulations (traps in safe
        /// builds).
        fn advance(self: *Self) void {
            comptime std.debug.assert(fish_life >= 3);
            self.day += 1;
            const spawners = self.counts[0];
            // Shift every bucket down one slot (timer decrement).
            var idx: usize = 0;
            while (idx < fish_life - 1) : (idx += 1) {
                self.counts[idx] = self.counts[idx + 1];
            }
            // Fix: the original hard-coded indices 6 and 8 here, which was
            // only correct for fish_life == 9; derive them from fish_life so
            // the comptime parameter is actually honored. Behavior for the
            // 9-slot world is unchanged.
            self.counts[fish_life - 3] += spawners;
            self.counts[fish_life - 1] = spawners;
        }

        /// Total number of fish alive (sum over the histogram).
        fn totalFish(self: Self) T {
            var total: T = 0;
            for (self.counts) |count| {
                total += count;
            }
            return total;
        }
    };
}

pub fn main() !void {
    // Parse the comma-separated initial timers into both representations:
    // the raw array used by pass 1 and the FishWorld struct used by pass 2.
    var counts: [9]u64 = [_]u64{0} ** 9;
    var world = FishWorld(u64, 9){};
    var words = tokenize(data, ",\r\n");
    while (words.next()) |line| {
        const x = parseInt(u64, line, 10) catch unreachable;
        counts[x] += 1;
        world.counts[x] += 1;
    }

    // Pass 1, let's simulate.
    // Fairly sure there's an O(1) solution as well...
    var day: u64 = 1;
    var last_day: u64 = 256;
    while (day <= last_day) : (day += 1) {
        var c0 = counts[0];
        var idx: u64 = 0;
        var total_fish: u64 = 0;
        while (idx < 8) : (idx += 1) {
            counts[idx] = counts[idx + 1];
            total_fish += counts[idx];
        }
        counts[8] = c0;
        counts[6] += c0;
        // c0 counted twice: once for the reset fish, once for the newborns.
        total_fish += 2 * c0;
        // Ok, I kinda want to print a bit of history, but I don't want to show eeeverything...
        if (day < 10 or day % 10 == 0 or last_day / 10 == day / 10) {
            print("{d: >3} {}\n", .{ day, total_fish });
        }
    }

    // V2. Using a struct; must produce the same history as pass 1.
    while (world.day <= last_day) {
        world.advance();
        if (world.day < 10 or world.day % 10 == 0 or last_day / 10 == world.day / 10) {
            print("{d: >3} {}\n", .{ world.day, world.totalFish() });
        }
    }
}

// Useful stdlib functions
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;
const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;
const min = std.math.min;
const min3 = std.math.min3;
const max = std.math.max;
const max3 = std.math.max3;
const print = std.debug.print;
const assert = std.debug.assert;
const sort = std.sort.sort;
const asc = std.sort.asc;
const desc = std.sort.desc;
src/day06.zig
// Intrusive circular singly-linked list (written for a very old Zig — var-args warn,
// std.debug.global_allocator, assertError). Link is a single forward pointer embedded in
// user structs. SingularLinkList stores only `tail`: tail.ptr points at the last link,
// and tail.ptr.?.ptr is the head, so append is O(1) at both ends of the cycle.
// NonMutatableIterator caches the head at init and sets doneFlag once the walk wraps
// back to it — valid only while the list is not mutated. next(pPrev) walks from the head
// (pPrev == null asks for the head) and returns null once pPrev.ptr points back at the
// head. linkTo recovers the containing struct from an embedded Link via @fieldParentPtr;
// append splices the new link in front of the head and makes it the new tail.
pub const Link = struct { ptr: ?*Link, }; pub const SingularLinkList = struct { const Self = @This(); // tail.ptr.?.ptr is head tail: Link, fn init() Self { return Self { .tail = Link { .ptr = null, }, }; } // Assumes the list is NOT mutated while being traversed // for maximum performance const NonMutatableIterator = struct { const Self = @This(); pSll: *SingularLinkList, pCur: ?*Link, pHead: ?*Link, doneFlag: bool, fn init(pSelf: *NonMutatableIterator, pSll: *SingularLinkList) ?*Link { pSelf.pSll = pSll; if (pSll.tail.ptr == null) { pSelf.pHead = null; pSelf.pCur = null; pSelf.doneFlag = true; } else { pSelf.pHead = pSll.tail.ptr.?.ptr orelse unreachable; pSelf.pCur = pSelf.pHead; pSelf.doneFlag = false; } return pSelf.pCur; } fn done(pSelf: *NonMutatableIterator) bool { return pSelf.doneFlag; } fn next(pSelf: *NonMutatableIterator) ?*Link { var pNext = pSelf.pCur.?.ptr.?; if (pNext == pSelf.pHead) { // If back to head we're done pSelf.doneFlag = true; } pSelf.pCur = pNext; return pNext; } }; fn next(pSelf: *Self, pPrev: ?*Link) ?*Link { if (pSelf.tail.ptr == null) return null; // empty list if (pPrev == null) return pSelf.tail.ptr.?.ptr; // asking for head if (pPrev.?.ptr == pSelf.tail.ptr.?.ptr) return null; // reached head return null return pPrev.?.ptr; } fn linkTo(comptime T: type, comptime link_field_name: []const u8, pLink: *Link) *T { return @fieldParentPtr(T, link_field_name, pLink); } fn append(pSelf: *Self, pNext: *Link) void { if (pSelf.tail.ptr == null) { pSelf.tail.ptr = pNext; pNext.ptr = pNext; } else { pNext.ptr = pSelf.tail.ptr.?.ptr; pSelf.tail.ptr.?.ptr = pNext; pSelf.tail.ptr = pNext; } } }; // Tests const std = @import("std"); const assert = std.debug.assert; const assertError = std.debug.assertError; const warn = std.debug.warn; const mem = std.mem; const Allocator = mem.Allocator; const Sll = SingularLinkList; const Node = struct { const Self = @This(); link: Link, data: usize, fn initPtr(pSelf: *Self, data: usize) void { pSelf.link.ptr = 
// Node is the test payload: an embedded Link plus a usize; initPtr/init set up an
// unlinked node and linkToNode is the typed @fieldParentPtr helper. Empty-list tests
// verify that both next()-driven loops and the iterator execute zero bodies; the
// one-element test verifies both traversals yield exactly that node.
null; pSelf.data = data; } fn init(data: usize) Self { return Self { .link = Link { .ptr = null }, .data = data, }; } fn linkToNode(pLink: *Link) *Self { return Sll.linkTo(Self, "link", pLink); } }; test "Sll.empty" { var sll = Sll.init(); assert(sll.next(null) == null); var node: Link = undefined; assert(sll.next(&node) == null); var pNode: ?*Link = null; while (sll.next(pNode)) |pLink| { assertError("Expecting the link list to be empty", error.ExpectingEmpty); } // Test we can loop over an empty SSL var p: ?*Link = null; var idx: usize = 0; while (sll.next(p)) |pLink| { assertError("Expecting the link list to be empty", error.ExpectingEmpty); idx += 1; } assert(idx == 0); } test "Sll.empty.iterator" { var sll = Sll.init(); assert(sll.next(null) == null); var node: Link = undefined; assert(sll.next(&node) == null); // Test we can loop over an empty SSL var iter: Sll.NonMutatableIterator = undefined; //var pCur = Sll.NonMutatableIterator.init(&iter, &sll); var pCur = iter.init(&sll); while (!iter.done()) : (pCur = iter.next()) { assertError("Expecting the link list to be empty", error.ExpectingEmpty); } } test "Sll.one.element" { // Initialize var sll = Sll.init(); assert(sll.next(null) == null); // Add one element var node1 = Node.init(1); sll.append(&node1.link); // Test loop var p: ?*Link = null; var idx: usize = 0; while (sll.next(p)) |pLink| { var pNode = Node.linkToNode(pLink); assert(pNode.data == idx + 1); p = pLink; idx += 1; } assert(idx == 1); // Test iterator over a one element list var iter: Sll.NonMutatableIterator = undefined; var pCur: ?*Link = iter.init(&sll); idx = 0; while (!iter.done()) : (pCur = iter.next()) { var pNode = Node.linkToNode(pCur orelse unreachable); warn("Sll.one.element: iter pNode.data={}\n", pNode.data); assert(pNode.data == idx + 1); idx += 1; } assert(idx == 1); } test "Sll.two.elements" { // Initialize var sll = Sll.init(); assert(sll.next(null) == null); // Add two elements var node1 = Node.init(1); var node2 = 
// Two- and three-element tests: nodes must come back in insertion order through both
// next() and the iterator. no_opt() is an empty asm memory barrier used to defeat
// optimization in benchmark loops. The SllBm1000 benchmark allocates a 1000-node list,
// appends every node and times a full next()-based traversal (asserting each payload).
Node.init(2); sll.append(&node1.link); sll.append(&node2.link); // Test loop var p: ?*Link = null; var idx: usize = 0; while (sll.next(p)) |pLink| { var pNode = Node.linkToNode(pLink); assert(pNode.data == idx + 1); p = pLink; idx += 1; } assert(idx == 2); // Test iterator over a two element list var iter: Sll.NonMutatableIterator = undefined; var pCur = iter.init(&sll); idx = 0; while (!iter.done()) : (pCur = iter.next()) { var pNode = Node.linkToNode(pCur orelse unreachable); warn("Sll.two.element: iter pNode.data={}\n", pNode.data); assert(pNode.data == idx + 1); idx += 1; } assert(idx == 2); } test "Sll.three.elements" { // Initialize var sll = Sll.init(); assert(sll.next(null) == null); // Add two elements var node1 = Node.init(1); var node2 = Node.init(2); var node3 = Node.init(3); sll.append(&node1.link); sll.append(&node2.link); sll.append(&node3.link); // Test loop var p: ?*Link = null; var idx: usize = 0; while (sll.next(p)) |pLink| { var pNode = Node.linkToNode(pLink); assert(pNode.data == idx + 1); p = pLink; idx += 1; } assert(idx == 3); // Test iterator over a three element list var iter: Sll.NonMutatableIterator = undefined; var pCur = iter.init(&sll); idx = 0; while (!iter.done()) : (pCur = iter.next()) { var pNode = Node.linkToNode(pCur orelse unreachable); warn("Sll.two.element: iter pNode.data={}\n", pNode.data); assert(pNode.data == idx + 1); idx += 1; } assert(idx == 3); } const Benchmark = @import("../zig-benchmark/benchmark.zig").Benchmark; fn no_opt() void { asm volatile ("": : :"memory"); } test "SllBm1000" { const SllBm1000 = struct { const Self = @This(); pAllocator: *Allocator, sll: Sll, list: []Node, fn init(pAllocator: *Allocator) !Self { var bm: Self = undefined; bm.pAllocator = pAllocator; bm.sll = Sll.init(); bm.list = try pAllocator.alloc(Node, 1000); for (bm.list) |_, i| { var node = &bm.list[i]; node.initPtr(i + 1); bm.sll.append(&node.link); } var p: ?*Link = null; var idx: usize = 0; while (bm.sll.next(p)) |pLink| { var pNode = 
// Benchmark bodies: SllBm1000.benchmark walks all 1000 nodes via next(), asserting each
// payload; SllBm1000.iter does the same traversal through NonMutatableIterator. Both use
// std.debug.global_allocator and the external zig-benchmark harness with 10 repetitions.
Node.linkToNode(pLink); assert(pNode.data == idx + 1); p = pLink; idx += 1; } return bm; } fn deinit(pSelf: *Self) void { pSelf.pAllocator.free(pSelf.list); } fn benchmark(pSelf: *Self) void { var p: ?*Link = null; var idx: usize = 0; while (pSelf.sll.next(p)) |pLink| { //no_opt(); var pNode = Node.linkToNode(pLink); assert(pNode.data == idx + 1); p = pLink; idx += 1; } } }; var pAllocator = std.debug.global_allocator; var bm = Benchmark.init("SllBM1000", pAllocator); bm.repetitions = 10; var sllBm1000 = try SllBm1000.init(pAllocator); defer sllBm1000.deinit(); warn("\n"); try bm.run(&sllBm1000); } test "SllBm1000.iter" { const SllBm1000 = struct { const Self = @This(); pAllocator: *Allocator, sll: Sll, list: []Node, fn init(pAllocator: *Allocator) !Self { var bm: Self = undefined; bm.pAllocator = pAllocator; bm.sll = Sll.init(); bm.list = try pAllocator.alloc(Node, 1000); for (bm.list) |_, i| { var node = &bm.list[i]; node.initPtr(i + 1); bm.sll.append(&node.link); } var p: ?*Link = null; var idx: usize = 0; while (bm.sll.next(p)) |pLink| { var pNode = Node.linkToNode(pLink); assert(pNode.data == idx + 1); p = pLink; idx += 1; } return bm; } fn deinit(pSelf: *Self) void { pSelf.pAllocator.free(pSelf.list); } fn benchmark(pSelf: *Self) void { var iter: Sll.NonMutatableIterator = undefined; var pCur = iter.init(&pSelf.sll); var idx: usize = 0; while (!iter.done()) : (pCur = iter.next()) { var pNode = Node.linkToNode(pCur orelse unreachable); assert(pNode.data == idx + 1); idx += 1; } } }; var pAllocator = std.debug.global_allocator; var bm = Benchmark.init("SllBM1000.iter", pAllocator); bm.repetitions = 10; var sllBm1000 = try SllBm1000.init(pAllocator); defer sllBm1000.deinit(); warn("\n"); try bm.run(&sllBm1000); }
singular_link_list.zig
//! ToString: appends textual representations of values into a caller-provided
//! byte buffer, tracking how much of the buffer has been filled so far.
const std = @import("std");
const utils = @import("utils.zig");

pub const Error = utils.Error;

const ToString = @This();

buffer: []u8, // destination for all output; owned by the caller
got: usize = 0, // number of bytes written into `buffer` so far

/// Fail with Error.NotEnoughDestination unless at least `space` more bytes fit.
pub fn need(self: *const ToString, space: usize) Error!void {
    const remaining = self.buffer.len - self.got;
    if (remaining < space) {
        return Error.NotEnoughDestination;
    }
}

/// The filled portion of the buffer.
pub fn get(self: *ToString) []u8 {
    return self.buffer[0..self.got];
}

/// Switch to a (presumably larger) buffer, copying everything written so far.
/// Returns the previous buffer so the caller can release or reuse it.
pub fn upgrade_buffer(self: *ToString, new: []u8) Error![]u8 {
    const previous = self.buffer;
    _ = try utils.memory_copy_error(new, previous);
    self.buffer = new;
    return previous;
}

// Unchecked single-byte append; callers must have verified capacity via need().
fn _char(self: *ToString, c: u8) void {
    self.buffer[self.got] = c;
    self.got += 1;
}

/// Append one byte, checking capacity first.
pub fn char(self: *ToString, c: u8) Error!void {
    try self.need(1);
    self._char(c);
}

/// Append a byte slice; capacity is enforced by the copy helper.
pub fn string(self: *ToString, src: []const u8) Error!void {
    const copied = try utils.memory_copy_error(self.buffer[self.got..], src);
    self.got += copied;
}

/// Append a NUL-terminated C string.
pub fn cstring(self: *ToString, cstr: [*:0]const u8) Error!void {
    try self.string(utils.cstring_to_string(cstr));
}

// Emit the hex digits of `value`, most-significant first (no capacity checks:
// hex() reserves all the space up front).
fn hex_recurse(self: *ToString, value: usize) void {
    const quotient = value / 0x10;
    if (quotient > 0) {
        self.hex_recurse(quotient);
    }
    self._char(utils.nibble_char(@intCast(u4, value % 0x10)));
}

/// Append `value` formatted as "0x..." hexadecimal.
pub fn hex(self: *ToString, value: usize) Error!void {
    try self.need(2 + utils.hex_char_len(usize, value));
    self._char('0');
    self._char('x');
    if (value == 0) {
        self._char('0');
        return;
    }
    self.hex_recurse(value);
}

// Emit the decimal digits of `value`, most-significant first, checking
// capacity one digit at a time.
fn uint_recurse(self: *ToString, value: usize) Error!void {
    const quotient = value / 10;
    if (quotient > 0) {
        try self.uint_recurse(quotient);
    }
    try self.char('0' + @intCast(u8, value % 10));
}

/// Append `value` formatted as decimal. On failure nothing is kept: the write
/// position is rolled back to where it was before the call.
pub fn uint(self: *ToString, value: usize) Error!void {
    if (value == 0) {
        try self.char('0');
        return;
    }
    const saved_got = self.got;
    self.uint_recurse(value) catch |err| {
        // Restore so partially written digits are not exposed via get().
        self.got = saved_got;
        return err;
    };
}

test "ToString" {
    {
        var buffer: [128]u8 = undefined;
        var ts = ToString{ .buffer = buffer[0..] };
        try ts.string("Hello ");
        try ts.string("");
        try ts.cstring("goodbye!");
        try ts.cstring("");
        try std.testing.expectEqualSlices(u8, ts.get(), "Hello goodbye!");
    }
    {
        var buffer: [1]u8 = undefined;
        var ts = ToString{ .buffer = buffer[0..] };
        try std.testing.expectError(Error.NotEnoughDestination, ts.string("Hello"));
        try std.testing.expectEqualSlices(u8, ts.get(), "");
    }
    {
        var buffer: [128]u8 = undefined;
        var ts = ToString{ .buffer = buffer[0..] };
        try ts.hex(0x0);
        try ts.hex(0x1);
        try ts.hex(0xf);
        try ts.hex(0x10);
        try ts.hex(0xff);
        try ts.hex(0x100);
        try std.testing.expectEqualSlices(u8, ts.get(), "0x00x10xf0x100xff0x100");
    }
    {
        var buffer: [1]u8 = undefined;
        var ts = ToString{ .buffer = buffer[0..] };
        try std.testing.expectError(Error.NotEnoughDestination, ts.hex(0xff));
        try std.testing.expectEqualSlices(u8, ts.get(), "");
    }
    {
        var buffer: [10]u8 = undefined;
        var ts = ToString{ .buffer = buffer[0..] };
        try ts.uint(0);
        try ts.uint(2);
        try ts.uint(14);
        try ts.uint(1346);
        try std.testing.expectError(Error.NotEnoughDestination, ts.uint(23912));
        try std.testing.expectEqualSlices(u8, ts.get(), "02141346");
    }
}
libs/utils/ToString.zig
//! RomuTrio: a fast non-cryptographic pseudo-random number generator with
//! 192 bits of state (three 64-bit words). See http://www.romu-random.org/.
const std = @import("std");
const Random = std.rand.Random;
const math = std.math;

const RomuTrio = @This();

x_state: u64,
y_state: u64,
z_state: u64, // set to nonzero seed

/// Create a generator from a 64-bit seed (expanded to 192 bits via SplitMix64).
pub fn init(init_s: u64) RomuTrio {
    var self = RomuTrio{ .x_state = undefined, .y_state = undefined, .z_state = undefined };
    self.seed(init_s);
    return self;
}

/// Adapt this generator to the std.rand.Random interface.
pub fn random(self: *RomuTrio) Random {
    return Random.init(self, fill);
}

// Advance the state one step and return the next 64-bit output.
fn next(self: *RomuTrio) u64 {
    const old_x = self.x_state;
    const old_y = self.y_state;
    const old_z = self.z_state;
    self.x_state = 15241094284759029579 *% old_z;
    self.y_state = std.math.rotl(u64, old_y -% old_x, 12);
    self.z_state = std.math.rotl(u64, old_z -% old_y, 44);
    return old_x;
}

/// Load the full 192-bit state directly from a 24-byte buffer.
pub fn seedWithBuf(self: *RomuTrio, buf: [24]u8) void {
    const words = @bitCast([3]u64, buf);
    self.x_state = words[0];
    self.y_state = words[1];
    self.z_state = words[2];
}

/// Expand a 64-bit seed into the full state.
pub fn seed(self: *RomuTrio, init_s: u64) void {
    // RomuTrio requires 192-bits of seed.
    var gen = std.rand.SplitMix64.init(init_s);
    self.x_state = gen.next();
    self.y_state = gen.next();
    self.z_state = gen.next();
}

/// Fill `buf` with random bytes, consuming one 64-bit output per 8 bytes.
pub fn fill(self: *RomuTrio, buf: []u8) void {
    const aligned_len = buf.len - (buf.len & 7);
    var i: usize = 0;
    // Complete 8 byte segments.
    while (i < aligned_len) : (i += 8) {
        var word = self.next();
        comptime var j: usize = 0;
        inline while (j < 8) : (j += 1) {
            buf[i + j] = @truncate(u8, word);
            word >>= 8;
        }
    }
    // Remaining tail bytes. (cuts the stream: unused bits of the last
    // output word are discarded)
    if (i != buf.len) {
        var word = self.next();
        while (i < buf.len) : (i += 1) {
            buf[i] = @truncate(u8, word);
            word >>= 8;
        }
    }
}

test "RomuTrio sequence" {
    // Unfortunately there does not seem to be an official test sequence.
    var rng = RomuTrio.init(0);
    const expected = [_]u64{
        16294208416658607535,
        13964609475759908645,
        4703697494102998476,
        3425221541186733346,
        2285772463536419399,
        9454187757529463048,
        13695907680080547496,
        8328236714879408626,
        12323357569716880909,
        12375466223337721820,
    };
    for (expected) |value| {
        try std.testing.expectEqual(value, rng.next());
    }
}

test "RomuTrio fill" {
    // Unfortunately there does not seem to be an official test sequence.
    var rng = RomuTrio.init(0);
    const expected = [_]u64{
        16294208416658607535,
        13964609475759908645,
        4703697494102998476,
        3425221541186733346,
        2285772463536419399,
        9454187757529463048,
        13695907680080547496,
        8328236714879408626,
        12323357569716880909,
        12375466223337721820,
    };
    for (expected) |value| {
        var reference: [8]u8 = undefined;
        var out: [7]u8 = undefined;
        std.mem.writeIntLittle(u64, &reference, value);
        rng.fill(&out);
        try std.testing.expect(std.mem.eql(u8, reference[0..7], out[0..]));
    }
}

test "RomuTrio buf seeding test" {
    const raw = @bitCast([24]u8, [3]u64{ 16294208416658607535, 13964609475759908645, 4703697494102998476 });
    const expected_state = .{ .x = 16294208416658607535, .y = 13964609475759908645, .z = 4703697494102998476 };
    var rng = RomuTrio.init(0);
    rng.seedWithBuf(raw);
    try std.testing.expect(rng.x_state == expected_state.x);
    try std.testing.expect(rng.y_state == expected_state.y);
    try std.testing.expect(rng.z_state == expected_state.z);
}
lib/std/rand/RomuTrio.zig
const std = @import("std");
const warn = std.debug.warn;

/// A labelled run of instructions (":label"). `addr` is null until codegen
/// assigns the block a 12-bit address.
const InstructionBlock = struct {
    instructions: std.ArrayList(Instruction),
    page: u4,
    addr: ?u12,
};

/// One parsed instruction: a 5-character mnemonic plus an optional address token.
const Instruction = struct {
    opcode: []const u8,
    address: ?[]const u8,
};

/// A "$name value" definition. `addr` is null until codegen assigns it.
const VariableDef = struct {
    value: u16,
    page: u4,
    addr: ?u12,
};

/// Assemble OidaVM assembly text into a 4096-word memory image.
///
/// Two stages: a line-by-line parse that collects variables ('$'), labelled
/// instruction blocks (':'), and directives ('@page', '@entry'); then codegen,
/// which lays pages out, resolves addresses and emits opcode words.
/// Word 0 is reserved for a jump to the '@entry' label.
/// Returns error.ParserFailure after reporting all diagnostics on stderr.
pub fn assemble(code: []const u8) ![4096]u16 {
    var allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer allocator.deinit();
    const alloc = &allocator.allocator;
    var had_errors = false;
    var entry_point: ?[]const u8 = null;
    var current_page: u4 = 0;
    var vardefs = std.StringHashMap(VariableDef).init(alloc);
    var blocks = std.StringHashMap(InstructionBlock).init(alloc);
    var current_block: ?InstructionBlock = null;
    var current_block_name: ?[]const u8 = null;

    // Parsing stage
    var line_number: usize = 1;
    var lines = std.mem.split(code, "\n");
    while (lines.next()) |line| : (line_number += 1) {
        var tokens = std.mem.tokenize(line, " ");
        const first_token = tokens.next() orelse continue; // Skip empty lines
        switch (first_token[0]) {
            '#' => continue, // Skip comment lines
            '@' => {
                // Parser Directives
                if (std.mem.eql(u8, first_token, "@page")) {
                    // Set memory page for subsequent definitions (hex digit 0-f).
                    const page_token = tokens.next() orelse {
                        warn("{}: Missing page number\n", .{line_number});
                        had_errors = true;
                        continue;
                    };
                    current_page = std.fmt.parseInt(u4, page_token, 16) catch {
                        warn("{}: Invalid page number {}\n", .{ line_number, page_token });
                        had_errors = true;
                        continue;
                    };
                } else if (std.mem.eql(u8, first_token, "@entry")) {
                    // Set entry point label
                    const label_token = tokens.next() orelse {
                        warn("{}: Missing entry point label\n", .{line_number});
                        had_errors = true;
                        continue;
                    };
                    entry_point = label_token;
                } else {
                    warn("{}: Unknown directive {}\n", .{ line_number, first_token });
                    had_errors = true;
                }
            },
            '$' => {
                // Variable Definitions: "$name [hexvalue]", value defaults to 0.
                const variable_name = first_token;
                const value_token = tokens.next() orelse "0";
                const value = std.fmt.parseInt(u16, value_token, 16) catch {
                    warn("{}: Invalid variable value {}\n", .{ line_number, value_token });
                    had_errors = true;
                    continue;
                };
                if (vardefs.contains(variable_name)) {
                    warn("{}: Redefinition of variable {}\n", .{ line_number, variable_name });
                    had_errors = true;
                    continue;
                }
                try vardefs.putNoClobber(variable_name, .{
                    .value = value,
                    .page = current_page,
                    .addr = null,
                });
            },
            ':' => {
                // Instruction blocks/labels
                if (current_block) |block| {
                    // Save current block into ArrayList
                    // Both current_block_name and block have been set because this is at least the second block
                    // Also, duplicate blocks are checked during creation below, so assert there's no double
                    try blocks.putNoClobber(current_block_name.?, block);
                }
                if (first_token.len < 2) {
                    warn("{}: Missing block name\n", .{line_number});
                    had_errors = true;
                    continue;
                }
                if (blocks.contains(first_token)) {
                    warn("{}: Redefinition of block {}\n", .{ line_number, first_token });
                    had_errors = true;
                    continue;
                }
                current_block = .{
                    .instructions = std.ArrayList(Instruction).init(alloc),
                    .page = current_page,
                    .addr = null,
                };
                current_block_name = first_token;
            },
            else => {
                // Anything else must be a 5-character mnemonic inside a block.
                if (first_token.len != 5) {
                    warn("{}: Malformed instruction {}\n", .{ line_number, first_token });
                    had_errors = true;
                    continue;
                }
                if (current_block != null) {
                    // We can't use if-optional syntax here.
                    // TODO find out why exactly the compiler complains
                    try current_block.?.instructions.append(.{
                        .opcode = first_token,
                        .address = tokens.next(),
                    });
                } else {
                    warn("{}: Instruction outside block\n", .{line_number});
                    had_errors = true;
                    continue;
                }
            },
        }
    }

    // Save last open block into ArrayList
    if (current_block == null or current_block_name == null) {
        warn("{}: Missing any instruction block\n", .{line_number});
        had_errors = true;
    } else {
        try blocks.putNoClobber(current_block_name.?, current_block.?);
    }

    // Codegen stage
    var memory = [_]u16{0} ** 4096;

    // Iterate through all vardefs and blocks per-page and give them addresses.
    // The counter is u5 so that all 16 pages (0-15) are visited without the
    // increment overflowing; a u4 counter with `page < 15` would silently skip
    // page 15, leaving its entries with addr == null and trapping on the
    // `.addr.?` unwraps below.
    var page: u5 = 0;
    while (page < 16) : (page += 1) {
        // On page 0, reserve the first instruction for the entry point
        var in_page_cursor: usize = if (page == 0) 1 else 0;
        var var_it = vardefs.iterator();
        while (var_it.next()) |vardef| {
            if (vardef.value.page != page) continue;
            vardef.value.addr = (@as(u12, page) << 8) + @truncate(u12, in_page_cursor);
            in_page_cursor += 1;
        }
        var block_it = blocks.iterator();
        while (block_it.next()) |block| {
            if (block.value.page != page) continue;
            block.value.addr = (@as(u12, page) << 8) + @truncate(u12, in_page_cursor);
            in_page_cursor += block.value.instructions.items.len;
        }
        // A page holds 256 words (offsets 0x00-0xff) and the cursor points at
        // the next free offset, so only a cursor beyond 256 means overflow;
        // an exactly-full page (cursor == 256) is still valid.
        if (in_page_cursor > 256) {
            warn("Page {} is too full [{}/256]\n", .{ page, in_page_cursor });
            had_errors = true;
            continue;
        }
    }

    // Commit vardefs into memory
    var var_iter = vardefs.iterator();
    while (var_iter.next()) |vardef| {
        memory[vardef.value.addr.?] = vardef.value.value;
    }

    // Commit blocks into memory
    var block_iter = blocks.iterator();
    while (block_iter.next()) |block| {
        for (block.value.instructions.items) |instruction, index| {
            const opcode = parse_instruction(instruction.opcode) catch {
                warn("Encountered invalid opcode {}\n", .{instruction});
                had_errors = true;
                continue;
            };
            var word: u16 = opcode;
            // Paged opcodes (0x1xxx-0x9xxx) carry only an 8-bit in-page offset;
            // global opcodes carry the full 12-bit address.
            const truncate_address = (word >= 0x1000 and word <= 0x9fff);
            if (instruction.address) |address| {
                if (word >= 0xf000) {
                    warn("Encountered extended opcode {} with address\n", .{instruction.opcode});
                    had_errors = true;
                }
                switch (address[0]) {
                    '$' => {
                        const var_ref = vardefs.getValue(address) orelse {
                            warn("Encountered unknown variable {}\n", .{address});
                            had_errors = true;
                            continue;
                        };
                        word += if (truncate_address) @truncate(u8, var_ref.addr.?) else var_ref.addr.?;
                    },
                    ':' => {
                        const block_ref = blocks.getValue(address) orelse {
                            warn("Encountered unknown label {}\n", .{address});
                            had_errors = true;
                            continue;
                        };
                        word += if (truncate_address) @truncate(u8, block_ref.addr.?) else block_ref.addr.?;
                    },
                    else => {
                        warn("Encountered malformed address {}\n", .{address});
                        had_errors = true;
                    },
                }
            }
            memory[block.value.addr.? + index] = word;
        }
    }

    if (entry_point == null) {
        warn("No entry point found\n", .{});
        had_errors = true;
        return error.ParserFailure;
    } else if (!blocks.contains(entry_point.?)) {
        warn("Unknown entry point label {}\n", .{entry_point});
        had_errors = true;
        return error.ParserFailure;
    }
    // Word 0: pgjmp to the entry label.
    memory[0] = (@as(u16, @enumToInt(GlobalOpcode.PageAndJump)) << 12) + blocks.getValue(entry_point.?).?.addr.?;
    return if (had_errors) error.ParserFailure else memory;
}

usingnamespace @import("oidavm.zig");
const eql = std.mem.eql;

/// Map a 5-character mnemonic to its opcode word with a zeroed address field:
/// global opcodes occupy the top nibble, paged opcodes the top byte, and
/// extended opcodes are 0xf000 plus a sub-opcode.
fn parse_instruction(token: []const u8) !u16 {
    if (eql(u8, token, "noopr")) { // Starting here: Global Opcodes
        return @as(u16, @enumToInt(GlobalOpcode.NoOp)) << 12;
    } else if (eql(u8, token, "pgjmp")) {
        return @as(u16, @enumToInt(GlobalOpcode.PageAndJump)) << 12;
    } else if (eql(u8, token, "fftch")) {
        return @as(u16, @enumToInt(GlobalOpcode.FarFetch)) << 12;
    } else if (eql(u8, token, "fwrte")) {
        return @as(u16, @enumToInt(GlobalOpcode.FarWrite)) << 12;
    } else if (eql(u8, token, "incby")) { // Starting here: Paged Opcodes
        return @as(u16, @enumToInt(PagedOpcode.IncrementBy)) << 8;
    } else if (eql(u8, token, "minus")) {
        return @as(u16, @enumToInt(PagedOpcode.Minus)) << 8;
    } else if (eql(u8, token, "fetch")) {
        return @as(u16, @enumToInt(PagedOpcode.Fetch)) << 8;
    } else if (eql(u8, token, "write")) {
        return @as(u16, @enumToInt(PagedOpcode.Write)) << 8;
    } else if (eql(u8, token, "jmpto")) {
        return @as(u16, @enumToInt(PagedOpcode.Jump)) << 8;
    } else if (eql(u8, token, "jmpez")) {
        return @as(u16, @enumToInt(PagedOpcode.JumpEZ)) << 8;
    } else if (eql(u8, token, "cease")) { // Starting here: Extended Opcodes
        return 0xf000 + @as(u16, @enumToInt(ExtendedOpcode.Halt));
    } else if (eql(u8, token, "outnm")) {
        return 0xf000 + @as(u16, @enumToInt(ExtendedOpcode.OutputNumeric));
    } else if (eql(u8, token, "outch")) {
        return 0xf000 + @as(u16, @enumToInt(ExtendedOpcode.OutputChar));
    } else if (eql(u8, token, "outhx")) {
        return 0xf000 + @as(u16, @enumToInt(ExtendedOpcode.OutputHex));
    } else if (eql(u8, token, "outlf")) {
        return 0xf000 + @as(u16, @enumToInt(ExtendedOpcode.OutputLinefeed));
    } else if (eql(u8, token, "inacc")) {
        return 0xf000 + @as(u16, @enumToInt(ExtendedOpcode.InputACC));
    } else if (eql(u8, token, "rando")) {
        return 0xf000 + @as(u16, @enumToInt(ExtendedOpcode.Randomize));
    } else if (eql(u8, token, "augmt")) {
        return 0xf000 + @as(u16, @enumToInt(ExtendedOpcode.Augment));
    } else if (eql(u8, token, "dimin")) {
        return 0xf000 + @as(u16, @enumToInt(ExtendedOpcode.Diminish));
    } else if (eql(u8, token, "shfl4")) {
        return 0xf000 + @as(u16, @enumToInt(ExtendedOpcode.ShiftLeftFour));
    } else if (eql(u8, token, "shfl1")) {
        return 0xf000 + @as(u16, @enumToInt(ExtendedOpcode.ShiftLeftOne));
    } else if (eql(u8, token, "shfr4")) {
        return 0xf000 + @as(u16, @enumToInt(ExtendedOpcode.ShiftRightFour));
    } else if (eql(u8, token, "shfr1")) {
        return 0xf000 + @as(u16, @enumToInt(ExtendedOpcode.ShiftRightOne));
    }
    return error.InvalidOpcode;
}
src/parser.zig
const MachO = @This(); const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; const fs = std.fs; const log = std.log.scoped(.link); const macho = std.macho; const codegen = @import("../codegen.zig"); const math = std.math; const mem = std.mem; const trace = @import("../tracy.zig").trace; const Type = @import("../type.zig").Type; const build_options = @import("build_options"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); const link = @import("../link.zig"); const File = link.File; const Cache = @import("../Cache.zig"); const target_util = @import("../target.zig"); pub const base_tag: File.Tag = File.Tag.macho; const LoadCommand = union(enum) { Segment: macho.segment_command_64, LinkeditData: macho.linkedit_data_command, Symtab: macho.symtab_command, Dysymtab: macho.dysymtab_command, pub fn cmdsize(self: LoadCommand) u32 { return switch (self) { .Segment => |x| x.cmdsize, .LinkeditData => |x| x.cmdsize, .Symtab => |x| x.cmdsize, .Dysymtab => |x| x.cmdsize, }; } pub fn write(self: LoadCommand, file: *fs.File, offset: u64) !void { return switch (self) { .Segment => |cmd| writeGeneric(cmd, file, offset), .LinkeditData => |cmd| writeGeneric(cmd, file, offset), .Symtab => |cmd| writeGeneric(cmd, file, offset), .Dysymtab => |cmd| writeGeneric(cmd, file, offset), }; } fn writeGeneric(cmd: anytype, file: *fs.File, offset: u64) !void { const slice = [1]@TypeOf(cmd){cmd}; return file.pwriteAll(mem.sliceAsBytes(slice[0..1]), offset); } }; base: File, /// Table of all load commands load_commands: std.ArrayListUnmanaged(LoadCommand) = .{}, segment_cmd_index: ?u16 = null, symtab_cmd_index: ?u16 = null, dysymtab_cmd_index: ?u16 = null, data_in_code_cmd_index: ?u16 = null, /// Table of all sections sections: std.ArrayListUnmanaged(macho.section_64) = .{}, /// __TEXT segment sections text_section_index: ?u16 = null, cstring_section_index: ?u16 = null, const_text_section_index: ?u16 = null, 
stubs_section_index: ?u16 = null, stub_helper_section_index: ?u16 = null, /// __DATA segment sections got_section_index: ?u16 = null, const_data_section_index: ?u16 = null, entry_addr: ?u64 = null, /// Table of all symbols used. /// Internally references string table for names (which are optional). symbol_table: std.ArrayListUnmanaged(macho.nlist_64) = .{}, /// Table of symbol names aka the string table. string_table: std.ArrayListUnmanaged(u8) = .{}, /// Table of symbol vaddr values. The values is the absolute vaddr value. /// If the vaddr of the executable __TEXT segment vaddr changes, the entire offset /// table needs to be rewritten. offset_table: std.ArrayListUnmanaged(u64) = .{}, error_flags: File.ErrorFlags = File.ErrorFlags{}, cmd_table_dirty: bool = false, /// Pointer to the last allocated text block last_text_block: ?*TextBlock = null, /// `alloc_num / alloc_den` is the factor of padding when allocating. const alloc_num = 4; const alloc_den = 3; /// Default path to dyld /// TODO instead of hardcoding it, we should probably look through some env vars and search paths /// instead but this will do for now. const DEFAULT_DYLD_PATH: [*:0]const u8 = "/usr/lib/dyld"; /// Default lib search path /// TODO instead of hardcoding it, we should probably look through some env vars and search paths /// instead but this will do for now. 
const DEFAULT_LIB_SEARCH_PATH: []const u8 = "/usr/lib"; const LIB_SYSTEM_NAME: [*:0]const u8 = "System"; /// TODO we should search for libSystem and fail if it doesn't exist, instead of hardcoding it const LIB_SYSTEM_PATH: [*:0]const u8 = DEFAULT_LIB_SEARCH_PATH ++ "/libSystem.B.dylib"; pub const TextBlock = struct { /// Index into the symbol table symbol_table_index: ?u32, /// Index into offset table offset_table_index: ?u32, /// Size of this text block size: u64, /// Points to the previous and next neighbours prev: ?*TextBlock, next: ?*TextBlock, pub const empty = TextBlock{ .symbol_table_index = null, .offset_table_index = null, .size = 0, .prev = null, .next = null, }; }; pub const SrcFn = struct { pub const empty = SrcFn{}; }; pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*MachO { assert(options.object_format == .macho); if (options.use_llvm) return error.LLVM_BackendIsTODO_ForMachO; // TODO if (options.use_lld) return error.LLD_LinkingIsTODO_ForMachO; // TODO const file = try options.emit.?.directory.handle.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options), }); errdefer file.close(); const self = try createEmpty(allocator, options); errdefer self.base.destroy(); self.base.file = file; switch (options.output_mode) { .Exe => {}, .Obj => {}, .Lib => return error.TODOImplementWritingLibFiles, } try self.populateMissingMetadata(); return self; } pub fn createEmpty(gpa: *Allocator, options: link.Options) !*MachO { const self = try gpa.create(MachO); self.* = .{ .base = .{ .tag = .macho, .options = options, .allocator = gpa, .file = null, }, }; return self; } pub fn flush(self: *MachO, comp: *Compilation) !void { if (build_options.have_llvm and self.base.options.use_lld) { return self.linkWithLLD(comp); } else { switch (self.base.options.effectiveOutputMode()) { .Exe, .Obj => {}, .Lib => return error.TODOImplementWritingLibFiles, } return self.flushModule(comp); } } pub fn 
flushModule(self: *MachO, comp: *Compilation) !void { const tracy = trace(@src()); defer tracy.end(); switch (self.base.options.output_mode) { .Exe => { var last_cmd_offset: usize = @sizeOf(macho.mach_header_64); { // Specify path to dynamic linker dyld const cmdsize = commandSize(@sizeOf(macho.dylinker_command) + mem.lenZ(DEFAULT_DYLD_PATH)); const load_dylinker = [1]macho.dylinker_command{ .{ .cmd = macho.LC_LOAD_DYLINKER, .cmdsize = cmdsize, .name = @sizeOf(macho.dylinker_command), }, }; try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylinker[0..1]), last_cmd_offset); const file_offset = last_cmd_offset + @sizeOf(macho.dylinker_command); try self.addPadding(cmdsize - @sizeOf(macho.dylinker_command), file_offset); try self.base.file.?.pwriteAll(mem.spanZ(DEFAULT_DYLD_PATH), file_offset); last_cmd_offset += cmdsize; } { // Link against libSystem const cmdsize = commandSize(@sizeOf(macho.dylib_command) + mem.lenZ(LIB_SYSTEM_PATH)); // TODO Find a way to work out runtime version from the OS version triple stored in std.Target. // In the meantime, we're gonna hardcode to the minimum compatibility version of 1.0.0. 
const min_version = 0x10000; const dylib = .{ .name = @sizeOf(macho.dylib_command), .timestamp = 2, // not sure why not simply 0; this is reverse engineered from Mach-O files .current_version = min_version, .compatibility_version = min_version, }; const load_dylib = [1]macho.dylib_command{ .{ .cmd = macho.LC_LOAD_DYLIB, .cmdsize = cmdsize, .dylib = dylib, }, }; try self.base.file.?.pwriteAll(mem.sliceAsBytes(load_dylib[0..1]), last_cmd_offset); const file_offset = last_cmd_offset + @sizeOf(macho.dylib_command); try self.addPadding(cmdsize - @sizeOf(macho.dylib_command), file_offset); try self.base.file.?.pwriteAll(mem.spanZ(LIB_SYSTEM_PATH), file_offset); last_cmd_offset += cmdsize; } }, .Obj => { { const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab; symtab.nsyms = @intCast(u32, self.symbol_table.items.len); const allocated_size = self.allocatedSize(symtab.stroff); const needed_size = self.string_table.items.len; log.debug("allocated_size = 0x{x}, needed_size = 0x{x}\n", .{ allocated_size, needed_size }); if (needed_size > allocated_size) { symtab.strsize = 0; symtab.stroff = @intCast(u32, self.findFreeSpace(needed_size, 1)); } symtab.strsize = @intCast(u32, needed_size); log.debug("writing string table from 0x{x} to 0x{x}\n", .{ symtab.stroff, symtab.stroff + symtab.strsize }); try self.base.file.?.pwriteAll(self.string_table.items, symtab.stroff); } var last_cmd_offset: usize = @sizeOf(macho.mach_header_64); for (self.load_commands.items) |cmd| { try cmd.write(&self.base.file.?, last_cmd_offset); last_cmd_offset += cmd.cmdsize(); } const off = @sizeOf(macho.mach_header_64) + @sizeOf(macho.segment_command_64); try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.sections.items), off); }, .Lib => return error.TODOImplementWritingLibFiles, } if (self.entry_addr == null and self.base.options.output_mode == .Exe) { log.debug("flushing. 
no_entry_point_found = true\n", .{}); self.error_flags.no_entry_point_found = true; } else { log.debug("flushing. no_entry_point_found = false\n", .{}); self.error_flags.no_entry_point_found = false; try self.writeMachOHeader(); } } fn linkWithLLD(self: *MachO, comp: *Compilation) !void { const tracy = trace(@src()); defer tracy.end(); var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator); defer arena_allocator.deinit(); const arena = &arena_allocator.allocator; const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type. // If there is no Zig code to compile, then we should skip flushing the output file because it // will not be part of the linker line anyway. const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: { const use_stage1 = build_options.is_stage1 and self.base.options.use_llvm; if (use_stage1) { const obj_basename = try std.zig.binNameAlloc(arena, .{ .root_name = self.base.options.root_name, .target = self.base.options.target, .output_mode = .Obj, }); const o_directory = self.base.options.module.?.zig_cache_artifact_directory; const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename}); break :blk full_obj_path; } try self.flushModule(comp); const obj_basename = self.base.intermediary_basename.?; const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename}); break :blk full_obj_path; } else null; const is_lib = self.base.options.output_mode == .Lib; const is_dyn_lib = self.base.options.link_mode == .Dynamic and is_lib; const is_exe_or_dyn_lib = is_dyn_lib or self.base.options.output_mode == .Exe; const target = self.base.options.target; const stack_size = self.base.options.stack_size_override orelse 16777216; const allow_shlib_undefined = self.base.options.allow_shlib_undefined orelse !self.base.options.is_native_os; const id_symlink_basename = "lld.id"; var man: Cache.Manifest = undefined; defer if 
(!self.base.options.disable_lld_caching) man.deinit(); var digest: [Cache.hex_digest_len]u8 = undefined; if (!self.base.options.disable_lld_caching) { man = comp.cache_parent.obtain(); // We are about to obtain this lock, so here we give other processes a chance first. self.base.releaseLock(); try man.addOptionalFile(self.base.options.linker_script); try man.addOptionalFile(self.base.options.version_script); try man.addListOfFiles(self.base.options.objects); for (comp.c_object_table.items()) |entry| { _ = try man.addFile(entry.key.status.success.object_path, null); } try man.addOptionalFile(module_obj_path); // We can skip hashing libc and libc++ components that we are in charge of building from Zig // installation sources because they are always a product of the compiler version + target information. man.hash.add(stack_size); man.hash.add(self.base.options.rdynamic); man.hash.addListOfBytes(self.base.options.extra_lld_args); man.hash.addListOfBytes(self.base.options.lib_dirs); man.hash.addListOfBytes(self.base.options.framework_dirs); man.hash.addListOfBytes(self.base.options.frameworks); man.hash.addListOfBytes(self.base.options.rpath_list); man.hash.add(self.base.options.is_compiler_rt_or_libc); man.hash.add(self.base.options.z_nodelete); man.hash.add(self.base.options.z_defs); if (is_dyn_lib) { man.hash.addOptional(self.base.options.version); } man.hash.addStringSet(self.base.options.system_libs); man.hash.add(allow_shlib_undefined); man.hash.add(self.base.options.bind_global_refs_locally); // We don't actually care whether it's a cache hit or miss; we just need the digest and the lock. _ = try man.hit(); digest = man.final(); var prev_digest_buf: [digest.len]u8 = undefined; const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch |err| blk: { log.debug("MachO LLD new_digest={} readlink error: {}", .{ digest, @errorName(err) }); // Handle this as a cache miss. 
break :blk prev_digest_buf[0..0]; }; if (mem.eql(u8, prev_digest, &digest)) { log.debug("MachO LLD digest={} match - skipping invocation", .{digest}); // Hot diggity dog! The output binary is already there. self.base.lock = man.toOwnedLock(); return; } log.debug("MachO LLD prev_digest={} new_digest={}", .{ prev_digest, digest }); // We are about to change the output file to be different, so we invalidate the build hash now. directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) { error.FileNotFound => {}, else => |e| return e, }; } const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path}); if (self.base.options.output_mode == .Obj) { // LLD's MachO driver does not support the equvialent of `-r` so we do a simple file copy // here. TODO: think carefully about how we can avoid this redundant operation when doing // build-obj. See also the corresponding TODO in linkAsArchive. const the_object_path = blk: { if (self.base.options.objects.len != 0) break :blk self.base.options.objects[0]; if (comp.c_object_table.count() != 0) break :blk comp.c_object_table.items()[0].key.status.success.object_path; if (module_obj_path) |p| break :blk p; // TODO I think this is unreachable. Audit this situation when solving the above TODO // regarding eliding redundant object -> object transformations. return error.NoObjectsToLink; }; // This can happen when using --enable-cache and using the stage1 backend. In this case // we can skip the file copy. if (!mem.eql(u8, the_object_path, full_out_path)) { try fs.cwd().copyFile(the_object_path, fs.cwd(), full_out_path, .{}); } } else { // Create an LLD command line and invoke it. var argv = std.ArrayList([]const u8).init(self.base.allocator); defer argv.deinit(); // Even though we're calling LLD as a library it thinks the first argument is its own exe name. 
// --- Tail of the LLD linker-invocation routine (the function begins before this chunk). ---
// Builds the darwin ld64-style argv for LLD, invokes it in-process, then updates the cache.
try argv.append("lld");
try argv.append("-error-limit");
try argv.append("0");
try argv.append("-demangle");
if (self.base.options.rdynamic) {
    try argv.append("--export-dynamic");
}
try argv.appendSlice(self.base.options.extra_lld_args);
if (self.base.options.z_nodelete) {
    try argv.append("-z");
    try argv.append("nodelete");
}
if (self.base.options.z_defs) {
    try argv.append("-z");
    try argv.append("defs");
}
// NOTE(review): passing -static when building a dylib and -dynamic otherwise looks
// inverted relative to ld64 semantics — confirm against the linker driver this targets.
if (is_dyn_lib) {
    try argv.append("-static");
} else {
    try argv.append("-dynamic");
}
if (is_dyn_lib) {
    try argv.append("-dylib");
    // Encode the semantic version into ld64's compatibility/current version flags.
    if (self.base.options.version) |ver| {
        const compat_vers = try std.fmt.allocPrint(arena, "{d}.0.0", .{ver.major});
        try argv.append("-compatibility_version");
        try argv.append(compat_vers);
        const cur_vers = try std.fmt.allocPrint(arena, "{d}.{d}.{d}", .{ ver.major, ver.minor, ver.patch });
        try argv.append("-current_version");
        try argv.append(cur_vers);
    }
    // TODO getting an error when running an executable when doing this rpath thing
    //Buf *dylib_install_name = buf_sprintf("@rpath/lib%s.%" ZIG_PRI_usize ".dylib",
    //    buf_ptr(g->root_out_name), g->version_major);
    //try argv.append("-install_name");
    //try argv.append(buf_ptr(dylib_install_name));
}
try argv.append("-arch");
try argv.append(darwinArchString(target.cpu.arch));
// Pick the minimum-deployment-version flag appropriate for the Apple OS flavor.
switch (target.os.tag) {
    .macosx => {
        try argv.append("-macosx_version_min");
    },
    .ios, .tvos, .watchos => switch (target.cpu.arch) {
        // x86 builds of iOS-family targets can only be simulator builds.
        .i386, .x86_64 => {
            try argv.append("-ios_simulator_version_min");
        },
        else => {
            try argv.append("-iphoneos_version_min");
        },
    },
    // Caller guarantees this path only runs for Apple targets.
    else => unreachable,
}
const ver = target.os.version_range.semver.min;
const version_string = try std.fmt.allocPrint(arena, "{d}.{d}.{d}", .{ ver.major, ver.minor, ver.patch });
try argv.append(version_string);
try argv.append("-sdk_version");
try argv.append(version_string);
if (target_util.requiresPIE(target) and self.base.options.output_mode == .Exe) {
    try argv.append("-pie");
}
try argv.append("-o");
try argv.append(full_out_path);
// rpaths: deduplicate via a throwaway set so each -rpath is emitted at most once.
var rpath_table = std.StringHashMap(void).init(self.base.allocator);
defer rpath_table.deinit();
for (self.base.options.rpath_list) |rpath| {
    if ((try rpath_table.fetchPut(rpath, {})) == null) {
        try argv.append("-rpath");
        try argv.append(rpath);
    }
}
if (is_dyn_lib) {
    if ((try rpath_table.fetchPut(full_out_path, {})) == null) {
        try argv.append("-rpath");
        try argv.append(full_out_path);
    }
}
for (self.base.options.lib_dirs) |lib_dir| {
    try argv.append("-L");
    try argv.append(lib_dir);
}
// Positional arguments to the linker such as object files.
try argv.appendSlice(self.base.options.objects);
for (comp.c_object_table.items()) |entry| {
    try argv.append(entry.key.status.success.object_path);
}
if (module_obj_path) |p| {
    try argv.append(p);
}
// compiler_rt on darwin is missing some stuff, so we still build it and rely on LinkOnce
if (is_exe_or_dyn_lib and !self.base.options.is_compiler_rt_or_libc) {
    try argv.append(comp.compiler_rt_static_lib.?.full_object_path);
}
// Shared libraries.
const system_libs = self.base.options.system_libs.items();
try argv.ensureCapacity(argv.items.len + system_libs.len);
for (system_libs) |entry| {
    const link_lib = entry.key;
    // By this time, we depend on these libs being dynamically linked libraries and not static libraries
    // (the check for that needs to be earlier), but they could be full paths to .dylib files, in which
    // case we want to avoid prepending "-l".
    const ext = Compilation.classifyFileExt(link_lib);
    const arg = if (ext == .shared_library) link_lib else try std.fmt.allocPrint(arena, "-l{}", .{link_lib});
    argv.appendAssumeCapacity(arg);
}
// libc++ dep
if (self.base.options.link_libcpp) {
    try argv.append(comp.libcxxabi_static_lib.?.full_object_path);
    try argv.append(comp.libcxx_static_lib.?.full_object_path);
}
// On Darwin, libSystem has libc in it, but also you have to use it
// to make syscalls because the syscall numbers are not documented
// and change between versions. So we always link against libSystem.
// LLD craps out if you do -lSystem cross compiling, so until that
// codebase gets some love from the new maintainers we're left with
// this dirty hack.
if (self.base.options.is_native_os) {
    try argv.append("-lSystem");
}
for (self.base.options.framework_dirs) |framework_dir| {
    try argv.append("-F");
    try argv.append(framework_dir);
}
for (self.base.options.frameworks) |framework| {
    try argv.append("-framework");
    try argv.append(framework);
}
if (allow_shlib_undefined) {
    try argv.append("-undefined");
    try argv.append("dynamic_lookup");
}
if (self.base.options.bind_global_refs_locally) {
    try argv.append("-Bsymbolic");
}
if (self.base.options.verbose_link) {
    Compilation.dump_argv(argv.items);
}
// LLD wants a null-terminated array of null-terminated C strings; copy argv into the arena.
const new_argv = try arena.allocSentinel(?[*:0]const u8, argv.items.len, null);
for (argv.items) |arg, i| {
    new_argv[i] = try arena.dupeZ(u8, arg);
}
// Capture LLD's stdout/stderr through the append_diagnostic callback below.
var stderr_context: LLDContext = .{
    .macho = self,
    .data = std.ArrayList(u8).init(self.base.allocator),
};
defer stderr_context.data.deinit();
var stdout_context: LLDContext = .{
    .macho = self,
    .data = std.ArrayList(u8).init(self.base.allocator),
};
defer stdout_context.data.deinit();
const llvm = @import("../llvm.zig");
const ok = llvm.Link(
    .MachO,
    new_argv.ptr,
    new_argv.len,
    append_diagnostic,
    @ptrToInt(&stdout_context),
    @ptrToInt(&stderr_context),
);
// The callback cannot propagate allocation failure, so it is reported via the oom flag.
if (stderr_context.oom or stdout_context.oom) return error.OutOfMemory;
if (stdout_context.data.items.len != 0) {
    std.log.warn("unexpected LLD stdout: {}", .{stdout_context.data.items});
}
if (!ok) {
    // TODO parse this output and surface with the Compilation API rather than
    // directly outputting to stderr here.
    std.debug.print("{}", .{stderr_context.data.items});
    return error.LLDReportedFailure;
}
if (stderr_context.data.items.len != 0) {
    std.log.warn("unexpected LLD stderr: {}", .{stderr_context.data.items});
}
// Closes a scope opened before this fragment (the "must relink" branch).
}
if (!self.base.options.disable_lld_caching) {
    // Update the dangling symlink with the digest. If it fails we can continue; it only
    // means that the next invocation will have an unnecessary cache miss.
    directory.handle.symLink(&digest, id_symlink_basename, .{}) catch |err| {
        std.log.warn("failed to save linking hash digest symlink: {}", .{@errorName(err)});
    };
    // Again failure here only means an unnecessary cache miss.
    man.writeManifest() catch |err| {
        std.log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)});
    };
    // We hang on to this lock so that the output file path can be used without
    // other processes clobbering it.
    self.base.lock = man.toOwnedLock();
}
}

/// Per-stream accumulator handed to LLD's diagnostic callback (by address, as a usize).
const LLDContext = struct {
    data: std.ArrayList(u8),
    macho: *MachO,
    // Set instead of returning error.OutOfMemory, because the C callback cannot fail.
    oom: bool = false,
};

/// C-ABI callback passed to llvm.Link above; appends `len` bytes of diagnostic text
/// to the LLDContext whose address was smuggled through `context`.
fn append_diagnostic(context: usize, ptr: [*]const u8, len: usize) callconv(.C) void {
    const lld_context = @intToPtr(*LLDContext, context);
    const msg = ptr[0..len];
    lld_context.data.appendSlice(msg) catch |err| switch (err) {
        error.OutOfMemory => lld_context.oom = true,
    };
}

/// Maps a Zig CPU architecture tag to the name ld64's -arch flag expects.
/// Falls back to the raw Zig tag name for architectures with matching spellings.
fn darwinArchString(arch: std.Target.Cpu.Arch) []const u8 {
    return switch (arch) {
        .aarch64, .aarch64_be, .aarch64_32 => "arm64",
        .thumb, .arm => "arm",
        .thumbeb, .armeb => "armeb",
        .powerpc => "ppc",
        .powerpc64 => "ppc64",
        .powerpc64le => "ppc64le",
        else => @tagName(arch),
    };
}

/// Releases all linker bookkeeping owned by this MachO instance.
pub fn deinit(self: *MachO) void {
    self.offset_table.deinit(self.base.allocator);
    self.string_table.deinit(self.base.allocator);
    self.symbol_table.deinit(self.base.allocator);
    self.sections.deinit(self.base.allocator);
    self.load_commands.deinit(self.base.allocator);
}

/// Reserves a symbol-table slot and an offset-table slot for `decl`, zero-initialized.
/// Idempotent: returns immediately if the decl already has a symbol index.
pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
    if (decl.link.macho.symbol_table_index) |_| return;
    try self.symbol_table.ensureCapacity(self.base.allocator, self.symbol_table.items.len + 1);
    try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
    log.debug("allocating symbol index {} for {}\n", .{ self.symbol_table.items.len, decl.name });
    decl.link.macho.symbol_table_index = @intCast(u32, self.symbol_table.items.len);
    _ = self.symbol_table.addOneAssumeCapacity();
    decl.link.macho.offset_table_index = @intCast(u32, self.offset_table.items.len);
    _ = self.offset_table.addOneAssumeCapacity();
    self.symbol_table.items[decl.link.macho.symbol_table_index.?] = .{
        .n_strx = 0,
        .n_type = 0,
        .n_sect = 0,
        .n_desc = 0,
        .n_value = 0,
    };
    self.offset_table.items[decl.link.macho.offset_table_index.?] = 0;
}

/// Regenerates machine code for `decl`, places it in the __text section, updates its
/// nlist_64 symbol entry and exports, and writes the code bytes into the output file.
pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
    const tracy = trace(@src());
    defer tracy.end();
    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
    defer code_buffer.deinit();
    const typed_value = decl.typed_value.most_recent.typed_value;
    const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none);
    const code = switch (res) {
        .externally_managed => |x| x,
        .appended => code_buffer.items,
        // Codegen failures are recorded on the module; linking of this decl is skipped.
        .fail => |em| {
            decl.analysis = .codegen_failure;
            try module.failed_decls.put(module.gpa, decl, em);
            return;
        },
    };
    log.debug("generated code {}\n", .{code});
    const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
    const symbol = &self.symbol_table.items[decl.link.macho.symbol_table_index.?];
    const decl_name = mem.spanZ(decl.name);
    const name_str_index = try self.makeString(decl_name);
    const addr = try self.allocateTextBlock(&decl.link.macho, code.len, required_alignment);
    log.debug("allocated text block for {} at 0x{x}\n", .{ decl_name, addr });
    log.debug("updated text section {}\n", .{self.sections.items[self.text_section_index.?]});
    symbol.* = .{
        .n_strx = name_str_index,
        .n_type = macho.N_SECT,
        // nlist_64 section ordinals are 1-based.
        .n_sect = @intCast(u8, self.text_section_index.?) + 1,
        .n_desc = 0,
        .n_value = addr,
    };
    // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
    const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
    try self.updateDeclExports(module, decl, decl_exports);
    try self.writeSymbol(decl.link.macho.symbol_table_index.?);
    // Translate the symbol's virtual address into a file offset within __text.
    const text_section = self.sections.items[self.text_section_index.?];
    const section_offset = symbol.n_value - text_section.addr;
    const file_offset = text_section.offset + section_offset;
    log.debug("file_offset 0x{x}\n", .{file_offset});
    try self.base.file.?.pwriteAll(code, file_offset);
}

/// No-op stub: Mach-O line-number debug info is not implemented yet.
pub fn updateDeclLineNumber(self: *MachO, module: *const Module.Decl) !void {}

/// Marks `decl`'s symbol as exported. Incomplete (see TODO): only the first export is
/// considered, and entry_addr is set to this decl's address for any exported decl.
pub fn updateDeclExports(
    self: *MachO,
    module: *Module,
    decl: *const Module.Decl,
    exports: []const *Module.Export,
) !void {
    const tracy = trace(@src());
    defer tracy.end();
    if (decl.link.macho.symbol_table_index == null) return;
    const decl_sym = &self.symbol_table.items[decl.link.macho.symbol_table_index.?];
    // TODO implement
    if (exports.len == 0) return;
    const exp = exports[0];
    // NOTE(review): presumably the entry point should be identified by name; as written,
    // the last exported decl processed wins — confirm intended behavior.
    self.entry_addr = decl_sym.n_value;
    decl_sym.n_type |= macho.N_EXT;
    exp.link.sym_index = 0;
}

/// No-op stub: freeing a decl's text block / symbol slot is not implemented yet.
pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {}

/// Returns the virtual address assigned to `decl`'s symbol.
/// Asserts that allocateDeclIndexes ran for this decl.
pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl) u64 {
    return self.symbol_table.items[decl.link.macho.symbol_table_index.?].n_value;
}

/// Lazily creates the load commands, __text section, symbol table, and string table
/// that every output file needs. Safe to call repeatedly; each piece is created once.
pub fn populateMissingMetadata(self: *MachO) !void {
    if (self.segment_cmd_index == null) {
        self.segment_cmd_index = @intCast(u16, self.load_commands.items.len);
        try self.load_commands.append(self.base.allocator, .{
            .Segment = .{
                .cmd = macho.LC_SEGMENT_64,
                .cmdsize = @sizeOf(macho.segment_command_64),
                .segname = makeStaticString(""),
                .vmaddr = 0,
                .vmsize = 0,
                .fileoff = 0,
                .filesize = 0,
                .maxprot = 0,
                .initprot = 0,
                .nsects = 0,
                .flags = 0,
            },
        });
        self.cmd_table_dirty = true;
    }
    if (self.symtab_cmd_index == null) {
        self.symtab_cmd_index = @intCast(u16, self.load_commands.items.len);
        try self.load_commands.append(self.base.allocator, .{
            .Symtab = .{
                .cmd = macho.LC_SYMTAB,
                .cmdsize = @sizeOf(macho.symtab_command),
                .symoff = 0,
                .nsyms = 0,
                .stroff = 0,
                .strsize = 0,
            },
        });
        self.cmd_table_dirty = true;
    }
    if (self.text_section_index == null) {
        self.text_section_index = @intCast(u16, self.sections.items.len);
        const segment = &self.load_commands.items[self.segment_cmd_index.?].Segment;
        segment.cmdsize += @sizeOf(macho.section_64);
        segment.nsects += 1;
        // Reserve file space for code up front, sized by the compiler's hint.
        const file_size = self.base.options.program_code_size_hint;
        const off = @intCast(u32, self.findFreeSpace(file_size, 1));
        const flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS;
        log.debug("found __text section free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
        try self.sections.append(self.base.allocator, .{
            .sectname = makeStaticString("__text"),
            .segname = makeStaticString("__TEXT"),
            .addr = 0,
            .size = file_size,
            .offset = off,
            .@"align" = 0x1000,
            .reloff = 0,
            .nreloc = 0,
            .flags = flags,
            .reserved1 = 0,
            .reserved2 = 0,
            .reserved3 = 0,
        });
        segment.vmsize += file_size;
        segment.filesize += file_size;
        segment.fileoff = off;
        log.debug("initial text section {}\n", .{self.sections.items[self.text_section_index.?]});
    }
    {
        const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
        if (symtab.symoff == 0) {
            const p_align = @sizeOf(macho.nlist_64);
            const nsyms = self.base.options.symbol_count_hint;
            const file_size = p_align * nsyms;
            const off = @intCast(u32, self.findFreeSpace(file_size, p_align));
            log.debug("found symbol table free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
            symtab.symoff = off;
            symtab.nsyms = @intCast(u32, nsyms);
        }
        if (symtab.stroff == 0) {
            // String table index 0 is conventionally the empty string; seed it.
            try self.string_table.append(self.base.allocator, 0);
            const file_size = @intCast(u32, self.string_table.items.len);
            const off = @intCast(u32, self.findFreeSpace(file_size, 1));
            log.debug("found string table free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
            symtab.stroff = off;
            symtab.strsize = file_size;
        }
    }
}

/// Assigns a virtual address to `text_block` inside __text and links it into the
/// doubly-linked list of text blocks. Currently only appends after the last block;
/// growing past the reserved capacity is unimplemented (see the assert).
fn allocateTextBlock(self: *MachO, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
    const segment = &self.load_commands.items[self.segment_cmd_index.?].Segment;
    const text_section = &self.sections.items[self.text_section_index.?];
    // NOTE(review): computed but never used — padded-capacity placement appears
    // unimplemented; confirm before relying on it.
    const new_block_ideal_capacity = new_block_size * alloc_num / alloc_den;
    var block_placement: ?*TextBlock = null;
    const addr = blk: {
        if (self.last_text_block) |last| {
            const last_symbol = self.symbol_table.items[last.symbol_table_index.?];
            const end_addr = last_symbol.n_value + last.size;
            const new_start_addr = mem.alignForwardGeneric(u64, end_addr, alignment);
            block_placement = last;
            break :blk new_start_addr;
        } else {
            break :blk text_section.addr;
        }
    };
    log.debug("computed symbol address 0x{x}\n", .{addr});
    const expand_text_section = block_placement == null or block_placement.?.next == null;
    if (expand_text_section) {
        const text_capacity = self.allocatedSize(text_section.offset);
        const needed_size = (addr + new_block_size) - text_section.addr;
        log.debug("text capacity 0x{x}, needed size 0x{x}\n", .{ text_capacity, needed_size });
        assert(needed_size <= text_capacity); // TODO handle growth
        self.last_text_block = text_block;
        text_section.size = needed_size;
        segment.vmsize = needed_size;
        segment.filesize = needed_size;
        // NOTE(review): this lowers the section alignment when a block with a smaller
        // alignment arrives — confirm whether it should track the maximum instead.
        if (alignment < text_section.@"align") {
            text_section.@"align" = @intCast(u32, alignment);
        }
    }
    text_block.size = new_block_size;
    // Unlink from any previous position, then splice in after block_placement.
    if (text_block.prev) |prev| {
        prev.next = text_block.next;
    }
    if (text_block.next) |next| {
        next.prev = text_block.prev;
    }
    if (block_placement) |big_block| {
        text_block.prev = big_block;
        text_block.next = big_block.next;
        big_block.next = text_block;
    } else {
        text_block.prev = null;
        text_block.next = null;
    }
    return addr;
}

/// Copies `bytes` into a zero-padded fixed 16-byte buffer (segment/section name layout).
/// Compile error if the string does not fit.
fn makeStaticString(comptime bytes: []const u8) [16]u8 {
    var buf = [_]u8{0} ** 16;
    if (bytes.len > buf.len) @compileError("string too long; max 16 bytes");
    mem.copy(u8, buf[0..], bytes);
    return buf;
}

/// Appends `bytes` plus a NUL terminator to the string table and returns its start
/// index. No deduplication: identical strings get distinct entries.
fn makeString(self: *MachO, bytes: []const u8) !u32 {
    try self.string_table.ensureCapacity(self.base.allocator, self.string_table.items.len + bytes.len + 1);
    const result = self.string_table.items.len;
    self.string_table.appendSliceAssumeCapacity(bytes);
    self.string_table.appendAssumeCapacity(0);
    return @intCast(u32, result);
}

/// Rounds `min_size` up to the next multiple of `alignment` (alignment must be > 0).
fn alignSize(comptime Int: type, min_size: anytype, alignment: Int) Int {
    const size = @intCast(Int, min_size);
    if (size % alignment == 0) return size;
    const div = size / alignment;
    return (div + 1) * alignment;
}

/// Load-command sizes must be multiples of 8 bytes in 64-bit Mach-O files.
fn commandSize(min_size: anytype) u32 {
    return alignSize(u32, min_size, @sizeOf(u64));
}

/// Writes `size` zero bytes into the output file at `file_offset`.
fn addPadding(self: *MachO, size: u64, file_offset: u64) !void {
    if (size == 0) return;
    const buf = try self.base.allocator.alloc(u8, size);
    defer self.base.allocator.free(buf);
    mem.set(u8, buf[0..], 0);
    try self.base.file.?.pwriteAll(buf, file_offset);
}

/// Returns the end offset of the first existing file region that overlaps
/// [start, start+size), or null if the range is free. Every region's size is padded by
/// the alloc_num/alloc_den ratio (declared elsewhere in this file) to leave growth room.
fn detectAllocCollision(self: *MachO, start: u64, size: u64) ?u64 {
    const hdr_size: u64 = @sizeOf(macho.mach_header_64);
    if (start < hdr_size) return hdr_size;
    const end = start + satMul(size, alloc_num) / alloc_den;
    // Region occupied by the load commands directly after the header.
    {
        const off = @sizeOf(macho.mach_header_64);
        var tight_size: u64 = 0;
        for (self.load_commands.items) |cmd| {
            tight_size += cmd.cmdsize();
        }
        const increased_size = satMul(tight_size, alloc_num) / alloc_den;
        const test_end = off + increased_size;
        if (end > off and start < test_end) {
            return test_end;
        }
    }
    // Section contents.
    for (self.sections.items) |section| {
        const increased_size = satMul(section.size, alloc_num) / alloc_den;
        const test_end = section.offset + increased_size;
        if (end > section.offset and start < test_end) {
            return test_end;
        }
    }
    // Symbol table and string table regions.
    if (self.symtab_cmd_index) |symtab_index| {
        const symtab = self.load_commands.items[symtab_index].Symtab;
        {
            const tight_size = @sizeOf(macho.nlist_64) * symtab.nsyms;
            const increased_size = satMul(tight_size, alloc_num) / alloc_den;
            const test_end = symtab.symoff + increased_size;
            if (end > symtab.symoff and start < test_end) {
                return test_end;
            }
        }
        {
            const increased_size = satMul(symtab.strsize, alloc_num) / alloc_den;
            const test_end = symtab.stroff + increased_size;
            if (end > symtab.stroff and start < test_end) {
                return test_end;
            }
        }
    }
    return null;
}

/// Returns how many bytes are available at `start` before the next tracked file region
/// begins (maxInt-start if nothing follows). 0 is treated as "no space".
fn allocatedSize(self: *MachO, start: u64) u64 {
    if (start == 0) return 0;
    var min_pos: u64 = std.math.maxInt(u64);
    {
        const off = @sizeOf(macho.mach_header_64);
        if (off > start and off < min_pos) min_pos = off;
    }
    for (self.sections.items) |section| {
        if (section.offset <= start) continue;
        if (section.offset < min_pos) min_pos = section.offset;
    }
    if (self.symtab_cmd_index) |symtab_index| {
        const symtab = self.load_commands.items[symtab_index].Symtab;
        if (symtab.symoff > start and symtab.symoff < min_pos) min_pos = symtab.symoff;
        if (symtab.stroff > start and symtab.stroff < min_pos) min_pos = symtab.stroff;
    }
    return min_pos - start;
}

/// First-fit scan: advances past colliding regions until an aligned free file offset
/// with room for `object_size` bytes is found.
fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u16) u64 {
    var start: u64 = 0;
    while (self.detectAllocCollision(start, object_size)) |item_end| {
        start = mem.alignForwardGeneric(u64, item_end, min_alignment);
    }
    return start;
}

/// Writes the single nlist_64 entry at `index` to its slot in the on-disk symbol table.
fn writeSymbol(self: *MachO, index: usize) !void {
    const tracy = trace(@src());
    defer tracy.end();
    const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
    const sym = [1]macho.nlist_64{self.symbol_table.items[index]};
    const off = symtab.symoff + @sizeOf(macho.nlist_64) * index;
    log.debug("writing symbol {} at 0x{x}\n", .{ sym[0], off });
    try self.base.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
}

/// Writes Mach-O file header.
/// Should be invoked last as it needs up-to-date values of ncmds and sizeof_cmds bookkeeping
/// variables.
fn writeMachOHeader(self: *MachO) !void {
    // Resolve the CPU identifiers first; unsupported architectures bail out
    // before anything is written to the file.
    const CpuInfo = struct {
        cpu_type: macho.cpu_type_t,
        cpu_subtype: macho.cpu_subtype_t,
    };
    const cpu_info: CpuInfo = switch (self.base.options.target.cpu.arch) {
        .aarch64 => .{
            .cpu_type = macho.CPU_TYPE_ARM64,
            .cpu_subtype = macho.CPU_SUBTYPE_ARM_ALL,
        },
        .x86_64 => .{
            .cpu_type = macho.CPU_TYPE_X86_64,
            .cpu_subtype = macho.CPU_SUBTYPE_X86_64_ALL,
        },
        else => return error.UnsupportedMachOArchitecture,
    };

    // Map the compilation's output mode onto a Mach-O file type.
    const filetype: u32 = switch (self.base.options.output_mode) {
        .Exe => macho.MH_EXECUTE,
        .Obj => macho.MH_OBJECT,
        .Lib => switch (self.base.options.link_mode) {
            .Static => return error.TODOStaticLibMachOType,
            .Dynamic => macho.MH_DYLIB,
        },
    };

    // Total byte size of every load command currently recorded.
    var total_cmd_size: u32 = 0;
    for (self.load_commands.items) |cmd| {
        total_cmd_size += cmd.cmdsize();
    }

    const hdr = macho.mach_header_64{
        .magic = macho.MH_MAGIC_64,
        .cputype = cpu_info.cpu_type,
        .cpusubtype = cpu_info.cpu_subtype,
        .filetype = filetype,
        .ncmds = @intCast(u32, self.load_commands.items.len),
        .sizeofcmds = total_cmd_size,
        // TODO should these be set to something else?
        .flags = 0,
        .reserved = 0,
    };

    log.debug("writing Mach-O header {}\n", .{hdr});
    try self.base.file.?.pwriteAll(@ptrCast([*]const u8, &hdr)[0..@sizeOf(macho.mach_header_64)], 0);
}

/// Saturating multiplication: returns a*b, clamped to maxInt on overflow.
fn satMul(a: anytype, b: anytype) @TypeOf(a, b) {
    const T = @TypeOf(a, b);
    if (std.math.mul(T, a, b)) |product| {
        return product;
    } else |_| {
        return std.math.maxInt(T);
    }
}
// src/link/MachO.zig
const std = @import("std"); const assert = std.debug.assert; const wgi = @import("../WindowGraphicsInput/WindowGraphicsInput.zig"); const ShaderProgram = wgi.ShaderProgram; const ShaderObject = wgi.ShaderObject; const ShaderType = wgi.ShaderType; const ArrayList = @import("std").ArrayList; const renderEngine = @import("RTRenderEngine.zig"); const Matrix = @import("../Mathematics/Mathematics.zig").Matrix; const files = @import("../Files.zig"); const loadFileWithNullTerminator = files.loadFileWithNullTerminator; const min = std.math.min; const getSettings = @import("RTRenderEngine.zig").getSettings; const builtin = @import("builtin"); const VertexAttributeType = @import("../ModelFiles/ModelFiles.zig").ModelData.VertexAttributeType; var standard_shader_vs_src: ?[]u8 = null; var standard_shader_fs_src: ?[]u8 = null; var standard_shader_common_src: ?[]u8 = null; // var square_depth_shader_fs_src: ?[]u8 = null; // var square_depth_fs: ?ShaderObject = null; // Appends src onto dst at the given offset into dst fn addString(src: []const u8, dst: []u8, offset: *u32) void { for (src[0..]) |b, i| dst[offset.* + i] = b; offset.* += @intCast(u32, src.len); } pub const ShaderInstance = struct { pub const ShaderConfig = struct { shadow: bool, // if true then this shader is for generating shadow maps inputs_bitmap: u8, // Only used if shadow = false max_vertex_lights: u32, max_fragment_lights: u32, non_uniform_scale: bool, recieve_shadows: bool, enable_specular_light: bool, enable_point_lights: bool, enable_directional_lights: bool, enable_spot_lights: bool, }; config: ShaderConfig, shader_name: []const u8, shader_program: ShaderProgram, mvp_matrix_location: ?i32 = null, model_matrix_location: ?i32 = null, model_view_matrix_location: ?i32 = null, normal_matrix_location: ?i32 = null, colour_location: ?i32 = null, per_obj_light_location: ?i32 = null, vertex_light_indices_location: ?i32 = null, fragment_light_indices_location: ?i32 = null, near_planes_location: ?i32 = null, 
far_planes_location: ?i32 = null, light_matrices_location: ?i32 = null, bone_matrices_location: ?i32 = null, specular_size_location: ?i32 = null, specular_intensity_location: ?i32 = null, specular_colouration_location: ?i32 = null, flat_shading_location: ?i32 = null, pub fn getShader(config_: ShaderInstance.ShaderConfig, allocator: *std.mem.Allocator) !*const ShaderInstance { var config = config_; config.max_fragment_lights = min(config.max_fragment_lights, 4); if (!config.shadow and !config.enable_point_lights and !config.enable_directional_lights and !config.enable_spot_lights and (config.max_fragment_lights != 0 or config.max_vertex_lights != 0)) { // assert(false); config.max_fragment_lights = 0; config.max_vertex_lights = 0; } config.max_vertex_lights = min(config.max_vertex_lights, 8); // Find loaded shader if (config.shadow) { for (shader_instances.?.items) |*a| { if (a.*.config.shadow and a.*.config.inputs_bitmap == config.inputs_bitmap) { return a; } } } else { for (shader_instances.?.items) |*a| { if (!a.*.config.shadow and a.*.config.inputs_bitmap == config.inputs_bitmap and a.*.config.max_vertex_lights == config.max_vertex_lights and a.*.config.max_fragment_lights == config.max_fragment_lights and a.*.config.non_uniform_scale == config.non_uniform_scale and a.*.config.recieve_shadows == config.recieve_shadows and a.*.config.enable_point_lights == config.enable_point_lights and a.*.config.enable_spot_lights == config.enable_spot_lights and a.*.config.enable_directional_lights == config.enable_directional_lights and a.*.config.enable_specular_light == config.enable_specular_light) { return a; } } } // Find cached shader or create new var si: *ShaderInstance = try shader_instances.?.addOne(); errdefer _ = shader_instances.?.pop(); if (builtin.mode == builtin.Mode.Debug) { si.* = try ShaderInstance.init(false, config, allocator); } else { si.* = ShaderInstance.loadFromBinaryFile("std", config, allocator) catch // Create the shader try 
ShaderInstance.init(true, config, allocator); } return si; } fn init(cache: bool, config: ShaderConfig, allocator: *std.mem.Allocator) !ShaderInstance { const glsl_version_string = "#version 330\n"; const vertex_positions_string = "#define HAS_VERTEX_COORDINATES\n"; const vertex_colours_string = "#define HAS_VERTEX_COLOURS\n"; const tex_coords_string = "#define HAS_TEXTURE_COORDINATES\n"; const normals_string = "#define HAS_NORMALS\n"; const vertex_weights_string = "#define HAS_VERTEX_WEIGHTS\n"; const normal_map_string = "#define NORMAL_MAP\n"; const max_vertex_lights_string = "#define MAX_VERTEX_LIGHTS x\n"; const max_fragment_lights_string = "#define MAX_FRAGMENT_LIGHTS x\n"; const non_uniform_scale_string = "#define ENABLE_NON_UNIFORM_SCALE\n"; const enable_shadows_string = "#define ENABLE_SHADOWS\n"; const enable_point_lights_string = "#define ENABLE_POINT_LIGHTS\n"; const enable_directional_lights_string = "#define ENABLE_DIRECTIONAL_LIGHTS\n"; const enable_spot_lights_string = "#define ENABLE_SPOT_LIGHTS\n"; const enable_specular_string = "#define ENABLE_SPECULAR\n"; var string: []u8 = try allocator.alloc(u8, 1024); defer allocator.free(string); var string_offset: u32 = 0; addString(glsl_version_string, string, &string_offset); if (config.enable_point_lights and getSettings().enable_point_lights) { addString(enable_point_lights_string, string, &string_offset); } if (config.enable_directional_lights and getSettings().enable_directional_lights) { addString(enable_directional_lights_string, string, &string_offset); } if (config.enable_spot_lights and getSettings().enable_spot_lights) { addString(enable_spot_lights_string, string, &string_offset); } if (config.enable_specular_light) { addString(enable_specular_string, string, &string_offset); } addString(max_vertex_lights_string, string, &string_offset); string[string_offset - 2] = '0' + @intCast(u8, config.max_vertex_lights); addString(max_fragment_lights_string, string, &string_offset); string[string_offset - 
2] = '0' + @intCast(u8, config.max_fragment_lights); if (config.non_uniform_scale) { addString(non_uniform_scale_string, string, &string_offset); } if (config.recieve_shadows and getSettings().enable_shadows) { addString(enable_shadows_string, string, &string_offset); } const inputs_bitmap = config.inputs_bitmap; if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.Position))) != 0) { addString(vertex_positions_string, string, &string_offset); } if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.Colour))) != 0) { addString(vertex_colours_string, string, &string_offset); } if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.TextureCoordinates))) != 0) { addString(tex_coords_string, string, &string_offset); } if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.Normal))) != 0) { addString(normals_string, string, &string_offset); } if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.BoneIndices))) != 0) { if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.BoneWeights))) == 0) { assert(false); return error.NoBoneWeights; } addString(vertex_weights_string, string, &string_offset); } if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.Tangent))) != 0) { if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.Normal))) == 0) { assert(false); return error.NoNormals; } if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.TextureCoordinates))) == 0) { assert(false); return error.NoTexCoords; } addString(normal_map_string, string, &string_offset); } string[string_offset] = 0; var vertex_input_names: [8]([]const u8) = undefined; var i: u32 = 0; if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.Position))) != 0) { vertex_input_names[i] = "in_coords\x00"; i += 1; } if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.Colour))) != 0) { vertex_input_names[i] = "in_vertex_colour\x00"; i += 1; } if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.TextureCoordinates))) != 0) { vertex_input_names[i] = 
"in_texture_coordinates\x00"; i += 1; } if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.Normal))) != 0) { vertex_input_names[i] = "in_normal\x00"; i += 1; } if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.BoneIndices))) != 0) { vertex_input_names[i] = "in_bone_indices\x00"; i += 1; } if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.BoneWeights))) != 0) { vertex_input_names[i] = "in_vertex_weights\x00"; i += 1; } if ((inputs_bitmap & (1 << @enumToInt(VertexAttributeType.Tangent))) != 0) { vertex_input_names[i] = "in_tangent\x00"; i += 1; } var shader_program: ShaderProgram = undefined; if (config.shadow) { var vs_shadow: ShaderObject = try ShaderObject.init(([_]([]const u8){ string[0..(string_offset + 1)], "#define VERTEX_SHADER\n#define SHADOW_MAP\n\x00", standard_shader_common_src.?, standard_shader_vs_src.?, })[0..], ShaderType.Vertex, allocator); defer vs_shadow.free(); shader_program = try ShaderProgram.init(&vs_shadow, null, vertex_input_names[0..i], allocator); errdefer shader_program.free(); } else { var vs: ShaderObject = try ShaderObject.init(([_]([]const u8){ string[0..(string_offset + 1)], "#define VERTEX_SHADER\n\x00", standard_shader_common_src.?, standard_shader_vs_src.?, })[0..], ShaderType.Vertex, allocator); defer vs.free(); var fs: ShaderObject = try ShaderObject.init(([_]([]const u8){ string[0..(string_offset + 1)], "#define FRAGMENT_SHADER\n\x00", standard_shader_common_src.?, standard_shader_fs_src.?, })[0..], ShaderType.Fragment, allocator); defer fs.free(); shader_program = try ShaderProgram.init(&vs, &fs, vertex_input_names[0..i], allocator); errdefer shader_program.free(); } var program = ShaderInstance{ .config = config, .shader_program = shader_program, .shader_name = "std", }; try program.setUniforms(); if (cache) { std.fs.makeDirAbsolute("ShaderCache") catch |e| { if (e != std.os.MakeDirError.PathAlreadyExists) { return e; } }; var file_path_: [128]u8 = undefined; const file_path = 
getFileName(file_path_[0..], program.shader_name, config) catch return program; program.shader_program.saveBinary(file_path, allocator) catch { std.fs.cwd().deleteFile(file_path) catch{}; }; } return program; } fn setUniforms(self: *ShaderInstance) !void { const index = self.shader_program.getUniformBlockIndex("UniformData") catch null; if (index != null) { try self.shader_program.setUniformBlockBinding(index.?, 1); } self.mvp_matrix_location = self.shader_program.getUniformLocation("mvp_matrix") catch null; self.model_matrix_location = self.shader_program.getUniformLocation("model_matrix") catch null; self.model_view_matrix_location = self.shader_program.getUniformLocation("model_view_matrix") catch null; self.normal_matrix_location = self.shader_program.getUniformLocation("normalMatrix") catch null; self.colour_location = self.shader_program.getUniformLocation("object_colour") catch null; self.per_obj_light_location = self.shader_program.getUniformLocation("per_obj_light") catch null; self.vertex_light_indices_location = self.shader_program.getUniformLocation("vertex_lights") catch null; self.fragment_light_indices_location = self.shader_program.getUniformLocation("fragment_lights") catch null; self.near_planes_location = self.shader_program.getUniformLocation("nearPlanes") catch null; self.far_planes_location = self.shader_program.getUniformLocation("farPlanes") catch null; self.specular_intensity_location = self.shader_program.getUniformLocation("specularIntensity") catch null; self.specular_size_location = self.shader_program.getUniformLocation("specularSize") catch null; self.specular_colouration_location = self.shader_program.getUniformLocation("specularColouration") catch null; self.flat_shading_location = self.shader_program.getUniformLocation("flatShading") catch null; const texLoc = self.shader_program.getUniformLocation("main_texture") catch null; if (texLoc != null) { try self.shader_program.setUniform1i(texLoc.?, 0); } const nmapLoc = 
self.shader_program.getUniformLocation("texture_normal_map") catch null; if (nmapLoc != null) { try self.shader_program.setUniform1i(nmapLoc.?, 1); } self.light_matrices_location = self.shader_program.getUniformLocation("lightMatrices") catch null; var shadow_texture_locations: [4]?i32 = [1]?i32{null} ** 4; var shadow_cube_texture_locations: [4]?i32 = [1]?i32{null} ** 4; shadow_texture_locations[0] = self.shader_program.getUniformLocation("shadowTexture0") catch null; shadow_texture_locations[1] = self.shader_program.getUniformLocation("shadowTexture1") catch null; shadow_texture_locations[2] = self.shader_program.getUniformLocation("shadowTexture2") catch null; shadow_texture_locations[3] = self.shader_program.getUniformLocation("shadowTexture3") catch null; shadow_cube_texture_locations[0] = self.shader_program.getUniformLocation("shadowCubeTextures0") catch null; shadow_cube_texture_locations[1] = self.shader_program.getUniformLocation("shadowCubeTextures1") catch null; shadow_cube_texture_locations[2] = self.shader_program.getUniformLocation("shadowCubeTextures2") catch null; shadow_cube_texture_locations[3] = self.shader_program.getUniformLocation("shadowCubeTextures3") catch null; var i: u32 = 0; while (i < 4) : (i += 1) { if (shadow_texture_locations[i] != null) { try self.shader_program.setUniform1i(shadow_texture_locations[i].?, @intCast(i32, 2 + i)); } if (shadow_cube_texture_locations[i] != null) { try self.shader_program.setUniform1i(shadow_cube_texture_locations[i].?, @intCast(i32, 6 + i)); } } self.bone_matrices_location = self.shader_program.getUniformLocation("boneMatrices") catch null; } pub fn getFileName(buf: []u8, shader_name: []const u8, config: ShaderConfig) ![]u8 { return try std.fmt.bufPrint(buf, "ShaderCache{}{}.{}.{}.{}.{}.{}.{}.{}.{}.{}.{}.bin", .{ files.path_seperator, shader_name, @boolToInt(config.shadow), config.inputs_bitmap, config.max_vertex_lights, config.max_fragment_lights, @boolToInt(config.non_uniform_scale), 
@boolToInt(config.recieve_shadows),
        @boolToInt(config.enable_specular_light),
        @boolToInt(config.enable_point_lights),
        @boolToInt(config.enable_directional_lights),
        @boolToInt(config.enable_spot_lights)});
}

/// Loads a previously cached shader binary (see getFileName) and resolves its
/// uniform locations. Fails if the cache file does not exist or cannot be
/// loaded as a program binary.
pub fn loadFromBinaryFile(shader_name: []const u8, config: ShaderConfig, allocator: *std.mem.Allocator) !ShaderInstance {
    var file_name_: [128]u8 = undefined;
    const file_name = try ShaderInstance.getFileName(file_name_[0..], shader_name, config);
    var shader_program = try ShaderProgram.loadFromBinaryFile(file_name, allocator);
    var si = ShaderInstance{
        .config = config,
        .shader_name = shader_name,
        .shader_program = shader_program,
    };
    try si.setUniforms();
    return si;
}

/// Makes this instance's program the active one for subsequent draw calls.
pub fn bind(self: ShaderInstance) !void {
    try self.shader_program.bind();
}

// NOTE: all setters below are silent no-ops when the corresponding uniform was
// not found in the compiled program (location cached as null by setUniforms).

pub fn setMVPMatrix(self: ShaderInstance, matrix: *const Matrix(f32, 4)) !void {
    if (self.mvp_matrix_location != null) {
        try self.shader_program.setUniformMat4(self.mvp_matrix_location.?, 1, @bitCast([16]f32, matrix.data)[0..]);
    }
}

pub fn setModelMatrix(self: ShaderInstance, matrix: *const Matrix(f32, 4)) !void {
    if (self.model_matrix_location != null) {
        try self.shader_program.setUniformMat4(self.model_matrix_location.?, 1, @bitCast([16]f32, matrix.data)[0..]);
    }
}

pub fn setModelViewMatrix(self: ShaderInstance, matrix: *const Matrix(f32, 4)) !void {
    if (self.model_view_matrix_location != null) {
        try self.shader_program.setUniformMat4(self.model_view_matrix_location.?, 1, @bitCast([16]f32, matrix.data)[0..]);
    }
}

pub fn setNormalMatrix(self: ShaderInstance, matrix: *const Matrix(f32, 3)) !void {
    if (self.normal_matrix_location != null) {
        try self.shader_program.setUniformMat3(self.normal_matrix_location.?, 1, @bitCast([9]f32, matrix.data)[0..]);
    }
}

pub fn setColour(self: ShaderInstance, colour: [3]f32) !void {
    if (self.colour_location != null) {
        try self.shader_program.setUniform3f(self.colour_location.?, colour);
    }
}

pub fn setPerObjLight(self: ShaderInstance, colour: [3]f32) !void {
    if (self.per_obj_light_location != null) {
        try self.shader_program.setUniform3f(self.per_obj_light_location.?, colour);
    }
}

/// Uploads light indices used by the vertex stage. Only the first
/// `config.max_vertex_lights` entries of `indices` are sent.
pub fn setVertexLightIndices(self: ShaderInstance, indices: [8]i32) !void {
    if (self.vertex_light_indices_location != null) {
        try self.shader_program.setUniform1iv(self.vertex_light_indices_location.?, indices[0..self.config.max_vertex_lights]);
    }
}

/// Uploads light indices used by the fragment stage. Only the first
/// `config.max_fragment_lights` entries of `indices` are sent.
pub fn setFragmentLightIndices(self: ShaderInstance, indices: [4]i32) !void {
    if (self.fragment_light_indices_location != null) {
        try self.shader_program.setUniform1iv(self.fragment_light_indices_location.?, indices[0..self.config.max_fragment_lights]);
    }
}

// Near planes of shadow-casting lights
pub fn setNearPlanes(self: ShaderInstance, near_planes: [4]f32) !void {
    if (self.near_planes_location != null) {
        try self.shader_program.setUniform1fv(self.near_planes_location.?, near_planes[0..self.config.max_fragment_lights]);
    }
}

// Far planes of shadow-casting lights
pub fn setFarPlanes(self: ShaderInstance, far_planes: [4]f32) !void {
    if (self.far_planes_location != null) {
        try self.shader_program.setUniform1fv(self.far_planes_location.?, far_planes[0..self.config.max_fragment_lights]);
    }
}

/// Uploads the four shadow-map (light-space) matrices as one contiguous
/// [64]f32 upload.
pub fn setLightMatrices(self: ShaderInstance, matrices: [4]Matrix(f32, 4)) !void {
    if (self.light_matrices_location != null) {
        try self.shader_program.setUniformMat4(self.light_matrices_location.?, 4, @bitCast([64]f32, matrices)[0..]);
    }
}

/// `matrices` is a flat array of 4x4 matrices (16 floats each); the count is
/// derived from the slice length.
pub fn setBoneMatrices(self: ShaderInstance, matrices: []const f32) !void {
    if (self.bone_matrices_location != null) {
        try self.shader_program.setUniformMat4(self.bone_matrices_location.?, @intCast(i32, matrices.len / 16), matrices);
    }
}

pub fn setSpecularIntensity(self: ShaderInstance, c: f32) !void {
    if (self.specular_intensity_location != null) {
        try self.shader_program.setUniform1f(self.specular_intensity_location.?, c);
    }
}

/// Note the inversion: the shader's "specularSize" uniform receives `1.0 - c`,
/// so a larger `c` means a smaller uploaded value.
pub fn setSpecularSize(self: ShaderInstance, c: f32) !void {
    if (self.specular_size_location != null) {
        try self.shader_program.setUniform1f(self.specular_size_location.?, 1.0 - c);
    }
}
pub fn setSpecularColouration(self: ShaderInstance, c: f32) !void {
    if (self.specular_colouration_location != null) {
        try self.shader_program.setUniform1f(self.specular_colouration_location.?, c);
    }
}

pub fn setFlatShadingEnabled(self: ShaderInstance, enabled: bool) !void {
    if (self.flat_shading_location != null) {
        try self.shader_program.setUniform1i(self.flat_shading_location.?, @boolToInt(enabled));
    }
}

// Used during development to detect shader performance problems
// (the allocator parameter is only used by the commented-out validation call).
pub fn validate(self: ShaderInstance, allocator: *std.mem.Allocator) void {
    if (builtin.mode == builtin.Mode.Debug) {
        // Uncomment this to enable shader validation
        // No idea if it does anything useful or not..
        // self.shader_program.validate(allocator);
    }
}
};

// Lazily-created list of all shader instances; set up by init() below.
var shader_instances: ?ArrayList(ShaderInstance) = null;

// Called by RenderEngine.init.
// Loads the standard shader sources (vertex, fragment, shared GLSL) from the
// StandardAssets directory into module-level globals.
pub fn init(allocator: *std.mem.Allocator) !void {
    shader_instances = ArrayList(ShaderInstance).init(allocator);
    standard_shader_vs_src = try loadFileWithNullTerminator("StandardAssets" ++ files.path_seperator ++ "StandardShader.vs", allocator);
    standard_shader_fs_src = try loadFileWithNullTerminator("StandardAssets" ++ files.path_seperator ++ "StandardShader.fs", allocator);
    standard_shader_common_src = try loadFileWithNullTerminator("StandardAssets" ++ files.path_seperator ++ "StandardShader.glsl", allocator);
}

const window = wgi.window;

// Converts any integer-like value to bool (non-zero => true).
fn intToBool(b: var) bool {
    return b != 0;
}

// Exhaustively compiles every valid permutation of the standard shader:
// all 2^7 input-attribute bitmaps (minus invalid combinations) crossed with
// shadow / vertex-light / fragment-light / receive-shadow flags.
// Requires a window + render engine, so this is an integration test.
test "Standard Shader all combinations" {
    std.debug.warn("This may take some time...", .{});
    var a = std.heap.page_allocator;
    try window.createWindow(false, 200, 200, "test", true, 0);
    defer window.closeWindow();
    try renderEngine.init(wgi.getMicroTime(), a);
    try init(a);
    var inputs_bitmap: u8 = 0;
    var i: u32 = 0;
    while (inputs_bitmap < 128) : (inputs_bitmap += 1) { // 2^7 combinations of inputs
        // Bits 4 and 5 must match (both set or both clear) to be valid.
        if ((inputs_bitmap & (1 << 4)) >> 4 != (inputs_bitmap & (1 << 5)) >> 5) {
            continue;
        }
        if (inputs_bitmap & (1 << 6) != 0) {
            if (inputs_bitmap & (1 << 3) == 0) { // Can't have normal maps without normals
                continue;
            } else if (inputs_bitmap & (1 << 2) == 0) { // Can't have normal maps without texture coordinates
                continue;
            }
        }
        // Expand the bitmap into a list of 1-based attribute indices.
        var inputs: [8]u32 = [1]u32{0} ** 8;
        var inputs_bitmap_: u8 = inputs_bitmap;
        const attribs_n = @popCount(u8, inputs_bitmap);
        var inputs_i: u32 = 0;
        var x: u32 = 1;
        while (inputs_bitmap_ != 0) : (inputs_bitmap_ >>= 1) {
            if (inputs_bitmap_ & 1 != 0) {
                inputs[inputs_i] = x;
                inputs_i += 1;
            }
            x += 1;
        }
        std.testing.expect(inputs_bitmap_ == 0);
        var shadow: u32 = 0;
        while (shadow < 2) : (shadow += 1) {
            var v_lights: u32 = 0;
            while (v_lights < 2) : (v_lights += 1) {
                var f_lights: u32 = 0;
                while (f_lights < 2) : (f_lights += 1) {
                    var recv_shadows: u32 = 0;
                    while (recv_shadows < 2) : (recv_shadows += 1) {
                        // Shadow-pass shaders are not lit and never receive shadows.
                        if (intToBool(shadow) and (intToBool(recv_shadows) or v_lights != 0 or f_lights != 0)) {
                            continue;
                        }
                        var config = ShaderInstance.ShaderConfig{
                            .shadow = intToBool(shadow),
                            .inputs_bitmap = inputs_bitmap,
                            .max_vertex_lights = v_lights,
                            .max_fragment_lights = f_lights,
                            .non_uniform_scale = false,
                            .recieve_shadows = intToBool(recv_shadows),
                            .enable_specular_light = true,
                            .enable_point_lights = true,
                            .enable_directional_lights = true,
                            .enable_spot_lights = true,
                        };
                        // On failure, dump the permutation that broke before re-raising.
                        var sh = ShaderInstance.init(false, config, a) catch |e| {
                            std.debug.warn("bitmap {}, attribs: {} {} {} {} {} {} {} {}\n", .{ inputs_bitmap, inputs[0], inputs[1], inputs[2], inputs[3], inputs[4], inputs[5], inputs[6], inputs[7] });
                            std.debug.warn("shadow {}, vertex lights {}, fragment lights {}, recieve shadows {}\n", .{ intToBool(shadow), v_lights, f_lights, intToBool(recv_shadows) });
                            return e;
                        };
                        sh.shader_program.free();
                    }
                }
            }
        }
    }
}
src/RTRenderEngine/Shader.zig
const std = @import("std");
const json = std.json;
const MaybeDefined = @import("json_serialize.zig").MaybeDefined;

// Convenience aliases mapping LSP base types onto Zig / std.json types.
pub const String = []const u8;
pub const Integer = i64;
pub const Float = f64;
pub const Bool = bool;
pub const Array = json.Array;
pub const Object = json.ObjectMap;
pub const Any = json.Value;

// Specification:
// https://microsoft.github.io/language-server-protocol/specifications/specification-current/

/// JSON-RPC request id: the protocol allows string or numeric ids.
pub const RequestId = union(enum) {
    String: String,
    Integer: Integer,
    Float: Float,
};

/// A JSON-RPC request (or notification, when `id` is NotDefined).
pub const Request = struct {
    jsonrpc: String = "2.0",
    method: String,
    /// Must be an Array or an Object
    params: Any,
    id: MaybeDefined(RequestId) = .NotDefined,

    /// Returns true when the message is well-formed JSON-RPC 2.0:
    /// correct version string and structured (object/array) params.
    pub fn validate(self: *const Request) bool {
        if (!std.mem.eql(u8, self.jsonrpc, "2.0")) {
            return false;
        }
        return switch (self.params) {
            .Object, .Array => true,
            else => false,
        };
    }
};

/// A JSON-RPC response; carries either `result` or `error`, never both.
pub const Response = struct {
    jsonrpc: String = "2.0",
    @"error": MaybeDefined(Error) = .NotDefined,
    result: MaybeDefined(Any) = .NotDefined,
    id: ?RequestId,

    pub const Error = struct {
        code: Integer,
        message: String,
        data: MaybeDefined(Any),
    };

    /// Returns true when the version matches and exactly one of
    /// `result` / `error` is present.
    pub fn validate(self: *const Response) bool {
        if (!std.mem.eql(u8, self.jsonrpc, "2.0")) {
            return false;
        }
        const errorDefined = self.@"error" == .Defined;
        const resultDefined = self.result == .Defined;
        // exactly one of them must be defined
        return errorDefined != resultDefined;
    }
};

/// Well-known JSON-RPC / LSP error codes.
pub const ErrorCodes = struct {
    // Defined by JSON RPC
    pub const ParseError = -32700;
    pub const InvalidRequest = -32600;
    pub const MethodNotFound = -32601;
    pub const InvalidParams = -32602;
    pub const InternalError = -32603;

    // Implementation specific JSON RPC errors
    pub const serverErrorStart = -32099;
    pub const serverErrorEnd = -32000;
    pub const ServerNotInitialized = -32002;
    pub const UnknownErrorCode = -32001;

    // Defined by LSP
    pub const RequestCancelled = -32800;
    pub const ContentModified = -32801;
};

pub const DocumentUri = String;

/// Zero-based line/character position (UTF-16 character offsets per LSP).
pub const Position = struct {
    line: Integer,
    character: Integer,
};

pub const Range = struct {
    start: Position,
    end: Position,
};

pub const Location = struct {
    uri: DocumentUri,
    range: Range,
};

pub const LocationLink = struct {
    originSelectionRange: MaybeDefined(Range) = .NotDefined,
    targetUri: DocumentUri,
    targetRange: Range,
    targetSelectionRange: Range,
};

pub const Diagnostic = struct {
    range: Range,
    severity: MaybeDefined(DiagnosticSeverity) = .NotDefined,
    code: MaybeDefined(Any) = .NotDefined,
    source: MaybeDefined(String) = .NotDefined,
    message: String,
    relatedInformation: MaybeDefined([]DiagnosticRelatedInformation) = .NotDefined,
};

pub const DiagnosticRelatedInformation = struct {
    location: Location,
    message: String,
};

// Non-exhaustive (`_`): tolerates unknown values from newer clients.
pub const DiagnosticSeverity = enum(Integer) {
    Error = 1,
    Warning = 2,
    Information = 3,
    Hint = 4,
    _,
};

pub const Command = struct {
    title: String,
    command: String,
    arguments: MaybeDefined([]Any),
};

pub const TextEdit = struct {
    range: Range,
    newText: String,
};

pub const TextDocumentSyncKind = enum(Integer) {
    None = 0,
    Full = 1,
    Incremental = 2,
    _,
};

pub const InitializeParams = struct {
    processId: ?Integer,
    rootPath: MaybeDefined(?String),
    rootUri: ?DocumentUri,
    initializationOptions: MaybeDefined(Any),
    capabilities: ClientCapabilities,
    // trace: MaybeDefined(String),
    // workspaceFolders: MaybeDefined(?[]WorkspaceFolder),
};

pub const InitializedParams = struct {};

/// String constants for the `trace` initialization option.
pub const Trace = struct {
    pub const Off = "off";
    pub const Messages = "messages";
    pub const Verbose = "verbose";
};

pub const WorkspaceFolder = struct {};

pub const ClientCapabilities = struct {};

pub const DidChangeTextDocumentParams = struct {
    contentChanges: []TextDocumentContentChangeEvent,
    textDocument: VersionedTextDocumentIdentifier,
};

/// When `range` is NotDefined the `text` is the full new document content.
pub const TextDocumentContentChangeEvent = struct {
    range: MaybeDefined(Range),
    text: String,
};

pub const TextDocumentIdentifier = struct {
    uri: DocumentUri,
};

pub const VersionedTextDocumentIdentifier = struct {
    uri: DocumentUri,
    version: ?Integer,
};

pub const PublishDiagnosticsParams = struct {
    uri: DocumentUri,
    diagnostics: []Diagnostic,
};

pub const CompletionParams = struct {
    textDocument: TextDocumentIdentifier,
    position: Position,
    context: MaybeDefined(CompletionContext),
};

pub const CompletionTriggerKind = enum(Integer) {
    Invoked = 1,
    TriggerCharacter = 2,
    TriggerForIncompleteCompletions = 3,
    _,
};

pub const CompletionContext = struct {
    triggerKind: CompletionTriggerKind,
    triggerCharacter: MaybeDefined(String),
};

// not complete definition
pub const CompletionItem = struct {
    label: String,
    kind: MaybeDefined(CompletionItemKind) = .NotDefined,
    textEdit: MaybeDefined(TextEdit) = .NotDefined,
    filterText: MaybeDefined(String) = .NotDefined,
};

pub const CompletionItemKind = enum(Integer) {
    Text = 1,
    Method = 2,
    Function = 3,
    Constructor = 4,
    Field = 5,
    Variable = 6,
    Class = 7,
    Interface = 8,
    Module = 9,
    Property = 10,
    Unit = 11,
    Value = 12,
    Enum = 13,
    Keyword = 14,
    Snippet = 15,
    Color = 16,
    File = 17,
    Reference = 18,
    Folder = 19,
    EnumMember = 20,
    Constant = 21,
    Struct = 22,
    Event = 23,
    Operator = 24,
    TypeParameter = 25,
    _,
};

pub const DidCloseTextDocumentParams = struct {
    textDocument: TextDocumentIdentifier,
};

pub const DidOpenTextDocumentParams = struct {
    textDocument: TextDocumentItem,
};

pub const TextDocumentItem = struct {
    uri: DocumentUri,
    languageId: String,
    version: Integer,
    text: String,
};
src/types.zig
const std = @import("std"); const json = std.json; const Tuple = std.meta.Tuple; pub fn IntBackedEnumStringify(comptime T: type) type { return struct { pub fn jsonStringify(value: T, options: json.StringifyOptions, out_stream: anytype) !void { try json.stringify(@enumToInt(value), options, out_stream); } }; } test "int-backed enum stringify" { const MyEnum = enum(i64) { one = 1, two = 2, usingnamespace IntBackedEnumStringify(@This()); }; var buf: [2]u8 = undefined; var fbs = std.io.fixedBufferStream(&buf); try json.stringify(MyEnum.one, .{}, fbs.writer()); try std.testing.expectEqual(@as(u8, '1'), buf[0]); try json.stringify(MyEnum.two, .{}, fbs.writer()); try std.testing.expectEqual(@as(u8, '2'), buf[1]); } pub fn StringBackedEnumStringify(comptime T: type) type { return struct { pub fn jsonStringify(value: T, options: json.StringifyOptions, out_stream: anytype) !void { inline for (std.meta.fields(T)) |field| { if (@enumToInt(value) == field.value) { try json.stringify(field.name, options, out_stream); return; } } unreachable; } }; } test "string-backed enum stringify" { const MyEnum = enum { one, two, usingnamespace StringBackedEnumStringify(@This()); }; var buf: [10]u8 = undefined; var fbs = std.io.fixedBufferStream(&buf); try json.stringify(MyEnum.one, .{}, fbs.writer()); try std.testing.expectEqualSlices(u8, "\"one\"", buf[0..5]); try json.stringify(MyEnum.two, .{}, fbs.writer()); try std.testing.expectEqualSlices(u8, "\"two\"", buf[5..]); } /// The LSP any type pub const LSPAny = std.json.Value; pub const ManuallyTranslateValue = @compileError("bruh 😭"); pub const RequestId = union(enum) { integer: i64, string: []const u8, }; test { // Test for general correctness of structs std.testing.refAllDecls(@This()); } /// A tagging type for string properties that are actually document URIs. 
pub const DocumentUri = []const u8;

/// A tagging type for string properties that are actually URIs
pub const URI = []const u8;

/// Position in a text document expressed as zero-based line and character offset.
/// The offsets are based on a UTF-16 string representation. So a string of the form
/// `a𐐀b` the character offset of the character `a` is 0, the character offset of `𐐀`
/// is 1 and the character offset of b is 3 since `𐐀` is represented using two code
/// units in UTF-16.
pub const Position = struct {
    /// Line position in a document (zero-based).
    line: i64,
    /// Character offset on a line in a document (zero-based). Assuming that the line is
    /// represented as a string, the `character` value represents the gap between the
    /// `character` and `character + 1`.
    character: i64,
};

/// A range in a text document expressed as (zero-based) start and end positions.
pub const Range = struct {
    /// The range's start position
    start: Position,
    /// The range's end position.
    end: Position,
};

/// Represents a location inside a resource, such as a line
/// inside a text file.
pub const Location = struct {
    uri: []const u8,
    range: Range,
};

/// Represents the connection of two locations. Provides additional metadata over normal [locations](#Location),
/// including an origin range.
pub const LocationLink = struct {
    /// Span of the origin of this link.
    originSelectionRange: Undefinedable(Range),
    /// The target resource identifier of this link.
    targetUri: []const u8,
    /// The full target range of this link. If the target for example is a symbol then target range is the
    /// range enclosing this symbol not including leading/trailing whitespace but everything else
    /// like comments. This information is typically used to highlight the range in the editor.
    targetRange: Range,
    /// The range that should be selected and revealed when this link is being followed, e.g the name of a function.
    /// Must be contained by the `targetRange`. See also `DocumentSymbol#range`
    targetSelectionRange: Range,
};

/// Represents a color in RGBA space.
/// NOTE(review): the LSP spec defines these components as decimals in [0, 1];
/// `i64` here only admits 0 or 1 — confirm against the generator / spec.
pub const Color = struct {
    /// The red component of this color in the range [0-1].
    red: i64,
    /// The green component of this color in the range [0-1].
    green: i64,
    /// The blue component of this color in the range [0-1].
    blue: i64,
    /// The alpha component of this color in the range [0-1].
    alpha: i64,
};

/// Represents a color range from a document.
pub const ColorInformation = struct {
    /// The range in the document where this color appears.
    range: Range,
    /// The actual color value for this color range.
    color: Color,
};

pub const ColorPresentation = struct {
    /// The label of this color presentation. It will be shown on the color
    /// picker header. By default this is also the text that is inserted when selecting
    /// this color presentation.
    label: []const u8,
    /// An [edit](#TextEdit) which is applied to a document when selecting
    /// this presentation for the color. When `falsy` the [label](#ColorPresentation.label)
    /// is used.
    textEdit: Undefinedable(TextEdit),
    /// An optional array of additional [text edits](#TextEdit) that are applied when
    /// selecting this color presentation. Edits must not overlap with the main [edit](#ColorPresentation.textEdit) nor with themselves.
    additionalTextEdits: Undefinedable([]TextEdit),
};

/// Enum of known range kinds
pub const FoldingRangeKind = struct {
    pub const Comment = "comment";
    pub const Imports = "imports";
    pub const Region = "region";
};

/// Represents a folding range. To be valid, start and end line must be bigger than zero and smaller
/// than the number of lines in the document. Clients are free to ignore invalid ranges.
pub const FoldingRange = struct {
    /// The zero-based start line of the range to fold. The folded area starts after the line's last character.
    /// To be valid, the end must be zero or larger and smaller than the number of lines in the document.
    startLine: i64,
    /// The zero-based character offset from where the folded range starts. If not defined, defaults to the length of the start line.
    startCharacter: Undefinedable(i64),
    /// The zero-based end line of the range to fold. The folded area ends with the line's last character.
    /// To be valid, the end must be zero or larger and smaller than the number of lines in the document.
    endLine: i64,
    /// The zero-based character offset before the folded range ends. If not defined, defaults to the length of the end line.
    endCharacter: Undefinedable(i64),
    /// Describes the kind of the folding range such as `comment' or 'region'. The kind
    /// is used to categorize folding ranges and used by commands like 'Fold all comments'. See
    /// [FoldingRangeKind](#FoldingRangeKind) for an enumeration of standardized kinds.
    kind: Undefinedable([]const u8),
};

/// Represents a related message and source code location for a diagnostic. This should be
/// used to point to code locations that cause or related to a diagnostics, e.g when duplicating
/// a symbol in a scope.
pub const DiagnosticRelatedInformation = struct {
    /// The location of this related diagnostic information.
    location: Location,
    /// The message of this related diagnostic information.
    message: []const u8,
};

/// The diagnostic's severity.
pub const DiagnosticSeverity = enum(i64) {
    Error = 1,
    Warning = 2,
    Information = 3,
    Hint = 4,
    usingnamespace IntBackedEnumStringify(@This());
};

/// The diagnostic tags.
pub const DiagnosticTag = enum(i64) {
    Unnecessary = 1,
    Deprecated = 2,
    usingnamespace IntBackedEnumStringify(@This());
};

/// Structure to capture a description for an error code.
pub const CodeDescription = struct {
    /// An URI to open with more information about the diagnostic error.
    href: []const u8,
};

/// Represents a diagnostic, such as a compiler error or warning. Diagnostic objects
/// are only valid in the scope of a resource.
pub const Diagnostic = struct {
    /// The range at which the message applies
    range: Range,
    /// The diagnostic's severity. Can be omitted. If omitted it is up to the
    /// client to interpret diagnostics as error, warning, info or hint.
    severity: Undefinedable(DiagnosticSeverity),
    /// The diagnostic's code, which usually appear in the user interface.
    code: Undefinedable(union(enum) {
        string: []const u8,
        number: i64,
    }),
    /// An optional property to describe the error code.
    codeDescription: Undefinedable(CodeDescription),
    /// A human-readable string describing the source of this
    /// diagnostic, e.g. 'typescript' or 'super lint'. It usually
    /// appears in the user interface.
    source: Undefinedable([]const u8),
    /// The diagnostic's message. It usually appears in the user interface
    message: []const u8,
    /// Additional metadata about the diagnostic.
    tags: Undefinedable([]DiagnosticTag),
    /// An array of related diagnostic information, e.g. when symbol-names within
    /// a scope collide all definitions can be marked via this property.
    relatedInformation: Undefinedable([]DiagnosticRelatedInformation),
    /// A data entry field that is preserved between a `textDocument/publishDiagnostics`
    /// notification and `textDocument/codeAction` request.
    data: Undefinedable(std.json.Value),
};

/// Represents a reference to a command. Provides a title which
/// will be used to represent a command in the UI and, optionally,
/// an array of arguments which will be passed to the command handler
/// function when invoked.
pub const Command = struct {
    /// Title of the command, like `save`.
    title: []const u8,
    /// The identifier of the actual command handler.
    command: []const u8,
    /// Arguments that the command handler should be
    /// invoked with.
    arguments: Undefinedable([]std.json.Value),
};

/// A text edit applicable to a text document.
pub const TextEdit = struct {
    /// The range of the text document to be manipulated. To insert
    /// text into a document create a range where start === end.
    range: Range,
    /// The string to be inserted. For delete operations use an
    /// empty string.
    newText: []const u8,
};

/// Additional information that describes document changes.
pub const ChangeAnnotation = struct {
    /// A human-readable string describing the actual change. The string
    /// is rendered prominent in the user interface.
    label: []const u8,
    /// A flag which indicates that user confirmation is needed
    /// before applying the change.
    needsConfirmation: Undefinedable(bool),
    /// A human-readable string which is rendered less prominent in
    /// the user interface.
    description: Undefinedable([]const u8),
};

/// An identifier to refer to a change annotation stored with a workspace edit.
pub const ChangeAnnotationIdentifier = []const u8;

/// A special text edit with an additional change annotation.
pub const AnnotatedTextEdit = struct {
    /// The actual identifier of the change annotation
    annotationId: []const u8,
    /// The range of the text document to be manipulated. To insert
    /// text into a document create a range where start === end.
    range: Range,
    /// The string to be inserted. For delete operations use an
    /// empty string.
    newText: []const u8,
};

/// Describes textual changes on a text document. A TextDocumentEdit describes all changes
/// on a document version Si and after they are applied move the document to version Si+1.
/// So the creator of a TextDocumentEdit doesn't need to sort the array of edits or do any
/// kind of ordering. However the edits must be non overlapping.
pub const TextDocumentEdit = struct {
    /// The text document to change.
    textDocument: OptionalVersionedTextDocumentIdentifier,
    /// The edits to be applied.
    edits: []union(enum) {
        TextEdit: TextEdit,
        AnnotatedTextEdit: AnnotatedTextEdit,
    },
};

/// Options to create a file.
pub const CreateFileOptions = struct {
    /// Overwrite existing file. Overwrite wins over `ignoreIfExists`
    overwrite: Undefinedable(bool),
    /// Ignore if exists.
    ignoreIfExists: Undefinedable(bool),
};

/// Create file operation.
pub const CreateFile = struct {
    /// A create
    comptime kind: []const u8 = "create",
    /// The resource to create.
    uri: []const u8,
    /// Additional options
    options: Undefinedable(CreateFileOptions),
    /// An optional annotation identifier describing the operation.
    annotationId: Undefinedable([]const u8),
};

/// Rename file options
pub const RenameFileOptions = struct {
    /// Overwrite target if existing. Overwrite wins over `ignoreIfExists`
    overwrite: Undefinedable(bool),
    /// Ignores if target exists.
    ignoreIfExists: Undefinedable(bool),
};

/// Rename file operation
pub const RenameFile = struct {
    /// A rename
    comptime kind: []const u8 = "rename",
    /// The old (existing) location.
    oldUri: []const u8,
    /// The new location.
    newUri: []const u8,
    /// Rename options.
    options: Undefinedable(RenameFileOptions),
    /// An optional annotation identifier describing the operation.
    annotationId: Undefinedable([]const u8),
};

/// Delete file options
pub const DeleteFileOptions = struct {
    /// Delete the content recursively if a folder is denoted.
    recursive: Undefinedable(bool),
    /// Ignore the operation if the file doesn't exist.
    ignoreIfNotExists: Undefinedable(bool),
};

/// Delete file operation
pub const DeleteFile = struct {
    /// A delete
    comptime kind: []const u8 = "delete",
    /// The file to delete.
    uri: []const u8,
    /// Delete options.
    options: Undefinedable(DeleteFileOptions),
    /// An optional annotation identifier describing the operation.
    annotationId: Undefinedable([]const u8),
};

/// A workspace edit represents changes to many resources managed in the workspace. The edit
/// should either provide `changes` or `documentChanges`. If documentChanges are present
/// they are preferred over `changes` if the client can handle versioned document edits.
pub const WorkspaceEdit = struct {
    /// Holds changes to existing resources.
    changes: Undefinedable(ManuallyTranslateValue),
    /// Depending on the client capability `workspace.workspaceEdit.resourceOperations` document changes
    /// are either an array of `TextDocumentEdit`s to express changes to n different text documents
    /// where each text document edit addresses a specific version of a text document. Or it can contain
    /// above `TextDocumentEdit`s mixed with create, rename and delete file / folder operations.
    documentChanges: Undefinedable([]union(enum) {
        TextDocumentEdit: TextDocumentEdit,
        CreateFile: CreateFile,
        RenameFile: RenameFile,
        DeleteFile: DeleteFile,
    }),
    /// A map of change annotations that can be referenced in `AnnotatedTextEdit`s or create, rename and
    /// delete file / folder operations.
    changeAnnotations: Undefinedable(ManuallyTranslateValue),
};

/// A change to capture text edits for existing resources.
pub const TextEditChange = struct {};

/// A literal to identify a text document in the client.
pub const TextDocumentIdentifier = struct {
    /// The text document's uri.
    uri: []const u8,
};

/// A text document identifier to denote a specific version of a text document.
pub const VersionedTextDocumentIdentifier = struct {
    /// The version number of this document.
    version: i64,
    /// The text document's uri.
    uri: []const u8,
};

/// A text document identifier to optionally denote a specific version of a text document.
pub const OptionalVersionedTextDocumentIdentifier = struct {
    /// The version number of this document. If a versioned text document identifier
    /// is sent from the server to the client and the file is not open in the editor
    /// (the server has not received an open notification before) the server can send
    /// `null` to indicate that the version is unknown and the content on disk is the
    /// truth (as specified with document content ownership).
    version: ?i64,
    /// The text document's uri.
    uri: []const u8,
};

/// An item to transfer a text document from the client to the
/// server.
pub const TextDocumentItem = struct {
    /// The text document's uri.
    uri: []const u8,
    /// The text document's language identifier
    languageId: []const u8,
    /// The version number of this document (it will increase after each
    /// change, including undo/redo).
    version: i64,
    /// The content of the opened text document.
    text: []const u8,
};

pub const MarkupKind = enum {
    plaintext,
    markdown,
    usingnamespace StringBackedEnumStringify(@This());
};

/// A `MarkupContent` literal represents a string value which content is interpreted base on its
/// kind flag. Currently the protocol supports `plaintext` and `markdown` as markup kinds.
pub const MarkupContent = struct {
    /// The type of the Markup
    kind: MarkupKind,
    /// The content itself
    value: []const u8,
};

/// The kind of a completion entry.
pub const CompletionItemKind = enum(i64) {
    Text = 1,
    Method = 2,
    Function = 3,
    Constructor = 4,
    Field = 5,
    Variable = 6,
    Class = 7,
    Interface = 8,
    Module = 9,
    Property = 10,
    Unit = 11,
    Value = 12,
    Enum = 13,
    Keyword = 14,
    Snippet = 15,
    Color = 16,
    File = 17,
    Reference = 18,
    Folder = 19,
    EnumMember = 20,
    Constant = 21,
    Struct = 22,
    Event = 23,
    Operator = 24,
    TypeParameter = 25,
    usingnamespace IntBackedEnumStringify(@This());
};

/// Defines whether the insert text in a completion item should be interpreted as
/// plain text or a snippet.
pub const InsertTextFormat = enum(i64) {
    PlainText = 1,
    Snippet = 2,
    usingnamespace IntBackedEnumStringify(@This());
};

/// Completion item tags are extra annotations that tweak the rendering of a completion
/// item.
pub const CompletionItemTag = enum(i64) {
    Deprecated = 1,
    usingnamespace IntBackedEnumStringify(@This());
};

/// A special text edit to provide an insert and a replace operation.
pub const InsertReplaceEdit = struct {
    /// The string to be inserted.
    newText: []const u8,
    /// The range if the insert is requested
    insert: Range,
    /// The range if the replace is requested.
replace: Range,
};

/// How whitespace and indentation is handled during completion
/// item insertion.
pub const InsertTextMode = enum(i64) {
    asIs = 1,
    adjustIndentation = 2,
    usingnamespace IntBackedEnumStringify(@This());
};

/// A completion item represents a text snippet that is
/// proposed to complete text that is being typed.
pub const CompletionItem = struct {
    /// The label of this completion item. By default
    /// also the text that is inserted when selecting
    /// this completion.
    label: []const u8,
    /// The kind of this completion item. Based of the kind
    /// an icon is chosen by the editor.
    kind: Undefinedable(CompletionItemKind),
    /// Tags for this completion item.
    // FIX: was `Undefinedable([]1)` — `1` is not a type. Per the LSP spec
    // (CompletionItem.tags?: CompletionItemTag[]) and the parallel
    // `tags: Undefinedable([]DiagnosticTag)` field on Diagnostic, the element
    // type is CompletionItemTag.
    tags: Undefinedable([]CompletionItemTag),
    /// A human-readable string with additional information
    /// about this item, like type or symbol information.
    detail: Undefinedable([]const u8),
    /// A human-readable string that represents a doc-comment.
    documentation: Undefinedable(union(enum) {
        string: []const u8,
        MarkupContent: MarkupContent,
    }),
    /// Indicates if this item is deprecated.
    deprecated: Undefinedable(bool),
    /// Select this item when showing.
    preselect: Undefinedable(bool),
    /// A string that should be used when comparing this item
    /// with other items. When `falsy` the [label](#CompletionItem.label)
    /// is used.
    sortText: Undefinedable([]const u8),
    /// A string that should be used when filtering a set of
    /// completion items. When `falsy` the [label](#CompletionItem.label)
    /// is used.
    filterText: Undefinedable([]const u8),
    /// A string that should be inserted into a document when selecting
    /// this completion. When `falsy` the [label](#CompletionItem.label)
    /// is used.
    insertText: Undefinedable([]const u8),
    /// The format of the insert text. The format applies to both the `insertText` property
    /// and the `newText` property of a provided `textEdit`. If omitted defaults to
    /// `InsertTextFormat.PlainText`.
    insertTextFormat: Undefinedable(InsertTextFormat),
    /// How whitespace and indentation is handled during completion
    /// item insertion. If ignored the clients default value depends on
    /// the `textDocument.completion.insertTextMode` client capability.
    insertTextMode: Undefinedable(InsertTextMode),
    /// An [edit](#TextEdit) which is applied to a document when selecting
    /// this completion. When an edit is provided the value of
    /// [insertText](#CompletionItem.insertText) is ignored.
    textEdit: Undefinedable(union(enum) {
        TextEdit: TextEdit,
        InsertReplaceEdit: InsertReplaceEdit,
    }),
    /// An optional array of additional [text edits](#TextEdit) that are applied when
    /// selecting this completion. Edits must not overlap (including the same insert position)
    /// with the main [edit](#CompletionItem.textEdit) nor with themselves.
    additionalTextEdits: Undefinedable([]TextEdit),
    /// An optional set of characters that when pressed while this completion is active will accept it first and
    /// then type that character. *Note* that all commit characters should have `length=1` and that superfluous
    /// characters will be ignored.
    commitCharacters: Undefinedable([][]const u8),
    /// An optional [command](#Command) that is executed *after* inserting this completion. *Note* that
    /// additional modifications to the current document should be described with the
    /// [additionalTextEdits](#CompletionItem.additionalTextEdits)-property.
    command: Undefinedable(Command),
    /// A data entry field that is preserved on a completion item between
    /// a [CompletionRequest](#CompletionRequest) and a [CompletionResolveRequest]
    /// (#CompletionResolveRequest)
    data: Undefinedable(std.json.Value),
};

/// Represents a collection of [completion items](#CompletionItem) to be presented
/// in the editor.
pub const CompletionList = struct {
    /// This list it not complete. Further typing results in recomputing this list.
    isIncomplete: bool,
    /// The completion items.
    items: []CompletionItem,
};

/// MarkedString can be used to render human readable text. It is either a markdown string
/// or a code-block that provides a language and a code snippet. The language identifier
/// is semantically equal to the optional language identifier in fenced code blocks in GitHub
/// issues. See https://help.github.com/articles/creating-and-highlighting-code-blocks/#syntax-highlighting
pub const MarkedString = union(enum) {
    string: []const u8,
    reflection: struct {
        language: []const u8,
        value: []const u8,
    },
};

/// The result of a hover request.
pub const Hover = struct {
    /// The hover's content
    contents: union(enum) {
        MarkupContent: MarkupContent,
        MarkedString: MarkedString,
        array: []MarkedString,
    },
    /// An optional range
    range: Undefinedable(Range),
};

/// Represents a parameter of a callable-signature. A parameter can
/// have a label and a doc-comment.
pub const ParameterInformation = struct {
    /// The label of this parameter information.
    label: union(enum) {
        string: []const u8,
        tuple: Tuple(&[_]type{
            i64,
            i64,
        }),
    },
    /// The human-readable doc-comment of this signature. Will be shown
    /// in the UI but can be omitted.
    documentation: Undefinedable(union(enum) {
        string: []const u8,
        MarkupContent: MarkupContent,
    }),
};

/// Represents the signature of something callable. A signature
/// can have a label, like a function-name, a doc-comment, and
/// a set of parameters.
pub const SignatureInformation = struct {
    /// The label of this signature. Will be shown in
    /// the UI.
    label: []const u8,
    /// The human-readable doc-comment of this signature. Will be shown
    /// in the UI but can be omitted.
    documentation: Undefinedable(union(enum) {
        string: []const u8,
        MarkupContent: MarkupContent,
    }),
    /// The parameters of this signature.
    parameters: Undefinedable([]ParameterInformation),
    /// The index of the active parameter.
    activeParameter: Undefinedable(i64),
};

/// Signature help represents the signature of something
/// callable.
There can be multiple signature but only one /// active and only one active parameter. pub const SignatureHelp = struct { /// One or more signatures. signatures: []SignatureInformation, /// The active signature. Set to `null` if no /// signatures exist. activeSignature: ?i64, /// The active parameter of the active signature. Set to `null` /// if the active signature has no parameters. activeParameter: ?i64, }; /// The definition of a symbol represented as one or many [locations](#Location). /// For most programming languages there is only one location at which a symbol is /// defined. pub const Definition = union(enum) { Location: Location, array: []Location, }; /// Information about where a symbol is defined. pub const DefinitionLink = LocationLink; /// The declaration of a symbol representation as one or many [locations](#Location). pub const Declaration = union(enum) { Location: Location, array: []Location, }; /// Information about where a symbol is declared. pub const DeclarationLink = LocationLink; /// Value-object that contains additional information when /// requesting references. pub const ReferenceContext = struct { /// Include the declaration of the current symbol. includeDeclaration: bool, }; /// A document highlight kind. pub const DocumentHighlightKind = enum(i64) { Text = 1, Read = 2, Write = 3, usingnamespace IntBackedEnumStringify(@This()); }; /// A document highlight is a range inside a text document which deserves /// special attention. Usually a document highlight is visualized by changing /// the background color of its range. pub const DocumentHighlight = struct { /// The range this highlight applies to. range: Range, /// The highlight kind, default is [text](#DocumentHighlightKind.Text). kind: Undefinedable(DocumentHighlightKind), }; /// A symbol kind. 
pub const SymbolKind = enum(i64) { File = 1, Module = 2, Namespace = 3, Package = 4, Class = 5, Method = 6, Property = 7, Field = 8, Constructor = 9, Enum = 10, Interface = 11, Function = 12, Variable = 13, Constant = 14, String = 15, Number = 16, Boolean = 17, Array = 18, Object = 19, Key = 20, Null = 21, EnumMember = 22, Struct = 23, Event = 24, Operator = 25, TypeParameter = 26, usingnamespace IntBackedEnumStringify(@This()); }; /// Symbol tags are extra annotations that tweak the rendering of a symbol. pub const SymbolTag = enum(i64) { Deprecated = 1, usingnamespace IntBackedEnumStringify(@This()); }; /// Represents information about programming constructs like variables, classes, /// interfaces etc. pub const SymbolInformation = struct { /// The name of this symbol. name: []const u8, /// The kind of this symbol. kind: SymbolKind, /// Tags for this completion item. tags: Undefinedable([]1), /// Indicates if this symbol is deprecated. deprecated: Undefinedable(bool), /// The location of this symbol. The location's range is used by a tool /// to reveal the location in the editor. If the symbol is selected in the /// tool the range's start information is used to position the cursor. So /// the range usually spans more than the actual symbol's name and does /// normally include thinks like visibility modifiers. location: Location, /// The name of the symbol containing this symbol. This information is for /// user interface purposes (e.g. to render a qualifier in the user interface /// if necessary). It can't be used to re-infer a hierarchy for the document /// symbols. containerName: Undefinedable([]const u8), }; /// Represents programming constructs like variables, classes, interfaces etc. /// that appear in a document. Document symbols can be hierarchical and they /// have two ranges: one that encloses its definition and one that points to /// its most interesting range, e.g. the range of an identifier. 
pub const DocumentSymbol = struct { /// The name of this symbol. Will be displayed in the user interface and therefore must not be /// an empty string or a string only consisting of white spaces. name: []const u8, /// More detail for this symbol, e.g the signature of a function. detail: Undefinedable([]const u8), /// The kind of this symbol. kind: SymbolKind, /// Tags for this completion item. tags: Undefinedable([]1), /// Indicates if this symbol is deprecated. deprecated: Undefinedable(bool), /// The range enclosing this symbol not including leading/trailing whitespace but everything else /// like comments. This information is typically used to determine if the the clients cursor is /// inside the symbol to reveal in the symbol in the UI. range: Range, /// The range that should be selected and revealed when this symbol is being picked, e.g the name of a function. /// Must be contained by the the `range`. selectionRange: Range, /// Children of this symbol, e.g. properties of a class. children: Undefinedable([]DocumentSymbol), }; /// A set of predefined code action kinds pub const CodeActionKind = struct { /// Empty kind. pub const Empty = CodeActionKind; /// Base kind for quickfix actions: 'quickfix' pub const QuickFix = CodeActionKind; /// Base kind for refactoring actions: 'refactor' pub const Refactor = CodeActionKind; /// Base kind for refactoring extraction actions: 'refactor.extract' pub const RefactorExtract = CodeActionKind; /// Base kind for refactoring inline actions: 'refactor.inline' pub const RefactorInline = CodeActionKind; /// Base kind for refactoring rewrite actions: 'refactor.rewrite' pub const RefactorRewrite = CodeActionKind; /// Base kind for source actions: `source` pub const Source = CodeActionKind; /// Base kind for an organize imports source action: `source.organizeImports` pub const SourceOrganizeImports = CodeActionKind; /// Base kind for auto-fix source actions: `source.fixAll`. 
pub const SourceFixAll = CodeActionKind; }; /// Contains additional diagnostic information about the context in which /// a [code action](#CodeActionProvider.provideCodeActions) is run. pub const CodeActionContext = struct { /// An array of diagnostics known on the client side overlapping the range provided to the /// `textDocument/codeAction` request. They are provided so that the server knows which /// errors are currently presented to the user for the given range. There is no guarantee /// that these accurately reflect the error state of the resource. The primary parameter /// to compute code actions is the provided range. diagnostics: []Diagnostic, /// Requested kind of actions to return. only: Undefinedable([][]const u8), }; /// A code action represents a change that can be performed in code, e.g. to fix a problem or /// to refactor code. pub const CodeAction = struct { /// A short, human-readable, title for this code action. title: []const u8, /// The kind of the code action. kind: Undefinedable([]const u8), /// The diagnostics that this code action resolves. diagnostics: Undefinedable([]Diagnostic), /// Marks this as a preferred action. Preferred actions are used by the `auto fix` command and can be targeted /// by keybindings. isPreferred: Undefinedable(bool), /// Marks that the code action cannot currently be applied. disabled: Undefinedable(struct { /// Human readable description of why the code action is currently disabled. reason: []const u8, }), /// The workspace edit this code action performs. edit: Undefinedable(WorkspaceEdit), /// A command this code action executes. If a code action /// provides a edit and a command, first the edit is /// executed and then the command. command: Undefinedable(Command), /// A data entry field that is preserved on a code action between /// a `textDocument/codeAction` and a `codeAction/resolve` request. 
data: Undefinedable(std.json.Value), }; /// A code lens represents a [command](#Command) that should be shown along with /// source text, like the number of references, a way to run tests, etc. pub const CodeLens = struct { /// The range in which this code lens is valid. Should only span a single line. range: Range, /// The command this code lens represents. command: Undefinedable(Command), /// A data entry field that is preserved on a code lens item between /// a [CodeLensRequest](#CodeLensRequest) and a [CodeLensResolveRequest] /// (#CodeLensResolveRequest) data: Undefinedable(std.json.Value), }; /// Value-object describing what options formatting should use. pub const FormattingOptions = struct { /// Size of a tab in spaces. tabSize: i64, /// Prefer spaces over tabs. insertSpaces: bool, /// Trim trailing whitespaces on a line. trimTrailingWhitespace: Undefinedable(bool), /// Insert a newline character at the end of the file if one does not exist. insertFinalNewline: Undefinedable(bool), /// Trim all newlines after the final newline at the end of the file. trimFinalNewlines: Undefinedable(bool), }; /// A document link is a range in a text document that links to an internal or external resource, like another /// text document or a web site. pub const DocumentLink = struct { /// The range this link applies to. range: Range, /// The uri this link points to. target: Undefinedable([]const u8), /// The tooltip text when you hover over this link. tooltip: Undefinedable([]const u8), /// A data entry field that is preserved on a document link between a /// DocumentLinkRequest and a DocumentLinkResolveRequest. data: Undefinedable(std.json.Value), }; /// A selection range represents a part of a selection hierarchy. A selection range /// may have a parent selection range that contains it. pub const SelectionRange = struct { /// The [range](#Range) of this selection range. range: Range, /// The parent selection range containing this range. 
Therefore `parent.range` must contain `this.range`. parent: Undefinedable(SelectionRange), }; /// Represents programming constructs like functions or constructors in the context /// of call hierarchy. pub const CallHierarchyItem = struct { /// The name of this item. name: []const u8, /// The kind of this item. kind: SymbolKind, /// Tags for this item. tags: Undefinedable([]1), /// More detail for this item, e.g. the signature of a function. detail: Undefinedable([]const u8), /// The resource identifier of this item. uri: []const u8, /// The range enclosing this symbol not including leading/trailing whitespace but everything else, e.g. comments and code. range: Range, /// The range that should be selected and revealed when this symbol is being picked, e.g. the name of a function. /// Must be contained by the [`range`](#CallHierarchyItem.range). selectionRange: Range, /// A data entry field that is preserved between a call hierarchy prepare and /// incoming calls or outgoing calls requests. data: Undefinedable(std.json.Value), }; /// Represents an incoming call, e.g. a caller of a method or constructor. pub const CallHierarchyIncomingCall = struct { /// The item that makes the call. from: CallHierarchyItem, /// The ranges at which the calls appear. This is relative to the caller /// denoted by [`this.from`](#CallHierarchyIncomingCall.from). fromRanges: []Range, }; /// Represents an outgoing call, e.g. calling a getter from a method or a method from a constructor etc. pub const CallHierarchyOutgoingCall = struct { /// The item that is called. to: CallHierarchyItem, /// The range at which this item is called. This is the range relative to the caller, e.g the item /// passed to [`provideCallHierarchyOutgoingCalls`](#CallHierarchyItemProvider.provideCallHierarchyOutgoingCalls) /// and not [`this.to`](#CallHierarchyOutgoingCall.to). fromRanges: []Range, }; pub const EOL = [][]const u8; /// A simple text document. Not to be implemented. 
The document keeps the content /// as string. pub const TextDocument = struct { /// The associated URI for this document. Most documents have the __file__-scheme, indicating that they /// represent files on disk. However, some documents may have other schemes indicating that they are not /// available on disk. uri: []const u8, /// The identifier of the language associated with this document. languageId: []const u8, /// The version number of this document (it will increase after each /// change, including undo/redo). version: i64, /// The number of lines in this document. lineCount: i64, }; pub const WorkspaceFoldersClientCapabilities = struct { /// The workspace client capabilities workspace: Undefinedable(struct { /// The client has support for workspace folders workspaceFolders: Undefinedable(bool), }), }; pub const ConfigurationClientCapabilities = struct { /// The workspace client capabilities workspace: Undefinedable(struct { /// The client supports `workspace/configuration` requests. configuration: Undefinedable(bool), }), }; pub const WorkDoneProgressClientCapabilities = struct { /// Window specific client capabilities. window: Undefinedable(struct { /// Whether client supports server initiated progress using the /// `window/workDoneProgress/create` request. workDoneProgress: Undefinedable(bool), }), }; pub const WorkspaceFoldersServerCapabilities = struct { /// The workspace server capabilities workspace: Undefinedable(struct { workspaceFolders: Undefinedable(struct { /// Whether the server wants to receive workspace folder /// change notifications. changeNotifications: Undefinedable(union(enum) { string: []const u8, boolean: bool, }), /// The Server has support for workspace folders supported: Undefinedable(bool), }), }), }; pub const WorkspaceFoldersInitializeParams = struct { /// The actual configured workspace folders. 
workspaceFolders: ?[]WorkspaceFolder, }; pub const ProgressToken = union(enum) { number: i64, string: []const u8, }; pub const SemanticTokensWorkspaceClientCapabilities = struct { /// Whether the client implementation supports a refresh request sent from /// the server to the client. refreshSupport: Undefinedable(bool), }; /// Since 3.6.0 pub const TypeDefinitionClientCapabilities = struct { /// Whether implementation supports dynamic registration. If this is set to `true` /// the client supports the new `TypeDefinitionRegistrationOptions` return value /// for the corresponding server capability as well. dynamicRegistration: Undefinedable(bool), /// The client supports additional metadata in the form of definition links. linkSupport: Undefinedable(bool), }; pub const ImplementationClientCapabilities = struct { /// Whether implementation supports dynamic registration. If this is set to `true` /// the client supports the new `ImplementationRegistrationOptions` return value /// for the corresponding server capability as well. dynamicRegistration: Undefinedable(bool), /// The client supports additional metadata in the form of definition links. linkSupport: Undefinedable(bool), }; pub const DocumentColorClientCapabilities = struct { /// Whether implementation supports dynamic registration. If this is set to `true` /// the client supports the new `DocumentColorRegistrationOptions` return value /// for the corresponding server capability as well. dynamicRegistration: Undefinedable(bool), }; /// A filter to describe in which file operation requests or notifications /// the server is interested in. pub const FileOperationFilter = struct { /// A Uri like `file` or `untitled`. scheme: Undefinedable([]const u8), /// The actual file operation pattern. pattern: FileOperationPattern, }; /// Defines a CancellationToken. This interface is not /// intended to be implemented. A CancellationToken must /// be created via a CancellationTokenSource. 
pub const CancellationToken = struct { /// Is `true` when the token has been cancelled, `false` otherwise. isCancellationRequested: bool, /// An [event](#Event) which fires upon cancellation. onCancellationRequested: Event( std.json.Value, ), }; pub fn Thenable(comptime T: type) type { return struct {}; } /// A pattern to describe in which file operation requests or notifications /// the server is interested in. pub const FileOperationPattern = struct { /// The glob pattern to match. Glob patterns can have the following syntax: /// - `*` to match one or more characters in a path segment /// - `?` to match on one character in a path segment /// - `**` to match any number of path segments, including none /// - `{}` to group conditions (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files) /// - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …) /// - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`) glob: []const u8, /// Whether to match files or folders with this pattern. matches: Undefinedable(FileOperationPatternKind), /// Additional options used during matching. options: Undefinedable(FileOperationPatternOptions), }; pub fn Event(comptime T: type) type { return struct {}; } /// A generic resource operation. pub const ResourceOperation = struct { /// The resource operation kind. kind: []const u8, /// An optional annotation identifier describing the operation. annotationId: Undefinedable([]const u8), }; pub const Disposable = struct {}; pub fn ResponseErrorLiteral(comptime D: type) type { return struct { /// A number indicating the error type that occured. code: i64, /// A string providing a short decription of the error. message: []const u8, /// A Primitive or Structured value that contains additional /// information about the error. Can be omitted. 
data: Undefinedable(D), }; } /// A document filter denotes a document by different properties like /// the [language](#TextDocument.languageId), the [scheme](#Uri.scheme) of /// its resource, or a glob-pattern that is applied to the [path](#TextDocument.fileName). pub const DocumentFilter = union(enum) { reflection: struct { /// A language id, like `typescript`. language: []const u8, /// A glob pattern, like `*.{ts,js}`. pattern: Undefinedable([]const u8), /// A Uri [scheme](#Uri.scheme), like `file` or `untitled`. scheme: Undefinedable([]const u8), }, reflection: struct { /// A language id, like `typescript`. language: Undefinedable([]const u8), /// A glob pattern, like `*.{ts,js}`. pattern: Undefinedable([]const u8), /// A Uri [scheme](#Uri.scheme), like `file` or `untitled`. scheme: []const u8, }, reflection: struct { /// A language id, like `typescript`. language: Undefinedable([]const u8), /// A glob pattern, like `*.{ts,js}`. pattern: []const u8, /// A Uri [scheme](#Uri.scheme), like `file` or `untitled`. scheme: Undefinedable([]const u8), }, }; /// A document selector is the combination of one or many document filters. pub const DocumentSelector = []union(enum) { string: []const u8, DocumentFilter: DocumentFilter, }; /// General parameters to to register for an notification or to register a provider. pub const Registration = struct { /// The id used to register the request. The id can be used to deregister /// the request again. id: []const u8, /// The method to register for. method: []const u8, /// Options necessary for the registration. registerOptions: Undefinedable(std.json.Value), }; pub const RegistrationParams = struct { registrations: []Registration, }; /// The `client/registerCapability` request is sent from the server to the client to register a new capability /// handler on the client side. 
pub const RegistrationRequest = struct { comptime method: []const u8 = "client/registerCapability", id: RequestId, params: RegistrationParams, }; /// General parameters to unregister a request or notification. pub const Unregistration = struct { /// The id used to unregister the request or notification. Usually an id /// provided during the register request. id: []const u8, /// The method to unregister for. method: []const u8, }; pub const UnregistrationParams = struct { unregisterations: []Unregistration, }; /// The `client/unregisterCapability` request is sent from the server to the client to unregister a previously registered capability /// handler on the client side. pub const UnregistrationRequest = struct { comptime method: []const u8 = "client/unregisterCapability", id: RequestId, params: UnregistrationParams, }; pub const WorkDoneProgressParams = struct { /// An optional token that a server can use to report work done progress. workDoneToken: Undefinedable(ProgressToken), }; pub const PartialResultParams = struct { /// An optional token that a server can use to report partial results (e.g. streaming) to /// the client. partialResultToken: Undefinedable(ProgressToken), }; /// A parameter literal used in requests to pass a text document and a position inside that /// document. pub const TextDocumentPositionParams = struct { /// The text document. textDocument: TextDocumentIdentifier, /// The position inside the text document. position: Position, }; pub const ResourceOperationKind = struct { /// Supports creating new files and folders. pub const Create = ResourceOperationKind; /// Supports renaming existing files and folders. pub const Rename = ResourceOperationKind; /// Supports deleting existing files and folders. pub const Delete = ResourceOperationKind; }; pub const FailureHandlingKind = struct { /// Applying the workspace change is simply aborted if one of the changes provided /// fails. All operations executed before the failing operation stay executed. 
pub const Abort = FailureHandlingKind; /// All operations are executed transactional. That means they either all /// succeed or no changes at all are applied to the workspace. pub const Transactional = FailureHandlingKind; /// If the workspace edit contains only textual file changes they are executed transactional. /// If resource changes (create, rename or delete file) are part of the change the failure /// handling strategy is abort. pub const TextOnlyTransactional = FailureHandlingKind; /// The client tries to undo the operations already executed. But there is no /// guarantee that this is succeeding. pub const Undo = FailureHandlingKind; }; /// Workspace specific client capabilities. pub const WorkspaceClientCapabilities = struct { /// The client supports applying batch edits /// to the workspace by supporting the request /// 'workspace/applyEdit' applyEdit: Undefinedable(bool), /// Capabilities specific to `WorkspaceEdit`s workspaceEdit: Undefinedable(WorkspaceEditClientCapabilities), /// Capabilities specific to the `workspace/didChangeConfiguration` notification. didChangeConfiguration: Undefinedable(DidChangeConfigurationClientCapabilities), /// Capabilities specific to the `workspace/didChangeWatchedFiles` notification. didChangeWatchedFiles: Undefinedable(DidChangeWatchedFilesClientCapabilities), /// Capabilities specific to the `workspace/symbol` request. symbol: Undefinedable(WorkspaceSymbolClientCapabilities), /// Capabilities specific to the `workspace/executeCommand` request. executeCommand: Undefinedable(ExecuteCommandClientCapabilities), /// Capabilities specific to the semantic token requests scoped to the /// workspace. semanticTokens: Undefinedable(SemanticTokensWorkspaceClientCapabilities), /// Capabilities specific to the code lens requests scoped to the /// workspace. codeLens: Undefinedable(CodeLensWorkspaceClientCapabilities), /// The client has support for file notifications/requests for user operations on files. 
fileOperations: Undefinedable(FileOperationClientCapabilities), }; /// Text document specific client capabilities. pub const TextDocumentClientCapabilities = struct { /// Defines which synchronization capabilities the client supports. synchronization: Undefinedable(TextDocumentSyncClientCapabilities), /// Capabilities specific to the `textDocument/completion` completion: Undefinedable(CompletionClientCapabilities), /// Capabilities specific to the `textDocument/hover` hover: Undefinedable(HoverClientCapabilities), /// Capabilities specific to the `textDocument/signatureHelp` signatureHelp: Undefinedable(SignatureHelpClientCapabilities), /// Capabilities specific to the `textDocument/declaration` declaration: Undefinedable(DeclarationClientCapabilities), /// Capabilities specific to the `textDocument/definition` definition: Undefinedable(DefinitionClientCapabilities), /// Capabilities specific to the `textDocument/typeDefinition` typeDefinition: Undefinedable(TypeDefinitionClientCapabilities), /// Capabilities specific to the `textDocument/implementation` implementation: Undefinedable(ImplementationClientCapabilities), /// Capabilities specific to the `textDocument/references` references: Undefinedable(ReferenceClientCapabilities), /// Capabilities specific to the `textDocument/documentHighlight` documentHighlight: Undefinedable(DocumentHighlightClientCapabilities), /// Capabilities specific to the `textDocument/documentSymbol` documentSymbol: Undefinedable(DocumentSymbolClientCapabilities), /// Capabilities specific to the `textDocument/codeAction` codeAction: Undefinedable(CodeActionClientCapabilities), /// Capabilities specific to the `textDocument/codeLens` codeLens: Undefinedable(CodeLensClientCapabilities), /// Capabilities specific to the `textDocument/documentLink` documentLink: Undefinedable(DocumentLinkClientCapabilities), /// Capabilities specific to the `textDocument/documentColor` colorProvider: Undefinedable(DocumentColorClientCapabilities), /// 
Capabilities specific to the `textDocument/formatting` formatting: Undefinedable(DocumentFormattingClientCapabilities), /// Capabilities specific to the `textDocument/rangeFormatting` rangeFormatting: Undefinedable(DocumentRangeFormattingClientCapabilities), /// Capabilities specific to the `textDocument/onTypeFormatting` onTypeFormatting: Undefinedable(DocumentOnTypeFormattingClientCapabilities), /// Capabilities specific to the `textDocument/rename` rename: Undefinedable(RenameClientCapabilities), /// Capabilities specific to `textDocument/foldingRange` request. foldingRange: Undefinedable(FoldingRangeClientCapabilities), /// Capabilities specific to `textDocument/selectionRange` request. selectionRange: Undefinedable(SelectionRangeClientCapabilities), /// Capabilities specific to `textDocument/publishDiagnostics` notification. publishDiagnostics: Undefinedable(PublishDiagnosticsClientCapabilities), /// Capabilities specific to the various call hierarchy request. callHierarchy: Undefinedable(CallHierarchyClientCapabilities), /// Capabilities specific to the various semantic token request. semanticTokens: Undefinedable(SemanticTokensClientCapabilities), /// Capabilities specific to the linked editing range request. linkedEditingRange: Undefinedable(LinkedEditingRangeClientCapabilities), /// Client capabilities specific to the moniker request. moniker: MonikerClientCapabilities, }; pub const WindowClientCapabilities = struct { /// Whether client supports handling progress notifications. If set /// servers are allowed to report in `workDoneProgress` property in the /// request specific server capabilities. workDoneProgress: Undefinedable(bool), /// Capabilities specific to the showMessage request. showMessage: Undefinedable(ShowMessageRequestClientCapabilities), /// Capabilities specific to the showDocument request. showDocument: Undefinedable(ShowDocumentClientCapabilities), }; /// Client capabilities specific to regular expressions. 
pub const RegularExpressionsClientCapabilities = struct {
    /// The engine's name.
    engine: []const u8,
    /// The engine's version.
    version: Undefinedable([]const u8),
};

/// Client capabilities specific to the used markdown parser.
pub const MarkdownClientCapabilities = struct {
    /// The name of the parser.
    parser: []const u8,
    /// The version of the parser.
    version: Undefinedable([]const u8),
};

/// General client capabilities.
pub const GeneralClientCapabilities = struct {
    /// Client capabilities specific to regular expressions.
    regularExpressions: Undefinedable(RegularExpressionsClientCapabilities),
    /// Client capabilities specific to the client's markdown parser.
    markdown: Undefinedable(MarkdownClientCapabilities),
};

/// The capabilities a client (editor or tool) advertises in the
/// `initialize` request.
pub const ClientCapabilities = struct {
    /// Workspace specific client capabilities.
    ///
    /// NOTE(review): the generator originally emitted two additional anonymous
    /// `workspace` fields here (`workspaceFolders: bool` and
    /// `configuration: bool`); duplicate field names are a compile error in
    /// Zig, so the duplicates were dropped. The equivalent flags are expected
    /// to live on `WorkspaceClientCapabilities` — confirm against that
    /// declaration (it is outside this chunk).
    workspace: Undefinedable(WorkspaceClientCapabilities),
    /// Text document specific client capabilities.
    textDocument: Undefinedable(TextDocumentClientCapabilities),
    /// Window specific client capabilities.
    ///
    /// NOTE(review): a duplicate anonymous `window` field
    /// (`workDoneProgress: bool`) was likewise dropped;
    /// `WindowClientCapabilities` already declares `workDoneProgress`.
    window: Undefinedable(WindowClientCapabilities),
    /// General client capabilities.
    general: Undefinedable(GeneralClientCapabilities),
    /// Experimental client capabilities.
    experimental: Undefinedable(std.json.ObjectMap),
};

/// Static registration options to be returned in the initialize request.
pub const StaticRegistrationOptions = struct {
    /// The id used to register the request. The id can be used to deregister
    /// the request again. See also Registration#id.
    id: Undefinedable([]const u8),
};

/// General text document registration options.
pub const TextDocumentRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
};

/// Save options.
pub const SaveOptions = struct {
    /// The client is supposed to include the content on save.
    includeText: Undefinedable(bool),
};

pub const WorkDoneProgressOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// The capabilities a language server advertises in the `initialize` response.
///
/// NOTE(review): this was emitted as `pub const ServerCapabilities = struct`
/// while referencing an undeclared `T` for `experimental`, and
/// `InitializeResult` below already instantiates it as `ServerCapabilities(T)`.
/// Declaring it as a type-returning function is the only form consistent with
/// both uses.
pub fn ServerCapabilities(comptime T: type) type {
    return struct {
        /// Defines how text documents are synced. Is either a detailed structure defining each notification or
        /// for backwards compatibility the TextDocumentSyncKind number.
        textDocumentSync: Undefinedable(union(enum) {
            TextDocumentSyncOptions: TextDocumentSyncOptions,
            TextDocumentSyncKind: TextDocumentSyncKind,
        }),
        /// The server provides completion support.
        completionProvider: Undefinedable(CompletionOptions),
        /// The server provides hover support.
        hoverProvider: Undefinedable(union(enum) {
            boolean: bool,
            HoverOptions: HoverOptions,
        }),
        /// The server provides signature help support.
        signatureHelpProvider: Undefinedable(SignatureHelpOptions),
        /// The server provides Goto Declaration support.
        declarationProvider: Undefinedable(union(enum) {
            boolean: bool,
            DeclarationOptions: DeclarationOptions,
            DeclarationRegistrationOptions: DeclarationRegistrationOptions,
        }),
        /// The server provides goto definition support.
        definitionProvider: Undefinedable(union(enum) {
            boolean: bool,
            DefinitionOptions: DefinitionOptions,
        }),
        /// The server provides Goto Type Definition support.
        typeDefinitionProvider: Undefinedable(union(enum) {
            boolean: bool,
            TypeDefinitionOptions: TypeDefinitionOptions,
            TypeDefinitionRegistrationOptions: TypeDefinitionRegistrationOptions,
        }),
        /// The server provides Goto Implementation support.
        implementationProvider: Undefinedable(union(enum) {
            boolean: bool,
            ImplementationOptions: ImplementationOptions,
            ImplementationRegistrationOptions: ImplementationRegistrationOptions,
        }),
        /// The server provides find references support.
        referencesProvider: Undefinedable(union(enum) {
            boolean: bool,
            ReferenceOptions: ReferenceOptions,
        }),
        /// The server provides document highlight support.
        documentHighlightProvider: Undefinedable(union(enum) {
            boolean: bool,
            DocumentHighlightOptions: DocumentHighlightOptions,
        }),
        /// The server provides document symbol support.
        documentSymbolProvider: Undefinedable(union(enum) {
            boolean: bool,
            DocumentSymbolOptions: DocumentSymbolOptions,
        }),
        /// The server provides code actions. CodeActionOptions may only be
        /// specified if the client states that it supports
        /// `codeActionLiteralSupport` in its initial `initialize` request.
        codeActionProvider: Undefinedable(union(enum) {
            boolean: bool,
            CodeActionOptions: CodeActionOptions,
        }),
        /// The server provides code lens.
        codeLensProvider: Undefinedable(CodeLensOptions),
        /// The server provides document link support.
        documentLinkProvider: Undefinedable(DocumentLinkOptions),
        /// The server provides color provider support.
        colorProvider: Undefinedable(union(enum) {
            boolean: bool,
            DocumentColorOptions: DocumentColorOptions,
            DocumentColorRegistrationOptions: DocumentColorRegistrationOptions,
        }),
        /// The server provides workspace symbol support.
        workspaceSymbolProvider: Undefinedable(union(enum) {
            boolean: bool,
            WorkspaceSymbolOptions: WorkspaceSymbolOptions,
        }),
        /// The server provides document formatting.
        documentFormattingProvider: Undefinedable(union(enum) {
            boolean: bool,
            DocumentFormattingOptions: DocumentFormattingOptions,
        }),
        /// The server provides document range formatting.
        documentRangeFormattingProvider: Undefinedable(union(enum) {
            boolean: bool,
            DocumentRangeFormattingOptions: DocumentRangeFormattingOptions,
        }),
        /// The server provides document formatting on typing.
        documentOnTypeFormattingProvider: Undefinedable(DocumentOnTypeFormattingOptions),
        /// The server provides rename support. RenameOptions may only be
        /// specified if the client states that it supports
        /// `prepareSupport` in its initial `initialize` request.
        renameProvider: Undefinedable(union(enum) {
            boolean: bool,
            RenameOptions: RenameOptions,
        }),
        /// The server provides folding provider support.
        foldingRangeProvider: Undefinedable(union(enum) {
            boolean: bool,
            FoldingRangeOptions: FoldingRangeOptions,
            FoldingRangeRegistrationOptions: FoldingRangeRegistrationOptions,
        }),
        /// The server provides selection range support.
        selectionRangeProvider: Undefinedable(union(enum) {
            boolean: bool,
            SelectionRangeOptions: SelectionRangeOptions,
            SelectionRangeRegistrationOptions: SelectionRangeRegistrationOptions,
        }),
        /// The server provides execute command support.
        executeCommandProvider: Undefinedable(ExecuteCommandOptions),
        /// The server provides call hierarchy support.
        callHierarchyProvider: Undefinedable(union(enum) {
            boolean: bool,
            CallHierarchyOptions: CallHierarchyOptions,
            CallHierarchyRegistrationOptions: CallHierarchyRegistrationOptions,
        }),
        /// The server provides linked editing range support.
        linkedEditingRangeProvider: Undefinedable(union(enum) {
            boolean: bool,
            LinkedEditingRangeOptions: LinkedEditingRangeOptions,
            LinkedEditingRangeRegistrationOptions: LinkedEditingRangeRegistrationOptions,
        }),
        /// The server provides semantic tokens support.
        semanticTokensProvider: Undefinedable(union(enum) {
            SemanticTokensOptions: SemanticTokensOptions,
            SemanticTokensRegistrationOptions: SemanticTokensRegistrationOptions,
        }),
        /// The server provides moniker support.
        monikerProvider: Undefinedable(union(enum) {
            boolean: bool,
            MonikerOptions: MonikerOptions,
            MonikerRegistrationOptions: MonikerRegistrationOptions,
        }),
        /// Experimental server capabilities.
        experimental: Undefinedable(T),
        /// Workspace specific server capabilities.
        ///
        /// NOTE(review): the generator emitted two separate `workspace` fields
        /// (one carrying `fileOperations`, one carrying `workspaceFolders`);
        /// duplicate field names do not compile, so both payloads are merged
        /// into this single struct.
        workspace: Undefinedable(struct {
            /// The server is interested in notifications/requests for operations on files.
            fileOperations: Undefinedable(FileOperationOptions),
            workspaceFolders: Undefinedable(struct {
                /// Whether the server wants to receive workspace folder
                /// change notifications.
                changeNotifications: Undefinedable(union(enum) {
                    string: []const u8,
                    boolean: bool,
                }),
                /// The Server has support for workspace folders
                supported: Undefinedable(bool),
            }),
        }),
    };
}

/// The initialize request is sent from the client to the server.
/// It is sent once as the request after starting up the server.
/// The requests parameter is of type [InitializeParams](#InitializeParams)
/// the response if of type [InitializeResult](#InitializeResult) of a Thenable that
/// resolves to such.
pub const InitializeRequest = struct {
    comptime method: []const u8 = "initialize",
    id: RequestId,
    params: InitializeParams,
};

pub const InitializeParams = struct {
    /// The process Id of the parent process that started
    /// the server.
    processId: ?i64,
    /// Information about the client
    clientInfo: Undefinedable(struct {
        /// The name of the client as defined by the client.
        name: []const u8,
        /// The client's version as defined by the client.
        version: Undefinedable([]const u8),
    }),
    /// The locale the client is currently showing the user interface
    /// in. This must not necessarily be the locale of the operating
    /// system.
    locale: Undefinedable([]const u8),
    /// The rootPath of the workspace. Is null
    /// if no folder is open.
    rootPath: Undefinedable(?[]const u8),
    /// The rootUri of the workspace. Is null if no
    /// folder is open. If both `rootPath` and `rootUri` are set
    /// `rootUri` wins.
    rootUri: ?[]const u8,
    /// The capabilities provided by the client (editor or tool)
    capabilities: ClientCapabilities,
    /// User provided initialization options.
    initializationOptions: Undefinedable(std.json.Value),
    /// The initial trace setting. If omitted trace is disabled ('off').
    trace: Undefinedable(enum {
        off,
        messages,
        verbose,
        usingnamespace StringBackedEnumStringify(@This());
    }),
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// The actual configured workspace folders.
    workspaceFolders: ?[]WorkspaceFolder,
};

/// The result returned from an initialize request.
pub fn InitializeResult(comptime T: type) type {
    return struct {
        /// The capabilities the language server provides.
        capabilities: ServerCapabilities(T),
        /// Information about the server.
        serverInfo: Undefinedable(struct {
            /// The name of the server as defined by the server.
            name: []const u8,
            /// The server's version as defined by the server.
            version: Undefinedable([]const u8),
        }),
    };
}

/// The data type of the ResponseError if the
/// initialize request fails.
pub const InitializeError = struct {
    /// Indicates whether the client execute the following retry logic:
    /// (1) show the message provided by the ResponseError to the user
    /// (2) user selects retry or cancel
    /// (3) if user selected retry the initialize method is sent again.
    retry: bool,
};

/// Known error codes for an `InitializeError`.
///
/// NOTE(review): this enum was originally also named `InitializeError`,
/// colliding with the struct above (duplicate declarations do not compile);
/// it is renamed `InitializeErrorCode` here.
pub const InitializeErrorCode = enum(i64) {
    unknownProtocolVersion = 1,
    usingnamespace IntBackedEnumStringify(@This());
};

pub const InitializedParams = struct {};

/// The initialized notification is sent from the client to the
/// server after the client is fully initialized and the server
/// is allowed to send requests from the server to the client.
pub const InitializedNotification = struct {
    comptime method: []const u8 = "initialized",
    id: RequestId,
    params: InitializedParams,
};

/// A shutdown request is sent from the client to the server.
/// It is sent once when the client decides to shutdown the
/// server. The only notification that is sent after a shutdown request
/// is the exit event.
pub const ShutdownRequest = struct {
    comptime method: []const u8 = "shutdown",
    id: RequestId,
};

/// The exit event is sent from the client to the server to
/// ask the server to exit its process.
pub const ExitNotification = struct {
    comptime method: []const u8 = "exit",
    id: RequestId,
};

pub const DidChangeConfigurationClientCapabilities = struct {
    /// Did change configuration notification supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
};

/// The configuration change notification is sent from the client to the server
/// when the client's configuration has changed. The notification contains
/// the changed configuration as defined by the language client.
pub const DidChangeConfigurationNotification = struct {
    comptime method: []const u8 = "workspace/didChangeConfiguration",
    id: RequestId,
    params: DidChangeConfigurationParams,
};

pub const DidChangeConfigurationRegistrationOptions = struct {
    section: Undefinedable(union(enum) {
        string: []const u8,
        array: [][]const u8,
    }),
};

/// The parameters of a change configuration notification.
pub const DidChangeConfigurationParams = struct {
    /// The actual changed settings
    settings: std.json.Value,
};

/// The message type
pub const MessageType = enum(i64) {
    Error = 1,
    Warning = 2,
    Info = 3,
    Log = 4,
    usingnamespace IntBackedEnumStringify(@This());
};

/// The parameters of a notification message.
pub const ShowMessageParams = struct {
    /// The message type. See {@link MessageType}
    @"type": MessageType,
    /// The actual message
    message: []const u8,
};

/// The show message notification is sent from a server to a client to ask
/// the client to display a particular message in the user interface.
pub const ShowMessageNotification = struct {
    comptime method: []const u8 = "window/showMessage",
    id: RequestId,
    params: ShowMessageParams,
};

/// Show message request client capabilities
pub const ShowMessageRequestClientCapabilities = struct {
    /// Capabilities specific to the `MessageActionItem` type.
    messageActionItem: Undefinedable(struct {
        /// Whether the client supports additional attributes which
        /// are preserved and send back to the server in the
        /// request's response.
        additionalPropertiesSupport: Undefinedable(bool),
    }),
};

pub const MessageActionItem = struct {
    /// A short title like 'Retry', 'Open Log' etc.
    title: []const u8,
};

pub const ShowMessageRequestParams = struct {
    /// The message type. See {@link MessageType}
    @"type": MessageType,
    /// The actual message
    message: []const u8,
    /// The message action items to present.
    actions: Undefinedable([]MessageActionItem),
};

/// The show message request is sent from the server to the client to show a message
/// and a set of options actions to the user.
pub const ShowMessageRequest = struct {
    comptime method: []const u8 = "window/showMessageRequest",
    id: RequestId,
    params: ShowMessageRequestParams,
};

/// The log message notification is sent from the server to the client to ask
/// the client to log a particular message.
pub const LogMessageNotification = struct {
    comptime method: []const u8 = "window/logMessage",
    id: RequestId,
    params: LogMessageParams,
};

/// The log message parameters.
pub const LogMessageParams = struct {
    /// The message type. See {@link MessageType}
    @"type": MessageType,
    /// The actual message
    message: []const u8,
};

/// The telemetry event notification is sent from the server to the client to ask
/// the client to log telemetry data.
pub const TelemetryEventNotification = struct {
    comptime method: []const u8 = "telemetry/event",
    id: RequestId,
    /// NOTE(review): emitted by the generator as `params: any`, which is not a
    /// Zig type (TypeScript leakage). Telemetry payloads are arbitrary JSON, so
    /// `std.json.Value` is used, matching `DidChangeConfigurationParams.settings`.
    params: std.json.Value,
};

pub const TextDocumentSyncClientCapabilities = struct {
    /// Whether text document synchronization supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
    /// The client supports sending will save notifications.
    willSave: Undefinedable(bool),
    /// The client supports sending a will save request and
    /// waits for a response providing text edits which will
    /// be applied to the document before it is saved.
    willSaveWaitUntil: Undefinedable(bool),
    /// The client supports did save notifications.
    didSave: Undefinedable(bool),
};

/// Defines how the host (editor) should sync
/// document changes to the language server.
pub const TextDocumentSyncKind = enum(i64) {
    None = 0,
    Full = 1,
    Incremental = 2,
    usingnamespace IntBackedEnumStringify(@This());
};

pub const TextDocumentSyncOptions = struct {
    /// Open and close notifications are sent to the server. If omitted open close notification should not
    /// be sent.
    openClose: Undefinedable(bool),
    /// Change notifications are sent to the server. See TextDocumentSyncKind.None, TextDocumentSyncKind.Full
    /// and TextDocumentSyncKind.Incremental. If omitted it defaults to TextDocumentSyncKind.None.
    change: Undefinedable(TextDocumentSyncKind),
    /// If present will save notifications are sent to the server. If omitted the notification should not be
    /// sent.
    willSave: Undefinedable(bool),
    /// If present will save wait until requests are sent to the server. If omitted the request should not be
    /// sent.
    willSaveWaitUntil: Undefinedable(bool),
    /// If present save notifications are sent to the server. If omitted the notification should not be
    /// sent.
    save: Undefinedable(union(enum) {
        boolean: bool,
        SaveOptions: SaveOptions,
    }),
};

/// The parameters send in a open text document notification
pub const DidOpenTextDocumentParams = struct {
    /// The document that was opened.
    textDocument: TextDocumentItem,
};

/// The document open notification is sent from the client to the server to signal
/// newly opened text documents. The document's truth is now managed by the client
/// and the server must not try to read the document's truth using the document's
/// uri. Open in this sense means it is managed by the client. It doesn't necessarily
/// mean that its content is presented in an editor. An open notification must not
/// be sent more than once without a corresponding close notification send before.
/// This means open and close notification must be balanced and the max open count
/// is one.
pub const DidOpenTextDocumentNotification = struct {
    comptime method: []const u8 = "textDocument/didOpen",
    id: RequestId,
    params: DidOpenTextDocumentParams,
};

/// An event describing a change to a text document. If range and rangeLength are omitted
/// the new text is considered to be the full content of the document.
///
/// NOTE(review): the generator emitted both variants with the same name
/// (`reflection`), which is a compile error; they are renamed `partial`
/// (incremental range edit) and `full` (whole-document replacement) here.
pub const TextDocumentContentChangeEvent = union(enum) {
    partial: struct {
        /// The range of the document that changed.
        range: Range,
        /// The optional length of the range that got replaced.
        rangeLength: Undefinedable(i64),
        /// The new text for the provided range.
        text: []const u8,
    },
    full: struct {
        /// The new text of the whole document.
        text: []const u8,
    },
};

/// The change text document notification's parameters.
pub const DidChangeTextDocumentParams = struct {
    /// The document that did change. The version number points
    /// to the version after all provided content changes have
    /// been applied.
    textDocument: VersionedTextDocumentIdentifier,
    /// The actual content changes. The content changes describe single state changes
    /// to the document. So if there are two content changes c1 (at array index 0) and
    /// c2 (at array index 1) for a document in state S then c1 moves the document from
    /// S to S' and c2 from S' to S''. So c1 is computed on the state S and c2 is computed
    /// on the state S'.
    contentChanges: []TextDocumentContentChangeEvent,
};

/// Describe options to be used when registered for text document change events.
pub const TextDocumentChangeRegistrationOptions = struct {
    /// How documents are synced to the server.
    syncKind: TextDocumentSyncKind,
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
};

/// The document change notification is sent from the client to the server to signal
/// changes to a text document.
pub const DidChangeTextDocumentNotification = struct {
    comptime method: []const u8 = "textDocument/didChange",
    id: RequestId,
    params: DidChangeTextDocumentParams,
};

/// The parameters send in a close text document notification
pub const DidCloseTextDocumentParams = struct {
    /// The document that was closed.
    textDocument: TextDocumentIdentifier,
};

/// The document close notification is sent from the client to the server when
/// the document got closed in the client. The document's truth now exists where
/// the document's uri points to (e.g. if the document's uri is a file uri the
/// truth now exists on disk). As with the open notification the close notification
/// is about managing the document's content. Receiving a close notification
/// doesn't mean that the document was open in an editor before. A close
/// notification requires a previous open notification to be sent.
pub const DidCloseTextDocumentNotification = struct {
    comptime method: []const u8 = "textDocument/didClose",
    id: RequestId,
    params: DidCloseTextDocumentParams,
};

/// The parameters send in a save text document notification
pub const DidSaveTextDocumentParams = struct {
    /// The document that was closed.
    textDocument: TextDocumentIdentifier,
    /// Optional the content when saved. Depends on the includeText value
    /// when the save notification was requested.
    text: Undefinedable([]const u8),
};

/// Save registration options.
pub const TextDocumentSaveRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    /// The client is supposed to include the content on save.
    includeText: Undefinedable(bool),
};

/// The document save notification is sent from the client to the server when
/// the document got saved in the client.
pub const DidSaveTextDocumentNotification = struct {
    comptime method: []const u8 = "textDocument/didSave",
    id: RequestId,
    params: DidSaveTextDocumentParams,
};

/// Represents reasons why a text document is saved.
pub const TextDocumentSaveReason = enum(i64) {
    Manual = 1,
    AfterDelay = 2,
    FocusOut = 3,
    usingnamespace IntBackedEnumStringify(@This());
};

/// The parameters send in a will save text document notification.
pub const WillSaveTextDocumentParams = struct {
    /// The document that will be saved.
    textDocument: TextDocumentIdentifier,
    /// The 'TextDocumentSaveReason'.
    reason: TextDocumentSaveReason,
};

/// A document will save notification is sent from the client to the server before
/// the document is actually saved.
pub const WillSaveTextDocumentNotification = struct {
    comptime method: []const u8 = "textDocument/willSave",
    id: RequestId,
    params: WillSaveTextDocumentParams,
};

/// A document will save request is sent from the client to the server before
/// the document is actually saved. The request can return an array of TextEdits
/// which will be applied to the text document before it is saved. Please note that
/// clients might drop results if computing the text edits took too long or if a
/// server constantly fails on this request. This is done to keep the save fast and
/// reliable.
pub const WillSaveTextDocumentWaitUntilRequest = struct {
    comptime method: []const u8 = "textDocument/willSaveWaitUntil",
    id: RequestId,
    params: WillSaveTextDocumentParams,
};

pub const DidChangeWatchedFilesClientCapabilities = struct {
    /// Did change watched files notification supports dynamic registration. Please note
    /// that the current protocol doesn't support static configuration for file changes
    /// from the server side.
    dynamicRegistration: Undefinedable(bool),
};

/// The watched files notification is sent from the client to the server when
/// the client detects changes to file watched by the language client.
pub const DidChangeWatchedFilesNotification = struct {
    comptime method: []const u8 = "workspace/didChangeWatchedFiles",
    id: RequestId,
    params: DidChangeWatchedFilesParams,
};

/// The watched files change notification's parameters.
pub const DidChangeWatchedFilesParams = struct {
    /// The actual file events.
    changes: []FileEvent,
};

/// The file event type
pub const FileChangeType = enum(i64) {
    Created = 1,
    Changed = 2,
    Deleted = 3,
    usingnamespace IntBackedEnumStringify(@This());
};

/// An event describing a file change.
pub const FileEvent = struct {
    /// The file's uri.
    uri: []const u8,
    /// The change type.
    @"type": FileChangeType,
};

/// Describe options to be used when registered for text document change events.
pub const DidChangeWatchedFilesRegistrationOptions = struct {
    /// The watchers to register.
    watchers: []FileSystemWatcher,
};

pub const FileSystemWatcher = struct {
    /// The glob pattern to watch. Glob patterns can have the following syntax:
    /// - `*` to match one or more characters in a path segment
    /// - `?` to match on one character in a path segment
    /// - `**` to match any number of path segments, including none
    /// - `{}` to group conditions (e.g. `**​/*.{ts,js}` matches all TypeScript and JavaScript files)
    /// - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
    /// - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`)
    globPattern: []const u8,
    /// The kind of events of interest. If omitted it defaults
    /// to WatchKind.Create | WatchKind.Change | WatchKind.Delete
    /// which is 7.
    kind: Undefinedable(i64),
};

pub const WatchKind = enum(i64) {
    Create = 1,
    Change = 2,
    Delete = 4,
    usingnamespace IntBackedEnumStringify(@This());
};

/// The publish diagnostic client capabilities.
pub const PublishDiagnosticsClientCapabilities = struct {
    /// Whether the clients accepts diagnostics with related information.
    relatedInformation: Undefinedable(bool),
    /// Client supports the tag property to provide meta data about a diagnostic.
    /// Clients supporting tags have to handle unknown tags gracefully.
    tagSupport: Undefinedable(struct {
        /// The tags supported by the client.
        valueSet: []DiagnosticTag,
    }),
    /// Whether the client interprets the version property of the
    /// `textDocument/publishDiagnostics` notification`s parameter.
    versionSupport: Undefinedable(bool),
    /// Client supports a codeDescription property
    codeDescriptionSupport: Undefinedable(bool),
    /// Whether code action supports the `data` property which is
    /// preserved between a `textDocument/publishDiagnostics` and
    /// `textDocument/codeAction` request.
    dataSupport: Undefinedable(bool),
};

/// The publish diagnostic notification's parameters.
pub const PublishDiagnosticsParams = struct {
    /// The URI for which diagnostic information is reported.
    uri: []const u8,
    /// Optional the version number of the document the diagnostics are published for.
    version: Undefinedable(i64),
    /// An array of diagnostic information items.
    diagnostics: []Diagnostic,
};

/// Diagnostics notification are sent from the server to the client to signal
/// results of validation runs.
pub const PublishDiagnosticsNotification = struct {
    comptime method: []const u8 = "textDocument/publishDiagnostics",
    id: RequestId,
    params: PublishDiagnosticsParams,
};

/// Completion client capabilities
pub const CompletionClientCapabilities = struct {
    /// Whether completion supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
    /// The client supports the following `CompletionItem` specific
    /// capabilities.
    completionItem: Undefinedable(struct {
        /// Client supports commit characters on a completion item.
        commitCharactersSupport: Undefinedable(bool),
        /// Client supports the deprecated property on a completion item.
        deprecatedSupport: Undefinedable(bool),
        /// Client supports the follow content formats for the documentation
        /// property. The order describes the preferred format of the client.
        documentationFormat: Undefinedable([]MarkupKind),
        /// Client support insert replace edit to control different behavior if a
        /// completion item is inserted in the text or should replace text.
        insertReplaceSupport: Undefinedable(bool),
        /// The client supports the `insertTextMode` property on
        /// a completion item to override the whitespace handling mode
        /// as defined by the client (see `insertTextMode`).
        insertTextModeSupport: Undefinedable(struct {
            valueSet: []InsertTextMode,
        }),
        /// Client supports the preselect property on a completion item.
        preselectSupport: Undefinedable(bool),
        /// Indicates which properties a client can resolve lazily on a completion
        /// item. Before version 3.16.0 only the predefined properties `documentation`
        /// and `details` could be resolved lazily.
        resolveSupport: Undefinedable(struct {
            /// The properties that a client can resolve lazily.
            properties: [][]const u8,
        }),
        /// Client supports snippets as insert text.
        snippetSupport: Undefinedable(bool),
        /// Client supports the tag property on a completion item. Clients supporting
        /// tags have to handle unknown tags gracefully. Clients especially need to
        /// preserve unknown tags when sending a completion item back to the server in
        /// a resolve call.
        tagSupport: Undefinedable(struct {
            /// The tags supported by the client.
            /// NOTE(review): the generator emitted the invalid type `[]1` here; per
            /// the LSP spec this is a list of completion-item tags, so
            /// `[]CompletionItemTag` is used — confirm that declaration exists
            /// elsewhere in this file (cf. `[]DiagnosticTag` above).
            valueSet: []CompletionItemTag,
        }),
    }),
    completionItemKind: Undefinedable(struct {
        /// The completion item kind values the client supports. When this
        /// property exists the client also guarantees that it will
        /// handle values outside its set gracefully and falls back
        /// to a default value when unknown.
        valueSet: Undefinedable([]CompletionItemKind),
    }),
    /// Defines how the client handles whitespace and indentation
    /// when accepting a completion item that uses multi line
    /// text in either `insertText` or `textEdit`.
    insertTextMode: Undefinedable(InsertTextMode),
    /// The client supports to send additional context information for a
    /// `textDocument/completion` request.
    contextSupport: Undefinedable(bool),
};

/// How a completion was triggered
pub const CompletionTriggerKind = enum(i64) {
    Invoked = 1,
    TriggerCharacter = 2,
    TriggerForIncompleteCompletions = 3,
    usingnamespace IntBackedEnumStringify(@This());
};

/// Contains additional information about the context in which a completion request is triggered.
pub const CompletionContext = struct {
    /// How the completion was triggered.
    triggerKind: CompletionTriggerKind,
    /// The trigger character (a single character) that has trigger code complete.
    /// Is undefined if `triggerKind !== CompletionTriggerKind.TriggerCharacter`
    triggerCharacter: Undefinedable([]const u8),
};

/// Completion parameters
pub const CompletionParams = struct {
    /// The completion context. This is only available it the client specifies
    /// to send this using the client capability `textDocument.completion.contextSupport === true`
    context: Undefinedable(CompletionContext),
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The position inside the text document.
    position: Position,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

/// Completion options.
pub const CompletionOptions = struct {
    /// Most tools trigger completion request automatically without explicitly requesting
    /// it using a keyboard shortcut (e.g. Ctrl+Space). Typically they do so when the user
    /// starts to type an identifier. For example if the user types `c` in a JavaScript file
    /// code complete will automatically pop up present `console` besides others as a
    /// completion item. Characters that make up identifiers don't need to be listed here.
    triggerCharacters: Undefinedable([][]const u8),
    /// The list of all possible characters that commit a completion. This field can be used
    /// if clients don't support individual commit characters per completion item. See
    /// `ClientCapabilities.textDocument.completion.completionItem.commitCharactersSupport`
    allCommitCharacters: Undefinedable([][]const u8),
    /// The server provides support to resolve additional
    /// information for a completion item.
    resolveProvider: Undefinedable(bool),
    workDoneProgress: Undefinedable(bool),
};

/// Registration options for a [CompletionRequest](#CompletionRequest).
pub const CompletionRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    /// Most tools trigger completion request automatically without explicitly requesting
    /// it using a keyboard shortcut (e.g. Ctrl+Space). Typically they do so when the user
    /// starts to type an identifier. For example if the user types `c` in a JavaScript file
    /// code complete will automatically pop up present `console` besides others as a
    /// completion item. Characters that make up identifiers don't need to be listed here.
    triggerCharacters: Undefinedable([][]const u8),
    /// The list of all possible characters that commit a completion. This field can be used
    /// if clients don't support individual commit characters per completion item. See
    /// `ClientCapabilities.textDocument.completion.completionItem.commitCharactersSupport`
    allCommitCharacters: Undefinedable([][]const u8),
    /// The server provides support to resolve additional
    /// information for a completion item.
    resolveProvider: Undefinedable(bool),
    workDoneProgress: Undefinedable(bool),
};

/// Request to request completion at a given text document position. The request's
/// parameter is of type [TextDocumentPosition](#TextDocumentPosition) the response
/// is of type [CompletionItem[]](#CompletionItem) or [CompletionList](#CompletionList)
/// or a Thenable that resolves to such.
pub const CompletionRequest = struct {
    comptime method: []const u8 = "textDocument/completion",
    id: RequestId,
    params: CompletionParams,
};

/// Request to resolve additional information for a given completion item.The request's
/// parameter is of type [CompletionItem](#CompletionItem) the response
/// is of type [CompletionItem](#CompletionItem) or a Thenable that resolves to such.
pub const CompletionResolveRequest = struct {
    comptime method: []const u8 = "completionItem/resolve",
    id: RequestId,
    params: CompletionItem,
};

pub const HoverClientCapabilities = struct {
    /// Whether hover supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
    /// Client supports the follow content formats for the content
    /// property. The order describes the preferred format of the client.
    contentFormat: Undefinedable([]MarkupKind),
};

/// Hover options.
pub const HoverOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// Parameters for a [HoverRequest](#HoverRequest).
pub const HoverParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The position inside the text document.
    position: Position,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
};

/// Registration options for a [HoverRequest](#HoverRequest).
pub const HoverRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    workDoneProgress: Undefinedable(bool),
};

/// Request to request hover information at a given text document position. The request's
/// parameter is of type [TextDocumentPosition](#TextDocumentPosition) the response is of
/// type [Hover](#Hover) or a Thenable that resolves to such.
pub const HoverRequest = struct {
    comptime method: []const u8 = "textDocument/hover",
    id: RequestId,
    params: HoverParams,
};

/// Client Capabilities for a [SignatureHelpRequest](#SignatureHelpRequest).
pub const SignatureHelpClientCapabilities = struct {
    /// Whether signature help supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
    /// The client supports the following `SignatureInformation`
    /// specific properties.
    signatureInformation: Undefinedable(struct {
        /// The client support the `activeParameter` property on `SignatureInformation`
        /// literal.
        activeParameterSupport: Undefinedable(bool),
        /// Client supports the follow content formats for the documentation
        /// property. The order describes the preferred format of the client.
        documentationFormat: Undefinedable([]MarkupKind),
        /// Client capabilities specific to parameter information.
        parameterInformation: Undefinedable(struct {
            /// The client supports processing label offsets instead of a
            /// simple label string.
            labelOffsetSupport: Undefinedable(bool),
        }),
    }),
    /// The client supports to send additional context information for a
    /// `textDocument/signatureHelp` request. A client that opts into
    /// contextSupport will also support the `retriggerCharacters` on
    /// `SignatureHelpOptions`.
    contextSupport: Undefinedable(bool),
};

/// Server Capabilities for a [SignatureHelpRequest](#SignatureHelpRequest).
pub const SignatureHelpOptions = struct {
    /// List of characters that trigger signature help.
    triggerCharacters: Undefinedable([][]const u8),
    /// List of characters that re-trigger signature help.
    retriggerCharacters: Undefinedable([][]const u8),
    workDoneProgress: Undefinedable(bool),
};

/// How a signature help was triggered.
pub const SignatureHelpTriggerKind = enum(i64) {
    Invoked = 1,
    TriggerCharacter = 2,
    ContentChange = 3,
    usingnamespace IntBackedEnumStringify(@This());
};

/// Additional information about the context in which a signature help request was triggered.
pub const SignatureHelpContext = struct {
    /// Action that caused signature help to be triggered.
    triggerKind: SignatureHelpTriggerKind,
    /// Character that caused signature help to be triggered.
    triggerCharacter: Undefinedable([]const u8),
    /// `true` if signature help was already showing when it was triggered.
    isRetrigger: bool,
    /// The currently active `SignatureHelp`.
    activeSignatureHelp: Undefinedable(SignatureHelp),
};

/// Parameters for a [SignatureHelpRequest](#SignatureHelpRequest).
pub const SignatureHelpParams = struct {
    /// The signature help context. This is only available if the client specifies
    /// to send this using the client capability `textDocument.signatureHelp.contextSupport === true`
    context: Undefinedable(SignatureHelpContext),
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The position inside the text document.
    position: Position,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
};

/// Registration options for a [SignatureHelpRequest](#SignatureHelpRequest).
pub const SignatureHelpRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    /// List of characters that trigger signature help.
    triggerCharacters: Undefinedable([][]const u8),
    /// List of characters that re-trigger signature help.
retriggerCharacters: Undefinedable([][]const u8), workDoneProgress: Undefinedable(bool), }; pub const SignatureHelpRequest = struct { comptime method: []const u8 = "textDocument/signatureHelp", id: RequestId, params: SignatureHelpParams, }; /// Client Capabilities for a [DefinitionRequest](#DefinitionRequest). pub const DefinitionClientCapabilities = struct { /// Whether definition supports dynamic registration. dynamicRegistration: Undefinedable(bool), /// The client supports additional metadata in the form of definition links. linkSupport: Undefinedable(bool), }; /// Server Capabilities for a [DefinitionRequest](#DefinitionRequest). pub const DefinitionOptions = struct { workDoneProgress: Undefinedable(bool), }; /// Parameters for a [DefinitionRequest](#DefinitionRequest). pub const DefinitionParams = struct { /// The text document. textDocument: TextDocumentIdentifier, /// The position inside the text document. position: Position, /// An optional token that a server can use to report work done progress. workDoneToken: Undefinedable(ProgressToken), /// An optional token that a server can use to report partial results (e.g. streaming) to /// the client. partialResultToken: Undefinedable(ProgressToken), }; /// Registration options for a [DefinitionRequest](#DefinitionRequest). pub const DefinitionRegistrationOptions = struct { /// A document selector to identify the scope of the registration. If set to null /// the document selector provided on the client side will be used. documentSelector: ?DocumentSelector, workDoneProgress: Undefinedable(bool), }; /// A request to resolve the definition location of a symbol at a given text /// document position. The request's parameter is of type [TextDocumentPosition] /// (#TextDocumentPosition) the response is of either type [Definition](#Definition) /// or a typed array of [DefinitionLink](#DefinitionLink) or a Thenable that resolves /// to such. 
pub const DefinitionRequest = struct {
    comptime method: []const u8 = "textDocument/definition",
    id: RequestId,
    params: DefinitionParams,
};

/// Client Capabilities for a [ReferencesRequest](#ReferencesRequest).
pub const ReferenceClientCapabilities = struct {
    /// Whether references supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
};

/// Parameters for a [ReferencesRequest](#ReferencesRequest).
pub const ReferenceParams = struct {
    context: ReferenceContext,
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The position inside the text document.
    position: Position,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

/// Reference options.
pub const ReferenceOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// Registration options for a [ReferencesRequest](#ReferencesRequest).
pub const ReferenceRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    workDoneProgress: Undefinedable(bool),
};

/// A request to resolve project-wide references for the symbol denoted
/// by the given text document position. The request's parameter is of
/// type [ReferenceParams](#ReferenceParams) the response is of type
/// [Location[]](#Location) or a Thenable that resolves to such.
pub const ReferencesRequest = struct {
    comptime method: []const u8 = "textDocument/references",
    id: RequestId,
    params: ReferenceParams,
};

/// Client Capabilities for a [DocumentHighlightRequest](#DocumentHighlightRequest).
pub const DocumentHighlightClientCapabilities = struct {
    /// Whether document highlight supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
};

/// Parameters for a [DocumentHighlightRequest](#DocumentHighlightRequest).
pub const DocumentHighlightParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The position inside the text document.
    position: Position,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

/// Provider options for a [DocumentHighlightRequest](#DocumentHighlightRequest).
pub const DocumentHighlightOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// Registration options for a [DocumentHighlightRequest](#DocumentHighlightRequest).
pub const DocumentHighlightRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    workDoneProgress: Undefinedable(bool),
};

/// Request to resolve a [DocumentHighlight](#DocumentHighlight) for a given
/// text document position. The request's parameter is of type [TextDocumentPosition]
/// (#TextDocumentPosition) the request response is of type [DocumentHighlight[]]
/// (#DocumentHighlight) or a Thenable that resolves to such.
pub const DocumentHighlightRequest = struct {
    comptime method: []const u8 = "textDocument/documentHighlight",
    id: RequestId,
    params: DocumentHighlightParams,
};

/// Client Capabilities for a [DocumentSymbolRequest](#DocumentSymbolRequest).
pub const DocumentSymbolClientCapabilities = struct {
    /// Whether document symbol supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
    /// Specific capabilities for the `SymbolKind`.
    symbolKind: Undefinedable(struct {
        /// The symbol kind values the client supports. When this
        /// property exists the client also guarantees that it will
        /// handle values outside its set gracefully and falls back
        /// to a default value when unknown.
        valueSet: Undefinedable([]SymbolKind),
    }),
    /// The client support hierarchical document symbols.
    hierarchicalDocumentSymbolSupport: Undefinedable(bool),
    /// The client supports tags on `SymbolInformation`. Tags are supported on
    /// `DocumentSymbol` if `hierarchicalDocumentSymbolSupport` is set to true.
    /// Clients supporting tags have to handle unknown tags gracefully.
    tagSupport: Undefinedable(struct {
        /// The tags supported by the client.
        /// FIX: the generator emitted the invalid slice type `[]1` here — `1` is the
        /// integer VALUE of the sole LSP `SymbolTag` (Deprecated), not a type. The LSP
        /// 3.16 spec declares this field as `SymbolTag[]`, mirroring how `symbolKind`
        /// above uses `[]SymbolKind`.
        /// NOTE(review): assumes `SymbolTag` is declared elsewhere in this file — confirm.
        valueSet: []SymbolTag,
    }),
    /// The client supports an additional label presented in the UI when
    /// registering a document symbol provider.
    labelSupport: Undefinedable(bool),
};

/// Parameters for a [DocumentSymbolRequest](#DocumentSymbolRequest).
pub const DocumentSymbolParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

/// Provider options for a [DocumentSymbolRequest](#DocumentSymbolRequest).
pub const DocumentSymbolOptions = struct {
    /// A human-readable string that is shown when multiple outlines trees
    /// are shown for the same document.
    label: Undefinedable([]const u8),
    workDoneProgress: Undefinedable(bool),
};

/// Registration options for a [DocumentSymbolRequest](#DocumentSymbolRequest).
pub const DocumentSymbolRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    /// A human-readable string that is shown when multiple outlines trees
    /// are shown for the same document.
    label: Undefinedable([]const u8),
    workDoneProgress: Undefinedable(bool),
};

/// A request to list all symbols found in a given text document. The request's
/// parameter is of type [TextDocumentIdentifier](#TextDocumentIdentifier) the
/// response is of type [SymbolInformation[]](#SymbolInformation) or a Thenable
/// that resolves to such.
pub const DocumentSymbolRequest = struct {
    comptime method: []const u8 = "textDocument/documentSymbol",
    id: RequestId,
    params: DocumentSymbolParams,
};

/// The Client Capabilities of a [CodeActionRequest](#CodeActionRequest).
pub const CodeActionClientCapabilities = struct {
    /// Whether code action supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
    /// The client support code action literals of type `CodeAction` as a valid
    /// response of the `textDocument/codeAction` request. If the property is not
    /// set the request can only return `Command` literals.
    codeActionLiteralSupport: Undefinedable(struct {
        /// The code action kind is support with the following value
        /// set.
        codeActionKind: struct {
            /// The code action kind values the client supports. When this
            /// property exists the client also guarantees that it will
            /// handle values outside its set gracefully and falls back
            /// to a default value when unknown.
            valueSet: [][]const u8,
        },
    }),
    /// Whether code action supports the `isPreferred` property.
    isPreferredSupport: Undefinedable(bool),
    /// Whether code action supports the `disabled` property.
    disabledSupport: Undefinedable(bool),
    /// Whether code action supports the `data` property which is
    /// preserved between a `textDocument/codeAction` and a
    /// `codeAction/resolve` request.
    dataSupport: Undefinedable(bool),
    /// Whether the client support resolving additional code action
    /// properties via a separate `codeAction/resolve` request.
    resolveSupport: Undefinedable(struct {
        /// The properties that a client can resolve lazily.
        properties: [][]const u8,
    }),
    /// Whether the client honors the change annotations in
    /// text edits and resource operations returned via the
    /// `CodeAction#edit` property by for example presenting
    /// the workspace edit in the user interface and asking
    /// for confirmation.
    honorsChangeAnnotations: Undefinedable(bool),
};

/// The parameters of a [CodeActionRequest](#CodeActionRequest).
pub const CodeActionParams = struct {
    /// The document in which the command was invoked.
    textDocument: TextDocumentIdentifier,
    /// The range for which the command was invoked.
    range: Range,
    /// Context carrying additional information.
    context: CodeActionContext,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

/// Provider options for a [CodeActionRequest](#CodeActionRequest).
pub const CodeActionOptions = struct {
    /// CodeActionKinds that this server may return.
    codeActionKinds: Undefinedable([][]const u8),
    /// The server provides support to resolve additional
    /// information for a code action.
    resolveProvider: Undefinedable(bool),
    workDoneProgress: Undefinedable(bool),
};

/// Registration options for a [CodeActionRequest](#CodeActionRequest).
pub const CodeActionRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    /// CodeActionKinds that this server may return.
    codeActionKinds: Undefinedable([][]const u8),
    /// The server provides support to resolve additional
    /// information for a code action.
    resolveProvider: Undefinedable(bool),
    workDoneProgress: Undefinedable(bool),
};

/// A request to provide commands for the given text document and range.
pub const CodeActionRequest = struct {
    comptime method: []const u8 = "textDocument/codeAction",
    id: RequestId,
    params: CodeActionParams,
};

/// Request to resolve additional information for a given code action. The request's
/// parameter is of type [CodeAction](#CodeAction) the response
/// is of type [CodeAction](#CodeAction) or a Thenable that resolves to such.
pub const CodeActionResolveRequest = struct {
    comptime method: []const u8 = "codeAction/resolve",
    id: RequestId,
    params: CodeAction,
};

/// Client capabilities for a [WorkspaceSymbolRequest](#WorkspaceSymbolRequest).
pub const WorkspaceSymbolClientCapabilities = struct {
    /// Symbol request supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
    /// Specific capabilities for the `SymbolKind` in the `workspace/symbol` request.
    symbolKind: Undefinedable(struct {
        /// The symbol kind values the client supports. When this
        /// property exists the client also guarantees that it will
        /// handle values outside its set gracefully and falls back
        /// to a default value when unknown.
        valueSet: Undefinedable([]SymbolKind),
    }),
    /// The client supports tags on `SymbolInformation`.
    /// Clients supporting tags have to handle unknown tags gracefully.
    tagSupport: Undefinedable(struct {
        /// The tags supported by the client.
        /// FIX: the generator emitted the invalid slice type `[]1` here — `1` is the
        /// integer VALUE of the sole LSP `SymbolTag` (Deprecated), not a type. The LSP
        /// 3.16 spec declares this field as `SymbolTag[]`, mirroring how `symbolKind`
        /// above uses `[]SymbolKind`.
        /// NOTE(review): assumes `SymbolTag` is declared elsewhere in this file — confirm.
        valueSet: []SymbolTag,
    }),
};

/// The parameters of a [WorkspaceSymbolRequest](#WorkspaceSymbolRequest).
pub const WorkspaceSymbolParams = struct {
    /// A query string to filter symbols by. Clients may send an empty
    /// string here to request all symbols.
    query: []const u8,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

/// Server capabilities for a [WorkspaceSymbolRequest](#WorkspaceSymbolRequest).
pub const WorkspaceSymbolOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// Registration options for a [WorkspaceSymbolRequest](#WorkspaceSymbolRequest).
pub const WorkspaceSymbolRegistrationOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// A request to list project-wide symbols matching the query string given
/// by the [WorkspaceSymbolParams](#WorkspaceSymbolParams). The response is
/// of type [SymbolInformation[]](#SymbolInformation) or a Thenable that
/// resolves to such.
pub const WorkspaceSymbolRequest = struct {
    comptime method: []const u8 = "workspace/symbol",
    id: RequestId,
    params: WorkspaceSymbolParams,
};

/// The client capabilities of a [CodeLensRequest](#CodeLensRequest).
pub const CodeLensClientCapabilities = struct {
    /// Whether code lens supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
};

pub const CodeLensWorkspaceClientCapabilities = struct {
    /// Whether the client implementation supports a refresh request sent from the
    /// server to the client.
    refreshSupport: Undefinedable(bool),
};

/// The parameters of a [CodeLensRequest](#CodeLensRequest).
pub const CodeLensParams = struct {
    /// The document to request code lens for.
    textDocument: TextDocumentIdentifier,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

/// Code Lens provider options of a [CodeLensRequest](#CodeLensRequest).
pub const CodeLensOptions = struct {
    /// Code lens has a resolve provider as well.
    resolveProvider: Undefinedable(bool),
    workDoneProgress: Undefinedable(bool),
};

/// Registration options for a [CodeLensRequest](#CodeLensRequest).
pub const CodeLensRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    /// Code lens has a resolve provider as well.
    resolveProvider: Undefinedable(bool),
    workDoneProgress: Undefinedable(bool),
};

/// A request to provide code lens for the given text document.
pub const CodeLensRequest = struct {
    comptime method: []const u8 = "textDocument/codeLens",
    id: RequestId,
    params: CodeLensParams,
};

/// A request to resolve a command for a given code lens.
pub const CodeLensResolveRequest = struct {
    comptime method: []const u8 = "codeLens/resolve",
    id: RequestId,
    params: CodeLens,
};

/// A request to refresh all code actions
pub const CodeLensRefreshRequest = struct {
    comptime method: []const u8 = "workspace/codeLens/refresh",
    id: RequestId,
};

/// The client capabilities of a [DocumentLinkRequest](#DocumentLinkRequest).
pub const DocumentLinkClientCapabilities = struct {
    /// Whether document link supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
    /// Whether the client support the `tooltip` property on `DocumentLink`.
    tooltipSupport: Undefinedable(bool),
};

/// The parameters of a [DocumentLinkRequest](#DocumentLinkRequest).
pub const DocumentLinkParams = struct {
    /// The document to provide document links for.
    textDocument: TextDocumentIdentifier,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

/// Provider options for a [DocumentLinkRequest](#DocumentLinkRequest).
pub const DocumentLinkOptions = struct {
    /// Document links have a resolve provider as well.
    resolveProvider: Undefinedable(bool),
    workDoneProgress: Undefinedable(bool),
};

/// Registration options for a [DocumentLinkRequest](#DocumentLinkRequest).
pub const DocumentLinkRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    /// Document links have a resolve provider as well.
    resolveProvider: Undefinedable(bool),
    workDoneProgress: Undefinedable(bool),
};

/// A request to provide document links
pub const DocumentLinkRequest = struct {
    comptime method: []const u8 = "textDocument/documentLink",
    id: RequestId,
    params: DocumentLinkParams,
};

/// Request to resolve additional information for a given document link. The request's
/// parameter is of type [DocumentLink](#DocumentLink) the response
/// is of type [DocumentLink](#DocumentLink) or a Thenable that resolves to such.
pub const DocumentLinkResolveRequest = struct {
    comptime method: []const u8 = "documentLink/resolve",
    id: RequestId,
    params: DocumentLink,
};

/// Client capabilities of a [DocumentFormattingRequest](#DocumentFormattingRequest).
pub const DocumentFormattingClientCapabilities = struct {
    /// Whether formatting supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
};

/// The parameters of a [DocumentFormattingRequest](#DocumentFormattingRequest).
pub const DocumentFormattingParams = struct {
    /// The document to format.
    textDocument: TextDocumentIdentifier,
    /// The format options
    options: FormattingOptions,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
};

/// Provider options for a [DocumentFormattingRequest](#DocumentFormattingRequest).
pub const DocumentFormattingOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// Registration options for a [DocumentFormattingRequest](#DocumentFormattingRequest).
pub const DocumentFormattingRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    workDoneProgress: Undefinedable(bool),
};

/// A request to format a whole document.
pub const DocumentFormattingRequest = struct {
    comptime method: []const u8 = "textDocument/formatting",
    id: RequestId,
    params: DocumentFormattingParams,
};

/// Client capabilities of a [DocumentRangeFormattingRequest](#DocumentRangeFormattingRequest).
pub const DocumentRangeFormattingClientCapabilities = struct {
    /// Whether range formatting supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
};

/// The parameters of a [DocumentRangeFormattingRequest](#DocumentRangeFormattingRequest).
pub const DocumentRangeFormattingParams = struct {
    /// The document to format.
    textDocument: TextDocumentIdentifier,
    /// The range to format
    range: Range,
    /// The format options
    options: FormattingOptions,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
};

/// Provider options for a [DocumentRangeFormattingRequest](#DocumentRangeFormattingRequest).
pub const DocumentRangeFormattingOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// Registration options for a [DocumentRangeFormattingRequest](#DocumentRangeFormattingRequest).
pub const DocumentRangeFormattingRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    workDoneProgress: Undefinedable(bool),
};

/// A request to format a range in a document.
pub const DocumentRangeFormattingRequest = struct {
    comptime method: []const u8 = "textDocument/rangeFormatting",
    id: RequestId,
    params: DocumentRangeFormattingParams,
};

/// Client capabilities of a [DocumentOnTypeFormattingRequest](#DocumentOnTypeFormattingRequest).
pub const DocumentOnTypeFormattingClientCapabilities = struct {
    /// Whether on type formatting supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
};

/// The parameters of a [DocumentOnTypeFormattingRequest](#DocumentOnTypeFormattingRequest).
pub const DocumentOnTypeFormattingParams = struct {
    /// The document to format.
    textDocument: TextDocumentIdentifier,
    /// The position at which this request was sent.
    position: Position,
    /// The character that has been typed.
    ch: []const u8,
    /// The format options.
    options: FormattingOptions,
};

/// Provider options for a [DocumentOnTypeFormattingRequest](#DocumentOnTypeFormattingRequest).
pub const DocumentOnTypeFormattingOptions = struct {
    /// A character on which formatting should be triggered, like `}`.
    firstTriggerCharacter: []const u8,
    /// More trigger characters.
    moreTriggerCharacter: Undefinedable([][]const u8),
};

/// Registration options for a [DocumentOnTypeFormattingRequest](#DocumentOnTypeFormattingRequest).
pub const DocumentOnTypeFormattingRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    /// A character on which formatting should be triggered, like `}`.
    firstTriggerCharacter: []const u8,
    /// More trigger characters.
    moreTriggerCharacter: Undefinedable([][]const u8),
};

/// A request to format a document on type.
pub const DocumentOnTypeFormattingRequest = struct {
    comptime method: []const u8 = "textDocument/onTypeFormatting",
    id: RequestId,
    params: DocumentOnTypeFormattingParams,
};

pub const PrepareSupportDefaultBehavior = enum(i64) {
    Identifier = 1,
    usingnamespace IntBackedEnumStringify(@This());
};

pub const RenameClientCapabilities = struct {
    /// Whether rename supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
    /// Client supports testing for validity of rename operations
    /// before execution.
    prepareSupport: Undefinedable(bool),
    /// Client supports the default behavior result.
    /// NOTE(review): the generator hard-coded this as a comptime i64 literal rather
    /// than `Undefinedable(PrepareSupportDefaultBehavior)` (the enum declared above,
    /// whose only value is 1) — presumably a codegen artifact; confirm against the
    /// generator before changing it.
    comptime prepareSupportDefaultBehavior: i64 = 1,
    /// Whether the client honors the change annotations in
    /// text edits and resource operations returned via the
    /// rename request's workspace edit by for example presenting
    /// the workspace edit in the user interface and asking
    /// for confirmation.
    honorsChangeAnnotations: Undefinedable(bool),
};

/// The parameters of a [RenameRequest](#RenameRequest).
pub const RenameParams = struct {
    /// The document to rename.
    textDocument: TextDocumentIdentifier,
    /// The position at which this request was sent.
    position: Position,
    /// The new name of the symbol. If the given name is not valid the
    /// request must return a [ResponseError](#ResponseError) with an
    /// appropriate message set.
    newName: []const u8,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
};

/// Provider options for a [RenameRequest](#RenameRequest).
pub const RenameOptions = struct {
    /// Renames should be checked and tested before being executed.
    prepareProvider: Undefinedable(bool),
    workDoneProgress: Undefinedable(bool),
};

/// Registration options for a [RenameRequest](#RenameRequest).
pub const RenameRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    /// Renames should be checked and tested before being executed.
    prepareProvider: Undefinedable(bool),
    workDoneProgress: Undefinedable(bool),
};

/// A request to rename a symbol.
pub const RenameRequest = struct {
    comptime method: []const u8 = "textDocument/rename",
    id: RequestId,
    params: RenameParams,
};

pub const PrepareRenameParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The position inside the text document.
    position: Position,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
};

/// A request to test and perform the setup necessary for a rename.
pub const PrepareRenameRequest = struct {
    comptime method: []const u8 = "textDocument/prepareRename",
    id: RequestId,
    params: PrepareRenameParams,
};

/// The client capabilities of a [ExecuteCommandRequest](#ExecuteCommandRequest).
pub const ExecuteCommandClientCapabilities = struct {
    /// Execute command supports dynamic registration.
    dynamicRegistration: Undefinedable(bool),
};

/// The parameters of a [ExecuteCommandRequest](#ExecuteCommandRequest).
pub const ExecuteCommandParams = struct {
    /// The identifier of the actual command handler.
    command: []const u8,
    /// Arguments that the command should be invoked with.
    arguments: Undefinedable([]std.json.Value),
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
};

/// The server capabilities of a [ExecuteCommandRequest](#ExecuteCommandRequest).
pub const ExecuteCommandOptions = struct {
    /// The commands to be executed on the server
    commands: [][]const u8,
    workDoneProgress: Undefinedable(bool),
};

/// Registration options for a [ExecuteCommandRequest](#ExecuteCommandRequest).
pub const ExecuteCommandRegistrationOptions = struct {
    /// The commands to be executed on the server
    commands: [][]const u8,
    workDoneProgress: Undefinedable(bool),
};

/// A request sent from the client to the server to execute a command. The request might return
/// a workspace edit which the client will apply to the workspace.
pub const ExecuteCommandRequest = struct { comptime method: []const u8 = "workspace/executeCommand", id: RequestId, params: ExecuteCommandParams, }; pub const WorkspaceEditClientCapabilities = struct { /// The client supports versioned document changes in `WorkspaceEdit`s documentChanges: Undefinedable(bool), /// The resource operations the client supports. Clients should at least /// support 'create', 'rename' and 'delete' files and folders. resourceOperations: Undefinedable([]ResourceOperationKind), /// The failure handling strategy of a client if applying the workspace edit /// fails. failureHandling: Undefinedable(FailureHandlingKind), /// Whether the client normalizes line endings to the client specific /// setting. /// If set to `true` the client will normalize line ending characters /// in a workspace edit containing to the client specific new line /// character. normalizesLineEndings: Undefinedable(bool), /// Whether the client in general supports change annotations on text edits, /// create file, rename file and delete file changes. changeAnnotationSupport: Undefinedable(struct { /// Whether the client groups edits with equal labels into tree nodes, /// for instance all edits labelled with "Changes in Strings" would /// be a tree node. groupsOnLabel: Undefinedable(bool), }), }; /// The parameters passed via a apply workspace edit request. pub const ApplyWorkspaceEditParams = struct { /// An optional label of the workspace edit. This label is /// presented in the user interface for example on an undo /// stack to undo the workspace edit. label: Undefinedable([]const u8), /// The edits to apply. edit: WorkspaceEdit, }; /// A response returned from the apply workspace edit request. pub const ApplyWorkspaceEditResponse = struct { /// Indicates whether the edit was applied or not. applied: bool, /// An optional textual description for why the edit was not applied. 
    /// This may be used by the server for diagnostic logging or to provide
    /// a suitable error for a request that triggered the edit.
    failureReason: Undefinedable([]const u8),
    /// Depending on the client's failure handling strategy `failedChange` might
    /// contain the index of the change that failed. This property is only available
    /// if the client signals a `failureHandlingStrategy` in its client capabilities.
    failedChange: Undefinedable(i64),
};

/// A request sent from the server to the client to modify certain resources.
pub const ApplyWorkspaceEditRequest = struct {
    comptime method: []const u8 = "workspace/applyEdit",
    id: RequestId,
    params: ApplyWorkspaceEditParams,
};

/// A request to resolve the implementation locations of a symbol at a given text
/// document position. The request's parameter is of type [TextDocumentPositionParams]
/// (#TextDocumentPositionParams) the response is of type [Definition](#Definition) or a
/// Thenable that resolves to such.
pub const ImplementationRequest = struct {
    comptime method: []const u8 = "textDocument/implementation",
    id: RequestId,
    params: ImplementationParams,
};

pub const ImplementationParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The position inside the text document.
    position: Position,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

pub const ImplementationRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    workDoneProgress: Undefinedable(bool),
    /// The id used to register the request. The id can be used to deregister
    /// the request again. See also Registration#id.
    id: Undefinedable([]const u8),
};

pub const ImplementationOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// A request to resolve the type definition locations of a symbol at a given text
/// document position. The request's parameter is of type [TextDocumentPositionParams]
/// (#TextDocumentPositionParams) the response is of type [Definition](#Definition) or a
/// Thenable that resolves to such.
pub const TypeDefinitionRequest = struct {
    comptime method: []const u8 = "textDocument/typeDefinition",
    id: RequestId,
    params: TypeDefinitionParams,
};

pub const TypeDefinitionParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The position inside the text document.
    position: Position,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

pub const TypeDefinitionRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    workDoneProgress: Undefinedable(bool),
    /// The id used to register the request. The id can be used to deregister
    /// the request again. See also Registration#id.
    id: Undefinedable([]const u8),
};

pub const TypeDefinitionOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// The `workspace/workspaceFolders` is sent from the server to the client to fetch the open workspace folders.
pub const WorkspaceFoldersRequest = struct {
    comptime method: []const u8 = "workspace/workspaceFolders",
    id: RequestId,
};

/// The `workspace/didChangeWorkspaceFolders` notification is sent from the client to the server when the workspace
/// folder configuration changes.
pub const DidChangeWorkspaceFoldersNotification = struct {
    comptime method: []const u8 = "workspace/didChangeWorkspaceFolders",
    id: RequestId,
    params: DidChangeWorkspaceFoldersParams,
};

/// The parameters of a `workspace/didChangeWorkspaceFolders` notification.
pub const DidChangeWorkspaceFoldersParams = struct {
    /// The actual workspace folder change event.
    event: WorkspaceFoldersChangeEvent,
};

pub const WorkspaceFolder = struct {
    /// The associated URI for this workspace folder.
    uri: []const u8,
    /// The name of the workspace folder. Used to refer to this
    /// workspace folder in the user interface.
    name: []const u8,
};

/// The workspace folder change event.
pub const WorkspaceFoldersChangeEvent = struct {
    /// The array of added workspace folders
    added: []WorkspaceFolder,
    /// The array of the removed workspace folders
    removed: []WorkspaceFolder,
};

/// The 'workspace/configuration' request is sent from the server to the client to fetch a certain
/// configuration setting.
pub const ConfigurationRequest = struct {
    comptime method: []const u8 = "workspace/configuration",
    id: RequestId,
    params: ConfigurationParams,
};

/// The parameters of a configuration request.
pub const ConfigurationParams = struct {
    items: []ConfigurationItem,
};

pub const ConfigurationItem = struct {
    /// The scope to get the configuration section for.
    scopeUri: Undefinedable([]const u8),
    /// The configuration section asked for.
    section: Undefinedable([]const u8),
};

/// A request to list all color symbols found in a given text document. The request's
/// parameter is of type [DocumentColorParams](#DocumentColorParams) the
/// response is of type [ColorInformation[]](#ColorInformation) or a Thenable
/// that resolves to such.
pub const DocumentColorRequest = struct {
    comptime method: []const u8 = "textDocument/documentColor",
    id: RequestId,
    params: DocumentColorParams,
};

/// A request to list all presentations for a color. The request's
/// parameter is of type [ColorPresentationParams](#ColorPresentationParams) the
/// response is of type [ColorInformation[]](#ColorInformation) or a Thenable
/// that resolves to such.
pub const ColorPresentationRequest = struct {
    comptime method: []const u8 = "textDocument/colorPresentation",
    id: RequestId,
    params: ColorPresentationParams,
};

pub const DocumentColorOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// Parameters for a [DocumentColorRequest](#DocumentColorRequest).
pub const DocumentColorParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

/// Parameters for a [ColorPresentationRequest](#ColorPresentationRequest).
pub const ColorPresentationParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The color to request presentations for.
    color: Color,
    /// The range where the color would be inserted. Serves as a context.
    range: Range,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

pub const DocumentColorRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    /// The id used to register the request. The id can be used to deregister
    /// the request again. See also Registration#id.
    id: Undefinedable([]const u8),
    workDoneProgress: Undefinedable(bool),
};

pub const FoldingRangeClientCapabilities = struct {
    /// Whether implementation supports dynamic registration for folding range providers. If this is set to `true`
    /// the client supports the new `FoldingRangeRegistrationOptions` return value for the corresponding server
    /// capability as well.
    dynamicRegistration: Undefinedable(bool),
    /// The maximum number of folding ranges that the client prefers to receive per document. The value serves as a
    /// hint, servers are free to follow the limit.
    rangeLimit: Undefinedable(i64),
    /// If set, the client signals that it only supports folding complete lines. If set, client will
    /// ignore specified `startCharacter` and `endCharacter` properties in a FoldingRange.
    lineFoldingOnly: Undefinedable(bool),
};

pub const FoldingRangeOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// A request to provide folding ranges in a document. The request's
/// parameter is of type [FoldingRangeParams](#FoldingRangeParams), the
/// response is of type [FoldingRangeList](#FoldingRangeList) or a Thenable
/// that resolves to such.
pub const FoldingRangeRequest = struct {
    comptime method: []const u8 = "textDocument/foldingRange",
    id: RequestId,
    params: FoldingRangeParams,
};

/// Parameters for a [FoldingRangeRequest](#FoldingRangeRequest).
pub const FoldingRangeParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

pub const FoldingRangeRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    workDoneProgress: Undefinedable(bool),
    /// The id used to register the request. The id can be used to deregister
    /// the request again. See also Registration#id.
    id: Undefinedable([]const u8),
};

pub const DeclarationClientCapabilities = struct {
    /// Whether declaration supports dynamic registration. If this is set to `true`
    /// the client supports the new `DeclarationRegistrationOptions` return value
    /// for the corresponding server capability as well.
    dynamicRegistration: Undefinedable(bool),
    /// The client supports additional metadata in the form of declaration links.
    linkSupport: Undefinedable(bool),
};

/// A request to resolve the declaration locations of a symbol at a given text
/// document position. The request's parameter is of type [TextDocumentPositionParams]
/// (#TextDocumentPositionParams) the response is of type [Declaration](#Declaration)
/// or a typed array of [DeclarationLink](#DeclarationLink) or a Thenable that resolves
/// to such.
pub const DeclarationRequest = struct {
    comptime method: []const u8 = "textDocument/declaration",
    id: RequestId,
    params: DeclarationParams,
};

pub const DeclarationParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The position inside the text document.
    position: Position,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

pub const DeclarationRegistrationOptions = struct {
    workDoneProgress: Undefinedable(bool),
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    /// The id used to register the request. The id can be used to deregister
    /// the request again. See also Registration#id.
    id: Undefinedable([]const u8),
};

pub const DeclarationOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

pub const SelectionRangeClientCapabilities = struct {
    /// Whether implementation supports dynamic registration for selection range providers. If this is set to `true`
    /// the client supports the new `SelectionRangeRegistrationOptions` return value for the corresponding server
    /// capability as well.
    dynamicRegistration: Undefinedable(bool),
};

pub const SelectionRangeOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// A parameter literal used in selection range requests.
pub const SelectionRangeParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The positions inside the text document.
    positions: []Position,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

/// A request to provide selection ranges in a document. The request's
/// parameter is of type [SelectionRangeParams](#SelectionRangeParams), the
/// response is of type [SelectionRange[]](#SelectionRange[]) or a Thenable
/// that resolves to such.
pub const SelectionRangeRequest = struct {
    comptime method: []const u8 = "textDocument/selectionRange",
    id: RequestId,
    params: SelectionRangeParams,
};

pub const SelectionRangeRegistrationOptions = struct {
    workDoneProgress: Undefinedable(bool),
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    /// The id used to register the request. The id can be used to deregister
    /// the request again. See also Registration#id.
    id: Undefinedable([]const u8),
};

pub const WorkDoneProgressBegin = struct {
    comptime kind: []const u8 = "begin",
    /// Mandatory title of the progress operation. Used to briefly inform about
    /// the kind of operation being performed.
    title: []const u8,
    /// Controls if a cancel button should show to allow the user to cancel the
    /// long running operation. Clients that don't support cancellation are allowed
    /// to ignore the setting.
    cancellable: Undefinedable(bool),
    /// Optional, more detailed associated progress message. Contains
    /// complementary information to the `title`.
    message: Undefinedable([]const u8),
    /// Optional progress percentage to display (value 100 is considered 100%).
    /// If not provided infinite progress is assumed and clients are allowed
    /// to ignore the `percentage` value in subsequent report notifications.
    percentage: Undefinedable(i64),
};

pub const WorkDoneProgressReport = struct {
    comptime kind: []const u8 = "report",
    /// Controls enablement state of a cancel button.
    cancellable: Undefinedable(bool),
    /// Optional, more detailed associated progress message. Contains
    /// complementary information to the `title`.
    message: Undefinedable([]const u8),
    /// Optional progress percentage to display (value 100 is considered 100%).
    /// If not provided infinite progress is assumed and clients are allowed
    /// to ignore the `percentage` value in subsequent report notifications.
    percentage: Undefinedable(i64),
};

pub const WorkDoneProgressEnd = struct {
    comptime kind: []const u8 = "end",
    /// Optional, a final message to for example indicate the outcome
    /// of the operation.
    message: Undefinedable([]const u8),
};

pub const WorkDoneProgressCreateParams = struct {
    /// The token to be used to report progress.
    token: ProgressToken,
};

/// The `window/workDoneProgress/create` request is sent from the server to the client to initiate progress
/// reporting from the server.
pub const WorkDoneProgressCreateRequest = struct {
    comptime method: []const u8 = "window/workDoneProgress/create",
    id: RequestId,
    params: WorkDoneProgressCreateParams,
};

pub const WorkDoneProgressCancelParams = struct {
    /// The token to be used to report progress.
    token: ProgressToken,
};

/// The `window/workDoneProgress/cancel` notification is sent from the client to the server to cancel a progress
/// initiated on the server side.
pub const WorkDoneProgressCancelNotification = struct {
    comptime method: []const u8 = "window/workDoneProgress/cancel",
    id: RequestId,
    params: WorkDoneProgressCancelParams,
};

pub const CallHierarchyClientCapabilities = struct {
    /// Whether implementation supports dynamic registration. If this is set to `true`
    /// the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
    /// return value for the corresponding server capability as well.
    dynamicRegistration: Undefinedable(bool),
};

/// Call hierarchy options used during static registration.
pub const CallHierarchyOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// Call hierarchy options used during static or dynamic registration.
pub const CallHierarchyRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    workDoneProgress: Undefinedable(bool),
    /// The id used to register the request. The id can be used to deregister
    /// the request again. See also Registration#id.
    id: Undefinedable([]const u8),
};

/// The parameter of a `callHierarchy/incomingCalls` request.
pub const CallHierarchyIncomingCallsParams = struct {
    item: CallHierarchyItem,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

/// A request to resolve the incoming calls for a given `CallHierarchyItem`.
pub const CallHierarchyIncomingCallsRequest = struct {
    comptime method: []const u8 = "callHierarchy/incomingCalls",
    id: RequestId,
    params: CallHierarchyIncomingCallsParams,
};

/// The parameter of a `callHierarchy/outgoingCalls` request.
pub const CallHierarchyOutgoingCallsParams = struct {
    item: CallHierarchyItem,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

/// A request to resolve the outgoing calls for a given `CallHierarchyItem`.
pub const CallHierarchyOutgoingCallsRequest = struct {
    comptime method: []const u8 = "callHierarchy/outgoingCalls",
    id: RequestId,
    params: CallHierarchyOutgoingCallsParams,
};

/// The parameter of a `textDocument/prepareCallHierarchy` request.
pub const CallHierarchyPrepareParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The position inside the text document.
    position: Position,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
};

/// A request to resolve a `CallHierarchyItem` in a document at a given position.
/// Can be used as an input to an incoming or outgoing call hierarchy.
pub const CallHierarchyPrepareRequest = struct {
    comptime method: []const u8 = "textDocument/prepareCallHierarchy",
    id: RequestId,
    params: CallHierarchyPrepareParams,
};

/// A set of predefined token types. This set is not fixed
/// and clients can specify additional token types via the
/// corresponding client capabilities.
pub const SemanticTokenTypes = struct {
    pub const class = "class";
    pub const comment = "comment";
    pub const @"enum" = "enum";
    pub const enumMember = "enumMember";
    pub const event = "event";
    pub const function = "function";
    pub const interface = "interface";
    pub const keyword = "keyword";
    pub const macro = "macro";
    pub const method = "method";
    pub const modifier = "modifier";
    pub const namespace = "namespace";
    pub const number = "number";
    pub const operator = "operator";
    pub const parameter = "parameter";
    pub const property = "property";
    pub const regexp = "regexp";
    pub const string = "string";
    pub const @"struct" = "struct";
    pub const @"type" = "type";
    pub const typeParameter = "typeParameter";
    pub const variable = "variable";
};

/// A set of predefined token modifiers. This set is not fixed
/// and clients can specify additional token types via the
/// corresponding client capabilities.
pub const SemanticTokenModifiers = struct {
    pub const abstract = "abstract";
    pub const @"async" = "async";
    pub const declaration = "declaration";
    pub const defaultLibrary = "defaultLibrary";
    pub const definition = "definition";
    pub const deprecated = "deprecated";
    pub const documentation = "documentation";
    pub const modification = "modification";
    pub const readonly = "readonly";
    pub const static = "static";
};

pub const SemanticTokensLegend = struct {
    /// The token types a server uses.
    tokenTypes: [][]const u8,
    /// The token modifiers a server uses.
    tokenModifiers: [][]const u8,
};

pub const SemanticTokens = struct {
    /// An optional result id. If provided and clients support delta updating
    /// the client will include the result id in the next semantic token request.
    /// A server can then instead of computing all semantic tokens again simply
    /// send a delta.
    resultId: Undefinedable([]const u8),
    /// The actual tokens.
    data: []i64,
};

pub const SemanticTokensPartialResult = struct {
    data: []i64,
};

pub const SemanticTokensEdit = struct {
    /// The start offset of the edit.
    start: i64,
    /// The count of elements to remove.
    deleteCount: i64,
    /// The elements to insert.
    data: Undefinedable([]i64),
};

pub const SemanticTokensDelta = struct {
    resultId: Undefinedable([]const u8),
    /// The semantic token edits to transform a previous result into a new result.
    edits: []SemanticTokensEdit,
};

pub const SemanticTokensDeltaPartialResult = struct {
    edits: []SemanticTokensEdit,
};

pub const TokenFormat = struct {
    pub const Relative = "relative";
};

pub const SemanticTokensClientCapabilities = struct {
    /// Whether implementation supports dynamic registration. If this is set to `true`
    /// the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
    /// return value for the corresponding server capability as well.
    dynamicRegistration: Undefinedable(bool),
    /// Which requests the client supports and might send to the server
    /// depending on the server's capability. Please note that clients might not
    /// show semantic tokens or degrade some of the user experience if a range
    /// or full request is advertised by the client but not provided by the
    /// server. If for example the client capability `requests.full` and
    /// `request.range` are both set to true but the server only provides a
    /// range provider the client might not render a minimap correctly or might
    /// even decide to not show any semantic tokens at all.
    requests: struct {
        /// The client will send the `textDocument/semanticTokens/full` request if
        /// the server provides a corresponding handler.
        full: Undefinedable(union(enum) {
            boolean: bool,
            reflection: struct {
                /// The client will send the `textDocument/semanticTokens/full/delta` request if
                /// the server provides a corresponding handler.
                delta: Undefinedable(bool),
            },
        }),
        /// The client will send the `textDocument/semanticTokens/range` request if
        /// the server provides a corresponding handler.
range: Undefinedable(union(enum) { boolean: bool, reflection: ManuallyTranslateValue, }), }, /// The token types that the client supports. tokenTypes: [][]const u8, /// The token modifiers that the client supports. tokenModifiers: [][]const u8, /// The token formats the clients supports. formats: []relative, /// Whether the client supports tokens that can overlap each other. overlappingTokenSupport: Undefinedable(bool), /// Whether the client supports tokens that can span multiple lines. multilineTokenSupport: Undefinedable(bool), }; pub const SemanticTokensOptions = struct { /// The legend used by the server legend: SemanticTokensLegend, /// Server supports providing semantic tokens for a specific range /// of a document. range: Undefinedable(union(enum) { boolean: bool, reflection: ManuallyTranslateValue, }), /// Server supports providing semantic tokens for a full document. full: Undefinedable(union(enum) { boolean: bool, reflection: struct { /// The server supports deltas for full documents. delta: Undefinedable(bool), }, }), workDoneProgress: Undefinedable(bool), }; pub const SemanticTokensRegistrationOptions = struct { /// A document selector to identify the scope of the registration. If set to null /// the document selector provided on the client side will be used. documentSelector: ?DocumentSelector, /// The legend used by the server legend: SemanticTokensLegend, /// Server supports providing semantic tokens for a specific range /// of a document. range: Undefinedable(union(enum) { boolean: bool, reflection: ManuallyTranslateValue, }), /// Server supports providing semantic tokens for a full document. full: Undefinedable(union(enum) { boolean: bool, reflection: struct { /// The server supports deltas for full documents. delta: Undefinedable(bool), }, }), workDoneProgress: Undefinedable(bool), /// The id used to register the request. The id can be used to deregister /// the request again. See also Registration#id. 
    id: Undefinedable([]const u8),
};

pub const SemanticTokensParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

pub const SemanticTokensRequest = struct {
    comptime method: []const u8 = "textDocument/semanticTokens/full",
    id: RequestId,
    params: SemanticTokensParams,
};

pub const SemanticTokensDeltaParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The result id of a previous response. The result Id can either point to a full response
    /// or a delta response depending on what was received last.
    previousResultId: []const u8,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

pub const SemanticTokensDeltaRequest = struct {
    comptime method: []const u8 = "textDocument/semanticTokens/full/delta",
    id: RequestId,
    params: SemanticTokensDeltaParams,
};

pub const SemanticTokensRangeParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The range the semantic tokens are requested for.
    range: Range,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

pub const SemanticTokensRangeRequest = struct {
    comptime method: []const u8 = "textDocument/semanticTokens/range",
    id: RequestId,
    params: SemanticTokensRangeParams,
};

pub const SemanticTokensRefreshRequest = struct {
    comptime method: []const u8 = "workspace/semanticTokens/refresh",
    id: RequestId,
};

pub const SemanticTokensRegistrationType = struct {
    pub const method = "textDocument/semanticTokens";
    pub const @"type" = RegistrationType(
        SemanticTokensRegistrationOptions,
    );
};

/// Params to show a document.
pub const ShowDocumentParams = struct {
    /// The document uri to show.
    uri: []const u8,
    /// Indicates to show the resource in an external program.
    /// To show for example `https://code.visualstudio.com/`
    /// in the default WEB browser set `external` to `true`.
    external: Undefinedable(bool),
    /// An optional property to indicate whether the editor
    /// showing the document should take focus or not.
    /// Clients might ignore this property if an external
    /// program is started.
    takeFocus: Undefinedable(bool),
    /// An optional selection range if the document is a text
    /// document. Clients might ignore the property if an
    /// external program is started or the file is not a text
    /// file.
    selection: Undefinedable(Range),
};

/// A request to show a document. This request might open an
/// external program depending on the value of the URI to open.
/// For example a request to open `https://code.visualstudio.com/`
/// will very likely open the URI in a WEB browser.
pub const ShowDocumentRequest = struct {
    comptime method: []const u8 = "window/showDocument",
    id: RequestId,
    params: ShowDocumentParams,
};

/// The result of a show document request.
pub const ShowDocumentResult = struct {
    /// A boolean indicating if the show was successful.
    success: bool,
};

/// Client capabilities for the show document request.
pub const ShowDocumentClientCapabilities = struct {
    /// The client has support for the show document
    /// request.
    support: bool,
};

/// Client capabilities for the linked editing range request.
pub const LinkedEditingRangeClientCapabilities = struct {
    /// Whether implementation supports dynamic registration. If this is set to `true`
    /// the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
    /// return value for the corresponding server capability as well.
    dynamicRegistration: Undefinedable(bool),
};

/// The result of a linked editing range request.
pub const LinkedEditingRanges = struct {
    /// A list of ranges that can be edited together. The ranges must have
    /// identical length and contain identical text content. The ranges cannot overlap.
    ranges: []Range,
    /// An optional word pattern (regular expression) that describes valid contents for
    /// the given ranges. If no pattern is provided, the client configuration's word
    /// pattern will be used.
    wordPattern: Undefinedable([]const u8),
};

pub const LinkedEditingRangeOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

pub const LinkedEditingRangeParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,
    /// The position inside the text document.
    position: Position,
    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),
};

pub const LinkedEditingRangeRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    workDoneProgress: Undefinedable(bool),
    /// The id used to register the request. The id can be used to deregister
    /// the request again. See also Registration#id.
    id: Undefinedable([]const u8),
};

/// A request to provide ranges that can be edited together.
pub const LinkedEditingRangeRequest = struct {
    comptime method: []const u8 = "textDocument/linkedEditingRange",
    id: RequestId,
    params: LinkedEditingRangeParams,
};

/// Options for notifications/requests for user operations on files.
pub const FileOperationOptions = struct {
    /// The server is interested in didCreateFiles notifications.
    didCreate: Undefinedable(FileOperationRegistrationOptions),
    /// The server is interested in willCreateFiles requests.
    willCreate: Undefinedable(FileOperationRegistrationOptions),
    /// The server is interested in didRenameFiles notifications.
    didRename: Undefinedable(FileOperationRegistrationOptions),
    /// The server is interested in willRenameFiles requests.
    willRename: Undefinedable(FileOperationRegistrationOptions),
    /// The server is interested in didDeleteFiles file notifications.
    didDelete: Undefinedable(FileOperationRegistrationOptions),
    /// The server is interested in willDeleteFiles file requests.
    willDelete: Undefinedable(FileOperationRegistrationOptions),
};

/// Capabilities relating to events from file operations by the user in the client.
pub const FileOperationClientCapabilities = struct {
    /// Whether the client supports dynamic registration for file requests/notifications.
    dynamicRegistration: Undefinedable(bool),
    /// The client has support for sending didCreateFiles notifications.
    didCreate: Undefinedable(bool),
    /// The client has support for willCreateFiles requests.
    willCreate: Undefinedable(bool),
    /// The client has support for sending didRenameFiles notifications.
    didRename: Undefinedable(bool),
    /// The client has support for willRenameFiles requests.
    willRename: Undefinedable(bool),
    /// The client has support for sending didDeleteFiles notifications.
    didDelete: Undefinedable(bool),
    /// The client has support for willDeleteFiles requests.
    willDelete: Undefinedable(bool),
};

/// The options to register for file operations.
pub const FileOperationRegistrationOptions = struct {
    /// The actual filters.
filters: []FileOperationFilter, }; /// Matching options for the file operation pattern. pub const FileOperationPatternOptions = struct { /// The pattern should be matched ignoring casing. ignoreCase: Undefinedable(bool), }; /// A pattern kind describing if a glob pattern matches a file a folder or /// both. pub const FileOperationPatternKind = struct { /// The pattern matches a file only. pub const file = "file"; /// The pattern matches a folder only. pub const folder = "folder"; }; /// The did create files notification is sent from the client to the server when /// files were created from within the client. pub const DidCreateFilesNotification = struct { comptime method: []const u8 = "workspace/didCreateFiles", id: RequestId, params: CreateFilesParams, }; /// The parameters sent in file create requests/notifications. pub const CreateFilesParams = struct { /// An array of all files/folders created in this operation. files: []FileCreate, }; /// Represents information on a file/folder create. pub const FileCreate = struct { /// A file:// URI for the location of the file/folder being created. uri: []const u8, }; /// The will create files request is sent from the client to the server before files are actually /// created as long as the creation is triggered from within the client. pub const WillCreateFilesRequest = struct { comptime method: []const u8 = "workspace/willCreateFiles", id: RequestId, params: CreateFilesParams, }; /// The did rename files notification is sent from the client to the server when /// files were renamed from within the client. pub const DidRenameFilesNotification = struct { comptime method: []const u8 = "workspace/didRenameFiles", id: RequestId, params: RenameFilesParams, }; /// The parameters sent in file rename requests/notifications. pub const RenameFilesParams = struct { /// An array of all files/folders renamed in this operation. When a folder is renamed, only /// the folder will be included, and not its children. 
files: []FileRename, }; /// Represents information on a file/folder rename. pub const FileRename = struct { /// A file:// URI for the original location of the file/folder being renamed. oldUri: []const u8, /// A file:// URI for the new location of the file/folder being renamed. newUri: []const u8, }; /// The will rename files request is sent from the client to the server before files are actually /// renamed as long as the rename is triggered from within the client. pub const WillRenameFilesRequest = struct { comptime method: []const u8 = "workspace/willRenameFiles", id: RequestId, params: RenameFilesParams, }; /// The will delete files request is sent from the client to the server before files are actually /// deleted as long as the deletion is triggered from within the client. pub const DidDeleteFilesNotification = struct { comptime method: []const u8 = "workspace/didDeleteFiles", id: RequestId, params: DeleteFilesParams, }; /// The parameters sent in file delete requests/notifications. pub const DeleteFilesParams = struct { /// An array of all files/folders deleted in this operation. files: []FileDelete, }; /// Represents information on a file/folder delete. pub const FileDelete = struct { /// A file:// URI for the location of the file/folder being deleted. uri: []const u8, }; /// The did delete files notification is sent from the client to the server when /// files were deleted from within the client. pub const WillDeleteFilesRequest = struct { comptime method: []const u8 = "workspace/willDeleteFiles", id: RequestId, params: DeleteFilesParams, }; /// Moniker uniqueness level to define scope of the moniker. pub const UniquenessLevel = struct { pub const document = "document"; pub const global = "global"; pub const group = "group"; pub const project = "project"; pub const scheme = "scheme"; }; /// The moniker kind. 
pub const MonikerKind = struct {
    pub const @"export" = "export";
    pub const @"import" = "import";
    pub const local = "local";
};

/// Moniker definition to match LSIF 0.5 moniker definition.
pub const Moniker = struct {
    /// The scheme of the moniker. For example tsc or .Net
    scheme: []const u8,

    /// The identifier of the moniker. The value is opaque in LSIF however
    /// schema owners are allowed to define the structure if they want.
    identifier: []const u8,

    /// The scope in which the moniker is unique
    unique: UniquenessLevel,

    /// The moniker kind if known.
    kind: Undefinedable(MonikerKind),
};

/// Client capabilities specific to the moniker request.
pub const MonikerClientCapabilities = struct {
    /// Whether moniker supports dynamic registration. If this is set to `true`
    /// the client supports the new `MonikerRegistrationOptions` return value
    /// for the corresponding server capability as well.
    dynamicRegistration: Undefinedable(bool),
};

/// Server capability options for the moniker request.
pub const MonikerOptions = struct {
    workDoneProgress: Undefinedable(bool),
};

/// Registration options for the moniker request.
pub const MonikerRegistrationOptions = struct {
    /// A document selector to identify the scope of the registration. If set to null
    /// the document selector provided on the client side will be used.
    documentSelector: ?DocumentSelector,
    workDoneProgress: Undefinedable(bool),
};

/// Parameters of the `textDocument/moniker` request.
pub const MonikerParams = struct {
    /// The text document.
    textDocument: TextDocumentIdentifier,

    /// The position inside the text document.
    position: Position,

    /// An optional token that a server can use to report work done progress.
    workDoneToken: Undefinedable(ProgressToken),

    /// An optional token that a server can use to report partial results (e.g. streaming) to
    /// the client.
    partialResultToken: Undefinedable(ProgressToken),
};

/// A request to get the moniker of a symbol at a given text document position.
/// The request parameter is of type [TextDocumentPositionParams](#TextDocumentPositionParams).
/// The response is of type [Moniker[]](#Moniker[]) or `null`.
pub const MonikerRequest = struct {
    comptime method: []const u8 = "textDocument/moniker",
    id: RequestId,
    params: MonikerParams,
};

// Deprecated-name aliases kept for compatibility with older LSP revisions.
pub const ColorProviderOptions = DocumentColorOptions;
pub const ColorOptions = DocumentColorOptions;
pub const FoldingRangeProviderOptions = FoldingRangeOptions;
pub const SelectionRangeProviderOptions = SelectionRangeOptions;
pub const ColorRegistrationOptions = DocumentColorRegistrationOptions;

/// Tagged union over every request message type declared in this file;
/// the active tag names the concrete request struct it carries.
pub const Request = union(enum) {
    ApplyWorkspaceEditRequest: ApplyWorkspaceEditRequest,
    CallHierarchyIncomingCallsRequest: CallHierarchyIncomingCallsRequest,
    CallHierarchyOutgoingCallsRequest: CallHierarchyOutgoingCallsRequest,
    CallHierarchyPrepareRequest: CallHierarchyPrepareRequest,
    CodeActionRequest: CodeActionRequest,
    CodeActionResolveRequest: CodeActionResolveRequest,
    CodeLensRefreshRequest: CodeLensRefreshRequest,
    CodeLensRequest: CodeLensRequest,
    CodeLensResolveRequest: CodeLensResolveRequest,
    ColorPresentationRequest: ColorPresentationRequest,
    CompletionRequest: CompletionRequest,
    CompletionResolveRequest: CompletionResolveRequest,
    ConfigurationRequest: ConfigurationRequest,
    DeclarationRequest: DeclarationRequest,
    DefinitionRequest: DefinitionRequest,
    DocumentColorRequest: DocumentColorRequest,
    DocumentFormattingRequest: DocumentFormattingRequest,
    DocumentHighlightRequest: DocumentHighlightRequest,
    DocumentLinkRequest: DocumentLinkRequest,
    DocumentLinkResolveRequest: DocumentLinkResolveRequest,
    DocumentOnTypeFormattingRequest: DocumentOnTypeFormattingRequest,
    DocumentRangeFormattingRequest: DocumentRangeFormattingRequest,
    DocumentSymbolRequest: DocumentSymbolRequest,
    ExecuteCommandRequest: ExecuteCommandRequest,
    FoldingRangeRequest: FoldingRangeRequest,
    HoverRequest: HoverRequest,
    ImplementationRequest: ImplementationRequest,
    InitializeRequest: InitializeRequest,
    LinkedEditingRangeRequest: LinkedEditingRangeRequest,
    MonikerRequest: MonikerRequest,
    PrepareRenameRequest: PrepareRenameRequest,
    ReferencesRequest: ReferencesRequest,
    RegistrationRequest: RegistrationRequest,
    RenameRequest: RenameRequest,
    SelectionRangeRequest: SelectionRangeRequest,
    SemanticTokensDeltaRequest: SemanticTokensDeltaRequest,
    SemanticTokensRangeRequest: SemanticTokensRangeRequest,
    SemanticTokensRefreshRequest: SemanticTokensRefreshRequest,
    SemanticTokensRequest: SemanticTokensRequest,
    ShowDocumentRequest: ShowDocumentRequest,
    ShowMessageRequest: ShowMessageRequest,
    ShutdownRequest: ShutdownRequest,
    SignatureHelpRequest: SignatureHelpRequest,
    TypeDefinitionRequest: TypeDefinitionRequest,
    UnregistrationRequest: UnregistrationRequest,
    WillCreateFilesRequest: WillCreateFilesRequest,
    WillDeleteFilesRequest: WillDeleteFilesRequest,
    WillRenameFilesRequest: WillRenameFilesRequest,
    WillSaveTextDocumentWaitUntilRequest: WillSaveTextDocumentWaitUntilRequest,
    WorkDoneProgressCreateRequest: WorkDoneProgressCreateRequest,
    WorkspaceFoldersRequest: WorkspaceFoldersRequest,
    WorkspaceSymbolRequest: WorkspaceSymbolRequest,
};

/// Tagged union over every notification message type declared in this file;
/// the active tag names the concrete notification struct it carries.
pub const Notification = union(enum) {
    DidChangeConfigurationNotification: DidChangeConfigurationNotification,
    DidChangeTextDocumentNotification: DidChangeTextDocumentNotification,
    DidChangeWatchedFilesNotification: DidChangeWatchedFilesNotification,
    DidChangeWorkspaceFoldersNotification: DidChangeWorkspaceFoldersNotification,
    DidCloseTextDocumentNotification: DidCloseTextDocumentNotification,
    DidCreateFilesNotification: DidCreateFilesNotification,
    DidDeleteFilesNotification: DidDeleteFilesNotification,
    DidOpenTextDocumentNotification: DidOpenTextDocumentNotification,
    DidRenameFilesNotification: DidRenameFilesNotification,
    DidSaveTextDocumentNotification: DidSaveTextDocumentNotification,
    ExitNotification: ExitNotification,
    InitializedNotification: InitializedNotification,
    LogMessageNotification: LogMessageNotification,
    PublishDiagnosticsNotification: PublishDiagnosticsNotification,
    ShowMessageNotification: ShowMessageNotification,
    TelemetryEventNotification: TelemetryEventNotification,
    WillSaveTextDocumentNotification: WillSaveTextDocumentNotification,
    WorkDoneProgressCancelNotification: WorkDoneProgressCancelNotification,
};
lsp_generated.zig
const std = @import("std");
const utils = @import("../utils.zig");

/// Tar entry type as stored in the header's `typeflag` byte.
/// Values match the on-disk encoding (v7, ustar and GNU extensions).
pub const TypeFlag = enum(u8) {
    aregular = 0,
    regular = '0',
    link = '1',
    symlink = '2',
    char = '3',
    block = '4',
    directory = '5',
    fifo = '6',
    continuous = '7',
    ext_header = 'x',
    ext_global_header = 'g',
    gnu_longname = 'L',
    gnu_longlink = 'K',
    gnu_sparse = 'S',
    _,
};

/// Returns `str` cut off at the first NUL byte; tar header fields are NUL-padded.
fn truncate(str: []const u8) []const u8 {
    if (std.mem.indexOfScalar(u8, str, 0)) |i| {
        return str[0..i];
    } else return str;
}

/// One parsed 512-byte tar header plus any GNU long name/link attached to it.
pub const Header = struct {
    const empty = [_]u8{0} ** 512;

    /// Pre-POSIX (v7) header layout.
    pub const OldHeader = extern struct {
        name: [100]u8,
        mode: [8]u8,
        uid: [8]u8,
        gid: [8]u8,
        size: [12]u8,
        mtime: [12]u8,
        checksum: [8]u8,
        typeflag: TypeFlag,
        linkname: [100]u8,
        __padding: [255]u8,
    };

    /// POSIX ustar header layout.
    pub const UstarHeader = extern struct {
        name: [100]u8,
        mode: [8]u8,
        uid: [8]u8,
        gid: [8]u8,
        size: [12]u8,
        mtime: [12]u8,
        checksum: [8]u8,
        typeflag: TypeFlag,
        linkname: [100]u8,
        magic: [6]u8,
        version: [2]u8,
        uname: [32]u8,
        gname: [32]u8,
        dev_major: [8]u8,
        dev_minor: [8]u8,
        prefix: [155]u8,
        __padding: [12]u8,
    };

    /// GNU tar header layout.
    pub const GnuHeader = extern struct {
        pub const SparseHeader = extern struct {
            offset: [12]u8,
            numbytes: [12]u8,
        };

        name: [100]u8,
        mode: [8]u8,
        uid: [8]u8,
        gid: [8]u8,
        size: [12]u8,
        mtime: [12]u8,
        checksum: [8]u8,
        typeflag: TypeFlag,
        linkname: [100]u8,
        magic: [6]u8,
        version: [2]u8,
        uname: [32]u8,
        gname: [32]u8,
        dev_major: [8]u8,
        dev_minor: [8]u8,
        atime: [12]u8,
        ctime: [12]u8,
        offset: [12]u8,
        long_names: [4]u8,
        __unused: u8,
        sparse: [4]SparseHeader,
        is_extended: u8,
        real_size: [12]u8,
        __padding: [17]u8,
    };

    /// Continuation block for GNU sparse maps that overflow the main header.
    pub const GnuExtSparseHeader = extern struct {
        sparse: [21]GnuHeader.SparseHeader,
        is_extended: u8,
        __padding: [7]u8,
    };

    comptime {
        // Every header view must overlay exactly one 512-byte block.
        std.debug.assert(@sizeOf(OldHeader) == 512 and @bitSizeOf(OldHeader) == 512 * 8);
        std.debug.assert(@sizeOf(UstarHeader) == 512 and @bitSizeOf(UstarHeader) == 512 * 8);
        std.debug.assert(@sizeOf(GnuHeader) == 512 and @bitSizeOf(GnuHeader) == 512 * 8);
        std.debug.assert(@sizeOf(GnuHeader.SparseHeader) == 24 and
            @bitSizeOf(GnuHeader.SparseHeader) == 24 * 8);
        std.debug.assert(@sizeOf(GnuExtSparseHeader) == 512 and @bitSizeOf(GnuExtSparseHeader) == 512 * 8);
    }

    /// Raw 512-byte header block.
    buffer: [512]u8,
    /// Byte offset of this header block within the archive file.
    offset: usize = 0,
    /// GNU long name attached by a preceding 'L' entry, if any; points into Parser.string_buffer.
    longname: ?[]const u8 = null,
    /// GNU long link target attached by a preceding 'K' entry, if any.
    longlink: ?[]const u8 = null,

    /// Views the raw block as a v7 header.
    pub fn asOld(self: Header) *const OldHeader {
        return @ptrCast(*const OldHeader, &self.buffer);
    }

    /// Views the raw block as a ustar header.
    pub fn asUstar(self: Header) *const UstarHeader {
        return @ptrCast(*const UstarHeader, &self.buffer);
    }

    /// Views the raw block as a GNU header.
    pub fn asGnu(self: Header) *const GnuHeader {
        return @ptrCast(*const GnuHeader, &self.buffer);
    }

    /// True if the magic/version fields identify a POSIX ustar header.
    pub fn isUstar(self: Header) bool {
        const header = self.asUstar();
        return std.mem.eql(u8, &header.magic, "ustar\x00") and std.mem.eql(u8, &header.version, "00");
    }

    /// True if the magic/version fields identify a GNU ("ustar  ") header.
    pub fn isGnu(self: Header) bool {
        const header = self.asGnu();
        return std.mem.eql(u8, &header.magic, "ustar ") and std.mem.eql(u8, &header.version, " \x00");
    }

    /// Entry filename: the attached GNU long name if present, otherwise the
    /// NUL-truncated 100-byte name field.
    pub fn filename(self: Header) []const u8 {
        if (self.longname) |name| return name;
        const header = self.asOld();
        return truncate(&header.name);
    }

    /// The entry's type flag.
    pub fn kind(self: Header) TypeFlag {
        const header = self.asOld();
        return header.typeflag;
    }

    /// File mode, parsed from the octal mode field (0 when the field is empty).
    pub fn mode(self: Header) !std.os.mode_t {
        const header = self.asOld();
        const str = truncate(&header.mode);
        return if (str.len == 0) 0 else try std.fmt.parseUnsigned(std.os.mode_t, str, 8);
    }

    /// Size in bytes of the data stored for this entry, from the octal size field.
    pub fn entrySize(self: Header) !u64 {
        const header = self.asOld();
        const str = truncate(&header.size);
        return if (str.len == 0) 0 else try std.fmt.parseUnsigned(u64, str, 8);
    }

    /// Entry size rounded up to the 512-byte block boundary used on disk.
    pub fn alignedEntrySize(self: Header) !u64 {
        return std.mem.alignForwardGeneric(u64, try self.entrySize(), 512);
    }

    /// Logical size of the file: for GNU sparse entries the `real_size` field,
    /// otherwise the same as entrySize().
    pub fn realSize(self: Header) !u64 {
        if (self.kind() == .gnu_sparse) {
            const header = self.asGnu();
            const str = truncate(&header.real_size);
            return if (str.len == 0) 0 else try std.fmt.parseUnsigned(u64, str, 8);
        } else return try self.entrySize();
    }

    /// First-pass scan of one header: accumulates entry and long-name string
    /// counts so Parser.load can pre-allocate, then seeks past the entry data.
    /// Returns the number of archive bytes consumed (header + aligned data).
    pub fn preprocess(parser: *Parser, reader: anytype, strings: *usize, entries: *usize) !usize {
        var header = Header{
            .buffer = undefined,
        };
        const read = try reader.readAll(&header.buffer);
        if (read != 512) return error.InvalidHeader;

        // An all-zero block marks end-of-archive padding.
        if (std.mem.eql(u8, &header.buffer, &Header.empty)) return 512;

        switch (header.kind()) {
            .gnu_longname, .gnu_longlink => {
                strings.* += try header.realSize();
            },
            else => {
                entries.* += 1;
            },
        }

        const total_data_len = try header.alignedEntrySize();
        try parser.file.seekBy(@intCast(i64, total_data_len));
        return total_data_len + 512;
    }

    /// Second-pass parse of one header at archive offset `offset`.
    /// GNU 'L'/'K' entries stash their payload on the parser and flag the slot
    /// for reuse; real entries pick up any stashed long name/link.
    /// Returns the number of archive bytes consumed.
    pub fn parse(self: *Header, parser: *Parser, reader: anytype, offset: usize) !usize {
        const read = try reader.readAll(&self.buffer);
        if (read != 512) return error.InvalidHeader;
        if (std.mem.eql(u8, &self.buffer, &Header.empty)) return 512;

        self.offset = offset;
        const total_data_len = try self.alignedEntrySize();

        switch (self.kind()) {
            .gnu_longname => {
                const size = try self.entrySize();
                parser.last_longname = truncate(try parser.readString(reader, size));
                parser.reuse_last_entry = true;
                try parser.file.seekBy(@intCast(i64, total_data_len - size));
            },
            .gnu_longlink => {
                const size = try self.entrySize();
                parser.last_longlink = truncate(try parser.readString(reader, size));
                parser.reuse_last_entry = true;
                try parser.file.seekBy(@intCast(i64, total_data_len - size));
            },
            else => {
                if (parser.last_longname) |name| {
                    self.longname = name;
                    parser.last_longname = null;
                } else {
                    self.longname = null;
                }
                if (parser.last_longlink) |name| {
                    self.longlink = name;
                    parser.last_longlink = null;
                } else {
                    self.longlink = null;
                }
                try parser.file.seekBy(@intCast(i64, total_data_len));
            },
        }

        return total_data_len + 512;
    }
};

/// Two-pass tar reader over an open file; owns the parsed entry list and the
/// backing storage for GNU long names.
pub const Parser = struct {
    allocator: *std.mem.Allocator,
    file: std.fs.File,
    reader: std.fs.File.Reader,
    entries: std.ArrayListUnmanaged(Header) = .{},
    /// Backing bytes for GNU long names/links; pre-sized in load() so entry
    /// slices into it stay valid (no reallocation after the first pass).
    string_buffer: std.ArrayListUnmanaged(u8) = .{},
    reuse_last_entry: bool = false,
    last_longname: ?[]const u8 = null,
    last_longlink: ?[]const u8 = null,

    pub fn init(allocator: *std.mem.Allocator, file: std.fs.File) Parser {
        return .{
            .allocator = allocator,
            .file = file,
            .reader = file.reader(),
        };
    }

    pub fn deinit(self: *Parser) void {
        self.entries.deinit(self.allocator);
        self.string_buffer.deinit(self.allocator);
    }

    /// Loads a tar file.
    /// The best solution for loading with our file-tree system in mind seems to be a two-pass one:
    /// - On the first pass, count the number of entries and total length of filenames so we can pre-allocate them
    /// - On the second pass, we can actually store things in our entries ArrayList
    pub fn load(self: *Parser) !void {
        var num_entries: usize = 0;
        var num_strings: usize = 0;

        const filesize = try self.file.getEndPos();
        var pos: usize = 0;
        while (pos < filesize) {
            pos += try Header.preprocess(self, self.reader, &num_strings, &num_entries);
        }

        try self.entries.ensureTotalCapacity(self.allocator, num_entries);
        try self.string_buffer.ensureTotalCapacity(self.allocator, num_strings);

        try self.file.seekTo(0);
        var index: usize = 0;
        pos = 0;
        while (index < num_entries) : (index += 1) {
            var entry = blk: {
                if (self.reuse_last_entry) {
                    // The previous header was a GNU 'L'/'K' entry; overwrite its
                    // slot with the real entry and don't count it.
                    self.reuse_last_entry = false;
                    index -= 1;
                    break :blk &self.entries.items[self.entries.items.len - 1];
                } else {
                    break :blk self.entries.addOneAssumeCapacity();
                }
            };
            pos += try entry.parse(self, self.reader, pos);
        }
    }

    /// Reads `len` bytes into the pre-allocated string buffer and returns the
    /// stored slice. Relies on load() having reserved the total capacity.
    fn readString(self: *Parser, reader: anytype, len: usize) ![]const u8 {
        if (len == 0) return "";

        try self.string_buffer.ensureUnusedCapacity(self.allocator, len);
        const prev_len = self.string_buffer.items.len;
        self.string_buffer.items.len += len;

        var buf = self.string_buffer.items[prev_len..][0..len];
        _ = try reader.readAll(buf);
        return buf;
    }

    /// Returns the index of the entry whose filename equals `filename`,
    /// or error.FileNotFound.
    /// NOTE(review): previously iterated a nonexistent `self.directory` field and
    /// compared against the `filename` method itself; fixed to search `entries`
    /// and call the method.
    pub fn getFileIndex(self: Parser, filename: []const u8) !usize {
        for (self.entries.items) |*hdr, i| {
            if (std.mem.eql(u8, hdr.filename(), filename)) {
                return i;
            }
        }
        return error.FileNotFound;
    }

    /// Reads the full contents of entry `index` into a newly allocated buffer.
    /// Caller owns the returned slice.
    /// NOTE(review): the previous implementation was copied from a zip reader —
    /// it referenced fields that do not exist here (`directory`, `local_header`,
    /// `compressed_size`, `compression`), called `alloc` without a type argument
    /// and never returned the buffer. Rewritten for the tar layout, where entry
    /// data follows the 512-byte header uncompressed.
    /// GNU sparse entries are returned as stored (holes are not expanded).
    pub fn readFileAlloc(self: Parser, allocator: std.mem.Allocator, index: usize) ![]const u8 {
        const header = self.entries.items[index];
        const size = @intCast(usize, try header.entrySize());

        var buffer = try allocator.alloc(u8, size);
        errdefer allocator.free(buffer);

        // Entry data starts directly after the entry's header block.
        try self.file.seekTo(header.offset + 512);
        const read = try self.reader.readAll(buffer);
        if (read != size) return error.InvalidHeader;
        return buffer;
    }

    pub const ExtractOptions = struct {
        /// Number of leading path components to strip from every entry name.
        skip_components: u16 = 0,
    };

    /// Extracts all regular files and directories of the archive into `dir`.
    /// Entries with fewer path components than `skip_components` (or an empty
    /// name after stripping) are skipped, as are link/device/fifo entries.
    /// NOTE(review): the previous implementation streamed the archive
    /// sequentially but never skipped the 512-byte header blocks or the GNU
    /// longname/longlink regions, leaked entry data into the stream when the
    /// stripped name was empty, and its component stripping was off by one
    /// (`[last_pos + 1 ..]` chopped the first character when skip_components
    /// was 0, and the search restarted at the found '/'). Rewritten to seek to
    /// each entry's recorded offset.
    pub fn extract(self: *Parser, dir: std.fs.Dir, options: ExtractOptions) !void {
        var buffer: [8192]u8 = undefined;

        entries: for (self.entries.items) |header| {
            const full_filename = header.filename();
            const entry_size = try header.entrySize();

            const new_filename = blk: {
                var start: usize = 0;
                var component: usize = 0;
                while (component < options.skip_components) : (component += 1) {
                    const slash = std.mem.indexOfPos(u8, full_filename, start, "/") orelse continue :entries;
                    start = slash + 1;
                }
                if (start >= full_filename.len) continue :entries;
                break :blk full_filename[start..];
            };

            switch (header.kind()) {
                .aregular, .regular => {
                    if (std.fs.path.dirname(new_filename)) |parent| try dir.makePath(parent);

                    const fd = try dir.createFile(new_filename, .{ .mode = try header.mode() });
                    defer fd.close();
                    const writer = fd.writer();

                    try self.file.seekTo(header.offset + 512);
                    var remaining = entry_size;
                    while (remaining > 0) {
                        const needed = @intCast(usize, std.math.min(remaining, buffer.len));
                        const read = try self.reader.readAll(buffer[0..needed]);
                        if (read == 0) return error.Unknown;
                        remaining -= read;
                        try writer.writeAll(buffer[0..read]);
                    }
                },
                .directory => {
                    try dir.makePath(new_filename);
                },
                else => {},
            }
        }
    }
};
src/formats/tar.zig
const Symbol = @This();

const std = @import("std");
const types = @import("types.zig");

/// Bitfield containings flags for a symbol
/// Can contain any of the flags defined in `Flag`
flags: u32,
/// Symbol name, when undefined this will be taken from the import.
name: [*:0]const u8,
/// An union that represents both the type of symbol
/// as well as the data it holds.
tag: Tag,
/// Index into the list of objects based on set `tag`
/// NOTE: This will be set to `undefined` when `tag` is `data`
/// and the symbol is undefined.
index: u32,

pub const Tag = enum {
    function,
    data,
    global,
    section,
    event,
    table,

    /// From a given symbol tag, returns the `ExternalType`
    /// Asserts the given tag can be represented as an external type.
    pub fn externalType(self: Tag) std.wasm.ExternalKind {
        return switch (self) {
            .function => .function,
            .global => .global,
            .data => .memory,
            .section => unreachable, // Not an external type
            .event => unreachable, // Not an external type
            .table => .table,
        };
    }
};

pub const Flag = enum(u32) {
    /// Indicates a weak symbol.
    /// When linking multiple modules defining the same symbol, all weak definitions are discarded
    /// in favourite of the strong definition. When no strong definition exists, all weak but one definiton is discarded.
    /// If multiple definitions remain, we get an error: symbol collision.
    WASM_SYM_BINDING_WEAK = 0x1,
    /// Indicates a local, non-exported, non-module-linked symbol.
    /// The names of local symbols are not required to be unique, unlike non-local symbols.
    WASM_SYM_BINDING_LOCAL = 0x2,
    /// Represents the binding of a symbol, indicating if it's local or not, and weak or not.
    WASM_SYM_BINDING_MASK = 0x3,
    /// Indicates a hidden symbol. Hidden symbols will not be exported to the link result, but may
    /// link to other modules.
    WASM_SYM_VISIBILITY_HIDDEN = 0x4,
    /// Indicates an undefined symbol. For non-data symbols, this must match whether the symbol is
    /// an import or is defined. For data symbols however, determines whether a segment is specified.
    WASM_SYM_UNDEFINED = 0x10,
    /// Indicates a symbol of which its intention is to be exported from the wasm module to the host environment.
    /// This differs from the visibility flag as this flag affects the static linker.
    WASM_SYM_EXPORTED = 0x20,
    /// Indicates the symbol uses an explicit symbol name, rather than reusing the name from a wasm import.
    /// Allows remapping imports from foreign WASM modules into local symbols with a different name.
    WASM_SYM_EXPLICIT_NAME = 0x40,
    /// Indicates the symbol is to be included in the linker output, regardless of whether it is used or has any references to it.
    WASM_SYM_NO_STRIP = 0x80,
    /// Indicates a symbol is TLS
    WASM_SYM_TLS = 0x100,
};

/// Verifies if the given symbol should be imported from the
/// host environment or not
/// NOTE(review): previously read `self.kind`, but the field is named `tag`; fixed.
pub fn requiresImport(self: Symbol) bool {
    if (!self.isUndefined()) return false;
    if (self.isWeak()) return false;
    if (self.tag == .data) return false;
    // if (self.isDefined() and self.isWeak()) return true; //TODO: Only when building shared lib

    return true;
}

/// True when the given flag bit is set in `flags`.
pub fn hasFlag(self: Symbol, flag: Flag) bool {
    return self.flags & @enumToInt(flag) != 0;
}

/// Sets the given flag bit in `flags`.
pub fn setFlag(self: *Symbol, flag: Flag) void {
    self.flags |= @enumToInt(flag);
}

pub fn isUndefined(self: Symbol) bool {
    return self.flags & @enumToInt(Flag.WASM_SYM_UNDEFINED) != 0;
}

/// Sets or clears the WASM_SYM_UNDEFINED bit.
pub fn setUndefined(self: *Symbol, is_undefined: bool) void {
    if (is_undefined) {
        self.setFlag(.WASM_SYM_UNDEFINED);
    } else {
        self.flags &= ~@enumToInt(Flag.WASM_SYM_UNDEFINED);
    }
}

pub fn isDefined(self: Symbol) bool {
    return !self.isUndefined();
}

pub fn isVisible(self: Symbol) bool {
    return self.flags & @enumToInt(Flag.WASM_SYM_VISIBILITY_HIDDEN) == 0;
}

pub fn isLocal(self: Symbol) bool {
    return self.flags & @enumToInt(Flag.WASM_SYM_BINDING_LOCAL) != 0;
}

pub fn isGlobal(self: Symbol) bool {
    return self.flags & @enumToInt(Flag.WASM_SYM_BINDING_LOCAL) == 0;
}

pub fn isHidden(self: Symbol) bool {
    return self.flags & @enumToInt(Flag.WASM_SYM_VISIBILITY_HIDDEN) != 0;
}

pub fn isNoStrip(self: Symbol) bool {
    return self.flags & @enumToInt(Flag.WASM_SYM_NO_STRIP) != 0;
}

/// A symbol is exported when it is defined, non-local and not hidden.
pub fn isExported(self: Symbol) bool {
    if (self.isUndefined() or self.isLocal()) return false;
    if (self.isHidden()) return false;
    return true;
}

pub fn isWeak(self: Symbol) bool {
    return self.flags & @enumToInt(Flag.WASM_SYM_BINDING_WEAK) != 0;
}

/// Formats the symbol into human-readable text
/// NOTE(review): previously switched on `self.kind` (field is `tag`) and called
/// `self.index()` although `index` is a plain `u32` field; both fixed.
pub fn format(self: Symbol, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
    _ = fmt;
    _ = options;
    const kind_fmt: u8 = switch (self.tag) {
        .function => 'F',
        .data => 'D',
        .global => 'G',
        .section => 'S',
        .event => 'E',
        .table => 'T',
    };
    const visible: []const u8 = if (self.isVisible()) "yes" else "no";
    const binding: []const u8 = if (self.isLocal()) "local" else "global";

    try writer.print(
        "{c} binding={s} visible={s} id={d} name={s}",
        .{ kind_fmt, binding, visible, self.index, self.name },
    );
}
src/link/Wasm/Symbol.zig