code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
const fun = @import("fun");
const nds = @import("index.zig");
const std = @import("std");
const debug = std.debug;
const fmt = std.fmt;
const fs = nds.fs;
const heap = std.heap;
const mem = std.mem;
const os = std.os;
const rand = std.rand;
const lu16 = fun.platform.lu16;
const lu32 = fun.platform.lu32;
const lu64 = fun.platform.lu64;
const lu128 = fun.platform.lu128;
/// Walks the `parent` chain of `folder` and returns how many ancestors it
/// has (0 for a root folder).
fn countParents(comptime Folder: type, folder: *Folder) usize {
    var depth: usize = 0;
    var node: ?*Folder = folder.parent;
    while (node) |parent| : (depth += 1) {
        node = parent.parent;
    }
    return depth;
}
/// Builds a randomly populated filesystem tree for round-trip tests.
/// `Folder` must be `fs.Nitro` or `fs.Narc`; Nitro files may themselves hold
/// nested Narc filesystems, generated by recursing into this function.
/// The returned root (and everything under it) is allocated with `allocator`.
fn randomFs(allocator: *mem.Allocator, random: *rand.Random, comptime Folder: type) !*Folder {
    comptime debug.assert(Folder == fs.Nitro or Folder == fs.Narc);
    const root = try Folder.create(allocator);
    // Monotonic counter used to give every created node a unique name.
    var unique: u64 = 0;
    var curr: ?*Folder = root;
    while (curr) |folder| {
        const parents = countParents(Folder, folder);
        const choice = random.range(usize, 0, 255);
        // The deeper the current folder and the more nodes it already holds,
        // the more likely this branch fires.
        // NOTE(review): `curr = folder.parent` is immediately followed by
        // `break`, so generation stops entirely here rather than ascending
        // and continuing; a `continue` (walking up until stepping above the
        // root) may have been intended — confirm against the original design.
        if (choice < parents + folder.nodes.len) {
            curr = folder.parent;
            break;
        }
        const is_file = random.scalar(bool);
        var name_buf: [50]u8 = undefined;
        // NOTE(review): `name` points into the stack buffer above; this
        // assumes createFile/createFolder copy the name — verify in fs.
        const name = try fmt.bufPrint(name_buf[0..], "{}", unique);
        unique += 1;
        if (is_file) {
            switch (Folder) {
                fs.Nitro => {
                    _ = try folder.createFile(name, blk: {
                        // A Nitro file is either a nested Narc filesystem or
                        // a binary blob of 10-100 random bytes.
                        const is_narc = random.scalar(bool);
                        if (is_narc) {
                            break :blk fs.Nitro.File{ .Narc = try randomFs(allocator, random, fs.Narc) };
                        }
                        const data = try allocator.alloc(u8, random.range(usize, 10, 100));
                        random.bytes(data);
                        break :blk fs.Nitro.File{
                            .Binary = fs.Nitro.File.Binary{
                                .allocator = allocator,
                                .data = data,
                            },
                        };
                    });
                },
                fs.Narc => {
                    // Narc files are always plain binary blobs.
                    const data = try allocator.alloc(u8, random.range(usize, 10, 100));
                    random.bytes(data);
                    _ = try folder.createFile(name, fs.Narc.File{
                        .allocator = allocator,
                        .data = data,
                    });
                },
                else => comptime unreachable,
            }
        } else {
            // Descend into the newly created subfolder and keep generating.
            curr = try folder.createFolder(name);
        }
    }
    return root;
}
/// Deep-compares two filesystem trees of the same kind (`fs.Nitro` or
/// `fs.Narc`). Folders are walked iteratively via an explicit work stack;
/// nested Narc filesystems inside Nitro files are compared recursively.
/// Returns false on the first mismatch (missing node, differing file kind,
/// or differing file contents).
/// NOTE(review): only verifies that every node of `fs1` exists and is equal
/// in `fs2`; nodes present only in `fs2` go undetected. That is sufficient
/// for the round-trip tests in this file.
fn fsEqual(allocator: *mem.Allocator, comptime Folder: type, fs1: *Folder, fs2: *Folder) !bool {
    comptime debug.assert(Folder == fs.Nitro or Folder == fs.Narc);
    // A pair of folders whose children still need to be compared.
    const FolderPair = struct {
        f1: *Folder,
        f2: *Folder,
    };
    var folders_to_compare = std.ArrayList(FolderPair).init(allocator);
    defer folders_to_compare.deinit();
    try folders_to_compare.append(FolderPair{
        .f1 = fs1,
        .f2 = fs2,
    });
    while (folders_to_compare.popOrNull()) |pair| {
        for (pair.f1.nodes.toSliceConst()) |n1| {
            switch (n1.kind) {
                Folder.Node.Kind.File => |f1| {
                    const f2 = pair.f2.getFile(n1.name) orelse return false;
                    switch (Folder) {
                        fs.Nitro => {
                            const Tag = @TagType(fs.Nitro.File);
                            switch (f1.*) {
                                Tag.Binary => {
                                    // Both must be binary blobs with equal bytes.
                                    if (f2.* != Tag.Binary)
                                        return false;
                                    if (!mem.eql(u8, f1.Binary.data, f2.Binary.data))
                                        return false;
                                },
                                Tag.Narc => {
                                    // Both must be Narc sub-filesystems; recurse.
                                    if (f2.* != Tag.Narc)
                                        return false;
                                    if (!try fsEqual(allocator, fs.Narc, f1.Narc, f2.Narc))
                                        return false;
                                },
                            }
                        },
                        fs.Narc => {
                            if (!mem.eql(u8, f1.data, f2.data))
                                return false;
                        },
                        else => comptime unreachable,
                    }
                },
                Folder.Node.Kind.Folder => |f1| {
                    // Matching subfolders are queued for later comparison.
                    const f2 = pair.f2.getFolder(n1.name) orelse return false;
                    try folders_to_compare.append(FolderPair{
                        .f1 = f1,
                        .f2 = f2,
                    });
                },
            }
        }
    }
    return true;
}
/// Minimal folder instantiation used by the tests below; the file payload is
/// a dummy single-byte struct with a no-op deinit.
const TestFolder = fs.Folder(struct {
    // TODO: We cannot compare pointers to zero sized types, so this field has to exist.
    a: u8,
    // Dummy payload owns no resources, so there is nothing to release.
    fn deinit(file: *@This()) void {}
});
/// Asserts that `res` is an error union holding exactly `expected`.
/// Panics when `res` is a success value or a different error.
fn assertError(res: var, expected: anyerror) void {
    _ = res catch |actual| {
        debug.assert(actual == expected);
        return;
    };
    unreachable;
}
test "nds.fs.Folder.deinit" {
var buf: [2 * 1024]u8 = undefined;
var fix_buf_alloc = heap.FixedBufferAllocator.init(buf[0..]);
const allocator = &fix_buf_alloc.allocator;
const root = try TestFolder.create(allocator);
root.deinit();
}
test "nds.fs.Folder.createFile" {
var buf: [2 * 1024]u8 = undefined;
var fix_buf_alloc = heap.FixedBufferAllocator.init(buf[0..]);
const allocator = &fix_buf_alloc.allocator;
const root = try TestFolder.create(allocator);
const a = try root.createFile("a", undefined);
const b = root.getFile("a") orelse unreachable;
debug.assert(@ptrToInt(a) == @ptrToInt(b));
assertError(root.createFile("a", undefined), error.NameExists);
assertError(root.createFile("/", undefined), error.InvalidName);
}
test "nds.fs.Folder.createFolder" {
var buf: [2 * 1024]u8 = undefined;
var fix_buf_alloc = heap.FixedBufferAllocator.init(buf[0..]);
const allocator = &fix_buf_alloc.allocator;
const root = try TestFolder.create(allocator);
const a = try root.createFolder("a");
const b = root.getFolder("a") orelse unreachable;
debug.assert(@ptrToInt(a) == @ptrToInt(b));
assertError(root.createFolder("a"), error.NameExists);
assertError(root.createFolder("/"), error.InvalidName);
}
test "nds.fs.Folder.createPath" {
var buf: [3 * 1024]u8 = undefined;
var fix_buf_alloc = heap.FixedBufferAllocator.init(buf[0..]);
const allocator = &fix_buf_alloc.allocator;
const root = try TestFolder.create(allocator);
const a = try root.createPath("b/a");
const b = root.getFolder("b/a") orelse unreachable;
debug.assert(@ptrToInt(a) == @ptrToInt(b));
_ = try root.createFile("a", undefined);
assertError(root.createPath("a"), error.FileInPath);
}
test "nds.fs.Folder.getFile" {
var buf: [2 * 1024]u8 = undefined;
var fix_buf_alloc = heap.FixedBufferAllocator.init(buf[0..]);
const allocator = &fix_buf_alloc.allocator;
const root = try TestFolder.create(allocator);
const a = try root.createFolder("a");
_ = try root.createFile("a1", undefined);
_ = try a.createFile("a2", undefined);
const a1 = root.getFile("a1") orelse unreachable;
const a2 = a.getFile("a2") orelse unreachable;
const a1_root = a.getFile("/a1") orelse unreachable;
debug.assert(root.getFile("a2") == null);
debug.assert(a.getFile("a1") == null);
const a2_path = root.getFile("a/a2") orelse unreachable;
debug.assert(@ptrToInt(a1) == @ptrToInt(a1_root));
debug.assert(@ptrToInt(a2) == @ptrToInt(a2_path));
}
test "nds.fs.Folder.getFolder" {
var buf: [3 * 1024]u8 = undefined;
var fix_buf_alloc = heap.FixedBufferAllocator.init(buf[0..]);
const allocator = &fix_buf_alloc.allocator;
const root = try TestFolder.create(allocator);
const a = try root.createFolder("a");
_ = try root.createFolder("a1");
_ = try a.createFolder("a2");
const a1 = root.getFolder("a1") orelse unreachable;
const a2 = a.getFolder("a2") orelse unreachable;
const a1_root = a.getFolder("/a1") orelse unreachable;
debug.assert(root.getFolder("a2") == null);
debug.assert(a.getFolder("a1") == null);
const a2_path = root.getFolder("a/a2") orelse unreachable;
debug.assert(@ptrToInt(a1) == @ptrToInt(a1_root));
debug.assert(@ptrToInt(a2) == @ptrToInt(a2_path));
}
test "nds.fs.Folder.exists" {
var buf: [2 * 1024]u8 = undefined;
var fix_buf_alloc = heap.FixedBufferAllocator.init(buf[0..]);
const allocator = &fix_buf_alloc.allocator;
const root = try TestFolder.create(allocator);
_ = try root.createFolder("a");
_ = try root.createFile("b", undefined);
debug.assert(root.exists("a"));
debug.assert(root.exists("b"));
debug.assert(!root.exists("c"));
}
test "nds.fs.Folder.root" {
var buf: [7 * 1024]u8 = undefined;
var fix_buf_alloc = heap.FixedBufferAllocator.init(buf[0..]);
const allocator = &fix_buf_alloc.allocator;
const root = try TestFolder.create(allocator);
const a = try root.createPath("a");
const b = try root.createPath("a/b");
const c = try root.createPath("a/b/c");
const d = try root.createPath("c/b/d");
debug.assert(@ptrToInt(root) == @ptrToInt(root.root()));
debug.assert(@ptrToInt(root) == @ptrToInt(a.root()));
debug.assert(@ptrToInt(root) == @ptrToInt(b.root()));
debug.assert(@ptrToInt(root) == @ptrToInt(c.root()));
debug.assert(@ptrToInt(root) == @ptrToInt(d.root()));
}
test "nds.fs.read/writeNitro" {
var buf: [400 * 1024]u8 = undefined;
var fix_buf_alloc = heap.FixedBufferAllocator.init(buf[0..]);
var random = rand.DefaultPrng.init(0);
const allocator = &fix_buf_alloc.allocator;
const root = try randomFs(allocator, &random.random, fs.Nitro);
const fntAndFiles = try fs.getFntAndFiles(fs.Nitro, root, allocator);
const files = fntAndFiles.files;
const main_fnt = fntAndFiles.main_fnt;
const sub_fnt = fntAndFiles.sub_fnt;
const fnt_buff_size = @sliceToBytes(main_fnt).len + sub_fnt.len;
const fnt_buff = try allocator.alloc(u8, fnt_buff_size);
const fnt = try fmt.bufPrint(fnt_buff, "{}{}", @sliceToBytes(main_fnt), sub_fnt);
var fat = std.ArrayList(fs.FatEntry).init(allocator);
const test_file = "__nds.fs.test.read.write__";
defer std.fs.deleteFile(test_file) catch unreachable;
{
var file = try std.fs.File.openWrite(test_file);
defer file.close();
for (files) |f| {
const pos = @intCast(u32, try file.getPos());
try fs.writeNitroFile(file, allocator, f.*);
fat.append(fs.FatEntry.init(pos, @intCast(u32, try file.getPos()) - pos)) catch unreachable;
}
}
const fs2 = blk: {
var file = try std.fs.File.openRead(test_file);
defer file.close();
break :blk try fs.readNitro(file, allocator, fnt, fat.toSlice());
};
debug.assert(try fsEqual(allocator, fs.Nitro, root, fs2));
}
test "nds.Rom" {
var buf: [400 * 1024]u8 = undefined;
var fix_buf_alloc = heap.FixedBufferAllocator.init(buf[0..]);
var random = rand.DefaultPrng.init(1);
const allocator = &fix_buf_alloc.allocator;
var rom1 = nds.Rom{
.allocator = allocator,
.header = nds.Header{
.game_title = "A" ** 12,
.gamecode = "A" ** 4,
.makercode = "ST",
.unitcode = 0x00,
.encryption_seed_select = 0x00,
.device_capacity = 0x00,
.reserved1 = [_]u8{0} ** 7,
.reserved2 = 0x00,
.nds_region = 0x00,
.rom_version = 0x00,
.autostart = 0x00,
.arm9_rom_offset = lu32.init(0x4000),
.arm9_entry_address = lu32.init(0x2000000),
.arm9_ram_address = lu32.init(0x2000000),
.arm9_size = lu32.init(0x3BFE00),
.arm7_rom_offset = lu32.init(0x8000),
.arm7_entry_address = lu32.init(0x2000000),
.arm7_ram_address = lu32.init(0x2000000),
.arm7_size = lu32.init(0x3BFE00),
.fnt_offset = lu32.init(0x00),
.fnt_size = lu32.init(0x00),
.fat_offset = lu32.init(0x00),
.fat_size = lu32.init(0x00),
.arm9_overlay_offset = lu32.init(0x00),
.arm9_overlay_size = lu32.init(0x00),
.arm7_overlay_offset = lu32.init(0x00),
.arm7_overlay_size = lu32.init(0x00),
.port_40001A4h_setting_for_normal_commands = [_]u8{0} ** 4,
.port_40001A4h_setting_for_key1_commands = [_]u8{0} ** 4,
.banner_offset = lu32.init(0x00),
.secure_area_checksum = lu16.init(0x00),
.secure_area_delay = lu16.init(0x051E),
.arm9_auto_load_list_ram_address = lu32.init(0x00),
.arm7_auto_load_list_ram_address = lu32.init(0x00),
.secure_area_disable = lu64.init(0x00),
.total_used_rom_size = lu32.init(0x00),
.rom_header_size = lu32.init(0x4000),
.reserved3 = [_]u8{0x00} ** 0x38,
.nintendo_logo = [_]u8{0x00} ** 0x9C,
.nintendo_logo_checksum = lu16.init(0x00),
.header_checksum = lu16.init(0x00),
.debug_rom_offset = lu32.init(0x00),
.debug_size = lu32.init(0x00),
.debug_ram_address = lu32.init(0x00),
.reserved4 = [_]u8{0x00} ** 4,
.reserved5 = [_]u8{0x00} ** 0x10,
.wram_slots = [_]u8{0x00} ** 20,
.arm9_wram_areas = [_]u8{0x00} ** 12,
.arm7_wram_areas = [_]u8{0x00} ** 12,
.wram_slot_master = [_]u8{0x00} ** 3,
.unknown = 0,
.region_flags = [_]u8{0x00} ** 4,
.access_control = [_]u8{0x00} ** 4,
.arm7_scfg_ext_setting = [_]u8{0x00} ** 4,
.reserved6 = [_]u8{0x00} ** 3,
.unknown_flags = 0,
.arm9i_rom_offset = lu32.init(0x00),
.reserved7 = [_]u8{0x00} ** 4,
.arm9i_ram_load_address = lu32.init(0x00),
.arm9i_size = lu32.init(0x00),
.arm7i_rom_offset = lu32.init(0x00),
.device_list_arm7_ram_addr = lu32.init(0x00),
.arm7i_ram_load_address = lu32.init(0x00),
.arm7i_size = lu32.init(0x00),
.digest_ntr_region_offset = lu32.init(0x4000),
.digest_ntr_region_length = lu32.init(0x00),
.digest_twl_region_offset = lu32.init(0x00),
.digest_twl_region_length = lu32.init(0x00),
.digest_sector_hashtable_offset = lu32.init(0x00),
.digest_sector_hashtable_length = lu32.init(0x00),
.digest_block_hashtable_offset = lu32.init(0x00),
.digest_block_hashtable_length = lu32.init(0x00),
.digest_sector_size = lu32.init(0x00),
.digest_block_sectorcount = lu32.init(0x00),
.banner_size = lu32.init(0x00),
.reserved8 = [_]u8{0x00} ** 4,
.total_used_rom_size_including_dsi_area = lu32.init(0x00),
.reserved9 = [_]u8{0x00} ** 4,
.reserved10 = [_]u8{0x00} ** 4,
.reserved11 = [_]u8{0x00} ** 4,
.modcrypt_area_1_offset = lu32.init(0x00),
.modcrypt_area_1_size = lu32.init(0x00),
.modcrypt_area_2_offset = lu32.init(0x00),
.modcrypt_area_2_size = lu32.init(0x00),
.title_id_emagcode = [_]u8{0x00} ** 4,
.title_id_filetype = 0,
.title_id_rest = [_]u8{ 0x00, 0x03, 0x00 },
.public_sav_filesize = lu32.init(0x00),
.private_sav_filesize = lu32.init(0x00),
.reserved12 = [_]u8{0x00} ** 176,
.cero_japan = 0,
.esrb_us_canada = 0,
.reserved13 = 0,
.usk_germany = 0,
.pegi_pan_europe = 0,
.resereved14 = 0,
.pegi_portugal = 0,
.pegi_and_bbfc_uk = 0,
.agcb_australia = 0,
.grb_south_korea = 0,
.reserved15 = [_]u8{0x00} ** 6,
.arm9_hash_with_secure_area = [_]u8{0x00} ** 20,
.arm7_hash = [_]u8{0x00} ** 20,
.digest_master_hash = [_]u8{0x00} ** 20,
.icon_title_hash = [_]u8{0x00} ** 20,
.arm9i_hash = [_]u8{0x00} ** 20,
.arm7i_hash = [_]u8{0x00} ** 20,
.reserved16 = [_]u8{0x00} ** 40,
.arm9_hash_without_secure_area = [_]u8{0x00} ** 20,
.reserved17 = [_]u8{0x00} ** 2636,
.reserved18 = [_]u8{0x00} ** 0x180,
.signature_across_header_entries = [_]u8{0x00} ** 0x80,
},
.banner = nds.Banner{
.version = 0x1,
.has_animated_dsi_icon = 0,
.crc16_across_0020h_083Fh = lu16.init(0x00),
.crc16_across_0020h_093Fh = lu16.init(0x00),
.crc16_across_0020h_0A3Fh = lu16.init(0x00),
.crc16_across_1240h_23BFh = lu16.init(0x00),
.reserved1 = [_]u8{0x00} ** 0x16,
.icon_bitmap = [_]u8{0x00} ** 0x200,
.icon_palette = [_]u8{0x00} ** 0x20,
.title_japanese = [_]u8{0x00} ** 0x100,
.title_english = [_]u8{0x00} ** 0x100,
.title_french = [_]u8{0x00} ** 0x100,
.title_german = [_]u8{0x00} ** 0x100,
.title_italian = [_]u8{0x00} ** 0x100,
.title_spanish = [_]u8{0x00} ** 0x100,
},
.arm9 = [_]u8{},
.arm7 = [_]u8{},
.nitro_footer = [_]lu32{
lu32.init(0),
lu32.init(0),
lu32.init(0),
},
.arm9_overlay_table = [_]nds.Overlay{},
.arm9_overlay_files = [_][]u8{},
.arm7_overlay_table = [_]nds.Overlay{},
.arm7_overlay_files = [_][]u8{},
.root = try randomFs(allocator, &random.random, fs.Nitro),
};
const name = try fmt.allocPrint(allocator, "__{}_{}__", rom1.header.game_title, rom1.header.gamecode);
errdefer allocator.free(name);
{
const file = try std.fs.File.openWrite(name);
errdefer std.fs.deleteFile(name) catch {};
defer file.close();
try rom1.writeToFile(file);
}
defer std.fs.deleteFile(name) catch {};
var rom2 = blk: {
const file = try std.fs.File.openRead(name);
defer file.close();
break :blk try nds.Rom.fromFile(file, allocator);
};
defer rom2.deinit();
}
|
src/test.zig
|
const x86 = @import("machine.zig");
const std = @import("std");
const assert = std.debug.assert;
usingnamespace @import("types.zig");
const AvxOpcode = x86.avx.AvxOpcode;
const Mnemonic = x86.Mnemonic;
const Instruction = x86.Instruction;
const Machine = x86.Machine;
const Operand = x86.operand.Operand;
const Immediate = x86.Immediate;
const OperandType = x86.operand.OperandType;
/// A fixed-size list of operand types identifying one form of an instruction
/// (both templates in the instruction database and concrete instances built
/// from user-supplied operands).
pub const Signature = struct {
    const max_ops = 5;
    operands: [max_ops]OperandType,

    /// Builds a signature from exactly `max_ops` operand types.
    pub fn ops(
        o1: OperandType,
        o2: OperandType,
        o3: OperandType,
        o4: OperandType,
        o5: OperandType,
    ) Signature {
        return Signature{
            .operands = [max_ops]OperandType{ o1, o2, o3, o4, o5 },
        };
    }

    // Arity-specific convenience constructors; unused slots become `.none`.
    pub fn ops0() Signature {
        return ops(.none, .none, .none, .none, .none);
    }
    pub fn ops1(o1: OperandType) Signature {
        return ops(o1, .none, .none, .none, .none);
    }
    pub fn ops2(o1: OperandType, o2: OperandType) Signature {
        return ops(o1, o2, .none, .none, .none);
    }
    pub fn ops3(o1: OperandType, o2: OperandType, o3: OperandType) Signature {
        return ops(o1, o2, o3, .none, .none);
    }
    pub fn ops4(o1: OperandType, o2: OperandType, o3: OperandType, o4: OperandType) Signature {
        return ops(o1, o2, o3, o4, .none);
    }
    pub fn ops5(
        o1: OperandType,
        o2: OperandType,
        o3: OperandType,
        o4: OperandType,
        o5: OperandType,
    ) Signature {
        return ops(o1, o2, o3, o4, o5);
    }

    /// Builds the signature of a concrete instruction from its (optional)
    /// operands; missing operands map to `.none`.
    pub fn fromOperands(
        operand1: ?*const Operand,
        operand2: ?*const Operand,
        operand3: ?*const Operand,
        operand4: ?*const Operand,
        operand5: ?*const Operand,
    ) Signature {
        const o1 = if (operand1) |op| op.operandType() else OperandType.none;
        const o2 = if (operand2) |op| op.operandType() else OperandType.none;
        const o3 = if (operand3) |op| op.operandType() else OperandType.none;
        const o4 = if (operand4) |op| op.operandType() else OperandType.none;
        const o5 = if (operand5) |op| op.operandType() else OperandType.none;
        return ops(o1, o2, o3, o4, o5);
    }

    /// Dumps the signature to stderr as "(op1,op2,...): ", skipping `.none`
    /// slots. Debugging aid only.
    pub fn debugPrint(self: Signature) void {
        std.debug.warn("(", .{});
        for (self.operands) |operand| {
            if (operand == .none) {
                continue;
            }
            std.debug.warn("{},", .{@tagName(operand)});
        }
        std.debug.warn("): ", .{});
    }

    /// Returns true when `instance` is accepted by `template`: each slot must
    /// either be `.none` in both, or satisfy `OperandType.matchTemplate`.
    pub fn matchTemplate(template: Signature, instance: Signature) bool {
        for (template.operands) |templ, i| {
            const rhs = instance.operands[i];
            if (templ == .none and rhs == .none) {
                continue;
            } else if (!OperandType.matchTemplate(templ, rhs)) {
                return false;
            }
        }
        return true;
    }
};
/// Special opcode edge cases
/// Some opcode/instruction combinations have special legacy edge cases we need
/// to check before we can confirm a match.
pub const OpcodeEdgeCase = enum {
    None = 0x0,
    /// Prior to x86-64, the instructions:
    /// 90 XCHG EAX, EAX
    /// and
    /// 90 NOP
    /// were encoded the same way and might be treated specially by the CPU.
    /// However, on x86-64 `XCHG EAX,EAX` is no longer a NOP because it zeros
    /// the upper 32 bits of the RAX register.
    ///
    /// When AMD designed x86-64 they decided 90 should still be treated as a NOP.
    /// This means we have to choose a different encoding on x86-64.
    XCHG_EAX = 0x1,
    /// sign-extended immediate value (ie this opcode sign extends)
    Sign,
    /// mark an encoding that will only work if the immediate doesn't need a
    /// sign extension (ie this opcode zero extends)
    ///
    /// eg: `MOV r64, imm32` can be encode in the same way as `MOV r32, imm32`
    /// as long as the immediate is non-negative since all operations on 32 bit
    /// registers implicitly zero extend
    NoSign,
    /// Not encodable on 64 bit mode
    No64,
    /// Not encodable on 32/16 bit mode
    No32,
    /// Valid encoding, but undefined behaviour
    Undefined,

    /// Returns true when this edge case applies to the given operands/mode,
    /// i.e. the candidate encoding `item` must be rejected.
    /// `.No64`/`.No32`/`.Undefined` are handled elsewhere (as mode edge
    /// cases) and must never reach this function.
    pub fn isEdgeCase(
        self: OpcodeEdgeCase,
        item: InstructionItem,
        mode: Mode86,
        op1: ?*const Operand,
        op2: ?*const Operand,
        op3: ?*const Operand,
        op4: ?*const Operand,
        op5: ?*const Operand,
    ) bool {
        switch (self) {
            .XCHG_EAX => {
                // Only a problem on x86-64 when both operands are EAX.
                return (mode == .x64 and op1.?.Reg == .EAX and op2.?.Reg == .EAX);
            },
            .NoSign, .Sign => {
                const sign = self;
                var imm_pos: u8 = undefined;
                // figure out which operand is the immediate
                if (op1 != null and op1.?.tag() == .Imm) {
                    imm_pos = 0;
                } else if (op2 != null and op2.?.tag() == .Imm) {
                    imm_pos = 1;
                } else if (op3 != null and op3.?.tag() == .Imm) {
                    imm_pos = 2;
                } else if (op4 != null and op4.?.tag() == .Imm) {
                    imm_pos = 3;
                } else if (op5 != null and op5.?.tag() == .Imm) {
                    imm_pos = 4;
                } else {
                    unreachable;
                }
                const imm = switch (imm_pos) {
                    0 => op1.?.Imm,
                    1 => op2.?.Imm,
                    2 => op3.?.Imm,
                    3 => op4.?.Imm,
                    4 => op5.?.Imm,
                    else => unreachable,
                };
                switch (sign) {
                    // Matches edgecase when it's an unsigned immediate that will get sign extended
                    .Sign => {
                        const is_unsigned = imm.sign == .Unsigned;
                        return (is_unsigned and imm.willSignExtend(item.signature.operands[imm_pos]));
                    },
                    // Matches edgecase when it's a signed immedate that needs its sign extended
                    .NoSign => {
                        const is_signed = imm.sign == .Signed;
                        return (is_signed and imm.isNegative());
                    },
                    else => unreachable,
                }
            },
            .No64 => unreachable,
            .No32 => unreachable,
            .Undefined => unreachable,
            .None => return false,
        }
    }
};
// Quick references:
//
// * https://en.wikichip.org/wiki/intel/cpuid
// * https://en.wikipedia.org/wiki/X86_instruction_listings
// * https://www.felixcloutier.com/x86/
// * Intel manual: Vol 3, Ch 22.13 New instructions in the pentium and later ia-32 processors
/// CPU features/extensions an instruction may require; grouped by the CPUID
/// leaf (or processor generation) that reports them. Each feature maps to one
/// bit of `MaskType` via `toMask`, so a whole requirement set is a bitmask.
pub const CpuFeature = enum {
    pub const count = __num_cpu_features;
    pub const MaskType = u128;
    pub const all_features_mask = ~@as(CpuFeature.MaskType, 0);

    /// Convenient feature bundles for common processor baselines.
    pub const Presets = struct {
        const _i386 = [_]CpuFeature{ ._8086, ._186, ._286, ._386, ._087, ._287, ._387 };
        const _i486 = [_]CpuFeature{ ._8086, ._186, ._286, ._386, ._486, ._087, ._287, ._387 };
        const x86_64 = [_]CpuFeature{
            ._8086, ._186, ._286, ._386, ._486, .x86_64, ._087, ._287, ._387,
            .CPUID, .P6,   .FPU,  .TSC,  .MSR,  .CX8,    .SEP,  .CX16, .RSM,
            .MMX,   .SYSCALL,
        };
    };

    /// Added in 8086 / 8088
    _8086,
    /// Added in 80186 / 80188
    _186,
    /// Added in 80286
    _286,
    /// Added in 80386
    _386,
    /// Added in 80486
    _486,
    /// Added in x86-64 (CPUID.80000001H:EDX.LM[29] (Long Mode))
    x86_64,
    /// Legacy 8086 instructions not implemented on later processors
    _8086_Legacy,
    /// Legacy 80186 instructions not implemented on later processors
    _186_Legacy,
    /// Legacy 80286 instructions not implemented on later processors
    _286_Legacy,
    /// Legacy 80386 instructions not implemented on later processors
    _386_Legacy,
    /// Legacy 80486 instructions not implemented on later processors
    _486_Legacy,
    /// Added in 8087 FPU, or CPUID.01H.EDX.FPU[0] = 1
    _087,
    /// Added in 80287 FPU, or CPUID.01H.EDX.FPU[0] = 1
    _287,
    /// Added in 80387 FPU, or CPUID.01H.EDX.FPU[0] = 1
    _387,
    /// Only works on Intel CPUs
    Intel,
    /// Only works on Amd CPUs
    Amd,
    /// Only works on Cyrix CPUs
    Cyrix,
    /// Added in AMD-V
    AMD_V,
    /// Added in VT-x
    VT_X,
    /// EFLAGS.ID[bit 21] can be set and cleared
    CPUID,
    /// CPUID.01H.EAX[11:8] = Family = 6 or 15 = 0110B or 1111B
    P6,
    /// (IA-32e mode supported) or (CPUID DisplayFamily_DisplayModel = 06H_0CH )
    RSM,
    //
    // CPUID.(EAX=1).ECX
    //
    /// CPUID.01H.ECX.SSE3[0] = 1
    SSE3,
    /// CPUID.01H.ECX.PCLMULQDQ[1] = 1
    PCLMULQDQ,
    /// CPUID.01H.ECX.MONITOR[3] = 1
    MONITOR,
    /// CPUID.01H.ECX.VMX[5] = 1
    VMX,
    /// CPUID.01H.ECX.SMX[6] = 1
    SMX,
    /// CPUID.01H.ECX.SSSE3[9] = 1 (supplemental SSE3)
    SSSE3,
    /// CPUID.01H.ECX.FMA[12] = 1 (Fused Multiply and Add)
    FMA,
    /// CPUID.01H.ECX.CX16[13] = 1
    CX16,
    /// CPUID.01H.ECX.SSE4_1[19] = 1
    SSE4_1,
    /// CPUID.01H.ECX.SSE4_2[20] = 1
    SSE4_2,
    /// CPUID.01H.ECX.MOVBE[22] = 1
    MOVBE,
    /// CPUID.01H.ECX.POPCNT[23] = 1
    POPCNT,
    /// CPUID.01H.ECX.AES[25] = 1
    AES,
    /// CPUID.01H.ECX.XSAVE[26] = 1
    XSAVE,
    /// CPUID.01H.ECX.AVX[28] = 1
    AVX,
    /// CPUID.01H.ECX.F16C[29] = 1
    F16C,
    /// CPUID.01H.ECX.RDRAND[30] = 1
    RDRAND,
    //
    // CPUID.(EAX=1).EDX
    //
    /// CPUID.01H.EDX.FPU[0] = 1
    FPU,
    /// CPUID.01H.EDX.TSC[4] = 1
    TSC,
    /// CPUID.01H.EDX.MSR[5] = 1
    MSR,
    /// CPUID.01H.EDX.CX8[8] = 1
    CX8,
    /// CPUID.01H.EDX.SEP[11] = 1 (SYSENTER and SYSEXIT)
    SEP,
    /// CPUID.01H.EDX.CMOV[15] = 1 (CMOVcc and FCMOVcc)
    CMOV,
    /// CPUID.01H.EDX.CLFSH[19] = 1 (CFLUSH)
    CLFSH,
    /// CPUID.01H.EDX.MMX[23] = 1
    MMX,
    /// CPUID.01H.EDX.FXSR[24] = 1 (FXSAVE, FXRSTOR)
    FXSR,
    /// CPUID.01H.EDX.SSE[25] = 1
    SSE,
    /// CPUID.01H.EDX.SSE2[26] = 1
    SSE2,
    //
    // CPUID.(EAX=7, ECX=0).EBX
    //
    /// CPUID.EAX.07H.EBX.FSGSBASE[0] = 1
    FSGSBASE,
    /// CPUID.EAX.07H.EBX.SGX[2] = 1 (Software Guard eXtensions)
    SGX,
    /// CPUID.EAX.07H.EBX.BMI1[3] = 1 (Bit Manipulation Instructions 1)
    BMI1,
    /// CPUID.EAX.07H.EBX.HLE[4] = 1 (Hardware Lock Elision)
    HLE,
    /// CPUID.EAX.07H.EBX.AVX2[5] = 1 (Advanced Vector eXtension 2)
    AVX2,
    /// CPUID.EAX.07H.EBX.SMEP[7] = 1 (Supervisor Mode Execution Prevention)
    SMEP,
    /// CPUID.EAX.07H.EBX.BMI2[8] = 1 (Bit Manipulation Instructions 2)
    BMI2,
    /// CPUID.EAX.07H.EBX.INVPCID[10] = 1
    INVPCID,
    /// CPUID.EAX.07H.EBX.RTM[11] = 1
    RTM,
    /// CPUID.EAX.07H.EBX.MPX[14] = 1 (Memory Protection eXtension)
    MPX,
    /// CPUID.EAX.07H.EBX.AVX512F[16] = 1
    AVX512F,
    /// CPUID.EAX.07H.EBX.AVX512DQ[17] = 1 (Doubleword and Quadword)
    AVX512DQ,
    /// CPUID.EAX.07H.EBX.RDSEED[18] = 1
    RDSEED,
    /// CPUID.EAX.07H.EBX.ADX[19] = 1
    ADX,
    /// CPUID.EAX.07H.EBX.SMAP[20] = 1 (Supervisor Mode Access Prevention)
    SMAP,
    /// CPUID.EAX.07H.EBX.AVX512_I1FMA[21] = 1 (AVX512 Integer Fused Multiply Add)
    AVX512_IFMA,
    /// CPUID.EAX.07H.EBX.PCOMMIT[22] = 1
    PCOMMIT,
    /// CPUID.EAX.07H.EBX.CLFLUSHOPT[23] = 1
    CLFLUSHOPT,
    /// CPUID.EAX.07H.EBX.CLWB[24] = 1
    CLWB,
    /// CPUID.EAX.07H.EBX.AVX512PF[26] = 1 (AVX512 Prefetch Instructions)
    AVX512PF,
    /// CPUID.EAX.07H.EBX.AVX512ER[27] = 1 (AVX512 Exponential and Reciprocal)
    AVX512ER,
    /// CPUID.EAX.07H.EBX.AVX512CD[28] = 1 (AVX512 Conflict Detection)
    AVX512CD,
    /// CPUID.EAX.07H.EBX.SHA[29] = 1
    SHA,
    /// CPUID.EAX.07H.EBX.AVX512BW[30] = 1 (AVX512 Byte and Word)
    AVX512BW,
    /// CPUID.EAX.07H.EBX.AVX512_VL[31] = 1 (AVX512 Vector length extensions)
    AVX512VL,
    //
    // CPUID.(EAX=7, ECX=0).ECX
    //
    /// CPUID.EAX.07H.ECX.PREFETCHWT1[0] = 1
    PREFETCHWT1,
    /// CPUID.EAX.07H.ECX.AVX512_VBMI[1] = 1 (AVX512 Vector Byte Manipulation Instructions)
    AVX512_VBMI,
    /// CPUID.EAX.07H.ECX.PKU[3] = 1 (Protection Key rights register for User pages)
    PKU,
    /// CPUID.EAX.07H.ECX.WAITPKG[5] = 1
    WAITPKG,
    /// CPUID.EAX.07H.ECX.AVX512_VBMI2[5] = 1 (AVX512 Vector Byte Manipulation Instructions 2)
    AVX512_VBMI2,
    /// CPUID.EAX.07H.ECX.CET_SS[7] = 1 (Control-flow Enforcement Technology - Shadow Stack)
    CET_SS,
    /// CPUID.EAX.07H.ECX.GFNI[8] = 1
    GFNI,
    /// CPUID.EAX.07H.ECX.VAES[9] = 1
    VAES,
    /// CPUID.EAX.07H.ECX.VPCLMULQDQ[10] = 1 (Carry-less multiplication )
    VPCLMULQDQ,
    /// CPUID.EAX.07H.ECX.AVX512_VNNI[11] = 1 (AVX512 Vector Neural Network Instructions)
    AVX512_VNNI,
    /// CPUID.EAX.07H.ECX.AVX512_BITALG[12] = 1 (AVX512 Bit Algorithms)
    AVX512_BITALG,
    /// CPUID.EAX.07H.ECX.AVX512_VPOPCNTDQ[14] = 1 (AVX512 Vector Population Count Dword and Qword)
    AVX512_VPOPCNTDQ,
    /// CPUID.EAX.07H.ECX.RDPID[22] = 1
    RDPID,
    /// CPUID.EAX.07H.ECX.CLDEMOTE[25] = 1
    CLDEMOTE,
    /// CPUID.EAX.07H.ECX.MOVDIRI[27] = 1
    MOVDIRI,
    /// CPUID.EAX.07H.ECX.MOVDIR64B[28] = 1
    MOVDIR64B,
    //
    // CPUID.(EAX=7, ECX=1)
    //
    /// CPUID.(EAX=7, ECX=1).EAX.AVX512_BF16 (Brain Float16)
    AVX512_BF16,
    //
    // CPUID.(EAX=7, ECX=0).EDX
    //
    /// CPUID.EAX.07H.EDX.AVX512_4VNNIW[2] = 1 (Vector Neural Network Instructions Word (4VNNIW))
    AVX512_4VNNIW,
    /// CPUID.EAX.07H.EDX.AVX512_4FMAPS[3] = 1 (Fused Multiply Accumulation Packed Single precision (4FMAPS))
    AVX512_4FMAPS,
    /// CPUID.EAX.07H.EDX.CET_IBT[20] = 1 (Control-flow Enforcement Technology - Indirect-Branch Tracking)
    CET_IBT,
    //
    // CPUID.(EAX=0DH, ECX=1).EAX
    //
    /// CPUID.(EAX=0DH, ECX=1).EAX.XSAVEOPT[0]
    XSAVEOPT,
    /// CPUID.(EAX=0DH, ECX=1).EAX.XSAVEC[1]
    XSAVEC,
    /// CPUID.(EAX=0DH, ECX=1).EAX.XSS[3]
    XSS,
    //
    // CPUID.(EAX=14H, ECX=0).EBX
    //
    /// CPUID.(EAX=14H, ECX=0).EBX.PTWRITE[4]
    PTWRITE,
    //
    // CPUID.(EAX=0x8000_0001).ECX
    //
    /// CPUID.80000001H:ECX.LAHF_SAHF[0] LAHF/SAHF valid in 64 bit mode
    LAHF_SAHF,
    /// CPUID.80000001H:ECX.LZCNT[5] (AMD: (ABM Advanced Bit Manipulation))
    LZCNT,
    /// CPUID.80000001H:ECX.SSE4A[6]
    SSE4A,
    /// CPUID.80000001H:ECX.PREFETCHW[8] (aka 3DNowPrefetch)
    PREFETCHW,
    /// CPUID.80000001H:ECX.XOP[11]
    XOP,
    /// CPUID.80000001H:ECX.SKINIT[12] (SKINIT / STGI)
    SKINIT,
    /// CPUID.80000001H:ECX.LWP[15] (Light Weight Profiling)
    LWP,
    /// CPUID.80000001H:ECX.FMA4[16] (Fused Multiply Add 4 operands version)
    FMA4,
    /// CPUID.80000001H:ECX.TBM[21] (Trailing Bit Manipulation)
    TBM,
    //
    // CPUID.(EAX=0x8000_0001).EDX
    //
    /// CPUID.80000001H:EDX.SYSCALL[8]
    SYSCALL,
    /// CPUID.80000001H:EDX.MMXEXT[22]
    MMXEXT,
    /// CPUID.80000001H:EDX.RDTSCP[27]
    RDTSCP,
    /// CPUID.80000001H:EDX.3DNOWEXT[30] (3DNow! Extensions, 3DNow!+)
    _3DNOWEXT,
    /// CPUID.80000001H:EDX.3DNOW[31] (3DNow!)
    _3DNOW,
    /// Cyrix EMMI (Extended Multi-Media Instructions) (6x86 MX and MII)
    EMMI,
    // Sentinel; must stay last so it equals the number of real features.
    __num_cpu_features,

    /// Returns the single-bit mask for this feature.
    pub fn toMask(self: CpuFeature) MaskType {
        return @shlExact(@as(MaskType, 1), @enumToInt(self));
    }

    /// ORs the masks of all features in `feature_array` into one mask.
    pub fn arrayToMask(feature_array: []const CpuFeature) MaskType {
        var res: MaskType = 0;
        for (feature_array) |feature| {
            res |= feature.toMask();
        }
        return res;
    }
};
/// Optional legacy prefixes that may be attached to an instruction.
pub const InstructionPrefix = enum {
    Lock,
    Rep,
    Repe,
    Repne,
    Bnd,
    /// Either XACQUIRE or XRELEASE prefix, requires lock prefix to be used
    Hle,
    /// Either XACQUIRE or XRELEASE prefix, no lock prefix required
    HleNoLock,
    /// XRELEASE prefix, no lock prefix required
    Xrelease,
};
/// How an instruction's operands map onto the encoded bytes
/// (ModRM fields, immediates, opcode-embedded register, VEX/EVEX vvvv, ...).
pub const InstructionEncoding = enum {
    ZO, // no operands
    I, // immediate
    I2, // IGNORE immediate
    II, // immediate immediate
    MII, // ModRM:r/m immediate immediate
    RMII, // ModRM:reg ModRM:r/m immediate immediate
    O, // opcode+reg.num
    O2, // IGNORE opcode+reg.num
    M, // ModRM:r/m
    RM, // ModRM:reg ModRM:r/m
    MR, // ModRM:r/m ModRM:reg
    RMI, // ModRM:reg ModRM:r/m immediate
    MRI, // ModRM:r/m ModRM:reg immediate
    OI, // opcode+reg.num imm8/16/32/64
    MI, // ModRM:r/m imm8/16/32/64
    MSpec, // Special memory handling for instructions like MOVS,OUTS,XLAT, etc
    D, // encode address or offset
    FD, // AL/AX/EAX/RAX Moffs NA NA
    TD, // Moffs (w) AL/AX/EAX/RAX NA NA
    // AvxOpcodes encodings
    RVM, // ModRM:reg (E)VEX:vvvv ModRM:r/m
    MVR, // ModRM:r/m (E)VEX:vvvv ModRM:reg
    RMV, // ModRM:reg ModRM:r/m (E)VEX:vvvv
    RVMR, // ModRM:reg (E)VEX:vvvv ModRM:r/m imm8[7:4]:vvvv
    RVRM, // ModRM:reg (E)VEX:vvvv imm8[7:4]:vvvv ModRM:r/m
    RVMRI, // ModRM:reg (E)VEX:vvvv ModRM:r/m imm8[7:4]:vvvv imm8[3:0]
    RVRMI, // ModRM:reg (E)VEX:vvvv imm8[7:4]:vvvv ModRM:r/m imm8[3:0]
    RVMI, // ModRM:reg (E)VEX:vvvv ModRM:r/m imm8
    VMI, // (E)VEX:vvvv ModRM:r/m imm8
    VM, // (E)VEX.vvvv ModRM:r/m
    MV, // ModRM:r/m (E)VEX:vvvv
    vRMI, // ModRM:reg ModRM:r/m imm8
    vMRI, // ModRM:r/m ModRM:reg imm8
    vRM, // ModRM:reg ModRM:r/m
    vMR, // ModRM:r/m ModRM:reg
    vM, // ModRm:r/m
    vZO, //

    /// Position (0-based) of the memory-capable (ModRM:r/m) operand for this
    /// encoding, or null when the encoding has no memory operand.
    pub fn getMemPos(self: InstructionEncoding) ?u8 {
        return switch (self) {
            .M, .MR, .MRI, .MI, .MVR, .MV, .vMRI, .vMR, .vM => 0,
            .RM, .RMI, .RMV, .VMI, .VM, .vRM => 1,
            .RVM, .RVMR, .RVMI => 2,
            else => null,
        };
    }
};
/// Discriminator for `OpcodeAny`.
pub const OpcodeAnyTag = enum {
    Op,
    Avx,
};
/// Either a legacy opcode or an AVX (VEX/EVEX) opcode.
pub const OpcodeAny = union(OpcodeAnyTag) {
    Op: Opcode,
    Avx: AvxOpcode,
};
pub const InstructionItem = struct {
mnemonic: Mnemonic,
signature: Signature,
opcode: OpcodeAny,
encoding: InstructionEncoding,
overides: Overides,
edge_case: OpcodeEdgeCase,
mode_edge_case: OpcodeEdgeCase,
disp_n: u8,
cpu_feature_mask: CpuFeature.MaskType,
fn create(
mnem: Mnemonic,
signature: Signature,
opcode: anytype,
encoding: InstructionEncoding,
overides: Overides,
extra_opts: anytype,
) InstructionItem {
_ = @setEvalBranchQuota(100000);
var edge_case = OpcodeEdgeCase.None;
var mode_edge_case = OpcodeEdgeCase.None;
var cpu_feature_mask: CpuFeature.MaskType = 0;
var disp_n: u8 = 1;
const opcode_any = switch (@TypeOf(opcode)) {
Opcode => OpcodeAny{ .Op = opcode },
AvxOpcode => x: {
if (encoding.getMemPos()) |pos| {
const op_type = signature.operands[pos];
disp_n = x86.avx.calcDispMultiplier(opcode, op_type);
} else {
// don't care if no memory
}
break :x OpcodeAny{ .Avx = opcode };
},
else => @compileError("opcode: Expected Opcode or AvxOpcode, got: " ++ @TypeOf(opcode)),
};
if (@typeInfo(@TypeOf(extra_opts)) != .Struct) {
@compileError("extra_opts: Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args)));
}
for (extra_opts) |opt, i| {
switch (@TypeOf(opt)) {
CpuFeature => cpu_feature_mask |= opt.toMask(),
InstructionPrefix => {
// TODO:
},
OpcodeEdgeCase => {
switch (opt) {
.No32, .No64 => {
assert(mode_edge_case == .None);
mode_edge_case = opt;
},
.Undefined => {
// TODO:
},
else => {
assert(edge_case == .None);
edge_case = opt;
},
}
},
else => |bad_type| {
@compileError("Unsupported feature/version field type: " ++ @typeName(bad_type));
},
}
}
return InstructionItem{
.mnemonic = mnem,
.signature = signature,
.opcode = opcode_any,
.encoding = encoding,
.overides = overides,
.edge_case = edge_case,
.mode_edge_case = mode_edge_case,
.disp_n = disp_n,
.cpu_feature_mask = cpu_feature_mask,
};
}
pub fn hasEdgeCase(self: InstructionItem) callconv(.Inline) bool {
return self.edge_case != .None;
}
pub fn isMachineMatch(self: InstructionItem, machine: Machine) callconv(.Inline) bool {
if (self.cpu_feature_mask & machine.cpu_feature_mask != self.cpu_feature_mask) {
return false;
}
switch (self.mode_edge_case) {
.None => {},
.No64 => if (machine.mode == .x64) return false,
.No32 => if (machine.mode == .x86_32 or machine.mode == .x86_16) return false,
else => unreachable,
}
return true;
}
pub fn matchesEdgeCase(
self: InstructionItem,
machine: Machine,
op1: ?*const Operand,
op2: ?*const Operand,
op3: ?*const Operand,
op4: ?*const Operand,
op5: ?*const Operand,
) bool {
return self.edge_case.isEdgeCase(self, machine.mode, op1, op2, op3, op4, op5);
}
/// Calculates the total length of instruction (without prefixes or rex):
/// opcode bytes + immediate bytes + optional modrm/SIB/displacement bytes.
pub fn calcLengthLegacy(self: *const InstructionItem, modrm: ?x86.operand.ModRmResult) u8 {
    // Only plain (non-AVX) opcodes take the legacy sizing path.
    var total: u8 = switch (self.opcode) {
        .Op => |op| op.byteCount(),
        else => unreachable,
    };
    total += self.totalImmediateSize();
    if (modrm) |rm| {
        // One modrm byte, plus a SIB byte when one is present.
        total += if (rm.sib != null) @as(u8, 2) else 1;
        total += rm.disp_bit_size.valueBytes();
    }
    return total;
}
/// Sum of the byte sizes of all immediate operands in this entry's
/// signature. Scanning stops at the first `.none` operand; non-immediate
/// operands contribute nothing.
pub fn totalImmediateSize(self: InstructionItem) u8 {
    var total: u8 = 0;
    for (self.signature.operands) |operand| {
        const bytes: u8 = switch (operand) {
            .none => break,
            .imm8 => 1,
            .imm16 => 2,
            .imm32 => 4,
            .imm64 => 8,
            else => 0,
        };
        total += bytes;
    }
    return total;
}
/// Coerce the caller-supplied immediate to the width demanded by the
/// signature operand at index `pos`. `op` must be a non-null immediate
/// operand and `pos` must name an immNN signature slot.
pub fn coerceImm(self: InstructionItem, op: ?*const Operand, pos: u8) Immediate {
    return switch (self.signature.operands[pos]) {
        .imm8 => op.?.*.Imm,
        .imm16 => op.?.*.Imm.coerce(.Bit16),
        .imm32 => op.?.*.Imm.coerce(.Bit32),
        .imm64 => op.?.*.Imm.coerce(.Bit64),
        else => unreachable,
    };
}
/// Encode this instruction for `machine`, dispatching on the entry's
/// encoding form. The assembly-order operands (op1..op5) are re-ordered
/// here into the argument positions expected by the lower-level encoders.
pub fn encode(
    self: *const InstructionItem,
    machine: Machine,
    ctrl: ?*const EncodingControl,
    op1: ?*const Operand,
    op2: ?*const Operand,
    op3: ?*const Operand,
    op4: ?*const Operand,
    op5: ?*const Operand,
) AsmError!Instruction {
    return switch (self.encoding) {
        // Legacy reg/rm/imm forms. Note that .MR/.MRI pass (op2, op1),
        // i.e. swapped relative to .RM/.RMI. The integer given to
        // coerceImm is the signature index of that immediate operand.
        .ZO => machine.encodeRMI(self, ctrl, null, null, null, self.overides),
        .M => machine.encodeRMI(self, ctrl, null, op1, null, self.overides),
        .MI => machine.encodeRMI(self, ctrl, null, op1, self.coerceImm(op2, 1), self.overides),
        .RM => machine.encodeRMI(self, ctrl, op1, op2, null, self.overides),
        .RMI => machine.encodeRMI(self, ctrl, op1, op2, self.coerceImm(op3, 2), self.overides),
        .MRI => machine.encodeRMI(self, ctrl, op2, op1, self.coerceImm(op3, 2), self.overides),
        .MR => machine.encodeRMI(self, ctrl, op2, op1, null, self.overides),
        // Immediate-only forms; .I2 takes its immediate from the second operand.
        .I => machine.encodeRMI(self, ctrl, null, null, self.coerceImm(op1, 0), self.overides),
        .I2 => machine.encodeRMI(self, ctrl, null, null, self.coerceImm(op2, 1), self.overides),
        // Double-immediate forms (e.g. ENTER imm16, imm8).
        .II => machine.encodeRMII(self, ctrl, null, null, self.coerceImm(op1, 0), self.coerceImm(op2, 1), self.overides),
        .MII => machine.encodeRMII(self, ctrl, null, op1, self.coerceImm(op2, 1), self.coerceImm(op3, 2), self.overides),
        .RMII => machine.encodeRMII(self, ctrl, op1, op2, self.coerceImm(op3, 2), self.coerceImm(op4, 3), self.overides),
        // Opcode-register forms (register encoded into the opcode byte).
        .O => machine.encodeOI(self, ctrl, op1, null, self.overides),
        .O2 => machine.encodeOI(self, ctrl, op2, null, self.overides),
        .OI => machine.encodeOI(self, ctrl, op1, self.coerceImm(op2, 1), self.overides),
        .D => machine.encodeAddress(self, ctrl, op1, self.overides),
        // Memory-offset forms: .FD and .TD differ only in operand direction.
        .FD => machine.encodeMOffset(self, ctrl, op1, op2, self.overides),
        .TD => machine.encodeMOffset(self, ctrl, op2, op1, self.overides),
        .MSpec => machine.encodeMSpecial(self, ctrl, op1, op2, self.overides),
        // AVX/VEX/EVEX forms. NOTE(review): encodeAvx's operand slots appear
        // to be (reg, vvvv, rm, is4, imm) based on the permutations below --
        // confirm against encodeAvx's signature.
        .RVM => machine.encodeAvx(self, ctrl, op1, op2, op3, null, null, self.disp_n),
        .MVR => machine.encodeAvx(self, ctrl, op3, op2, op1, null, null, self.disp_n),
        .RMV => machine.encodeAvx(self, ctrl, op1, op3, op2, null, null, self.disp_n),
        .VM => machine.encodeAvx(self, ctrl, null, op1, op2, null, null, self.disp_n),
        .MV => machine.encodeAvx(self, ctrl, null, op2, op1, null, null, self.disp_n),
        .RVMI => machine.encodeAvx(self, ctrl, op1, op2, op3, null, op4, self.disp_n),
        .VMI => machine.encodeAvx(self, ctrl, null, op1, op2, null, op3, self.disp_n),
        .RVMR => machine.encodeAvx(self, ctrl, op1, op2, op3, op4, null, self.disp_n),
        .RVMRI => machine.encodeAvx(self, ctrl, op1, op2, op3, op4, op5, self.disp_n),
        .RVRM => machine.encodeAvx(self, ctrl, op1, op2, op4, op3, null, self.disp_n),
        .RVRMI => machine.encodeAvx(self, ctrl, op1, op2, op4, op3, op5, self.disp_n),
        // Lowercase 'v' forms: no vvvv operand is used.
        .vRMI => machine.encodeAvx(self, ctrl, op1, null, op2, null, op3, self.disp_n),
        .vMRI => machine.encodeAvx(self, ctrl, op2, null, op1, null, op3, self.disp_n),
        .vRM => machine.encodeAvx(self, ctrl, op1, null, op2, null, null, self.disp_n),
        .vMR => machine.encodeAvx(self, ctrl, op2, null, op1, null, null, self.disp_n),
        .vM => machine.encodeAvx(self, ctrl, null, null, op1, null, null, self.disp_n),
        .vZO => machine.encodeAvx(self, ctrl, null, null, null, null, null, self.disp_n),
    };
}
};
/// Generate a map from Mnemonic -> index in the instruction database.
///
/// Relies on `instruction_database` being grouped by mnemonic: the index of
/// the first row seen for each mnemonic becomes its entry. Mnemonics with no
/// rows keep the sentinel 0xffff (see `lookupMnemonic`). A mnemonic whose
/// rows are split into two runs triggers a compile error.
fn genMnemonicLookupTable() [Mnemonic.count]u16 {
    comptime {
        var result: [Mnemonic.count]u16 = undefined;
        var current_mnem = Mnemonic._mnemonic_final;
        // The comptime loop over the whole database is large; raise the quota.
        // (@setEvalBranchQuota returns void -- call it directly instead of
        // discarding the result with `_ =`.)
        @setEvalBranchQuota(50000);
        // 0xffff == "no entry for this mnemonic".
        for (result) |*val| {
            val.* = 0xffff;
        }
        for (instruction_database) |item, i| {
            if (item.mnemonic != current_mnem) {
                current_mnem = item.mnemonic;
                if (current_mnem == ._mnemonic_final) {
                    break;
                }
                // Seeing a mnemonic start a second run means the table is not
                // properly grouped -- fail the build rather than mis-assemble.
                if (result[@enumToInt(current_mnem)] != 0xffff) {
                    @compileError("Mnemonic mislocated in lookup table. " ++ @tagName(current_mnem));
                }
                result[@enumToInt(current_mnem)] = @intCast(u16, i);
            }
        }
        if (false) {
            // Count the number of unimplemented mnemonics (debug aid; flip the
            // condition above to enable).
            var count: u16 = 0;
            for (result) |val, mnem_index| {
                if (val == 0xffff) {
                    @compileLog(@intToEnum(Mnemonic, mnem_index));
                    count += 1;
                }
            }
            @compileLog("Unreferenced mnemonics: ", count);
        }
        return result;
    }
}
/// Return the index of the first `instruction_database` row for `mnem`.
/// Panics when the mnemonic has no rows in the database yet.
pub fn lookupMnemonic(mnem: Mnemonic) usize {
    const index = lookup_table[@enumToInt(mnem)];
    if (index != 0xffff) {
        return index;
    }
    std.debug.panic("Instruction {} not implemented yet", .{mnem});
}
/// Return a pointer to the instruction database row at `index`.
/// The pointer refers to comptime-known, immutable data.
pub fn getDatabaseItem(index: usize) *const InstructionItem {
    return &instruction_database[index];
}
/// Comptime-built index mapping @enumToInt(Mnemonic) to the first matching
/// row of `instruction_database`; 0xffff marks an unimplemented mnemonic.
pub const lookup_table: [Mnemonic.count]u16 = genMnemonicLookupTable();
// Short-hand constructors used to keep the instruction_database table
// compact. OpN builds an N-byte opcode; the OpNr variants additionally take
// a second value used as the modrm reg-field digit (the "/digit" in the
// Intel manual) -- see e.g. Op1r(0x80, 2) for ADC rm8, imm8.
const Op1 = x86.Opcode.op1;
const Op2 = x86.Opcode.op2;
const Op3 = x86.Opcode.op3;
const Op1r = x86.Opcode.op1r;
const Op2r = x86.Opcode.op2r;
const Op3r = x86.Opcode.op3r;
// Opcodes with compulsory prefix that needs to precede other prefixes
const preOp1 = x86.Opcode.preOp1;
const preOp2 = x86.Opcode.preOp2;
const preOp1r = x86.Opcode.preOp1r;
const preOp2r = x86.Opcode.preOp2r;
const preOp3 = x86.Opcode.preOp3;
const preOp3r = x86.Opcode.preOp3r;
// Opcodes that are actually composed of multiple instructions
// eg: FSTENV needs prefix 0x9B before other prefixes, because 0x9B is
// actually the opcode FWAIT/WAIT
const compOp2 = x86.Opcode.compOp2;
const compOp1r = x86.Opcode.compOp1r;
const Op3DNow = x86.Opcode.op3DNow;
// Signature constructors: opsN builds a signature with N operand slots.
const ops0 = Signature.ops0;
const ops1 = Signature.ops1;
const ops2 = Signature.ops2;
const ops3 = Signature.ops3;
const ops4 = Signature.ops4;
const ops5 = Signature.ops5;
// AVX opcode constructors (VEX / EVEX / XOP encodings).
const vex = x86.avx.AvxOpcode.vex;
const vexr = x86.avx.AvxOpcode.vexr;
const evex = x86.avx.AvxOpcode.evex;
const evexr = x86.avx.AvxOpcode.evexr;
const xop = x86.avx.AvxOpcode.xop;
const xopr = x86.avx.AvxOpcode.xopr;
const instr = InstructionItem.create;
/// Create a vector instruction.
/// Convenience wrapper around InstructionItem.create that always passes
/// .ZO for the overides argument (vector entries never set a legacy
/// operand-size override here).
fn vec(
    mnem: Mnemonic,
    signature: Signature,
    opcode: anytype,
    en: InstructionEncoding,
    extra_opts: anytype,
) InstructionItem {
    return InstructionItem.create(mnem, signature, opcode, en, .ZO, extra_opts);
}
// Shorthand aliases used by the instruction_database entries below.
const cpu = CpuFeature;
const edge = OpcodeEdgeCase;
// Edge cases: mode restrictions (No32/No64) and the sign-dependent
// immediate cases (Sign/NoSign) handled in InstructionItem.create.
const No64 = OpcodeEdgeCase.No64;
const No32 = OpcodeEdgeCase.No32;
const Sign = OpcodeEdgeCase.Sign;
const NoSign = OpcodeEdgeCase.NoSign;
// Instruction prefixes an entry is allowed to carry (LOCK, REP family, etc).
const Lock = InstructionPrefix.Lock;
const Rep = InstructionPrefix.Rep;
const Repe = InstructionPrefix.Repe;
const Repne = InstructionPrefix.Repne;
const Bnd = InstructionPrefix.Bnd;
const Hle = InstructionPrefix.Hle;
const HleNoLock = InstructionPrefix.HleNoLock;
const Xrelease = InstructionPrefix.Xrelease;
// CPU feature requirements: the ISA level or extension that introduced
// the encoding form.
const _8086_Legacy = cpu._8086_Legacy;
const _186_Legacy = cpu._186_Legacy;
const _286_Legacy = cpu._286_Legacy;
const _386_Legacy = cpu._386_Legacy;
const _486_Legacy = cpu._486_Legacy;
const _8086 = cpu._8086;
const _186 = cpu._186;
const _286 = cpu._286;
const _386 = cpu._386;
const _486 = cpu._486;
const x86_64 = cpu.x86_64;
const P6 = cpu.P6;
// x87 floating point unit generations.
const _087 = cpu._087;
const _187 = cpu._187;
const _287 = cpu._287;
const _387 = cpu._387;
// SIMD extensions.
const MMX = cpu.MMX;
const MMXEXT = cpu.MMXEXT;
const SSE = cpu.SSE;
const SSE2 = cpu.SSE2;
const SSE3 = cpu.SSE3;
const SSSE3 = cpu.SSSE3;
const SSE4A = cpu.SSE4A;
const SSE4_1 = cpu.SSE4_1;
const SSE4_2 = cpu.SSE4_2;
const AVX = cpu.AVX;
const AVX2 = cpu.AVX2;
const AVX512F = cpu.AVX512F;
const AVX512CD = cpu.AVX512CD;
const AVX512ER = cpu.AVX512ER;
const AVX512PF = cpu.AVX512PF;
const AVX512VL = cpu.AVX512VL;
const AVX512BW = cpu.AVX512BW;
const AVX512DQ = cpu.AVX512DQ;
const AVX512_BITALG = cpu.AVX512_BITALG;
const AVX512_IFMA = cpu.AVX512_IFMA;
const AVX512_VBMI = cpu.AVX512_VBMI;
const AVX512_VBMI2 = cpu.AVX512_VBMI2;
const AVX512_VNNI = cpu.AVX512_VNNI;
const AVX512_VPOPCNTDQ = cpu.AVX512_VPOPCNTDQ;
const FMA = cpu.FMA;
const GFNI = cpu.GFNI;
const VAES = cpu.VAES;
// AVX-512 tuple types. NOTE(review): these appear to drive the disp8*N
// compressed-displacement scaling via calcDispMultiplier -- confirm there.
const nomem = x86.avx.TupleType.NoMem;
const full = x86.avx.TupleType.Full;
const half = x86.avx.TupleType.Half;
const t1s = x86.avx.TupleType.Tuple1Scalar;
const t1f = x86.avx.TupleType.Tuple1Fixed;
const tup2 = x86.avx.TupleType.Tuple2;
const tup4 = x86.avx.TupleType.Tuple4;
const tup8 = x86.avx.TupleType.Tuple8;
const fmem = x86.avx.TupleType.FullMem;
const hmem = x86.avx.TupleType.HalfMem;
const qmem = x86.avx.TupleType.QuarterMem;
const emem = x86.avx.TupleType.EighthMem;
const mem128 = x86.avx.TupleType.Mem128;
const dup = x86.avx.TupleType.Movddup;
// NOTE: Entries into this table should be from most specific to most general.
// For example, if an instruction takes both a reg64 and rm64 value, an operand
// of Operand.register(.RAX) can match both, while a Operand.registerRm(.RAX)
// can only match the rm64 value.
//
// Putting the most specific opcodes first enables us to reach every possible
// encoding for an instruction.
//
// User supplied immediates without an explicit size are allowed to match
// against larger immediate sizes:
// * imm8 <-> imm8, imm16, imm32, imm64
// * imm16 <-> imm16, imm32, imm64
// * imm32 <-> imm32, imm64
// So if there are multiple operand signatures that only differ by their
// immediate size, we must place the version with the smaller immediate size
// first. This ensures that the shortest encoding is chosen when the operand
// is an Immediate without a fixed size.
pub const instruction_database = [_]InstructionItem{
//
// 8086 / 80186
//
// AAA
instr(.AAA, ops0(), Op1(0x37), .ZO, .ZO, .{ _8086, No64 }),
// AAD
instr(.AAD, ops0(), Op2(0xD5, 0x0A), .ZO, .ZO, .{ _8086, No64 }),
instr(.AAD, ops1(.imm8), Op1(0xD5), .I, .ZO, .{ _8086, No64 }),
// AAM
instr(.AAM, ops0(), Op2(0xD4, 0x0A), .ZO, .ZO, .{ _8086, No64 }),
instr(.AAM, ops1(.imm8), Op1(0xD4), .I, .ZO, .{ _8086, No64 }),
// AAS
instr(.AAS, ops0(), Op1(0x3F), .ZO, .ZO, .{ _8086, No64 }),
// ADC
instr(.ADC, ops2(.reg_al, .imm8), Op1(0x14), .I2, .ZO, .{_8086}),
instr(.ADC, ops2(.rm8, .imm8), Op1r(0x80, 2), .MI, .ZO, .{ _8086, Lock, Hle }),
instr(.ADC, ops2(.rm16, .imm8), Op1r(0x83, 2), .MI, .Op16, .{ _8086, Sign, Lock, Hle }),
instr(.ADC, ops2(.rm32, .imm8), Op1r(0x83, 2), .MI, .Op32, .{ _386, Sign, Lock, Hle }),
instr(.ADC, ops2(.rm64, .imm8), Op1r(0x83, 2), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.ADC, ops2(.reg_ax, .imm16), Op1(0x15), .I2, .Op16, .{_8086}),
instr(.ADC, ops2(.reg_eax, .imm32), Op1(0x15), .I2, .Op32, .{_386}),
instr(.ADC, ops2(.reg_rax, .imm32), Op1(0x15), .I2, .REX_W, .{ x86_64, Sign }),
//
instr(.ADC, ops2(.rm16, .imm16), Op1r(0x81, 2), .MI, .Op16, .{ _8086, Lock, Hle }),
instr(.ADC, ops2(.rm32, .imm32), Op1r(0x81, 2), .MI, .Op32, .{ _386, Lock, Hle }),
instr(.ADC, ops2(.rm64, .imm32), Op1r(0x81, 2), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.ADC, ops2(.rm8, .reg8), Op1(0x10), .MR, .ZO, .{ _8086, Lock, Hle }),
instr(.ADC, ops2(.rm16, .reg16), Op1(0x11), .MR, .Op16, .{ _8086, Lock, Hle }),
instr(.ADC, ops2(.rm32, .reg32), Op1(0x11), .MR, .Op32, .{ _386, Lock, Hle }),
instr(.ADC, ops2(.rm64, .reg64), Op1(0x11), .MR, .REX_W, .{ x86_64, Lock, Hle }),
//
instr(.ADC, ops2(.reg8, .rm8), Op1(0x12), .RM, .ZO, .{_8086}),
instr(.ADC, ops2(.reg16, .rm16), Op1(0x13), .RM, .Op16, .{_8086}),
instr(.ADC, ops2(.reg32, .rm32), Op1(0x13), .RM, .Op32, .{_386}),
instr(.ADC, ops2(.reg64, .rm64), Op1(0x13), .RM, .REX_W, .{x86_64}),
// ADD
instr(.ADD, ops2(.reg_al, .imm8), Op1(0x04), .I2, .ZO, .{_8086}),
instr(.ADD, ops2(.rm8, .imm8), Op1r(0x80, 0), .MI, .ZO, .{ _8086, Lock, Hle }),
instr(.ADD, ops2(.rm16, .imm8), Op1r(0x83, 0), .MI, .Op16, .{ _8086, Sign, Lock, Hle }),
instr(.ADD, ops2(.rm32, .imm8), Op1r(0x83, 0), .MI, .Op32, .{ _386, Sign, Lock, Hle }),
instr(.ADD, ops2(.rm64, .imm8), Op1r(0x83, 0), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.ADD, ops2(.reg_ax, .imm16), Op1(0x05), .I2, .Op16, .{_8086}),
instr(.ADD, ops2(.reg_eax, .imm32), Op1(0x05), .I2, .Op32, .{_386}),
instr(.ADD, ops2(.reg_rax, .imm32), Op1(0x05), .I2, .REX_W, .{ x86_64, Sign }),
//
instr(.ADD, ops2(.rm16, .imm16), Op1r(0x81, 0), .MI, .Op16, .{ _8086, Lock, Hle }),
instr(.ADD, ops2(.rm32, .imm32), Op1r(0x81, 0), .MI, .Op32, .{ _386, Lock, Hle }),
instr(.ADD, ops2(.rm64, .imm32), Op1r(0x81, 0), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.ADD, ops2(.rm8, .reg8), Op1(0x00), .MR, .ZO, .{ _8086, Lock, Hle }),
instr(.ADD, ops2(.rm16, .reg16), Op1(0x01), .MR, .Op16, .{ _8086, Lock, Hle }),
instr(.ADD, ops2(.rm32, .reg32), Op1(0x01), .MR, .Op32, .{ _386, Lock, Hle }),
instr(.ADD, ops2(.rm64, .reg64), Op1(0x01), .MR, .REX_W, .{ x86_64, Lock, Hle }),
//
instr(.ADD, ops2(.reg8, .rm8), Op1(0x02), .RM, .ZO, .{_8086}),
instr(.ADD, ops2(.reg16, .rm16), Op1(0x03), .RM, .Op16, .{_8086}),
instr(.ADD, ops2(.reg32, .rm32), Op1(0x03), .RM, .Op32, .{_386}),
instr(.ADD, ops2(.reg64, .rm64), Op1(0x03), .RM, .REX_W, .{x86_64}),
// AND
instr(.AND, ops2(.reg_al, .imm8), Op1(0x24), .I2, .ZO, .{_8086}),
instr(.AND, ops2(.rm8, .imm8), Op1r(0x80, 4), .MI, .ZO, .{ _8086, Lock, Hle }),
instr(.AND, ops2(.rm16, .imm8), Op1r(0x83, 4), .MI, .Op16, .{ _8086, Sign, Lock, Hle }),
instr(.AND, ops2(.rm32, .imm8), Op1r(0x83, 4), .MI, .Op32, .{ _386, Sign, Lock, Hle }),
instr(.AND, ops2(.rm64, .imm8), Op1r(0x83, 4), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.AND, ops2(.reg_ax, .imm16), Op1(0x25), .I2, .Op16, .{_8086}),
instr(.AND, ops2(.reg_eax, .imm32), Op1(0x25), .I2, .Op32, .{_386}),
instr(.AND, ops2(.reg_rax, .imm32), Op1(0x25), .I2, .REX_W, .{ x86_64, Sign }),
//
instr(.AND, ops2(.rm16, .imm16), Op1r(0x81, 4), .MI, .Op16, .{ _8086, Lock, Hle }),
instr(.AND, ops2(.rm32, .imm32), Op1r(0x81, 4), .MI, .Op32, .{ _386, Lock, Hle }),
instr(.AND, ops2(.rm64, .imm32), Op1r(0x81, 4), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.AND, ops2(.rm8, .reg8), Op1(0x20), .MR, .ZO, .{ _8086, Lock, Hle }),
instr(.AND, ops2(.rm16, .reg16), Op1(0x21), .MR, .Op16, .{ _8086, Lock, Hle }),
instr(.AND, ops2(.rm32, .reg32), Op1(0x21), .MR, .Op32, .{ _386, Lock, Hle }),
instr(.AND, ops2(.rm64, .reg64), Op1(0x21), .MR, .REX_W, .{ x86_64, Lock, Hle }),
//
instr(.AND, ops2(.reg8, .rm8), Op1(0x22), .RM, .ZO, .{_8086}),
instr(.AND, ops2(.reg16, .rm16), Op1(0x23), .RM, .Op16, .{_8086}),
instr(.AND, ops2(.reg32, .rm32), Op1(0x23), .RM, .Op32, .{_386}),
instr(.AND, ops2(.reg64, .rm64), Op1(0x23), .RM, .REX_W, .{x86_64}),
// BOUND
instr(.BOUND, ops2(.reg16, .rm16), Op1(0x62), .RM, .Op16, .{ _186, No64 }),
instr(.BOUND, ops2(.reg32, .rm32), Op1(0x62), .RM, .Op32, .{ _186, No64 }),
// CALL
instr(.CALL, ops1(.imm16), Op1(0xE8), .I, .Op16, .{ _8086, Bnd, No64 }),
instr(.CALL, ops1(.imm32), Op1(0xE8), .I, .Op32, .{ _386, Bnd }),
//
instr(.CALL, ops1(.rm16), Op1r(0xFF, 2), .M, .Op16, .{ _8086, Bnd, No64 }),
instr(.CALL, ops1(.rm32), Op1r(0xFF, 2), .M, .Op32, .{ _386, Bnd, No64 }),
instr(.CALL, ops1(.rm64), Op1r(0xFF, 2), .M, .ZO, .{ x86_64, Bnd, No32 }),
//
instr(.CALL, ops1(.ptr16_16), Op1r(0x9A, 4), .D, .Op16, .{ _8086, No64 }),
instr(.CALL, ops1(.ptr16_32), Op1r(0x9A, 4), .D, .Op32, .{ _386, No64 }),
//
instr(.CALL, ops1(.m16_16), Op1r(0xFF, 3), .M, .Op16, .{_8086}),
instr(.CALL, ops1(.m16_32), Op1r(0xFF, 3), .M, .Op32, .{_386}),
instr(.CALL, ops1(.m16_64), Op1r(0xFF, 3), .M, .REX_W, .{x86_64}),
// CBW
instr(.CBW, ops0(), Op1(0x98), .ZO, .Op16, .{_8086}),
instr(.CWDE, ops0(), Op1(0x98), .ZO, .Op32, .{_386}),
instr(.CDQE, ops0(), Op1(0x98), .ZO, .REX_W, .{x86_64}),
//
instr(.CWD, ops0(), Op1(0x99), .ZO, .Op16, .{_8086}),
instr(.CDQ, ops0(), Op1(0x99), .ZO, .Op32, .{_386}),
instr(.CQO, ops0(), Op1(0x99), .ZO, .REX_W, .{x86_64}),
// CLC
instr(.CLC, ops0(), Op1(0xF8), .ZO, .ZO, .{_8086}),
// CLD
instr(.CLD, ops0(), Op1(0xFC), .ZO, .ZO, .{_8086}),
// CLI
instr(.CLI, ops0(), Op1(0xFA), .ZO, .ZO, .{_8086}),
// CMC
instr(.CMC, ops0(), Op1(0xF5), .ZO, .ZO, .{_8086}),
// CMP
instr(.CMP, ops2(.reg_al, .imm8), Op1(0x3C), .I2, .ZO, .{_8086}),
instr(.CMP, ops2(.rm8, .imm8), Op1r(0x80, 7), .MI, .ZO, .{_8086}),
instr(.CMP, ops2(.rm16, .imm8), Op1r(0x83, 7), .MI, .Op16, .{ _8086, Sign }),
instr(.CMP, ops2(.rm32, .imm8), Op1r(0x83, 7), .MI, .Op32, .{ _386, Sign }),
instr(.CMP, ops2(.rm64, .imm8), Op1r(0x83, 7), .MI, .REX_W, .{ x86_64, Sign }),
//
instr(.CMP, ops2(.reg_ax, .imm16), Op1(0x3D), .I2, .Op16, .{_8086}),
instr(.CMP, ops2(.reg_eax, .imm32), Op1(0x3D), .I2, .Op32, .{_386}),
instr(.CMP, ops2(.reg_rax, .imm32), Op1(0x3D), .I2, .REX_W, .{ x86_64, Sign }),
//
instr(.CMP, ops2(.rm16, .imm16), Op1r(0x81, 7), .MI, .Op16, .{_8086}),
instr(.CMP, ops2(.rm32, .imm32), Op1r(0x81, 7), .MI, .Op32, .{_386}),
instr(.CMP, ops2(.rm64, .imm32), Op1r(0x81, 7), .MI, .REX_W, .{ x86_64, Sign }),
//
instr(.CMP, ops2(.rm8, .reg8), Op1(0x38), .MR, .ZO, .{_8086}),
instr(.CMP, ops2(.rm16, .reg16), Op1(0x39), .MR, .Op16, .{_8086}),
instr(.CMP, ops2(.rm32, .reg32), Op1(0x39), .MR, .Op32, .{_386}),
instr(.CMP, ops2(.rm64, .reg64), Op1(0x39), .MR, .REX_W, .{x86_64}),
//
instr(.CMP, ops2(.reg8, .rm8), Op1(0x3A), .RM, .ZO, .{_8086}),
instr(.CMP, ops2(.reg16, .rm16), Op1(0x3B), .RM, .Op16, .{_8086}),
instr(.CMP, ops2(.reg32, .rm32), Op1(0x3B), .RM, .Op32, .{_386}),
instr(.CMP, ops2(.reg64, .rm64), Op1(0x3B), .RM, .REX_W, .{x86_64}),
// CMPS / CMPSB / CMPSW / CMPSD / CMPSQ
instr(.CMPS, ops2(.rm_mem8, .rm_mem8), Op1(0xA6), .MSpec, .ZO, .{ _8086, Repe, Repne }),
instr(.CMPS, ops2(.rm_mem16, .rm_mem16), Op1(0xA7), .MSpec, .Op16, .{ _8086, Repe, Repne }),
instr(.CMPS, ops2(.rm_mem32, .rm_mem32), Op1(0xA7), .MSpec, .Op32, .{ _386, Repe, Repne }),
instr(.CMPS, ops2(.rm_mem64, .rm_mem64), Op1(0xA7), .MSpec, .REX_W, .{ x86_64, Repe, Repne }),
//
instr(.CMPSB, ops0(), Op1(0xA6), .ZO, .ZO, .{ _8086, Repe, Repne }),
instr(.CMPSW, ops0(), Op1(0xA7), .ZO, .Op16, .{ _8086, Repe, Repne }),
// instr(.CMPSD, ops0(), Op1(0xA7), .ZO, .Op32, .{_386, Repe, Repne} ), // overloaded
instr(.CMPSQ, ops0(), Op1(0xA7), .ZO, .REX_W, .{ x86_64, No32, Repe, Repne }),
// DAA
instr(.DAA, ops0(), Op1(0x27), .ZO, .ZO, .{ _8086, No64 }),
// DAS
instr(.DAS, ops0(), Op1(0x2F), .ZO, .ZO, .{ _8086, No64 }),
// DEC
instr(.DEC, ops1(.reg16), Op1(0x48), .O, .Op16, .{ _8086, No64 }),
instr(.DEC, ops1(.reg32), Op1(0x48), .O, .Op32, .{ _386, No64 }),
instr(.DEC, ops1(.rm8), Op1r(0xFE, 1), .M, .ZO, .{ _8086, Lock, Hle }),
instr(.DEC, ops1(.rm16), Op1r(0xFF, 1), .M, .Op16, .{ _8086, Lock, Hle }),
instr(.DEC, ops1(.rm32), Op1r(0xFF, 1), .M, .Op32, .{ _386, Lock, Hle }),
instr(.DEC, ops1(.rm64), Op1r(0xFF, 1), .M, .REX_W, .{ x86_64, Lock, Hle }),
// DIV
instr(.DIV, ops1(.rm8), Op1r(0xF6, 6), .M, .ZO, .{_8086}),
instr(.DIV, ops1(.rm16), Op1r(0xF7, 6), .M, .Op16, .{_8086}),
instr(.DIV, ops1(.rm32), Op1r(0xF7, 6), .M, .Op32, .{_386}),
instr(.DIV, ops1(.rm64), Op1r(0xF7, 6), .M, .REX_W, .{x86_64}),
// ENTER
instr(.ENTER, ops2(.imm16, .imm8), Op1(0xC8), .II, .ZO, .{_186}),
instr(.ENTERW, ops2(.imm16, .imm8), Op1(0xC8), .II, .Op16, .{_186}),
instr(.ENTERD, ops2(.imm16, .imm8), Op1(0xC8), .II, .Op32, .{ _386, No64 }),
instr(.ENTERQ, ops2(.imm16, .imm8), Op1(0xC8), .II, .ZO, .{ x86_64, No32 }),
// HLT
instr(.HLT, ops0(), Op1(0xF4), .ZO, .ZO, .{_8086}),
// IDIV
instr(.IDIV, ops1(.rm8), Op1r(0xF6, 7), .M, .ZO, .{_8086}),
instr(.IDIV, ops1(.rm16), Op1r(0xF7, 7), .M, .Op16, .{_8086}),
instr(.IDIV, ops1(.rm32), Op1r(0xF7, 7), .M, .Op32, .{_386}),
instr(.IDIV, ops1(.rm64), Op1r(0xF7, 7), .M, .REX_W, .{x86_64}),
// IMUL
instr(.IMUL, ops1(.rm8), Op1r(0xF6, 5), .M, .ZO, .{_8086}),
instr(.IMUL, ops1(.rm16), Op1r(0xF7, 5), .M, .Op16, .{_8086}),
instr(.IMUL, ops1(.rm32), Op1r(0xF7, 5), .M, .Op32, .{_386}),
instr(.IMUL, ops1(.rm64), Op1r(0xF7, 5), .M, .REX_W, .{x86_64}),
//
instr(.IMUL, ops2(.reg16, .rm16), Op2(0x0F, 0xAF), .RM, .Op16, .{_8086}),
instr(.IMUL, ops2(.reg32, .rm32), Op2(0x0F, 0xAF), .RM, .Op32, .{_386}),
instr(.IMUL, ops2(.reg64, .rm64), Op2(0x0F, 0xAF), .RM, .REX_W, .{x86_64}),
//
instr(.IMUL, ops3(.reg16, .rm16, .imm8), Op1(0x6B), .RMI, .Op16, .{ _186, Sign }),
instr(.IMUL, ops3(.reg32, .rm32, .imm8), Op1(0x6B), .RMI, .Op32, .{ _386, Sign }),
instr(.IMUL, ops3(.reg64, .rm64, .imm8), Op1(0x6B), .RMI, .REX_W, .{ x86_64, Sign }),
//
instr(.IMUL, ops3(.reg16, .rm16, .imm16), Op1(0x69), .RMI, .Op16, .{_186}),
instr(.IMUL, ops3(.reg32, .rm32, .imm32), Op1(0x69), .RMI, .Op32, .{_386}),
instr(.IMUL, ops3(.reg64, .rm64, .imm32), Op1(0x69), .RMI, .REX_W, .{ x86_64, Sign }),
// IN
instr(.IN, ops2(.reg_al, .imm8), Op1(0xE4), .I2, .ZO, .{_8086}),
instr(.IN, ops2(.reg_ax, .imm8), Op1(0xE5), .I2, .Op16, .{_8086}),
instr(.IN, ops2(.reg_eax, .imm8), Op1(0xE5), .I2, .Op32, .{_386}),
instr(.IN, ops2(.reg_al, .reg_dx), Op1(0xEC), .ZO, .ZO, .{_8086}),
instr(.IN, ops2(.reg_ax, .reg_dx), Op1(0xED), .ZO, .Op16, .{_8086}),
instr(.IN, ops2(.reg_eax, .reg_dx), Op1(0xED), .ZO, .Op32, .{_386}),
// INC
instr(.INC, ops1(.reg16), Op1(0x40), .O, .Op16, .{ _8086, No64 }),
instr(.INC, ops1(.reg32), Op1(0x40), .O, .Op32, .{ _386, No64 }),
instr(.INC, ops1(.rm8), Op1r(0xFE, 0), .M, .ZO, .{ _8086, Lock, Hle }),
instr(.INC, ops1(.rm16), Op1r(0xFF, 0), .M, .Op16, .{ _8086, Lock, Hle }),
instr(.INC, ops1(.rm32), Op1r(0xFF, 0), .M, .Op32, .{ _386, Lock, Hle }),
instr(.INC, ops1(.rm64), Op1r(0xFF, 0), .M, .REX_W, .{ x86_64, Lock, Hle }),
// INS / INSB / INSW / INSD
instr(.INS, ops2(.rm_mem8, .reg_dx), Op1(0x6C), .MSpec, .ZO, .{ _186, Rep }),
instr(.INS, ops2(.rm_mem16, .reg_dx), Op1(0x6D), .MSpec, .Op16, .{ _186, Rep }),
instr(.INS, ops2(.rm_mem32, .reg_dx), Op1(0x6D), .MSpec, .Op32, .{ x86_64, Rep }),
//
instr(.INSB, ops0(), Op1(0x6C), .ZO, .ZO, .{ _186, Rep }),
instr(.INSW, ops0(), Op1(0x6D), .ZO, .Op16, .{ _186, Rep }),
instr(.INSD, ops0(), Op1(0x6D), .ZO, .Op32, .{ _386, Rep }),
// INT
instr(.INT3, ops0(), Op1(0xCC), .ZO, .ZO, .{_8086}),
instr(.INT, ops1(.imm8), Op1(0xCD), .I, .ZO, .{_8086}),
instr(.INTO, ops0(), Op1(0xCE), .ZO, .ZO, .{ _8086, No64 }),
instr(.INT1, ops0(), Op1(0xF1), .ZO, .ZO, .{_386}),
instr(.ICEBP, ops0(), Op1(0xF1), .ZO, .ZO, .{_386}),
// IRET
instr(.IRET, ops0(), Op1(0xCF), .ZO, .ZO, .{_8086}),
instr(.IRETW, ops0(), Op1(0xCF), .ZO, .Op16, .{_8086}),
instr(.IRETD, ops0(), Op1(0xCF), .ZO, .Op32, .{ _386, No64 }),
instr(.IRETQ, ops0(), Op1(0xCF), .ZO, .ZO, .{ x86_64, No32 }),
// Jcc
instr(.JCXZ, ops1(.imm8), Op1(0xE3), .I, .Addr16, .{ _8086, Sign, No64 }),
instr(.JECXZ, ops1(.imm8), Op1(0xE3), .I, .Addr32, .{ _386, Sign }),
instr(.JRCXZ, ops1(.imm8), Op1(0xE3), .I, .Addr64, .{ x86_64, Sign, No32 }),
//
instr(.JA, ops1(.imm8), Op1(0x77), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JA, ops1(.imm16), Op2(0x0F, 0x87), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JA, ops1(.imm32), Op2(0x0F, 0x87), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JAE, ops1(.imm8), Op1(0x73), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JAE, ops1(.imm16), Op2(0x0F, 0x83), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JAE, ops1(.imm32), Op2(0x0F, 0x83), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JB, ops1(.imm8), Op1(0x72), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JB, ops1(.imm16), Op2(0x0F, 0x82), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JB, ops1(.imm32), Op2(0x0F, 0x82), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JBE, ops1(.imm8), Op1(0x76), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JBE, ops1(.imm16), Op2(0x0F, 0x86), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JBE, ops1(.imm32), Op2(0x0F, 0x86), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JC, ops1(.imm8), Op1(0x72), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JC, ops1(.imm16), Op2(0x0F, 0x82), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JC, ops1(.imm32), Op2(0x0F, 0x82), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JE, ops1(.imm8), Op1(0x74), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JE, ops1(.imm16), Op2(0x0F, 0x84), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JE, ops1(.imm32), Op2(0x0F, 0x84), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JG, ops1(.imm8), Op1(0x7F), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JG, ops1(.imm16), Op2(0x0F, 0x8F), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JG, ops1(.imm32), Op2(0x0F, 0x8F), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JGE, ops1(.imm8), Op1(0x7D), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JGE, ops1(.imm16), Op2(0x0F, 0x8D), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JGE, ops1(.imm32), Op2(0x0F, 0x8D), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JL, ops1(.imm8), Op1(0x7C), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JL, ops1(.imm16), Op2(0x0F, 0x8C), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JL, ops1(.imm32), Op2(0x0F, 0x8C), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JLE, ops1(.imm8), Op1(0x7E), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JLE, ops1(.imm16), Op2(0x0F, 0x8E), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JLE, ops1(.imm32), Op2(0x0F, 0x8E), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNA, ops1(.imm8), Op1(0x76), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNA, ops1(.imm16), Op2(0x0F, 0x86), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNA, ops1(.imm32), Op2(0x0F, 0x86), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNAE, ops1(.imm8), Op1(0x72), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNAE, ops1(.imm16), Op2(0x0F, 0x82), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNAE, ops1(.imm32), Op2(0x0F, 0x82), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNB, ops1(.imm8), Op1(0x73), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNB, ops1(.imm16), Op2(0x0F, 0x83), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNB, ops1(.imm32), Op2(0x0F, 0x83), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNBE, ops1(.imm8), Op1(0x77), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNBE, ops1(.imm16), Op2(0x0F, 0x87), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNBE, ops1(.imm32), Op2(0x0F, 0x87), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNC, ops1(.imm8), Op1(0x73), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNC, ops1(.imm16), Op2(0x0F, 0x83), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNC, ops1(.imm32), Op2(0x0F, 0x83), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNE, ops1(.imm8), Op1(0x75), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNE, ops1(.imm16), Op2(0x0F, 0x85), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNE, ops1(.imm32), Op2(0x0F, 0x85), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNG, ops1(.imm8), Op1(0x7E), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNG, ops1(.imm16), Op2(0x0F, 0x8E), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNG, ops1(.imm32), Op2(0x0F, 0x8E), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNGE, ops1(.imm8), Op1(0x7C), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNGE, ops1(.imm16), Op2(0x0F, 0x8C), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNGE, ops1(.imm32), Op2(0x0F, 0x8C), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNL, ops1(.imm8), Op1(0x7D), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNL, ops1(.imm16), Op2(0x0F, 0x8D), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNL, ops1(.imm32), Op2(0x0F, 0x8D), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNLE, ops1(.imm8), Op1(0x7F), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNLE, ops1(.imm16), Op2(0x0F, 0x8F), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNLE, ops1(.imm32), Op2(0x0F, 0x8F), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNO, ops1(.imm8), Op1(0x71), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNO, ops1(.imm16), Op2(0x0F, 0x81), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNO, ops1(.imm32), Op2(0x0F, 0x81), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNP, ops1(.imm8), Op1(0x7B), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNP, ops1(.imm16), Op2(0x0F, 0x8B), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNP, ops1(.imm32), Op2(0x0F, 0x8B), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNS, ops1(.imm8), Op1(0x79), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNS, ops1(.imm16), Op2(0x0F, 0x89), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNS, ops1(.imm32), Op2(0x0F, 0x89), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JNZ, ops1(.imm8), Op1(0x75), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JNZ, ops1(.imm16), Op2(0x0F, 0x85), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JNZ, ops1(.imm32), Op2(0x0F, 0x85), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JO, ops1(.imm8), Op1(0x70), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JO, ops1(.imm16), Op2(0x0F, 0x80), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JO, ops1(.imm32), Op2(0x0F, 0x80), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JP, ops1(.imm8), Op1(0x7A), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JP, ops1(.imm16), Op2(0x0F, 0x8A), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JP, ops1(.imm32), Op2(0x0F, 0x8A), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JPE, ops1(.imm8), Op1(0x7A), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JPE, ops1(.imm16), Op2(0x0F, 0x8A), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JPE, ops1(.imm32), Op2(0x0F, 0x8A), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JPO, ops1(.imm8), Op1(0x7B), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JPO, ops1(.imm16), Op2(0x0F, 0x8B), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JPO, ops1(.imm32), Op2(0x0F, 0x8B), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JS, ops1(.imm8), Op1(0x78), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JS, ops1(.imm16), Op2(0x0F, 0x88), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JS, ops1(.imm32), Op2(0x0F, 0x88), .I, .Op32, .{ _386, Sign, Bnd }),
instr(.JZ, ops1(.imm8), Op1(0x74), .I, .ZO, .{ _8086, Sign, Bnd }),
instr(.JZ, ops1(.imm16), Op2(0x0F, 0x84), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JZ, ops1(.imm32), Op2(0x0F, 0x84), .I, .Op32, .{ _386, Sign, Bnd }),
// JMP
instr(.JMP, ops1(.imm8), Op1(0xEB), .I, .ZO, .{ _8086, Sign }),
instr(.JMP, ops1(.imm16), Op1(0xE9), .I, .Op16, .{ _8086, Sign, Bnd, No64 }),
instr(.JMP, ops1(.imm32), Op1(0xE9), .I, .Op32, .{ _386, Sign, Bnd }),
//
instr(.JMP, ops1(.rm16), Op1r(0xFF, 4), .M, .Op16, .{ _8086, Bnd, No64 }),
instr(.JMP, ops1(.rm32), Op1r(0xFF, 4), .M, .Op32, .{ _386, Bnd, No64 }),
instr(.JMP, ops1(.rm64), Op1r(0xFF, 4), .M, .ZO, .{ x86_64, Bnd, No32 }),
//
instr(.JMP, ops1(.ptr16_16), Op1r(0xEA, 4), .D, .Op16, .{ _8086, No64 }),
instr(.JMP, ops1(.ptr16_32), Op1r(0xEA, 4), .D, .Op32, .{ _386, No64 }),
//
instr(.JMP, ops1(.m16_16), Op1r(0xFF, 5), .M, .Op16, .{_8086}),
instr(.JMP, ops1(.m16_32), Op1r(0xFF, 5), .M, .Op32, .{_386}),
instr(.JMP, ops1(.m16_64), Op1r(0xFF, 5), .M, .REX_W, .{x86_64}),
// LAHF
instr(.LAHF, ops0(), Op1(0x9F), .ZO, .ZO, .{ _8086, No64 }),
instr(.LAHF, ops0(), Op1(0x9F), .ZO, .ZO, .{ _8086, cpu.LAHF_SAHF, No32 }),
// SAHF
instr(.SAHF, ops0(), Op1(0x9E), .ZO, .ZO, .{ _8086, No64 }),
instr(.SAHF, ops0(), Op1(0x9E), .ZO, .ZO, .{ _8086, cpu.LAHF_SAHF, No32 }),
// LDS / LSS / LES / LFS / LGS
instr(.LDS, ops2(.reg16, .m16_16), Op1(0xC5), .RM, .Op16, .{ _8086, No64 }),
instr(.LDS, ops2(.reg32, .m16_32), Op1(0xC5), .RM, .Op32, .{ _386, No64 }),
//
instr(.LSS, ops2(.reg16, .m16_16), Op2(0x0F, 0xB2), .RM, .Op16, .{_386}),
instr(.LSS, ops2(.reg32, .m16_32), Op2(0x0F, 0xB2), .RM, .Op32, .{_386}),
instr(.LSS, ops2(.reg64, .m16_64), Op2(0x0F, 0xB2), .RM, .REX_W, .{x86_64}),
//
instr(.LES, ops2(.reg16, .m16_16), Op1(0xC4), .RM, .Op16, .{ _8086, No64 }),
instr(.LES, ops2(.reg32, .m16_32), Op1(0xC4), .RM, .Op32, .{ _386, No64 }),
//
instr(.LFS, ops2(.reg16, .m16_16), Op2(0x0F, 0xB4), .RM, .Op16, .{_386}),
instr(.LFS, ops2(.reg32, .m16_32), Op2(0x0F, 0xB4), .RM, .Op32, .{_386}),
instr(.LFS, ops2(.reg64, .m16_64), Op2(0x0F, 0xB4), .RM, .REX_W, .{x86_64}),
//
instr(.LGS, ops2(.reg16, .m16_16), Op2(0x0F, 0xB5), .RM, .Op16, .{_386}),
instr(.LGS, ops2(.reg32, .m16_32), Op2(0x0F, 0xB5), .RM, .Op32, .{_386}),
instr(.LGS, ops2(.reg64, .m16_64), Op2(0x0F, 0xB5), .RM, .REX_W, .{x86_64}),
//
// LEA
instr(.LEA, ops2(.reg16, .rm_mem16), Op1(0x8D), .RM, .Op16, .{_8086}),
instr(.LEA, ops2(.reg32, .rm_mem32), Op1(0x8D), .RM, .Op32, .{_386}),
instr(.LEA, ops2(.reg64, .rm_mem64), Op1(0x8D), .RM, .REX_W, .{x86_64}),
// LEAVE
instr(.LEAVE, ops0(), Op1(0xC9), .ZO, .ZO, .{_186}),
instr(.LEAVEW, ops0(), Op1(0xC9), .ZO, .Op16, .{_186}),
instr(.LEAVED, ops0(), Op1(0xC9), .ZO, .Op32, .{ _386, No64 }),
instr(.LEAVEQ, ops0(), Op1(0xC9), .ZO, .ZO, .{ x86_64, No32 }),
// LOCK
instr(.LOCK, ops0(), Op1(0xF0), .ZO, .ZO, .{_8086}),
// LODS / LODSB / LODSW / LODSD / LODSQ
instr(.LODS, ops2(.reg_al, .rm_mem8), Op1(0xAC), .MSpec, .ZO, .{ _8086, Rep }),
instr(.LODS, ops2(.reg_ax, .rm_mem16), Op1(0xAD), .MSpec, .Op16, .{ _8086, Rep }),
instr(.LODS, ops2(.reg_eax, .rm_mem32), Op1(0xAD), .MSpec, .Op32, .{ _386, Rep }),
instr(.LODS, ops2(.reg_rax, .rm_mem64), Op1(0xAD), .MSpec, .REX_W, .{ x86_64, Rep }),
//
instr(.LODSB, ops0(), Op1(0xAC), .ZO, .ZO, .{ _8086, Rep }),
instr(.LODSW, ops0(), Op1(0xAD), .ZO, .Op16, .{ _8086, Rep }),
instr(.LODSD, ops0(), Op1(0xAD), .ZO, .Op32, .{ _386, Rep }),
instr(.LODSQ, ops0(), Op1(0xAD), .ZO, .REX_W, .{ x86_64, No32, Rep }),
// LOOP
instr(.LOOP, ops1(.imm8), Op1(0xE2), .I, .ZO, .{ _8086, Sign }),
instr(.LOOPE, ops1(.imm8), Op1(0xE1), .I, .ZO, .{ _8086, Sign }),
instr(.LOOPNE, ops1(.imm8), Op1(0xE0), .I, .ZO, .{ _8086, Sign }),
//
instr(.LOOPW, ops1(.imm8), Op1(0xE2), .I, .Addr16, .{ _386, Sign, No64 }),
instr(.LOOPEW, ops1(.imm8), Op1(0xE1), .I, .Addr16, .{ _386, Sign, No64 }),
instr(.LOOPNEW, ops1(.imm8), Op1(0xE0), .I, .Addr16, .{ _386, Sign, No64 }),
//
instr(.LOOPD, ops1(.imm8), Op1(0xE2), .I, .Addr32, .{ _386, Sign }),
instr(.LOOPED, ops1(.imm8), Op1(0xE1), .I, .Addr32, .{ _386, Sign }),
instr(.LOOPNED, ops1(.imm8), Op1(0xE0), .I, .Addr32, .{ _386, Sign }),
// MOV
instr(.MOV, ops2(.rm8, .reg8), Op1(0x88), .MR, .ZO, .{ _8086, Xrelease }),
instr(.MOV, ops2(.rm16, .reg16), Op1(0x89), .MR, .Op16, .{ _8086, Xrelease }),
instr(.MOV, ops2(.rm32, .reg32), Op1(0x89), .MR, .Op32, .{ _386, Xrelease }),
instr(.MOV, ops2(.rm64, .reg64), Op1(0x89), .MR, .REX_W, .{ x86_64, Xrelease }),
//
instr(.MOV, ops2(.reg8, .rm8), Op1(0x8A), .RM, .ZO, .{_8086}),
instr(.MOV, ops2(.reg16, .rm16), Op1(0x8B), .RM, .Op16, .{_8086}),
instr(.MOV, ops2(.reg32, .rm32), Op1(0x8B), .RM, .Op32, .{_386}),
instr(.MOV, ops2(.reg64, .rm64), Op1(0x8B), .RM, .REX_W, .{x86_64}),
//
instr(.MOV, ops2(.rm16, .reg_seg), Op1(0x8C), .MR, .Op16, .{_8086}),
instr(.MOV, ops2(.rm32, .reg_seg), Op1(0x8C), .MR, .Op32, .{_386}),
instr(.MOV, ops2(.rm64, .reg_seg), Op1(0x8C), .MR, .REX_W, .{x86_64}),
//
instr(.MOV, ops2(.reg_seg, .rm16), Op1(0x8E), .RM, .Op16, .{_8086}),
instr(.MOV, ops2(.reg_seg, .rm32), Op1(0x8E), .RM, .Op32, .{_386}),
instr(.MOV, ops2(.reg_seg, .rm64), Op1(0x8E), .RM, .REX_W, .{x86_64}),
instr(.MOV, ops2(.reg_al, .moffs8), Op1(0xA0), .FD, .ZO, .{_8086}),
instr(.MOV, ops2(.reg_ax, .moffs16), Op1(0xA1), .FD, .Op16, .{_8086}),
instr(.MOV, ops2(.reg_eax, .moffs32), Op1(0xA1), .FD, .Op32, .{_386}),
instr(.MOV, ops2(.reg_rax, .moffs64), Op1(0xA1), .FD, .REX_W, .{x86_64}),
instr(.MOV, ops2(.moffs8, .reg_al), Op1(0xA2), .TD, .ZO, .{_8086}),
instr(.MOV, ops2(.moffs16, .reg_ax), Op1(0xA3), .TD, .Op16, .{_8086}),
instr(.MOV, ops2(.moffs32, .reg_eax), Op1(0xA3), .TD, .Op32, .{_386}),
instr(.MOV, ops2(.moffs64, .reg_rax), Op1(0xA3), .TD, .REX_W, .{x86_64}),
//
instr(.MOV, ops2(.reg8, .imm8), Op1(0xB0), .OI, .ZO, .{_8086}),
instr(.MOV, ops2(.reg16, .imm16), Op1(0xB8), .OI, .Op16, .{_8086}),
instr(.MOV, ops2(.reg32, .imm32), Op1(0xB8), .OI, .Op32, .{_386}),
// Non-architectural convenience entry: encodes MOV r64, imm32 as plain
// B8+rd (no REX.W), relying on the implicit zero-extension of a 32-bit
// destination write — hence NoSign (the value must not be sign-extended)
// and No32 (only meaningful as a shorthand in 64-bit mode).
// NOTE(review): assumes the encoder treats `.ZO` here as "emit without
// REX.W" and a 4-byte immediate — confirm against the OI handler.
instr(.MOV, ops2(.reg64, .imm32), Op1(0xB8), .OI, .ZO, .{ x86_64, NoSign, No32 }),
instr(.MOV, ops2(.reg64, .imm64), Op1(0xB8), .OI, .REX_W, .{x86_64}),
//
instr(.MOV, ops2(.rm8, .imm8), Op1r(0xC6, 0), .MI, .ZO, .{ _8086, Xrelease }),
instr(.MOV, ops2(.rm16, .imm16), Op1r(0xC7, 0), .MI, .Op16, .{ _8086, Xrelease }),
instr(.MOV, ops2(.rm32, .imm32), Op1r(0xC7, 0), .MI, .Op32, .{ _386, Xrelease }),
instr(.MOV, ops2(.rm64, .imm32), Op1r(0xC7, 0), .MI, .REX_W, .{ x86_64, Xrelease }),
// 386 MOV to/from Control Registers
instr(.MOV, ops2(.reg32, .reg_cr), Op2(0x0F, 0x20), .MR, .ZO, .{ _386, No64 }),
instr(.MOV, ops2(.reg64, .reg_cr), Op2(0x0F, 0x20), .MR, .ZO, .{ x86_64, No32 }),
//
instr(.MOV, ops2(.reg_cr, .reg32), Op2(0x0F, 0x22), .RM, .ZO, .{ _386, No64 }),
instr(.MOV, ops2(.reg_cr, .reg64), Op2(0x0F, 0x22), .RM, .ZO, .{ x86_64, No32 }),
// 386 MOV to/from Debug Registers
instr(.MOV, ops2(.reg32, .reg_dr), Op2(0x0F, 0x21), .MR, .ZO, .{ _386, No64 }),
instr(.MOV, ops2(.reg64, .reg_dr), Op2(0x0F, 0x21), .MR, .ZO, .{ x86_64, No32 }),
//
instr(.MOV, ops2(.reg_dr, .reg32), Op2(0x0F, 0x23), .RM, .ZO, .{ _386, No64 }),
instr(.MOV, ops2(.reg_dr, .reg64), Op2(0x0F, 0x23), .RM, .ZO, .{ x86_64, No32 }),
// MOVS / MOVSB / MOVSW / MOVSD / MOVSQ
instr(.MOVS, ops2(.rm_mem8, .rm_mem8), Op1(0xA4), .MSpec, .ZO, .{ _8086, Rep }),
instr(.MOVS, ops2(.rm_mem16, .rm_mem16), Op1(0xA5), .MSpec, .Op16, .{ _8086, Rep }),
instr(.MOVS, ops2(.rm_mem32, .rm_mem32), Op1(0xA5), .MSpec, .Op32, .{ _386, Rep }),
instr(.MOVS, ops2(.rm_mem64, .rm_mem64), Op1(0xA5), .MSpec, .REX_W, .{ x86_64, Rep }),
//
instr(.MOVSB, ops0(), Op1(0xA4), .ZO, .ZO, .{ _8086, Rep }),
instr(.MOVSW, ops0(), Op1(0xA5), .ZO, .Op16, .{ _8086, Rep }),
// instr(.MOVSD, ops0(), Op1(0xA5), .ZO, .Op32, .{_386, Rep} ), // overloaded
instr(.MOVSQ, ops0(), Op1(0xA5), .ZO, .REX_W, .{ x86_64, No32, Rep }),
// MUL
instr(.MUL, ops1(.rm8), Op1r(0xF6, 4), .M, .ZO, .{_8086}),
instr(.MUL, ops1(.rm16), Op1r(0xF7, 4), .M, .Op16, .{_8086}),
instr(.MUL, ops1(.rm32), Op1r(0xF7, 4), .M, .Op32, .{_386}),
instr(.MUL, ops1(.rm64), Op1r(0xF7, 4), .M, .REX_W, .{x86_64}),
// NEG
instr(.NEG, ops1(.rm8), Op1r(0xF6, 3), .M, .ZO, .{ _8086, Lock, Hle }),
instr(.NEG, ops1(.rm16), Op1r(0xF7, 3), .M, .Op16, .{ _8086, Lock, Hle }),
instr(.NEG, ops1(.rm32), Op1r(0xF7, 3), .M, .Op32, .{ _386, Lock, Hle }),
instr(.NEG, ops1(.rm64), Op1r(0xF7, 3), .M, .REX_W, .{ x86_64, Lock, Hle }),
// NOP
instr(.NOP, ops0(), Op1(0x90), .ZO, .ZO, .{_8086}),
instr(.NOP, ops1(.rm16), Op2r(0x0F, 0x1F, 0), .M, .Op16, .{P6}),
instr(.NOP, ops1(.rm32), Op2r(0x0F, 0x1F, 0), .M, .Op32, .{P6}),
instr(.NOP, ops1(.rm64), Op2r(0x0F, 0x1F, 0), .M, .REX_W, .{x86_64}),
// NOT
instr(.NOT, ops1(.rm8), Op1r(0xF6, 2), .M, .ZO, .{ _8086, Lock, Hle }),
instr(.NOT, ops1(.rm16), Op1r(0xF7, 2), .M, .Op16, .{ _8086, Lock, Hle }),
instr(.NOT, ops1(.rm32), Op1r(0xF7, 2), .M, .Op32, .{ _386, Lock, Hle }),
instr(.NOT, ops1(.rm64), Op1r(0xF7, 2), .M, .REX_W, .{ x86_64, Lock, Hle }),
// OR
instr(.OR, ops2(.reg_al, .imm8), Op1(0x0C), .I2, .ZO, .{_8086}),
instr(.OR, ops2(.rm8, .imm8), Op1r(0x80, 1), .MI, .ZO, .{ _8086, Lock, Hle }),
instr(.OR, ops2(.rm16, .imm8), Op1r(0x83, 1), .MI, .Op16, .{ _8086, Sign, Lock, Hle }),
instr(.OR, ops2(.rm32, .imm8), Op1r(0x83, 1), .MI, .Op32, .{ _386, Sign, Lock, Hle }),
instr(.OR, ops2(.rm64, .imm8), Op1r(0x83, 1), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.OR, ops2(.reg_ax, .imm16), Op1(0x0D), .I2, .Op16, .{_8086}),
instr(.OR, ops2(.reg_eax, .imm32), Op1(0x0D), .I2, .Op32, .{_386}),
instr(.OR, ops2(.reg_rax, .imm32), Op1(0x0D), .I2, .REX_W, .{ x86_64, Sign }),
//
instr(.OR, ops2(.rm16, .imm16), Op1r(0x81, 1), .MI, .Op16, .{ _8086, Lock, Hle }),
instr(.OR, ops2(.rm32, .imm32), Op1r(0x81, 1), .MI, .Op32, .{ _386, Lock, Hle }),
instr(.OR, ops2(.rm64, .imm32), Op1r(0x81, 1), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.OR, ops2(.rm8, .reg8), Op1(0x08), .MR, .ZO, .{ _8086, Lock, Hle }),
instr(.OR, ops2(.rm16, .reg16), Op1(0x09), .MR, .Op16, .{ _8086, Lock, Hle }),
instr(.OR, ops2(.rm32, .reg32), Op1(0x09), .MR, .Op32, .{ _386, Lock, Hle }),
instr(.OR, ops2(.rm64, .reg64), Op1(0x09), .MR, .REX_W, .{ x86_64, Lock, Hle }),
//
instr(.OR, ops2(.reg8, .rm8), Op1(0x0A), .RM, .ZO, .{_8086}),
instr(.OR, ops2(.reg16, .rm16), Op1(0x0B), .RM, .Op16, .{_8086}),
instr(.OR, ops2(.reg32, .rm32), Op1(0x0B), .RM, .Op32, .{_386}),
instr(.OR, ops2(.reg64, .rm64), Op1(0x0B), .RM, .REX_W, .{x86_64}),
// OUT
instr(.OUT, ops2(.imm8, .reg_al), Op1(0xE6), .I, .ZO, .{_8086}),
instr(.OUT, ops2(.imm8, .reg_ax), Op1(0xE7), .I, .Op16, .{_8086}),
instr(.OUT, ops2(.imm8, .reg_eax), Op1(0xE7), .I, .Op32, .{_386}),
instr(.OUT, ops2(.reg_dx, .reg_al), Op1(0xEE), .ZO, .ZO, .{_8086}),
instr(.OUT, ops2(.reg_dx, .reg_ax), Op1(0xEF), .ZO, .Op16, .{_8086}),
instr(.OUT, ops2(.reg_dx, .reg_eax), Op1(0xEF), .ZO, .Op32, .{_386}),
// OUTS / OUTSB / OUTSW / OUTSD
instr(.OUTS, ops2(.reg_dx, .rm_mem8), Op1(0x6E), .MSpec, .ZO, .{ _186, Rep }),
instr(.OUTS, ops2(.reg_dx, .rm_mem16), Op1(0x6F), .MSpec, .Op16, .{ _186, Rep }),
// Fix: 32-bit OUTS (OUTSD) was introduced with the 80386, not x86-64 — the
// sibling entries (OUTS m8/m16 -> _186, OUTSD mnemonic below -> _386) confirm
// the `x86_64` tag here was a typo that would wrongly reject this form when
// assembling for 386..686 targets.
instr(.OUTS, ops2(.reg_dx, .rm_mem32), Op1(0x6F), .MSpec, .Op32, .{ _386, Rep }),
//
instr(.OUTSB, ops0(), Op1(0x6E), .ZO, .ZO, .{ _186, Rep }),
instr(.OUTSW, ops0(), Op1(0x6F), .ZO, .Op16, .{ _186, Rep }),
instr(.OUTSD, ops0(), Op1(0x6F), .ZO, .Op32, .{ _386, Rep }),
// POP
instr(.POP, ops1(.reg16), Op1(0x58), .O, .Op16, .{_8086}),
instr(.POP, ops1(.reg32), Op1(0x58), .O, .Op32, .{ _386, No64 }),
instr(.POP, ops1(.reg64), Op1(0x58), .O, .ZO, .{ x86_64, No32 }),
//
instr(.POP, ops1(.rm16), Op1r(0x8F, 0), .M, .Op16, .{_8086}),
instr(.POP, ops1(.rm32), Op1r(0x8F, 0), .M, .Op32, .{ _386, No64 }),
instr(.POP, ops1(.rm64), Op1r(0x8F, 0), .M, .ZO, .{ x86_64, No32 }),
//
instr(.POP, ops1(.reg_cs), Op1(0x0F), .ZO, .ZO, .{ _8086_Legacy, No64 }),
instr(.POP, ops1(.reg_ds), Op1(0x1F), .ZO, .ZO, .{ _8086, No64 }),
instr(.POP, ops1(.reg_es), Op1(0x07), .ZO, .ZO, .{ _8086, No64 }),
instr(.POP, ops1(.reg_ss), Op1(0x17), .ZO, .ZO, .{ _8086, No64 }),
instr(.POP, ops1(.reg_fs), Op2(0x0F, 0xA1), .ZO, .ZO, .{_386}),
instr(.POP, ops1(.reg_gs), Op2(0x0F, 0xA9), .ZO, .ZO, .{_386}),
//
instr(.POPW, ops1(.reg_ds), Op1(0x1F), .ZO, .Op16, .{ _8086, No64 }),
instr(.POPW, ops1(.reg_es), Op1(0x07), .ZO, .Op16, .{ _8086, No64 }),
instr(.POPW, ops1(.reg_ss), Op1(0x17), .ZO, .Op16, .{ _8086, No64 }),
instr(.POPW, ops1(.reg_fs), Op2(0x0F, 0xA1), .ZO, .Op16, .{_386}),
instr(.POPW, ops1(.reg_gs), Op2(0x0F, 0xA9), .ZO, .Op16, .{_386}),
//
instr(.POPD, ops1(.reg_ds), Op1(0x1F), .ZO, .Op32, .{ _8086, No64 }),
instr(.POPD, ops1(.reg_es), Op1(0x07), .ZO, .Op32, .{ _8086, No64 }),
instr(.POPD, ops1(.reg_ss), Op1(0x17), .ZO, .Op32, .{ _8086, No64 }),
instr(.POPD, ops1(.reg_fs), Op2(0x0F, 0xA1), .ZO, .Op32, .{ _386, No64 }),
instr(.POPD, ops1(.reg_gs), Op2(0x0F, 0xA9), .ZO, .Op32, .{ _386, No64 }),
//
// NOTE(review): other 64-bit-only forms in this table (RETFQ, POPFQ, LEAVEQ)
// use the `x86_64` cpu tag, while POPQ fs/gs (and PUSHQ below) use `_386`
// with No32. Behaviorally equivalent — No32 already restricts these to long
// mode, which implies an x86-64 CPU — but verify the tag choice is intentional.
instr(.POPQ, ops1(.reg_fs), Op2(0x0F, 0xA1), .ZO, .ZO, .{ _386, No32 }),
instr(.POPQ, ops1(.reg_gs), Op2(0x0F, 0xA9), .ZO, .ZO, .{ _386, No32 }),
// POPA
instr(.POPA, ops0(), Op1(0x60), .ZO, .ZO, .{ _186, No64 }),
instr(.POPAW, ops0(), Op1(0x60), .ZO, .Op16, .{ _186, No64 }),
instr(.POPAD, ops0(), Op1(0x60), .ZO, .Op32, .{ _386, No64 }),
// POPF / POPFD / POPFQ
instr(.POPF, ops0(), Op1(0x9D), .ZO, .ZO, .{_8086}),
instr(.POPFW, ops0(), Op1(0x9D), .ZO, .Op16, .{_8086}),
instr(.POPFD, ops0(), Op1(0x9D), .ZO, .Op32, .{ _386, No64 }),
instr(.POPFQ, ops0(), Op1(0x9D), .ZO, .ZO, .{ x86_64, No32 }),
// PUSH
instr(.PUSH, ops1(.imm8), Op1(0x6A), .I, .ZO, .{_186}),
instr(.PUSH, ops1(.imm16), Op1(0x68), .I, .Op16, .{_186}),
instr(.PUSH, ops1(.imm32), Op1(0x68), .I, .Op32, .{_386}),
//
instr(.PUSH, ops1(.reg16), Op1(0x50), .O, .Op16, .{_8086}),
instr(.PUSH, ops1(.reg32), Op1(0x50), .O, .Op32, .{ _386, No64 }),
instr(.PUSH, ops1(.reg64), Op1(0x50), .O, .ZO, .{ x86_64, No32 }),
//
instr(.PUSH, ops1(.rm16), Op1r(0xFF, 6), .M, .Op16, .{_8086}),
instr(.PUSH, ops1(.rm32), Op1r(0xFF, 6), .M, .Op32, .{ _386, No64 }),
instr(.PUSH, ops1(.rm64), Op1r(0xFF, 6), .M, .ZO, .{ x86_64, No32 }),
//
instr(.PUSH, ops1(.reg_cs), Op1(0x0E), .ZO, .ZO, .{ _8086, No64 }),
instr(.PUSH, ops1(.reg_ds), Op1(0x1E), .ZO, .ZO, .{ _8086, No64 }),
instr(.PUSH, ops1(.reg_es), Op1(0x06), .ZO, .ZO, .{ _8086, No64 }),
instr(.PUSH, ops1(.reg_ss), Op1(0x16), .ZO, .ZO, .{ _8086, No64 }),
instr(.PUSH, ops1(.reg_fs), Op2(0x0F, 0xA0), .ZO, .ZO, .{_386}),
instr(.PUSH, ops1(.reg_gs), Op2(0x0F, 0xA8), .ZO, .ZO, .{_386}),
//
instr(.PUSHW, ops1(.reg_cs), Op1(0x0E), .ZO, .Op16, .{ _8086, No64 }),
instr(.PUSHW, ops1(.reg_ds), Op1(0x1E), .ZO, .Op16, .{ _8086, No64 }),
instr(.PUSHW, ops1(.reg_es), Op1(0x06), .ZO, .Op16, .{ _8086, No64 }),
instr(.PUSHW, ops1(.reg_ss), Op1(0x16), .ZO, .Op16, .{ _8086, No64 }),
instr(.PUSHW, ops1(.reg_fs), Op2(0x0F, 0xA0), .ZO, .Op16, .{_386}),
instr(.PUSHW, ops1(.reg_gs), Op2(0x0F, 0xA8), .ZO, .Op16, .{_386}),
//
instr(.PUSHD, ops1(.reg_cs), Op1(0x0E), .ZO, .Op32, .{ _8086, No64 }),
instr(.PUSHD, ops1(.reg_ds), Op1(0x1E), .ZO, .Op32, .{ _8086, No64 }),
instr(.PUSHD, ops1(.reg_es), Op1(0x06), .ZO, .Op32, .{ _8086, No64 }),
instr(.PUSHD, ops1(.reg_ss), Op1(0x16), .ZO, .Op32, .{ _8086, No64 }),
instr(.PUSHD, ops1(.reg_fs), Op2(0x0F, 0xA0), .ZO, .Op32, .{ _386, No64 }),
instr(.PUSHD, ops1(.reg_gs), Op2(0x0F, 0xA8), .ZO, .Op32, .{ _386, No64 }),
//
instr(.PUSHQ, ops1(.reg_fs), Op2(0x0F, 0xA0), .ZO, .ZO, .{ _386, No32 }),
instr(.PUSHQ, ops1(.reg_gs), Op2(0x0F, 0xA8), .ZO, .ZO, .{ _386, No32 }),
// PUSHA
instr(.PUSHA, ops0(), Op1(0x60), .ZO, .ZO, .{ _186, No64 }),
instr(.PUSHAW, ops0(), Op1(0x60), .ZO, .Op16, .{ _186, No64 }),
instr(.PUSHAD, ops0(), Op1(0x60), .ZO, .Op32, .{ _386, No64 }),
// PUSHF / PUSHFW / PUSHFD / PUSHFQ
instr(.PUSHF, ops0(), Op1(0x9C), .ZO, .ZO, .{_8086}),
instr(.PUSHFW, ops0(), Op1(0x9C), .ZO, .Op16, .{_8086}),
instr(.PUSHFD, ops0(), Op1(0x9C), .ZO, .Op32, .{ _386, No64 }),
instr(.PUSHFQ, ops0(), Op1(0x9C), .ZO, .ZO, .{ x86_64, No32 }),
// RCL / RCR / ROL / ROR
instr(.RCL, ops2(.rm8, .imm_1), Op1r(0xD0, 2), .M, .ZO, .{_8086}),
instr(.RCL, ops2(.rm8, .reg_cl), Op1r(0xD2, 2), .M, .ZO, .{_8086}),
instr(.RCL, ops2(.rm8, .imm8), Op1r(0xC0, 2), .MI, .ZO, .{_186}),
instr(.RCL, ops2(.rm16, .imm_1), Op1r(0xD1, 2), .M, .Op16, .{_8086}),
instr(.RCL, ops2(.rm32, .imm_1), Op1r(0xD1, 2), .M, .Op32, .{_386}),
instr(.RCL, ops2(.rm64, .imm_1), Op1r(0xD1, 2), .M, .REX_W, .{x86_64}),
instr(.RCL, ops2(.rm16, .imm8), Op1r(0xC1, 2), .MI, .Op16, .{_186}),
instr(.RCL, ops2(.rm32, .imm8), Op1r(0xC1, 2), .MI, .Op32, .{_386}),
instr(.RCL, ops2(.rm64, .imm8), Op1r(0xC1, 2), .MI, .REX_W, .{x86_64}),
instr(.RCL, ops2(.rm16, .reg_cl), Op1r(0xD3, 2), .M, .Op16, .{_8086}),
instr(.RCL, ops2(.rm32, .reg_cl), Op1r(0xD3, 2), .M, .Op32, .{_386}),
instr(.RCL, ops2(.rm64, .reg_cl), Op1r(0xD3, 2), .M, .REX_W, .{x86_64}),
//
instr(.RCR, ops2(.rm8, .imm_1), Op1r(0xD0, 3), .M, .ZO, .{_8086}),
instr(.RCR, ops2(.rm8, .reg_cl), Op1r(0xD2, 3), .M, .ZO, .{_8086}),
instr(.RCR, ops2(.rm8, .imm8), Op1r(0xC0, 3), .MI, .ZO, .{_186}),
instr(.RCR, ops2(.rm16, .imm_1), Op1r(0xD1, 3), .M, .Op16, .{_8086}),
instr(.RCR, ops2(.rm32, .imm_1), Op1r(0xD1, 3), .M, .Op32, .{_386}),
instr(.RCR, ops2(.rm64, .imm_1), Op1r(0xD1, 3), .M, .REX_W, .{x86_64}),
instr(.RCR, ops2(.rm16, .imm8), Op1r(0xC1, 3), .MI, .Op16, .{_186}),
instr(.RCR, ops2(.rm32, .imm8), Op1r(0xC1, 3), .MI, .Op32, .{_386}),
instr(.RCR, ops2(.rm64, .imm8), Op1r(0xC1, 3), .MI, .REX_W, .{x86_64}),
instr(.RCR, ops2(.rm16, .reg_cl), Op1r(0xD3, 3), .M, .Op16, .{_8086}),
instr(.RCR, ops2(.rm32, .reg_cl), Op1r(0xD3, 3), .M, .Op32, .{_386}),
instr(.RCR, ops2(.rm64, .reg_cl), Op1r(0xD3, 3), .M, .REX_W, .{x86_64}),
//
instr(.ROL, ops2(.rm8, .imm_1), Op1r(0xD0, 0), .M, .ZO, .{_8086}),
instr(.ROL, ops2(.rm8, .reg_cl), Op1r(0xD2, 0), .M, .ZO, .{_8086}),
instr(.ROL, ops2(.rm8, .imm8), Op1r(0xC0, 0), .MI, .ZO, .{_186}),
instr(.ROL, ops2(.rm16, .imm_1), Op1r(0xD1, 0), .M, .Op16, .{_8086}),
instr(.ROL, ops2(.rm32, .imm_1), Op1r(0xD1, 0), .M, .Op32, .{_386}),
instr(.ROL, ops2(.rm64, .imm_1), Op1r(0xD1, 0), .M, .REX_W, .{x86_64}),
instr(.ROL, ops2(.rm16, .imm8), Op1r(0xC1, 0), .MI, .Op16, .{_186}),
instr(.ROL, ops2(.rm32, .imm8), Op1r(0xC1, 0), .MI, .Op32, .{_386}),
instr(.ROL, ops2(.rm64, .imm8), Op1r(0xC1, 0), .MI, .REX_W, .{x86_64}),
instr(.ROL, ops2(.rm16, .reg_cl), Op1r(0xD3, 0), .M, .Op16, .{_8086}),
instr(.ROL, ops2(.rm32, .reg_cl), Op1r(0xD3, 0), .M, .Op32, .{_386}),
instr(.ROL, ops2(.rm64, .reg_cl), Op1r(0xD3, 0), .M, .REX_W, .{x86_64}),
//
instr(.ROR, ops2(.rm8, .imm_1), Op1r(0xD0, 1), .M, .ZO, .{_8086}),
instr(.ROR, ops2(.rm8, .reg_cl), Op1r(0xD2, 1), .M, .ZO, .{_8086}),
instr(.ROR, ops2(.rm8, .imm8), Op1r(0xC0, 1), .MI, .ZO, .{_186}),
instr(.ROR, ops2(.rm16, .imm_1), Op1r(0xD1, 1), .M, .Op16, .{_8086}),
instr(.ROR, ops2(.rm32, .imm_1), Op1r(0xD1, 1), .M, .Op32, .{_386}),
instr(.ROR, ops2(.rm64, .imm_1), Op1r(0xD1, 1), .M, .REX_W, .{x86_64}),
instr(.ROR, ops2(.rm16, .imm8), Op1r(0xC1, 1), .MI, .Op16, .{_186}),
instr(.ROR, ops2(.rm32, .imm8), Op1r(0xC1, 1), .MI, .Op32, .{_386}),
instr(.ROR, ops2(.rm64, .imm8), Op1r(0xC1, 1), .MI, .REX_W, .{x86_64}),
instr(.ROR, ops2(.rm16, .reg_cl), Op1r(0xD3, 1), .M, .Op16, .{_8086}),
instr(.ROR, ops2(.rm32, .reg_cl), Op1r(0xD3, 1), .M, .Op32, .{_386}),
instr(.ROR, ops2(.rm64, .reg_cl), Op1r(0xD3, 1), .M, .REX_W, .{x86_64}),
// REP / REPE / REPZ / REPNE / REPNZ
instr(.REP, ops0(), Op1(0xF3), .ZO, .ZO, .{_8086}),
instr(.REPE, ops0(), Op1(0xF3), .ZO, .ZO, .{_8086}),
instr(.REPZ, ops0(), Op1(0xF3), .ZO, .ZO, .{_8086}),
instr(.REPNE, ops0(), Op1(0xF2), .ZO, .ZO, .{_8086}),
instr(.REPNZ, ops0(), Op1(0xF2), .ZO, .ZO, .{_8086}),
// RET
instr(.RET, ops0(), Op1(0xC3), .ZO, .ZO, .{ _8086, Bnd }),
instr(.RET, ops1(.imm16), Op1(0xC2), .I, .ZO, .{ _8086, Bnd }),
instr(.RETW, ops0(), Op1(0xC3), .ZO, .Op16, .{ _8086, Bnd }),
instr(.RETW, ops1(.imm16), Op1(0xC2), .I, .Op16, .{ _8086, Bnd }),
instr(.RETD, ops0(), Op1(0xC3), .ZO, .Op32, .{ _386, Bnd, No64 }),
instr(.RETD, ops1(.imm16), Op1(0xC2), .I, .Op32, .{ _386, Bnd, No64 }),
instr(.RETQ, ops0(), Op1(0xC3), .ZO, .ZO, .{ x86_64, Bnd, No32 }),
instr(.RETQ, ops1(.imm16), Op1(0xC2), .I, .ZO, .{ x86_64, Bnd, No32 }),
// RETF
instr(.RETF, ops0(), Op1(0xCB), .ZO, .ZO, .{_8086}),
instr(.RETF, ops1(.imm16), Op1(0xCA), .I, .ZO, .{_8086}),
instr(.RETFW, ops0(), Op1(0xCB), .ZO, .Op16, .{_8086}),
instr(.RETFW, ops1(.imm16), Op1(0xCA), .I, .Op16, .{_8086}),
instr(.RETFD, ops0(), Op1(0xCB), .ZO, .Op32, .{ _386, No64 }),
instr(.RETFD, ops1(.imm16), Op1(0xCA), .I, .Op32, .{ _386, No64 }),
instr(.RETFQ, ops0(), Op1(0xCB), .ZO, .ZO, .{ x86_64, No32 }),
instr(.RETFQ, ops1(.imm16), Op1(0xCA), .I, .ZO, .{ x86_64, No32 }),
// RETN
instr(.RETN, ops0(), Op1(0xC3), .ZO, .ZO, .{ _8086, Bnd }),
instr(.RETN, ops1(.imm16), Op1(0xC2), .I, .ZO, .{ _8086, Bnd }),
instr(.RETNW, ops0(), Op1(0xC3), .ZO, .Op16, .{ _8086, Bnd }),
instr(.RETNW, ops1(.imm16), Op1(0xC2), .I, .Op16, .{ _8086, Bnd }),
instr(.RETND, ops0(), Op1(0xC3), .ZO, .Op32, .{ _386, Bnd, No64 }),
instr(.RETND, ops1(.imm16), Op1(0xC2), .I, .Op32, .{ _386, Bnd, No64 }),
instr(.RETNQ, ops0(), Op1(0xC3), .ZO, .ZO, .{ x86_64, Bnd, No32 }),
instr(.RETNQ, ops1(.imm16), Op1(0xC2), .I, .ZO, .{ x86_64, Bnd, No32 }),
// SAL / SAR / SHL / SHR
instr(.SAL, ops2(.rm8, .imm_1), Op1r(0xD0, 4), .M, .ZO, .{_8086}),
instr(.SAL, ops2(.rm8, .reg_cl), Op1r(0xD2, 4), .M, .ZO, .{_8086}),
instr(.SAL, ops2(.rm8, .imm8), Op1r(0xC0, 4), .MI, .ZO, .{_186}),
instr(.SAL, ops2(.rm16, .imm_1), Op1r(0xD1, 4), .M, .Op16, .{_8086}),
instr(.SAL, ops2(.rm32, .imm_1), Op1r(0xD1, 4), .M, .Op32, .{_386}),
instr(.SAL, ops2(.rm64, .imm_1), Op1r(0xD1, 4), .M, .REX_W, .{x86_64}),
instr(.SAL, ops2(.rm16, .imm8), Op1r(0xC1, 4), .MI, .Op16, .{_186}),
instr(.SAL, ops2(.rm32, .imm8), Op1r(0xC1, 4), .MI, .Op32, .{_386}),
instr(.SAL, ops2(.rm64, .imm8), Op1r(0xC1, 4), .MI, .REX_W, .{x86_64}),
instr(.SAL, ops2(.rm16, .reg_cl), Op1r(0xD3, 4), .M, .Op16, .{_8086}),
instr(.SAL, ops2(.rm32, .reg_cl), Op1r(0xD3, 4), .M, .Op32, .{_386}),
instr(.SAL, ops2(.rm64, .reg_cl), Op1r(0xD3, 4), .M, .REX_W, .{x86_64}),
//
instr(.SAR, ops2(.rm8, .imm_1), Op1r(0xD0, 7), .M, .ZO, .{_8086}),
instr(.SAR, ops2(.rm8, .reg_cl), Op1r(0xD2, 7), .M, .ZO, .{_8086}),
instr(.SAR, ops2(.rm8, .imm8), Op1r(0xC0, 7), .MI, .ZO, .{_186}),
instr(.SAR, ops2(.rm16, .imm_1), Op1r(0xD1, 7), .M, .Op16, .{_8086}),
instr(.SAR, ops2(.rm32, .imm_1), Op1r(0xD1, 7), .M, .Op32, .{_386}),
instr(.SAR, ops2(.rm64, .imm_1), Op1r(0xD1, 7), .M, .REX_W, .{x86_64}),
instr(.SAR, ops2(.rm16, .imm8), Op1r(0xC1, 7), .MI, .Op16, .{_186}),
instr(.SAR, ops2(.rm32, .imm8), Op1r(0xC1, 7), .MI, .Op32, .{_386}),
instr(.SAR, ops2(.rm64, .imm8), Op1r(0xC1, 7), .MI, .REX_W, .{x86_64}),
instr(.SAR, ops2(.rm16, .reg_cl), Op1r(0xD3, 7), .M, .Op16, .{_8086}),
instr(.SAR, ops2(.rm32, .reg_cl), Op1r(0xD3, 7), .M, .Op32, .{_386}),
instr(.SAR, ops2(.rm64, .reg_cl), Op1r(0xD3, 7), .M, .REX_W, .{x86_64}),
//
instr(.SHL, ops2(.rm8, .imm_1), Op1r(0xD0, 4), .M, .ZO, .{_8086}),
instr(.SHL, ops2(.rm8, .reg_cl), Op1r(0xD2, 4), .M, .ZO, .{_8086}),
instr(.SHL, ops2(.rm8, .imm8), Op1r(0xC0, 4), .MI, .ZO, .{_186}),
instr(.SHL, ops2(.rm16, .imm_1), Op1r(0xD1, 4), .M, .Op16, .{_8086}),
instr(.SHL, ops2(.rm32, .imm_1), Op1r(0xD1, 4), .M, .Op32, .{_386}),
instr(.SHL, ops2(.rm64, .imm_1), Op1r(0xD1, 4), .M, .REX_W, .{x86_64}),
instr(.SHL, ops2(.rm16, .imm8), Op1r(0xC1, 4), .MI, .Op16, .{_186}),
instr(.SHL, ops2(.rm32, .imm8), Op1r(0xC1, 4), .MI, .Op32, .{_386}),
instr(.SHL, ops2(.rm64, .imm8), Op1r(0xC1, 4), .MI, .REX_W, .{x86_64}),
instr(.SHL, ops2(.rm16, .reg_cl), Op1r(0xD3, 4), .M, .Op16, .{_8086}),
instr(.SHL, ops2(.rm32, .reg_cl), Op1r(0xD3, 4), .M, .Op32, .{_386}),
instr(.SHL, ops2(.rm64, .reg_cl), Op1r(0xD3, 4), .M, .REX_W, .{x86_64}),
//
instr(.SHR, ops2(.rm8, .imm_1), Op1r(0xD0, 5), .M, .ZO, .{_8086}),
instr(.SHR, ops2(.rm8, .reg_cl), Op1r(0xD2, 5), .M, .ZO, .{_8086}),
instr(.SHR, ops2(.rm8, .imm8), Op1r(0xC0, 5), .MI, .ZO, .{_186}),
instr(.SHR, ops2(.rm16, .imm_1), Op1r(0xD1, 5), .M, .Op16, .{_8086}),
instr(.SHR, ops2(.rm32, .imm_1), Op1r(0xD1, 5), .M, .Op32, .{_386}),
instr(.SHR, ops2(.rm64, .imm_1), Op1r(0xD1, 5), .M, .REX_W, .{x86_64}),
instr(.SHR, ops2(.rm16, .imm8), Op1r(0xC1, 5), .MI, .Op16, .{_186}),
instr(.SHR, ops2(.rm32, .imm8), Op1r(0xC1, 5), .MI, .Op32, .{_386}),
instr(.SHR, ops2(.rm64, .imm8), Op1r(0xC1, 5), .MI, .REX_W, .{x86_64}),
instr(.SHR, ops2(.rm16, .reg_cl), Op1r(0xD3, 5), .M, .Op16, .{_8086}),
instr(.SHR, ops2(.rm32, .reg_cl), Op1r(0xD3, 5), .M, .Op32, .{_386}),
instr(.SHR, ops2(.rm64, .reg_cl), Op1r(0xD3, 5), .M, .REX_W, .{x86_64}),
// SBB
instr(.SBB, ops2(.reg_al, .imm8), Op1(0x1C), .I2, .ZO, .{_8086}),
instr(.SBB, ops2(.rm8, .imm8), Op1r(0x80, 3), .MI, .ZO, .{ _8086, Lock, Hle }),
instr(.SBB, ops2(.rm16, .imm8), Op1r(0x83, 3), .MI, .Op16, .{ _8086, Sign, Lock, Hle }),
instr(.SBB, ops2(.rm32, .imm8), Op1r(0x83, 3), .MI, .Op32, .{ _386, Sign, Lock, Hle }),
instr(.SBB, ops2(.rm64, .imm8), Op1r(0x83, 3), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.SBB, ops2(.reg_ax, .imm16), Op1(0x1D), .I2, .Op16, .{_8086}),
instr(.SBB, ops2(.reg_eax, .imm32), Op1(0x1D), .I2, .Op32, .{_386}),
instr(.SBB, ops2(.reg_rax, .imm32), Op1(0x1D), .I2, .REX_W, .{ x86_64, Sign }),
//
instr(.SBB, ops2(.rm16, .imm16), Op1r(0x81, 3), .MI, .Op16, .{ _8086, Lock, Hle }),
instr(.SBB, ops2(.rm32, .imm32), Op1r(0x81, 3), .MI, .Op32, .{ _386, Lock, Hle }),
instr(.SBB, ops2(.rm64, .imm32), Op1r(0x81, 3), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.SBB, ops2(.rm8, .reg8), Op1(0x18), .MR, .ZO, .{ _8086, Lock, Hle }),
instr(.SBB, ops2(.rm16, .reg16), Op1(0x19), .MR, .Op16, .{ _8086, Lock, Hle }),
instr(.SBB, ops2(.rm32, .reg32), Op1(0x19), .MR, .Op32, .{ _386, Lock, Hle }),
instr(.SBB, ops2(.rm64, .reg64), Op1(0x19), .MR, .REX_W, .{ x86_64, Lock, Hle }),
//
instr(.SBB, ops2(.reg8, .rm8), Op1(0x1A), .RM, .ZO, .{_8086}),
instr(.SBB, ops2(.reg16, .rm16), Op1(0x1B), .RM, .Op16, .{_8086}),
instr(.SBB, ops2(.reg32, .rm32), Op1(0x1B), .RM, .Op32, .{_386}),
instr(.SBB, ops2(.reg64, .rm64), Op1(0x1B), .RM, .REX_W, .{x86_64}),
// SCAS / SCASB / SCASW / SCASD / SCASQ
instr(.SCAS, ops2(.rm_mem8, .reg_al), Op1(0xAE), .MSpec, .ZO, .{ _8086, Repe, Repne }),
instr(.SCAS, ops2(.rm_mem16, .reg_ax), Op1(0xAF), .MSpec, .Op16, .{ _8086, Repe, Repne }),
instr(.SCAS, ops2(.rm_mem32, .reg_eax), Op1(0xAF), .MSpec, .Op32, .{ _386, Repe, Repne }),
instr(.SCAS, ops2(.rm_mem64, .reg_rax), Op1(0xAF), .MSpec, .REX_W, .{ x86_64, Repe, Repne }),
//
instr(.SCASB, ops0(), Op1(0xAE), .ZO, .ZO, .{ _8086, Repe, Repne }),
instr(.SCASW, ops0(), Op1(0xAF), .ZO, .Op16, .{ _8086, Repe, Repne }),
instr(.SCASD, ops0(), Op1(0xAF), .ZO, .Op32, .{ _386, Repe, Repne }),
instr(.SCASQ, ops0(), Op1(0xAF), .ZO, .REX_W, .{ x86_64, No32, Repe, Repne }),
// STC
instr(.STC, ops0(), Op1(0xF9), .ZO, .ZO, .{_8086}),
// STD
instr(.STD, ops0(), Op1(0xFD), .ZO, .ZO, .{_8086}),
// STI
instr(.STI, ops0(), Op1(0xFB), .ZO, .ZO, .{_8086}),
// STOS / STOSB / STOSW / STOSD / STOSQ
instr(.STOS, ops2(.rm_mem8, .reg_al), Op1(0xAA), .MSpec, .ZO, .{ _8086, Rep }),
instr(.STOS, ops2(.rm_mem16, .reg_ax), Op1(0xAB), .MSpec, .Op16, .{ _8086, Rep }),
instr(.STOS, ops2(.rm_mem32, .reg_eax), Op1(0xAB), .MSpec, .Op32, .{ _386, Rep }),
instr(.STOS, ops2(.rm_mem64, .reg_rax), Op1(0xAB), .MSpec, .REX_W, .{ x86_64, Rep }),
//
instr(.STOSB, ops0(), Op1(0xAA), .ZO, .ZO, .{ _8086, Rep }),
instr(.STOSW, ops0(), Op1(0xAB), .ZO, .Op16, .{ _8086, Rep }),
instr(.STOSD, ops0(), Op1(0xAB), .ZO, .Op32, .{ _386, Rep }),
// Fix: add No32 — STOSQ requires REX.W and therefore only exists in 64-bit
// mode, exactly like the other Q-suffixed string forms in this table
// (LODSQ, MOVSQ, SCASQ all carry `No32`); without it this entry would allow
// an unencodable REX.W instruction in 32-bit mode.
instr(.STOSQ, ops0(), Op1(0xAB), .ZO, .REX_W, .{ x86_64, No32, Rep }),
// SUB
instr(.SUB, ops2(.reg_al, .imm8), Op1(0x2C), .I2, .ZO, .{_8086}),
instr(.SUB, ops2(.rm8, .imm8), Op1r(0x80, 5), .MI, .ZO, .{ _8086, Lock, Hle }),
instr(.SUB, ops2(.rm16, .imm8), Op1r(0x83, 5), .MI, .Op16, .{ _8086, Sign, Lock, Hle }),
instr(.SUB, ops2(.rm32, .imm8), Op1r(0x83, 5), .MI, .Op32, .{ _386, Sign, Lock, Hle }),
instr(.SUB, ops2(.rm64, .imm8), Op1r(0x83, 5), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.SUB, ops2(.reg_ax, .imm16), Op1(0x2D), .I2, .Op16, .{_8086}),
instr(.SUB, ops2(.reg_eax, .imm32), Op1(0x2D), .I2, .Op32, .{_386}),
instr(.SUB, ops2(.reg_rax, .imm32), Op1(0x2D), .I2, .REX_W, .{ x86_64, Sign }),
//
instr(.SUB, ops2(.rm16, .imm16), Op1r(0x81, 5), .MI, .Op16, .{ _8086, Lock, Hle }),
instr(.SUB, ops2(.rm32, .imm32), Op1r(0x81, 5), .MI, .Op32, .{ _386, Lock, Hle }),
instr(.SUB, ops2(.rm64, .imm32), Op1r(0x81, 5), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.SUB, ops2(.rm8, .reg8), Op1(0x28), .MR, .ZO, .{ _8086, Lock, Hle }),
instr(.SUB, ops2(.rm16, .reg16), Op1(0x29), .MR, .Op16, .{ _8086, Lock, Hle }),
instr(.SUB, ops2(.rm32, .reg32), Op1(0x29), .MR, .Op32, .{ _386, Lock, Hle }),
instr(.SUB, ops2(.rm64, .reg64), Op1(0x29), .MR, .REX_W, .{ x86_64, Lock, Hle }),
//
instr(.SUB, ops2(.reg8, .rm8), Op1(0x2A), .RM, .ZO, .{_8086}),
instr(.SUB, ops2(.reg16, .rm16), Op1(0x2B), .RM, .Op16, .{_8086}),
instr(.SUB, ops2(.reg32, .rm32), Op1(0x2B), .RM, .Op32, .{_386}),
instr(.SUB, ops2(.reg64, .rm64), Op1(0x2B), .RM, .REX_W, .{x86_64}),
// TEST
instr(.TEST, ops2(.reg_al, .imm8), Op1(0xA8), .I2, .ZO, .{_8086}),
instr(.TEST, ops2(.reg_ax, .imm16), Op1(0xA9), .I2, .Op16, .{_8086}),
instr(.TEST, ops2(.reg_eax, .imm32), Op1(0xA9), .I2, .Op32, .{_386}),
instr(.TEST, ops2(.reg_rax, .imm32), Op1(0xA9), .I2, .REX_W, .{ x86_64, Sign }),
//
instr(.TEST, ops2(.rm8, .imm8), Op1r(0xF6, 0), .MI, .ZO, .{_8086}),
instr(.TEST, ops2(.rm16, .imm16), Op1r(0xF7, 0), .MI, .Op16, .{_8086}),
instr(.TEST, ops2(.rm32, .imm32), Op1r(0xF7, 0), .MI, .Op32, .{_386}),
instr(.TEST, ops2(.rm64, .imm32), Op1r(0xF7, 0), .MI, .REX_W, .{x86_64}),
//
instr(.TEST, ops2(.rm8, .reg8), Op1(0x84), .MR, .ZO, .{_8086}),
instr(.TEST, ops2(.rm16, .reg16), Op1(0x85), .MR, .Op16, .{_8086}),
instr(.TEST, ops2(.rm32, .reg32), Op1(0x85), .MR, .Op32, .{_386}),
instr(.TEST, ops2(.rm64, .reg64), Op1(0x85), .MR, .REX_W, .{x86_64}),
// WAIT
instr(.WAIT, ops0(), Op1(0x9B), .ZO, .ZO, .{_8086}),
// XCHG
instr(.XCHG, ops2(.reg_ax, .reg16), Op1(0x90), .O2, .Op16, .{_8086}),
instr(.XCHG, ops2(.reg16, .reg_ax), Op1(0x90), .O, .Op16, .{_8086}),
instr(.XCHG, ops2(.reg_eax, .reg32), Op1(0x90), .O2, .Op32, .{ _386, edge.XCHG_EAX }),
instr(.XCHG, ops2(.reg32, .reg_eax), Op1(0x90), .O, .Op32, .{ _386, edge.XCHG_EAX }),
instr(.XCHG, ops2(.reg_rax, .reg64), Op1(0x90), .O2, .REX_W, .{x86_64}),
instr(.XCHG, ops2(.reg64, .reg_rax), Op1(0x90), .O, .REX_W, .{x86_64}),
//
instr(.XCHG, ops2(.rm8, .reg8), Op1(0x86), .MR, .ZO, .{ _8086, Lock, HleNoLock }),
instr(.XCHG, ops2(.reg8, .rm8), Op1(0x86), .RM, .ZO, .{ _8086, Lock, HleNoLock }),
instr(.XCHG, ops2(.rm16, .reg16), Op1(0x87), .MR, .Op16, .{ _8086, Lock, HleNoLock }),
instr(.XCHG, ops2(.reg16, .rm16), Op1(0x87), .RM, .Op16, .{ _8086, Lock, HleNoLock }),
instr(.XCHG, ops2(.rm32, .reg32), Op1(0x87), .MR, .Op32, .{ _386, Lock, HleNoLock }),
instr(.XCHG, ops2(.reg32, .rm32), Op1(0x87), .RM, .Op32, .{ _386, Lock, HleNoLock }),
instr(.XCHG, ops2(.rm64, .reg64), Op1(0x87), .MR, .REX_W, .{ x86_64, Lock, HleNoLock }),
instr(.XCHG, ops2(.reg64, .rm64), Op1(0x87), .RM, .REX_W, .{ x86_64, Lock, HleNoLock }),
// XLAT / XLATB
instr(.XLAT, ops2(.reg_al, .rm_mem8), Op1(0xD7), .MSpec, .ZO, .{_8086}),
//
instr(.XLATB, ops0(), Op1(0xD7), .ZO, .ZO, .{_8086}),
// XOR
instr(.XOR, ops2(.reg_al, .imm8), Op1(0x34), .I2, .ZO, .{_8086}),
instr(.XOR, ops2(.rm8, .imm8), Op1r(0x80, 6), .MI, .ZO, .{ _8086, Lock, Hle }),
instr(.XOR, ops2(.rm16, .imm8), Op1r(0x83, 6), .MI, .Op16, .{ _8086, Sign, Lock, Hle }),
instr(.XOR, ops2(.rm32, .imm8), Op1r(0x83, 6), .MI, .Op32, .{ _386, Sign, Lock, Hle }),
instr(.XOR, ops2(.rm64, .imm8), Op1r(0x83, 6), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.XOR, ops2(.reg_ax, .imm16), Op1(0x35), .I2, .Op16, .{_8086}),
instr(.XOR, ops2(.reg_eax, .imm32), Op1(0x35), .I2, .Op32, .{_386}),
instr(.XOR, ops2(.reg_rax, .imm32), Op1(0x35), .I2, .REX_W, .{ x86_64, Sign }),
//
instr(.XOR, ops2(.rm16, .imm16), Op1r(0x81, 6), .MI, .Op16, .{ _8086, Lock, Hle }),
instr(.XOR, ops2(.rm32, .imm32), Op1r(0x81, 6), .MI, .Op32, .{ _386, Lock, Hle }),
instr(.XOR, ops2(.rm64, .imm32), Op1r(0x81, 6), .MI, .REX_W, .{ x86_64, Sign, Lock, Hle }),
//
instr(.XOR, ops2(.rm8, .reg8), Op1(0x30), .MR, .ZO, .{ _8086, Lock, Hle }),
instr(.XOR, ops2(.rm16, .reg16), Op1(0x31), .MR, .Op16, .{ _8086, Lock, Hle }),
instr(.XOR, ops2(.rm32, .reg32), Op1(0x31), .MR, .Op32, .{ _386, Lock, Hle }),
instr(.XOR, ops2(.rm64, .reg64), Op1(0x31), .MR, .REX_W, .{ x86_64, Lock, Hle }),
//
instr(.XOR, ops2(.reg8, .rm8), Op1(0x32), .RM, .ZO, .{_8086}),
instr(.XOR, ops2(.reg16, .rm16), Op1(0x33), .RM, .Op16, .{_8086}),
instr(.XOR, ops2(.reg32, .rm32), Op1(0x33), .RM, .Op32, .{_386}),
instr(.XOR, ops2(.reg64, .rm64), Op1(0x33), .RM, .REX_W, .{x86_64}),
//
// x87 -- 8087 / 80287 / 80387
//
// F2XM1
instr(.F2XM1, ops0(), Op2(0xD9, 0xF0), .ZO, .ZO, .{_087}),
// FABS
instr(.FABS, ops0(), Op2(0xD9, 0xE1), .ZO, .ZO, .{_087}),
// FADD / FADDP / FIADD
instr(.FADD, ops1(.rm_mem32), Op1r(0xD8, 0), .M, .ZO, .{_087}),
instr(.FADD, ops1(.rm_mem64), Op1r(0xDC, 0), .M, .ZO, .{_087}),
instr(.FADD, ops1(.reg_st), Op2(0xD8, 0xC0), .O, .ZO, .{_087}),
instr(.FADD, ops2(.reg_st, .reg_st0), Op2(0xDC, 0xC0), .O, .ZO, .{_087}),
instr(.FADD, ops2(.reg_st0, .reg_st), Op2(0xD8, 0xC0), .O2, .ZO, .{_087}),
//
instr(.FADDP, ops2(.reg_st, .reg_st0), Op2(0xDE, 0xC0), .O, .ZO, .{_087}),
instr(.FADDP, ops0(), Op2(0xDE, 0xC1), .ZO, .ZO, .{_087}),
//
instr(.FIADD, ops1(.rm_mem16), Op1r(0xDE, 0), .M, .ZO, .{_087}),
instr(.FIADD, ops1(.rm_mem32), Op1r(0xDA, 0), .M, .ZO, .{_087}),
// FBLD
instr(.FBLD, ops1(.rm_mem80), Op1r(0xDF, 4), .M, .ZO, .{_087}),
// FBSTP
instr(.FBSTP, ops1(.rm_mem80), Op1r(0xDF, 6), .M, .ZO, .{_087}),
// FCHS
instr(.FCHS, ops0(), Op2(0xD9, 0xE0), .ZO, .ZO, .{_087}),
instr(.FCHS, ops1(.reg_st0), Op2(0xD9, 0xE0), .ZO, .ZO, .{_087}),
// FCLEX / FNCLEX
instr(.FCLEX, ops0(), compOp2(.FWAIT, 0xDB, 0xE2), .ZO, .ZO, .{_087}),
instr(.FNCLEX, ops0(), Op2(0xDB, 0xE2), .ZO, .ZO, .{_087}),
// FCOM / FCOMP / FCOMPP
instr(.FCOM, ops1(.rm_mem32), Op1r(0xD8, 2), .M, .ZO, .{_087}),
instr(.FCOM, ops1(.rm_mem64), Op1r(0xDC, 2), .M, .ZO, .{_087}),
//
instr(.FCOM, ops2(.reg_st0, .reg_st), Op2(0xD8, 0xD0), .O2, .ZO, .{_087}),
instr(.FCOM, ops1(.reg_st), Op2(0xD8, 0xD0), .O, .ZO, .{_087}),
instr(.FCOM, ops0(), Op2(0xD8, 0xD1), .ZO, .ZO, .{_087}),
//
instr(.FCOMP, ops1(.rm_mem32), Op1r(0xD8, 3), .M, .ZO, .{_087}),
instr(.FCOMP, ops1(.rm_mem64), Op1r(0xDC, 3), .M, .ZO, .{_087}),
//
instr(.FCOMP, ops2(.reg_st0, .reg_st), Op2(0xD8, 0xD8), .O2, .ZO, .{_087}),
instr(.FCOMP, ops1(.reg_st), Op2(0xD8, 0xD8), .O, .ZO, .{_087}),
instr(.FCOMP, ops0(), Op2(0xD8, 0xD9), .ZO, .ZO, .{_087}),
//
instr(.FCOMPP, ops0(), Op2(0xDE, 0xD9), .ZO, .ZO, .{_087}),
// FDECSTP
instr(.FDECSTP, ops0(), Op2(0xD9, 0xF6), .ZO, .ZO, .{_087}),
// FDIV / FDIVP / FIDIV
instr(.FDIV, ops1(.rm_mem32), Op1r(0xD8, 6), .M, .ZO, .{_087}),
instr(.FDIV, ops1(.rm_mem64), Op1r(0xDC, 6), .M, .ZO, .{_087}),
//
instr(.FDIV, ops1(.reg_st), Op2(0xD8, 0xF0), .O, .ZO, .{_087}),
instr(.FDIV, ops2(.reg_st0, .reg_st), Op2(0xD8, 0xF0), .O2, .ZO, .{_087}),
instr(.FDIV, ops2(.reg_st, .reg_st0), Op2(0xDC, 0xF8), .O, .ZO, .{_087}),
//
instr(.FDIVP, ops2(.reg_st, .reg_st0), Op2(0xDE, 0xF8), .O, .ZO, .{_087}),
instr(.FDIVP, ops0(), Op2(0xDE, 0xF9), .ZO, .ZO, .{_087}),
//
instr(.FIDIV, ops1(.rm_mem16), Op1r(0xDE, 6), .M, .ZO, .{_087}),
instr(.FIDIV, ops1(.rm_mem32), Op1r(0xDA, 6), .M, .ZO, .{_087}),
// FDIVR / FDIVRP / FIDIVR
instr(.FDIVR, ops1(.rm_mem32), Op1r(0xD8, 7), .M, .ZO, .{_087}),
instr(.FDIVR, ops1(.rm_mem64), Op1r(0xDC, 7), .M, .ZO, .{_087}),
//
instr(.FDIVR, ops1(.reg_st), Op2(0xD8, 0xF8), .O, .ZO, .{_087}),
instr(.FDIVR, ops2(.reg_st, .reg_st0), Op2(0xDC, 0xF0), .O, .ZO, .{_087}),
instr(.FDIVR, ops2(.reg_st0, .reg_st), Op2(0xD8, 0xF8), .O2, .ZO, .{_087}),
//
instr(.FDIVRP, ops2(.reg_st, .reg_st0), Op2(0xDE, 0xF0), .O, .ZO, .{_087}),
instr(.FDIVRP, ops0(), Op2(0xDE, 0xF1), .ZO, .ZO, .{_087}),
//
instr(.FIDIVR, ops1(.rm_mem16), Op1r(0xDE, 7), .M, .ZO, .{_087}),
instr(.FIDIVR, ops1(.rm_mem32), Op1r(0xDA, 7), .M, .ZO, .{_087}),
// FFREE
instr(.FFREE, ops1(.reg_st), Op2(0xDD, 0xC0), .O, .ZO, .{_087}),
// FICOM / FICOMP
instr(.FICOM, ops1(.rm_mem16), Op1r(0xDE, 2), .M, .ZO, .{_087}),
instr(.FICOM, ops1(.rm_mem32), Op1r(0xDA, 2), .M, .ZO, .{_087}),
//
instr(.FICOMP, ops1(.rm_mem16), Op1r(0xDE, 3), .M, .ZO, .{_087}),
instr(.FICOMP, ops1(.rm_mem32), Op1r(0xDA, 3), .M, .ZO, .{_087}),
// FILD
instr(.FILD, ops1(.rm_mem16), Op1r(0xDF, 0), .M, .ZO, .{_087}),
instr(.FILD, ops1(.rm_mem32), Op1r(0xDB, 0), .M, .ZO, .{_087}),
instr(.FILD, ops1(.rm_mem64), Op1r(0xDF, 5), .M, .ZO, .{_087}),
// FINCSTP
instr(.FINCSTP, ops0(), Op2(0xD9, 0xF7), .ZO, .ZO, .{_087}),
// FINIT / FNINIT
instr(.FINIT, ops0(), compOp2(.FWAIT, 0xDB, 0xE3), .ZO, .ZO, .{_087}),
instr(.FNINIT, ops0(), Op2(0xDB, 0xE3), .ZO, .ZO, .{_087}),
// FIST
instr(.FIST, ops1(.rm_mem16), Op1r(0xDF, 2), .M, .ZO, .{_087}),
instr(.FIST, ops1(.rm_mem32), Op1r(0xDB, 2), .M, .ZO, .{_087}),
//
instr(.FISTP, ops1(.rm_mem16), Op1r(0xDF, 3), .M, .ZO, .{_087}),
instr(.FISTP, ops1(.rm_mem32), Op1r(0xDB, 3), .M, .ZO, .{_087}),
instr(.FISTP, ops1(.rm_mem64), Op1r(0xDF, 7), .M, .ZO, .{_087}),
// FLD
instr(.FLD, ops1(.rm_mem32), Op1r(0xD9, 0), .M, .ZO, .{_087}),
instr(.FLD, ops1(.rm_mem64), Op1r(0xDD, 0), .M, .ZO, .{_087}),
instr(.FLD, ops1(.rm_mem80), Op1r(0xDB, 5), .M, .ZO, .{_087}),
instr(.FLD, ops1(.reg_st), Op2(0xD9, 0xC0), .O, .ZO, .{_087}),
instr(.FLDCW, ops1(.rm_mem16), Op1r(0xD9, 5), .M, .ZO, .{_087}),
instr(.FLDCW, ops1(.rm_mem), Op1r(0xD9, 5), .M, .ZO, .{_087}),
instr(.FLD1, ops0(), Op2(0xD9, 0xE8), .ZO, .ZO, .{_087}),
instr(.FLDL2T, ops0(), Op2(0xD9, 0xE9), .ZO, .ZO, .{_087}),
instr(.FLDL2E, ops0(), Op2(0xD9, 0xEA), .ZO, .ZO, .{_087}),
instr(.FLDPI, ops0(), Op2(0xD9, 0xEB), .ZO, .ZO, .{_087}),
instr(.FLDLG2, ops0(), Op2(0xD9, 0xEC), .ZO, .ZO, .{_087}),
instr(.FLDLN2, ops0(), Op2(0xD9, 0xED), .ZO, .ZO, .{_087}),
instr(.FLDZ, ops0(), Op2(0xD9, 0xEE), .ZO, .ZO, .{_087}),
// FLDENV
instr(.FLDENV, ops1(.rm_mem), Op1r(0xD9, 4), .M, .ZO, .{_087}),
instr(.FLDENVW, ops1(.rm_mem), Op1r(0xD9, 4), .M, .Op16, .{_087}),
instr(.FLDENVD, ops1(.rm_mem), Op1r(0xD9, 4), .M, .Op32, .{_387}),
// FMUL / FMULP / FIMUL
instr(.FMUL, ops1(.rm_mem32), Op1r(0xD8, 1), .M, .ZO, .{_087}),
instr(.FMUL, ops1(.rm_mem64), Op1r(0xDC, 1), .M, .ZO, .{_087}),
//
instr(.FMUL, ops1(.reg_st), Op2(0xD8, 0xC8), .O, .ZO, .{_087}),
instr(.FMUL, ops2(.reg_st, .reg_st0), Op2(0xDC, 0xC8), .O, .ZO, .{_087}),
instr(.FMUL, ops2(.reg_st0, .reg_st), Op2(0xD8, 0xC8), .O2, .ZO, .{_087}),
//
instr(.FMULP, ops2(.reg_st, .reg_st0), Op2(0xDE, 0xC8), .O, .ZO, .{_087}),
instr(.FMULP, ops0(), Op2(0xDE, 0xC9), .ZO, .ZO, .{_087}),
//
instr(.FIMUL, ops1(.rm_mem16), Op1r(0xDE, 1), .M, .ZO, .{_087}),
instr(.FIMUL, ops1(.rm_mem32), Op1r(0xDA, 1), .M, .ZO, .{_087}),
// FNOP
instr(.FNOP, ops0(), Op2(0xD9, 0xD0), .ZO, .ZO, .{_087}),
// FPATAN
instr(.FPATAN, ops0(), Op2(0xD9, 0xF3), .ZO, .ZO, .{_087}),
// FPREM
instr(.FPREM, ops0(), Op2(0xD9, 0xF8), .ZO, .ZO, .{_087}),
// FPTAN
instr(.FPTAN, ops0(), Op2(0xD9, 0xF2), .ZO, .ZO, .{_087}),
// FRNDINT
instr(.FRNDINT, ops0(), Op2(0xD9, 0xFC), .ZO, .ZO, .{_087}),
// FRSTOR
instr(.FRSTOR, ops1(.rm_mem), Op1r(0xDD, 4), .M, .ZO, .{_087}),
instr(.FRSTORW, ops1(.rm_mem), Op1r(0xDD, 4), .M, .Op16, .{_087}),
instr(.FRSTORD, ops1(.rm_mem), Op1r(0xDD, 4), .M, .Op32, .{_387}),
// FSAVE / FNSAVE
instr(.FSAVE, ops1(.rm_mem), compOp1r(.FWAIT, 0xDD, 6), .M, .ZO, .{_087}),
instr(.FSAVEW, ops1(.rm_mem), compOp1r(.FWAIT, 0xDD, 6), .M, .Op16, .{_087}),
instr(.FSAVED, ops1(.rm_mem), compOp1r(.FWAIT, 0xDD, 6), .M, .Op32, .{_387}),
instr(.FNSAVE, ops1(.rm_mem), Op1r(0xDD, 6), .M, .ZO, .{_087}),
instr(.FNSAVEW, ops1(.rm_mem), Op1r(0xDD, 6), .M, .Op16, .{_087}),
instr(.FNSAVED, ops1(.rm_mem), Op1r(0xDD, 6), .M, .Op32, .{_387}),
// FSCALE
instr(.FSCALE, ops0(), Op2(0xD9, 0xFD), .ZO, .ZO, .{_087}),
// FSQRT
instr(.FSQRT, ops0(), Op2(0xD9, 0xFA), .ZO, .ZO, .{_087}),
// FST / FSTP
instr(.FST, ops1(.rm_mem32), Op1r(0xD9, 2), .M, .ZO, .{_087}),
instr(.FST, ops1(.rm_mem64), Op1r(0xDD, 2), .M, .ZO, .{_087}),
instr(.FST, ops1(.reg_st), Op2(0xDD, 0xD0), .O, .ZO, .{_087}),
instr(.FST, ops2(.reg_st0, .reg_st), Op2(0xDD, 0xD0), .O2, .ZO, .{_087}),
instr(.FSTP, ops1(.rm_mem32), Op1r(0xD9, 3), .M, .ZO, .{_087}),
instr(.FSTP, ops1(.rm_mem64), Op1r(0xDD, 3), .M, .ZO, .{_087}),
instr(.FSTP, ops1(.rm_mem80), Op1r(0xDB, 7), .M, .ZO, .{_087}),
instr(.FSTP, ops1(.reg_st), Op2(0xDD, 0xD8), .O, .ZO, .{_087}),
instr(.FSTP, ops2(.reg_st0, .reg_st), Op2(0xDD, 0xD8), .O2, .ZO, .{_087}),
// FSTCW / FNSTCW
instr(.FSTCW, ops1(.rm_mem), compOp1r(.FWAIT, 0xD9, 7), .M, .ZO, .{_087}),
instr(.FSTCW, ops1(.rm_mem16), compOp1r(.FWAIT, 0xD9, 7), .M, .ZO, .{_087}),
instr(.FNSTCW, ops1(.rm_mem), Op1r(0xD9, 7), .M, .ZO, .{_087}),
instr(.FNSTCW, ops1(.rm_mem16), Op1r(0xD9, 7), .M, .ZO, .{_087}),
// FSTENV / FNSTENV
instr(.FSTENV, ops1(.rm_mem), compOp1r(.FWAIT, 0xD9, 6), .M, .ZO, .{_087}),
instr(.FSTENVW, ops1(.rm_mem), compOp1r(.FWAIT, 0xD9, 6), .M, .Op16, .{_087}),
instr(.FSTENVD, ops1(.rm_mem), compOp1r(.FWAIT, 0xD9, 6), .M, .Op32, .{_387}),
instr(.FNSTENV, ops1(.rm_mem), Op1r(0xD9, 6), .M, .ZO, .{_087}),
instr(.FNSTENVW, ops1(.rm_mem), Op1r(0xD9, 6), .M, .Op16, .{_087}),
instr(.FNSTENVD, ops1(.rm_mem), Op1r(0xD9, 6), .M, .Op32, .{_387}),
// FSTSW / FNSTSW
instr(.FSTSW, ops1(.rm_mem), compOp1r(.FWAIT, 0xDD, 7), .M, .ZO, .{_087}),
instr(.FSTSW, ops1(.rm_mem16), compOp1r(.FWAIT, 0xDD, 7), .M, .ZO, .{_087}),
instr(.FSTSW, ops1(.reg_ax), compOp2(.FWAIT, 0xDF, 0xE0), .ZO, .ZO, .{_087}),
instr(.FNSTSW, ops1(.rm_mem), Op1r(0xDD, 7), .M, .ZO, .{_087}),
instr(.FNSTSW, ops1(.rm_mem16), Op1r(0xDD, 7), .M, .ZO, .{_087}),
instr(.FNSTSW, ops1(.reg_ax), Op2(0xDF, 0xE0), .ZO, .ZO, .{_087}),
// FSUB / FSUBP / FISUB
instr(.FSUB, ops1(.rm_mem32), Op1r(0xD8, 4), .M, .ZO, .{_087}),
instr(.FSUB, ops1(.rm_mem64), Op1r(0xDC, 4), .M, .ZO, .{_087}),
//
instr(.FSUB, ops1(.reg_st), Op2(0xD8, 0xE0), .O, .ZO, .{_087}),
instr(.FSUB, ops2(.reg_st, .reg_st0), Op2(0xDC, 0xE8), .O, .ZO, .{_087}),
instr(.FSUB, ops2(.reg_st0, .reg_st), Op2(0xD8, 0xE0), .O2, .ZO, .{_087}),
//
instr(.FSUBP, ops2(.reg_st, .reg_st0), Op2(0xDE, 0xE8), .O, .ZO, .{_087}),
instr(.FSUBP, ops0(), Op2(0xDE, 0xE9), .ZO, .ZO, .{_087}),
//
instr(.FISUB, ops1(.rm_mem16), Op1r(0xDE, 4), .M, .ZO, .{_087}),
instr(.FISUB, ops1(.rm_mem32), Op1r(0xDA, 4), .M, .ZO, .{_087}),
// FSUBR / FSUBRP / FISUBR
instr(.FSUBR, ops1(.rm_mem32), Op1r(0xD8, 5), .M, .ZO, .{_087}),
instr(.FSUBR, ops1(.rm_mem64), Op1r(0xDC, 5), .M, .ZO, .{_087}),
//
instr(.FSUBR, ops1(.reg_st), Op2(0xD8, 0xE8), .O, .ZO, .{_087}),
instr(.FSUBR, ops2(.reg_st0, .reg_st), Op2(0xD8, 0xE8), .O2, .ZO, .{_087}),
instr(.FSUBR, ops2(.reg_st, .reg_st0), Op2(0xDC, 0xE0), .O, .ZO, .{_087}),
//
instr(.FSUBRP, ops2(.reg_st, .reg_st0), Op2(0xDE, 0xE0), .O, .ZO, .{_087}),
instr(.FSUBRP, ops0(), Op2(0xDE, 0xE1), .ZO, .ZO, .{_087}),
//
instr(.FISUBR, ops1(.rm_mem16), Op1r(0xDE, 5), .M, .ZO, .{_087}),
instr(.FISUBR, ops1(.rm_mem32), Op1r(0xDA, 5), .M, .ZO, .{_087}),
// FTST
instr(.FTST, ops0(), Op2(0xD9, 0xE4), .ZO, .ZO, .{_087}),
// FWAIT (alternate mnemonic for WAIT)
instr(.FWAIT, ops0(), Op1(0x9B), .ZO, .ZO, .{_087}),
// FXAM
instr(.FXAM, ops0(), Op2(0xD9, 0xE5), .ZO, .ZO, .{_087}),
// FXCH
instr(.FXCH, ops2(.reg_st0, .reg_st), Op2(0xD9, 0xC8), .O2, .ZO, .{_087}),
instr(.FXCH, ops1(.reg_st), Op2(0xD9, 0xC8), .O, .ZO, .{_087}),
instr(.FXCH, ops0(), Op2(0xD9, 0xC9), .ZO, .ZO, .{_087}),
// FXTRACT
instr(.FXTRACT, ops0(), Op2(0xD9, 0xF4), .ZO, .ZO, .{_087}),
// FYL2X
instr(.FYL2X, ops0(), Op2(0xD9, 0xF1), .ZO, .ZO, .{_087}),
// FYL2XP1
instr(.FYL2XP1, ops0(), Op2(0xD9, 0xF9), .ZO, .ZO, .{_087}),
//
// 80287
//
// instr(.FSETPM, ops0(), Op2(0xDB, 0xE4), .ZO, .ZO, .{cpu._287, edge.obsolete} ),
//
// 80387
//
instr(.FCOS, ops0(), Op2(0xD9, 0xFF), .ZO, .ZO, .{_387}),
instr(.FPREM1, ops0(), Op2(0xD9, 0xF5), .ZO, .ZO, .{_387}),
instr(.FSIN, ops0(), Op2(0xD9, 0xFE), .ZO, .ZO, .{_387}),
instr(.FSINCOS, ops0(), Op2(0xD9, 0xFB), .ZO, .ZO, .{_387}),
// FUCOM / FUCOMP / FUCOMPP
instr(.FUCOM, ops2(.reg_st0, .reg_st), Op2(0xDD, 0xE0), .O2, .ZO, .{_387}),
instr(.FUCOM, ops1(.reg_st), Op2(0xDD, 0xE0), .O, .ZO, .{_387}),
instr(.FUCOM, ops0(), Op2(0xDD, 0xE1), .ZO, .ZO, .{_387}),
//
instr(.FUCOMP, ops2(.reg_st0, .reg_st), Op2(0xDD, 0xE8), .O2, .ZO, .{_387}),
instr(.FUCOMP, ops1(.reg_st), Op2(0xDD, 0xE8), .O, .ZO, .{_387}),
instr(.FUCOMP, ops0(), Op2(0xDD, 0xE9), .ZO, .ZO, .{_387}),
//
instr(.FUCOMPP, ops0(), Op2(0xDA, 0xE9), .ZO, .ZO, .{_387}),
//
// x87 -- Pentium Pro / P6
//
// FCMOVcc
instr(.FCMOVB, ops2(.reg_st0, .reg_st), Op2(0xDA, 0xC0), .O2, .ZO, .{ cpu.CMOV, cpu.FPU }),
instr(.FCMOVB, ops1(.reg_st), Op2(0xDA, 0xC0), .O, .ZO, .{ cpu.CMOV, cpu.FPU }),
//
instr(.FCMOVE, ops2(.reg_st0, .reg_st), Op2(0xDA, 0xC8), .O2, .ZO, .{ cpu.CMOV, cpu.FPU }),
instr(.FCMOVE, ops1(.reg_st), Op2(0xDA, 0xC8), .O, .ZO, .{ cpu.CMOV, cpu.FPU }),
//
instr(.FCMOVBE, ops2(.reg_st0, .reg_st), Op2(0xDA, 0xD0), .O2, .ZO, .{ cpu.CMOV, cpu.FPU }),
instr(.FCMOVBE, ops1(.reg_st), Op2(0xDA, 0xD0), .O, .ZO, .{ cpu.CMOV, cpu.FPU }),
//
instr(.FCMOVU, ops2(.reg_st0, .reg_st), Op2(0xDA, 0xD8), .O2, .ZO, .{ cpu.CMOV, cpu.FPU }),
instr(.FCMOVU, ops1(.reg_st), Op2(0xDA, 0xD8), .O, .ZO, .{ cpu.CMOV, cpu.FPU }),
//
instr(.FCMOVNB, ops2(.reg_st0, .reg_st), Op2(0xDB, 0xC0), .O2, .ZO, .{ cpu.CMOV, cpu.FPU }),
instr(.FCMOVNB, ops1(.reg_st), Op2(0xDB, 0xC0), .O, .ZO, .{ cpu.CMOV, cpu.FPU }),
//
instr(.FCMOVNE, ops2(.reg_st0, .reg_st), Op2(0xDB, 0xC8), .O2, .ZO, .{ cpu.CMOV, cpu.FPU }),
instr(.FCMOVNE, ops1(.reg_st), Op2(0xDB, 0xC8), .O, .ZO, .{ cpu.CMOV, cpu.FPU }),
//
instr(.FCMOVNBE, ops2(.reg_st0, .reg_st), Op2(0xDB, 0xD0), .O2, .ZO, .{ cpu.CMOV, cpu.FPU }),
instr(.FCMOVNBE, ops1(.reg_st), Op2(0xDB, 0xD0), .O, .ZO, .{ cpu.CMOV, cpu.FPU }),
//
instr(.FCMOVNU, ops2(.reg_st0, .reg_st), Op2(0xDB, 0xD8), .O2, .ZO, .{ cpu.CMOV, cpu.FPU }),
instr(.FCMOVNU, ops1(.reg_st), Op2(0xDB, 0xD8), .O, .ZO, .{ cpu.CMOV, cpu.FPU }),
// FCOMI
instr(.FCOMI, ops2(.reg_st0, .reg_st), Op2(0xDB, 0xF0), .O2, .ZO, .{ cpu.CMOV, cpu.FPU }),
instr(.FCOMI, ops1(.reg_st), Op2(0xDB, 0xF0), .O, .ZO, .{ cpu.CMOV, cpu.FPU }),
// FCOMIP
instr(.FCOMIP, ops2(.reg_st0, .reg_st), Op2(0xDF, 0xF0), .O2, .ZO, .{ cpu.CMOV, cpu.FPU }),
instr(.FCOMIP, ops1(.reg_st), Op2(0xDF, 0xF0), .O, .ZO, .{ cpu.CMOV, cpu.FPU }),
// FUCOMI
instr(.FUCOMI, ops2(.reg_st0, .reg_st), Op2(0xDB, 0xE8), .O2, .ZO, .{ cpu.CMOV, cpu.FPU }),
instr(.FUCOMI, ops1(.reg_st), Op2(0xDB, 0xE8), .O, .ZO, .{ cpu.CMOV, cpu.FPU }),
// FUCOMIP
instr(.FUCOMIP, ops2(.reg_st0, .reg_st), Op2(0xDF, 0xE8), .O2, .ZO, .{ cpu.CMOV, cpu.FPU }),
instr(.FUCOMIP, ops1(.reg_st), Op2(0xDF, 0xE8), .O, .ZO, .{ cpu.CMOV, cpu.FPU }),
//
// x87 - other
// FISTTP - like FISTP, but always truncates toward zero (ignores the rounding control)
instr(.FISTTP, ops1(.rm_mem16), Op1r(0xDF, 1), .M, .ZO, .{SSE3}),
instr(.FISTTP, ops1(.rm_mem32), Op1r(0xDB, 1), .M, .ZO, .{SSE3}),
instr(.FISTTP, ops1(.rm_mem64), Op1r(0xDD, 1), .M, .ZO, .{SSE3}),
//
// 80286
//
// NOTES: might want to handle operands like `r32/m16` better
instr(.ARPL, ops2(.rm16, .reg16), Op1(0x63), .MR, .ZO, .{ No64, _286 }),
//
instr(.CLTS, ops0(), Op2(0x0F, 0x06), .ZO, .ZO, .{_286}),
//
instr(.LAR, ops2(.reg16, .rm16), Op2(0x0F, 0x02), .RM, .Op16, .{_286}),
instr(.LAR, ops2(.reg32, .rm32), Op2(0x0F, 0x02), .RM, .Op32, .{_386}),
instr(.LAR, ops2(.reg64, .rm64), Op2(0x0F, 0x02), .RM, .REX_W, .{x86_64}),
//
instr(.LGDT, ops1(.rm_mem), Op2r(0x0F, 0x01, 2), .M, .ZO, .{ No64, _286 }),
instr(.LGDT, ops1(.rm_mem), Op2r(0x0F, 0x01, 2), .M, .ZO, .{ No32, _286 }),
//
instr(.LIDT, ops1(.rm_mem), Op2r(0x0F, 0x01, 3), .M, .ZO, .{ No64, _286 }),
instr(.LIDT, ops1(.rm_mem), Op2r(0x0F, 0x01, 3), .M, .ZO, .{ No32, _286 }),
//
instr(.LLDT, ops1(.rm16), Op2r(0x0F, 0x00, 2), .M, .ZO, .{_286}),
//
instr(.LMSW, ops1(.rm16), Op2r(0x0F, 0x01, 6), .M, .ZO, .{_286}),
//
// instr(.LOADALL, ops1(.rm16), Op2r(0x0F, 0x01, 6), .M, .ZO, .{_286, _386} ), // undocumented
//
instr(.LSL, ops2(.reg16, .rm16), Op2(0x0F, 0x03), .RM, .Op16, .{_286}),
instr(.LSL, ops2(.reg32, .rm32), Op2(0x0F, 0x03), .RM, .Op32, .{_386}),
instr(.LSL, ops2(.reg64, .rm64), Op2(0x0F, 0x03), .RM, .REX_W, .{x86_64}),
//
instr(.LTR, ops1(.rm16), Op2r(0x0F, 0x00, 3), .M, .ZO, .{_286}),
//
instr(.SGDT, ops1(.rm_mem), Op2r(0x0F, 0x01, 0), .M, .ZO, .{_286}),
//
instr(.SIDT, ops1(.rm_mem), Op2r(0x0F, 0x01, 1), .M, .ZO, .{_286}),
//
instr(.SLDT, ops1(.rm_mem16), Op2r(0x0F, 0x00, 0), .M, .ZO, .{_286}),
instr(.SLDT, ops1(.rm_reg16), Op2r(0x0F, 0x00, 0), .M, .Op16, .{_286}),
instr(.SLDT, ops1(.rm_reg32), Op2r(0x0F, 0x00, 0), .M, .Op32, .{_386}),
instr(.SLDT, ops1(.rm_reg64), Op2r(0x0F, 0x00, 0), .M, .REX_W, .{x86_64}),
//
instr(.SMSW, ops1(.rm_mem16), Op2r(0x0F, 0x01, 4), .M, .ZO, .{_286}),
instr(.SMSW, ops1(.rm_reg16), Op2r(0x0F, 0x01, 4), .M, .Op16, .{_286}),
instr(.SMSW, ops1(.rm_reg32), Op2r(0x0F, 0x01, 4), .M, .Op32, .{_386}),
instr(.SMSW, ops1(.rm_reg64), Op2r(0x0F, 0x01, 4), .M, .REX_W, .{x86_64}),
//
instr(.STR, ops1(.rm_mem16), Op2r(0x0F, 0x00, 1), .M, .ZO, .{_286}),
instr(.STR, ops1(.rm_reg16), Op2r(0x0F, 0x00, 1), .M, .Op16, .{_286}),
instr(.STR, ops1(.rm_reg32), Op2r(0x0F, 0x00, 1), .M, .Op32, .{_386}),
instr(.STR, ops1(.rm_reg64), Op2r(0x0F, 0x00, 1), .M, .REX_W, .{x86_64}),
//
instr(.VERR, ops1(.rm16), Op2r(0x0F, 0x00, 4), .M, .ZO, .{_286}),
instr(.VERW, ops1(.rm16), Op2r(0x0F, 0x00, 5), .M, .ZO, .{_286}),
//
// 80386
//
// BSF
instr(.BSF, ops2(.reg16, .rm16), Op2(0x0F, 0xBC), .RM, .Op16, .{_386}),
instr(.BSF, ops2(.reg32, .rm32), Op2(0x0F, 0xBC), .RM, .Op32, .{_386}),
instr(.BSF, ops2(.reg64, .rm64), Op2(0x0F, 0xBC), .RM, .REX_W, .{x86_64}),
// BSR
instr(.BSR, ops2(.reg16, .rm16), Op2(0x0F, 0xBD), .RM, .Op16, .{_386}),
instr(.BSR, ops2(.reg32, .rm32), Op2(0x0F, 0xBD), .RM, .Op32, .{_386}),
instr(.BSR, ops2(.reg64, .rm64), Op2(0x0F, 0xBD), .RM, .REX_W, .{x86_64}),
// BT
instr(.BT, ops2(.rm16, .reg16), Op2(0x0F, 0xA3), .MR, .Op16, .{_386}),
instr(.BT, ops2(.rm32, .reg32), Op2(0x0F, 0xA3), .MR, .Op32, .{_386}),
instr(.BT, ops2(.rm64, .reg64), Op2(0x0F, 0xA3), .MR, .REX_W, .{x86_64}),
//
instr(.BT, ops2(.rm16, .imm8), Op2r(0x0F, 0xBA, 4), .MI, .Op16, .{_386}),
instr(.BT, ops2(.rm32, .imm8), Op2r(0x0F, 0xBA, 4), .MI, .Op32, .{_386}),
instr(.BT, ops2(.rm64, .imm8), Op2r(0x0F, 0xBA, 4), .MI, .REX_W, .{x86_64}),
// BTC
instr(.BTC, ops2(.rm16, .reg16), Op2(0x0F, 0xBB), .MR, .Op16, .{ _386, Lock, Hle }),
instr(.BTC, ops2(.rm32, .reg32), Op2(0x0F, 0xBB), .MR, .Op32, .{ _386, Lock, Hle }),
instr(.BTC, ops2(.rm64, .reg64), Op2(0x0F, 0xBB), .MR, .REX_W, .{ x86_64, Lock, Hle }),
//
instr(.BTC, ops2(.rm16, .imm8), Op2r(0x0F, 0xBA, 7), .MI, .Op16, .{ _386, Lock, Hle }),
instr(.BTC, ops2(.rm32, .imm8), Op2r(0x0F, 0xBA, 7), .MI, .Op32, .{ _386, Lock, Hle }),
instr(.BTC, ops2(.rm64, .imm8), Op2r(0x0F, 0xBA, 7), .MI, .REX_W, .{ x86_64, Lock, Hle }),
// BTR
instr(.BTR, ops2(.rm16, .reg16), Op2(0x0F, 0xB3), .MR, .Op16, .{ _386, Lock, Hle }),
instr(.BTR, ops2(.rm32, .reg32), Op2(0x0F, 0xB3), .MR, .Op32, .{ _386, Lock, Hle }),
instr(.BTR, ops2(.rm64, .reg64), Op2(0x0F, 0xB3), .MR, .REX_W, .{ x86_64, Lock, Hle }),
//
instr(.BTR, ops2(.rm16, .imm8), Op2r(0x0F, 0xBA, 6), .MI, .Op16, .{ _386, Lock, Hle }),
instr(.BTR, ops2(.rm32, .imm8), Op2r(0x0F, 0xBA, 6), .MI, .Op32, .{ _386, Lock, Hle }),
instr(.BTR, ops2(.rm64, .imm8), Op2r(0x0F, 0xBA, 6), .MI, .REX_W, .{ x86_64, Lock, Hle }),
// BTS
instr(.BTS, ops2(.rm16, .reg16), Op2(0x0F, 0xAB), .MR, .Op16, .{ _386, Lock, Hle }),
instr(.BTS, ops2(.rm32, .reg32), Op2(0x0F, 0xAB), .MR, .Op32, .{ _386, Lock, Hle }),
instr(.BTS, ops2(.rm64, .reg64), Op2(0x0F, 0xAB), .MR, .REX_W, .{ x86_64, Lock, Hle }),
//
instr(.BTS, ops2(.rm16, .imm8), Op2r(0x0F, 0xBA, 5), .MI, .Op16, .{_386}),
instr(.BTS, ops2(.rm32, .imm8), Op2r(0x0F, 0xBA, 5), .MI, .Op32, .{_386}),
instr(.BTS, ops2(.rm64, .imm8), Op2r(0x0F, 0xBA, 5), .MI, .REX_W, .{x86_64}),
// IBTS
instr(.IBTS, ops2(.rm16, .reg16), Op2(0x0F, 0xA7), .MR, .Op16, .{ _386_Legacy, No64 }),
instr(.IBTS, ops2(.rm32, .reg32), Op2(0x0F, 0xA7), .MR, .Op32, .{ _386_Legacy, No64 }),
// MOVSX / MOVSXD
instr(.MOVSX, ops2(.reg16, .rm8), Op2(0x0F, 0xBE), .RM, .Op16, .{_386}),
instr(.MOVSX, ops2(.reg32, .rm8), Op2(0x0F, 0xBE), .RM, .Op32, .{_386}),
instr(.MOVSX, ops2(.reg64, .rm8), Op2(0x0F, 0xBE), .RM, .REX_W, .{x86_64}),
//
instr(.MOVSX, ops2(.reg16, .rm16), Op2(0x0F, 0xBF), .RM, .Op16, .{_386}),
instr(.MOVSX, ops2(.reg32, .rm16), Op2(0x0F, 0xBF), .RM, .Op32, .{_386}),
instr(.MOVSX, ops2(.reg64, .rm16), Op2(0x0F, 0xBF), .RM, .REX_W, .{x86_64}),
//
instr(.MOVSXD, ops2(.reg16, .rm16), Op1(0x63), .RM, .Op16, .{_386}),
instr(.MOVSXD, ops2(.reg16, .rm32), Op1(0x63), .RM, .Op16, .{_386}),
instr(.MOVSXD, ops2(.reg32, .rm32), Op1(0x63), .RM, .Op32, .{_386}),
instr(.MOVSXD, ops2(.reg64, .rm32), Op1(0x63), .RM, .REX_W, .{x86_64}),
// MOVZX
instr(.MOVZX, ops2(.reg16, .rm8), Op2(0x0F, 0xB6), .RM, .Op16, .{_386}),
instr(.MOVZX, ops2(.reg32, .rm8), Op2(0x0F, 0xB6), .RM, .Op32, .{_386}),
instr(.MOVZX, ops2(.reg64, .rm8), Op2(0x0F, 0xB6), .RM, .REX_W, .{x86_64}),
//
instr(.MOVZX, ops2(.reg16, .rm16), Op2(0x0F, 0xB7), .RM, .Op16, .{_386}),
instr(.MOVZX, ops2(.reg32, .rm16), Op2(0x0F, 0xB7), .RM, .Op32, .{_386}),
instr(.MOVZX, ops2(.reg64, .rm16), Op2(0x0F, 0xB7), .RM, .REX_W, .{x86_64}),
// SETcc
instr(.SETA, ops1(.rm8), Op2(0x0F, 0x97), .M, .ZO, .{_386}),
instr(.SETAE, ops1(.rm8), Op2(0x0F, 0x93), .M, .ZO, .{_386}),
instr(.SETB, ops1(.rm8), Op2(0x0F, 0x92), .M, .ZO, .{_386}),
instr(.SETBE, ops1(.rm8), Op2(0x0F, 0x96), .M, .ZO, .{_386}),
instr(.SETC, ops1(.rm8), Op2(0x0F, 0x92), .M, .ZO, .{_386}),
instr(.SETE, ops1(.rm8), Op2(0x0F, 0x94), .M, .ZO, .{_386}),
instr(.SETG, ops1(.rm8), Op2(0x0F, 0x9F), .M, .ZO, .{_386}),
instr(.SETGE, ops1(.rm8), Op2(0x0F, 0x9D), .M, .ZO, .{_386}),
instr(.SETL, ops1(.rm8), Op2(0x0F, 0x9C), .M, .ZO, .{_386}),
instr(.SETLE, ops1(.rm8), Op2(0x0F, 0x9E), .M, .ZO, .{_386}),
instr(.SETNA, ops1(.rm8), Op2(0x0F, 0x96), .M, .ZO, .{_386}),
instr(.SETNAE, ops1(.rm8), Op2(0x0F, 0x92), .M, .ZO, .{_386}),
instr(.SETNB, ops1(.rm8), Op2(0x0F, 0x93), .M, .ZO, .{_386}),
instr(.SETNBE, ops1(.rm8), Op2(0x0F, 0x97), .M, .ZO, .{_386}),
instr(.SETNC, ops1(.rm8), Op2(0x0F, 0x93), .M, .ZO, .{_386}),
instr(.SETNE, ops1(.rm8), Op2(0x0F, 0x95), .M, .ZO, .{_386}),
instr(.SETNG, ops1(.rm8), Op2(0x0F, 0x9E), .M, .ZO, .{_386}),
instr(.SETNGE, ops1(.rm8), Op2(0x0F, 0x9C), .M, .ZO, .{_386}),
instr(.SETNL, ops1(.rm8), Op2(0x0F, 0x9D), .M, .ZO, .{_386}),
instr(.SETNLE, ops1(.rm8), Op2(0x0F, 0x9F), .M, .ZO, .{_386}),
instr(.SETNO, ops1(.rm8), Op2(0x0F, 0x91), .M, .ZO, .{_386}),
instr(.SETNP, ops1(.rm8), Op2(0x0F, 0x9B), .M, .ZO, .{_386}),
instr(.SETNS, ops1(.rm8), Op2(0x0F, 0x99), .M, .ZO, .{_386}),
instr(.SETNZ, ops1(.rm8), Op2(0x0F, 0x95), .M, .ZO, .{_386}),
instr(.SETO, ops1(.rm8), Op2(0x0F, 0x90), .M, .ZO, .{_386}),
instr(.SETP, ops1(.rm8), Op2(0x0F, 0x9A), .M, .ZO, .{_386}),
instr(.SETPE, ops1(.rm8), Op2(0x0F, 0x9A), .M, .ZO, .{_386}),
instr(.SETPO, ops1(.rm8), Op2(0x0F, 0x9B), .M, .ZO, .{_386}),
instr(.SETS, ops1(.rm8), Op2(0x0F, 0x98), .M, .ZO, .{_386}),
instr(.SETZ, ops1(.rm8), Op2(0x0F, 0x94), .M, .ZO, .{_386}),
// SHLD
instr(.SHLD, ops3(.rm16, .reg16, .imm8), Op2(0x0F, 0xA4), .MRI, .Op16, .{_386}),
instr(.SHLD, ops3(.rm32, .reg32, .imm8), Op2(0x0F, 0xA4), .MRI, .Op32, .{_386}),
instr(.SHLD, ops3(.rm64, .reg64, .imm8), Op2(0x0F, 0xA4), .MRI, .REX_W, .{x86_64}),
//
instr(.SHLD, ops3(.rm16, .reg16, .reg_cl), Op2(0x0F, 0xA5), .MR, .Op16, .{_386}),
instr(.SHLD, ops3(.rm32, .reg32, .reg_cl), Op2(0x0F, 0xA5), .MR, .Op32, .{_386}),
instr(.SHLD, ops3(.rm64, .reg64, .reg_cl), Op2(0x0F, 0xA5), .MR, .REX_W, .{x86_64}),
// SHRD
instr(.SHRD, ops3(.rm16, .reg16, .imm8), Op2(0x0F, 0xAC), .MRI, .Op16, .{_386}),
instr(.SHRD, ops3(.rm32, .reg32, .imm8), Op2(0x0F, 0xAC), .MRI, .Op32, .{_386}),
instr(.SHRD, ops3(.rm64, .reg64, .imm8), Op2(0x0F, 0xAC), .MRI, .REX_W, .{x86_64}),
//
instr(.SHRD, ops3(.rm16, .reg16, .reg_cl), Op2(0x0F, 0xAD), .MR, .Op16, .{_386}),
instr(.SHRD, ops3(.rm32, .reg32, .reg_cl), Op2(0x0F, 0xAD), .MR, .Op32, .{_386}),
instr(.SHRD, ops3(.rm64, .reg64, .reg_cl), Op2(0x0F, 0xAD), .MR, .REX_W, .{x86_64}),
// XBTS
instr(.XBTS, ops2(.reg16, .rm16), Op2(0x0F, 0xA6), .RM, .Op16, .{ _386_Legacy, No64 }),
instr(.XBTS, ops2(.reg32, .rm32), Op2(0x0F, 0xA6), .RM, .Op32, .{ _386_Legacy, No64 }),
//
// 80486
instr(.BSWAP, ops1(.reg16), Op2(0x0F, 0xC8), .O, .Op16, .{ _486, edge.Undefined }),
instr(.BSWAP, ops1(.reg32), Op2(0x0F, 0xC8), .O, .Op32, .{_486}),
instr(.BSWAP, ops1(.reg64), Op2(0x0F, 0xC8), .O, .REX_W, .{x86_64}),
// CMPXCHG
instr(.CMPXCHG, ops2(.rm8, .reg8), Op2(0x0F, 0xB0), .MR, .ZO, .{ _486, Lock, Hle }),
instr(.CMPXCHG, ops2(.rm16, .reg16), Op2(0x0F, 0xB1), .MR, .Op16, .{ _486, Lock, Hle }),
instr(.CMPXCHG, ops2(.rm32, .reg32), Op2(0x0F, 0xB1), .MR, .Op32, .{ _486, Lock, Hle }),
instr(.CMPXCHG, ops2(.rm64, .reg64), Op2(0x0F, 0xB1), .MR, .REX_W, .{ x86_64, Lock, Hle }),
// INVD
instr(.INVD, ops0(), Op2(0x0F, 0x08), .ZO, .ZO, .{_486}),
// INVLPG
instr(.INVLPG, ops1(.rm_mem), Op2r(0x0F, 0x01, 7), .M, .ZO, .{_486}),
// WBINVD
instr(.WBINVD, ops0(), Op2(0x0F, 0x09), .ZO, .ZO, .{_486}),
// XADD
instr(.XADD, ops2(.rm8, .reg8), Op2(0x0F, 0xC0), .MR, .ZO, .{_486}),
instr(.XADD, ops2(.rm16, .reg16), Op2(0x0F, 0xC1), .MR, .Op16, .{_486}),
instr(.XADD, ops2(.rm32, .reg32), Op2(0x0F, 0xC1), .MR, .Op32, .{_486}),
instr(.XADD, ops2(.rm64, .reg64), Op2(0x0F, 0xC1), .MR, .REX_W, .{x86_64}),
//
// Pentium
//
instr(.CPUID, ops0(), Op2(0x0F, 0xA2), .ZO, .ZO, .{cpu.CPUID}),
// CMPXCHG8B / CMPXCHG16B
instr(.CMPXCHG8B, ops1(.rm_mem64), Op2r(0x0F, 0xC7, 1), .M, .ZO, .{ cpu.CX8, Lock, Hle }),
instr(.CMPXCHG16B, ops1(.rm_mem128), Op2r(0x0F, 0xC7, 1), .M, .REX_W, .{ cpu.CX16, Lock, Hle }),
// RDMSR
instr(.RDMSR, ops0(), Op2(0x0F, 0x32), .ZO, .ZO, .{cpu.MSR}),
// RDTSC
instr(.RDTSC, ops0(), Op2(0x0F, 0x31), .ZO, .ZO, .{cpu.TSC}),
// WRMSR
instr(.WRMSR, ops0(), Op2(0x0F, 0x30), .ZO, .ZO, .{cpu.MSR}),
// RSM
instr(.RSM, ops0(), Op2(0x0F, 0xAA), .ZO, .ZO, .{cpu.RSM}),
//
// Pentium MMX
//
// RDPMC
instr(.RDPMC, ops0(), Op2(0x0F, 0x33), .ZO, .ZO, .{P6}),
// EMMS
instr(.EMMS, ops0(), preOp2(._NP, 0x0F, 0x77), .ZO, .ZO, .{cpu.MMX}),
//
// K6
//
instr(.SYSCALL, ops0(), Op2(0x0F, 0x05), .ZO, .ZO, .{ cpu.SYSCALL, cpu.Intel, No32 }),
instr(.SYSCALL, ops0(), Op2(0x0F, 0x05), .ZO, .ZO, .{ cpu.SYSCALL, cpu.Amd }),
instr(.SYSCALL, ops0(), Op2(0x0F, 0x05), .ZO, .ZO, .{ cpu.SYSCALL, No32 }),
//
instr(.SYSRET, ops0(), Op2(0x0F, 0x07), .ZO, .ZO, .{ cpu.SYSCALL, cpu.Intel, No32 }),
instr(.SYSRET, ops0(), Op2(0x0F, 0x07), .ZO, .ZO, .{ cpu.SYSCALL, cpu.Amd }),
instr(.SYSRET, ops0(), Op2(0x0F, 0x07), .ZO, .ZO, .{ cpu.SYSCALL, No32 }),
instr(.SYSRETQ, ops0(), Op2(0x0F, 0x07), .ZO, .REX_W, .{x86_64}),
//
// Pentium Pro
//
// CMOVcc
instr(.CMOVA, ops2(.reg16, .rm16), Op2(0x0F, 0x47), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVA, ops2(.reg32, .rm32), Op2(0x0F, 0x47), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVA, ops2(.reg64, .rm64), Op2(0x0F, 0x47), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVAE, ops2(.reg16, .rm16), Op2(0x0F, 0x43), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVAE, ops2(.reg32, .rm32), Op2(0x0F, 0x43), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVAE, ops2(.reg64, .rm64), Op2(0x0F, 0x43), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVB, ops2(.reg16, .rm16), Op2(0x0F, 0x42), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVB, ops2(.reg32, .rm32), Op2(0x0F, 0x42), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVB, ops2(.reg64, .rm64), Op2(0x0F, 0x42), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVBE, ops2(.reg16, .rm16), Op2(0x0F, 0x46), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVBE, ops2(.reg32, .rm32), Op2(0x0F, 0x46), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVBE, ops2(.reg64, .rm64), Op2(0x0F, 0x46), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVC, ops2(.reg16, .rm16), Op2(0x0F, 0x42), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVC, ops2(.reg32, .rm32), Op2(0x0F, 0x42), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVC, ops2(.reg64, .rm64), Op2(0x0F, 0x42), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVE, ops2(.reg16, .rm16), Op2(0x0F, 0x44), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVE, ops2(.reg32, .rm32), Op2(0x0F, 0x44), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVE, ops2(.reg64, .rm64), Op2(0x0F, 0x44), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVG, ops2(.reg16, .rm16), Op2(0x0F, 0x4F), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVG, ops2(.reg32, .rm32), Op2(0x0F, 0x4F), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVG, ops2(.reg64, .rm64), Op2(0x0F, 0x4F), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVGE, ops2(.reg16, .rm16), Op2(0x0F, 0x4D), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVGE, ops2(.reg32, .rm32), Op2(0x0F, 0x4D), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVGE, ops2(.reg64, .rm64), Op2(0x0F, 0x4D), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVL, ops2(.reg16, .rm16), Op2(0x0F, 0x4C), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVL, ops2(.reg32, .rm32), Op2(0x0F, 0x4C), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVL, ops2(.reg64, .rm64), Op2(0x0F, 0x4C), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVLE, ops2(.reg16, .rm16), Op2(0x0F, 0x4E), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVLE, ops2(.reg32, .rm32), Op2(0x0F, 0x4E), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVLE, ops2(.reg64, .rm64), Op2(0x0F, 0x4E), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNA, ops2(.reg16, .rm16), Op2(0x0F, 0x46), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNA, ops2(.reg32, .rm32), Op2(0x0F, 0x46), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNA, ops2(.reg64, .rm64), Op2(0x0F, 0x46), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNAE, ops2(.reg16, .rm16), Op2(0x0F, 0x42), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNAE, ops2(.reg32, .rm32), Op2(0x0F, 0x42), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNAE, ops2(.reg64, .rm64), Op2(0x0F, 0x42), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNB, ops2(.reg16, .rm16), Op2(0x0F, 0x43), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNB, ops2(.reg32, .rm32), Op2(0x0F, 0x43), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNB, ops2(.reg64, .rm64), Op2(0x0F, 0x43), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNBE, ops2(.reg16, .rm16), Op2(0x0F, 0x47), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNBE, ops2(.reg32, .rm32), Op2(0x0F, 0x47), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNBE, ops2(.reg64, .rm64), Op2(0x0F, 0x47), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNC, ops2(.reg16, .rm16), Op2(0x0F, 0x43), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNC, ops2(.reg32, .rm32), Op2(0x0F, 0x43), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNC, ops2(.reg64, .rm64), Op2(0x0F, 0x43), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNE, ops2(.reg16, .rm16), Op2(0x0F, 0x45), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNE, ops2(.reg32, .rm32), Op2(0x0F, 0x45), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNE, ops2(.reg64, .rm64), Op2(0x0F, 0x45), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNG, ops2(.reg16, .rm16), Op2(0x0F, 0x4E), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNG, ops2(.reg32, .rm32), Op2(0x0F, 0x4E), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNG, ops2(.reg64, .rm64), Op2(0x0F, 0x4E), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNGE, ops2(.reg16, .rm16), Op2(0x0F, 0x4C), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNGE, ops2(.reg32, .rm32), Op2(0x0F, 0x4C), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNGE, ops2(.reg64, .rm64), Op2(0x0F, 0x4C), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNL, ops2(.reg16, .rm16), Op2(0x0F, 0x4D), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNL, ops2(.reg32, .rm32), Op2(0x0F, 0x4D), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNL, ops2(.reg64, .rm64), Op2(0x0F, 0x4D), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNLE, ops2(.reg16, .rm16), Op2(0x0F, 0x4F), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNLE, ops2(.reg32, .rm32), Op2(0x0F, 0x4F), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNLE, ops2(.reg64, .rm64), Op2(0x0F, 0x4F), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNO, ops2(.reg16, .rm16), Op2(0x0F, 0x41), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNO, ops2(.reg32, .rm32), Op2(0x0F, 0x41), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNO, ops2(.reg64, .rm64), Op2(0x0F, 0x41), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNP, ops2(.reg16, .rm16), Op2(0x0F, 0x4B), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNP, ops2(.reg32, .rm32), Op2(0x0F, 0x4B), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNP, ops2(.reg64, .rm64), Op2(0x0F, 0x4B), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNS, ops2(.reg16, .rm16), Op2(0x0F, 0x49), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNS, ops2(.reg32, .rm32), Op2(0x0F, 0x49), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNS, ops2(.reg64, .rm64), Op2(0x0F, 0x49), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVNZ, ops2(.reg16, .rm16), Op2(0x0F, 0x45), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVNZ, ops2(.reg32, .rm32), Op2(0x0F, 0x45), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVNZ, ops2(.reg64, .rm64), Op2(0x0F, 0x45), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVO, ops2(.reg16, .rm16), Op2(0x0F, 0x40), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVO, ops2(.reg32, .rm32), Op2(0x0F, 0x40), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVO, ops2(.reg64, .rm64), Op2(0x0F, 0x40), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVP, ops2(.reg16, .rm16), Op2(0x0F, 0x4A), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVP, ops2(.reg32, .rm32), Op2(0x0F, 0x4A), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVP, ops2(.reg64, .rm64), Op2(0x0F, 0x4A), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVPE, ops2(.reg16, .rm16), Op2(0x0F, 0x4A), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVPE, ops2(.reg32, .rm32), Op2(0x0F, 0x4A), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVPE, ops2(.reg64, .rm64), Op2(0x0F, 0x4A), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVPO, ops2(.reg16, .rm16), Op2(0x0F, 0x4B), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVPO, ops2(.reg32, .rm32), Op2(0x0F, 0x4B), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVPO, ops2(.reg64, .rm64), Op2(0x0F, 0x4B), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVS, ops2(.reg16, .rm16), Op2(0x0F, 0x48), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVS, ops2(.reg32, .rm32), Op2(0x0F, 0x48), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVS, ops2(.reg64, .rm64), Op2(0x0F, 0x48), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
//
instr(.CMOVZ, ops2(.reg16, .rm16), Op2(0x0F, 0x44), .RM, .Op16, .{cpu.CMOV}),
instr(.CMOVZ, ops2(.reg32, .rm32), Op2(0x0F, 0x44), .RM, .Op32, .{cpu.CMOV}),
instr(.CMOVZ, ops2(.reg64, .rm64), Op2(0x0F, 0x44), .RM, .REX_W, .{ cpu.CMOV, x86_64 }),
// UD (first documented, but actually available since 80186)
instr(.UD0, ops0(), Op2(0x0F, 0xFF), .ZO, .ZO, .{ _186_Legacy, No64 }),
instr(.UD0, ops2(.reg16, .rm16), Op2(0x0F, 0xFF), .RM, .Op16, .{_186}),
instr(.UD0, ops2(.reg32, .rm32), Op2(0x0F, 0xFF), .RM, .Op32, .{_186}),
instr(.UD0, ops2(.reg64, .rm64), Op2(0x0F, 0xFF), .RM, .REX_W, .{x86_64}),
//
instr(.UD1, ops2(.reg16, .rm16), Op2(0x0F, 0xB9), .RM, .Op16, .{_186}),
instr(.UD1, ops2(.reg32, .rm32), Op2(0x0F, 0xB9), .RM, .Op32, .{_186}),
instr(.UD1, ops2(.reg64, .rm64), Op2(0x0F, 0xB9), .RM, .REX_W, .{x86_64}),
//
instr(.UD2, ops0(), Op2(0x0F, 0x0B), .ZO, .ZO, .{P6}),
//
// Pentium II
//
instr(.SYSENTER, ops0(), Op2(0x0F, 0x34), .ZO, .ZO, .{ cpu.SEP, cpu.Amd, No64 }),
instr(.SYSENTER, ops0(), Op2(0x0F, 0x34), .ZO, .ZO, .{ cpu.SEP, cpu.Intel }),
instr(.SYSENTER, ops0(), Op2(0x0F, 0x34), .ZO, .ZO, .{ cpu.SEP, No64 }),
//
instr(.SYSEXIT, ops0(), Op2(0x0F, 0x35), .ZO, .ZO, .{ cpu.SEP, cpu.Amd, No64 }),
instr(.SYSEXIT, ops0(), Op2(0x0F, 0x35), .ZO, .ZO, .{ cpu.SEP, cpu.Intel }),
instr(.SYSEXIT, ops0(), Op2(0x0F, 0x35), .ZO, .ZO, .{ cpu.SEP, No64 }),
//
instr(.SYSEXITQ, ops0(), Op2(0x0F, 0x35), .ZO, .REX_W, .{ cpu.SEP, x86_64, cpu.Intel, No32 }),
//
// x86-64
//
instr(.RDTSCP, ops0(), Op3(0x0F, 0x01, 0xF9), .ZO, .ZO, .{x86_64}),
instr(.SWAPGS, ops0(), Op3(0x0F, 0x01, 0xF8), .ZO, .ZO, .{x86_64}),
//
// bit manipulation (ABM / BMI1 / BMI2)
//
// LZCNT
instr(.LZCNT, ops2(.reg16, .rm16), preOp2(._F3, 0x0F, 0xBD), .RM, .Op16, .{cpu.LZCNT}),
instr(.LZCNT, ops2(.reg32, .rm32), preOp2(._F3, 0x0F, 0xBD), .RM, .Op32, .{cpu.LZCNT}),
instr(.LZCNT, ops2(.reg64, .rm64), preOp2(._F3, 0x0F, 0xBD), .RM, .REX_W, .{ cpu.LZCNT, x86_64 }),
    // POPCNT
instr(.POPCNT, ops2(.reg16, .rm16), preOp2(._F3, 0x0F, 0xB8), .RM, .Op16, .{cpu.POPCNT}),
instr(.POPCNT, ops2(.reg32, .rm32), preOp2(._F3, 0x0F, 0xB8), .RM, .Op32, .{cpu.POPCNT}),
instr(.POPCNT, ops2(.reg64, .rm64), preOp2(._F3, 0x0F, 0xB8), .RM, .REX_W, .{ cpu.POPCNT, x86_64 }),
// ANDN
instr(.ANDN, ops3(.reg32, .reg32, .rm32), vex(.LZ, ._NP, ._0F38, .W0, 0xF2), .RVM, .ZO, .{cpu.BMI1}),
instr(.ANDN, ops3(.reg64, .reg64, .rm64), vex(.LZ, ._NP, ._0F38, .W1, 0xF2), .RVM, .ZO, .{cpu.BMI1}),
// BEXTR
instr(.BEXTR, ops3(.reg32, .rm32, .reg32), vex(.LZ, ._NP, ._0F38, .W0, 0xF7), .RMV, .ZO, .{cpu.BMI1}),
instr(.BEXTR, ops3(.reg64, .rm64, .reg64), vex(.LZ, ._NP, ._0F38, .W1, 0xF7), .RMV, .ZO, .{cpu.BMI1}),
instr(.BEXTR, ops3(.reg32, .rm32, .imm32), xop(.LZ, ._NP, ._0Ah, .W0, 0x10), .vRMI, .ZO, .{cpu.TBM}),
instr(.BEXTR, ops3(.reg64, .rm64, .imm32), xop(.LZ, ._NP, ._0Ah, .W1, 0x10), .vRMI, .ZO, .{cpu.TBM}),
// BLSI
instr(.BLSI, ops2(.reg32, .rm32), vexr(.LZ, ._NP, ._0F38, .W0, 0xF3, 3), .VM, .ZO, .{cpu.BMI1}),
instr(.BLSI, ops2(.reg64, .rm64), vexr(.LZ, ._NP, ._0F38, .W1, 0xF3, 3), .VM, .ZO, .{cpu.BMI1}),
// BLSMSK
instr(.BLSMSK, ops2(.reg32, .rm32), vexr(.LZ, ._NP, ._0F38, .W0, 0xF3, 2), .VM, .ZO, .{cpu.BMI1}),
instr(.BLSMSK, ops2(.reg64, .rm64), vexr(.LZ, ._NP, ._0F38, .W1, 0xF3, 2), .VM, .ZO, .{cpu.BMI1}),
// BLSR
instr(.BLSR, ops2(.reg32, .rm32), vexr(.LZ, ._NP, ._0F38, .W0, 0xF3, 1), .VM, .ZO, .{cpu.BMI1}),
instr(.BLSR, ops2(.reg64, .rm64), vexr(.LZ, ._NP, ._0F38, .W1, 0xF3, 1), .VM, .ZO, .{cpu.BMI1}),
// BZHI
instr(.BZHI, ops3(.reg32, .rm32, .reg32), vex(.LZ, ._NP, ._0F38, .W0, 0xF5), .RMV, .ZO, .{cpu.BMI2}),
instr(.BZHI, ops3(.reg64, .rm64, .reg64), vex(.LZ, ._NP, ._0F38, .W1, 0xF5), .RMV, .ZO, .{cpu.BMI2}),
// MULX
instr(.MULX, ops3(.reg32, .reg32, .rm32), vex(.LZ, ._F2, ._0F38, .W0, 0xF6), .RVM, .ZO, .{cpu.BMI2}),
instr(.MULX, ops3(.reg64, .reg64, .rm64), vex(.LZ, ._F2, ._0F38, .W1, 0xF6), .RVM, .ZO, .{cpu.BMI2}),
// PDEP
instr(.PDEP, ops3(.reg32, .reg32, .rm32), vex(.LZ, ._F2, ._0F38, .W0, 0xF5), .RVM, .ZO, .{cpu.BMI2}),
instr(.PDEP, ops3(.reg64, .reg64, .rm64), vex(.LZ, ._F2, ._0F38, .W1, 0xF5), .RVM, .ZO, .{cpu.BMI2}),
// PEXT
instr(.PEXT, ops3(.reg32, .reg32, .rm32), vex(.LZ, ._F3, ._0F38, .W0, 0xF5), .RVM, .ZO, .{cpu.BMI2}),
instr(.PEXT, ops3(.reg64, .reg64, .rm64), vex(.LZ, ._F3, ._0F38, .W1, 0xF5), .RVM, .ZO, .{cpu.BMI2}),
// RORX
instr(.RORX, ops3(.reg32, .rm32, .imm8), vex(.LZ, ._F2, ._0F3A, .W0, 0xF0), .vRMI, .ZO, .{cpu.BMI2}),
instr(.RORX, ops3(.reg64, .rm64, .imm8), vex(.LZ, ._F2, ._0F3A, .W1, 0xF0), .vRMI, .ZO, .{cpu.BMI2}),
// SARX
instr(.SARX, ops3(.reg32, .rm32, .reg32), vex(.LZ, ._F3, ._0F38, .W0, 0xF7), .RMV, .ZO, .{cpu.BMI2}),
instr(.SARX, ops3(.reg64, .rm64, .reg64), vex(.LZ, ._F3, ._0F38, .W1, 0xF7), .RMV, .ZO, .{cpu.BMI2}),
// SHLX
instr(.SHLX, ops3(.reg32, .rm32, .reg32), vex(.LZ, ._66, ._0F38, .W0, 0xF7), .RMV, .ZO, .{cpu.BMI2}),
instr(.SHLX, ops3(.reg64, .rm64, .reg64), vex(.LZ, ._66, ._0F38, .W1, 0xF7), .RMV, .ZO, .{cpu.BMI2}),
// SHRX
instr(.SHRX, ops3(.reg32, .rm32, .reg32), vex(.LZ, ._F2, ._0F38, .W0, 0xF7), .RMV, .ZO, .{cpu.BMI2}),
instr(.SHRX, ops3(.reg64, .rm64, .reg64), vex(.LZ, ._F2, ._0F38, .W1, 0xF7), .RMV, .ZO, .{cpu.BMI2}),
// TZCNT
instr(.TZCNT, ops2(.reg16, .rm16), preOp2(._F3, 0x0F, 0xBC), .RM, .Op16, .{cpu.BMI1}),
instr(.TZCNT, ops2(.reg32, .rm32), preOp2(._F3, 0x0F, 0xBC), .RM, .Op32, .{cpu.BMI1}),
instr(.TZCNT, ops2(.reg64, .rm64), preOp2(._F3, 0x0F, 0xBC), .RM, .REX_W, .{ cpu.BMI1, x86_64 }),
//
// XOP opcodes
//
//
// TBM
//
// BEXTR
// see above .BEXTR
// BLCFILL
instr(.BLCFILL, ops2(.reg32, .rm32), xopr(.LZ, ._NP, ._09h, .W0, 0x01, 1), .VM, .ZO, .{cpu.TBM}),
instr(.BLCFILL, ops2(.reg64, .rm64), xopr(.LZ, ._NP, ._09h, .W1, 0x01, 1), .VM, .ZO, .{cpu.TBM}),
// BLCI
instr(.BLCI, ops2(.reg32, .rm32), xopr(.LZ, ._NP, ._09h, .W0, 0x02, 6), .VM, .ZO, .{cpu.TBM}),
instr(.BLCI, ops2(.reg64, .rm64), xopr(.LZ, ._NP, ._09h, .W1, 0x02, 6), .VM, .ZO, .{cpu.TBM}),
// BLCIC
instr(.BLCIC, ops2(.reg32, .rm32), xopr(.LZ, ._NP, ._09h, .W0, 0x01, 5), .VM, .ZO, .{cpu.TBM}),
instr(.BLCIC, ops2(.reg64, .rm64), xopr(.LZ, ._NP, ._09h, .W1, 0x01, 5), .VM, .ZO, .{cpu.TBM}),
// BLCMSK
instr(.BLCMSK, ops2(.reg32, .rm32), xopr(.LZ, ._NP, ._09h, .W0, 0x02, 1), .VM, .ZO, .{cpu.TBM}),
instr(.BLCMSK, ops2(.reg64, .rm64), xopr(.LZ, ._NP, ._09h, .W1, 0x02, 1), .VM, .ZO, .{cpu.TBM}),
// BLCS
instr(.BLCS, ops2(.reg32, .rm32), xopr(.LZ, ._NP, ._09h, .W0, 0x01, 3), .VM, .ZO, .{cpu.TBM}),
instr(.BLCS, ops2(.reg64, .rm64), xopr(.LZ, ._NP, ._09h, .W1, 0x01, 3), .VM, .ZO, .{cpu.TBM}),
// BLSFILL
instr(.BLSFILL, ops2(.reg32, .rm32), xopr(.LZ, ._NP, ._09h, .W0, 0x01, 2), .VM, .ZO, .{cpu.TBM}),
instr(.BLSFILL, ops2(.reg64, .rm64), xopr(.LZ, ._NP, ._09h, .W1, 0x01, 2), .VM, .ZO, .{cpu.TBM}),
// BLSIC
instr(.BLSIC, ops2(.reg32, .rm32), xopr(.LZ, ._NP, ._09h, .W0, 0x01, 6), .VM, .ZO, .{cpu.TBM}),
instr(.BLSIC, ops2(.reg64, .rm64), xopr(.LZ, ._NP, ._09h, .W1, 0x01, 6), .VM, .ZO, .{cpu.TBM}),
// T1MSKC
instr(.T1MSKC, ops2(.reg32, .rm32), xopr(.LZ, ._NP, ._09h, .W0, 0x01, 7), .VM, .ZO, .{cpu.TBM}),
instr(.T1MSKC, ops2(.reg64, .rm64), xopr(.LZ, ._NP, ._09h, .W1, 0x01, 7), .VM, .ZO, .{cpu.TBM}),
// TZMSK
instr(.TZMSK, ops2(.reg32, .rm32), xopr(.LZ, ._NP, ._09h, .W0, 0x01, 4), .VM, .ZO, .{cpu.TBM}),
instr(.TZMSK, ops2(.reg64, .rm64), xopr(.LZ, ._NP, ._09h, .W1, 0x01, 4), .VM, .ZO, .{cpu.TBM}),
//
// LWP
//
// LLWPCB
instr(.LLWPCB, ops1(.reg32), xopr(.LZ, ._NP, ._09h, .W0, 0x12, 0), .vM, .ZO, .{cpu.LWP}),
instr(.LLWPCB, ops1(.reg64), xopr(.LZ, ._NP, ._09h, .W1, 0x12, 0), .vM, .ZO, .{cpu.LWP}),
// LWPINS
instr(.LWPINS, ops3(.reg32, .rm32, .imm32), xopr(.LZ, ._NP, ._0Ah, .W0, 0x12, 0), .VMI, .ZO, .{cpu.LWP}),
instr(.LWPINS, ops3(.reg64, .rm32, .imm32), xopr(.LZ, ._NP, ._0Ah, .W1, 0x12, 0), .VMI, .ZO, .{cpu.LWP}),
// LWPVAL
instr(.LWPVAL, ops3(.reg32, .rm32, .imm32), xopr(.LZ, ._NP, ._0Ah, .W0, 0x12, 1), .VMI, .ZO, .{cpu.LWP}),
instr(.LWPVAL, ops3(.reg64, .rm32, .imm32), xopr(.LZ, ._NP, ._0Ah, .W1, 0x12, 1), .VMI, .ZO, .{cpu.LWP}),
// SLWPCB
instr(.SLWPCB, ops1(.reg32), xopr(.LZ, ._NP, ._09h, .W0, 0x12, 1), .vM, .ZO, .{cpu.LWP}),
instr(.SLWPCB, ops1(.reg64), xopr(.LZ, ._NP, ._09h, .W1, 0x12, 1), .vM, .ZO, .{cpu.LWP}),
//
// XOP vector
//
// VFRCZPD
vec(.VFRCZPD, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0x81), .vRM, .{cpu.XOP}),
vec(.VFRCZPD, ops2(.ymml, .ymml_m256), xop(.L256, ._NP, ._09h, .W0, 0x81), .vRM, .{cpu.XOP}),
// VFRCZPS
vec(.VFRCZPS, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0x80), .vRM, .{cpu.XOP}),
vec(.VFRCZPS, ops2(.ymml, .ymml_m256), xop(.L256, ._NP, ._09h, .W0, 0x80), .vRM, .{cpu.XOP}),
// VFRCZSD
vec(.VFRCZSD, ops2(.xmml, .xmml_m64), xop(.LZ, ._NP, ._09h, .W0, 0x83), .vRM, .{cpu.XOP}),
// VFRCZSS
vec(.VFRCZSS, ops2(.xmml, .xmml_m32), xop(.LZ, ._NP, ._09h, .W0, 0x82), .vRM, .{cpu.XOP}),
// VPCMOV
vec(.VPCMOV, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0xA2), .RVMR, .{cpu.XOP}),
vec(.VPCMOV, ops4(.ymml, .ymml, .ymml_m256, .ymml), xop(.L256, ._NP, ._08h, .W0, 0xA2), .RVMR, .{cpu.XOP}),
vec(.VPCMOV, ops4(.xmml, .xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._08h, .W1, 0xA2), .RVRM, .{cpu.XOP}),
vec(.VPCMOV, ops4(.ymml, .ymml, .ymml, .ymml_m256), xop(.L256, ._NP, ._08h, .W1, 0xA2), .RVRM, .{cpu.XOP}),
// VPCOMB / VPCOMW / VPCOMD / VPCOMQ
vec(.VPCOMB, ops4(.xmml, .xmml, .xmml_m128, .imm8), xop(.LZ, ._NP, ._08h, .W0, 0xCC), .RVMI, .{cpu.XOP}),
vec(.VPCOMW, ops4(.xmml, .xmml, .xmml_m128, .imm8), xop(.LZ, ._NP, ._08h, .W0, 0xCD), .RVMI, .{cpu.XOP}),
vec(.VPCOMD, ops4(.xmml, .xmml, .xmml_m128, .imm8), xop(.LZ, ._NP, ._08h, .W0, 0xCE), .RVMI, .{cpu.XOP}),
vec(.VPCOMQ, ops4(.xmml, .xmml, .xmml_m128, .imm8), xop(.LZ, ._NP, ._08h, .W0, 0xCF), .RVMI, .{cpu.XOP}),
// VPCOMUB / VPCOMUW / VPCOMUD / VPCOMUQ
vec(.VPCOMUB, ops4(.xmml, .xmml, .xmml_m128, .imm8), xop(.LZ, ._NP, ._08h, .W0, 0xEC), .RVMI, .{cpu.XOP}),
vec(.VPCOMUW, ops4(.xmml, .xmml, .xmml_m128, .imm8), xop(.LZ, ._NP, ._08h, .W0, 0xED), .RVMI, .{cpu.XOP}),
vec(.VPCOMUD, ops4(.xmml, .xmml, .xmml_m128, .imm8), xop(.LZ, ._NP, ._08h, .W0, 0xEE), .RVMI, .{cpu.XOP}),
vec(.VPCOMUQ, ops4(.xmml, .xmml, .xmml_m128, .imm8), xop(.LZ, ._NP, ._08h, .W0, 0xEF), .RVMI, .{cpu.XOP}),
// VPERMIL2PD
vec(.VPERMIL2PD, ops5(.xmml, .xmml, .xmml_m128, .xmml, .imm8), vex(.L128, ._66, ._0F3A, .W0, 0x49), .RVMRI, .{cpu.XOP}),
vec(.VPERMIL2PD, ops5(.xmml, .xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .W1, 0x49), .RVRMI, .{cpu.XOP}),
vec(.VPERMIL2PD, ops5(.ymml, .ymml, .ymml_m256, .ymml, .imm8), vex(.L256, ._66, ._0F3A, .W0, 0x49), .RVMRI, .{cpu.XOP}),
vec(.VPERMIL2PD, ops5(.ymml, .ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .W1, 0x49), .RVRMI, .{cpu.XOP}),
// VPERMIL2PS
vec(.VPERMIL2PS, ops5(.xmml, .xmml, .xmml_m128, .xmml, .imm8), vex(.L128, ._66, ._0F3A, .W0, 0x48), .RVMRI, .{cpu.XOP}),
vec(.VPERMIL2PS, ops5(.xmml, .xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .W1, 0x48), .RVRMI, .{cpu.XOP}),
vec(.VPERMIL2PS, ops5(.ymml, .ymml, .ymml_m256, .ymml, .imm8), vex(.L256, ._66, ._0F3A, .W0, 0x48), .RVMRI, .{cpu.XOP}),
vec(.VPERMIL2PS, ops5(.ymml, .ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .W1, 0x48), .RVRMI, .{cpu.XOP}),
    // VPHADDBW / VPHADDBD / VPHADDBQ
vec(.VPHADDBW, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xC1), .vRM, .{cpu.XOP}),
vec(.VPHADDBD, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xC2), .vRM, .{cpu.XOP}),
vec(.VPHADDBQ, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xC3), .vRM, .{cpu.XOP}),
// VPHADDWD / VPHADDWQ
vec(.VPHADDWD, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xC6), .vRM, .{cpu.XOP}),
vec(.VPHADDWQ, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xC7), .vRM, .{cpu.XOP}),
// VPHADDDQ
vec(.VPHADDDQ, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xCB), .vRM, .{cpu.XOP}),
    // VPHADDUBW / VPHADDUBD / VPHADDUBQ
vec(.VPHADDUBW, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xD1), .vRM, .{cpu.XOP}),
vec(.VPHADDUBD, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xD2), .vRM, .{cpu.XOP}),
vec(.VPHADDUBQ, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xD3), .vRM, .{cpu.XOP}),
// VPHADDUWD / VPHADDUWQ
vec(.VPHADDUWD, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xD6), .vRM, .{cpu.XOP}),
vec(.VPHADDUWQ, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xD7), .vRM, .{cpu.XOP}),
// VPHADDUDQ
vec(.VPHADDUDQ, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xDB), .vRM, .{cpu.XOP}),
// VPHSUBBW
vec(.VPHSUBBW, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xE1), .vRM, .{cpu.XOP}),
// VPHSUBDQ
vec(.VPHSUBDQ, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xE3), .vRM, .{cpu.XOP}),
// VPHSUBWD
vec(.VPHSUBWD, ops2(.xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W0, 0xE2), .vRM, .{cpu.XOP}),
// VPMACSDD
vec(.VPMACSDD, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0x9E), .RVMR, .{cpu.XOP}),
// VPMACSDQH
vec(.VPMACSDQH, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0x9F), .RVMR, .{cpu.XOP}),
// VPMACSDQL
vec(.VPMACSDQL, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0x97), .RVMR, .{cpu.XOP}),
// VPMACSSDD
vec(.VPMACSSDD, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0x8E), .RVMR, .{cpu.XOP}),
// VPMACSSDQH
vec(.VPMACSSDQH, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0x8F), .RVMR, .{cpu.XOP}),
// VPMACSSDQL
vec(.VPMACSSDQL, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0x87), .RVMR, .{cpu.XOP}),
// VPMACSSWD
vec(.VPMACSSWD, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0x86), .RVMR, .{cpu.XOP}),
// VPMACSSWW
vec(.VPMACSSWW, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0x85), .RVMR, .{cpu.XOP}),
// VPMACSWD
vec(.VPMACSWD, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0x96), .RVMR, .{cpu.XOP}),
// VPMACSWW
vec(.VPMACSWW, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0x95), .RVMR, .{cpu.XOP}),
// VPMADCSSWD
vec(.VPMADCSSWD, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0xA6), .RVMR, .{cpu.XOP}),
// VPMADCSWD
vec(.VPMADCSWD, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0xB6), .RVMR, .{cpu.XOP}),
// VPPERM
vec(.VPPERM, ops4(.xmml, .xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._08h, .W0, 0xA3), .RVMR, .{cpu.XOP}),
vec(.VPPERM, ops4(.xmml, .xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._08h, .W1, 0xA3), .RVRM, .{cpu.XOP}),
// VPROTB / VPROTW / VPROTD / VPROTQ
// VPROTB
vec(.VPROTB, ops3(.xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._09h, .W0, 0x90), .RMV, .{cpu.XOP}),
vec(.VPROTB, ops3(.xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W1, 0x90), .RVM, .{cpu.XOP}),
vec(.VPROTB, ops3(.xmml, .xmml_m128, .imm8), xop(.L128, ._NP, ._08h, .W0, 0xC0), .vRMI, .{cpu.XOP}),
// VPROTW
vec(.VPROTW, ops3(.xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._09h, .W0, 0x91), .RMV, .{cpu.XOP}),
vec(.VPROTW, ops3(.xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W1, 0x91), .RVM, .{cpu.XOP}),
vec(.VPROTW, ops3(.xmml, .xmml_m128, .imm8), xop(.L128, ._NP, ._08h, .W0, 0xC1), .vRMI, .{cpu.XOP}),
// VPROTD
vec(.VPROTD, ops3(.xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._09h, .W0, 0x92), .RMV, .{cpu.XOP}),
vec(.VPROTD, ops3(.xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W1, 0x92), .RVM, .{cpu.XOP}),
vec(.VPROTD, ops3(.xmml, .xmml_m128, .imm8), xop(.L128, ._NP, ._08h, .W0, 0xC2), .vRMI, .{cpu.XOP}),
// VPROTQ
vec(.VPROTQ, ops3(.xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._09h, .W0, 0x93), .RMV, .{cpu.XOP}),
vec(.VPROTQ, ops3(.xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W1, 0x93), .RVM, .{cpu.XOP}),
vec(.VPROTQ, ops3(.xmml, .xmml_m128, .imm8), xop(.L128, ._NP, ._08h, .W0, 0xC3), .vRMI, .{cpu.XOP}),
// VPSHAB / VPSHAW / VPSHAD / VPSHAQ
// VPSHAB
vec(.VPSHAB, ops3(.xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._09h, .W0, 0x98), .RMV, .{cpu.XOP}),
vec(.VPSHAB, ops3(.xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W1, 0x98), .RVM, .{cpu.XOP}),
// VPSHAW
vec(.VPSHAW, ops3(.xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._09h, .W0, 0x99), .RMV, .{cpu.XOP}),
vec(.VPSHAW, ops3(.xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W1, 0x99), .RVM, .{cpu.XOP}),
// VPSHAD
vec(.VPSHAD, ops3(.xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._09h, .W0, 0x9A), .RMV, .{cpu.XOP}),
vec(.VPSHAD, ops3(.xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W1, 0x9A), .RVM, .{cpu.XOP}),
// VPSHAQ
vec(.VPSHAQ, ops3(.xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._09h, .W0, 0x9B), .RMV, .{cpu.XOP}),
vec(.VPSHAQ, ops3(.xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W1, 0x9B), .RVM, .{cpu.XOP}),
// VPSHLB / VPSHLW / VPSHLD / VPSHLQ
// VPSHLB
vec(.VPSHLB, ops3(.xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._09h, .W0, 0x94), .RMV, .{cpu.XOP}),
vec(.VPSHLB, ops3(.xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W1, 0x94), .RVM, .{cpu.XOP}),
// VPSHLW
vec(.VPSHLW, ops3(.xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._09h, .W0, 0x95), .RMV, .{cpu.XOP}),
vec(.VPSHLW, ops3(.xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W1, 0x95), .RVM, .{cpu.XOP}),
// VPSHLD
vec(.VPSHLD, ops3(.xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._09h, .W0, 0x96), .RMV, .{cpu.XOP}),
vec(.VPSHLD, ops3(.xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W1, 0x96), .RVM, .{cpu.XOP}),
// VPSHLQ
vec(.VPSHLQ, ops3(.xmml, .xmml_m128, .xmml), xop(.L128, ._NP, ._09h, .W0, 0x97), .RMV, .{cpu.XOP}),
vec(.VPSHLQ, ops3(.xmml, .xmml, .xmml_m128), xop(.L128, ._NP, ._09h, .W1, 0x97), .RVM, .{cpu.XOP}),
//
// FMA4 (Fused Multiply Add 4 operands)
//
// VFMADDPD
vec(.VFMADDPD, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x69), .RVMR, .{cpu.FMA4}),
vec(.VFMADDPD, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x69), .RVMR, .{cpu.FMA4}),
vec(.VFMADDPD, ops4(.xmml, .xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F3A, .W1, 0x69), .RVRM, .{cpu.FMA4}),
vec(.VFMADDPD, ops4(.ymml, .ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F3A, .W1, 0x69), .RVRM, .{cpu.FMA4}),
// VFMADDPS
vec(.VFMADDPS, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x68), .RVMR, .{cpu.FMA4}),
vec(.VFMADDPS, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x68), .RVMR, .{cpu.FMA4}),
vec(.VFMADDPS, ops4(.xmml, .xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F3A, .W1, 0x68), .RVRM, .{cpu.FMA4}),
vec(.VFMADDPS, ops4(.ymml, .ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F3A, .W1, 0x68), .RVRM, .{cpu.FMA4}),
// VFMADDSD
vec(.VFMADDSD, ops4(.xmml, .xmml, .xmml_m64, .xmml), vex(.LIG, ._66, ._0F3A, .W0, 0x6B), .RVMR, .{cpu.FMA4}),
vec(.VFMADDSD, ops4(.xmml, .xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F3A, .W1, 0x6B), .RVRM, .{cpu.FMA4}),
// VFMADDSS
vec(.VFMADDSS, ops4(.xmml, .xmml, .xmml_m32, .xmml), vex(.LIG, ._66, ._0F3A, .W0, 0x6A), .RVMR, .{cpu.FMA4}),
vec(.VFMADDSS, ops4(.xmml, .xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F3A, .W1, 0x6A), .RVRM, .{cpu.FMA4}),
// VFMADDSUBPD
vec(.VFMADDSUBPD, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x5D), .RVMR, .{cpu.FMA4}),
vec(.VFMADDSUBPD, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x5D), .RVMR, .{cpu.FMA4}),
vec(.VFMADDSUBPD, ops4(.xmml, .xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F3A, .W1, 0x5D), .RVRM, .{cpu.FMA4}),
vec(.VFMADDSUBPD, ops4(.ymml, .ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F3A, .W1, 0x5D), .RVRM, .{cpu.FMA4}),
// VFMADDSUBPS
vec(.VFMADDSUBPS, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x5C), .RVMR, .{cpu.FMA4}),
vec(.VFMADDSUBPS, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x5C), .RVMR, .{cpu.FMA4}),
vec(.VFMADDSUBPS, ops4(.xmml, .xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F3A, .W1, 0x5C), .RVRM, .{cpu.FMA4}),
vec(.VFMADDSUBPS, ops4(.ymml, .ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F3A, .W1, 0x5C), .RVRM, .{cpu.FMA4}),
// VFMSUBADDPD
vec(.VFMSUBADDPD, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x5F), .RVMR, .{cpu.FMA4}),
vec(.VFMSUBADDPD, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x5F), .RVMR, .{cpu.FMA4}),
vec(.VFMSUBADDPD, ops4(.xmml, .xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F3A, .W1, 0x5F), .RVRM, .{cpu.FMA4}),
vec(.VFMSUBADDPD, ops4(.ymml, .ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F3A, .W1, 0x5F), .RVRM, .{cpu.FMA4}),
// VFMSUBADDPS
vec(.VFMSUBADDPS, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x5E), .RVMR, .{cpu.FMA4}),
vec(.VFMSUBADDPS, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x5E), .RVMR, .{cpu.FMA4}),
vec(.VFMSUBADDPS, ops4(.xmml, .xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F3A, .W1, 0x5E), .RVRM, .{cpu.FMA4}),
vec(.VFMSUBADDPS, ops4(.ymml, .ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F3A, .W1, 0x5E), .RVRM, .{cpu.FMA4}),
// VFMSUBPD
vec(.VFMSUBPD, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x6D), .RVMR, .{cpu.FMA4}),
vec(.VFMSUBPD, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x6D), .RVMR, .{cpu.FMA4}),
vec(.VFMSUBPD, ops4(.xmml, .xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F3A, .W1, 0x6D), .RVRM, .{cpu.FMA4}),
vec(.VFMSUBPD, ops4(.ymml, .ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F3A, .W1, 0x6D), .RVRM, .{cpu.FMA4}),
// VFMSUBPS
vec(.VFMSUBPS, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x6C), .RVMR, .{cpu.FMA4}),
vec(.VFMSUBPS, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x6C), .RVMR, .{cpu.FMA4}),
vec(.VFMSUBPS, ops4(.xmml, .xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F3A, .W1, 0x6C), .RVRM, .{cpu.FMA4}),
vec(.VFMSUBPS, ops4(.ymml, .ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F3A, .W1, 0x6C), .RVRM, .{cpu.FMA4}),
// VFMSUBSD
vec(.VFMSUBSD, ops4(.xmml, .xmml, .xmml_m64, .xmml), vex(.LIG, ._66, ._0F3A, .W0, 0x6F), .RVMR, .{cpu.FMA4}),
vec(.VFMSUBSD, ops4(.xmml, .xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F3A, .W1, 0x6F), .RVRM, .{cpu.FMA4}),
// VFMSUBSS
vec(.VFMSUBSS, ops4(.xmml, .xmml, .xmml_m32, .xmml), vex(.LIG, ._66, ._0F3A, .W0, 0x6E), .RVMR, .{cpu.FMA4}),
vec(.VFMSUBSS, ops4(.xmml, .xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F3A, .W1, 0x6E), .RVRM, .{cpu.FMA4}),
// VFNMADDPD
vec(.VFNMADDPD, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x79), .RVMR, .{cpu.FMA4}),
vec(.VFNMADDPD, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x79), .RVMR, .{cpu.FMA4}),
vec(.VFNMADDPD, ops4(.xmml, .xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F3A, .W1, 0x79), .RVRM, .{cpu.FMA4}),
vec(.VFNMADDPD, ops4(.ymml, .ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F3A, .W1, 0x79), .RVRM, .{cpu.FMA4}),
// VFNMADDPS
vec(.VFNMADDPS, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x78), .RVMR, .{cpu.FMA4}),
vec(.VFNMADDPS, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x78), .RVMR, .{cpu.FMA4}),
vec(.VFNMADDPS, ops4(.xmml, .xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F3A, .W1, 0x78), .RVRM, .{cpu.FMA4}),
vec(.VFNMADDPS, ops4(.ymml, .ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F3A, .W1, 0x78), .RVRM, .{cpu.FMA4}),
// VFNMADDSD
vec(.VFNMADDSD, ops4(.xmml, .xmml, .xmml_m64, .xmml), vex(.LIG, ._66, ._0F3A, .W0, 0x7B), .RVMR, .{cpu.FMA4}),
vec(.VFNMADDSD, ops4(.xmml, .xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F3A, .W1, 0x7B), .RVRM, .{cpu.FMA4}),
// VFNMADDSS
vec(.VFNMADDSS, ops4(.xmml, .xmml, .xmml_m32, .xmml), vex(.LIG, ._66, ._0F3A, .W0, 0x7A), .RVMR, .{cpu.FMA4}),
vec(.VFNMADDSS, ops4(.xmml, .xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F3A, .W1, 0x7A), .RVRM, .{cpu.FMA4}),
// VFNMSUBPD
vec(.VFNMSUBPD, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x7D), .RVMR, .{cpu.FMA4}),
vec(.VFNMSUBPD, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x7D), .RVMR, .{cpu.FMA4}),
vec(.VFNMSUBPD, ops4(.xmml, .xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F3A, .W1, 0x7D), .RVRM, .{cpu.FMA4}),
vec(.VFNMSUBPD, ops4(.ymml, .ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F3A, .W1, 0x7D), .RVRM, .{cpu.FMA4}),
// VFNMSUBPS
vec(.VFNMSUBPS, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x7C), .RVMR, .{cpu.FMA4}),
vec(.VFNMSUBPS, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x7C), .RVMR, .{cpu.FMA4}),
vec(.VFNMSUBPS, ops4(.xmml, .xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F3A, .W1, 0x7C), .RVRM, .{cpu.FMA4}),
vec(.VFNMSUBPS, ops4(.ymml, .ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F3A, .W1, 0x7C), .RVRM, .{cpu.FMA4}),
// VFNMSUBSD
vec(.VFNMSUBSD, ops4(.xmml, .xmml, .xmml_m64, .xmml), vex(.LIG, ._66, ._0F3A, .W0, 0x7F), .RVMR, .{cpu.FMA4}),
vec(.VFNMSUBSD, ops4(.xmml, .xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F3A, .W1, 0x7F), .RVRM, .{cpu.FMA4}),
// VFNMSUBSS
vec(.VFNMSUBSS, ops4(.xmml, .xmml, .xmml_m32, .xmml), vex(.LIG, ._66, ._0F3A, .W0, 0x7E), .RVMR, .{cpu.FMA4}),
vec(.VFNMSUBSS, ops4(.xmml, .xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F3A, .W1, 0x7E), .RVRM, .{cpu.FMA4}),
//
// Misc Extensions
//
// ADX
// ADCX
instr(.ADCX, ops2(.reg32, .rm32), preOp3(._66, 0x0F, 0x38, 0xF6), .RM, .Op32, .{cpu.ADX}),
instr(.ADCX, ops2(.reg64, .rm64), preOp3(._66, 0x0F, 0x38, 0xF6), .RM, .REX_W, .{cpu.ADX}),
// ADOX
instr(.ADOX, ops2(.reg32, .rm32), preOp3(._F3, 0x0F, 0x38, 0xF6), .RM, .Op32, .{cpu.ADX}),
instr(.ADOX, ops2(.reg64, .rm64), preOp3(._F3, 0x0F, 0x38, 0xF6), .RM, .REX_W, .{cpu.ADX}),
// CLDEMOTE
instr(.CLDEMOTE, ops1(.rm_mem8), preOp2r(._NP, 0x0F, 0x1C, 0), .M, .ZO, .{cpu.CLDEMOTE}),
// CLFLUSHOPT
instr(.CLFLUSHOPT, ops1(.rm_mem8), preOp2r(._66, 0x0F, 0xAE, 7), .M, .ZO, .{cpu.CLFLUSHOPT}),
// CET_IBT
instr(.ENDBR32, ops0(), preOp3(._F3, 0x0F, 0x1E, 0xFB), .ZO, .ZO, .{cpu.CET_IBT}),
instr(.ENDBR64, ops0(), preOp3(._F3, 0x0F, 0x1E, 0xFA), .ZO, .ZO, .{cpu.CET_IBT}),
// CET_SS
// CLRSSBSY
instr(.CLRSSBSY, ops1(.rm_mem64), preOp2r(._F3, 0x0F, 0xAE, 6), .M, .ZO, .{cpu.CET_SS}),
// INCSSPD / INCSSPQ
instr(.INCSSPD, ops1(.reg32), preOp2r(._F3, 0x0F, 0xAE, 5), .M, .Op32, .{cpu.CET_SS}),
instr(.INCSSPQ, ops1(.reg64), preOp2r(._F3, 0x0F, 0xAE, 5), .M, .REX_W, .{cpu.CET_SS}),
// RDSSP
instr(.RDSSPD, ops1(.reg32), preOp2r(._F3, 0x0F, 0x1E, 1), .M, .Op32, .{cpu.CET_SS}),
instr(.RDSSPQ, ops1(.reg64), preOp2r(._F3, 0x0F, 0x1E, 1), .M, .REX_W, .{cpu.CET_SS}),
// RSTORSSP
instr(.RSTORSSP, ops1(.rm_mem64), preOp2r(._F3, 0x0F, 0x01, 5), .M, .ZO, .{cpu.CET_SS}),
// SAVEPREVSSP
instr(.SAVEPREVSSP, ops0(), preOp3(._F3, 0x0F, 0x01, 0xEA), .ZO, .ZO, .{cpu.CET_SS}),
// SETSSBSY
instr(.SETSSBSY, ops0(), preOp3(._F3, 0x0F, 0x01, 0xE8), .ZO, .ZO, .{cpu.CET_SS}),
// WRSS
instr(.WRSSD, ops2(.rm32, .reg32), Op3(0x0F, 0x38, 0xF6), .MR, .Op32, .{cpu.CET_SS}),
instr(.WRSSQ, ops2(.rm64, .reg64), Op3(0x0F, 0x38, 0xF6), .MR, .REX_W, .{cpu.CET_SS}),
// WRUSS
instr(.WRUSSD, ops2(.rm32, .reg32), preOp3(._66, 0x0F, 0x38, 0xF5), .MR, .Op32, .{cpu.CET_SS}),
instr(.WRUSSQ, ops2(.rm64, .reg64), preOp3(._66, 0x0F, 0x38, 0xF5), .MR, .REX_W, .{cpu.CET_SS}),
// CLWB
instr(.CLWB, ops1(.rm_mem8), preOp2r(._66, 0x0F, 0xAE, 6), .M, .ZO, .{cpu.CLWB}),
// FSGSBASE
// RDFSBASE
instr(.RDFSBASE, ops1(.reg32), preOp2r(._F3, 0x0F, 0xAE, 0), .M, .Op32, .{cpu.FSGSBASE}),
instr(.RDFSBASE, ops1(.reg64), preOp2r(._F3, 0x0F, 0xAE, 0), .M, .REX_W, .{cpu.FSGSBASE}),
// RDGSBASE
instr(.RDGSBASE, ops1(.reg32), preOp2r(._F3, 0x0F, 0xAE, 1), .M, .Op32, .{cpu.FSGSBASE}),
instr(.RDGSBASE, ops1(.reg64), preOp2r(._F3, 0x0F, 0xAE, 1), .M, .REX_W, .{cpu.FSGSBASE}),
// WRFSBASE
instr(.WRFSBASE, ops1(.reg32), preOp2r(._F3, 0x0F, 0xAE, 2), .M, .Op32, .{cpu.FSGSBASE}),
instr(.WRFSBASE, ops1(.reg64), preOp2r(._F3, 0x0F, 0xAE, 2), .M, .REX_W, .{cpu.FSGSBASE}),
// WRGSBASE
instr(.WRGSBASE, ops1(.reg32), preOp2r(._F3, 0x0F, 0xAE, 3), .M, .Op32, .{cpu.FSGSBASE}),
instr(.WRGSBASE, ops1(.reg64), preOp2r(._F3, 0x0F, 0xAE, 3), .M, .REX_W, .{cpu.FSGSBASE}),
// FXRSTOR
instr(.FXRSTOR, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xAE, 1), .M, .ZO, .{cpu.FXSR}),
instr(.FXRSTOR64, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xAE, 1), .M, .REX_W, .{ cpu.FXSR, No32 }),
// FXSAVE
instr(.FXSAVE, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xAE, 0), .M, .ZO, .{cpu.FXSR}),
instr(.FXSAVE64, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xAE, 0), .M, .REX_W, .{ cpu.FXSR, No32 }),
// SGX
instr(.ENCLS, ops0(), preOp3(._NP, 0x0F, 0x01, 0xCF), .ZO, .ZO, .{cpu.SGX}),
instr(.ENCLU, ops0(), preOp3(._NP, 0x0F, 0x01, 0xD7), .ZO, .ZO, .{cpu.SGX}),
instr(.ENCLV, ops0(), preOp3(._NP, 0x0F, 0x01, 0xC0), .ZO, .ZO, .{cpu.SGX}),
// SMX
instr(.GETSEC, ops0(), preOp2(._NP, 0x0F, 0x37), .ZO, .ZO, .{cpu.SMX}),
// GFNI
instr(.GF2P8AFFINEINVQB, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0xCF), .RMI, .ZO, .{GFNI}),
instr(.GF2P8AFFINEQB, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0xCE), .RMI, .ZO, .{GFNI}),
instr(.GF2P8MULB, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0xCF), .RM, .ZO, .{GFNI}),
// INVPCID
instr(.INVPCID, ops2(.reg32, .rm_mem128), preOp3(._66, 0x0F, 0x38, 0x82), .RM, .ZO, .{ cpu.INVPCID, No64 }),
instr(.INVPCID, ops2(.reg64, .rm_mem128), preOp3(._66, 0x0F, 0x38, 0x82), .RM, .ZO, .{ cpu.INVPCID, No32 }),
// MOVBE
instr(.MOVBE, ops2(.reg16, .rm_mem16), Op3(0x0F, 0x38, 0xF0), .RM, .Op16, .{cpu.MOVBE}),
instr(.MOVBE, ops2(.reg32, .rm_mem32), Op3(0x0F, 0x38, 0xF0), .RM, .Op32, .{cpu.MOVBE}),
instr(.MOVBE, ops2(.reg64, .rm_mem64), Op3(0x0F, 0x38, 0xF0), .RM, .REX_W, .{cpu.MOVBE}),
//
instr(.MOVBE, ops2(.rm_mem16, .reg16), Op3(0x0F, 0x38, 0xF1), .MR, .Op16, .{cpu.MOVBE}),
instr(.MOVBE, ops2(.rm_mem32, .reg32), Op3(0x0F, 0x38, 0xF1), .MR, .Op32, .{cpu.MOVBE}),
instr(.MOVBE, ops2(.rm_mem64, .reg64), Op3(0x0F, 0x38, 0xF1), .MR, .REX_W, .{cpu.MOVBE}),
// MOVDIRI
instr(.MOVDIRI, ops2(.rm_mem32, .reg32), preOp3(._NP, 0x0F, 0x38, 0xF9), .MR, .Op32, .{cpu.MOVDIRI}),
instr(.MOVDIRI, ops2(.rm_mem64, .reg64), preOp3(._NP, 0x0F, 0x38, 0xF9), .MR, .REX_W, .{cpu.MOVDIRI}),
// MOVDIR64B
instr(.MOVDIR64B, ops2(.reg16, .rm_mem), preOp3(._66, 0x0F, 0x38, 0xF8), .RM, .Addr16, .{ cpu.MOVDIR64B, No64 }),
instr(.MOVDIR64B, ops2(.reg32, .rm_mem), preOp3(._66, 0x0F, 0x38, 0xF8), .RM, .Addr32, .{cpu.MOVDIR64B}),
instr(.MOVDIR64B, ops2(.reg64, .rm_mem), preOp3(._66, 0x0F, 0x38, 0xF8), .RM, .Addr64, .{ cpu.MOVDIR64B, No32 }),
// MPX
instr(.BNDCL, ops2(.bnd, .rm32), preOp2(._F3, 0x0F, 0x1A), .RM, .ZO, .{ cpu.MPX, No64 }),
instr(.BNDCL, ops2(.bnd, .rm64), preOp2(._F3, 0x0F, 0x1A), .RM, .ZO, .{ cpu.MPX, No32 }),
//
instr(.BNDCU, ops2(.bnd, .rm32), preOp2(._F2, 0x0F, 0x1A), .RM, .ZO, .{ cpu.MPX, No64 }),
instr(.BNDCU, ops2(.bnd, .rm64), preOp2(._F2, 0x0F, 0x1A), .RM, .ZO, .{ cpu.MPX, No32 }),
instr(.BNDCN, ops2(.bnd, .rm32), preOp2(._F2, 0x0F, 0x1B), .RM, .ZO, .{ cpu.MPX, No64 }),
instr(.BNDCN, ops2(.bnd, .rm64), preOp2(._F2, 0x0F, 0x1B), .RM, .ZO, .{ cpu.MPX, No32 }),
// TODO/NOTE: special `mib` encoding actually requires SIB and is not used as normal memory
instr(.BNDLDX, ops2(.bnd, .rm_mem), preOp2(._NP, 0x0F, 0x1A), .RM, .ZO, .{cpu.MPX}),
//
instr(.BNDMK, ops2(.bnd, .rm_mem32), preOp2(._F3, 0x0F, 0x1B), .RM, .ZO, .{ cpu.MPX, No64 }),
instr(.BNDMK, ops2(.bnd, .rm_mem64), preOp2(._F3, 0x0F, 0x1B), .RM, .ZO, .{ cpu.MPX, No32 }),
//
instr(.BNDMOV, ops2(.bnd, .bnd_m64), preOp2(._66, 0x0F, 0x1A), .RM, .ZO, .{ cpu.MPX, No64 }),
instr(.BNDMOV, ops2(.bnd, .bnd_m128), preOp2(._66, 0x0F, 0x1A), .RM, .ZO, .{ cpu.MPX, No32 }),
instr(.BNDMOV, ops2(.bnd_m64, .bnd), preOp2(._66, 0x0F, 0x1B), .MR, .ZO, .{ cpu.MPX, No64 }),
instr(.BNDMOV, ops2(.bnd_m128, .bnd), preOp2(._66, 0x0F, 0x1B), .MR, .ZO, .{ cpu.MPX, No32 }),
// TODO/NOTE: special `mib` encoding actually requires SIB and is not used as normal memory
instr(.BNDSTX, ops2(.rm_mem, .bnd), preOp2(._NP, 0x0F, 0x1B), .MR, .ZO, .{cpu.MPX}),
// PCOMMIT
instr(.PCOMMIT, ops0(), preOp3(._66, 0x0F, 0xAE, 0xF8), .ZO, .ZO, .{cpu.PCOMMIT}),
// PKU
instr(.RDPKRU, ops0(), preOp3(._NP, 0x0F, 0x01, 0xEE), .ZO, .ZO, .{cpu.PKU}),
instr(.WRPKRU, ops0(), preOp3(._NP, 0x0F, 0x01, 0xEF), .ZO, .ZO, .{cpu.PKU}),
    // PREFETCH / PREFETCHW
instr(.PREFETCH, ops1(.rm_mem8), Op2r(0x0F, 0x0D, 0), .M, .ZO, .{cpu._3DNOW}),
instr(.PREFETCH, ops1(.rm_mem8), Op2r(0x0F, 0x0D, 0), .M, .ZO, .{ cpu.PREFETCHW, cpu.Amd }),
instr(.PREFETCH, ops1(.rm_mem8), Op2r(0x0F, 0x0D, 0), .M, .ZO, .{ x86_64, cpu.Amd }),
instr(.PREFETCHW, ops1(.rm_mem8), Op2r(0x0F, 0x0D, 1), .M, .ZO, .{cpu.PREFETCHW}),
instr(.PREFETCHW, ops1(.rm_mem8), Op2r(0x0F, 0x0D, 1), .M, .ZO, .{cpu._3DNOW}),
instr(.PREFETCHW, ops1(.rm_mem8), Op2r(0x0F, 0x0D, 1), .M, .ZO, .{x86_64}),
// PTWRITE
instr(.PTWRITE, ops1(.rm32), preOp2r(._F3, 0x0F, 0xAE, 4), .M, .Op32, .{cpu.PTWRITE}),
instr(.PTWRITE, ops1(.rm64), preOp2r(._F3, 0x0F, 0xAE, 4), .M, .REX_W, .{cpu.PTWRITE}),
// RDPID
instr(.RDPID, ops1(.reg32), preOp2r(._F3, 0x0F, 0xC7, 7), .M, .ZO, .{ cpu.RDPID, No64 }),
instr(.RDPID, ops1(.reg64), preOp2r(._F3, 0x0F, 0xC7, 7), .M, .ZO, .{ cpu.RDPID, No32 }),
// RDRAND
instr(.RDRAND, ops1(.reg16), preOp2r(.NFx, 0x0F, 0xC7, 6), .M, .Op16, .{cpu.RDRAND}),
instr(.RDRAND, ops1(.reg32), preOp2r(.NFx, 0x0F, 0xC7, 6), .M, .Op32, .{cpu.RDRAND}),
instr(.RDRAND, ops1(.reg64), preOp2r(.NFx, 0x0F, 0xC7, 6), .M, .REX_W, .{cpu.RDRAND}),
// RDSEED
instr(.RDSEED, ops1(.reg16), preOp2r(.NFx, 0x0F, 0xC7, 7), .M, .Op16, .{cpu.RDSEED}),
instr(.RDSEED, ops1(.reg32), preOp2r(.NFx, 0x0F, 0xC7, 7), .M, .Op32, .{cpu.RDSEED}),
instr(.RDSEED, ops1(.reg64), preOp2r(.NFx, 0x0F, 0xC7, 7), .M, .REX_W, .{cpu.RDSEED}),
// SMAP
instr(.CLAC, ops0(), preOp3(._NP, 0x0F, 0x01, 0xCA), .ZO, .ZO, .{cpu.SMAP}),
instr(.STAC, ops0(), preOp3(._NP, 0x0F, 0x01, 0xCB), .ZO, .ZO, .{cpu.SMAP}),
// TSX (HLE / RTM)
// HLE (NOTE: these instructions actually act as prefixes)
instr(.XACQUIRE, ops0(), Op1(0xF2), .ZO, .ZO, .{cpu.HLE}),
instr(.XRELEASE, ops0(), Op1(0xF3), .ZO, .ZO, .{cpu.HLE}),
// RTM
instr(.XABORT, ops1(.imm8), Op2(0xC6, 0xF8), .I, .ZO, .{cpu.RTM}),
instr(.XBEGIN, ops1(.imm16), Op2(0xC7, 0xF8), .I, .Op16, .{ cpu.RTM, Sign }),
instr(.XBEGIN, ops1(.imm32), Op2(0xC7, 0xF8), .I, .Op32, .{ cpu.RTM, Sign }),
instr(.XEND, ops0(), preOp3(._NP, 0x0F, 0x01, 0xD5), .ZO, .ZO, .{cpu.RTM}),
// HLE or RTM
instr(.XTEST, ops0(), preOp3(._NP, 0x0F, 0x01, 0xD6), .ZO, .ZO, .{cpu.HLE}),
instr(.XTEST, ops0(), preOp3(._NP, 0x0F, 0x01, 0xD6), .ZO, .ZO, .{cpu.RTM}),
// WAITPKG
//
instr(.UMONITOR, ops1(.reg16), preOp2r(._F3, 0x0F, 0xAE, 6), .M, .Addr16, .{ cpu.WAITPKG, No64 }),
instr(.UMONITOR, ops1(.reg32), preOp2r(._F3, 0x0F, 0xAE, 6), .M, .Addr32, .{cpu.WAITPKG}),
instr(.UMONITOR, ops1(.reg64), preOp2r(._F3, 0x0F, 0xAE, 6), .M, .ZO, .{ cpu.WAITPKG, No32 }),
//
instr(.UMWAIT, ops3(.reg32, .reg_edx, .reg_eax), preOp2r(._F2, 0x0F, 0xAE, 6), .M, .ZO, .{cpu.WAITPKG}),
instr(.UMWAIT, ops1(.reg32), preOp2r(._F2, 0x0F, 0xAE, 6), .M, .ZO, .{cpu.WAITPKG}),
//
instr(.TPAUSE, ops3(.reg32, .reg_edx, .reg_eax), preOp2r(._66, 0x0F, 0xAE, 6), .M, .ZO, .{cpu.WAITPKG}),
instr(.TPAUSE, ops1(.reg32), preOp2r(._66, 0x0F, 0xAE, 6), .M, .ZO, .{cpu.WAITPKG}),
// XSAVE
// XGETBV
instr(.XGETBV, ops0(), preOp3(._NP, 0x0F, 0x01, 0xD0), .ZO, .ZO, .{cpu.XSAVE}),
// XSETBV
instr(.XSETBV, ops0(), preOp3(._NP, 0x0F, 0x01, 0xD1), .ZO, .ZO, .{cpu.XSAVE}),
// XSAVE
instr(.XSAVE, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xAE, 4), .M, .ZO, .{cpu.XSAVE}),
instr(.XSAVE64, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xAE, 4), .M, .REX_W, .{ cpu.XSAVE, No32 }),
// XRSTOR
instr(.XRSTOR, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xAE, 5), .M, .ZO, .{cpu.XSAVE}),
instr(.XRSTOR64, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xAE, 5), .M, .REX_W, .{ cpu.XSAVE, No32 }),
// XSAVEOPT
instr(.XSAVEOPT, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xAE, 6), .M, .ZO, .{cpu.XSAVEOPT}),
instr(.XSAVEOPT64, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xAE, 6), .M, .REX_W, .{ cpu.XSAVEOPT, No32 }),
// XSAVEC
instr(.XSAVEC, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xC7, 4), .M, .ZO, .{cpu.XSAVEC}),
instr(.XSAVEC64, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xC7, 4), .M, .REX_W, .{ cpu.XSAVEC, No32 }),
// XSS
instr(.XSAVES, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xC7, 5), .M, .ZO, .{cpu.XSS}),
instr(.XSAVES64, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xC7, 5), .M, .REX_W, .{ cpu.XSS, No32 }),
//
instr(.XRSTORS, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xC7, 3), .M, .ZO, .{cpu.XSS}),
instr(.XRSTORS64, ops1(.rm_mem), preOp2r(._NP, 0x0F, 0xC7, 3), .M, .REX_W, .{ cpu.XSS, No32 }),
//
// AES instructions
//
instr(.AESDEC, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0xDE), .RM, .ZO, .{cpu.AES}),
instr(.AESDECLAST, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0xDF), .RM, .ZO, .{cpu.AES}),
instr(.AESENC, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0xDC), .RM, .ZO, .{cpu.AES}),
instr(.AESENCLAST, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0xDD), .RM, .ZO, .{cpu.AES}),
instr(.AESIMC, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0xDB), .RM, .ZO, .{cpu.AES}),
instr(.AESKEYGENASSIST, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0xDF), .RMI, .ZO, .{cpu.AES}),
//
// SHA instructions
//
instr(.SHA1RNDS4, ops3(.xmml, .xmml_m128, .imm8), preOp3(._NP, 0x0F, 0x3A, 0xCC), .RMI, .ZO, .{cpu.SHA}),
instr(.SHA1NEXTE, ops2(.xmml, .xmml_m128), preOp3(._NP, 0x0F, 0x38, 0xC8), .RM, .ZO, .{cpu.SHA}),
instr(.SHA1MSG1, ops2(.xmml, .xmml_m128), preOp3(._NP, 0x0F, 0x38, 0xC9), .RM, .ZO, .{cpu.SHA}),
instr(.SHA1MSG2, ops2(.xmml, .xmml_m128), preOp3(._NP, 0x0F, 0x38, 0xCA), .RM, .ZO, .{cpu.SHA}),
instr(.SHA256RNDS2, ops3(.xmml, .xmml_m128, .xmm0), preOp3(._NP, 0x0F, 0x38, 0xCB), .RM, .ZO, .{cpu.SHA}),
instr(.SHA256RNDS2, ops2(.xmml, .xmml_m128), preOp3(._NP, 0x0F, 0x38, 0xCB), .RM, .ZO, .{cpu.SHA}),
instr(.SHA256MSG1, ops2(.xmml, .xmml_m128), preOp3(._NP, 0x0F, 0x38, 0xCC), .RM, .ZO, .{cpu.SHA}),
instr(.SHA256MSG2, ops2(.xmml, .xmml_m128), preOp3(._NP, 0x0F, 0x38, 0xCD), .RM, .ZO, .{cpu.SHA}),
//
// SSE non-VEX/EVEX opcodes
//
// LDMXCSR
instr(.LDMXCSR, ops1(.rm_mem32), preOp2r(._NP, 0x0F, 0xAE, 2), .M, .ZO, .{SSE}),
// STMXCSR
instr(.STMXCSR, ops1(.rm_mem32), preOp2r(._NP, 0x0F, 0xAE, 3), .M, .ZO, .{SSE}),
// PREFETCH
instr(.PREFETCHNTA, ops1(.rm_mem8), Op2r(0x0F, 0x18, 0), .M, .ZO, .{SSE}),
instr(.PREFETCHNTA, ops1(.rm_mem8), Op2r(0x0F, 0x18, 0), .M, .ZO, .{MMXEXT}),
instr(.PREFETCHT0, ops1(.rm_mem8), Op2r(0x0F, 0x18, 1), .M, .ZO, .{SSE}),
instr(.PREFETCHT0, ops1(.rm_mem8), Op2r(0x0F, 0x18, 1), .M, .ZO, .{MMXEXT}),
instr(.PREFETCHT1, ops1(.rm_mem8), Op2r(0x0F, 0x18, 2), .M, .ZO, .{SSE}),
instr(.PREFETCHT1, ops1(.rm_mem8), Op2r(0x0F, 0x18, 2), .M, .ZO, .{MMXEXT}),
instr(.PREFETCHT2, ops1(.rm_mem8), Op2r(0x0F, 0x18, 3), .M, .ZO, .{SSE}),
instr(.PREFETCHT2, ops1(.rm_mem8), Op2r(0x0F, 0x18, 3), .M, .ZO, .{MMXEXT}),
// SFENCE
instr(.SFENCE, ops0(), preOp3(._NP, 0x0F, 0xAE, 0xF8), .ZO, .ZO, .{SSE}),
instr(.SFENCE, ops0(), preOp3(._NP, 0x0F, 0xAE, 0xF8), .ZO, .ZO, .{MMXEXT}),
// CLFLUSH
instr(.CLFLUSH, ops1(.rm_mem8), preOp2r(._NP, 0x0F, 0xAE, 7), .M, .ZO, .{SSE2}),
// LFENCE
instr(.LFENCE, ops0(), preOp3(._NP, 0x0F, 0xAE, 0xE8), .ZO, .ZO, .{SSE2}),
// MFENCE
instr(.MFENCE, ops0(), preOp3(._NP, 0x0F, 0xAE, 0xF0), .ZO, .ZO, .{SSE2}),
// MOVNTI
instr(.MOVNTI, ops2(.rm_mem32, .reg32), preOp2(._NP, 0x0F, 0xC3), .MR, .Op32, .{SSE2}),
instr(.MOVNTI, ops2(.rm_mem64, .reg64), preOp2(._NP, 0x0F, 0xC3), .MR, .REX_W, .{SSE2}),
// PAUSE
instr(.PAUSE, ops0(), preOp1(._F3, 0x90), .ZO, .ZO, .{SSE2}),
// MONITOR
instr(.MONITOR, ops0(), Op3(0x0F, 0x01, 0xC8), .ZO, .ZO, .{SSE3}),
// MWAIT
instr(.MWAIT, ops0(), Op3(0x0F, 0x01, 0xC9), .ZO, .ZO, .{SSE3}),
// CRC32
instr(.CRC32, ops2(.reg32, .rm8), preOp3(._F2, 0x0F, 0x38, 0xF0), .RM, .ZO, .{SSE4_2}),
instr(.CRC32, ops2(.reg32, .rm16), preOp3(._F2, 0x0F, 0x38, 0xF1), .RM, .Op16, .{SSE4_2}),
instr(.CRC32, ops2(.reg32, .rm32), preOp3(._F2, 0x0F, 0x38, 0xF1), .RM, .Op32, .{SSE4_2}),
instr(.CRC32, ops2(.reg64, .rm64), preOp3(._F2, 0x0F, 0x38, 0xF1), .RM, .REX_W, .{SSE4_2}),
//
// AMD-V
// CLGI
instr(.CLGI, ops0(), Op3(0x0F, 0x01, 0xDD), .ZO, .ZO, .{cpu.AMD_V}),
// INVLPGA
instr(.INVLPGA, ops0(), Op3(0x0F, 0x01, 0xDF), .ZO, .ZO, .{cpu.AMD_V}),
instr(.INVLPGA, ops2(.reg_ax, .reg_ecx), Op3(0x0F, 0x01, 0xDF), .ZO, .Addr16, .{ cpu.AMD_V, No64 }),
instr(.INVLPGA, ops2(.reg_eax, .reg_ecx), Op3(0x0F, 0x01, 0xDF), .ZO, .Addr32, .{cpu.AMD_V}),
instr(.INVLPGA, ops2(.reg_rax, .reg_ecx), Op3(0x0F, 0x01, 0xDF), .ZO, .Addr64, .{ cpu.AMD_V, No32 }),
// SKINIT
instr(.SKINIT, ops0(), Op3(0x0F, 0x01, 0xDE), .ZO, .ZO, .{ cpu.AMD_V, cpu.SKINIT }),
instr(.SKINIT, ops1(.reg_eax), Op3(0x0F, 0x01, 0xDE), .ZO, .ZO, .{ cpu.AMD_V, cpu.SKINIT }),
// STGI
instr(.STGI, ops0(), Op3(0x0F, 0x01, 0xDC), .ZO, .ZO, .{ cpu.AMD_V, cpu.SKINIT }),
// VMLOAD
instr(.VMLOAD, ops0(), Op3(0x0F, 0x01, 0xDA), .ZO, .ZO, .{cpu.AMD_V}),
instr(.VMLOAD, ops1(.reg_ax), Op3(0x0F, 0x01, 0xDA), .ZO, .Addr16, .{ cpu.AMD_V, No64 }),
instr(.VMLOAD, ops1(.reg_eax), Op3(0x0F, 0x01, 0xDA), .ZO, .Addr32, .{cpu.AMD_V}),
instr(.VMLOAD, ops1(.reg_rax), Op3(0x0F, 0x01, 0xDA), .ZO, .Addr64, .{ cpu.AMD_V, No32 }),
// VMMCALL
instr(.VMMCALL, ops0(), Op3(0x0F, 0x01, 0xD9), .ZO, .ZO, .{cpu.AMD_V}),
// VMRUN
instr(.VMRUN, ops0(), Op3(0x0F, 0x01, 0xD8), .ZO, .ZO, .{cpu.AMD_V}),
instr(.VMRUN, ops1(.reg_ax), Op3(0x0F, 0x01, 0xD8), .ZO, .Addr16, .{ cpu.AMD_V, No64 }),
instr(.VMRUN, ops1(.reg_eax), Op3(0x0F, 0x01, 0xD8), .ZO, .Addr32, .{cpu.AMD_V}),
instr(.VMRUN, ops1(.reg_rax), Op3(0x0F, 0x01, 0xD8), .ZO, .Addr64, .{ cpu.AMD_V, No32 }),
// VMSAVE
instr(.VMSAVE, ops0(), Op3(0x0F, 0x01, 0xDB), .ZO, .ZO, .{cpu.AMD_V}),
instr(.VMSAVE, ops1(.reg_ax), Op3(0x0F, 0x01, 0xDB), .ZO, .Addr16, .{ cpu.AMD_V, No64 }),
instr(.VMSAVE, ops1(.reg_eax), Op3(0x0F, 0x01, 0xDB), .ZO, .Addr32, .{cpu.AMD_V}),
instr(.VMSAVE, ops1(.reg_rax), Op3(0x0F, 0x01, 0xDB), .ZO, .Addr64, .{ cpu.AMD_V, No32 }),
//
// Intel VT-x
//
// INVEPT
instr(.INVEPT, ops2(.reg32, .rm_mem128), preOp3(._66, 0x0F, 0x38, 0x80), .M, .ZO, .{ cpu.VT_X, No64 }),
instr(.INVEPT, ops2(.reg64, .rm_mem128), preOp3(._66, 0x0F, 0x38, 0x80), .M, .ZO, .{ cpu.VT_X, No32 }),
// INVVPID
// Fix: opcode byte was 0x80 (that is INVEPT's encoding); per Intel SDM INVVPID is 66 0F 38 81 /r.
instr(.INVVPID, ops2(.reg32, .rm_mem128), preOp3(._66, 0x0F, 0x38, 0x81), .M, .ZO, .{ cpu.VT_X, No64 }),
instr(.INVVPID, ops2(.reg64, .rm_mem128), preOp3(._66, 0x0F, 0x38, 0x81), .M, .ZO, .{ cpu.VT_X, No32 }),
// VMCLEAR
instr(.VMCLEAR, ops1(.rm_mem64), preOp2r(._66, 0x0F, 0xC7, 6), .M, .ZO, .{cpu.VT_X}),
// VMFUNC
instr(.VMFUNC, ops0(), preOp3(._NP, 0x0F, 0x01, 0xD4), .ZO, .ZO, .{cpu.VT_X}),
// VMPTRLD
instr(.VMPTRLD, ops1(.rm_mem64), preOp2r(._NP, 0x0F, 0xC7, 6), .M, .ZO, .{cpu.VT_X}),
// VMPTRST
instr(.VMPTRST, ops1(.rm_mem64), preOp2r(._NP, 0x0F, 0xC7, 7), .M, .ZO, .{cpu.VT_X}),
// VMREAD
instr(.VMREAD, ops2(.rm32, .reg32), preOp2(._NP, 0x0F, 0x78), .MR, .Op32, .{ cpu.VT_X, No64 }),
instr(.VMREAD, ops2(.rm64, .reg64), preOp2(._NP, 0x0F, 0x78), .MR, .ZO, .{ cpu.VT_X, No32 }),
// VMWRITE
instr(.VMWRITE, ops2(.reg32, .rm32), preOp2(._NP, 0x0F, 0x79), .RM, .Op32, .{ cpu.VT_X, No64 }),
instr(.VMWRITE, ops2(.reg64, .rm64), preOp2(._NP, 0x0F, 0x79), .RM, .ZO, .{ cpu.VT_X, No32 }),
// VMCALL
instr(.VMCALL, ops0(), Op3(0x0F, 0x01, 0xC1), .ZO, .ZO, .{cpu.VT_X}),
// VMLAUNCH
instr(.VMLAUNCH, ops0(), Op3(0x0F, 0x01, 0xC2), .ZO, .ZO, .{cpu.VT_X}),
// VMRESUME
instr(.VMRESUME, ops0(), Op3(0x0F, 0x01, 0xC3), .ZO, .ZO, .{cpu.VT_X}),
// VMXOFF
instr(.VMXOFF, ops0(), Op3(0x0F, 0x01, 0xC4), .ZO, .ZO, .{cpu.VT_X}),
// VMXON
// Fix: was encoded as 0F 01 C7 /6 (spurious 0x01 escape byte, missing mandatory prefix);
// per Intel SDM VMXON m64 is F3 0F C7 /6 — same 0F C7 group as the VMCLEAR/VMPTRLD rows above.
instr(.VMXON, ops1(.rm_mem64), preOp2r(._F3, 0x0F, 0xC7, 6), .M, .ZO, .{cpu.VT_X}),
//
// SIMD legacy instructions (MMX + SSE)
//
// ADDPD
instr(.ADDPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x58), .RM, .ZO, .{SSE2}),
// ADDPS
instr(.ADDPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x58), .RM, .ZO, .{SSE}),
// ADDSD
instr(.ADDSD, ops2(.xmml, .xmml_m64), preOp2(._F2, 0x0F, 0x58), .RM, .ZO, .{SSE2}),
// ADDSS
instr(.ADDSS, ops2(.xmml, .xmml_m32), preOp2(._F3, 0x0F, 0x58), .RM, .ZO, .{SSE}),
// ADDSUBPD
instr(.ADDSUBPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xD0), .RM, .ZO, .{SSE3}),
// ADDSUBPS
instr(.ADDSUBPS, ops2(.xmml, .xmml_m128), preOp2(._F2, 0x0F, 0xD0), .RM, .ZO, .{SSE3}),
// ANDPD
instr(.ANDPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x54), .RM, .ZO, .{SSE2}),
// ANDPS
instr(.ANDPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x54), .RM, .ZO, .{SSE}),
// ANDNPD
instr(.ANDNPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x55), .RM, .ZO, .{SSE2}),
// ANDNPS
instr(.ANDNPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x55), .RM, .ZO, .{SSE}),
// BLENDPD
instr(.BLENDPD, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x0D), .RMI, .ZO, .{SSE4_1}),
// BLENDPS
instr(.BLENDPS, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x0C), .RMI, .ZO, .{SSE4_1}),
// BLENDVPD
instr(.BLENDVPD, ops3(.xmml, .xmml_m128, .xmm0), preOp3(._66, 0x0F, 0x38, 0x15), .RM, .ZO, .{SSE4_1}),
instr(.BLENDVPD, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x15), .RM, .ZO, .{SSE4_1}),
// BLENDVPS
instr(.BLENDVPS, ops3(.xmml, .xmml_m128, .xmm0), preOp3(._66, 0x0F, 0x38, 0x14), .RM, .ZO, .{SSE4_1}),
instr(.BLENDVPS, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x14), .RM, .ZO, .{SSE4_1}),
// CMPPD
instr(.CMPPD, ops3(.xmml, .xmml_m128, .imm8), preOp2(._66, 0x0F, 0xC2), .RMI, .ZO, .{SSE2}),
// CMPPS
instr(.CMPPS, ops3(.xmml, .xmml_m128, .imm8), preOp2(._NP, 0x0F, 0xC2), .RMI, .ZO, .{SSE}),
// CMPSD
instr(.CMPSD, ops0(), Op1(0xA7), .ZO, .Op32, .{ _386, Repe, Repne }), // overloaded
instr(.CMPSD, ops3(.xmml, .xmml_m64, .imm8), preOp2(._F2, 0x0F, 0xC2), .RMI, .ZO, .{SSE2}),
// CMPSS
instr(.CMPSS, ops3(.xmml, .xmml_m32, .imm8), preOp2(._F3, 0x0F, 0xC2), .RMI, .ZO, .{SSE}),
// COMISD
instr(.COMISD, ops2(.xmml, .xmml_m64), preOp2(._66, 0x0F, 0x2F), .RM, .ZO, .{SSE2}),
// COMISS
instr(.COMISS, ops2(.xmml, .xmml_m32), preOp2(._NP, 0x0F, 0x2F), .RM, .ZO, .{SSE}),
// CVTDQ2PD
instr(.CVTDQ2PD, ops2(.xmml, .xmml_m64), preOp2(._F3, 0x0F, 0xE6), .RM, .ZO, .{SSE2}),
// CVTDQ2PS
instr(.CVTDQ2PS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x5B), .RM, .ZO, .{SSE2}),
// CVTPD2DQ
instr(.CVTPD2DQ, ops2(.xmml, .xmml_m128), preOp2(._F2, 0x0F, 0xE6), .RM, .ZO, .{SSE2}),
// CVTPD2PI
instr(.CVTPD2PI, ops2(.mm, .xmml_m128), preOp2(._66, 0x0F, 0x2D), .RM, .ZO, .{SSE2}),
// CVTPD2PS
instr(.CVTPD2PS, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x5A), .RM, .ZO, .{SSE2}),
// CVTPI2PD
instr(.CVTPI2PD, ops2(.xmml, .mm_m64), preOp2(._66, 0x0F, 0x2A), .RM, .ZO, .{SSE2}),
// CVTPI2PS
instr(.CVTPI2PS, ops2(.xmml, .mm_m64), preOp2(._NP, 0x0F, 0x2A), .RM, .ZO, .{SSE}),
// CVTPS2DQ
instr(.CVTPS2DQ, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x5B), .RM, .ZO, .{SSE2}),
// CVTPS2PD
instr(.CVTPS2PD, ops2(.xmml, .xmml_m64), preOp2(._NP, 0x0F, 0x5A), .RM, .ZO, .{SSE2}),
// CVTPS2PI
instr(.CVTPS2PI, ops2(.mm, .xmml_m64), preOp2(._NP, 0x0F, 0x2D), .RM, .ZO, .{SSE}),
// CVTSD2SI
instr(.CVTSD2SI, ops2(.reg32, .xmml_m64), preOp2(._F2, 0x0F, 0x2D), .RM, .ZO, .{SSE2}),
instr(.CVTSD2SI, ops2(.reg64, .xmml_m64), preOp2(._F2, 0x0F, 0x2D), .RM, .REX_W, .{SSE2}),
// CVTSD2SS
instr(.CVTSD2SS, ops2(.xmml, .xmml_m64), preOp2(._F2, 0x0F, 0x5A), .RM, .ZO, .{SSE2}),
// CVTSI2SD
instr(.CVTSI2SD, ops2(.xmml, .rm32), preOp2(._F2, 0x0F, 0x2A), .RM, .ZO, .{SSE2}),
instr(.CVTSI2SD, ops2(.xmml, .rm64), preOp2(._F2, 0x0F, 0x2A), .RM, .REX_W, .{SSE2}),
// CVTSI2SS
instr(.CVTSI2SS, ops2(.xmml, .rm32), preOp2(._F3, 0x0F, 0x2A), .RM, .ZO, .{SSE}),
instr(.CVTSI2SS, ops2(.xmml, .rm64), preOp2(._F3, 0x0F, 0x2A), .RM, .REX_W, .{SSE}),
// CVTSS2SD
instr(.CVTSS2SD, ops2(.xmml, .xmml_m32), preOp2(._F3, 0x0F, 0x5A), .RM, .ZO, .{SSE2}),
// CVTSS2SI
instr(.CVTSS2SI, ops2(.reg32, .xmml_m32), preOp2(._F3, 0x0F, 0x2D), .RM, .ZO, .{SSE}),
instr(.CVTSS2SI, ops2(.reg64, .xmml_m32), preOp2(._F3, 0x0F, 0x2D), .RM, .REX_W, .{SSE}),
// CVTTPD2DQ
instr(.CVTTPD2DQ, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xE6), .RM, .ZO, .{SSE2}),
// CVTTPD2PI
instr(.CVTTPD2PI, ops2(.mm, .xmml_m128), preOp2(._66, 0x0F, 0x2C), .RM, .ZO, .{SSE2}),
// CVTTPS2DQ
instr(.CVTTPS2DQ, ops2(.xmml, .xmml_m128), preOp2(._F3, 0x0F, 0x5B), .RM, .ZO, .{SSE2}),
// CVTTPS2PI
instr(.CVTTPS2PI, ops2(.mm, .xmml_m64), preOp2(._NP, 0x0F, 0x2C), .RM, .ZO, .{SSE}),
// CVTTSD2SI
instr(.CVTTSD2SI, ops2(.reg32, .xmml_m64), preOp2(._F2, 0x0F, 0x2C), .RM, .ZO, .{SSE2}),
instr(.CVTTSD2SI, ops2(.reg64, .xmml_m64), preOp2(._F2, 0x0F, 0x2C), .RM, .REX_W, .{SSE2}),
// CVTTSS2SI
instr(.CVTTSS2SI, ops2(.reg32, .xmml_m32), preOp2(._F3, 0x0F, 0x2C), .RM, .ZO, .{SSE}),
instr(.CVTTSS2SI, ops2(.reg64, .xmml_m32), preOp2(._F3, 0x0F, 0x2C), .RM, .REX_W, .{SSE}),
// DIVPD
instr(.DIVPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x5E), .RM, .ZO, .{SSE2}),
// DIVPS
instr(.DIVPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x5E), .RM, .ZO, .{SSE}),
// DIVSD
instr(.DIVSD, ops2(.xmml, .xmml_m64), preOp2(._F2, 0x0F, 0x5E), .RM, .ZO, .{SSE2}),
// DIVSS
instr(.DIVSS, ops2(.xmml, .xmml_m32), preOp2(._F3, 0x0F, 0x5E), .RM, .ZO, .{SSE}),
// DPPD
instr(.DPPD, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x41), .RMI, .ZO, .{SSE4_1}),
// DPPS
instr(.DPPS, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x40), .RMI, .ZO, .{SSE4_1}),
// EXTRACTPS
instr(.EXTRACTPS, ops3(.rm32, .xmml, .imm8), preOp3(._66, 0x0F, 0x3A, 0x17), .MRI, .ZO, .{SSE4_1}),
instr(.EXTRACTPS, ops3(.reg64, .xmml, .imm8), preOp3(._66, 0x0F, 0x3A, 0x17), .MRI, .ZO, .{ SSE4_1, No32 }),
// HADDPD
instr(.HADDPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x7C), .RM, .ZO, .{SSE3}),
// HADDPS
instr(.HADDPS, ops2(.xmml, .xmml_m128), preOp2(._F2, 0x0F, 0x7C), .RM, .ZO, .{SSE3}),
// HSUBPD
instr(.HSUBPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x7D), .RM, .ZO, .{SSE3}),
// HSUBPS
instr(.HSUBPS, ops2(.xmml, .xmml_m128), preOp2(._F2, 0x0F, 0x7D), .RM, .ZO, .{SSE3}),
// INSERTPS
instr(.INSERTPS, ops3(.xmml, .xmml_m32, .imm8), preOp3(._66, 0x0F, 0x3A, 0x21), .RMI, .ZO, .{SSE4_1}),
// LDDQU
instr(.LDDQU, ops2(.xmml, .rm_mem128), preOp2(._F2, 0x0F, 0xF0), .RM, .ZO, .{SSE3}),
// MASKMOVDQU
instr(.MASKMOVDQU, ops2(.xmml, .xmml), preOp2(._66, 0x0F, 0xF7), .RM, .ZO, .{SSE2}),
// MASKMOVQ
instr(.MASKMOVQ, ops2(.mm, .mm), preOp2(._NP, 0x0F, 0xF7), .RM, .ZO, .{SSE}),
instr(.MASKMOVQ, ops2(.mm, .mm), preOp2(._NP, 0x0F, 0xF7), .RM, .ZO, .{MMXEXT}),
// MAXPD
instr(.MAXPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x5F), .RM, .ZO, .{SSE2}),
// MAXPS
instr(.MAXPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x5F), .RM, .ZO, .{SSE}),
// MAXSD
instr(.MAXSD, ops2(.xmml, .xmml_m64), preOp2(._F2, 0x0F, 0x5F), .RM, .ZO, .{SSE2}),
// MAXSS
instr(.MAXSS, ops2(.xmml, .xmml_m32), preOp2(._F3, 0x0F, 0x5F), .RM, .ZO, .{SSE}),
// MINPD
instr(.MINPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x5D), .RM, .ZO, .{SSE2}),
// MINPS
instr(.MINPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x5D), .RM, .ZO, .{SSE}),
// MINSD
instr(.MINSD, ops2(.xmml, .xmml_m64), preOp2(._F2, 0x0F, 0x5D), .RM, .ZO, .{SSE2}),
// MINSS
instr(.MINSS, ops2(.xmml, .xmml_m32), preOp2(._F3, 0x0F, 0x5D), .RM, .ZO, .{SSE}),
// MOVAPD
instr(.MOVAPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x28), .RM, .ZO, .{SSE2}),
instr(.MOVAPD, ops2(.xmml_m128, .xmml), preOp2(._66, 0x0F, 0x29), .MR, .ZO, .{SSE2}),
// MOVAPS
instr(.MOVAPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x28), .RM, .ZO, .{SSE}),
instr(.MOVAPS, ops2(.xmml_m128, .xmml), preOp2(._NP, 0x0F, 0x29), .MR, .ZO, .{SSE}),
// MOVD / MOVQ / MOVQ (2)
instr(.MOVD, ops2(.mm, .rm32), preOp2(._NP, 0x0F, 0x6E), .RM, .ZO, .{MMX}),
instr(.MOVD, ops2(.rm32, .mm), preOp2(._NP, 0x0F, 0x7E), .MR, .ZO, .{MMX}),
instr(.MOVD, ops2(.mm, .rm64), preOp2(._NP, 0x0F, 0x6E), .RM, .REX_W, .{MMX}),
instr(.MOVD, ops2(.rm64, .mm), preOp2(._NP, 0x0F, 0x7E), .MR, .REX_W, .{MMX}),
// xmm
instr(.MOVD, ops2(.xmml, .rm32), preOp2(._66, 0x0F, 0x6E), .RM, .ZO, .{SSE2}),
instr(.MOVD, ops2(.rm32, .xmml), preOp2(._66, 0x0F, 0x7E), .MR, .ZO, .{SSE2}),
instr(.MOVD, ops2(.xmml, .rm64), preOp2(._66, 0x0F, 0x6E), .RM, .REX_W, .{SSE2}),
instr(.MOVD, ops2(.rm64, .xmml), preOp2(._66, 0x0F, 0x7E), .MR, .REX_W, .{SSE2}),
// MOVQ
instr(.MOVQ, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x6F), .RM, .ZO, .{MMX}),
instr(.MOVQ, ops2(.mm_m64, .mm), preOp2(._NP, 0x0F, 0x7F), .MR, .ZO, .{MMX}),
instr(.MOVQ, ops2(.mm, .rm64), preOp2(._NP, 0x0F, 0x6E), .RM, .REX_W, .{MMX}),
instr(.MOVQ, ops2(.rm64, .mm), preOp2(._NP, 0x0F, 0x7E), .MR, .REX_W, .{MMX}),
// xmm
instr(.MOVQ, ops2(.xmml, .xmml_m64), preOp2(._F3, 0x0F, 0x7E), .RM, .ZO, .{SSE2}),
instr(.MOVQ, ops2(.xmml_m64, .xmml), preOp2(._66, 0x0F, 0xD6), .MR, .ZO, .{SSE2}),
instr(.MOVQ, ops2(.xmml, .rm64), preOp2(._66, 0x0F, 0x6E), .RM, .REX_W, .{SSE2}),
instr(.MOVQ, ops2(.rm64, .xmml), preOp2(._66, 0x0F, 0x7E), .MR, .REX_W, .{SSE2}),
// MOVDDUP
instr(.MOVDDUP, ops2(.xmml, .xmml_m64), preOp2(._F2, 0x0F, 0x12), .RM, .ZO, .{SSE3}),
// MOVDQA
instr(.MOVDQA, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x6F), .RM, .ZO, .{SSE2}),
instr(.MOVDQA, ops2(.xmml_m128, .xmml), preOp2(._66, 0x0F, 0x7F), .MR, .ZO, .{SSE2}),
// MOVDQU
instr(.MOVDQU, ops2(.xmml, .xmml_m128), preOp2(._F3, 0x0F, 0x6F), .RM, .ZO, .{SSE2}),
instr(.MOVDQU, ops2(.xmml_m128, .xmml), preOp2(._F3, 0x0F, 0x7F), .MR, .ZO, .{SSE2}),
// MOVDQ2Q
instr(.MOVDQ2Q, ops2(.mm, .xmml), preOp2(._F2, 0x0F, 0xD6), .RM, .ZO, .{SSE2}),
// MOVHLPS
instr(.MOVHLPS, ops2(.xmml, .xmml), preOp2(._NP, 0x0F, 0x12), .RM, .ZO, .{SSE}),
// MOVHPD
instr(.MOVHPD, ops2(.xmml, .rm_mem64), preOp2(._66, 0x0F, 0x16), .RM, .ZO, .{SSE2}),
// MOVHPS
instr(.MOVHPS, ops2(.xmml, .rm_mem64), preOp2(._NP, 0x0F, 0x16), .RM, .ZO, .{SSE}),
// MOVLHPS
instr(.MOVLHPS, ops2(.xmml, .xmml), preOp2(._NP, 0x0F, 0x16), .RM, .ZO, .{SSE}),
// MOVLPD
instr(.MOVLPD, ops2(.xmml, .rm_mem64), preOp2(._66, 0x0F, 0x12), .RM, .ZO, .{SSE2}),
// MOVLPS
instr(.MOVLPS, ops2(.xmml, .rm_mem64), preOp2(._NP, 0x0F, 0x12), .RM, .ZO, .{SSE}),
// MOVMSKPD
instr(.MOVMSKPD, ops2(.reg32, .xmml), preOp2(._66, 0x0F, 0x50), .RM, .ZO, .{SSE2}),
instr(.MOVMSKPD, ops2(.reg64, .xmml), preOp2(._66, 0x0F, 0x50), .RM, .ZO, .{ No32, SSE2 }),
// MOVMSKPS
// Fix: the reg64 form was flagged SSE2 (copy-paste from MOVMSKPD); MOVMSKPS is an SSE
// instruction per Intel SDM — the r64 form differs only in operating mode, not CPUID feature.
instr(.MOVMSKPS, ops2(.reg32, .xmml), preOp2(._NP, 0x0F, 0x50), .RM, .ZO, .{SSE}),
instr(.MOVMSKPS, ops2(.reg64, .xmml), preOp2(._NP, 0x0F, 0x50), .RM, .ZO, .{ No32, SSE }),
// MOVNTDQA
instr(.MOVNTDQA, ops2(.xmml, .rm_mem128), preOp3(._66, 0x0F, 0x38, 0x2A), .RM, .ZO, .{SSE4_1}),
// MOVNTDQ
instr(.MOVNTDQ, ops2(.rm_mem128, .xmml), preOp2(._66, 0x0F, 0xE7), .MR, .ZO, .{SSE2}),
// MOVNTPD
instr(.MOVNTPD, ops2(.rm_mem128, .xmml), preOp2(._66, 0x0F, 0x2B), .MR, .ZO, .{SSE2}),
// MOVNTPS
instr(.MOVNTPS, ops2(.rm_mem128, .xmml), preOp2(._NP, 0x0F, 0x2B), .MR, .ZO, .{SSE}),
// MOVNTQ
instr(.MOVNTQ, ops2(.rm_mem64, .mm), preOp2(._NP, 0x0F, 0xE7), .MR, .ZO, .{SSE}),
instr(.MOVNTQ, ops2(.rm_mem64, .mm), preOp2(._NP, 0x0F, 0xE7), .MR, .ZO, .{MMXEXT}),
// MOVQ2DQ
instr(.MOVQ2DQ, ops2(.xmml, .mm), preOp2(._F3, 0x0F, 0xD6), .RM, .ZO, .{SSE2}),
// MOVSD
instr(.MOVSD, ops0(), Op1(0xA5), .ZO, .Op32, .{ _386, Rep }), // overloaded
instr(.MOVSD, ops2(.xmml, .xmml_m64), preOp2(._F2, 0x0F, 0x10), .RM, .ZO, .{SSE2}),
instr(.MOVSD, ops2(.xmml_m64, .xmml), preOp2(._F2, 0x0F, 0x11), .MR, .ZO, .{SSE2}),
// MOVSHDUP
instr(.MOVSHDUP, ops2(.xmml, .xmml_m128), preOp2(._F3, 0x0F, 0x16), .RM, .ZO, .{SSE3}),
// MOVSLDUP
instr(.MOVSLDUP, ops2(.xmml, .xmml_m128), preOp2(._F3, 0x0F, 0x12), .RM, .ZO, .{SSE3}),
// MOVSS
instr(.MOVSS, ops2(.xmml, .xmml_m32), preOp2(._F3, 0x0F, 0x10), .RM, .ZO, .{SSE}),
instr(.MOVSS, ops2(.xmml_m32, .xmml), preOp2(._F3, 0x0F, 0x11), .MR, .ZO, .{SSE}),
// MOVUPD
// Fix: operands were xmml_m64; MOVUPD is a full 128-bit unaligned move
// (Intel SDM: 66 0F 10/11 /r, xmm1, xmm2/m128).
instr(.MOVUPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x10), .RM, .ZO, .{SSE2}),
instr(.MOVUPD, ops2(.xmml_m128, .xmml), preOp2(._66, 0x0F, 0x11), .MR, .ZO, .{SSE2}),
// MOVUPS
// Fix: operands were xmml_m32; MOVUPS is a full 128-bit unaligned move
// (Intel SDM: NP 0F 10/11 /r, xmm1, xmm2/m128). Scalar 32-bit moves are MOVSS above.
instr(.MOVUPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x10), .RM, .ZO, .{SSE}),
instr(.MOVUPS, ops2(.xmml_m128, .xmml), preOp2(._NP, 0x0F, 0x11), .MR, .ZO, .{SSE}),
// MPSADBW
instr(.MPSADBW, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x42), .RMI, .ZO, .{SSE4_1}),
// MULPD
instr(.MULPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x59), .RM, .ZO, .{SSE2}),
// MULPS
instr(.MULPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x59), .RM, .ZO, .{SSE}),
// MULSD
instr(.MULSD, ops2(.xmml, .xmml_m64), preOp2(._F2, 0x0F, 0x59), .RM, .ZO, .{SSE2}),
// MULSS
instr(.MULSS, ops2(.xmml, .xmml_m32), preOp2(._F3, 0x0F, 0x59), .RM, .ZO, .{SSE}),
// ORPD
instr(.ORPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x56), .RM, .ZO, .{SSE2}),
// ORPS
instr(.ORPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x56), .RM, .ZO, .{SSE}),
// PABSB / PABSW / PABSD
// PABSB
instr(.PABSB, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x1C), .RM, .ZO, .{SSSE3}),
instr(.PABSB, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x1C), .RM, .ZO, .{SSSE3}),
// PABSW
instr(.PABSW, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x1D), .RM, .ZO, .{SSSE3}),
instr(.PABSW, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x1D), .RM, .ZO, .{SSSE3}),
// PABSD
instr(.PABSD, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x1E), .RM, .ZO, .{SSSE3}),
instr(.PABSD, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x1E), .RM, .ZO, .{SSSE3}),
// PACKSSWB / PACKSSDW
instr(.PACKSSWB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x63), .RM, .ZO, .{MMX}),
instr(.PACKSSWB, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x63), .RM, .ZO, .{SSE2}),
//
instr(.PACKSSDW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x6B), .RM, .ZO, .{MMX}),
instr(.PACKSSDW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x6B), .RM, .ZO, .{SSE2}),
// PACKUSWB
instr(.PACKUSWB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x67), .RM, .ZO, .{MMX}),
instr(.PACKUSWB, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x67), .RM, .ZO, .{SSE2}),
// PACKUSDW
instr(.PACKUSDW, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x2B), .RM, .ZO, .{SSE4_1}),
// PADDB / PADDW / PADDD / PADDQ
instr(.PADDB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xFC), .RM, .ZO, .{MMX}),
instr(.PADDB, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xFC), .RM, .ZO, .{SSE2}),
//
instr(.PADDW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xFD), .RM, .ZO, .{MMX}),
instr(.PADDW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xFD), .RM, .ZO, .{SSE2}),
//
instr(.PADDD, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xFE), .RM, .ZO, .{MMX}),
instr(.PADDD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xFE), .RM, .ZO, .{SSE2}),
//
instr(.PADDQ, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xD4), .RM, .ZO, .{MMX}),
instr(.PADDQ, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xD4), .RM, .ZO, .{SSE2}),
// PADDSB / PADDSW
instr(.PADDSB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xEC), .RM, .ZO, .{MMX}),
instr(.PADDSB, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xEC), .RM, .ZO, .{SSE2}),
//
instr(.PADDSW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xED), .RM, .ZO, .{MMX}),
instr(.PADDSW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xED), .RM, .ZO, .{SSE2}),
// PADDUSB / PADDUSW
instr(.PADDUSB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xDC), .RM, .ZO, .{MMX}),
instr(.PADDUSB, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xDC), .RM, .ZO, .{SSE2}),
//
instr(.PADDUSW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xDD), .RM, .ZO, .{MMX}),
instr(.PADDUSW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xDD), .RM, .ZO, .{SSE2}),
// PALIGNR
instr(.PALIGNR, ops3(.mm, .mm_m64, .imm8), preOp3(._NP, 0x0F, 0x3A, 0x0F), .RMI, .ZO, .{SSSE3}),
instr(.PALIGNR, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x0F), .RMI, .ZO, .{SSSE3}),
// PAND
instr(.PAND, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xDB), .RM, .ZO, .{MMX}),
instr(.PAND, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xDB), .RM, .ZO, .{SSE2}),
// PANDN
instr(.PANDN, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xDF), .RM, .ZO, .{MMX}),
instr(.PANDN, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xDF), .RM, .ZO, .{SSE2}),
// PAVGB / PAVGW
instr(.PAVGB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xE0), .RM, .ZO, .{SSE}),
instr(.PAVGB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xE0), .RM, .ZO, .{MMXEXT}),
instr(.PAVGB, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xE0), .RM, .ZO, .{SSE2}),
//
instr(.PAVGW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xE3), .RM, .ZO, .{SSE}),
instr(.PAVGW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xE3), .RM, .ZO, .{MMXEXT}),
instr(.PAVGW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xE3), .RM, .ZO, .{SSE2}),
// PBLENDVB
instr(.PBLENDVB, ops3(.xmml, .xmml_m128, .xmm0), preOp3(._66, 0x0F, 0x38, 0x10), .RM, .ZO, .{SSE4_1}),
instr(.PBLENDVB, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x10), .RM, .ZO, .{SSE4_1}),
// PBLENDW
instr(.PBLENDW, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x0E), .RMI, .ZO, .{SSE4_1}),
// PCLMULQDQ
instr(.PCLMULQDQ, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x44), .RMI, .ZO, .{cpu.PCLMULQDQ}),
// PCMPEQB / PCMPEQW / PCMPEQD
instr(.PCMPEQB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x74), .RM, .ZO, .{MMX}),
instr(.PCMPEQB, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x74), .RM, .ZO, .{SSE2}),
//
instr(.PCMPEQW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x75), .RM, .ZO, .{MMX}),
instr(.PCMPEQW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x75), .RM, .ZO, .{SSE2}),
//
instr(.PCMPEQD, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x76), .RM, .ZO, .{MMX}),
instr(.PCMPEQD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x76), .RM, .ZO, .{SSE2}),
// PCMPEQQ
// Fix: was flagged SSE2; PCMPEQQ (66 0F 38 29) was introduced with SSE4.1 per Intel SDM
// (cf. the sibling 0F 38 rows PACKUSDW / PMAXUW above, all SSE4_1).
instr(.PCMPEQQ, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x29), .RM, .ZO, .{SSE4_1}),
// PCMPESTRI
instr(.PCMPESTRI, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x61), .RMI, .ZO, .{SSE4_2}),
// PCMPESTRM
instr(.PCMPESTRM, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x60), .RMI, .ZO, .{SSE4_2}),
// PCMPGTB / PCMPGTW / PCMPGTD
instr(.PCMPGTB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x64), .RM, .ZO, .{MMX}),
instr(.PCMPGTB, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x64), .RM, .ZO, .{SSE2}),
//
instr(.PCMPGTW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x65), .RM, .ZO, .{MMX}),
instr(.PCMPGTW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x65), .RM, .ZO, .{SSE2}),
//
instr(.PCMPGTD, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x66), .RM, .ZO, .{MMX}),
instr(.PCMPGTD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x66), .RM, .ZO, .{SSE2}),
// PCMPISTRI
instr(.PCMPISTRI, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x63), .RMI, .ZO, .{SSE4_2}),
// PCMPISTRM
instr(.PCMPISTRM, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x62), .RMI, .ZO, .{SSE4_2}),
// PEXTRB / PEXTRD / PEXTRQ
instr(.PEXTRB, ops3(.reg32_m8, .xmml, .imm8), preOp3(._66, 0x0F, 0x3A, 0x14), .MRI, .ZO, .{SSE4_1}),
instr(.PEXTRB, ops3(.rm_reg64, .xmml, .imm8), preOp3(._66, 0x0F, 0x3A, 0x14), .MRI, .ZO, .{ SSE4_1, No32 }),
instr(.PEXTRD, ops3(.rm32, .xmml, .imm8), preOp3(._66, 0x0F, 0x3A, 0x16), .MRI, .ZO, .{SSE4_1}),
instr(.PEXTRQ, ops3(.rm64, .xmml, .imm8), preOp3(._66, 0x0F, 0x3A, 0x16), .MRI, .REX_W, .{SSE4_1}),
// PEXTRW
instr(.PEXTRW, ops3(.reg32, .mm, .imm8), preOp2(._NP, 0x0F, 0xC5), .RMI, .ZO, .{SSE}),
instr(.PEXTRW, ops3(.reg32, .mm, .imm8), preOp2(._NP, 0x0F, 0xC5), .RMI, .ZO, .{MMXEXT}),
instr(.PEXTRW, ops3(.reg64, .mm, .imm8), preOp2(._NP, 0x0F, 0xC5), .RMI, .ZO, .{ SSE, No32 }),
instr(.PEXTRW, ops3(.reg64, .mm, .imm8), preOp2(._NP, 0x0F, 0xC5), .RMI, .ZO, .{ MMXEXT, No32 }),
instr(.PEXTRW, ops3(.reg32, .xmml, .imm8), preOp2(._66, 0x0F, 0xC5), .RMI, .ZO, .{SSE2}),
instr(.PEXTRW, ops3(.reg64, .xmml, .imm8), preOp2(._66, 0x0F, 0xC5), .RMI, .ZO, .{ SSE2, No32 }),
instr(.PEXTRW, ops3(.reg32_m16, .xmml, .imm8), preOp3(._66, 0x0F, 0x3A, 0x15), .MRI, .ZO, .{SSE4_1}),
instr(.PEXTRW, ops3(.rm_reg64, .xmml, .imm8), preOp3(._66, 0x0F, 0x3A, 0x15), .MRI, .ZO, .{ SSE4_1, No32 }),
// PHADDW / PHADDD
instr(.PHADDW, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x01), .RM, .ZO, .{SSSE3}),
instr(.PHADDW, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x01), .RM, .ZO, .{SSSE3}),
instr(.PHADDD, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x02), .RM, .ZO, .{SSSE3}),
instr(.PHADDD, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x02), .RM, .ZO, .{SSSE3}),
// PHADDSW
instr(.PHADDSW, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x03), .RM, .ZO, .{SSSE3}),
instr(.PHADDSW, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x03), .RM, .ZO, .{SSSE3}),
// PHMINPOSUW
instr(.PHMINPOSUW, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x41), .RM, .ZO, .{SSE4_1}),
// PHSUBW / PHSUBD
instr(.PHSUBW, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x05), .RM, .ZO, .{SSSE3}),
instr(.PHSUBW, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x05), .RM, .ZO, .{SSSE3}),
instr(.PHSUBD, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x06), .RM, .ZO, .{SSSE3}),
instr(.PHSUBD, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x06), .RM, .ZO, .{SSSE3}),
// PHSUBSW
instr(.PHSUBSW, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x07), .RM, .ZO, .{SSSE3}),
instr(.PHSUBSW, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x07), .RM, .ZO, .{SSSE3}),
// PINSRB / PINSRD / PINSRQ
instr(.PINSRB, ops3(.xmml, .reg32_m8, .imm8), preOp3(._66, 0x0F, 0x3A, 0x20), .RMI, .ZO, .{SSE4_1}),
//
instr(.PINSRD, ops3(.xmml, .rm32, .imm8), preOp3(._66, 0x0F, 0x3A, 0x22), .RMI, .ZO, .{SSE4_1}),
//
instr(.PINSRQ, ops3(.xmml, .rm64, .imm8), preOp3(._66, 0x0F, 0x3A, 0x22), .RMI, .REX_W, .{SSE4_1}),
// PINSRW
instr(.PINSRW, ops3(.mm, .reg32_m16, .imm8), preOp2(._NP, 0x0F, 0xC4), .RMI, .ZO, .{SSE}),
instr(.PINSRW, ops3(.mm, .reg32_m16, .imm8), preOp2(._NP, 0x0F, 0xC4), .RMI, .ZO, .{MMXEXT}),
instr(.PINSRW, ops3(.xmml, .reg32_m16, .imm8), preOp2(._66, 0x0F, 0xC4), .RMI, .ZO, .{SSE2}),
// PMADDUBSW
instr(.PMADDUBSW, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x04), .RM, .ZO, .{SSSE3}),
instr(.PMADDUBSW, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x04), .RM, .ZO, .{SSSE3}),
// PMADDWD
instr(.PMADDWD, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xF5), .RM, .ZO, .{MMX}),
instr(.PMADDWD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xF5), .RM, .ZO, .{SSE2}),
// PMAXSB / PMAXSW / PMAXSD
instr(.PMAXSW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xEE), .RM, .ZO, .{SSE}),
instr(.PMAXSW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xEE), .RM, .ZO, .{MMXEXT}),
instr(.PMAXSW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xEE), .RM, .ZO, .{SSE2}),
instr(.PMAXSB, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x3C), .RM, .ZO, .{SSE4_1}),
instr(.PMAXSD, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x3D), .RM, .ZO, .{SSE4_1}),
// PMAXUB / PMAXUW
instr(.PMAXUB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xDE), .RM, .ZO, .{SSE}),
instr(.PMAXUB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xDE), .RM, .ZO, .{MMXEXT}),
instr(.PMAXUB, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xDE), .RM, .ZO, .{SSE2}),
instr(.PMAXUW, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x3E), .RM, .ZO, .{SSE4_1}),
// PMAXUD
instr(.PMAXUD, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x3F), .RM, .ZO, .{SSE4_1}),
// PMINSB / PMINSW
instr(.PMINSW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xEA), .RM, .ZO, .{SSE}),
instr(.PMINSW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xEA), .RM, .ZO, .{MMXEXT}),
instr(.PMINSW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xEA), .RM, .ZO, .{SSE2}),
instr(.PMINSB, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x38), .RM, .ZO, .{SSE4_1}),
// PMINSD
instr(.PMINSD, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x39), .RM, .ZO, .{SSE4_1}),
// PMINUB / PMINUW
instr(.PMINUB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xDA), .RM, .ZO, .{SSE}),
instr(.PMINUB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xDA), .RM, .ZO, .{MMXEXT}),
instr(.PMINUB, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xDA), .RM, .ZO, .{SSE2}),
instr(.PMINUW, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x3A), .RM, .ZO, .{SSE4_1}),
// PMINUD
instr(.PMINUD, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x3B), .RM, .ZO, .{SSE4_1}),
// PMOVMSKB
// The mm forms (NP 0F D7) are SSE / MMXEXT; the xmm forms (66 0F D7) are
// SSE2 per the Intel SDM (previously mis-tagged SSE here), matching the
// other 66-prefixed xmm rows in this table (e.g. PEXTRW, PINSRW).
instr(.PMOVMSKB, ops2(.reg32, .rm_mm), preOp2(._NP, 0x0F, 0xD7), .RM, .ZO, .{SSE}),
instr(.PMOVMSKB, ops2(.reg32, .rm_mm), preOp2(._NP, 0x0F, 0xD7), .RM, .ZO, .{MMXEXT}),
instr(.PMOVMSKB, ops2(.reg64, .rm_mm), preOp2(._NP, 0x0F, 0xD7), .RM, .ZO, .{ SSE, No32 }),
instr(.PMOVMSKB, ops2(.reg64, .rm_mm), preOp2(._NP, 0x0F, 0xD7), .RM, .ZO, .{ MMXEXT, No32 }),
instr(.PMOVMSKB, ops2(.reg32, .rm_xmml), preOp2(._66, 0x0F, 0xD7), .RM, .ZO, .{SSE2}),
instr(.PMOVMSKB, ops2(.reg64, .rm_xmml), preOp2(._66, 0x0F, 0xD7), .RM, .ZO, .{ SSE2, No32 }),
// PMOVSX
instr(.PMOVSXBW, ops2(.xmml, .xmml_m64), preOp3(._66, 0x0F, 0x38, 0x20), .RM, .ZO, .{SSE4_1}),
instr(.PMOVSXBD, ops2(.xmml, .xmml_m32), preOp3(._66, 0x0F, 0x38, 0x21), .RM, .ZO, .{SSE4_1}),
instr(.PMOVSXBQ, ops2(.xmml, .xmml_m16), preOp3(._66, 0x0F, 0x38, 0x22), .RM, .ZO, .{SSE4_1}),
//
instr(.PMOVSXWD, ops2(.xmml, .xmml_m64), preOp3(._66, 0x0F, 0x38, 0x23), .RM, .ZO, .{SSE4_1}),
instr(.PMOVSXWQ, ops2(.xmml, .xmml_m32), preOp3(._66, 0x0F, 0x38, 0x24), .RM, .ZO, .{SSE4_1}),
instr(.PMOVSXDQ, ops2(.xmml, .xmml_m64), preOp3(._66, 0x0F, 0x38, 0x25), .RM, .ZO, .{SSE4_1}),
// PMOVZX
instr(.PMOVZXBW, ops2(.xmml, .xmml_m64), preOp3(._66, 0x0F, 0x38, 0x30), .RM, .ZO, .{SSE4_1}),
instr(.PMOVZXBD, ops2(.xmml, .xmml_m32), preOp3(._66, 0x0F, 0x38, 0x31), .RM, .ZO, .{SSE4_1}),
instr(.PMOVZXBQ, ops2(.xmml, .xmml_m16), preOp3(._66, 0x0F, 0x38, 0x32), .RM, .ZO, .{SSE4_1}),
//
instr(.PMOVZXWD, ops2(.xmml, .xmml_m64), preOp3(._66, 0x0F, 0x38, 0x33), .RM, .ZO, .{SSE4_1}),
instr(.PMOVZXWQ, ops2(.xmml, .xmml_m32), preOp3(._66, 0x0F, 0x38, 0x34), .RM, .ZO, .{SSE4_1}),
instr(.PMOVZXDQ, ops2(.xmml, .xmml_m64), preOp3(._66, 0x0F, 0x38, 0x35), .RM, .ZO, .{SSE4_1}),
// PMULDQ
instr(.PMULDQ, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x28), .RM, .ZO, .{SSE4_1}),
// PMULHRSW
instr(.PMULHRSW, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x0B), .RM, .ZO, .{SSSE3}),
instr(.PMULHRSW, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x0B), .RM, .ZO, .{SSSE3}),
// PMULHUW
instr(.PMULHUW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xE4), .RM, .ZO, .{SSE}),
instr(.PMULHUW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xE4), .RM, .ZO, .{MMXEXT}),
instr(.PMULHUW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xE4), .RM, .ZO, .{SSE2}),
// PMULHW
instr(.PMULHW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xE5), .RM, .ZO, .{MMX}),
instr(.PMULHW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xE5), .RM, .ZO, .{SSE2}),
// PMULLD
instr(.PMULLD, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x40), .RM, .ZO, .{SSE4_1}),
// PMULLW
instr(.PMULLW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xD5), .RM, .ZO, .{MMX}),
instr(.PMULLW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xD5), .RM, .ZO, .{SSE2}),
// PMULUDQ
instr(.PMULUDQ, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xF4), .RM, .ZO, .{SSE2}),
instr(.PMULUDQ, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xF4), .RM, .ZO, .{SSE2}),
// POR
instr(.POR, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xEB), .RM, .ZO, .{MMX}),
instr(.POR, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xEB), .RM, .ZO, .{SSE2}),
// PSADBW
instr(.PSADBW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xF6), .RM, .ZO, .{SSE}),
instr(.PSADBW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xF6), .RM, .ZO, .{MMXEXT}),
instr(.PSADBW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xF6), .RM, .ZO, .{SSE2}),
// PSHUFB
instr(.PSHUFB, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x00), .RM, .ZO, .{SSSE3}),
instr(.PSHUFB, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x00), .RM, .ZO, .{SSSE3}),
// PSHUFD
instr(.PSHUFD, ops3(.xmml, .xmml_m128, .imm8), preOp2(._66, 0x0F, 0x70), .RMI, .ZO, .{SSE2}),
// PSHUFHW
instr(.PSHUFHW, ops3(.xmml, .xmml_m128, .imm8), preOp2(._F3, 0x0F, 0x70), .RMI, .ZO, .{SSE2}),
// PSHUFLW
instr(.PSHUFLW, ops3(.xmml, .xmml_m128, .imm8), preOp2(._F2, 0x0F, 0x70), .RMI, .ZO, .{SSE2}),
// PSHUFW
instr(.PSHUFW, ops3(.mm, .mm_m64, .imm8), preOp2(._NP, 0x0F, 0x70), .RMI, .ZO, .{SSE}),
instr(.PSHUFW, ops3(.mm, .mm_m64, .imm8), preOp2(._NP, 0x0F, 0x70), .RMI, .ZO, .{MMXEXT}),
// PSIGNB / PSIGNW / PSIGND
instr(.PSIGNB, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x08), .RM, .ZO, .{SSSE3}),
instr(.PSIGNB, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x08), .RM, .ZO, .{SSSE3}),
//
instr(.PSIGNW, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x09), .RM, .ZO, .{SSSE3}),
instr(.PSIGNW, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x09), .RM, .ZO, .{SSSE3}),
//
instr(.PSIGND, ops2(.mm, .mm_m64), preOp3(._NP, 0x0F, 0x38, 0x0A), .RM, .ZO, .{SSSE3}),
instr(.PSIGND, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x0A), .RM, .ZO, .{SSSE3}),
// PSLLDQ
instr(.PSLLDQ, ops2(.rm_xmml, .imm8), preOp2r(._66, 0x0F, 0x73, 7), .MI, .ZO, .{SSE2}),
// PSLLW / PSLLD / PSLLQ
// PSLLW
instr(.PSLLW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xF1), .RM, .ZO, .{MMX}),
instr(.PSLLW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xF1), .RM, .ZO, .{SSE2}),
//
instr(.PSLLW, ops2(.rm_mm, .imm8), preOp2r(._NP, 0x0F, 0x71, 6), .MI, .ZO, .{MMX}),
instr(.PSLLW, ops2(.rm_xmml, .imm8), preOp2r(._66, 0x0F, 0x71, 6), .MI, .ZO, .{SSE2}),
// PSLLD
instr(.PSLLD, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xF2), .RM, .ZO, .{MMX}),
instr(.PSLLD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xF2), .RM, .ZO, .{SSE2}),
//
instr(.PSLLD, ops2(.rm_mm, .imm8), preOp2r(._NP, 0x0F, 0x72, 6), .MI, .ZO, .{MMX}),
instr(.PSLLD, ops2(.rm_xmml, .imm8), preOp2r(._66, 0x0F, 0x72, 6), .MI, .ZO, .{SSE2}),
// PSLLQ
instr(.PSLLQ, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xF3), .RM, .ZO, .{MMX}),
instr(.PSLLQ, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xF3), .RM, .ZO, .{SSE2}),
//
instr(.PSLLQ, ops2(.rm_mm, .imm8), preOp2r(._NP, 0x0F, 0x73, 6), .MI, .ZO, .{MMX}),
instr(.PSLLQ, ops2(.rm_xmml, .imm8), preOp2r(._66, 0x0F, 0x73, 6), .MI, .ZO, .{SSE2}),
// PSRAW / PSRAD
// PSRAW
instr(.PSRAW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xE1), .RM, .ZO, .{MMX}),
instr(.PSRAW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xE1), .RM, .ZO, .{SSE2}),
// imm8 shift-by-immediate forms (opcode extension /4 in ModRM.reg).
// NOTE(review): these use .mm / .xmml while the structurally identical
// PSLLW/PSLLD/PSLLQ imm8 forms and PSLLDQ/PSRLDQ use .rm_mm / .rm_xmml —
// confirm both operand kinds encode into ModRM.rm for .MI, or unify them.
instr(.PSRAW, ops2(.mm, .imm8), preOp2r(._NP, 0x0F, 0x71, 4), .MI, .ZO, .{MMX}),
instr(.PSRAW, ops2(.xmml, .imm8), preOp2r(._66, 0x0F, 0x71, 4), .MI, .ZO, .{SSE2}),
// PSRAD
instr(.PSRAD, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xE2), .RM, .ZO, .{MMX}),
instr(.PSRAD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xE2), .RM, .ZO, .{SSE2}),
// imm8 forms, opcode extension /4 — same .mm/.xmml caveat as PSRAW above.
instr(.PSRAD, ops2(.mm, .imm8), preOp2r(._NP, 0x0F, 0x72, 4), .MI, .ZO, .{MMX}),
instr(.PSRAD, ops2(.xmml, .imm8), preOp2r(._66, 0x0F, 0x72, 4), .MI, .ZO, .{SSE2}),
// PSRLDQ
instr(.PSRLDQ, ops2(.rm_xmml, .imm8), preOp2r(._66, 0x0F, 0x73, 3), .MI, .ZO, .{SSE2}),
// PSRLW / PSRLD / PSRLQ
// PSRLW
instr(.PSRLW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xD1), .RM, .ZO, .{MMX}),
instr(.PSRLW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xD1), .RM, .ZO, .{SSE2}),
//
instr(.PSRLW, ops2(.mm, .imm8), preOp2r(._NP, 0x0F, 0x71, 2), .MI, .ZO, .{MMX}),
instr(.PSRLW, ops2(.xmml, .imm8), preOp2r(._66, 0x0F, 0x71, 2), .MI, .ZO, .{SSE2}),
// PSRLD
instr(.PSRLD, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xD2), .RM, .ZO, .{MMX}),
instr(.PSRLD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xD2), .RM, .ZO, .{SSE2}),
//
instr(.PSRLD, ops2(.mm, .imm8), preOp2r(._NP, 0x0F, 0x72, 2), .MI, .ZO, .{MMX}),
instr(.PSRLD, ops2(.xmml, .imm8), preOp2r(._66, 0x0F, 0x72, 2), .MI, .ZO, .{SSE2}),
// PSRLQ
instr(.PSRLQ, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xD3), .RM, .ZO, .{MMX}),
instr(.PSRLQ, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xD3), .RM, .ZO, .{SSE2}),
//
instr(.PSRLQ, ops2(.mm, .imm8), preOp2r(._NP, 0x0F, 0x73, 2), .MI, .ZO, .{MMX}),
instr(.PSRLQ, ops2(.xmml, .imm8), preOp2r(._66, 0x0F, 0x73, 2), .MI, .ZO, .{SSE2}),
// PSUBB / PSUBW / PSUBD
// PSUBB
instr(.PSUBB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xF8), .RM, .ZO, .{MMX}),
instr(.PSUBB, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xF8), .RM, .ZO, .{SSE2}),
// PSUBW
instr(.PSUBW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xF9), .RM, .ZO, .{MMX}),
instr(.PSUBW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xF9), .RM, .ZO, .{SSE2}),
// PSUBD
instr(.PSUBD, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xFA), .RM, .ZO, .{MMX}),
instr(.PSUBD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xFA), .RM, .ZO, .{SSE2}),
// PSUBQ
instr(.PSUBQ, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xFB), .RM, .ZO, .{SSE2}),
instr(.PSUBQ, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xFB), .RM, .ZO, .{SSE2}),
// PSUBSB / PSUBSW
// PSUBSB
instr(.PSUBSB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xE8), .RM, .ZO, .{MMX}),
instr(.PSUBSB, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xE8), .RM, .ZO, .{SSE2}),
// PSUBSW
instr(.PSUBSW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xE9), .RM, .ZO, .{MMX}),
instr(.PSUBSW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xE9), .RM, .ZO, .{SSE2}),
// PSUBUSB / PSUBUSW
// PSUBUSB
instr(.PSUBUSB, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xD8), .RM, .ZO, .{MMX}),
instr(.PSUBUSB, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xD8), .RM, .ZO, .{SSE2}),
// PSUBUSW
instr(.PSUBUSW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xD9), .RM, .ZO, .{MMX}),
instr(.PSUBUSW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xD9), .RM, .ZO, .{SSE2}),
// PTEST
instr(.PTEST, ops2(.xmml, .xmml_m128), preOp3(._66, 0x0F, 0x38, 0x17), .RM, .ZO, .{SSE4_1}),
// PUNPCKHBW / PUNPCKHWD / PUNPCKHDQ / PUNPCKHQDQ
instr(.PUNPCKHBW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x68), .RM, .ZO, .{MMX}),
instr(.PUNPCKHBW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x68), .RM, .ZO, .{SSE2}),
//
instr(.PUNPCKHWD, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x69), .RM, .ZO, .{MMX}),
instr(.PUNPCKHWD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x69), .RM, .ZO, .{SSE2}),
//
instr(.PUNPCKHDQ, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x6A), .RM, .ZO, .{MMX}),
instr(.PUNPCKHDQ, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x6A), .RM, .ZO, .{SSE2}),
//
instr(.PUNPCKHQDQ, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x6D), .RM, .ZO, .{SSE2}),
// PUNPCKLBW / PUNPCKLWD / PUNPCKLDQ / PUNPCKLQDQ
instr(.PUNPCKLBW, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x60), .RM, .ZO, .{MMX}),
instr(.PUNPCKLBW, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x60), .RM, .ZO, .{SSE2}),
//
instr(.PUNPCKLWD, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x61), .RM, .ZO, .{MMX}),
instr(.PUNPCKLWD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x61), .RM, .ZO, .{SSE2}),
//
instr(.PUNPCKLDQ, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0x62), .RM, .ZO, .{MMX}),
instr(.PUNPCKLDQ, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x62), .RM, .ZO, .{SSE2}),
//
instr(.PUNPCKLQDQ, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x6C), .RM, .ZO, .{SSE2}),
// PXOR
instr(.PXOR, ops2(.mm, .mm_m64), preOp2(._NP, 0x0F, 0xEF), .RM, .ZO, .{MMX}),
instr(.PXOR, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0xEF), .RM, .ZO, .{SSE2}),
// RCPPS
instr(.RCPPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x53), .RM, .ZO, .{SSE}),
// RCPSS
instr(.RCPSS, ops2(.xmml, .xmml_m32), preOp2(._F3, 0x0F, 0x53), .RM, .ZO, .{SSE}),
// ROUNDPD
instr(.ROUNDPD, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x09), .RMI, .ZO, .{SSE4_1}),
// ROUNDPS
instr(.ROUNDPS, ops3(.xmml, .xmml_m128, .imm8), preOp3(._66, 0x0F, 0x3A, 0x08), .RMI, .ZO, .{SSE4_1}),
// ROUNDSD
instr(.ROUNDSD, ops3(.xmml, .xmml_m64, .imm8), preOp3(._66, 0x0F, 0x3A, 0x0B), .RMI, .ZO, .{SSE4_1}),
// ROUNDSS
instr(.ROUNDSS, ops3(.xmml, .xmml_m32, .imm8), preOp3(._66, 0x0F, 0x3A, 0x0A), .RMI, .ZO, .{SSE4_1}),
// RSQRTPS
instr(.RSQRTPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x52), .RM, .ZO, .{SSE}),
// RSQRTSS
instr(.RSQRTSS, ops2(.xmml, .xmml_m32), preOp2(._F3, 0x0F, 0x52), .RM, .ZO, .{SSE}),
// SHUFPD
instr(.SHUFPD, ops3(.xmml, .xmml_m128, .imm8), preOp2(._66, 0x0F, 0xC6), .RMI, .ZO, .{SSE2}),
// SHUFPS
instr(.SHUFPS, ops3(.xmml, .xmml_m128, .imm8), preOp2(._NP, 0x0F, 0xC6), .RMI, .ZO, .{SSE}),
// SQRTPD
instr(.SQRTPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x51), .RM, .ZO, .{SSE2}),
// SQRTPS
instr(.SQRTPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x51), .RM, .ZO, .{SSE}),
// SQRTSD
instr(.SQRTSD, ops2(.xmml, .xmml_m64), preOp2(._F2, 0x0F, 0x51), .RM, .ZO, .{SSE2}),
// SQRTSS
instr(.SQRTSS, ops2(.xmml, .xmml_m32), preOp2(._F3, 0x0F, 0x51), .RM, .ZO, .{SSE}),
// SUBPD
instr(.SUBPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x5C), .RM, .ZO, .{SSE2}),
// SUBPS
instr(.SUBPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x5C), .RM, .ZO, .{SSE}),
// SUBSD
instr(.SUBSD, ops2(.xmml, .xmml_m64), preOp2(._F2, 0x0F, 0x5C), .RM, .ZO, .{SSE2}),
// SUBSS
instr(.SUBSS, ops2(.xmml, .xmml_m32), preOp2(._F3, 0x0F, 0x5C), .RM, .ZO, .{SSE}),
// UCOMISD
instr(.UCOMISD, ops2(.xmml, .xmml_m64), preOp2(._66, 0x0F, 0x2E), .RM, .ZO, .{SSE2}),
// UCOMISS
instr(.UCOMISS, ops2(.xmml, .xmml_m32), preOp2(._NP, 0x0F, 0x2E), .RM, .ZO, .{SSE}),
// UNPCKHPD
instr(.UNPCKHPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x15), .RM, .ZO, .{SSE2}),
// UNPCKHPS
instr(.UNPCKHPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x15), .RM, .ZO, .{SSE}),
// UNPCKLPD
instr(.UNPCKLPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x14), .RM, .ZO, .{SSE2}),
// UNPCKLPS
instr(.UNPCKLPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x14), .RM, .ZO, .{SSE}),
// XORPD
instr(.XORPD, ops2(.xmml, .xmml_m128), preOp2(._66, 0x0F, 0x57), .RM, .ZO, .{SSE2}),
// XORPS
instr(.XORPS, ops2(.xmml, .xmml_m128), preOp2(._NP, 0x0F, 0x57), .RM, .ZO, .{SSE}),
//
// 3DNow!
//
// FEMMS (faster EMMS, 3DNow!)
// NOTE(review): zero-operand instruction declared with the .RM encoding
// mode next to ops0() — verify this shouldn't be .ZO like other
// operand-less entries in this table.
instr(.FEMMS, ops0(), Op2(0x0F, 0x0E), .RM, .ZO, .{cpu._3DNOW}),
// PAVGUSB
instr(.PAVGUSB, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0xBF), .RM, .ZO, .{cpu._3DNOW}),
// PF2ID
instr(.PF2ID, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x1D), .RM, .ZO, .{cpu._3DNOW}),
// PFACC
instr(.PFACC, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0xAE), .RM, .ZO, .{cpu._3DNOW}),
// PFADD
instr(.PFADD, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x9E), .RM, .ZO, .{cpu._3DNOW}),
// PFCMPEQ
instr(.PFCMPEQ, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0xB0), .RM, .ZO, .{cpu._3DNOW}),
// PFCMPGE
instr(.PFCMPGE, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x90), .RM, .ZO, .{cpu._3DNOW}),
// PFCMPGT
instr(.PFCMPGT, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0xA0), .RM, .ZO, .{cpu._3DNOW}),
// PFMAX
instr(.PFMAX, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0xA4), .RM, .ZO, .{cpu._3DNOW}),
// PFMIN
instr(.PFMIN, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x94), .RM, .ZO, .{cpu._3DNOW}),
// PFMUL
instr(.PFMUL, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0xB4), .RM, .ZO, .{cpu._3DNOW}),
// PFRCP
instr(.PFRCP, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x96), .RM, .ZO, .{cpu._3DNOW}),
// PFRCPIT1
instr(.PFRCPIT1, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0xA6), .RM, .ZO, .{cpu._3DNOW}),
// PFRCPIT2
instr(.PFRCPIT2, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0xB6), .RM, .ZO, .{cpu._3DNOW}),
// PFRSQIT1
instr(.PFRSQIT1, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0xA7), .RM, .ZO, .{cpu._3DNOW}),
// PFRSQRT
instr(.PFRSQRT, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x97), .RM, .ZO, .{cpu._3DNOW}),
// PFSUB
instr(.PFSUB, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x9A), .RM, .ZO, .{cpu._3DNOW}),
// PFSUBR
instr(.PFSUBR, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0xAA), .RM, .ZO, .{cpu._3DNOW}),
// PI2FD
instr(.PI2FD, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x0D), .RM, .ZO, .{cpu._3DNOW}),
// PMULHRW
// AMD 3DNow! PMULHRW uses opcode suffix 0xB7 (per the AMD 3DNow!
// Technology Manual); 0x0C, which this row previously used, is PI2FW in
// the 3DNow! extensions (correctly listed in that section). The Cyrix
// EMMI variant keeps its distinct 0F 59 encoding.
instr(.PMULHRW, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0xB7), .RM, .ZO, .{cpu._3DNOW}),
instr(.PMULHRW, ops2(.mm, .mm_m64), Op2(0x0F, 0x59), .RM, .ZO, .{ cpu.EMMI, cpu.Cyrix }),
// PREFETCH
// see above
// PREFETCHW
// see above
// PFRCPV
instr(.PFRCPV, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x87), .RM, .ZO, .{ cpu._3DNOW, cpu.Cyrix }),
// PFRSQRTV
instr(.PFRSQRTV, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x86), .RM, .ZO, .{ cpu._3DNOW, cpu.Cyrix }),
//
// 3DNow! Extensions
//
// PF2IW
instr(.PF2IW, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x1C), .RM, .ZO, .{cpu._3DNOWEXT}),
// PFNACC
instr(.PFNACC, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x8A), .RM, .ZO, .{cpu._3DNOWEXT}),
// PFPNACC
instr(.PFPNACC, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x8E), .RM, .ZO, .{cpu._3DNOWEXT}),
// PI2FW
instr(.PI2FW, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0x0C), .RM, .ZO, .{cpu._3DNOWEXT}),
// PSWAPD
instr(.PSWAPD, ops2(.mm, .mm_m64), Op3DNow(0x0F, 0x0F, 0xBB), .RM, .ZO, .{cpu._3DNOWEXT}),
//
// SSE4A
//
// EXTRQ
instr(.EXTRQ, ops3(.rm_xmml, .imm8, .imm8), preOp2r(._66, 0x0F, 0x78, 0), .MII, .ZO, .{SSE4A}),
instr(.EXTRQ, ops2(.xmml, .rm_xmml), preOp2(._66, 0x0F, 0x79), .RM, .ZO, .{SSE4A}),
// INSERTQ
instr(.INSERTQ, ops4(.xmml, .rm_xmml, .imm8, .imm8), preOp2r(._F2, 0x0F, 0x78, 0), .RMII, .ZO, .{SSE4A}),
instr(.INSERTQ, ops2(.xmml, .rm_xmml), preOp2(._F2, 0x0F, 0x79), .RM, .ZO, .{SSE4A}),
// MOVNTSD
instr(.MOVNTSD, ops2(.rm_mem64, .xmml), preOp2(._F2, 0x0F, 0x2B), .MR, .ZO, .{SSE4A}),
// MOVNTSS
instr(.MOVNTSS, ops2(.rm_mem32, .xmml), preOp2(._F3, 0x0F, 0x2B), .MR, .ZO, .{SSE4A}),
//
// Cyrix EMMI (Extended Multi-Media Instructions)
//
// PADDSIW
instr(.PADDSIW, ops2(.mm, .mm_m64), Op2(0x0F, 0x51), .RM, .ZO, .{ cpu.EMMI, cpu.Cyrix }),
// PAVEB
instr(.PAVEB, ops2(.mm, .mm_m64), Op2(0x0F, 0x50), .RM, .ZO, .{ cpu.EMMI, cpu.Cyrix }),
// PDISTIB
instr(.PDISTIB, ops2(.mm, .rm_mem64), Op2(0x0F, 0x54), .RM, .ZO, .{ cpu.EMMI, cpu.Cyrix }),
// PMACHRIW
instr(.PMACHRIW, ops2(.mm, .rm_mem64), Op2(0x0F, 0x5E), .RM, .ZO, .{ cpu.EMMI, cpu.Cyrix }),
// PMAGW
instr(.PMAGW, ops2(.mm, .mm_m64), Op2(0x0F, 0x52), .RM, .ZO, .{ cpu.EMMI, cpu.Cyrix }),
// PMULHRW / PMULHRIW
// instr(.PMULHRW, ops2(.mm, .mm_m64), Op2(0x0F, 0x59), .RM, .ZO, .{cpu.EMMI, cpu.Cyrix} ), // see above
instr(.PMULHRIW, ops2(.mm, .mm_m64), Op2(0x0F, 0x5D), .RM, .ZO, .{ cpu.EMMI, cpu.Cyrix }),
// PMVZB / PMVNZB / PMVLZB / PMVGEZB
instr(.PMVZB, ops2(.mm, .rm_mem64), Op2(0x0F, 0x58), .RM, .ZO, .{ cpu.EMMI, cpu.Cyrix }),
instr(.PMVNZB, ops2(.mm, .rm_mem64), Op2(0x0F, 0x5A), .RM, .ZO, .{ cpu.EMMI, cpu.Cyrix }),
instr(.PMVLZB, ops2(.mm, .rm_mem64), Op2(0x0F, 0x5B), .RM, .ZO, .{ cpu.EMMI, cpu.Cyrix }),
instr(.PMVGEZB, ops2(.mm, .rm_mem64), Op2(0x0F, 0x5C), .RM, .ZO, .{ cpu.EMMI, cpu.Cyrix }),
// PSUBSIW
instr(.PSUBSIW, ops2(.mm, .mm_m64), Op2(0x0F, 0x55), .RM, .ZO, .{ cpu.EMMI, cpu.Cyrix }),
//
// SIMD vector instructions (AVX, AVX2, AVX512)
//
// VADDPD
vec(.VADDPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x58), .RVM, .{AVX}),
vec(.VADDPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x58), .RVM, .{AVX}),
vec(.VADDPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x58, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VADDPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x58, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VADDPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F, .W1, 0x58, full), .RVM, .{AVX512F}),
// VADDPS
vec(.VADDPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x58), .RVM, .{AVX}),
vec(.VADDPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x58), .RVM, .{AVX}),
vec(.VADDPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x58, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VADDPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x58, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VADDPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._NP, ._0F, .W0, 0x58, full), .RVM, .{AVX512F}),
// VADDSD
vec(.VADDSD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._F2, ._0F, .WIG, 0x58), .RVM, .{AVX}),
vec(.VADDSD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._F2, ._0F, .W1, 0x58, t1s), .RVM, .{AVX512F}),
// VADDSS
vec(.VADDSS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._F3, ._0F, .WIG, 0x58), .RVM, .{AVX}),
vec(.VADDSS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._F3, ._0F, .W0, 0x58, t1s), .RVM, .{AVX512F}),
// VADDSUBPD
vec(.VADDSUBPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xD0), .RVM, .{AVX}),
vec(.VADDSUBPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xD0), .RVM, .{AVX}),
// VADDSUBPS
vec(.VADDSUBPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._F2, ._0F, .WIG, 0xD0), .RVM, .{AVX}),
vec(.VADDSUBPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._F2, ._0F, .WIG, 0xD0), .RVM, .{AVX}),
// VANDPD
vec(.VANDPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x54), .RVM, .{AVX}),
vec(.VANDPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x54), .RVM, .{AVX}),
vec(.VANDPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x54, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VANDPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x54, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VANDPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0x54, full), .RVM, .{AVX512DQ}),
// VANDPS
vec(.VANDPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x54), .RVM, .{AVX}),
vec(.VANDPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x54), .RVM, .{AVX}),
vec(.VANDPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x54, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VANDPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x54, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VANDPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._NP, ._0F, .W0, 0x54, full), .RVM, .{AVX512DQ}),
// VANDNPD
vec(.VANDNPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x55), .RVM, .{AVX}),
vec(.VANDNPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x55), .RVM, .{AVX}),
vec(.VANDNPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x55, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VANDNPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x55, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VANDNPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0x55, full), .RVM, .{AVX512DQ}),
// VANDNPS
vec(.VANDNPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x55), .RVM, .{AVX}),
vec(.VANDNPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x55), .RVM, .{AVX}),
vec(.VANDNPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x55, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VANDNPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x55, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VANDNPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._NP, ._0F, .W0, 0x55, full), .RVM, .{AVX512DQ}),
// VBLENDPD
vec(.VBLENDPD, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x0D), .RVMI, .{AVX}),
vec(.VBLENDPD, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .WIG, 0x0D), .RVMI, .{AVX}),
// VBLENDPS
vec(.VBLENDPS, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x0C), .RVMI, .{AVX}),
vec(.VBLENDPS, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .WIG, 0x0C), .RVMI, .{AVX}),
// VBLENDVPD
vec(.VBLENDVPD, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x4B), .RVMR, .{AVX}),
vec(.VBLENDVPD, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x4B), .RVMR, .{AVX}),
// VBLENDVPS
vec(.VBLENDVPS, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x4A), .RVMR, .{AVX}),
vec(.VBLENDVPS, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x4A), .RVMR, .{AVX}),
// VCMPPD
vec(.VCMPPD, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F, .WIG, 0xC2), .RVMI, .{AVX}),
vec(.VCMPPD, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F, .WIG, 0xC2), .RVMI, .{AVX}),
vec(.VCMPPD, ops4(.reg_k_k, .xmm, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F, .W1, 0xC2, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VCMPPD, ops4(.reg_k_k, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F, .W1, 0xC2, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VCMPPD, ops4(.reg_k_k, .zmm, .zmm_m512_m64bcst_sae, .imm8), evex(.L512, ._66, ._0F, .W1, 0xC2, full), .RVMI, .{AVX512F}),
// VCMPPS
vec(.VCMPPS, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._NP, ._0F, .WIG, 0xC2), .RVMI, .{AVX}),
vec(.VCMPPS, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._NP, ._0F, .WIG, 0xC2), .RVMI, .{AVX}),
vec(.VCMPPS, ops4(.reg_k_k, .xmm, .xmm_m128_m32bcst, .imm8), evex(.L128, ._NP, ._0F, .W0, 0xC2, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VCMPPS, ops4(.reg_k_k, .ymm, .ymm_m256_m32bcst, .imm8), evex(.L256, ._NP, ._0F, .W0, 0xC2, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VCMPPS, ops4(.reg_k_k, .zmm, .zmm_m512_m32bcst_sae, .imm8), evex(.L512, ._NP, ._0F, .W0, 0xC2, full), .RVMI, .{AVX512F}),
// VCMPSD
vec(.VCMPSD, ops4(.xmml, .xmml, .xmml_m64, .imm8), vex(.LIG, ._F2, ._0F, .WIG, 0xC2), .RVMI, .{AVX}),
vec(.VCMPSD, ops4(.reg_k_k, .xmm, .xmm_m64_sae, .imm8), evex(.LIG, ._F2, ._0F, .W1, 0xC2, t1s), .RVMI, .{ AVX512VL, AVX512F }),
// VCMPSS
vec(.VCMPSS, ops4(.xmml, .xmml, .xmml_m32, .imm8), vex(.LIG, ._F3, ._0F, .WIG, 0xC2), .RVMI, .{AVX}),
vec(.VCMPSS, ops4(.reg_k_k, .xmm, .xmm_m32_sae, .imm8), evex(.LIG, ._F3, ._0F, .W0, 0xC2, t1s), .RVMI, .{ AVX512VL, AVX512F }),
// VCOMISD
vec(.VCOMISD, ops2(.xmml, .xmml_m64), vex(.LIG, ._66, ._0F, .WIG, 0x2F), .vRM, .{AVX}),
vec(.VCOMISD, ops2(.xmm, .xmm_m64_sae), evex(.LIG, ._66, ._0F, .W1, 0x2F, t1s), .vRM, .{AVX512F}),
// VCOMISS
vec(.VCOMISS, ops2(.xmml, .xmml_m32), vex(.LIG, ._NP, ._0F, .WIG, 0x2F), .vRM, .{AVX}),
vec(.VCOMISS, ops2(.xmm, .xmm_m32_sae), evex(.LIG, ._NP, ._0F, .W0, 0x2F, t1s), .vRM, .{AVX512F}),
// VCVTDQ2PD
// Widening convert: each form reads only half a vector of dwords, so the
// EVEX.128 memory operand is m64 (two dwords), not m128 — per the Intel
// SDM (xmm2/m64/m32bcst), matching the VEX .xmml_m64 form above and the
// analogous VCVTPS2PD EVEX.128 row.
vec(.VCVTDQ2PD, ops2(.xmml, .xmml_m64), vex(.L128, ._F3, ._0F, .WIG, 0xE6), .vRM, .{AVX}),
vec(.VCVTDQ2PD, ops2(.ymml, .xmml_m128), vex(.L256, ._F3, ._0F, .WIG, 0xE6), .vRM, .{AVX}),
vec(.VCVTDQ2PD, ops2(.xmm_kz, .xmm_m64_m32bcst), evex(.L128, ._F3, ._0F, .W0, 0xE6, half), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTDQ2PD, ops2(.ymm_kz, .xmm_m128_m32bcst), evex(.L256, ._F3, ._0F, .W0, 0xE6, half), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTDQ2PD, ops2(.zmm_kz, .ymm_m256_m32bcst), evex(.L512, ._F3, ._0F, .W0, 0xE6, half), .vRM, .{AVX512F}),
// VCVTDQ2PS
vec(.VCVTDQ2PS, ops2(.xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x5B), .vRM, .{AVX}),
vec(.VCVTDQ2PS, ops2(.ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x5B), .vRM, .{AVX}),
vec(.VCVTDQ2PS, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x5B, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTDQ2PS, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x5B, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTDQ2PS, ops2(.zmm_kz, .zmm_m512_m32bcst_er), evex(.L512, ._NP, ._0F, .W0, 0x5B, full), .vRM, .{AVX512F}),
// VCVTPD2DQ
vec(.VCVTPD2DQ, ops2(.xmml, .xmml_m128), vex(.L128, ._F2, ._0F, .WIG, 0xE6), .vRM, .{AVX}),
vec(.VCVTPD2DQ, ops2(.xmml, .ymml_m256), vex(.L256, ._F2, ._0F, .WIG, 0xE6), .vRM, .{AVX}),
vec(.VCVTPD2DQ, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._F2, ._0F, .W1, 0xE6, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTPD2DQ, ops2(.xmm_kz, .ymm_m256_m64bcst), evex(.L256, ._F2, ._0F, .W1, 0xE6, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTPD2DQ, ops2(.ymm_kz, .zmm_m512_m64bcst_er), evex(.L512, ._F2, ._0F, .W1, 0xE6, full), .vRM, .{AVX512F}),
// VCVTPD2PS
vec(.VCVTPD2PS, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x5A), .vRM, .{AVX}),
vec(.VCVTPD2PS, ops2(.xmml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x5A), .vRM, .{AVX}),
vec(.VCVTPD2PS, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x5A, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTPD2PS, ops2(.xmm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x5A, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTPD2PS, ops2(.ymm_kz, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F, .W1, 0x5A, full), .vRM, .{AVX512F}),
// VCVTPS2DQ
vec(.VCVTPS2DQ, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x5B), .vRM, .{AVX}),
vec(.VCVTPS2DQ, ops2(.ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x5B), .vRM, .{AVX}),
vec(.VCVTPS2DQ, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F, .W0, 0x5B, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTPS2DQ, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F, .W0, 0x5B, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTPS2DQ, ops2(.zmm_kz, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F, .W0, 0x5B, full), .vRM, .{AVX512F}),
// VCVTPS2PD
vec(.VCVTPS2PD, ops2(.xmml, .xmml_m64), vex(.L128, ._NP, ._0F, .WIG, 0x5A), .vRM, .{AVX}),
vec(.VCVTPS2PD, ops2(.ymml, .xmml_m128), vex(.L256, ._NP, ._0F, .WIG, 0x5A), .vRM, .{AVX}),
vec(.VCVTPS2PD, ops2(.xmm_kz, .xmm_m64_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x5A, half), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTPS2PD, ops2(.ymm_kz, .xmm_m128_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x5A, half), .vRM, .{AVX512VL}),
vec(.VCVTPS2PD, ops2(.zmm_kz, .ymm_m256_m32bcst_sae), evex(.L512, ._NP, ._0F, .W0, 0x5A, half), .vRM, .{AVX512F}),
// VCVTSD2SI
vec(.VCVTSD2SI, ops2(.reg32, .xmml_m64), vex(.LIG, ._F2, ._0F, .W0, 0x2D), .vRM, .{AVX}),
vec(.VCVTSD2SI, ops2(.reg64, .xmml_m64), vex(.LIG, ._F2, ._0F, .W1, 0x2D), .vRM, .{AVX}),
vec(.VCVTSD2SI, ops2(.reg32, .xmm_m64_er), evex(.LIG, ._F2, ._0F, .W0, 0x2D, t1f), .vRM, .{AVX512F}),
vec(.VCVTSD2SI, ops2(.reg64, .xmm_m64_er), evex(.LIG, ._F2, ._0F, .W1, 0x2D, t1f), .vRM, .{AVX512F}),
// VCVTSD2SS
vec(.VCVTSD2SS, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._F2, ._0F, .WIG, 0x5A), .RVM, .{AVX}),
vec(.VCVTSD2SS, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._F2, ._0F, .W1, 0x5A, t1s), .RVM, .{AVX512F}),
// VCVTSI2SD
vec(.VCVTSI2SD, ops3(.xmml, .xmml, .rm32), vex(.LIG, ._F2, ._0F, .W0, 0x2A), .RVM, .{AVX}),
vec(.VCVTSI2SD, ops3(.xmml, .xmml, .rm64), vex(.LIG, ._F2, ._0F, .W1, 0x2A), .RVM, .{ AVX, No32 }),
vec(.VCVTSI2SD, ops3(.xmm, .xmm, .rm32), evex(.LIG, ._F2, ._0F, .W0, 0x2A, t1s), .RVM, .{AVX512F}),
vec(.VCVTSI2SD, ops3(.xmm, .xmm, .rm64_er), evex(.LIG, ._F2, ._0F, .W1, 0x2A, t1s), .RVM, .{ AVX512F, No32 }),
// VCVTSI2SS
vec(.VCVTSI2SS, ops3(.xmml, .xmml, .rm32), vex(.LIG, ._F3, ._0F, .W0, 0x2A), .RVM, .{AVX}),
vec(.VCVTSI2SS, ops3(.xmml, .xmml, .rm64), vex(.LIG, ._F3, ._0F, .W1, 0x2A), .RVM, .{ AVX, No32 }),
vec(.VCVTSI2SS, ops3(.xmm, .xmm, .rm32_er), evex(.LIG, ._F3, ._0F, .W0, 0x2A, t1s), .RVM, .{AVX512F}),
vec(.VCVTSI2SS, ops3(.xmm, .xmm, .rm64_er), evex(.LIG, ._F3, ._0F, .W1, 0x2A, t1s), .RVM, .{ AVX512F, No32 }),
// VCVTSS2SD
vec(.VCVTSS2SD, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._F3, ._0F, .WIG, 0x5A), .RVM, .{AVX}),
vec(.VCVTSS2SD, ops3(.xmm_kz, .xmm, .xmm_m32_sae), evex(.LIG, ._F3, ._0F, .W0, 0x5A, t1s), .RVM, .{AVX512F}),
// VCVTSS2SI
vec(.VCVTSS2SI, ops2(.reg32, .xmml_m32), vex(.LIG, ._F3, ._0F, .W0, 0x2D), .vRM, .{AVX}),
vec(.VCVTSS2SI, ops2(.reg64, .xmml_m32), vex(.LIG, ._F3, ._0F, .W1, 0x2D), .vRM, .{ AVX, No32 }),
vec(.VCVTSS2SI, ops2(.reg32, .xmm_m32_er), evex(.LIG, ._F3, ._0F, .W0, 0x2D, t1f), .vRM, .{AVX512F}),
vec(.VCVTSS2SI, ops2(.reg64, .xmm_m32_er), evex(.LIG, ._F3, ._0F, .W1, 0x2D, t1f), .vRM, .{ AVX512F, No32 }),
// VCVTTPD2DQ
vec(.VCVTTPD2DQ, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xE6), .vRM, .{AVX}),
vec(.VCVTTPD2DQ, ops2(.xmml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xE6), .vRM, .{AVX}),
vec(.VCVTTPD2DQ, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0xE6, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTTPD2DQ, ops2(.xmm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0xE6, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTTPD2DQ, ops2(.ymm_kz, .zmm_m512_m64bcst_sae), evex(.L512, ._66, ._0F, .W1, 0xE6, full), .vRM, .{AVX512F}),
// VCVTTPS2DQ
vec(.VCVTTPS2DQ, ops2(.xmml, .xmml_m128), vex(.L128, ._F3, ._0F, .WIG, 0x5B), .vRM, .{AVX}),
vec(.VCVTTPS2DQ, ops2(.ymml, .ymml_m256), vex(.L256, ._F3, ._0F, .WIG, 0x5B), .vRM, .{AVX}),
vec(.VCVTTPS2DQ, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._F3, ._0F, .W0, 0x5B, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTTPS2DQ, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._F3, ._0F, .W0, 0x5B, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTTPS2DQ, ops2(.zmm_kz, .zmm_m512_m32bcst_sae), evex(.L512, ._F3, ._0F, .W0, 0x5B, full), .vRM, .{AVX512F}),
// VCVTTSD2SI
vec(.VCVTTSD2SI, ops2(.reg32, .xmml_m64), vex(.LIG, ._F2, ._0F, .W0, 0x2C), .vRM, .{AVX}),
vec(.VCVTTSD2SI, ops2(.reg64, .xmml_m64), vex(.LIG, ._F2, ._0F, .W1, 0x2C), .vRM, .{ AVX, No32 }),
vec(.VCVTTSD2SI, ops2(.reg32, .xmm_m64_sae), evex(.LIG, ._F2, ._0F, .W0, 0x2C, t1f), .vRM, .{AVX512F}),
vec(.VCVTTSD2SI, ops2(.reg64, .xmm_m64_sae), evex(.LIG, ._F2, ._0F, .W1, 0x2C, t1f), .vRM, .{ AVX512F, No32 }),
// VCVTTSS2SI
vec(.VCVTTSS2SI, ops2(.reg32, .xmml_m32), vex(.LIG, ._F3, ._0F, .W0, 0x2C), .vRM, .{AVX}),
vec(.VCVTTSS2SI, ops2(.reg64, .xmml_m32), vex(.LIG, ._F3, ._0F, .W1, 0x2C), .vRM, .{ AVX, No32 }),
vec(.VCVTTSS2SI, ops2(.reg32, .xmm_m32_sae), evex(.LIG, ._F3, ._0F, .W0, 0x2C, t1f), .vRM, .{AVX512F}),
vec(.VCVTTSS2SI, ops2(.reg64, .xmm_m32_sae), evex(.LIG, ._F3, ._0F, .W1, 0x2C, t1f), .vRM, .{ AVX512F, No32 }),
// VDIVPD
vec(.VDIVPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x5E), .RVM, .{AVX}),
vec(.VDIVPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x5E), .RVM, .{AVX}),
vec(.VDIVPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x5E, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VDIVPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x5E, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VDIVPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F, .W1, 0x5E, full), .RVM, .{AVX512F}),
// VDIVPS
vec(.VDIVPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x5E), .RVM, .{AVX}),
vec(.VDIVPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x5E), .RVM, .{AVX}),
vec(.VDIVPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x5E, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VDIVPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x5E, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VDIVPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._NP, ._0F, .W0, 0x5E, full), .RVM, .{AVX512F}),
// VDIVSD
vec(.VDIVSD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._F2, ._0F, .WIG, 0x5E), .RVM, .{AVX}),
vec(.VDIVSD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._F2, ._0F, .W1, 0x5E, t1s), .RVM, .{AVX512F}),
// VDIVSS
vec(.VDIVSS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._F3, ._0F, .WIG, 0x5E), .RVM, .{AVX}),
vec(.VDIVSS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._F3, ._0F, .W0, 0x5E, t1s), .RVM, .{AVX512F}),
// VDPPD
vec(.VDPPD, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x41), .RVMI, .{AVX}),
// VDPPS
vec(.VDPPS, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x40), .RVMI, .{AVX}),
vec(.VDPPS, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .WIG, 0x40), .RVMI, .{AVX}),
// VEXTRACTPS
vec(.VEXTRACTPS, ops3(.rm32, .xmml, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x17), .vMRI, .{AVX}),
vec(.VEXTRACTPS, ops3(.reg64, .xmml, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x17), .vMRI, .{ AVX, No32 }),
vec(.VEXTRACTPS, ops3(.rm32, .xmm, .imm8), evex(.L128, ._66, ._0F3A, .WIG, 0x17, t1s), .vMRI, .{AVX512F}),
vec(.VEXTRACTPS, ops3(.reg64, .xmm, .imm8), evex(.L128, ._66, ._0F3A, .WIG, 0x17, t1s), .vMRI, .{ AVX512F, No32 }),
// VGF2P8AFFINEINVQB
vec(.VGF2P8AFFINEINVQB, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .W1, 0xCF), .RVMI, .{ AVX, GFNI }),
vec(.VGF2P8AFFINEINVQB, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .W1, 0xCF), .RVMI, .{ AVX, GFNI }),
vec(.VGF2P8AFFINEINVQB, ops4(.xmm_kz, .xmm, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0xCF, full), .RVMI, .{ AVX512VL, GFNI }),
vec(.VGF2P8AFFINEINVQB, ops4(.ymm_kz, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0xCF, full), .RVMI, .{ AVX512VL, GFNI }),
vec(.VGF2P8AFFINEINVQB, ops4(.zmm_kz, .zmm, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0xCF, full), .RVMI, .{ AVX512F, GFNI }),
// VGF2P8AFFINEQB
vec(.VGF2P8AFFINEQB, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .W1, 0xCE), .RVMI, .{ AVX, GFNI }),
vec(.VGF2P8AFFINEQB, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .W1, 0xCE), .RVMI, .{ AVX, GFNI }),
vec(.VGF2P8AFFINEQB, ops4(.xmm_kz, .xmm, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0xCE, full), .RVMI, .{ AVX512VL, GFNI }),
vec(.VGF2P8AFFINEQB, ops4(.ymm_kz, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0xCE, full), .RVMI, .{ AVX512VL, GFNI }),
vec(.VGF2P8AFFINEQB, ops4(.zmm_kz, .zmm, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0xCE, full), .RVMI, .{ AVX512F, GFNI }),
// VGF2P8MULB
vec(.VGF2P8MULB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0xCF), .RVM, .{ AVX, GFNI }),
vec(.VGF2P8MULB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0xCF), .RVM, .{ AVX, GFNI }),
vec(.VGF2P8MULB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W0, 0xCF, fmem), .RVM, .{ AVX512VL, GFNI }),
vec(.VGF2P8MULB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W0, 0xCF, fmem), .RVM, .{ AVX512VL, GFNI }),
vec(.VGF2P8MULB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W0, 0xCF, fmem), .RVM, .{ AVX512F, GFNI }),
// VHADDPD
vec(.VHADDPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x7C), .RVM, .{AVX}),
vec(.VHADDPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x7C), .RVM, .{AVX}),
// VHADDPS
vec(.VHADDPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._F2, ._0F, .WIG, 0x7C), .RVM, .{AVX}),
vec(.VHADDPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._F2, ._0F, .WIG, 0x7C), .RVM, .{AVX}),
// VHSUBPD
vec(.VHSUBPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x7D), .RVM, .{AVX}),
vec(.VHSUBPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x7D), .RVM, .{AVX}),
// VHSUBPS
vec(.VHSUBPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._F2, ._0F, .WIG, 0x7D), .RVM, .{AVX}),
vec(.VHSUBPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._F2, ._0F, .WIG, 0x7D), .RVM, .{AVX}),
    // VINSERTPS
vec(.VINSERTPS, ops4(.xmml, .xmml, .xmml_m32, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x21), .RVMI, .{AVX}),
vec(.VINSERTPS, ops4(.xmm, .xmm, .xmm_m32, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x21, t1s), .RVMI, .{AVX512F}),
    // VLDDQU
vec(.VLDDQU, ops2(.xmml, .rm_mem128), vex(.L128, ._F2, ._0F, .WIG, 0xF0), .vRM, .{AVX}),
vec(.VLDDQU, ops2(.ymml, .rm_mem256), vex(.L256, ._F2, ._0F, .WIG, 0xF0), .vRM, .{AVX}),
// VLDMXCSR
vec(.VLDMXCSR, ops1(.rm_mem32), vexr(.LZ, ._NP, ._0F, .WIG, 0xAE, 2), .vM, .{AVX}),
// VMASKMOVDQU
vec(.VMASKMOVDQU, ops2(.xmml, .xmml), vex(.L128, ._66, ._0F, .WIG, 0xF7), .vRM, .{AVX}),
// VMAXPD
vec(.VMAXPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x5F), .RVM, .{AVX}),
vec(.VMAXPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x5F), .RVM, .{AVX}),
vec(.VMAXPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x5F, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VMAXPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x5F, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VMAXPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_sae), evex(.L512, ._66, ._0F, .W1, 0x5F, full), .RVM, .{AVX512DQ}),
// VMAXPS
vec(.VMAXPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x5F), .RVM, .{AVX}),
vec(.VMAXPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x5F), .RVM, .{AVX}),
vec(.VMAXPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x5F, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VMAXPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x5F, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VMAXPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_sae), evex(.L512, ._NP, ._0F, .W0, 0x5F, full), .RVM, .{AVX512DQ}),
// VMAXSD
vec(.VMAXSD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._F2, ._0F, .WIG, 0x5F), .RVM, .{AVX}),
vec(.VMAXSD, ops3(.xmm_kz, .xmm, .xmm_m64_sae), evex(.LIG, ._F2, ._0F, .W1, 0x5F, t1s), .RVM, .{AVX512F}),
// VMAXSS
vec(.VMAXSS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._F3, ._0F, .WIG, 0x5F), .RVM, .{AVX}),
vec(.VMAXSS, ops3(.xmm_kz, .xmm, .xmm_m32_sae), evex(.LIG, ._F3, ._0F, .W0, 0x5F, t1s), .RVM, .{AVX512F}),
// VMINPD
vec(.VMINPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x5D), .RVM, .{AVX}),
vec(.VMINPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x5D), .RVM, .{AVX}),
vec(.VMINPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x5D, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VMINPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x5D, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VMINPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_sae), evex(.L512, ._66, ._0F, .W1, 0x5D, full), .RVM, .{AVX512DQ}),
// VMINPS
vec(.VMINPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x5D), .RVM, .{AVX}),
vec(.VMINPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x5D), .RVM, .{AVX}),
vec(.VMINPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x5D, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VMINPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x5D, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VMINPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_sae), evex(.L512, ._NP, ._0F, .W0, 0x5D, full), .RVM, .{AVX512DQ}),
// VMINSD
vec(.VMINSD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._F2, ._0F, .WIG, 0x5D), .RVM, .{AVX}),
vec(.VMINSD, ops3(.xmm_kz, .xmm, .xmm_m64_sae), evex(.LIG, ._F2, ._0F, .W1, 0x5D, t1s), .RVM, .{AVX512F}),
// VMINSS
vec(.VMINSS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._F3, ._0F, .WIG, 0x5D), .RVM, .{AVX}),
vec(.VMINSS, ops3(.xmm_kz, .xmm, .xmm_m32_sae), evex(.LIG, ._F3, ._0F, .W0, 0x5D, t1s), .RVM, .{AVX512F}),
// VMOVAPD
vec(.VMOVAPD, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x28), .vRM, .{AVX}),
vec(.VMOVAPD, ops2(.ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x28), .vRM, .{AVX}),
vec(.VMOVAPD, ops2(.xmml_m128, .xmml), vex(.L128, ._66, ._0F, .WIG, 0x29), .vMR, .{AVX}),
vec(.VMOVAPD, ops2(.ymml_m256, .ymml), vex(.L256, ._66, ._0F, .WIG, 0x29), .vMR, .{AVX}),
//
vec(.VMOVAPD, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F, .W1, 0x28, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVAPD, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F, .W1, 0x28, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVAPD, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F, .W1, 0x28, fmem), .vRM, .{AVX512F}),
vec(.VMOVAPD, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._66, ._0F, .W1, 0x29, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVAPD, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._66, ._0F, .W1, 0x29, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVAPD, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._66, ._0F, .W1, 0x29, fmem), .vMR, .{AVX512F}),
// VMOVAPS
vec(.VMOVAPS, ops2(.xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x28), .vRM, .{AVX}),
vec(.VMOVAPS, ops2(.ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x28), .vRM, .{AVX}),
vec(.VMOVAPS, ops2(.xmml_m128, .xmml), vex(.L128, ._NP, ._0F, .WIG, 0x29), .vMR, .{AVX}),
vec(.VMOVAPS, ops2(.ymml_m256, .ymml), vex(.L256, ._NP, ._0F, .WIG, 0x29), .vMR, .{AVX}),
//
vec(.VMOVAPS, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._NP, ._0F, .W0, 0x28, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVAPS, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._NP, ._0F, .W0, 0x28, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVAPS, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._NP, ._0F, .W0, 0x28, fmem), .vRM, .{AVX512F}),
vec(.VMOVAPS, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._NP, ._0F, .W0, 0x29, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVAPS, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._NP, ._0F, .W0, 0x29, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVAPS, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._NP, ._0F, .W0, 0x29, fmem), .vMR, .{AVX512F}),
// VMOVD
// xmm[0..15]
vec(.VMOVD, ops2(.xmml, .rm32), vex(.L128, ._66, ._0F, .W0, 0x6E), .vRM, .{AVX}),
vec(.VMOVD, ops2(.rm32, .xmml), vex(.L128, ._66, ._0F, .W0, 0x7E), .vMR, .{AVX}),
vec(.VMOVD, ops2(.xmml, .rm64), vex(.L128, ._66, ._0F, .W1, 0x6E), .vRM, .{ AVX, No32 }),
vec(.VMOVD, ops2(.rm64, .xmml), vex(.L128, ._66, ._0F, .W1, 0x7E), .vMR, .{ AVX, No32 }),
// xmm[0..31]
vec(.VMOVD, ops2(.xmm, .rm32), evex(.L128, ._66, ._0F, .W0, 0x6E, t1s), .vRM, .{AVX512F}),
vec(.VMOVD, ops2(.rm32, .xmm), evex(.L128, ._66, ._0F, .W0, 0x7E, t1s), .vMR, .{AVX512F}),
vec(.VMOVD, ops2(.xmm, .rm64), evex(.L128, ._66, ._0F, .W1, 0x6E, t1s), .vRM, .{ AVX512F, No32 }),
vec(.VMOVD, ops2(.rm64, .xmm), evex(.L128, ._66, ._0F, .W1, 0x7E, t1s), .vMR, .{ AVX512F, No32 }),
// VMOVQ
// xmm[0..15]
vec(.VMOVQ, ops2(.xmml, .xmml_m64), vex(.L128, ._F3, ._0F, .WIG, 0x7E), .vRM, .{AVX}),
vec(.VMOVQ, ops2(.xmml_m64, .xmml), vex(.L128, ._66, ._0F, .WIG, 0xD6), .vMR, .{AVX}),
vec(.VMOVQ, ops2(.xmml, .rm64), vex(.L128, ._66, ._0F, .W1, 0x6E), .vRM, .{ AVX, No32 }),
vec(.VMOVQ, ops2(.rm64, .xmml), vex(.L128, ._66, ._0F, .W1, 0x7E), .vMR, .{ AVX, No32 }),
// xmm[0..31]
vec(.VMOVQ, ops2(.xmm, .xmm_m64), evex(.L128, ._F3, ._0F, .W1, 0x7E, t1s), .vRM, .{AVX512F}),
vec(.VMOVQ, ops2(.xmm_m64, .xmm), evex(.L128, ._66, ._0F, .W1, 0xD6, t1s), .vMR, .{AVX512F}),
vec(.VMOVQ, ops2(.xmm, .rm64), evex(.L128, ._66, ._0F, .W1, 0x6E, t1s), .vRM, .{ AVX512F, No32 }),
vec(.VMOVQ, ops2(.rm64, .xmm), evex(.L128, ._66, ._0F, .W1, 0x7E, t1s), .vMR, .{ AVX512F, No32 }),
// VMOVDDUP
vec(.VMOVDDUP, ops2(.xmml, .xmml_m64), vex(.L128, ._F2, ._0F, .WIG, 0x12), .vRM, .{AVX}),
vec(.VMOVDDUP, ops2(.ymml, .ymml_m256), vex(.L256, ._F2, ._0F, .WIG, 0x12), .vRM, .{AVX}),
vec(.VMOVDDUP, ops2(.xmm_kz, .xmm_m64), evex(.L128, ._F2, ._0F, .W1, 0x12, dup), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVDDUP, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._F2, ._0F, .W1, 0x12, dup), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVDDUP, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._F2, ._0F, .W1, 0x12, dup), .vRM, .{AVX512F}),
// VMOVDQA / VMOVDQA32 / VMOVDQA64
vec(.VMOVDQA, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x6F), .vRM, .{AVX}),
vec(.VMOVDQA, ops2(.ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x6F), .vRM, .{AVX}),
vec(.VMOVDQA, ops2(.xmml_m128, .xmml), vex(.L128, ._66, ._0F, .WIG, 0x7F), .vMR, .{AVX}),
vec(.VMOVDQA, ops2(.ymml_m256, .ymml), vex(.L256, ._66, ._0F, .WIG, 0x7F), .vMR, .{AVX}),
// VMOVDQA32
vec(.VMOVDQA32, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F, .W0, 0x6F, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVDQA32, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F, .W0, 0x6F, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVDQA32, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F, .W0, 0x6F, fmem), .vRM, .{AVX512F}),
vec(.VMOVDQA32, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._66, ._0F, .W0, 0x7F, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVDQA32, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._66, ._0F, .W0, 0x7F, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVDQA32, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._66, ._0F, .W0, 0x7F, fmem), .vMR, .{AVX512F}),
// VMOVDQA64
vec(.VMOVDQA64, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F, .W1, 0x6F, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVDQA64, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F, .W1, 0x6F, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVDQA64, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F, .W1, 0x6F, fmem), .vRM, .{AVX512F}),
vec(.VMOVDQA64, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._66, ._0F, .W1, 0x7F, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVDQA64, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._66, ._0F, .W1, 0x7F, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVDQA64, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._66, ._0F, .W1, 0x7F, fmem), .vMR, .{AVX512F}),
// VMOVDQU / VMOVDQU8 / VMOVDQU16 / VMOVDQU32 / VMOVDQU64
vec(.VMOVDQU, ops2(.xmml, .xmml_m128), vex(.L128, ._F3, ._0F, .WIG, 0x6F), .vRM, .{AVX}),
vec(.VMOVDQU, ops2(.ymml, .ymml_m256), vex(.L256, ._F3, ._0F, .WIG, 0x6F), .vRM, .{AVX}),
vec(.VMOVDQU, ops2(.xmml_m128, .xmml), vex(.L128, ._F3, ._0F, .WIG, 0x7F), .vMR, .{AVX}),
vec(.VMOVDQU, ops2(.ymml_m256, .ymml), vex(.L256, ._F3, ._0F, .WIG, 0x7F), .vMR, .{AVX}),
// VMOVDQU8
vec(.VMOVDQU8, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._F2, ._0F, .W0, 0x6F, fmem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VMOVDQU8, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._F2, ._0F, .W0, 0x6F, fmem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VMOVDQU8, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._F2, ._0F, .W0, 0x6F, fmem), .vRM, .{AVX512BW}),
vec(.VMOVDQU8, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._F2, ._0F, .W0, 0x7F, fmem), .vMR, .{ AVX512VL, AVX512BW }),
vec(.VMOVDQU8, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._F2, ._0F, .W0, 0x7F, fmem), .vMR, .{ AVX512VL, AVX512BW }),
vec(.VMOVDQU8, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._F2, ._0F, .W0, 0x7F, fmem), .vMR, .{AVX512BW}),
// VMOVDQU16
vec(.VMOVDQU16, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._F2, ._0F, .W1, 0x6F, fmem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VMOVDQU16, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._F2, ._0F, .W1, 0x6F, fmem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VMOVDQU16, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._F2, ._0F, .W1, 0x6F, fmem), .vRM, .{AVX512BW}),
vec(.VMOVDQU16, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._F2, ._0F, .W1, 0x7F, fmem), .vMR, .{ AVX512VL, AVX512BW }),
vec(.VMOVDQU16, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._F2, ._0F, .W1, 0x7F, fmem), .vMR, .{ AVX512VL, AVX512BW }),
vec(.VMOVDQU16, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._F2, ._0F, .W1, 0x7F, fmem), .vMR, .{AVX512BW}),
// VMOVDQU32
vec(.VMOVDQU32, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._F3, ._0F, .W0, 0x6F, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVDQU32, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._F3, ._0F, .W0, 0x6F, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVDQU32, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._F3, ._0F, .W0, 0x6F, fmem), .vRM, .{AVX512F}),
vec(.VMOVDQU32, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._F3, ._0F, .W0, 0x7F, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVDQU32, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._F3, ._0F, .W0, 0x7F, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVDQU32, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._F3, ._0F, .W0, 0x7F, fmem), .vMR, .{AVX512F}),
// VMOVDQU64
vec(.VMOVDQU64, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._F3, ._0F, .W1, 0x6F, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVDQU64, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._F3, ._0F, .W1, 0x6F, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVDQU64, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._F3, ._0F, .W1, 0x6F, fmem), .vRM, .{AVX512F}),
vec(.VMOVDQU64, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._F3, ._0F, .W1, 0x7F, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVDQU64, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._F3, ._0F, .W1, 0x7F, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVDQU64, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._F3, ._0F, .W1, 0x7F, fmem), .vMR, .{AVX512F}),
// VMOVHLPS
vec(.VMOVHLPS, ops3(.xmml, .xmml, .xmml), vex(.L128, ._NP, ._0F, .WIG, 0x12), .RVM, .{AVX}),
vec(.VMOVHLPS, ops3(.xmm, .xmm, .xmm), evex(.L128, ._NP, ._0F, .W0, 0x12, nomem), .RVM, .{AVX512F}),
// VMOVHPD
vec(.VMOVHPD, ops3(.xmml, .xmml, .rm_mem64), vex(.L128, ._66, ._0F, .WIG, 0x16), .RVM, .{AVX}),
vec(.VMOVHPD, ops2(.rm_mem64, .xmml), vex(.L128, ._66, ._0F, .WIG, 0x17), .vMR, .{AVX}),
vec(.VMOVHPD, ops3(.xmm, .xmm, .rm_mem64), evex(.L128, ._66, ._0F, .W1, 0x16, t1s), .RVM, .{AVX512F}),
vec(.VMOVHPD, ops2(.rm_mem64, .xmm), evex(.L128, ._66, ._0F, .W1, 0x17, t1s), .vMR, .{AVX512F}),
// VMOVHPS
vec(.VMOVHPS, ops3(.xmml, .xmml, .rm_mem64), vex(.L128, ._NP, ._0F, .WIG, 0x16), .RVM, .{AVX}),
vec(.VMOVHPS, ops2(.rm_mem64, .xmml), vex(.L128, ._NP, ._0F, .WIG, 0x17), .vMR, .{AVX}),
vec(.VMOVHPS, ops3(.xmm, .xmm, .rm_mem64), evex(.L128, ._NP, ._0F, .W0, 0x16, tup2), .RVM, .{AVX512F}),
vec(.VMOVHPS, ops2(.rm_mem64, .xmm), evex(.L128, ._NP, ._0F, .W0, 0x17, tup2), .vMR, .{AVX512F}),
// VMOVLHPS
vec(.VMOVLHPS, ops3(.xmml, .xmml, .xmml), vex(.L128, ._NP, ._0F, .WIG, 0x16), .RVM, .{AVX}),
vec(.VMOVLHPS, ops3(.xmm, .xmm, .xmm), evex(.L128, ._NP, ._0F, .W0, 0x16, nomem), .RVM, .{AVX512F}),
// VMOVLPD
vec(.VMOVLPD, ops3(.xmml, .xmml, .rm_mem64), vex(.L128, ._66, ._0F, .WIG, 0x12), .RVM, .{AVX}),
vec(.VMOVLPD, ops2(.rm_mem64, .xmml), vex(.L128, ._66, ._0F, .WIG, 0x13), .vMR, .{AVX}),
vec(.VMOVLPD, ops3(.xmm, .xmm, .rm_mem64), evex(.L128, ._66, ._0F, .W1, 0x12, t1s), .RVM, .{AVX512F}),
vec(.VMOVLPD, ops2(.rm_mem64, .xmm), evex(.L128, ._66, ._0F, .W1, 0x13, t1s), .vMR, .{AVX512F}),
// VMOVLPS
vec(.VMOVLPS, ops3(.xmml, .xmml, .rm_mem64), vex(.L128, ._NP, ._0F, .WIG, 0x12), .RVM, .{AVX}),
vec(.VMOVLPS, ops2(.rm_mem64, .xmml), vex(.L128, ._NP, ._0F, .WIG, 0x13), .vMR, .{AVX}),
vec(.VMOVLPS, ops3(.xmm, .xmm, .rm_mem64), evex(.L128, ._NP, ._0F, .W0, 0x12, tup2), .RVM, .{AVX512F}),
vec(.VMOVLPS, ops2(.rm_mem64, .xmm), evex(.L128, ._NP, ._0F, .W0, 0x13, tup2), .vMR, .{AVX512F}),
// VMOVMSKPD
vec(.VMOVMSKPD, ops2(.reg32, .xmml), vex(.L128, ._66, ._0F, .WIG, 0x50), .vRM, .{AVX}),
vec(.VMOVMSKPD, ops2(.reg64, .xmml), vex(.L128, ._66, ._0F, .WIG, 0x50), .vRM, .{AVX}),
vec(.VMOVMSKPD, ops2(.reg32, .ymml), vex(.L256, ._66, ._0F, .WIG, 0x50), .vRM, .{AVX}),
vec(.VMOVMSKPD, ops2(.reg64, .ymml), vex(.L256, ._66, ._0F, .WIG, 0x50), .vRM, .{AVX}),
// VMOVMSKPS
vec(.VMOVMSKPS, ops2(.reg32, .xmml), vex(.L128, ._NP, ._0F, .WIG, 0x50), .vRM, .{AVX}),
vec(.VMOVMSKPS, ops2(.reg64, .xmml), vex(.L128, ._NP, ._0F, .WIG, 0x50), .vRM, .{AVX}),
vec(.VMOVMSKPS, ops2(.reg32, .ymml), vex(.L256, ._NP, ._0F, .WIG, 0x50), .vRM, .{AVX}),
vec(.VMOVMSKPS, ops2(.reg64, .ymml), vex(.L256, ._NP, ._0F, .WIG, 0x50), .vRM, .{AVX}),
// VMOVNTDQA
vec(.VMOVNTDQA, ops2(.xmml, .rm_mem128), vex(.L128, ._66, ._0F38, .WIG, 0x2A), .vRM, .{AVX}),
vec(.VMOVNTDQA, ops2(.ymml, .rm_mem256), vex(.L256, ._66, ._0F38, .WIG, 0x2A), .vRM, .{AVX2}),
vec(.VMOVNTDQA, ops2(.xmm, .rm_mem128), evex(.L128, ._66, ._0F38, .W0, 0x2A, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVNTDQA, ops2(.ymm, .rm_mem256), evex(.L256, ._66, ._0F38, .W0, 0x2A, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVNTDQA, ops2(.zmm, .rm_mem512), evex(.L512, ._66, ._0F38, .W0, 0x2A, fmem), .vRM, .{AVX512F}),
// VMOVNTDQ
vec(.VMOVNTDQ, ops2(.rm_mem128, .xmml), vex(.L128, ._66, ._0F, .WIG, 0xE7), .vMR, .{AVX}),
vec(.VMOVNTDQ, ops2(.rm_mem256, .ymml), vex(.L256, ._66, ._0F, .WIG, 0xE7), .vMR, .{AVX}),
vec(.VMOVNTDQ, ops2(.rm_mem128, .xmm), evex(.L128, ._66, ._0F, .W0, 0xE7, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVNTDQ, ops2(.rm_mem256, .ymm), evex(.L256, ._66, ._0F, .W0, 0xE7, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVNTDQ, ops2(.rm_mem512, .zmm), evex(.L512, ._66, ._0F, .W0, 0xE7, fmem), .vMR, .{AVX512F}),
// VMOVNTPD
vec(.VMOVNTPD, ops2(.rm_mem128, .xmml), vex(.L128, ._66, ._0F, .WIG, 0x2B), .vMR, .{AVX}),
vec(.VMOVNTPD, ops2(.rm_mem256, .ymml), vex(.L256, ._66, ._0F, .WIG, 0x2B), .vMR, .{AVX}),
vec(.VMOVNTPD, ops2(.rm_mem128, .xmm), evex(.L128, ._66, ._0F, .W1, 0x2B, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVNTPD, ops2(.rm_mem256, .ymm), evex(.L256, ._66, ._0F, .W1, 0x2B, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVNTPD, ops2(.rm_mem512, .zmm), evex(.L512, ._66, ._0F, .W1, 0x2B, fmem), .vMR, .{AVX512F}),
// VMOVNTPS
vec(.VMOVNTPS, ops2(.rm_mem128, .xmml), vex(.L128, ._NP, ._0F, .WIG, 0x2B), .vMR, .{AVX}),
vec(.VMOVNTPS, ops2(.rm_mem256, .ymml), vex(.L256, ._NP, ._0F, .WIG, 0x2B), .vMR, .{AVX}),
vec(.VMOVNTPS, ops2(.rm_mem128, .xmm), evex(.L128, ._NP, ._0F, .W0, 0x2B, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVNTPS, ops2(.rm_mem256, .ymm), evex(.L256, ._NP, ._0F, .W0, 0x2B, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVNTPS, ops2(.rm_mem512, .zmm), evex(.L512, ._NP, ._0F, .W0, 0x2B, fmem), .vMR, .{AVX512F}),
// VMOVSD
vec(.VMOVSD, ops3(.xmml, .xmml, .rm_xmml), vex(.LIG, ._F2, ._0F, .WIG, 0x10), .RVM, .{AVX}),
vec(.VMOVSD, ops2(.xmml, .rm_mem64), vex(.LIG, ._F2, ._0F, .WIG, 0x10), .vRM, .{AVX}),
vec(.VMOVSD, ops3(.rm_xmml, .xmml, .xmml), vex(.LIG, ._F2, ._0F, .WIG, 0x11), .MVR, .{AVX}),
vec(.VMOVSD, ops2(.rm_mem64, .xmml), vex(.LIG, ._F2, ._0F, .WIG, 0x11), .vMR, .{AVX}),
vec(.VMOVSD, ops3(.xmm_kz, .xmm, .rm_xmm), evex(.LIG, ._F2, ._0F, .W1, 0x10, nomem), .RVM, .{AVX512F}),
vec(.VMOVSD, ops2(.xmm_kz, .rm_mem64), evex(.LIG, ._F2, ._0F, .W1, 0x10, t1s), .vRM, .{AVX512F}),
vec(.VMOVSD, ops3(.rm_xmm_kz, .xmm, .xmm), evex(.LIG, ._F2, ._0F, .W1, 0x11, nomem), .MVR, .{AVX512F}),
vec(.VMOVSD, ops2(.rm_mem64_kz, .xmm), evex(.LIG, ._F2, ._0F, .W1, 0x11, t1s), .vMR, .{AVX512F}),
// VMOVSHDUP
vec(.VMOVSHDUP, ops2(.xmml, .xmml_m128), vex(.L128, ._F3, ._0F, .WIG, 0x16), .vRM, .{AVX}),
vec(.VMOVSHDUP, ops2(.ymml, .ymml_m256), vex(.L256, ._F3, ._0F, .WIG, 0x16), .vRM, .{AVX}),
vec(.VMOVSHDUP, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._F3, ._0F, .W0, 0x16, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVSHDUP, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._F3, ._0F, .W0, 0x16, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVSHDUP, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._F3, ._0F, .W0, 0x16, fmem), .vRM, .{AVX512F}),
// VMOVSLDUP
vec(.VMOVSLDUP, ops2(.xmml, .xmml_m128), vex(.L128, ._F3, ._0F, .WIG, 0x12), .vRM, .{AVX}),
vec(.VMOVSLDUP, ops2(.ymml, .ymml_m256), vex(.L256, ._F3, ._0F, .WIG, 0x12), .vRM, .{AVX}),
vec(.VMOVSLDUP, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._F3, ._0F, .W0, 0x12, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVSLDUP, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._F3, ._0F, .W0, 0x12, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVSLDUP, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._F3, ._0F, .W0, 0x12, fmem), .vRM, .{AVX512F}),
// VMOVSS
vec(.VMOVSS, ops3(.xmml, .xmml, .rm_xmml), vex(.LIG, ._F3, ._0F, .WIG, 0x10), .RVM, .{AVX}),
vec(.VMOVSS, ops2(.xmml, .rm_mem64), vex(.LIG, ._F3, ._0F, .WIG, 0x10), .vRM, .{AVX}),
vec(.VMOVSS, ops3(.rm_xmml, .xmml, .xmml), vex(.LIG, ._F3, ._0F, .WIG, 0x11), .MVR, .{AVX}),
vec(.VMOVSS, ops2(.rm_mem64, .xmml), vex(.LIG, ._F3, ._0F, .WIG, 0x11), .vMR, .{AVX}),
vec(.VMOVSS, ops3(.xmm_kz, .xmm, .xmm), evex(.LIG, ._F3, ._0F, .W0, 0x10, nomem), .RVM, .{AVX512F}),
vec(.VMOVSS, ops2(.xmm_kz, .rm_mem64), evex(.LIG, ._F3, ._0F, .W0, 0x10, t1s), .vRM, .{AVX512F}),
vec(.VMOVSS, ops3(.rm_xmm_kz, .xmm, .xmm), evex(.LIG, ._F3, ._0F, .W0, 0x11, nomem), .MVR, .{AVX512F}),
vec(.VMOVSS, ops2(.rm_mem64_kz, .xmm), evex(.LIG, ._F3, ._0F, .W0, 0x11, t1s), .vMR, .{AVX512F}),
// VMOVUPD
vec(.VMOVUPD, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x10), .vRM, .{AVX}),
vec(.VMOVUPD, ops2(.ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x10), .vRM, .{AVX}),
vec(.VMOVUPD, ops2(.xmml_m128, .xmml), vex(.L128, ._66, ._0F, .WIG, 0x11), .vMR, .{AVX}),
vec(.VMOVUPD, ops2(.ymml_m256, .ymml), vex(.L256, ._66, ._0F, .WIG, 0x11), .vMR, .{AVX}),
vec(.VMOVUPD, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F, .W1, 0x10, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVUPD, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F, .W1, 0x10, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVUPD, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F, .W1, 0x10, fmem), .vRM, .{AVX512F}),
vec(.VMOVUPD, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._66, ._0F, .W1, 0x11, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVUPD, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._66, ._0F, .W1, 0x11, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVUPD, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._66, ._0F, .W1, 0x11, fmem), .vMR, .{AVX512F}),
// VMOVUPS
vec(.VMOVUPS, ops2(.xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x10), .vRM, .{AVX}),
vec(.VMOVUPS, ops2(.ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x10), .vRM, .{AVX}),
vec(.VMOVUPS, ops2(.xmml_m128, .xmml), vex(.L128, ._NP, ._0F, .WIG, 0x11), .vMR, .{AVX}),
vec(.VMOVUPS, ops2(.ymml_m256, .ymml), vex(.L256, ._NP, ._0F, .WIG, 0x11), .vMR, .{AVX}),
vec(.VMOVUPS, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._NP, ._0F, .W0, 0x10, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVUPS, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._NP, ._0F, .W0, 0x10, fmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VMOVUPS, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._NP, ._0F, .W0, 0x10, fmem), .vRM, .{AVX512F}),
vec(.VMOVUPS, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._NP, ._0F, .W0, 0x11, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVUPS, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._NP, ._0F, .W0, 0x11, fmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VMOVUPS, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._NP, ._0F, .W0, 0x11, fmem), .vMR, .{AVX512F}),
// VMPSADBW
vec(.VMPSADBW, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x42), .RVMI, .{AVX}),
vec(.VMPSADBW, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .WIG, 0x42), .RVMI, .{AVX2}),
// VMULPD
vec(.VMULPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x59), .RVM, .{AVX}),
vec(.VMULPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x59), .RVM, .{AVX}),
vec(.VMULPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x59, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VMULPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x59, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VMULPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F, .W1, 0x59, full), .RVM, .{AVX512F}),
// VMULPS
vec(.VMULPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x59), .RVM, .{AVX}),
vec(.VMULPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x59), .RVM, .{AVX}),
vec(.VMULPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x59, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VMULPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x59, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VMULPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._NP, ._0F, .W0, 0x59, full), .RVM, .{AVX512F}),
// VMULSD
vec(.VMULSD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._F2, ._0F, .WIG, 0x59), .RVM, .{AVX}),
vec(.VMULSD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._F2, ._0F, .W1, 0x59, t1s), .RVM, .{AVX512F}),
// VMULSS
vec(.VMULSS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._F3, ._0F, .WIG, 0x59), .RVM, .{AVX}),
vec(.VMULSS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._F3, ._0F, .W0, 0x59, t1s), .RVM, .{AVX512F}),
// VORPD
vec(.VORPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x56), .RVM, .{AVX}),
vec(.VORPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x56), .RVM, .{AVX}),
vec(.VORPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x56, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VORPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x56, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VORPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0x56, full), .RVM, .{AVX512DQ}),
// VORPS
vec(.VORPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x56), .RVM, .{AVX}),
vec(.VORPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x56), .RVM, .{AVX}),
vec(.VORPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x56, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VORPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x56, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VORPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._NP, ._0F, .W0, 0x56, full), .RVM, .{AVX512DQ}),
// VPABSB / VPABSW / VPABSD / VPABSQ
vec(.VPABSB, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x1C), .vRM, .{AVX}),
vec(.VPABSB, ops2(.ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x1C), .vRM, .{AVX2}),
vec(.VPABSB, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F38, .WIG, 0x1C, fmem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPABSB, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F38, .WIG, 0x1C, fmem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPABSB, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F38, .WIG, 0x1C, fmem), .vRM, .{AVX512BW}),
// VPABSW
vec(.VPABSW, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x1D), .vRM, .{AVX}),
vec(.VPABSW, ops2(.ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x1D), .vRM, .{AVX2}),
vec(.VPABSW, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F38, .WIG, 0x1D, fmem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPABSW, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F38, .WIG, 0x1D, fmem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPABSW, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F38, .WIG, 0x1D, fmem), .vRM, .{AVX512BW}),
// VPABSD
vec(.VPABSD, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x1E), .vRM, .{AVX}),
vec(.VPABSD, ops2(.ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x1E), .vRM, .{AVX2}),
vec(.VPABSD, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x1E, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPABSD, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x1E, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPABSD, ops2(.zmm_kz, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x1E, full), .vRM, .{AVX512F}),
// VPABSQ
vec(.VPABSQ, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x1F, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPABSQ, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x1F, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPABSQ, ops2(.zmm_kz, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x1F, full), .vRM, .{AVX512F}),
// VPACKSSWB / PACKSSDW
// VPACKSSWB
vec(.VPACKSSWB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x63), .RVM, .{AVX}),
vec(.VPACKSSWB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x63), .RVM, .{AVX2}),
vec(.VPACKSSWB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0x63, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPACKSSWB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0x63, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPACKSSWB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0x63, fmem), .RVM, .{AVX512BW}),
// VPACKSSDW
vec(.VPACKSSDW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x6B), .RVM, .{AVX}),
vec(.VPACKSSDW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x6B), .RVM, .{AVX2}),
vec(.VPACKSSDW, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F, .W0, 0x6B, full), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPACKSSDW, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F, .W0, 0x6B, full), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPACKSSDW, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F, .W0, 0x6B, full), .RVM, .{AVX512BW}),
// VPACKUSWB
vec(.VPACKUSWB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x67), .RVM, .{AVX}),
vec(.VPACKUSWB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x67), .RVM, .{AVX2}),
vec(.VPACKUSWB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0x67, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPACKUSWB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0x67, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPACKUSWB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0x67, fmem), .RVM, .{AVX512BW}),
// VPACKUSDW
vec(.VPACKUSDW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x2B), .RVM, .{AVX}),
vec(.VPACKUSDW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x2B), .RVM, .{AVX2}),
vec(.VPACKUSDW, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x2B, full), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPACKUSDW, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x2B, full), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPACKUSDW, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x2B, full), .RVM, .{AVX512BW}),
// VPADDB / PADDW / PADDD / PADDQ
// VPADDB
vec(.VPADDB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xFC), .RVM, .{AVX}),
vec(.VPADDB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xFC), .RVM, .{AVX2}),
vec(.VPADDB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xFC, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPADDB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xFC, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPADDB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xFC, fmem), .RVM, .{AVX512BW}),
// VPADDW
vec(.VPADDW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xFD), .RVM, .{AVX}),
vec(.VPADDW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xFD), .RVM, .{AVX2}),
vec(.VPADDW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xFD, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPADDW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xFD, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPADDW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xFD, fmem), .RVM, .{AVX512BW}),
// VPADDD
vec(.VPADDD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xFE), .RVM, .{AVX}),
vec(.VPADDD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xFE), .RVM, .{AVX2}),
vec(.VPADDD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F, .W0, 0xFE, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPADDD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F, .W0, 0xFE, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPADDD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F, .W0, 0xFE, full), .RVM, .{AVX512F}),
// VPADDQ
vec(.VPADDQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xD4), .RVM, .{AVX}),
vec(.VPADDQ, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xD4), .RVM, .{AVX2}),
vec(.VPADDQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0xD4, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPADDQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0xD4, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPADDQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0xD4, full), .RVM, .{AVX512F}),
// VPADDSB / PADDSW
vec(.VPADDSB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xEC), .RVM, .{AVX}),
vec(.VPADDSB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xEC), .RVM, .{AVX2}),
vec(.VPADDSB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xEC, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPADDSB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xEC, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPADDSB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xEC, fmem), .RVM, .{AVX512BW}),
// VPADDSW
vec(.VPADDSW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xED), .RVM, .{AVX}),
vec(.VPADDSW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xED), .RVM, .{AVX2}),
vec(.VPADDSW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xED, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPADDSW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xED, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPADDSW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xED, fmem), .RVM, .{AVX512BW}),
// VPADDUSB / PADDUSW
vec(.VPADDUSB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xDC), .RVM, .{AVX}),
vec(.VPADDUSB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xDC), .RVM, .{AVX2}),
vec(.VPADDUSB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xDC, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPADDUSB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xDC, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPADDUSB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xDC, fmem), .RVM, .{AVX512BW}),
// VPADDUSW
vec(.VPADDUSW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xDD), .RVM, .{AVX}),
vec(.VPADDUSW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xDD), .RVM, .{AVX2}),
vec(.VPADDUSW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xDD, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPADDUSW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xDD, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPADDUSW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xDD, fmem), .RVM, .{AVX512BW}),
// VPALIGNR
vec(.VPALIGNR, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x0F), .RVMI, .{AVX}),
vec(.VPALIGNR, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .WIG, 0x0F), .RVMI, .{AVX2}),
vec(.VPALIGNR, ops4(.xmm_kz, .xmm, .xmm_m128, .imm8), evex(.L128, ._66, ._0F3A, .WIG, 0x0F, fmem), .RVMI, .{ AVX512VL, AVX512BW }),
vec(.VPALIGNR, ops4(.ymm_kz, .ymm, .ymm_m256, .imm8), evex(.L256, ._66, ._0F3A, .WIG, 0x0F, fmem), .RVMI, .{ AVX512VL, AVX512BW }),
vec(.VPALIGNR, ops4(.zmm_kz, .zmm, .zmm_m512, .imm8), evex(.L512, ._66, ._0F3A, .WIG, 0x0F, fmem), .RVMI, .{AVX512BW}),
// VPAND
vec(.VPAND, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xDB), .RVM, .{AVX}),
vec(.VPAND, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xDB), .RVM, .{AVX2}),
vec(.VPANDD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F, .W0, 0xDB, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPANDD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F, .W0, 0xDB, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPANDD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F, .W0, 0xDB, full), .RVM, .{AVX512F}),
vec(.VPANDQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0xDB, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPANDQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0xDB, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPANDQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0xDB, full), .RVM, .{AVX512F}),
// VPANDN
vec(.VPANDN, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xDF), .RVM, .{AVX}),
vec(.VPANDN, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xDF), .RVM, .{AVX2}),
vec(.VPANDND, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F, .W0, 0xDF, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPANDND, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F, .W0, 0xDF, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPANDND, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F, .W0, 0xDF, full), .RVM, .{AVX512F}),
vec(.VPANDNQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0xDF, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPANDNQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0xDF, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPANDNQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0xDF, full), .RVM, .{AVX512F}),
// VPAVGB / VPAVGW
vec(.VPAVGB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xE0), .RVM, .{AVX}),
vec(.VPAVGB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xE0), .RVM, .{AVX2}),
vec(.VPAVGB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xE0, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPAVGB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xE0, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPAVGB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xE0, fmem), .RVM, .{AVX512BW}),
// VPAVGW
vec(.VPAVGW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xE3), .RVM, .{AVX}),
vec(.VPAVGW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xE3), .RVM, .{AVX2}),
vec(.VPAVGW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xE3, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPAVGW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xE3, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPAVGW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xE3, fmem), .RVM, .{AVX512BW}),
// VPBLENDVB
vec(.VPBLENDVB, ops4(.xmml, .xmml, .xmml_m128, .xmml), vex(.L128, ._66, ._0F3A, .W0, 0x4C), .RVMR, .{AVX}),
vec(.VPBLENDVB, ops4(.ymml, .ymml, .ymml_m256, .ymml), vex(.L256, ._66, ._0F3A, .W0, 0x4C), .RVMR, .{AVX2}),
// VPBLENDW
vec(.VPBLENDW, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x0E), .RVMI, .{AVX}),
vec(.VPBLENDW, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .WIG, 0x0E), .RVMI, .{AVX2}),
// VPCLMULQDQ
vec(.VPCLMULQDQ, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x44), .RVMI, .{ cpu.PCLMULQDQ, AVX }),
vec(.VPCLMULQDQ, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .WIG, 0x44), .RVMI, .{cpu.VPCLMULQDQ}),
vec(.VPCLMULQDQ, ops4(.xmm, .xmm, .xmm_m128, .imm8), evex(.L128, ._66, ._0F3A, .WIG, 0x44, fmem), .RVMI, .{ cpu.VPCLMULQDQ, AVX512VL }),
vec(.VPCLMULQDQ, ops4(.ymm, .ymm, .ymm_m256, .imm8), evex(.L256, ._66, ._0F3A, .WIG, 0x44, fmem), .RVMI, .{ cpu.VPCLMULQDQ, AVX512VL }),
vec(.VPCLMULQDQ, ops4(.zmm, .zmm, .zmm_m512, .imm8), evex(.L512, ._66, ._0F3A, .WIG, 0x44, fmem), .RVMI, .{ cpu.VPCLMULQDQ, AVX512F }),
// VPCMPEQB / VPCMPEQW / VPCMPEQD
vec(.VPCMPEQB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x74), .RVM, .{AVX}),
vec(.VPCMPEQB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x74), .RVM, .{AVX2}),
vec(.VPCMPEQB, ops3(.reg_k_k, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0x74, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPCMPEQB, ops3(.reg_k_k, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0x74, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPCMPEQB, ops3(.reg_k_k, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0x74, fmem), .RVM, .{AVX512BW}),
// VPCMPEQW
vec(.VPCMPEQW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x75), .RVM, .{AVX}),
vec(.VPCMPEQW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x75), .RVM, .{AVX2}),
vec(.VPCMPEQW, ops3(.reg_k_k, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0x75, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPCMPEQW, ops3(.reg_k_k, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0x75, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPCMPEQW, ops3(.reg_k_k, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0x75, fmem), .RVM, .{AVX512BW}),
// VPCMPEQD
vec(.VPCMPEQD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x76), .RVM, .{AVX}),
vec(.VPCMPEQD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x76), .RVM, .{AVX2}),
vec(.VPCMPEQD, ops3(.reg_k_k, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F, .W0, 0x76, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPCMPEQD, ops3(.reg_k_k, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F, .W0, 0x76, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPCMPEQD, ops3(.reg_k_k, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F, .W0, 0x76, full), .RVM, .{AVX512F}),
// VPCMPEQQ
vec(.VPCMPEQQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x29), .RVM, .{AVX}),
vec(.VPCMPEQQ, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x29), .RVM, .{AVX2}),
vec(.VPCMPEQQ, ops3(.reg_k_k, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x29, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPCMPEQQ, ops3(.reg_k_k, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x29, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPCMPEQQ, ops3(.reg_k_k, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x29, full), .RVM, .{AVX512F}),
// VPCMPESTRI
vec(.VPCMPESTRI, ops3(.xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x61), .vRMI, .{AVX}),
// VPCMPESTRM
vec(.VPCMPESTRM, ops3(.xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x60), .vRMI, .{AVX}),
// VPCMPGTB / VPCMPGTW / VPCMPGTD
// VPCMPGTB
vec(.VPCMPGTB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x64), .RVM, .{AVX}),
vec(.VPCMPGTB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x64), .RVM, .{AVX2}),
// NOTE: the EVEX-encoded forms write a mask register (k1 {k2}), not a vector
// register — matching VPCMPEQB/VPCMPGTW/VPCMPGTD below (Intel SDM: VPCMPGTB
// k1 {k2}, xmm2, xmm3/m128). Destination operand is therefore .reg_k_k.
vec(.VPCMPGTB, ops3(.reg_k_k, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0x64, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPCMPGTB, ops3(.reg_k_k, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0x64, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPCMPGTB, ops3(.reg_k_k, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0x64, fmem), .RVM, .{AVX512BW}),
// VPCMPGTW
vec(.VPCMPGTW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x65), .RVM, .{AVX}),
vec(.VPCMPGTW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x65), .RVM, .{AVX2}),
vec(.VPCMPGTW, ops3(.reg_k_k, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0x65, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPCMPGTW, ops3(.reg_k_k, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0x65, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPCMPGTW, ops3(.reg_k_k, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0x65, fmem), .RVM, .{AVX512BW}),
// VPCMPGTD
vec(.VPCMPGTD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x66), .RVM, .{AVX}),
vec(.VPCMPGTD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x66), .RVM, .{AVX2}),
vec(.VPCMPGTD, ops3(.reg_k_k, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F, .W0, 0x66, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPCMPGTD, ops3(.reg_k_k, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F, .W0, 0x66, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPCMPGTD, ops3(.reg_k_k, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F, .W0, 0x66, full), .RVM, .{AVX512F}),
// VPCMPGTQ
vec(.VPCMPGTQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x37), .RVM, .{AVX}),
vec(.VPCMPGTQ, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x37), .RVM, .{AVX2}),
vec(.VPCMPGTQ, ops3(.reg_k_k, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x37, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPCMPGTQ, ops3(.reg_k_k, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x37, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPCMPGTQ, ops3(.reg_k_k, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x37, full), .RVM, .{AVX512F}),
// VPCMPISTRI
vec(.VPCMPISTRI, ops3(.xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x63), .vRMI, .{AVX}),
// VPCMPISTRM
vec(.VPCMPISTRM, ops3(.xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x62), .vRMI, .{AVX}),
// VPEXTRB / VPEXTRD / VPEXTRQ
vec(.VPEXTRB, ops3(.reg32_m8, .xmml, .imm8), vex(.L128, ._66, ._0F3A, .W0, 0x14), .vMRI, .{AVX}),
vec(.VPEXTRB, ops3(.rm_reg64, .xmml, .imm8), vex(.L128, ._66, ._0F3A, .W0, 0x14), .vMRI, .{ AVX, No32 }),
vec(.VPEXTRB, ops3(.reg32_m8, .xmm, .imm8), evex(.L128, ._66, ._0F3A, .WIG, 0x14, t1s), .vMRI, .{AVX512BW}),
vec(.VPEXTRB, ops3(.reg64, .xmm, .imm8), evex(.L128, ._66, ._0F3A, .WIG, 0x14, t1s), .vMRI, .{ AVX512BW, No32 }),
// VPEXTRD
vec(.VPEXTRD, ops3(.rm32, .xmml, .imm8), vex(.L128, ._66, ._0F3A, .W0, 0x16), .vMRI, .{AVX}),
vec(.VPEXTRD, ops3(.rm32, .xmm, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x16, t1s), .vMRI, .{AVX512DQ}),
// VPEXTRQ
vec(.VPEXTRQ, ops3(.rm64, .xmml, .imm8), vex(.L128, ._66, ._0F3A, .W1, 0x16), .vMRI, .{AVX}),
vec(.VPEXTRQ, ops3(.rm64, .xmm, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x16, t1s), .vMRI, .{AVX512DQ}),
// VPEXTRW
vec(.VPEXTRW, ops3(.reg32, .xmml, .imm8), vex(.L128, ._66, ._0F, .W0, 0xC5), .vRMI, .{AVX}),
vec(.VPEXTRW, ops3(.reg64, .xmml, .imm8), vex(.L128, ._66, ._0F, .W0, 0xC5), .vRMI, .{ AVX, No32 }),
vec(.VPEXTRW, ops3(.reg32_m16, .xmml, .imm8), vex(.L128, ._66, ._0F3A, .W0, 0x15), .vMRI, .{AVX}),
vec(.VPEXTRW, ops3(.rm_reg64, .xmml, .imm8), vex(.L128, ._66, ._0F3A, .W0, 0x15), .vMRI, .{ AVX, No32 }),
vec(.VPEXTRW, ops3(.reg32, .xmm, .imm8), evex(.L128, ._66, ._0F, .WIG, 0xC5, nomem), .vRMI, .{AVX512BW}),
vec(.VPEXTRW, ops3(.reg64, .xmm, .imm8), evex(.L128, ._66, ._0F, .WIG, 0xC5, nomem), .vRMI, .{ AVX512BW, No32 }),
vec(.VPEXTRW, ops3(.reg32_m16, .xmm, .imm8), evex(.L128, ._66, ._0F3A, .WIG, 0x15, t1s), .vMRI, .{AVX512BW}),
vec(.VPEXTRW, ops3(.rm_reg64, .xmm, .imm8), evex(.L128, ._66, ._0F3A, .WIG, 0x15, t1s), .vMRI, .{ AVX512BW, No32 }),
// VPHADDW / VPHADDD
vec(.VPHADDW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x01), .RVM, .{AVX}),
vec(.VPHADDW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x01), .RVM, .{AVX2}),
// VPHADDD
vec(.VPHADDD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x02), .RVM, .{AVX}),
vec(.VPHADDD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x02), .RVM, .{AVX2}),
// VPHADDSW
vec(.VPHADDSW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x03), .RVM, .{AVX}),
vec(.VPHADDSW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x03), .RVM, .{AVX2}),
// VPHMINPOSUW
vec(.VPHMINPOSUW, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x41), .vRM, .{AVX}),
// VPHSUBW / VPHSUBD
vec(.VPHSUBW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x05), .RVM, .{AVX}),
vec(.VPHSUBW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x05), .RVM, .{AVX2}),
// VPHSUBD
vec(.VPHSUBD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x06), .RVM, .{AVX}),
vec(.VPHSUBD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x06), .RVM, .{AVX2}),
// VPHSUBSW
vec(.VPHSUBSW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x07), .RVM, .{AVX}),
vec(.VPHSUBSW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x07), .RVM, .{AVX2}),
// VPINSRB / VPINSRD / VPINSRQ
vec(.VPINSRB, ops4(.xmml, .xmml, .reg32_m8, .imm8), vex(.L128, ._66, ._0F3A, .W0, 0x20), .RVMI, .{AVX}),
vec(.VPINSRB, ops4(.xmm, .xmm, .reg32_m8, .imm8), evex(.L128, ._66, ._0F3A, .WIG, 0x20, t1s), .RVMI, .{AVX512BW}),
// VPINSRD
vec(.VPINSRD, ops4(.xmml, .xmml, .rm32, .imm8), vex(.L128, ._66, ._0F3A, .W0, 0x22), .RVMI, .{AVX}),
vec(.VPINSRD, ops4(.xmm, .xmm, .rm32, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x22, t1s), .RVMI, .{AVX512DQ}),
// VPINSRQ
vec(.VPINSRQ, ops4(.xmml, .xmml, .rm64, .imm8), vex(.L128, ._66, ._0F3A, .W1, 0x22), .RVMI, .{AVX}),
vec(.VPINSRQ, ops4(.xmm, .xmm, .rm64, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x22, t1s), .RVMI, .{AVX512DQ}),
// VPINSRW
vec(.VPINSRW, ops4(.xmml, .xmml, .reg32_m16, .imm8), vex(.L128, ._66, ._0F, .W0, 0xC4), .RVMI, .{AVX}),
vec(.VPINSRW, ops4(.xmml, .xmml, .rm_reg64, .imm8), vex(.L128, ._66, ._0F, .W0, 0xC4), .RVMI, .{AVX}),
vec(.VPINSRW, ops4(.xmm, .xmm, .reg32_m16, .imm8), evex(.L128, ._66, ._0F, .WIG, 0xC4, t1s), .RVMI, .{AVX512BW}),
vec(.VPINSRW, ops4(.xmm, .xmm, .rm_reg64, .imm8), evex(.L128, ._66, ._0F, .WIG, 0xC4, t1s), .RVMI, .{AVX512BW}),
// VPMADDUBSW
vec(.VPMADDUBSW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x04), .RVM, .{AVX}),
vec(.VPMADDUBSW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x04), .RVM, .{AVX2}),
vec(.VPMADDUBSW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .WIG, 0x04, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMADDUBSW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .WIG, 0x04, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMADDUBSW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .WIG, 0x04, fmem), .RVM, .{AVX512BW}),
// VPMADDWD
vec(.VPMADDWD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xF5), .RVM, .{AVX}),
vec(.VPMADDWD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xF5), .RVM, .{AVX2}),
vec(.VPMADDWD, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xF5, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMADDWD, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xF5, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMADDWD, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xF5, fmem), .RVM, .{AVX512BW}),
// VPMAXSB / VPMAXSW / VPMAXSD / VPMAXSQ
// VPMAXSB
vec(.VPMAXSB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x3C), .RVM, .{AVX}),
vec(.VPMAXSB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x3C), .RVM, .{AVX2}),
vec(.VPMAXSB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .WIG, 0x3C, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMAXSB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .WIG, 0x3C, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMAXSB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .WIG, 0x3C, fmem), .RVM, .{AVX512BW}),
// VPMAXSW
vec(.VPMAXSW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xEE), .RVM, .{AVX}),
vec(.VPMAXSW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xEE), .RVM, .{AVX2}),
vec(.VPMAXSW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xEE, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMAXSW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xEE, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMAXSW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xEE, fmem), .RVM, .{AVX512BW}),
// VPMAXSD
vec(.VPMAXSD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x3D), .RVM, .{AVX}),
vec(.VPMAXSD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x3D), .RVM, .{AVX2}),
vec(.VPMAXSD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x3D, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMAXSD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x3D, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMAXSD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x3D, full), .RVM, .{AVX512F}),
// VPMAXSQ
vec(.VPMAXSQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x3D, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMAXSQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x3D, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMAXSQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x3D, full), .RVM, .{AVX512F}),
// VPMAXUB / VPMAXUW
// VPMAXUB
vec(.VPMAXUB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xDE), .RVM, .{AVX}),
vec(.VPMAXUB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xDE), .RVM, .{AVX2}),
vec(.VPMAXUB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xDE, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMAXUB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xDE, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMAXUB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xDE, fmem), .RVM, .{AVX512BW}),
// VPMAXUW
vec(.VPMAXUW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x3E), .RVM, .{AVX}),
vec(.VPMAXUW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x3E), .RVM, .{AVX2}),
vec(.VPMAXUW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .WIG, 0x3E, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMAXUW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .WIG, 0x3E, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMAXUW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .WIG, 0x3E, fmem), .RVM, .{AVX512BW}),
// VPMAXUD / VPMAXUQ
// VPMAXUD
vec(.VPMAXUD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x3F), .RVM, .{AVX}),
vec(.VPMAXUD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x3F), .RVM, .{AVX2}),
vec(.VPMAXUD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x3F, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMAXUD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x3F, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMAXUD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x3F, full), .RVM, .{AVX512F}),
// VPMAXUQ
vec(.VPMAXUQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x3F, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMAXUQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x3F, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMAXUQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x3F, full), .RVM, .{AVX512F}),
// VPMINSB / VPMINSW
// VPMINSB
vec(.VPMINSB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x38), .RVM, .{AVX}),
vec(.VPMINSB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x38), .RVM, .{AVX2}),
vec(.VPMINSB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .WIG, 0x38, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMINSB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .WIG, 0x38, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMINSB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .WIG, 0x38, fmem), .RVM, .{AVX512BW}),
// VPMINSW
vec(.VPMINSW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xEA), .RVM, .{AVX}),
vec(.VPMINSW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xEA), .RVM, .{AVX2}),
vec(.VPMINSW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xEA, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMINSW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xEA, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMINSW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xEA, fmem), .RVM, .{AVX512BW}),
// VPMINSD / VPMINSQ
// VPMINSD
vec(.VPMINSD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x39), .RVM, .{AVX}),
vec(.VPMINSD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x39), .RVM, .{AVX2}),
vec(.VPMINSD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x39, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMINSD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x39, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMINSD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x39, full), .RVM, .{AVX512F}),
// VPMINSQ
vec(.VPMINSQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x39, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMINSQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x39, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMINSQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x39, full), .RVM, .{AVX512F}),
// VPMINUB / VPMINUW
// VPMINUB
vec(.VPMINUB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xDA), .RVM, .{AVX}),
vec(.VPMINUB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xDA), .RVM, .{AVX2}),
vec(.VPMINUB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xDA, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMINUB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xDA, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMINUB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xDA, fmem), .RVM, .{AVX512BW}),
// VPMINUW
vec(.VPMINUW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x3A), .RVM, .{AVX}),
vec(.VPMINUW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x3A), .RVM, .{AVX2}),
vec(.VPMINUW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .WIG, 0x3A, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMINUW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .WIG, 0x3A, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMINUW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .WIG, 0x3A, fmem), .RVM, .{AVX512BW}),
// VPMINUD / VPMINUQ
// VPMINUD
vec(.VPMINUD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x3B), .RVM, .{AVX}),
vec(.VPMINUD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x3B), .RVM, .{AVX2}),
vec(.VPMINUD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x3B, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMINUD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x3B, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMINUD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x3B, full), .RVM, .{AVX512F}),
// VPMINUQ
vec(.VPMINUQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x3B, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMINUQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x3B, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMINUQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x3B, full), .RVM, .{AVX512F}),
// VPMOVMSKB
// Destination is a GPR; the source must be an XMM/YMM *register* — (V)PMOVMSKB
// has no memory-operand form, so the operand is .xmml/.ymml, not .xmml_m128.
// Two-operand reg,reg/mem encodings in this table use .vRM (.RVM is the
// three-operand NDS form and was incorrect here).
vec(.VPMOVMSKB,   ops2(.reg32, .xmml),                    vex(.L128,._66,._0F,.WIG, 0xD7),            .vRM, .{AVX}),
vec(.VPMOVMSKB,   ops2(.reg64, .xmml),                    vex(.L128,._66,._0F,.WIG, 0xD7),            .vRM, .{AVX}),
vec(.VPMOVMSKB,   ops2(.reg32, .ymml),                    vex(.L256,._66,._0F,.WIG, 0xD7),            .vRM, .{AVX2}),
vec(.VPMOVMSKB,   ops2(.reg64, .ymml),                    vex(.L256,._66,._0F,.WIG, 0xD7),            .vRM, .{AVX2}),
// VPMOVSX
// VPMOVSXBW
vec(.VPMOVSXBW, ops2(.xmml, .xmml_m64), vex(.L128, ._66, ._0F38, .WIG, 0x20), .vRM, .{AVX}),
vec(.VPMOVSXBW, ops2(.ymml, .xmml_m128), vex(.L256, ._66, ._0F38, .WIG, 0x20), .vRM, .{AVX2}),
vec(.VPMOVSXBW, ops2(.xmm_kz, .xmm_m64), evex(.L128, ._66, ._0F38, .WIG, 0x20, hmem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPMOVSXBW, ops2(.ymm_kz, .xmm_m128), evex(.L256, ._66, ._0F38, .WIG, 0x20, hmem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPMOVSXBW, ops2(.zmm_kz, .ymm_m256), evex(.L512, ._66, ._0F38, .WIG, 0x20, hmem), .vRM, .{AVX512BW}),
// VPMOVSXBD
vec(.VPMOVSXBD, ops2(.xmml, .xmml_m32), vex(.L128, ._66, ._0F38, .WIG, 0x21), .vRM, .{AVX}),
vec(.VPMOVSXBD, ops2(.ymml, .xmml_m64), vex(.L256, ._66, ._0F38, .WIG, 0x21), .vRM, .{AVX2}),
vec(.VPMOVSXBD, ops2(.xmm_kz, .xmm_m32), evex(.L128, ._66, ._0F38, .WIG, 0x21, qmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVSXBD, ops2(.ymm_kz, .xmm_m64), evex(.L256, ._66, ._0F38, .WIG, 0x21, qmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVSXBD, ops2(.zmm_kz, .xmm_m128), evex(.L512, ._66, ._0F38, .WIG, 0x21, qmem), .vRM, .{AVX512F}),
// VPMOVSXBQ
vec(.VPMOVSXBQ, ops2(.xmml, .xmml_m16), vex(.L128, ._66, ._0F38, .WIG, 0x22), .vRM, .{AVX}),
vec(.VPMOVSXBQ, ops2(.ymml, .xmml_m32), vex(.L256, ._66, ._0F38, .WIG, 0x22), .vRM, .{AVX2}),
vec(.VPMOVSXBQ, ops2(.xmm_kz, .xmm_m16), evex(.L128, ._66, ._0F38, .WIG, 0x22, emem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVSXBQ, ops2(.ymm_kz, .xmm_m32), evex(.L256, ._66, ._0F38, .WIG, 0x22, emem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVSXBQ, ops2(.zmm_kz, .xmm_m64), evex(.L512, ._66, ._0F38, .WIG, 0x22, emem), .vRM, .{AVX512F}),
// VPMOVSXWD
vec(.VPMOVSXWD, ops2(.xmml, .xmml_m64), vex(.L128, ._66, ._0F38, .WIG, 0x23), .vRM, .{AVX}),
vec(.VPMOVSXWD, ops2(.ymml, .xmml_m128), vex(.L256, ._66, ._0F38, .WIG, 0x23), .vRM, .{AVX2}),
vec(.VPMOVSXWD, ops2(.xmm_kz, .xmm_m64), evex(.L128, ._66, ._0F38, .WIG, 0x23, hmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVSXWD, ops2(.ymm_kz, .xmm_m128), evex(.L256, ._66, ._0F38, .WIG, 0x23, hmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVSXWD, ops2(.zmm_kz, .ymm_m256), evex(.L512, ._66, ._0F38, .WIG, 0x23, hmem), .vRM, .{AVX512F}),
// VPMOVSXWQ
vec(.VPMOVSXWQ, ops2(.xmml, .xmml_m32), vex(.L128, ._66, ._0F38, .WIG, 0x24), .vRM, .{AVX}),
vec(.VPMOVSXWQ, ops2(.ymml, .xmml_m64), vex(.L256, ._66, ._0F38, .WIG, 0x24), .vRM, .{AVX2}),
vec(.VPMOVSXWQ, ops2(.xmm_kz, .xmm_m32), evex(.L128, ._66, ._0F38, .WIG, 0x24, qmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVSXWQ, ops2(.ymm_kz, .xmm_m64), evex(.L256, ._66, ._0F38, .WIG, 0x24, qmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVSXWQ, ops2(.zmm_kz, .xmm_m128), evex(.L512, ._66, ._0F38, .WIG, 0x24, qmem), .vRM, .{AVX512F}),
// VPMOVSXDQ
vec(.VPMOVSXDQ, ops2(.xmml, .xmml_m64), vex(.L128, ._66, ._0F38, .WIG, 0x25), .vRM, .{AVX}),
vec(.VPMOVSXDQ, ops2(.ymml, .xmml_m128), vex(.L256, ._66, ._0F38, .WIG, 0x25), .vRM, .{AVX2}),
vec(.VPMOVSXDQ, ops2(.xmm_kz, .xmm_m64), evex(.L128, ._66, ._0F38, .W0, 0x25, hmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVSXDQ, ops2(.ymm_kz, .xmm_m128), evex(.L256, ._66, ._0F38, .W0, 0x25, hmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVSXDQ, ops2(.zmm_kz, .ymm_m256), evex(.L512, ._66, ._0F38, .W0, 0x25, hmem), .vRM, .{AVX512F}),
// VPMOVZX
// VPMOVZXBW
vec(.VPMOVZXBW, ops2(.xmml, .xmml_m64), vex(.L128, ._66, ._0F38, .WIG, 0x30), .vRM, .{AVX}),
vec(.VPMOVZXBW, ops2(.ymml, .xmml_m128), vex(.L256, ._66, ._0F38, .WIG, 0x30), .vRM, .{AVX2}),
vec(.VPMOVZXBW, ops2(.xmm_kz, .xmm_m64), evex(.L128, ._66, ._0F38, .WIG, 0x30, hmem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPMOVZXBW, ops2(.ymm_kz, .xmm_m128), evex(.L256, ._66, ._0F38, .WIG, 0x30, hmem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPMOVZXBW, ops2(.zmm_kz, .ymm_m256), evex(.L512, ._66, ._0F38, .WIG, 0x30, hmem), .vRM, .{AVX512BW}),
// VPMOVZXBD
vec(.VPMOVZXBD, ops2(.xmml, .xmml_m32), vex(.L128, ._66, ._0F38, .WIG, 0x31), .vRM, .{AVX}),
vec(.VPMOVZXBD, ops2(.ymml, .xmml_m64), vex(.L256, ._66, ._0F38, .WIG, 0x31), .vRM, .{AVX2}),
vec(.VPMOVZXBD, ops2(.xmm_kz, .xmm_m32), evex(.L128, ._66, ._0F38, .WIG, 0x31, qmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVZXBD, ops2(.ymm_kz, .xmm_m64), evex(.L256, ._66, ._0F38, .WIG, 0x31, qmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVZXBD, ops2(.zmm_kz, .xmm_m128), evex(.L512, ._66, ._0F38, .WIG, 0x31, qmem), .vRM, .{AVX512F}),
// VPMOVZXBQ
vec(.VPMOVZXBQ, ops2(.xmml, .xmml_m16), vex(.L128, ._66, ._0F38, .WIG, 0x32), .vRM, .{AVX}),
vec(.VPMOVZXBQ, ops2(.ymml, .xmml_m32), vex(.L256, ._66, ._0F38, .WIG, 0x32), .vRM, .{AVX2}),
vec(.VPMOVZXBQ, ops2(.xmm_kz, .xmm_m16), evex(.L128, ._66, ._0F38, .WIG, 0x32, emem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVZXBQ, ops2(.ymm_kz, .xmm_m32), evex(.L256, ._66, ._0F38, .WIG, 0x32, emem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVZXBQ, ops2(.zmm_kz, .xmm_m64), evex(.L512, ._66, ._0F38, .WIG, 0x32, emem), .vRM, .{AVX512F}),
// VPMOVZXWD
vec(.VPMOVZXWD, ops2(.xmml, .xmml_m64), vex(.L128, ._66, ._0F38, .WIG, 0x33), .vRM, .{AVX}),
vec(.VPMOVZXWD, ops2(.ymml, .xmml_m128), vex(.L256, ._66, ._0F38, .WIG, 0x33), .vRM, .{AVX2}),
vec(.VPMOVZXWD, ops2(.xmm_kz, .xmm_m64), evex(.L128, ._66, ._0F38, .WIG, 0x33, hmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVZXWD, ops2(.ymm_kz, .xmm_m128), evex(.L256, ._66, ._0F38, .WIG, 0x33, hmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVZXWD, ops2(.zmm_kz, .ymm_m256), evex(.L512, ._66, ._0F38, .WIG, 0x33, hmem), .vRM, .{AVX512F}),
// VPMOVZXWQ
vec(.VPMOVZXWQ, ops2(.xmml, .xmml_m32), vex(.L128, ._66, ._0F38, .WIG, 0x34), .vRM, .{AVX}),
vec(.VPMOVZXWQ, ops2(.ymml, .xmml_m64), vex(.L256, ._66, ._0F38, .WIG, 0x34), .vRM, .{AVX2}),
vec(.VPMOVZXWQ, ops2(.xmm_kz, .xmm_m32), evex(.L128, ._66, ._0F38, .WIG, 0x34, qmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVZXWQ, ops2(.ymm_kz, .xmm_m64), evex(.L256, ._66, ._0F38, .WIG, 0x34, qmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVZXWQ, ops2(.zmm_kz, .xmm_m128), evex(.L512, ._66, ._0F38, .WIG, 0x34, qmem), .vRM, .{AVX512F}),
// VPMOVZXDQ
vec(.VPMOVZXDQ, ops2(.xmml, .xmml_m64), vex(.L128, ._66, ._0F38, .WIG, 0x35), .vRM, .{AVX}),
vec(.VPMOVZXDQ, ops2(.ymml, .xmml_m128), vex(.L256, ._66, ._0F38, .WIG, 0x35), .vRM, .{AVX2}),
vec(.VPMOVZXDQ, ops2(.xmm_kz, .xmm_m64), evex(.L128, ._66, ._0F38, .W0, 0x35, hmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVZXDQ, ops2(.ymm_kz, .xmm_m128), evex(.L256, ._66, ._0F38, .W0, 0x35, hmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPMOVZXDQ, ops2(.zmm_kz, .ymm_m256), evex(.L512, ._66, ._0F38, .W0, 0x35, hmem), .vRM, .{AVX512F}),
// VPMULDQ
vec(.VPMULDQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x28), .RVM, .{AVX}),
vec(.VPMULDQ, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x28), .RVM, .{AVX2}),
vec(.VPMULDQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x28, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMULDQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x28, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMULDQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x28, full), .RVM, .{AVX512F}),
// VPMULHRSW
vec(.VPMULHRSW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x0B), .RVM, .{AVX}),
vec(.VPMULHRSW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x0B), .RVM, .{AVX2}),
vec(.VPMULHRSW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .WIG, 0x0B, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMULHRSW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .WIG, 0x0B, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMULHRSW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .WIG, 0x0B, fmem), .RVM, .{AVX512BW}),
// VPMULHUW
vec(.VPMULHUW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xE4), .RVM, .{AVX}),
vec(.VPMULHUW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xE4), .RVM, .{AVX2}),
vec(.VPMULHUW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xE4, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMULHUW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xE4, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMULHUW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xE4, fmem), .RVM, .{AVX512BW}),
// VPMULHW
vec(.VPMULHW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xE5), .RVM, .{AVX}),
vec(.VPMULHW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xE5), .RVM, .{AVX2}),
vec(.VPMULHW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xE5, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMULHW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xE5, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMULHW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xE5, fmem), .RVM, .{AVX512BW}),
// VPMULLD / VPMULLQ
// VPMULLD
vec(.VPMULLD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x40), .RVM, .{AVX}),
vec(.VPMULLD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x40), .RVM, .{AVX2}),
vec(.VPMULLD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x40, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMULLD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x40, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMULLD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x40, full), .RVM, .{AVX512F}),
// VPMULLQ
vec(.VPMULLQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x40, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VPMULLQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x40, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VPMULLQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x40, full), .RVM, .{AVX512DQ}),
// VPMULLW
vec(.VPMULLW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xD5), .RVM, .{AVX}),
vec(.VPMULLW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xD5), .RVM, .{AVX2}),
vec(.VPMULLW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xD5, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMULLW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xD5, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPMULLW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xD5, fmem), .RVM, .{AVX512BW}),
// VPMULUDQ
vec(.VPMULUDQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xF4), .RVM, .{AVX}),
vec(.VPMULUDQ, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xF4), .RVM, .{AVX2}),
vec(.VPMULUDQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0xF4, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMULUDQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0xF4, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPMULUDQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0xF4, full), .RVM, .{AVX512F}),
// VPOR
vec(.VPOR, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xEB), .RVM, .{AVX}),
vec(.VPOR, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xEB), .RVM, .{AVX2}),
//
vec(.VPORD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F, .W0, 0xEB, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPORD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F, .W0, 0xEB, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPORD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F, .W0, 0xEB, full), .RVM, .{AVX512F}),
//
vec(.VPORQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0xEB, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPORQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0xEB, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPORQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0xEB, full), .RVM, .{AVX512F}),
// VPSADBW
vec(.VPSADBW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xF6), .RVM, .{AVX}),
vec(.VPSADBW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xF6), .RVM, .{AVX2}),
vec(.VPSADBW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xF6, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSADBW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xF6, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSADBW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xF6, fmem), .RVM, .{AVX512BW}),
// VPSHUFB
vec(.VPSHUFB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x00), .RVM, .{AVX}),
vec(.VPSHUFB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x00), .RVM, .{AVX2}),
vec(.VPSHUFB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .WIG, 0x00, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSHUFB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .WIG, 0x00, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSHUFB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .WIG, 0x00, fmem), .RVM, .{AVX512BW}),
// VPSHUFD
vec(.VPSHUFD, ops3(.xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F, .WIG, 0x70), .vRMI, .{AVX}),
vec(.VPSHUFD, ops3(.ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F, .WIG, 0x70), .vRMI, .{AVX2}),
vec(.VPSHUFD, ops3(.xmm_kz, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F, .W0, 0x70, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VPSHUFD, ops3(.ymm_kz, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F, .W0, 0x70, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VPSHUFD, ops3(.zmm_kz, .zmm_m512_m32bcst, .imm8), evex(.L512, ._66, ._0F, .W0, 0x70, full), .vRMI, .{AVX512F}),
// VPSHUFHW
vec(.VPSHUFHW, ops3(.xmml, .xmml_m128, .imm8), vex(.L128, ._F3, ._0F, .WIG, 0x70), .vRMI, .{AVX}),
vec(.VPSHUFHW, ops3(.ymml, .ymml_m256, .imm8), vex(.L256, ._F3, ._0F, .WIG, 0x70), .vRMI, .{AVX2}),
vec(.VPSHUFHW, ops3(.xmm_kz, .xmm_m128, .imm8), evex(.L128, ._F3, ._0F, .WIG, 0x70, fmem), .vRMI, .{ AVX512VL, AVX512BW }),
vec(.VPSHUFHW, ops3(.ymm_kz, .ymm_m256, .imm8), evex(.L256, ._F3, ._0F, .WIG, 0x70, fmem), .vRMI, .{ AVX512VL, AVX512BW }),
vec(.VPSHUFHW, ops3(.zmm_kz, .zmm_m512, .imm8), evex(.L512, ._F3, ._0F, .WIG, 0x70, fmem), .vRMI, .{AVX512BW}),
// VPSHUFLW
vec(.VPSHUFLW, ops3(.xmml, .xmml_m128, .imm8), vex(.L128, ._F2, ._0F, .WIG, 0x70), .vRMI, .{AVX}),
vec(.VPSHUFLW, ops3(.ymml, .ymml_m256, .imm8), vex(.L256, ._F2, ._0F, .WIG, 0x70), .vRMI, .{AVX2}),
vec(.VPSHUFLW, ops3(.xmm_kz, .xmm_m128, .imm8), evex(.L128, ._F2, ._0F, .WIG, 0x70, fmem), .vRMI, .{ AVX512VL, AVX512BW }),
vec(.VPSHUFLW, ops3(.ymm_kz, .ymm_m256, .imm8), evex(.L256, ._F2, ._0F, .WIG, 0x70, fmem), .vRMI, .{ AVX512VL, AVX512BW }),
vec(.VPSHUFLW, ops3(.zmm_kz, .zmm_m512, .imm8), evex(.L512, ._F2, ._0F, .WIG, 0x70, fmem), .vRMI, .{AVX512BW}),
// VPSIGNB / VPSIGNW / VPSIGND
// VPSIGNB
vec(.VPSIGNB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x08), .RVM, .{AVX}),
vec(.VPSIGNB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x08), .RVM, .{AVX2}),
// VPSIGNW
vec(.VPSIGNW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x09), .RVM, .{AVX}),
vec(.VPSIGNW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x09), .RVM, .{AVX2}),
// VPSIGND
vec(.VPSIGND, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x0A), .RVM, .{AVX}),
vec(.VPSIGND, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x0A), .RVM, .{AVX2}),
// VPSLLDQ
vec(.VPSLLDQ, ops3(.xmml, .xmml_m128, .imm8), vexr(.L128, ._66, ._0F, .WIG, 0x73, 7), .VMI, .{AVX}),
vec(.VPSLLDQ, ops3(.ymml, .ymml_m256, .imm8), vexr(.L256, ._66, ._0F, .WIG, 0x73, 7), .VMI, .{AVX2}),
vec(.VPSLLDQ, ops3(.xmm_kz, .xmm_m128, .imm8), evexr(.L128, ._66, ._0F, .WIG, 0x73, 7, fmem), .VMI, .{ AVX512VL, AVX512BW }),
vec(.VPSLLDQ, ops3(.ymm_kz, .ymm_m256, .imm8), evexr(.L256, ._66, ._0F, .WIG, 0x73, 7, fmem), .VMI, .{ AVX512VL, AVX512BW }),
vec(.VPSLLDQ, ops3(.zmm_kz, .zmm_m512, .imm8), evexr(.L512, ._66, ._0F, .WIG, 0x73, 7, fmem), .VMI, .{AVX512BW}),
// VPSLLW / VPSLLD / VPSLLQ
// VPSLLW
vec(.VPSLLW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xF1), .RVM, .{AVX}),
vec(.VPSLLW, ops3(.xmml, .xmml_m128, .imm8), vexr(.L128, ._66, ._0F, .WIG, 0x71, 6), .VMI, .{AVX}),
vec(.VPSLLW, ops3(.ymml, .ymml, .xmml_m128), vex(.L256, ._66, ._0F, .WIG, 0xF1), .RVM, .{AVX2}),
vec(.VPSLLW, ops3(.ymml, .ymml_m256, .imm8), vexr(.L256, ._66, ._0F, .WIG, 0x71, 6), .VMI, .{AVX2}),
vec(.VPSLLW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xF1, mem128), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSLLW, ops3(.ymm_kz, .ymm, .xmm_m128), evex(.L256, ._66, ._0F, .WIG, 0xF1, mem128), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSLLW, ops3(.zmm_kz, .zmm, .xmm_m128), evex(.L512, ._66, ._0F, .WIG, 0xF1, mem128), .RVM, .{AVX512BW}),
vec(.VPSLLW, ops3(.xmm_kz, .xmm_m128, .imm8), evexr(.L128, ._66, ._0F, .WIG, 0x71, 6, fmem), .VMI, .{ AVX512VL, AVX512BW }),
vec(.VPSLLW, ops3(.ymm_kz, .ymm_m256, .imm8), evexr(.L256, ._66, ._0F, .WIG, 0x71, 6, fmem), .VMI, .{ AVX512VL, AVX512BW }),
vec(.VPSLLW, ops3(.zmm_kz, .zmm_m512, .imm8), evexr(.L512, ._66, ._0F, .WIG, 0x71, 6, fmem), .VMI, .{AVX512BW}),
// VPSLLD
vec(.VPSLLD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xF2), .RVM, .{AVX}),
vec(.VPSLLD, ops3(.xmml, .xmml_m128, .imm8), vexr(.L128, ._66, ._0F, .WIG, 0x72, 6), .VMI, .{AVX}),
vec(.VPSLLD, ops3(.ymml, .ymml, .xmml_m128), vex(.L256, ._66, ._0F, .WIG, 0xF2), .RVM, .{AVX2}),
vec(.VPSLLD, ops3(.ymml, .ymml_m256, .imm8), vexr(.L256, ._66, ._0F, .WIG, 0x72, 6), .VMI, .{AVX2}),
vec(.VPSLLD, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .W0, 0xF2, mem128), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSLLD, ops3(.ymm_kz, .ymm, .xmm_m128), evex(.L256, ._66, ._0F, .W0, 0xF2, mem128), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSLLD, ops3(.zmm_kz, .zmm, .xmm_m128), evex(.L512, ._66, ._0F, .W0, 0xF2, mem128), .RVM, .{AVX512F}),
vec(.VPSLLD, ops3(.xmm_kz, .xmm_m128_m32bcst, .imm8), evexr(.L128, ._66, ._0F, .W0, 0x72, 6, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPSLLD, ops3(.ymm_kz, .ymm_m256_m32bcst, .imm8), evexr(.L256, ._66, ._0F, .W0, 0x72, 6, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPSLLD, ops3(.zmm_kz, .zmm_m512_m32bcst, .imm8), evexr(.L512, ._66, ._0F, .W0, 0x72, 6, full), .VMI, .{AVX512F}),
// VPSLLQ
vec(.VPSLLQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xF3), .RVM, .{AVX}),
vec(.VPSLLQ, ops3(.xmml, .xmml_m128, .imm8), vexr(.L128, ._66, ._0F, .WIG, 0x73, 6), .VMI, .{AVX}),
vec(.VPSLLQ, ops3(.ymml, .ymml, .xmml_m128), vex(.L256, ._66, ._0F, .WIG, 0xF3), .RVM, .{AVX2}),
vec(.VPSLLQ, ops3(.ymml, .ymml_m256, .imm8), vexr(.L256, ._66, ._0F, .WIG, 0x73, 6), .VMI, .{AVX2}),
vec(.VPSLLQ, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .W1, 0xF3, mem128), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSLLQ, ops3(.ymm_kz, .ymm, .xmm_m128), evex(.L256, ._66, ._0F, .W1, 0xF3, mem128), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSLLQ, ops3(.zmm_kz, .zmm, .xmm_m128), evex(.L512, ._66, ._0F, .W1, 0xF3, mem128), .RVM, .{AVX512F}),
vec(.VPSLLQ, ops3(.xmm_kz, .xmm_m128_m64bcst, .imm8), evexr(.L128, ._66, ._0F, .W1, 0x73, 6, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPSLLQ, ops3(.ymm_kz, .ymm_m256_m64bcst, .imm8), evexr(.L256, ._66, ._0F, .W1, 0x73, 6, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPSLLQ, ops3(.zmm_kz, .zmm_m512_m64bcst, .imm8), evexr(.L512, ._66, ._0F, .W1, 0x73, 6, full), .VMI, .{AVX512F}),
// VPSRAW / VPSRAD / VPSRAQ
// VPSRAW
vec(.VPSRAW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xE1), .RVM, .{AVX}),
vec(.VPSRAW, ops3(.xmml, .xmml_m128, .imm8), vexr(.L128, ._66, ._0F, .WIG, 0x71, 4), .VMI, .{AVX}),
vec(.VPSRAW, ops3(.ymml, .ymml, .xmml_m128), vex(.L256, ._66, ._0F, .WIG, 0xE1), .RVM, .{AVX2}),
vec(.VPSRAW, ops3(.ymml, .ymml_m256, .imm8), vexr(.L256, ._66, ._0F, .WIG, 0x71, 4), .VMI, .{AVX2}),
vec(.VPSRAW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xE1, mem128), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSRAW, ops3(.ymm_kz, .ymm, .xmm_m128), evex(.L256, ._66, ._0F, .WIG, 0xE1, mem128), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSRAW, ops3(.zmm_kz, .zmm, .xmm_m128), evex(.L512, ._66, ._0F, .WIG, 0xE1, mem128), .RVM, .{AVX512BW}),
vec(.VPSRAW, ops3(.xmm_kz, .xmm_m128, .imm8), evexr(.L128, ._66, ._0F, .WIG, 0x71, 4, fmem), .VMI, .{ AVX512VL, AVX512BW }),
vec(.VPSRAW, ops3(.ymm_kz, .ymm_m256, .imm8), evexr(.L256, ._66, ._0F, .WIG, 0x71, 4, fmem), .VMI, .{ AVX512VL, AVX512BW }),
vec(.VPSRAW, ops3(.zmm_kz, .zmm_m512, .imm8), evexr(.L512, ._66, ._0F, .WIG, 0x71, 4, fmem), .VMI, .{AVX512BW}),
// VPSRAD
vec(.VPSRAD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xE2), .RVM, .{AVX}),
vec(.VPSRAD, ops3(.xmml, .xmml_m128, .imm8), vexr(.L128, ._66, ._0F, .WIG, 0x72, 4), .VMI, .{AVX}),
vec(.VPSRAD, ops3(.ymml, .ymml, .xmml_m128), vex(.L256, ._66, ._0F, .WIG, 0xE2), .RVM, .{AVX2}),
vec(.VPSRAD, ops3(.ymml, .ymml_m256, .imm8), vexr(.L256, ._66, ._0F, .WIG, 0x72, 4), .VMI, .{AVX2}),
vec(.VPSRAD, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .W0, 0xE2, mem128), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRAD, ops3(.ymm_kz, .ymm, .xmm_m128), evex(.L256, ._66, ._0F, .W0, 0xE2, mem128), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRAD, ops3(.zmm_kz, .zmm, .xmm_m128), evex(.L512, ._66, ._0F, .W0, 0xE2, mem128), .RVM, .{AVX512F}),
vec(.VPSRAD, ops3(.xmm_kz, .xmm_m128_m32bcst, .imm8), evexr(.L128, ._66, ._0F, .W0, 0x72, 4, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPSRAD, ops3(.ymm_kz, .ymm_m256_m32bcst, .imm8), evexr(.L256, ._66, ._0F, .W0, 0x72, 4, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPSRAD, ops3(.zmm_kz, .zmm_m512_m32bcst, .imm8), evexr(.L512, ._66, ._0F, .W0, 0x72, 4, full), .VMI, .{AVX512F}),
// VPSRAQ
vec(.VPSRAQ, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .W1, 0xE2, mem128), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRAQ, ops3(.ymm_kz, .ymm, .xmm_m128), evex(.L256, ._66, ._0F, .W1, 0xE2, mem128), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRAQ, ops3(.zmm_kz, .zmm, .xmm_m128), evex(.L512, ._66, ._0F, .W1, 0xE2, mem128), .RVM, .{AVX512F}),
vec(.VPSRAQ, ops3(.xmm_kz, .xmm_m128_m64bcst, .imm8), evexr(.L128, ._66, ._0F, .W1, 0x72, 4, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPSRAQ, ops3(.ymm_kz, .ymm_m256_m64bcst, .imm8), evexr(.L256, ._66, ._0F, .W1, 0x72, 4, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPSRAQ, ops3(.zmm_kz, .zmm_m512_m64bcst, .imm8), evexr(.L512, ._66, ._0F, .W1, 0x72, 4, full), .VMI, .{AVX512F}),
// VPSRLDQ
vec(.VPSRLDQ, ops3(.xmml, .xmml_m128, .imm8), vexr(.L128, ._66, ._0F, .WIG, 0x73, 3), .VMI, .{AVX}),
vec(.VPSRLDQ, ops3(.ymml, .ymml_m256, .imm8), vexr(.L256, ._66, ._0F, .WIG, 0x73, 3), .VMI, .{AVX2}),
vec(.VPSRLDQ, ops3(.xmm_kz, .xmm_m128, .imm8), evexr(.L128, ._66, ._0F, .WIG, 0x73, 3, fmem), .VMI, .{ AVX512VL, AVX512BW }),
vec(.VPSRLDQ, ops3(.ymm_kz, .ymm_m256, .imm8), evexr(.L256, ._66, ._0F, .WIG, 0x73, 3, fmem), .VMI, .{ AVX512VL, AVX512BW }),
vec(.VPSRLDQ, ops3(.zmm_kz, .zmm_m512, .imm8), evexr(.L512, ._66, ._0F, .WIG, 0x73, 3, fmem), .VMI, .{AVX512BW}),
// VPSRLW / VPSRLD / VPSRLQ
// VPSRLW
vec(.VPSRLW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xD1), .RVM, .{AVX}),
vec(.VPSRLW, ops3(.xmml, .xmml_m128, .imm8), vexr(.L128, ._66, ._0F, .WIG, 0x71, 2), .VMI, .{AVX}),
vec(.VPSRLW, ops3(.ymml, .ymml, .xmml_m128), vex(.L256, ._66, ._0F, .WIG, 0xD1), .RVM, .{AVX2}),
vec(.VPSRLW, ops3(.ymml, .ymml_m256, .imm8), vexr(.L256, ._66, ._0F, .WIG, 0x71, 2), .VMI, .{AVX2}),
vec(.VPSRLW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xD1, mem128), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSRLW, ops3(.ymm_kz, .ymm, .xmm_m128), evex(.L256, ._66, ._0F, .WIG, 0xD1, mem128), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSRLW, ops3(.zmm_kz, .zmm, .xmm_m128), evex(.L512, ._66, ._0F, .WIG, 0xD1, mem128), .RVM, .{AVX512BW}),
vec(.VPSRLW, ops3(.xmm_kz, .xmm_m128, .imm8), evexr(.L128, ._66, ._0F, .WIG, 0x71, 2, fmem), .VMI, .{ AVX512VL, AVX512BW }),
vec(.VPSRLW, ops3(.ymm_kz, .ymm_m256, .imm8), evexr(.L256, ._66, ._0F, .WIG, 0x71, 2, fmem), .VMI, .{ AVX512VL, AVX512BW }),
vec(.VPSRLW, ops3(.zmm_kz, .zmm_m512, .imm8), evexr(.L512, ._66, ._0F, .WIG, 0x71, 2, fmem), .VMI, .{AVX512BW}),
// VPSRLD
vec(.VPSRLD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xD2), .RVM, .{AVX}),
vec(.VPSRLD, ops3(.xmml, .xmml_m128, .imm8), vexr(.L128, ._66, ._0F, .WIG, 0x72, 2), .VMI, .{AVX}),
vec(.VPSRLD, ops3(.ymml, .ymml, .xmml_m128), vex(.L256, ._66, ._0F, .WIG, 0xD2), .RVM, .{AVX2}),
vec(.VPSRLD, ops3(.ymml, .ymml_m256, .imm8), vexr(.L256, ._66, ._0F, .WIG, 0x72, 2), .VMI, .{AVX2}),
vec(.VPSRLD, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .W0, 0xD2, mem128), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRLD, ops3(.ymm_kz, .ymm, .xmm_m128), evex(.L256, ._66, ._0F, .W0, 0xD2, mem128), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRLD, ops3(.zmm_kz, .zmm, .xmm_m128), evex(.L512, ._66, ._0F, .W0, 0xD2, mem128), .RVM, .{AVX512F}),
vec(.VPSRLD, ops3(.xmm_kz, .xmm_m128_m32bcst, .imm8), evexr(.L128, ._66, ._0F, .W0, 0x72, 2, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPSRLD, ops3(.ymm_kz, .ymm_m256_m32bcst, .imm8), evexr(.L256, ._66, ._0F, .W0, 0x72, 2, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPSRLD, ops3(.zmm_kz, .zmm_m512_m32bcst, .imm8), evexr(.L512, ._66, ._0F, .W0, 0x72, 2, full), .VMI, .{AVX512F}),
// VPSRLQ
vec(.VPSRLQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xD3), .RVM, .{AVX}),
vec(.VPSRLQ, ops3(.xmml, .xmml_m128, .imm8), vexr(.L128, ._66, ._0F, .WIG, 0x73, 2), .VMI, .{AVX}),
vec(.VPSRLQ, ops3(.ymml, .ymml, .xmml_m128), vex(.L256, ._66, ._0F, .WIG, 0xD3), .RVM, .{AVX2}),
vec(.VPSRLQ, ops3(.ymml, .ymml_m256, .imm8), vexr(.L256, ._66, ._0F, .WIG, 0x73, 2), .VMI, .{AVX2}),
vec(.VPSRLQ, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .W1, 0xD3, mem128), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRLQ, ops3(.ymm_kz, .ymm, .xmm_m128), evex(.L256, ._66, ._0F, .W1, 0xD3, mem128), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRLQ, ops3(.zmm_kz, .zmm, .xmm_m128), evex(.L512, ._66, ._0F, .W1, 0xD3, mem128), .RVM, .{AVX512F}),
vec(.VPSRLQ, ops3(.xmm_kz, .xmm_m128_m64bcst, .imm8), evexr(.L128, ._66, ._0F, .W1, 0x73, 2, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPSRLQ, ops3(.ymm_kz, .ymm_m256_m64bcst, .imm8), evexr(.L256, ._66, ._0F, .W1, 0x73, 2, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPSRLQ, ops3(.zmm_kz, .zmm_m512_m64bcst, .imm8), evexr(.L512, ._66, ._0F, .W1, 0x73, 2, full), .VMI, .{AVX512F}),
// VPSUBB / VPSUBW / VPSUBD
// VPSUBB
vec(.VPSUBB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xF8), .RVM, .{AVX}),
vec(.VPSUBB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xF8), .RVM, .{AVX2}),
vec(.VPSUBB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xF8, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSUBB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xF8, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSUBB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xF8, fmem), .RVM, .{AVX512BW}),
// VPSUBW
vec(.VPSUBW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xF9), .RVM, .{AVX}),
vec(.VPSUBW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xF9), .RVM, .{AVX2}),
vec(.VPSUBW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xF9, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSUBW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xF9, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSUBW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xF9, fmem), .RVM, .{AVX512BW}),
// VPSUBD
vec(.VPSUBD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xFA), .RVM, .{AVX}),
vec(.VPSUBD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xFA), .RVM, .{AVX2}),
vec(.VPSUBD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F, .W0, 0xFA, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSUBD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F, .W0, 0xFA, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSUBD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F, .W0, 0xFA, full), .RVM, .{AVX512F}),
// VPSUBQ
vec(.VPSUBQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xFB), .RVM, .{AVX}),
vec(.VPSUBQ, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xFB), .RVM, .{AVX2}),
vec(.VPSUBQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0xFB, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSUBQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0xFB, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSUBQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0xFB, full), .RVM, .{AVX512F}),
// VPSUBSB / VPSUBSW
vec(.VPSUBSB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xE8), .RVM, .{AVX}),
vec(.VPSUBSB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xE8), .RVM, .{AVX2}),
vec(.VPSUBSB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xE8, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSUBSB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xE8, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSUBSB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xE8, fmem), .RVM, .{AVX512BW}),
//
vec(.VPSUBSW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xE9), .RVM, .{AVX}),
vec(.VPSUBSW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xE9), .RVM, .{AVX2}),
vec(.VPSUBSW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xE9, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSUBSW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xE9, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSUBSW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xE9, fmem), .RVM, .{AVX512BW}),
// VPSUBUSB / VPSUBUSW
vec(.VPSUBUSB, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xD8), .RVM, .{AVX}),
vec(.VPSUBUSB, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xD8), .RVM, .{AVX2}),
vec(.VPSUBUSB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xD8, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSUBUSB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xD8, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSUBUSB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xD8, fmem), .RVM, .{AVX512BW}),
//
vec(.VPSUBUSW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xD9), .RVM, .{AVX}),
vec(.VPSUBUSW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xD9), .RVM, .{AVX2}),
vec(.VPSUBUSW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0xD9, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSUBUSW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0xD9, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSUBUSW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0xD9, fmem), .RVM, .{AVX512BW}),
// VPTEST
vec(.VPTEST, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .WIG, 0x17), .vRM, .{AVX}),
vec(.VPTEST, ops2(.ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .WIG, 0x17), .vRM, .{AVX}),
// VPUNPCKHBW / VPUNPCKHWD / VPUNPCKHDQ / VPUNPCKHQDQ
// VPUNPCKHBW
vec(.VPUNPCKHBW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x68), .RVM, .{AVX}),
vec(.VPUNPCKHBW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x68), .RVM, .{AVX2}),
vec(.VPUNPCKHBW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0x68, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPUNPCKHBW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0x68, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPUNPCKHBW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0x68, fmem), .RVM, .{AVX512BW}),
// VPUNPCKHWD
vec(.VPUNPCKHWD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x69), .RVM, .{AVX}),
vec(.VPUNPCKHWD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x69), .RVM, .{AVX2}),
vec(.VPUNPCKHWD, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0x69, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPUNPCKHWD, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0x69, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPUNPCKHWD, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0x69, fmem), .RVM, .{AVX512BW}),
// VPUNPCKHDQ
vec(.VPUNPCKHDQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x6A), .RVM, .{AVX}),
vec(.VPUNPCKHDQ, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x6A), .RVM, .{AVX2}),
vec(.VPUNPCKHDQ, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F, .W0, 0x6A, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPUNPCKHDQ, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F, .W0, 0x6A, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPUNPCKHDQ, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F, .W0, 0x6A, full), .RVM, .{AVX512F}),
// VPUNPCKHQDQ
vec(.VPUNPCKHQDQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x6D), .RVM, .{AVX}),
vec(.VPUNPCKHQDQ, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x6D), .RVM, .{AVX2}),
vec(.VPUNPCKHQDQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x6D, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPUNPCKHQDQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x6D, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPUNPCKHQDQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0x6D, full), .RVM, .{AVX512F}),
// VPUNPCKLBW / VPUNPCKLWD / VPUNPCKLDQ / VPUNPCKLQDQ
// VPUNPCKLBW
vec(.VPUNPCKLBW, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x60), .RVM, .{AVX}),
vec(.VPUNPCKLBW, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x60), .RVM, .{AVX2}),
vec(.VPUNPCKLBW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0x60, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPUNPCKLBW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0x60, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPUNPCKLBW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0x60, fmem), .RVM, .{AVX512BW}),
// VPUNPCKLWD
vec(.VPUNPCKLWD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x61), .RVM, .{AVX}),
vec(.VPUNPCKLWD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x61), .RVM, .{AVX2}),
vec(.VPUNPCKLWD, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F, .WIG, 0x61, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPUNPCKLWD, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F, .WIG, 0x61, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPUNPCKLWD, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F, .WIG, 0x61, fmem), .RVM, .{AVX512BW}),
// VPUNPCKLDQ
vec(.VPUNPCKLDQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x62), .RVM, .{AVX}),
vec(.VPUNPCKLDQ, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x62), .RVM, .{AVX2}),
vec(.VPUNPCKLDQ, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F, .W0, 0x62, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPUNPCKLDQ, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F, .W0, 0x62, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPUNPCKLDQ, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F, .W0, 0x62, full), .RVM, .{AVX512F}),
// VPUNPCKLQDQ
vec(.VPUNPCKLQDQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x6C), .RVM, .{AVX}),
vec(.VPUNPCKLQDQ, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x6C), .RVM, .{AVX2}),
vec(.VPUNPCKLQDQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x6C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPUNPCKLQDQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x6C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPUNPCKLQDQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0x6C, full), .RVM, .{AVX512F}),
// VPXOR
vec(.VPXOR, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0xEF), .RVM, .{AVX}),
vec(.VPXOR, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0xEF), .RVM, .{AVX2}),
//
vec(.VPXORD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F, .W0, 0xEF, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPXORD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F, .W0, 0xEF, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPXORD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F, .W0, 0xEF, full), .RVM, .{AVX512F}),
//
vec(.VPXORQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0xEF, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPXORQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0xEF, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPXORQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0xEF, full), .RVM, .{AVX512F}),
// VRCPPS
vec(.VRCPPS, ops2(.xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x53), .vRM, .{AVX}),
vec(.VRCPPS, ops2(.ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x53), .vRM, .{AVX}),
// VRCPSS
vec(.VRCPSS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._F3, ._0F, .WIG, 0x53), .RVM, .{AVX}),
// VROUNDPD
vec(.VROUNDPD, ops3(.xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x09), .vRMI, .{AVX}),
vec(.VROUNDPD, ops3(.ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .WIG, 0x09), .vRMI, .{AVX}),
// VROUNDPS
vec(.VROUNDPS, ops3(.xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .WIG, 0x08), .vRMI, .{AVX}),
vec(.VROUNDPS, ops3(.ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .WIG, 0x08), .vRMI, .{AVX}),
// VROUNDSD
vec(.VROUNDSD, ops4(.xmml, .xmml, .xmml_m64, .imm8), vex(.LIG, ._66, ._0F3A, .WIG, 0x0B), .RVMI, .{AVX}),
// VROUNDSS
vec(.VROUNDSS, ops4(.xmml, .xmml, .xmml_m32, .imm8), vex(.LIG, ._66, ._0F3A, .WIG, 0x0A), .RVMI, .{AVX}),
// VRSQRTPS
vec(.VRSQRTPS, ops2(.xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x52), .vRM, .{AVX}),
vec(.VRSQRTPS, ops2(.ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x52), .vRM, .{AVX}),
// VRSQRTSS
vec(.VRSQRTSS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._F3, ._0F, .WIG, 0x52), .RVM, .{AVX}),
// VSHUFPD
// Fix: the second (vvvv) source operand must match the vector length of the
// destination (Intel SDM: VSHUFPD ymm1, ymm2, ymm3/m256, imm8 and the EVEX
// ymm/zmm forms take a ymm/zmm first source, not xmm).
vec(.VSHUFPD, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F, .WIG, 0xC6), .RVMI, .{AVX}),
vec(.VSHUFPD, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F, .WIG, 0xC6), .RVMI, .{AVX}),
vec(.VSHUFPD, ops4(.xmm_kz, .xmm, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F, .W1, 0xC6, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VSHUFPD, ops4(.ymm_kz, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F, .W1, 0xC6, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VSHUFPD, ops4(.zmm_kz, .zmm, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F, .W1, 0xC6, full), .RVMI, .{AVX512F}),
// VSHUFPS
// Fix: same second-operand width defect as VSHUFPD — the vvvv source must be
// ymm for the 256-bit forms and zmm for the 512-bit form (Intel SDM).
vec(.VSHUFPS, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._NP, ._0F, .WIG, 0xC6), .RVMI, .{AVX}),
vec(.VSHUFPS, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._NP, ._0F, .WIG, 0xC6), .RVMI, .{AVX}),
vec(.VSHUFPS, ops4(.xmm_kz, .xmm, .xmm_m128_m32bcst, .imm8), evex(.L128, ._NP, ._0F, .W0, 0xC6, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VSHUFPS, ops4(.ymm_kz, .ymm, .ymm_m256_m32bcst, .imm8), evex(.L256, ._NP, ._0F, .W0, 0xC6, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VSHUFPS, ops4(.zmm_kz, .zmm, .zmm_m512_m32bcst, .imm8), evex(.L512, ._NP, ._0F, .W0, 0xC6, full), .RVMI, .{AVX512F}),
// VSQRTPD
vec(.VSQRTPD, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x51), .vRM, .{AVX}),
vec(.VSQRTPD, ops2(.ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x51), .vRM, .{AVX}),
vec(.VSQRTPD, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x51, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VSQRTPD, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x51, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VSQRTPD, ops2(.zmm_kz, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F, .W1, 0x51, full), .vRM, .{AVX512F}),
// VSQRTPS
vec(.VSQRTPS, ops2(.xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x51), .vRM, .{AVX}),
vec(.VSQRTPS, ops2(.ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x51), .vRM, .{AVX}),
vec(.VSQRTPS, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x51, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VSQRTPS, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x51, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VSQRTPS, ops2(.zmm_kz, .zmm_m512_m32bcst_er), evex(.L512, ._NP, ._0F, .W0, 0x51, full), .vRM, .{AVX512F}),
// VSQRTSD
vec(.VSQRTSD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._F2, ._0F, .WIG, 0x51), .RVM, .{AVX}),
vec(.VSQRTSD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._F2, ._0F, .W1, 0x51, t1s), .RVM, .{AVX512F}),
// VSQRTSS
vec(.VSQRTSS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._F3, ._0F, .WIG, 0x51), .RVM, .{AVX}),
vec(.VSQRTSS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._F3, ._0F, .W0, 0x51, t1s), .RVM, .{AVX512F}),
// VSTMXCSR
vec(.VSTMXCSR, ops1(.rm_mem32), vexr(.LZ, ._NP, ._0F, .WIG, 0xAE, 3), .vM, .{AVX}),
// VSUBPD
vec(.VSUBPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x5C), .RVM, .{AVX}),
vec(.VSUBPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x5C), .RVM, .{AVX}),
vec(.VSUBPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x5C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VSUBPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x5C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VSUBPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F, .W1, 0x5C, full), .RVM, .{AVX512F}),
// VSUBPS
vec(.VSUBPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x5C), .RVM, .{AVX}),
vec(.VSUBPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x5C), .RVM, .{AVX}),
vec(.VSUBPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x5C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VSUBPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x5C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VSUBPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._NP, ._0F, .W0, 0x5C, full), .RVM, .{AVX512F}),
// VSUBSD
vec(.VSUBSD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._F2, ._0F, .WIG, 0x5C), .RVM, .{AVX}),
vec(.VSUBSD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._F2, ._0F, .W1, 0x5C, t1s), .RVM, .{AVX512F}),
// VSUBSS
vec(.VSUBSS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._F3, ._0F, .WIG, 0x5C), .RVM, .{AVX}),
vec(.VSUBSS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._F3, ._0F, .W0, 0x5C, t1s), .RVM, .{AVX512F}),
// VUCOMISD
vec(.VUCOMISD, ops2(.xmml, .xmml_m64), vex(.LIG, ._66, ._0F, .WIG, 0x2E), .vRM, .{AVX}),
vec(.VUCOMISD, ops2(.xmm_kz, .xmm_m64_sae), evex(.LIG, ._66, ._0F, .W1, 0x2E, t1s), .vRM, .{AVX512F}),
// VUCOMISS
vec(.VUCOMISS, ops2(.xmml, .xmml_m32), vex(.LIG, ._NP, ._0F, .WIG, 0x2E), .vRM, .{AVX}),
vec(.VUCOMISS, ops2(.xmm_kz, .xmm_m32_sae), evex(.LIG, ._NP, ._0F, .W0, 0x2E, t1s), .vRM, .{AVX512F}),
// VUNPCKHPD
vec(.VUNPCKHPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x15), .RVM, .{AVX}),
vec(.VUNPCKHPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x15), .RVM, .{AVX}),
vec(.VUNPCKHPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x15, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VUNPCKHPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x15, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VUNPCKHPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0x15, full), .RVM, .{AVX512F}),
// VUNPCKHPS
vec(.VUNPCKHPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x15), .RVM, .{AVX}),
vec(.VUNPCKHPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x15), .RVM, .{AVX}),
vec(.VUNPCKHPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x15, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VUNPCKHPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x15, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VUNPCKHPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._NP, ._0F, .W0, 0x15, full), .RVM, .{AVX512F}),
// VUNPCKLPD
vec(.VUNPCKLPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x14), .RVM, .{AVX}),
vec(.VUNPCKLPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x14), .RVM, .{AVX}),
vec(.VUNPCKLPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x14, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VUNPCKLPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x14, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VUNPCKLPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0x14, full), .RVM, .{AVX512F}),
// VUNPCKLPS
vec(.VUNPCKLPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x14), .RVM, .{AVX}),
vec(.VUNPCKLPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x14), .RVM, .{AVX}),
vec(.VUNPCKLPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x14, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VUNPCKLPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x14, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VUNPCKLPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._NP, ._0F, .W0, 0x14, full), .RVM, .{AVX512F}),
//
// Instructions V-Z
//
// VALIGND / VALIGNQ
// VALIGND
// Fix: first source (vvvv) operand must match destination width —
// VALIGND ymm1{k1}{z}, ymm2, ... and zmm1{k1}{z}, zmm2, ... (Intel SDM).
vec(.VALIGND, ops4(.xmm_kz, .xmm, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x03, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VALIGND, ops4(.ymm_kz, .ymm, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x03, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VALIGND, ops4(.zmm_kz, .zmm, .zmm_m512_m32bcst, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x03, full), .RVMI, .{AVX512F}),
// VALIGNQ
// Fix: same as VALIGND — the vvvv source register must be ymm/zmm for the
// L256/L512 forms, not xmm (Intel SDM).
vec(.VALIGNQ, ops4(.xmm_kz, .xmm, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x03, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VALIGNQ, ops4(.ymm_kz, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x03, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VALIGNQ, ops4(.zmm_kz, .zmm, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x03, full), .RVMI, .{AVX512F}),
// VBLENDMPD / VBLENDMPS
// VBLENDMPD
vec(.VBLENDMPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x65, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VBLENDMPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x65, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VBLENDMPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x65, full), .RVM, .{AVX512F}),
// VBLENDMPS
vec(.VBLENDMPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x65, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VBLENDMPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x65, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VBLENDMPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x65, full), .RVM, .{AVX512F}),
// VBROADCAST
// VBROADCASTSS
vec(.VBROADCASTSS, ops2(.xmml, .rm_mem32), vex(.L128, ._66, ._0F38, .W0, 0x18), .vRM, .{AVX}),
vec(.VBROADCASTSS, ops2(.ymml, .rm_mem32), vex(.L256, ._66, ._0F38, .W0, 0x18), .vRM, .{AVX}),
vec(.VBROADCASTSS, ops2(.xmml, .rm_xmml), vex(.L128, ._66, ._0F38, .W0, 0x18), .vRM, .{AVX2}),
vec(.VBROADCASTSS, ops2(.ymml, .rm_xmml), vex(.L256, ._66, ._0F38, .W0, 0x18), .vRM, .{AVX2}),
vec(.VBROADCASTSS, ops2(.xmm_kz, .xmm_m32), evex(.L128, ._66, ._0F38, .W0, 0x18, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VBROADCASTSS, ops2(.ymm_kz, .xmm_m32), evex(.L256, ._66, ._0F38, .W0, 0x18, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VBROADCASTSS, ops2(.zmm_kz, .xmm_m32), evex(.L512, ._66, ._0F38, .W0, 0x18, t1s), .vRM, .{AVX512F}),
// VBROADCASTSD
vec(.VBROADCASTSD, ops2(.ymml, .rm_mem64), vex(.L256, ._66, ._0F38, .W0, 0x19), .vRM, .{AVX}),
vec(.VBROADCASTSD, ops2(.ymml, .rm_xmml), vex(.L256, ._66, ._0F38, .W0, 0x19), .vRM, .{AVX2}),
vec(.VBROADCASTSD, ops2(.ymm_kz, .xmm_m64), evex(.L256, ._66, ._0F38, .W1, 0x19, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VBROADCASTSD, ops2(.zmm_kz, .xmm_m64), evex(.L512, ._66, ._0F38, .W1, 0x19, t1s), .vRM, .{AVX512F}),
// VBROADCASTF128
vec(.VBROADCASTF128, ops2(.ymml, .rm_mem128), vex(.L256, ._66, ._0F38, .W0, 0x1A), .vRM, .{AVX}),
// VBROADCASTF32X2
vec(.VBROADCASTF32X2, ops2(.ymm_kz, .xmm_m64), evex(.L256, ._66, ._0F38, .W0, 0x19, tup2), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VBROADCASTF32X2, ops2(.zmm_kz, .xmm_m64), evex(.L512, ._66, ._0F38, .W0, 0x19, tup2), .vRM, .{AVX512DQ}),
// VBROADCASTF32X4
vec(.VBROADCASTF32X4, ops2(.ymm_kz, .rm_mem128), evex(.L256, ._66, ._0F38, .W0, 0x1A, tup4), .vRM, .{ AVX512VL, AVX512F }),
vec(.VBROADCASTF32X4, ops2(.zmm_kz, .rm_mem128), evex(.L512, ._66, ._0F38, .W0, 0x1A, tup4), .vRM, .{AVX512F}),
// VBROADCASTF64X2
vec(.VBROADCASTF64X2, ops2(.ymm_kz, .rm_mem128), evex(.L256, ._66, ._0F38, .W1, 0x1A, tup2), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VBROADCASTF64X2, ops2(.zmm_kz, .rm_mem128), evex(.L512, ._66, ._0F38, .W1, 0x1A, tup2), .vRM, .{AVX512DQ}),
// VBROADCASTF32X8
vec(.VBROADCASTF32X8, ops2(.zmm_kz, .rm_mem256), evex(.L512, ._66, ._0F38, .W0, 0x1B, tup8), .vRM, .{AVX512DQ}),
// VBROADCASTF64X4
vec(.VBROADCASTF64X4, ops2(.zmm_kz, .rm_mem256), evex(.L512, ._66, ._0F38, .W1, 0x1B, tup4), .vRM, .{AVX512F}),
// VCOMPRESSPD
vec(.VCOMPRESSPD, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._66, ._0F38, .W1, 0x8A, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VCOMPRESSPD, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._66, ._0F38, .W1, 0x8A, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VCOMPRESSPD, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._66, ._0F38, .W1, 0x8A, t1s), .vMR, .{AVX512F}),
// VCOMPRESSPS
vec(.VCOMPRESSPS, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._66, ._0F38, .W0, 0x8A, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VCOMPRESSPS, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._66, ._0F38, .W0, 0x8A, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VCOMPRESSPS, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._66, ._0F38, .W0, 0x8A, t1s), .vMR, .{AVX512F}),
// VCVTPD2QQ
vec(.VCVTPD2QQ, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x7B, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTPD2QQ, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x7B, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTPD2QQ, ops2(.zmm_kz, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F, .W1, 0x7B, full), .vRM, .{AVX512DQ}),
// VCVTPD2UDQ
vec(.VCVTPD2UDQ, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._NP, ._0F, .W1, 0x79, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTPD2UDQ, ops2(.xmm_kz, .ymm_m256_m64bcst), evex(.L256, ._NP, ._0F, .W1, 0x79, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTPD2UDQ, ops2(.ymm_kz, .zmm_m512_m64bcst_er), evex(.L512, ._NP, ._0F, .W1, 0x79, full), .vRM, .{AVX512F}),
// VCVTPD2UQQ
vec(.VCVTPD2UQQ, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x79, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTPD2UQQ, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x79, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTPD2UQQ, ops2(.zmm_kz, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F, .W1, 0x79, full), .vRM, .{AVX512DQ}),
// VCVTPH2PS
vec(.VCVTPH2PS, ops2(.xmml, .xmml_m64), vex(.L128, ._66, ._0F38, .W0, 0x13), .vRM, .{cpu.F16C}),
vec(.VCVTPH2PS, ops2(.ymml, .xmml_m128), vex(.L256, ._66, ._0F38, .W0, 0x13), .vRM, .{cpu.F16C}),
vec(.VCVTPH2PS, ops2(.xmm_kz, .xmm_m64), evex(.L128, ._66, ._0F38, .W0, 0x13, hmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTPH2PS, ops2(.ymm_kz, .xmm_m128), evex(.L256, ._66, ._0F38, .W0, 0x13, hmem), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTPH2PS, ops2(.zmm_kz, .ymm_m256_sae), evex(.L512, ._66, ._0F38, .W0, 0x13, hmem), .vRM, .{AVX512F}),
// VCVTPS2PH
vec(.VCVTPS2PH, ops3(.xmml_m64, .xmml, .imm8), vex(.L128, ._66, ._0F3A, .W0, 0x1D), .vMRI, .{cpu.F16C}),
vec(.VCVTPS2PH, ops3(.xmml_m128, .ymml, .imm8), vex(.L256, ._66, ._0F3A, .W0, 0x1D), .vMRI, .{cpu.F16C}),
vec(.VCVTPS2PH, ops3(.xmm_m64_kz, .xmm, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x1D, hmem), .vMRI, .{ AVX512VL, AVX512F }),
vec(.VCVTPS2PH, ops3(.xmm_m128_kz, .ymm, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x1D, hmem), .vMRI, .{ AVX512VL, AVX512F }),
vec(.VCVTPS2PH, ops3(.ymm_m256_kz, .zmm_sae, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x1D, hmem), .vMRI, .{AVX512F}),
// VCVTPS2QQ
vec(.VCVTPS2QQ, ops2(.xmm_kz, .xmm_m64_m32bcst), evex(.L128, ._66, ._0F, .W0, 0x7B, half), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTPS2QQ, ops2(.ymm_kz, .xmm_m128_m32bcst), evex(.L256, ._66, ._0F, .W0, 0x7B, half), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTPS2QQ, ops2(.zmm_kz, .ymm_m256_m32bcst_er), evex(.L512, ._66, ._0F, .W0, 0x7B, half), .vRM, .{AVX512DQ}),
// VCVTPS2UDQ
vec(.VCVTPS2UDQ, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x79, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTPS2UDQ, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x79, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTPS2UDQ, ops2(.zmm_kz, .zmm_m512_m32bcst_er), evex(.L512, ._NP, ._0F, .W0, 0x79, full), .vRM, .{AVX512F}),
// VCVTPS2UQQ
vec(.VCVTPS2UQQ, ops2(.xmm_kz, .xmm_m64_m32bcst), evex(.L128, ._66, ._0F, .W0, 0x79, half), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTPS2UQQ, ops2(.ymm_kz, .xmm_m128_m32bcst), evex(.L256, ._66, ._0F, .W0, 0x79, half), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTPS2UQQ, ops2(.zmm_kz, .ymm_m256_m32bcst_er), evex(.L512, ._66, ._0F, .W0, 0x79, half), .vRM, .{AVX512DQ}),
// VCVTQQ2PD
vec(.VCVTQQ2PD, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._F3, ._0F, .W1, 0xE6, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTQQ2PD, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._F3, ._0F, .W1, 0xE6, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTQQ2PD, ops2(.zmm_kz, .zmm_m512_m64bcst_er), evex(.L512, ._F3, ._0F, .W1, 0xE6, full), .vRM, .{AVX512DQ}),
// VCVTQQ2PS
vec(.VCVTQQ2PS, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._NP, ._0F, .W1, 0x5B, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTQQ2PS, ops2(.xmm_kz, .ymm_m256_m64bcst), evex(.L256, ._NP, ._0F, .W1, 0x5B, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTQQ2PS, ops2(.ymm_kz, .zmm_m512_m64bcst_er), evex(.L512, ._NP, ._0F, .W1, 0x5B, full), .vRM, .{AVX512DQ}),
// VCVTSD2USI
vec(.VCVTSD2USI, ops2(.reg32, .xmm_m64_er), evex(.LIG, ._F2, ._0F, .W0, 0x79, t1f), .vRM, .{AVX512F}),
vec(.VCVTSD2USI, ops2(.reg64, .xmm_m64_er), evex(.LIG, ._F2, ._0F, .W1, 0x79, t1f), .vRM, .{ AVX512F, No32 }),
// VCVTSS2USI
vec(.VCVTSS2USI, ops2(.reg32, .xmm_m32_er), evex(.LIG, ._F3, ._0F, .W0, 0x79, t1f), .vRM, .{AVX512F}),
vec(.VCVTSS2USI, ops2(.reg64, .xmm_m32_er), evex(.LIG, ._F3, ._0F, .W1, 0x79, t1f), .vRM, .{ AVX512F, No32 }),
// VCVTTPD2QQ
vec(.VCVTTPD2QQ, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x7A, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTTPD2QQ, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x7A, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTTPD2QQ, ops2(.zmm_kz, .zmm_m512_m64bcst_sae), evex(.L512, ._66, ._0F, .W1, 0x7A, full), .vRM, .{AVX512DQ}),
// VCVTTPD2UDQ
vec(.VCVTTPD2UDQ, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._NP, ._0F, .W1, 0x78, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTTPD2UDQ, ops2(.xmm_kz, .ymm_m256_m64bcst), evex(.L256, ._NP, ._0F, .W1, 0x78, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTTPD2UDQ, ops2(.ymm_kz, .zmm_m512_m64bcst_sae), evex(.L512, ._NP, ._0F, .W1, 0x78, full), .vRM, .{AVX512F}),
// VCVTTPD2UQQ
vec(.VCVTTPD2UQQ, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x78, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTTPD2UQQ, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x78, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTTPD2UQQ, ops2(.zmm_kz, .zmm_m512_m64bcst_sae), evex(.L512, ._66, ._0F, .W1, 0x78, full), .vRM, .{AVX512DQ}),
// VCVTTPS2QQ
vec(.VCVTTPS2QQ, ops2(.xmm_kz, .xmm_m64_m32bcst), evex(.L128, ._66, ._0F, .W0, 0x7A, half), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTTPS2QQ, ops2(.ymm_kz, .xmm_m128_m32bcst), evex(.L256, ._66, ._0F, .W0, 0x7A, half), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTTPS2QQ, ops2(.zmm_kz, .ymm_m256_m32bcst_sae), evex(.L512, ._66, ._0F, .W0, 0x7A, half), .vRM, .{AVX512DQ}),
// VCVTTPS2UDQ
vec(.VCVTTPS2UDQ, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x78, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTTPS2UDQ, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x78, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTTPS2UDQ, ops2(.zmm_kz, .zmm_m512_m32bcst_sae), evex(.L512, ._NP, ._0F, .W0, 0x78, full), .vRM, .{AVX512F}),
// VCVTTPS2UQQ
vec(.VCVTTPS2UQQ, ops2(.xmm_kz, .xmm_m64_m32bcst), evex(.L128, ._66, ._0F, .W0, 0x78, half), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTTPS2UQQ, ops2(.ymm_kz, .xmm_m128_m32bcst), evex(.L256, ._66, ._0F, .W0, 0x78, half), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTTPS2UQQ, ops2(.zmm_kz, .ymm_m256_m32bcst_sae), evex(.L512, ._66, ._0F, .W0, 0x78, half), .vRM, .{AVX512DQ}),
// VCVTTSD2USI
vec(.VCVTTSD2USI, ops2(.reg32, .xmm_m64_sae), evex(.LIG, ._F2, ._0F, .W0, 0x78, t1f), .vRM, .{AVX512F}),
vec(.VCVTTSD2USI, ops2(.reg64, .xmm_m64_sae), evex(.LIG, ._F2, ._0F, .W1, 0x78, t1f), .vRM, .{ AVX512F, No32 }),
// VCVTTSS2USI
vec(.VCVTTSS2USI, ops2(.reg32, .xmm_m32_sae), evex(.LIG, ._F3, ._0F, .W0, 0x78, t1f), .vRM, .{AVX512F}),
vec(.VCVTTSS2USI, ops2(.reg64, .xmm_m32_sae), evex(.LIG, ._F3, ._0F, .W1, 0x78, t1f), .vRM, .{ AVX512F, No32 }),
// VCVTUDQ2PD
vec(.VCVTUDQ2PD, ops2(.xmm_kz, .xmm_m64_m32bcst), evex(.L128, ._F3, ._0F, .W0, 0x7A, half), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTUDQ2PD, ops2(.ymm_kz, .xmm_m128_m32bcst), evex(.L256, ._F3, ._0F, .W0, 0x7A, half), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTUDQ2PD, ops2(.zmm_kz, .ymm_m256_m32bcst), evex(.L512, ._F3, ._0F, .W0, 0x7A, half), .vRM, .{AVX512F}),
// VCVTUDQ2PS
vec(.VCVTUDQ2PS, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._F2, ._0F, .W0, 0x7A, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTUDQ2PS, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._F2, ._0F, .W0, 0x7A, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VCVTUDQ2PS, ops2(.zmm_kz, .zmm_m512_m32bcst_er), evex(.L512, ._F2, ._0F, .W0, 0x7A, full), .vRM, .{AVX512F}),
// VCVTUQQ2PD
vec(.VCVTUQQ2PD, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._F3, ._0F, .W1, 0x7A, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTUQQ2PD, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._F3, ._0F, .W1, 0x7A, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTUQQ2PD, ops2(.zmm_kz, .zmm_m512_m64bcst_er), evex(.L512, ._F3, ._0F, .W1, 0x7A, full), .vRM, .{AVX512DQ}),
// VCVTUQQ2PS
vec(.VCVTUQQ2PS, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._F2, ._0F, .W1, 0x7A, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTUQQ2PS, ops2(.xmm_kz, .ymm_m256_m64bcst), evex(.L256, ._F2, ._0F, .W1, 0x7A, full), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VCVTUQQ2PS, ops2(.ymm_kz, .zmm_m512_m64bcst_er), evex(.L512, ._F2, ._0F, .W1, 0x7A, full), .vRM, .{AVX512DQ}),
// VCVTUSI2SD
vec(.VCVTUSI2SD, ops3(.xmm, .xmm, .rm32), evex(.LIG, ._F2, ._0F, .W0, 0x7B, t1s), .RVM, .{AVX512F}),
vec(.VCVTUSI2SD, ops3(.xmm, .xmm, .rm64_er), evex(.LIG, ._F2, ._0F, .W1, 0x7B, t1s), .RVM, .{ AVX512F, No32 }),
// VCVTUSI2SS
vec(.VCVTUSI2SS, ops3(.xmm, .xmm, .rm32_er), evex(.LIG, ._F3, ._0F, .W0, 0x7B, t1s), .RVM, .{AVX512F}),
vec(.VCVTUSI2SS, ops3(.xmm, .xmm, .rm64_er), evex(.LIG, ._F3, ._0F, .W1, 0x7B, t1s), .RVM, .{ AVX512F, No32 }),
// VDBPSADBW
vec(.VDBPSADBW, ops4(.xmm_kz, .xmm, .xmm_m128, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x42, fmem), .RVMI, .{ AVX512VL, AVX512BW }),
vec(.VDBPSADBW, ops4(.ymm_kz, .ymm, .ymm_m256, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x42, fmem), .RVMI, .{ AVX512VL, AVX512BW }),
vec(.VDBPSADBW, ops4(.zmm_kz, .zmm, .zmm_m512, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x42, fmem), .RVMI, .{AVX512BW}),
// VEXPANDPD
vec(.VEXPANDPD, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x88, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VEXPANDPD, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x88, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VEXPANDPD, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x88, t1s), .vRM, .{AVX512F}),
// VEXPANDPS
vec(.VEXPANDPS, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F38, .W0, 0x88, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VEXPANDPS, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F38, .W0, 0x88, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VEXPANDPS, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F38, .W0, 0x88, t1s), .vRM, .{AVX512F}),
// VEXTRACTF (128, F32x4, 64x2, 32x8, 64x4)
// VEXTRACTF128
vec(.VEXTRACTF128, ops3(.xmml_m128, .ymml, .imm8), vex(.L256, ._66, ._0F3A, .W0, 0x19), .vMRI, .{AVX}),
// VEXTRACTF32X4
vec(.VEXTRACTF32X4, ops3(.xmm_m128_kz, .ymm, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x19, tup4), .vMRI, .{ AVX512VL, AVX512F }),
vec(.VEXTRACTF32X4, ops3(.xmm_m128_kz, .zmm, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x19, tup4), .vMRI, .{AVX512F}),
// VEXTRACTF64X2
vec(.VEXTRACTF64X2, ops3(.xmm_m128_kz, .ymm, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x19, tup2), .vMRI, .{ AVX512VL, AVX512DQ }),
vec(.VEXTRACTF64X2, ops3(.xmm_m128_kz, .zmm, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x19, tup2), .vMRI, .{AVX512DQ}),
// VEXTRACTF32X8
vec(.VEXTRACTF32X8, ops3(.ymm_m256_kz, .zmm, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x1B, tup8), .vMRI, .{AVX512DQ}),
// VEXTRACTF64X4
vec(.VEXTRACTF64X4, ops3(.ymm_m256_kz, .zmm, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x1B, tup4), .vMRI, .{AVX512F}),
// VEXTRACTI (128, F32x4, 64x2, 32x8, 64x4)
// VEXTRACTI128
vec(.VEXTRACTI128, ops3(.xmml_m128, .ymml, .imm8), vex(.L256, ._66, ._0F3A, .W0, 0x39), .vMRI, .{AVX2}),
// VEXTRACTI32X4
vec(.VEXTRACTI32X4, ops3(.xmm_m128_kz, .ymm, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x39, tup4), .vMRI, .{ AVX512VL, AVX512F }),
vec(.VEXTRACTI32X4, ops3(.xmm_m128_kz, .zmm, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x39, tup4), .vMRI, .{AVX512F}),
// VEXTRACTI64X2
vec(.VEXTRACTI64X2, ops3(.xmm_m128_kz, .ymm, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x39, tup2), .vMRI, .{ AVX512VL, AVX512DQ }),
vec(.VEXTRACTI64X2, ops3(.xmm_m128_kz, .zmm, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x39, tup2), .vMRI, .{AVX512DQ}),
// VEXTRACTI32X8
vec(.VEXTRACTI32X8, ops3(.ymm_m256_kz, .zmm, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x3B, tup8), .vMRI, .{AVX512DQ}),
// VEXTRACTI64X4
vec(.VEXTRACTI64X4, ops3(.ymm_m256_kz, .zmm, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x3B, tup4), .vMRI, .{AVX512F}),
// VFIXUPIMMPD
// SDM: L512 form is `VFIXUPIMMPD zmm1{k1}{z}, zmm2, zmm3/m512/m64bcst{sae}, imm8`
// -- the second source is zmm-width, matching the L128/L256 rows' xmm/ymm pattern.
vec(.VFIXUPIMMPD, ops4(.xmm_kz, .xmm, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x54, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VFIXUPIMMPD, ops4(.ymm_kz, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x54, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VFIXUPIMMPD, ops4(.zmm_kz, .zmm, .zmm_m512_m64bcst_sae, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x54, full), .RVMI, .{AVX512F}),
// VFIXUPIMMPS
vec(.VFIXUPIMMPS, ops4(.xmm_kz, .xmm, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x54, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VFIXUPIMMPS, ops4(.ymm_kz, .ymm, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x54, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VFIXUPIMMPS, ops4(.zmm_kz, .zmm, .zmm_m512_m32bcst_sae, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x54, full), .RVMI, .{AVX512F}),
// VFIXUPIMMSD
// Scalar LIG forms are CPUID AVX512F only (no AVX512VL), consistent with the
// other scalar entries in this table (e.g. VFMADD132SD).
vec(.VFIXUPIMMSD, ops4(.xmm_kz, .xmm, .xmm_m64_sae, .imm8), evex(.LIG, ._66, ._0F3A, .W1, 0x55, t1s), .RVMI, .{AVX512F}),
// VFIXUPIMMSS
vec(.VFIXUPIMMSS, ops4(.xmm_kz, .xmm, .xmm_m32_sae, .imm8), evex(.LIG, ._66, ._0F3A, .W0, 0x55, t1s), .RVMI, .{AVX512F}),
// VFMADD132PD / VFMADD213PD / VFMADD231PD
// SDM: L512 EVEX forms take a zmm second source
// (`VFMADD132PD zmm1{k1}{z}, zmm2, zmm3/m512/m64bcst{er}`), matching the
// xmm/ymm pattern of the L128/L256 rows.
vec(.VFMADD132PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0x98), .RVM, .{FMA}),
vec(.VFMADD132PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0x98), .RVM, .{FMA}),
vec(.VFMADD132PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x98, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADD132PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x98, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADD132PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0x98, full), .RVM, .{AVX512F}),
//
vec(.VFMADD213PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0xA8), .RVM, .{FMA}),
vec(.VFMADD213PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0xA8), .RVM, .{FMA}),
vec(.VFMADD213PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xA8, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADD213PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xA8, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADD213PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0xA8, full), .RVM, .{AVX512F}),
//
vec(.VFMADD231PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0xB8), .RVM, .{FMA}),
vec(.VFMADD231PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0xB8), .RVM, .{FMA}),
vec(.VFMADD231PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xB8, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADD231PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xB8, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADD231PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0xB8, full), .RVM, .{AVX512F}),
// VFMADD132PS / VFMADD213PS / VFMADD231PS
vec(.VFMADD132PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0x98), .RVM, .{FMA}),
vec(.VFMADD132PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x98), .RVM, .{FMA}),
vec(.VFMADD132PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x98, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADD132PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x98, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADD132PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0x98, full), .RVM, .{AVX512F}),
//
vec(.VFMADD213PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0xA8), .RVM, .{FMA}),
vec(.VFMADD213PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0xA8), .RVM, .{FMA}),
vec(.VFMADD213PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0xA8, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADD213PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0xA8, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADD213PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0xA8, full), .RVM, .{AVX512F}),
//
vec(.VFMADD231PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0xB8), .RVM, .{FMA}),
vec(.VFMADD231PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0xB8), .RVM, .{FMA}),
vec(.VFMADD231PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0xB8, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADD231PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0xB8, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADD231PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0xB8, full), .RVM, .{AVX512F}),
// VFMADD132SD / VFMADD213SD / VFMADD231SD
vec(.VFMADD132SD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F38, .W1, 0x99), .RVM, .{FMA}),
vec(.VFMADD132SD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._66, ._0F38, .W1, 0x99, t1s), .RVM, .{AVX512F}),
//
vec(.VFMADD213SD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F38, .W1, 0xA9), .RVM, .{FMA}),
vec(.VFMADD213SD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._66, ._0F38, .W1, 0xA9, t1s), .RVM, .{AVX512F}),
//
vec(.VFMADD231SD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F38, .W1, 0xB9), .RVM, .{FMA}),
vec(.VFMADD231SD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._66, ._0F38, .W1, 0xB9, t1s), .RVM, .{AVX512F}),
// VFMADD132SS / VFMADD213SS / VFMADD231SS
vec(.VFMADD132SS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F38, .W0, 0x99), .RVM, .{FMA}),
vec(.VFMADD132SS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._66, ._0F38, .W0, 0x99, t1s), .RVM, .{AVX512F}),
//
vec(.VFMADD213SS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F38, .W0, 0xA9), .RVM, .{FMA}),
vec(.VFMADD213SS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._66, ._0F38, .W0, 0xA9, t1s), .RVM, .{AVX512F}),
//
vec(.VFMADD231SS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F38, .W0, 0xB9), .RVM, .{FMA}),
vec(.VFMADD231SS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._66, ._0F38, .W0, 0xB9, t1s), .RVM, .{AVX512F}),
// VFMADDSUB132PD / VFMADDSUB213PD / VFMADDSUB231PD
// SDM: L512 EVEX forms take a zmm second source (zmm1, zmm2, zmm3/m512/bcst).
vec(.VFMADDSUB132PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0x96), .RVM, .{FMA}),
vec(.VFMADDSUB132PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0x96), .RVM, .{FMA}),
vec(.VFMADDSUB132PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x96, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADDSUB132PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x96, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADDSUB132PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0x96, full), .RVM, .{AVX512F}),
//
vec(.VFMADDSUB213PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0xA6), .RVM, .{FMA}),
vec(.VFMADDSUB213PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0xA6), .RVM, .{FMA}),
vec(.VFMADDSUB213PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xA6, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADDSUB213PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xA6, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADDSUB213PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0xA6, full), .RVM, .{AVX512F}),
//
vec(.VFMADDSUB231PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0xB6), .RVM, .{FMA}),
vec(.VFMADDSUB231PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0xB6), .RVM, .{FMA}),
vec(.VFMADDSUB231PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xB6, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADDSUB231PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xB6, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADDSUB231PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0xB6, full), .RVM, .{AVX512F}),
// VFMADDSUB132PS / VFMADDSUB213PS / VFMADDSUB231PS
vec(.VFMADDSUB132PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0x96), .RVM, .{FMA}),
vec(.VFMADDSUB132PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x96), .RVM, .{FMA}),
vec(.VFMADDSUB132PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x96, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADDSUB132PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x96, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADDSUB132PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0x96, full), .RVM, .{AVX512F}),
//
vec(.VFMADDSUB213PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0xA6), .RVM, .{FMA}),
vec(.VFMADDSUB213PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0xA6), .RVM, .{FMA}),
vec(.VFMADDSUB213PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0xA6, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADDSUB213PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0xA6, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADDSUB213PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0xA6, full), .RVM, .{AVX512F}),
//
vec(.VFMADDSUB231PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0xB6), .RVM, .{FMA}),
vec(.VFMADDSUB231PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0xB6), .RVM, .{FMA}),
vec(.VFMADDSUB231PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0xB6, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADDSUB231PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0xB6, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMADDSUB231PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0xB6, full), .RVM, .{AVX512F}),
// VFMSUBADD132PD / VFMSUBADD213PD / VFMSUBADD231PD
// SDM: L512 EVEX forms take a zmm second source (zmm1, zmm2, zmm3/m512/bcst).
vec(.VFMSUBADD132PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0x97), .RVM, .{FMA}),
vec(.VFMSUBADD132PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0x97), .RVM, .{FMA}),
vec(.VFMSUBADD132PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x97, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUBADD132PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x97, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUBADD132PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0x97, full), .RVM, .{AVX512F}),
//
vec(.VFMSUBADD213PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0xA7), .RVM, .{FMA}),
vec(.VFMSUBADD213PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0xA7), .RVM, .{FMA}),
vec(.VFMSUBADD213PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xA7, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUBADD213PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xA7, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUBADD213PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0xA7, full), .RVM, .{AVX512F}),
//
vec(.VFMSUBADD231PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0xB7), .RVM, .{FMA}),
vec(.VFMSUBADD231PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0xB7), .RVM, .{FMA}),
vec(.VFMSUBADD231PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xB7, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUBADD231PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xB7, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUBADD231PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0xB7, full), .RVM, .{AVX512F}),
// VFMSUBADD132PS / VFMSUBADD213PS / VFMSUBADD231PS
vec(.VFMSUBADD132PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0x97), .RVM, .{FMA}),
vec(.VFMSUBADD132PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x97), .RVM, .{FMA}),
vec(.VFMSUBADD132PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x97, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUBADD132PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x97, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUBADD132PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0x97, full), .RVM, .{AVX512F}),
//
vec(.VFMSUBADD213PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0xA7), .RVM, .{FMA}),
vec(.VFMSUBADD213PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0xA7), .RVM, .{FMA}),
vec(.VFMSUBADD213PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0xA7, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUBADD213PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0xA7, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUBADD213PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0xA7, full), .RVM, .{AVX512F}),
//
vec(.VFMSUBADD231PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0xB7), .RVM, .{FMA}),
vec(.VFMSUBADD231PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0xB7), .RVM, .{FMA}),
vec(.VFMSUBADD231PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0xB7, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUBADD231PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0xB7, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUBADD231PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0xB7, full), .RVM, .{AVX512F}),
// VFMSUB132PD / VFMSUB213PD / VFMSUB231PD
// SDM: L512 EVEX forms take a zmm second source (zmm1, zmm2, zmm3/m512/bcst).
vec(.VFMSUB132PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0x9A), .RVM, .{FMA}),
vec(.VFMSUB132PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0x9A), .RVM, .{FMA}),
vec(.VFMSUB132PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x9A, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUB132PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x9A, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUB132PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0x9A, full), .RVM, .{AVX512F}),
//
vec(.VFMSUB213PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0xAA), .RVM, .{FMA}),
vec(.VFMSUB213PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0xAA), .RVM, .{FMA}),
vec(.VFMSUB213PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xAA, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUB213PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xAA, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUB213PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0xAA, full), .RVM, .{AVX512F}),
//
vec(.VFMSUB231PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0xBA), .RVM, .{FMA}),
vec(.VFMSUB231PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0xBA), .RVM, .{FMA}),
vec(.VFMSUB231PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xBA, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUB231PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xBA, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUB231PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0xBA, full), .RVM, .{AVX512F}),
// VFMSUB132PS / VFMSUB213PS / VFMSUB231PS
vec(.VFMSUB132PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0x9A), .RVM, .{FMA}),
vec(.VFMSUB132PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x9A), .RVM, .{FMA}),
vec(.VFMSUB132PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x9A, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUB132PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x9A, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUB132PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0x9A, full), .RVM, .{AVX512F}),
//
vec(.VFMSUB213PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0xAA), .RVM, .{FMA}),
vec(.VFMSUB213PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0xAA), .RVM, .{FMA}),
vec(.VFMSUB213PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0xAA, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUB213PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0xAA, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUB213PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0xAA, full), .RVM, .{AVX512F}),
//
vec(.VFMSUB231PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0xBA), .RVM, .{FMA}),
vec(.VFMSUB231PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0xBA), .RVM, .{FMA}),
vec(.VFMSUB231PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0xBA, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUB231PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0xBA, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFMSUB231PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0xBA, full), .RVM, .{AVX512F}),
// VFMSUB132SD / VFMSUB213SD / VFMSUB231SD
vec(.VFMSUB132SD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F38, .W1, 0x9B), .RVM, .{FMA}),
vec(.VFMSUB132SD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._66, ._0F38, .W1, 0x9B, t1s), .RVM, .{AVX512F}),
//
vec(.VFMSUB213SD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F38, .W1, 0xAB), .RVM, .{FMA}),
vec(.VFMSUB213SD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._66, ._0F38, .W1, 0xAB, t1s), .RVM, .{AVX512F}),
//
vec(.VFMSUB231SD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F38, .W1, 0xBB), .RVM, .{FMA}),
vec(.VFMSUB231SD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._66, ._0F38, .W1, 0xBB, t1s), .RVM, .{AVX512F}),
// VFMSUB132SS / VFMSUB213SS / VFMSUB231SS
vec(.VFMSUB132SS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F38, .W0, 0x9B), .RVM, .{FMA}),
vec(.VFMSUB132SS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._66, ._0F38, .W0, 0x9B, t1s), .RVM, .{AVX512F}),
//
vec(.VFMSUB213SS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F38, .W0, 0xAB), .RVM, .{FMA}),
vec(.VFMSUB213SS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._66, ._0F38, .W0, 0xAB, t1s), .RVM, .{AVX512F}),
//
vec(.VFMSUB231SS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F38, .W0, 0xBB), .RVM, .{FMA}),
vec(.VFMSUB231SS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._66, ._0F38, .W0, 0xBB, t1s), .RVM, .{AVX512F}),
// VFNMADD132PD / VFNMADD213PD / VFNMADD231PD
// SDM: L512 EVEX forms take a zmm second source (zmm1, zmm2, zmm3/m512/bcst).
vec(.VFNMADD132PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0x9C), .RVM, .{FMA}),
vec(.VFNMADD132PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0x9C), .RVM, .{FMA}),
vec(.VFNMADD132PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x9C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMADD132PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x9C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMADD132PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0x9C, full), .RVM, .{AVX512F}),
//
vec(.VFNMADD213PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0xAC), .RVM, .{FMA}),
vec(.VFNMADD213PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0xAC), .RVM, .{FMA}),
vec(.VFNMADD213PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xAC, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMADD213PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xAC, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMADD213PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0xAC, full), .RVM, .{AVX512F}),
//
vec(.VFNMADD231PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0xBC), .RVM, .{FMA}),
vec(.VFNMADD231PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0xBC), .RVM, .{FMA}),
vec(.VFNMADD231PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xBC, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMADD231PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xBC, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMADD231PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0xBC, full), .RVM, .{AVX512F}),
// VFNMADD132PS / VFNMADD213PS / VFNMADD231PS
vec(.VFNMADD132PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0x9C), .RVM, .{FMA}),
vec(.VFNMADD132PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x9C), .RVM, .{FMA}),
vec(.VFNMADD132PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x9C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMADD132PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x9C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMADD132PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0x9C, full), .RVM, .{AVX512F}),
//
vec(.VFNMADD213PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0xAC), .RVM, .{FMA}),
vec(.VFNMADD213PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0xAC), .RVM, .{FMA}),
vec(.VFNMADD213PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0xAC, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMADD213PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0xAC, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMADD213PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0xAC, full), .RVM, .{AVX512F}),
//
vec(.VFNMADD231PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0xBC), .RVM, .{FMA}),
vec(.VFNMADD231PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0xBC), .RVM, .{FMA}),
vec(.VFNMADD231PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0xBC, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMADD231PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0xBC, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMADD231PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0xBC, full), .RVM, .{AVX512F}),
// VFNMADD132SD / VFNMADD213SD / VFNMADD231SD
vec(.VFNMADD132SD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F38, .W1, 0x9D), .RVM, .{FMA}),
vec(.VFNMADD132SD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._66, ._0F38, .W1, 0x9D, t1s), .RVM, .{AVX512F}),
//
vec(.VFNMADD213SD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F38, .W1, 0xAD), .RVM, .{FMA}),
vec(.VFNMADD213SD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._66, ._0F38, .W1, 0xAD, t1s), .RVM, .{AVX512F}),
//
vec(.VFNMADD231SD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F38, .W1, 0xBD), .RVM, .{FMA}),
vec(.VFNMADD231SD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._66, ._0F38, .W1, 0xBD, t1s), .RVM, .{AVX512F}),
// VFNMADD132SS / VFNMADD213SS / VFNMADD231SS
vec(.VFNMADD132SS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F38, .W0, 0x9D), .RVM, .{FMA}),
vec(.VFNMADD132SS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._66, ._0F38, .W0, 0x9D, t1s), .RVM, .{AVX512F}),
//
vec(.VFNMADD213SS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F38, .W0, 0xAD), .RVM, .{FMA}),
vec(.VFNMADD213SS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._66, ._0F38, .W0, 0xAD, t1s), .RVM, .{AVX512F}),
//
vec(.VFNMADD231SS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F38, .W0, 0xBD), .RVM, .{FMA}),
vec(.VFNMADD231SS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._66, ._0F38, .W0, 0xBD, t1s), .RVM, .{AVX512F}),
// VFNMSUB132PD / VFNMSUB213PD / VFNMSUB231PD
// SDM: L512 EVEX forms take a zmm second source (zmm1, zmm2, zmm3/m512/bcst).
vec(.VFNMSUB132PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0x9E), .RVM, .{FMA}),
vec(.VFNMSUB132PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0x9E), .RVM, .{FMA}),
vec(.VFNMSUB132PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x9E, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMSUB132PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x9E, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMSUB132PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0x9E, full), .RVM, .{AVX512F}),
//
vec(.VFNMSUB213PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0xAE), .RVM, .{FMA}),
vec(.VFNMSUB213PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0xAE), .RVM, .{FMA}),
vec(.VFNMSUB213PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xAE, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMSUB213PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xAE, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMSUB213PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0xAE, full), .RVM, .{AVX512F}),
//
vec(.VFNMSUB231PD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0xBE), .RVM, .{FMA}),
vec(.VFNMSUB231PD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0xBE), .RVM, .{FMA}),
vec(.VFNMSUB231PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xBE, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMSUB231PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xBE, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMSUB231PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0xBE, full), .RVM, .{AVX512F}),
// VFNMSUB132PS / VFNMSUB213PS / VFNMSUB231PS
vec(.VFNMSUB132PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0x9E), .RVM, .{FMA}),
vec(.VFNMSUB132PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x9E), .RVM, .{FMA}),
vec(.VFNMSUB132PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x9E, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMSUB132PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x9E, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMSUB132PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0x9E, full), .RVM, .{AVX512F}),
//
vec(.VFNMSUB213PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0xAE), .RVM, .{FMA}),
vec(.VFNMSUB213PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0xAE), .RVM, .{FMA}),
vec(.VFNMSUB213PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0xAE, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMSUB213PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0xAE, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMSUB213PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0xAE, full), .RVM, .{AVX512F}),
//
vec(.VFNMSUB231PS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0xBE), .RVM, .{FMA}),
vec(.VFNMSUB231PS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0xBE), .RVM, .{FMA}),
vec(.VFNMSUB231PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0xBE, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMSUB231PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0xBE, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VFNMSUB231PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0xBE, full), .RVM, .{AVX512F}),
// VFNMSUB132SD / VFNMSUB213SD / VFNMSUB231SD
vec(.VFNMSUB132SD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F38, .W1, 0x9F), .RVM, .{FMA}),
vec(.VFNMSUB132SD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._66, ._0F38, .W1, 0x9F, t1s), .RVM, .{AVX512F}),
//
vec(.VFNMSUB213SD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F38, .W1, 0xAF), .RVM, .{FMA}),
vec(.VFNMSUB213SD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._66, ._0F38, .W1, 0xAF, t1s), .RVM, .{AVX512F}),
//
vec(.VFNMSUB231SD, ops3(.xmml, .xmml, .xmml_m64), vex(.LIG, ._66, ._0F38, .W1, 0xBF), .RVM, .{FMA}),
vec(.VFNMSUB231SD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._66, ._0F38, .W1, 0xBF, t1s), .RVM, .{AVX512F}),
// VFNMSUB132SS / VFNMSUB213SS / VFNMSUB231SS
vec(.VFNMSUB132SS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F38, .W0, 0x9F), .RVM, .{FMA}),
vec(.VFNMSUB132SS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._66, ._0F38, .W0, 0x9F, t1s), .RVM, .{AVX512F}),
//
vec(.VFNMSUB213SS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F38, .W0, 0xAF), .RVM, .{FMA}),
vec(.VFNMSUB213SS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._66, ._0F38, .W0, 0xAF, t1s), .RVM, .{AVX512F}),
//
vec(.VFNMSUB231SS, ops3(.xmml, .xmml, .xmml_m32), vex(.LIG, ._66, ._0F38, .W0, 0xBF), .RVM, .{FMA}),
vec(.VFNMSUB231SS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._66, ._0F38, .W0, 0xBF, t1s), .RVM, .{AVX512F}),
// VFPCLASSPD
vec(.VFPCLASSPD, ops3(.reg_k_k, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x66, full), .vRMI, .{ AVX512VL, AVX512DQ }),
vec(.VFPCLASSPD, ops3(.reg_k_k, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x66, full), .vRMI, .{ AVX512VL, AVX512DQ }),
vec(.VFPCLASSPD, ops3(.reg_k_k, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x66, full), .vRMI, .{AVX512DQ}),
// VFPCLASSPS
vec(.VFPCLASSPS, ops3(.reg_k_k, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x66, full), .vRMI, .{ AVX512VL, AVX512DQ }),
vec(.VFPCLASSPS, ops3(.reg_k_k, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x66, full), .vRMI, .{ AVX512VL, AVX512DQ }),
vec(.VFPCLASSPS, ops3(.reg_k_k, .zmm_m512_m32bcst, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x66, full), .vRMI, .{AVX512DQ}),
// VFPCLASSSD
vec(.VFPCLASSSD, ops3(.reg_k_k, .xmm_m64, .imm8), evex(.LIG, ._66, ._0F3A, .W1, 0x67, t1s), .vRMI, .{AVX512DQ}),
// VFPCLASSSS
vec(.VFPCLASSSS, ops3(.reg_k_k, .xmm_m32, .imm8), evex(.LIG, ._66, ._0F3A, .W0, 0x67, t1s), .vRMI, .{AVX512DQ}),
// VGATHERDPD / VGATHERQPD
vec(.VGATHERDPD, ops3(.xmml, .vm32xl, .xmml), vex(.L128, ._66, ._0F38, .W1, 0x92), .RMV, .{AVX2}),
vec(.VGATHERDPD, ops3(.ymml, .vm32xl, .ymml), vex(.L256, ._66, ._0F38, .W1, 0x92), .RMV, .{AVX2}),
vec(.VGATHERDPD, ops2(.xmm_kz, .vm32x), evex(.L128, ._66, ._0F38, .W1, 0x92, t1s), .RMV, .{ AVX512VL, AVX512F }),
vec(.VGATHERDPD, ops2(.ymm_kz, .vm32x), evex(.L256, ._66, ._0F38, .W1, 0x92, t1s), .RMV, .{ AVX512VL, AVX512F }),
vec(.VGATHERDPD, ops2(.zmm_kz, .vm32y), evex(.L512, ._66, ._0F38, .W1, 0x92, t1s), .RMV, .{AVX512F}),
//
vec(.VGATHERQPD, ops3(.xmml, .vm64xl, .xmml), vex(.L128, ._66, ._0F38, .W1, 0x93), .RMV, .{AVX2}),
vec(.VGATHERQPD, ops3(.ymml, .vm64yl, .ymml), vex(.L256, ._66, ._0F38, .W1, 0x93), .RMV, .{AVX2}),
vec(.VGATHERQPD, ops2(.xmm_kz, .vm64x), evex(.L128, ._66, ._0F38, .W1, 0x93, t1s), .RMV, .{ AVX512VL, AVX512F }),
vec(.VGATHERQPD, ops2(.ymm_kz, .vm64y), evex(.L256, ._66, ._0F38, .W1, 0x93, t1s), .RMV, .{ AVX512VL, AVX512F }),
vec(.VGATHERQPD, ops2(.zmm_kz, .vm64z), evex(.L512, ._66, ._0F38, .W1, 0x93, t1s), .RMV, .{AVX512F}),
// VGATHERDPS / VGATHERQPS
vec(.VGATHERDPS, ops3(.xmml, .vm32xl, .xmml), vex(.L128, ._66, ._0F38, .W0, 0x92), .RMV, .{AVX2}),
vec(.VGATHERDPS, ops3(.ymml, .vm32yl, .ymml), vex(.L256, ._66, ._0F38, .W0, 0x92), .RMV, .{AVX2}),
vec(.VGATHERDPS, ops2(.xmm_kz, .vm32x), evex(.L128, ._66, ._0F38, .W0, 0x92, t1s), .RMV, .{ AVX512VL, AVX512F }),
vec(.VGATHERDPS, ops2(.ymm_kz, .vm32y), evex(.L256, ._66, ._0F38, .W0, 0x92, t1s), .RMV, .{ AVX512VL, AVX512F }),
vec(.VGATHERDPS, ops2(.zmm_kz, .vm32z), evex(.L512, ._66, ._0F38, .W0, 0x92, t1s), .RMV, .{AVX512F}),
//
vec(.VGATHERQPS, ops3(.xmml, .vm64xl, .xmml), vex(.L128, ._66, ._0F38, .W0, 0x93), .RMV, .{AVX2}),
vec(.VGATHERQPS, ops3(.xmml, .vm64yl, .xmml), vex(.L256, ._66, ._0F38, .W0, 0x93), .RMV, .{AVX2}),
vec(.VGATHERQPS, ops2(.xmm_kz, .vm64x), evex(.L128, ._66, ._0F38, .W0, 0x93, t1s), .RMV, .{ AVX512VL, AVX512F }),
vec(.VGATHERQPS, ops2(.xmm_kz, .vm64y), evex(.L256, ._66, ._0F38, .W0, 0x93, t1s), .RMV, .{ AVX512VL, AVX512F }),
vec(.VGATHERQPS, ops2(.ymm_kz, .vm64z), evex(.L512, ._66, ._0F38, .W0, 0x93, t1s), .RMV, .{AVX512F}),
// VGETEXPPD
vec(.VGETEXPPD, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x42, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VGETEXPPD, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x42, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VGETEXPPD, ops2(.zmm_kz, .zmm_m512_m64bcst_sae), evex(.L512, ._66, ._0F38, .W1, 0x42, full), .vRM, .{AVX512F}),
// VGETEXPPS
vec(.VGETEXPPS, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x42, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VGETEXPPS, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x42, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VGETEXPPS, ops2(.zmm_kz, .zmm_m512_m32bcst_sae), evex(.L512, ._66, ._0F38, .W0, 0x42, full), .vRM, .{AVX512F}),
// VGETEXPSD
vec(.VGETEXPSD, ops3(.xmm_kz, .xmm, .xmm_m64_sae), evex(.LIG, ._66, ._0F38, .W1, 0x43, t1s), .RVM, .{AVX512F}),
// VGETEXPSS
vec(.VGETEXPSS, ops3(.xmm_kz, .xmm, .xmm_m32_sae), evex(.LIG, ._66, ._0F38, .W0, 0x43, t1s), .RVM, .{AVX512F}),
// VGETMANTPD
vec(.VGETMANTPD, ops3(.xmm_kz, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x26, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VGETMANTPD, ops3(.ymm_kz, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x26, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VGETMANTPD, ops3(.zmm_kz, .zmm_m512_m64bcst_sae, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x26, full), .vRMI, .{AVX512F}),
// VGETMANTPS
vec(.VGETMANTPS, ops3(.xmm_kz, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x26, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VGETMANTPS, ops3(.ymm_kz, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x26, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VGETMANTPS, ops3(.zmm_kz, .zmm_m512_m32bcst_sae, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x26, full), .vRMI, .{AVX512F}),
// VGETMANTSD
vec(.VGETMANTSD, ops4(.xmm_kz, .xmm, .xmm_m64_sae, .imm8), evex(.LIG, ._66, ._0F3A, .W1, 0x27, t1s), .RVMI, .{AVX512F}),
// VGETMANTSS
vec(.VGETMANTSS, ops4(.xmm_kz, .xmm, .xmm_m32_sae, .imm8), evex(.LIG, ._66, ._0F3A, .W0, 0x27, t1s), .RVMI, .{AVX512F}),
// VINSERTF (128, F32X4, F64X2, F32X8, F64X4)
// VINSERTF128
vec(.VINSERTF128, ops4(.ymml, .ymml, .xmml_m128, .imm8), vex(.L256, ._66, ._0F3A, .W0, 0x18), .RVMI, .{AVX}),
// VINSERTF32X4
vec(.VINSERTF32X4, ops4(.ymm_kz, .ymm, .xmm_m128, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x18, tup4), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VINSERTF32X4, ops4(.zmm_kz, .zmm, .xmm_m128, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x18, tup4), .RVMI, .{AVX512F}),
// VINSERTF64X2
vec(.VINSERTF64X2, ops4(.ymm_kz, .ymm, .xmm_m128, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x18, tup2), .RVMI, .{ AVX512VL, AVX512DQ }),
vec(.VINSERTF64X2, ops4(.zmm_kz, .zmm, .xmm_m128, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x18, tup2), .RVMI, .{AVX512DQ}),
// VINSERTF32X8
vec(.VINSERTF32X8, ops4(.zmm_kz, .zmm, .ymm_m256, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x1A, tup8), .RVMI, .{AVX512DQ}),
// VINSERTF64X4
vec(.VINSERTF64X4, ops4(.zmm_kz, .zmm, .ymm_m256, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x1A, tup4), .RVMI, .{AVX512F}),
// VINSERTI (128, I32X4, I64X2, I32X8, I64X4)
// NOTE: the integer inserts use opcodes 0x38/0x3A — 0x18/0x1A encode the FP
// inserts above — and VINSERTI128 requires AVX2, not AVX (Intel SDM Vol. 2).
// VINSERTI128
vec(.VINSERTI128, ops4(.ymml, .ymml, .xmml_m128, .imm8), vex(.L256, ._66, ._0F3A, .W0, 0x38), .RVMI, .{AVX2}),
// VINSERTI32X4
vec(.VINSERTI32X4, ops4(.ymm_kz, .ymm, .xmm_m128, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x38, tup4), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VINSERTI32X4, ops4(.zmm_kz, .zmm, .xmm_m128, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x38, tup4), .RVMI, .{AVX512F}),
// VINSERTI64X2
vec(.VINSERTI64X2, ops4(.ymm_kz, .ymm, .xmm_m128, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x38, tup2), .RVMI, .{ AVX512VL, AVX512DQ }),
vec(.VINSERTI64X2, ops4(.zmm_kz, .zmm, .xmm_m128, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x38, tup2), .RVMI, .{AVX512DQ}),
// VINSERTI32X8
vec(.VINSERTI32X8, ops4(.zmm_kz, .zmm, .ymm_m256, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x3A, tup8), .RVMI, .{AVX512DQ}),
// VINSERTI64X4
vec(.VINSERTI64X4, ops4(.zmm_kz, .zmm, .ymm_m256, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x3A, tup4), .RVMI, .{AVX512F}),
// VMASKMOV
// VMASKMOVPD
vec(.VMASKMOVPD, ops3(.xmml, .xmml, .rm_mem128), vex(.L128, ._66, ._0F38, .W0, 0x2D), .RVM, .{AVX}),
vec(.VMASKMOVPD, ops3(.ymml, .ymml, .rm_mem256), vex(.L256, ._66, ._0F38, .W0, 0x2D), .RVM, .{AVX}),
vec(.VMASKMOVPD, ops3(.rm_mem128, .xmml, .xmml), vex(.L128, ._66, ._0F38, .W0, 0x2F), .MVR, .{AVX}),
vec(.VMASKMOVPD, ops3(.rm_mem256, .ymml, .ymml), vex(.L256, ._66, ._0F38, .W0, 0x2F), .MVR, .{AVX}),
// VMASKMOVPS
vec(.VMASKMOVPS, ops3(.xmml, .xmml, .rm_mem128), vex(.L128, ._66, ._0F38, .W0, 0x2C), .RVM, .{AVX}),
vec(.VMASKMOVPS, ops3(.ymml, .ymml, .rm_mem256), vex(.L256, ._66, ._0F38, .W0, 0x2C), .RVM, .{AVX}),
vec(.VMASKMOVPS, ops3(.rm_mem128, .xmml, .xmml), vex(.L128, ._66, ._0F38, .W0, 0x2E), .MVR, .{AVX}),
vec(.VMASKMOVPS, ops3(.rm_mem256, .ymml, .ymml), vex(.L256, ._66, ._0F38, .W0, 0x2E), .MVR, .{AVX}),
// VPBLENDD
vec(.VPBLENDD, ops4(.xmml, .xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .W0, 0x02), .RVMI, .{AVX2}),
vec(.VPBLENDD, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .W0, 0x02), .RVMI, .{AVX2}),
// VPBLENDMB / VPBLENDMW
// VPBLENDMB
vec(.VPBLENDMB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W0, 0x66, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPBLENDMB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W0, 0x66, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPBLENDMB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W0, 0x66, fmem), .RVM, .{AVX512BW}),
// VPBLENDMW
vec(.VPBLENDMW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x66, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPBLENDMW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x66, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPBLENDMW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x66, fmem), .RVM, .{AVX512BW}),
// VPBLENDMD / VPBLENDMQ
// VPBLENDMD
vec(.VPBLENDMD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x64, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPBLENDMD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x64, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPBLENDMD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x64, full), .RVM, .{AVX512F}),
// VPBLENDMQ
vec(.VPBLENDMQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x64, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPBLENDMQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x64, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPBLENDMQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x64, full), .RVM, .{AVX512F}),
// VPBROADCASTB / VPBROADCASTW / VPBROADCASTD / VPBROADCASTQ
// VPBROADCASTB
vec(.VPBROADCASTB, ops2(.xmml, .xmml_m8), vex(.L128, ._66, ._0F38, .W0, 0x78), .vRM, .{AVX2}),
vec(.VPBROADCASTB, ops2(.ymml, .xmml_m8), vex(.L256, ._66, ._0F38, .W0, 0x78), .vRM, .{AVX2}),
vec(.VPBROADCASTB, ops2(.xmm_kz, .xmm_m8), evex(.L128, ._66, ._0F38, .W0, 0x78, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTB, ops2(.ymm_kz, .xmm_m8), evex(.L256, ._66, ._0F38, .W0, 0x78, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTB, ops2(.zmm_kz, .xmm_m8), evex(.L512, ._66, ._0F38, .W0, 0x78, t1s), .vRM, .{AVX512BW}),
vec(.VPBROADCASTB, ops2(.xmm_kz, .rm_reg8), evex(.L128, ._66, ._0F38, .W0, 0x7A, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTB, ops2(.ymm_kz, .rm_reg8), evex(.L256, ._66, ._0F38, .W0, 0x7A, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTB, ops2(.zmm_kz, .rm_reg8), evex(.L512, ._66, ._0F38, .W0, 0x7A, t1s), .vRM, .{AVX512BW}),
vec(.VPBROADCASTB, ops2(.xmm_kz, .rm_reg32), evex(.L128, ._66, ._0F38, .W0, 0x7A, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTB, ops2(.ymm_kz, .rm_reg32), evex(.L256, ._66, ._0F38, .W0, 0x7A, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTB, ops2(.zmm_kz, .rm_reg32), evex(.L512, ._66, ._0F38, .W0, 0x7A, t1s), .vRM, .{AVX512BW}),
vec(.VPBROADCASTB, ops2(.xmm_kz, .rm_reg64), evex(.L128, ._66, ._0F38, .W0, 0x7A, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTB, ops2(.ymm_kz, .rm_reg64), evex(.L256, ._66, ._0F38, .W0, 0x7A, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTB, ops2(.zmm_kz, .rm_reg64), evex(.L512, ._66, ._0F38, .W0, 0x7A, t1s), .vRM, .{AVX512BW}),
// VPBROADCASTW
vec(.VPBROADCASTW, ops2(.xmml, .xmml_m16), vex(.L128, ._66, ._0F38, .W0, 0x79), .vRM, .{AVX2}),
vec(.VPBROADCASTW, ops2(.ymml, .xmml_m16), vex(.L256, ._66, ._0F38, .W0, 0x79), .vRM, .{AVX2}),
vec(.VPBROADCASTW, ops2(.xmm_kz, .xmm_m16), evex(.L128, ._66, ._0F38, .W0, 0x79, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTW, ops2(.ymm_kz, .xmm_m16), evex(.L256, ._66, ._0F38, .W0, 0x79, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTW, ops2(.zmm_kz, .xmm_m16), evex(.L512, ._66, ._0F38, .W0, 0x79, t1s), .vRM, .{AVX512BW}),
vec(.VPBROADCASTW, ops2(.xmm_kz, .rm_reg16), evex(.L128, ._66, ._0F38, .W0, 0x7B, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTW, ops2(.ymm_kz, .rm_reg16), evex(.L256, ._66, ._0F38, .W0, 0x7B, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTW, ops2(.zmm_kz, .rm_reg16), evex(.L512, ._66, ._0F38, .W0, 0x7B, t1s), .vRM, .{AVX512BW}),
vec(.VPBROADCASTW, ops2(.xmm_kz, .rm_reg32), evex(.L128, ._66, ._0F38, .W0, 0x7B, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTW, ops2(.ymm_kz, .rm_reg32), evex(.L256, ._66, ._0F38, .W0, 0x7B, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTW, ops2(.zmm_kz, .rm_reg32), evex(.L512, ._66, ._0F38, .W0, 0x7B, t1s), .vRM, .{AVX512BW}),
vec(.VPBROADCASTW, ops2(.xmm_kz, .rm_reg64), evex(.L128, ._66, ._0F38, .W0, 0x7B, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTW, ops2(.ymm_kz, .rm_reg64), evex(.L256, ._66, ._0F38, .W0, 0x7B, t1s), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPBROADCASTW, ops2(.zmm_kz, .rm_reg64), evex(.L512, ._66, ._0F38, .W0, 0x7B, t1s), .vRM, .{AVX512BW}),
// VPBROADCASTD
vec(.VPBROADCASTD, ops2(.xmml, .xmml_m32), vex(.L128, ._66, ._0F38, .W0, 0x58), .vRM, .{AVX2}),
vec(.VPBROADCASTD, ops2(.ymml, .xmml_m32), vex(.L256, ._66, ._0F38, .W0, 0x58), .vRM, .{AVX2}),
vec(.VPBROADCASTD, ops2(.xmm_kz, .xmm_m32), evex(.L128, ._66, ._0F38, .W0, 0x58, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPBROADCASTD, ops2(.ymm_kz, .xmm_m32), evex(.L256, ._66, ._0F38, .W0, 0x58, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPBROADCASTD, ops2(.zmm_kz, .xmm_m32), evex(.L512, ._66, ._0F38, .W0, 0x58, t1s), .vRM, .{AVX512F}),
vec(.VPBROADCASTD, ops2(.xmm_kz, .rm_reg32), evex(.L128, ._66, ._0F38, .W0, 0x7C, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPBROADCASTD, ops2(.ymm_kz, .rm_reg32), evex(.L256, ._66, ._0F38, .W0, 0x7C, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPBROADCASTD, ops2(.zmm_kz, .rm_reg32), evex(.L512, ._66, ._0F38, .W0, 0x7C, t1s), .vRM, .{AVX512F}),
// VPBROADCASTQ
vec(.VPBROADCASTQ, ops2(.xmml, .xmml_m64), vex(.L128, ._66, ._0F38, .W0, 0x59), .vRM, .{AVX2}),
vec(.VPBROADCASTQ, ops2(.ymml, .xmml_m64), vex(.L256, ._66, ._0F38, .W0, 0x59), .vRM, .{AVX2}),
vec(.VPBROADCASTQ, ops2(.xmm_kz, .xmm_m64), evex(.L128, ._66, ._0F38, .W1, 0x59, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPBROADCASTQ, ops2(.ymm_kz, .xmm_m64), evex(.L256, ._66, ._0F38, .W1, 0x59, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPBROADCASTQ, ops2(.zmm_kz, .xmm_m64), evex(.L512, ._66, ._0F38, .W1, 0x59, t1s), .vRM, .{AVX512F}),
vec(.VPBROADCASTQ, ops2(.xmm_kz, .rm_reg64), evex(.L128, ._66, ._0F38, .W1, 0x7C, t1s), .vRM, .{ AVX512VL, AVX512F, No32 }),
vec(.VPBROADCASTQ, ops2(.ymm_kz, .rm_reg64), evex(.L256, ._66, ._0F38, .W1, 0x7C, t1s), .vRM, .{ AVX512VL, AVX512F, No32 }),
vec(.VPBROADCASTQ, ops2(.zmm_kz, .rm_reg64), evex(.L512, ._66, ._0F38, .W1, 0x7C, t1s), .vRM, .{ AVX512F, No32 }),
// VPBROADCASTI32X2
vec(.VPBROADCASTI32X2, ops2(.xmm_kz, .xmm_m64), evex(.L128, ._66, ._0F38, .W0, 0x59, tup2), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VPBROADCASTI32X2, ops2(.ymm_kz, .xmm_m64), evex(.L256, ._66, ._0F38, .W0, 0x59, tup2), .vRM, .{ AVX512VL, AVX512DQ }),
// 512-bit form requires only AVX512DQ, not AVX512VL (Intel SDM Vol. 2) —
// matches every other L512 entry in this table.
vec(.VPBROADCASTI32X2, ops2(.zmm_kz, .xmm_m64), evex(.L512, ._66, ._0F38, .W0, 0x59, tup2), .vRM, .{AVX512DQ}),
// VPBROADCASTI128
vec(.VPBROADCASTI128, ops2(.ymml, .rm_mem128), vex(.L256, ._66, ._0F38, .W0, 0x5A), .vRM, .{AVX2}),
// VPBROADCASTI32X4
vec(.VPBROADCASTI32X4, ops2(.ymm_kz, .rm_mem128), evex(.L256, ._66, ._0F38, .W0, 0x5A, tup4), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPBROADCASTI32X4, ops2(.zmm_kz, .rm_mem128), evex(.L512, ._66, ._0F38, .W0, 0x5A, tup4), .vRM, .{AVX512F}),
// VPBROADCASTI64X2
vec(.VPBROADCASTI64X2, ops2(.ymm_kz, .rm_mem128), evex(.L256, ._66, ._0F38, .W1, 0x5A, tup2), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VPBROADCASTI64X2, ops2(.zmm_kz, .rm_mem128), evex(.L512, ._66, ._0F38, .W1, 0x5A, tup2), .vRM, .{AVX512DQ}),
// VPBROADCASTI32X8
vec(.VPBROADCASTI32X8, ops2(.zmm_kz, .rm_mem256), evex(.L512, ._66, ._0F38, .W0, 0x5B, tup8), .vRM, .{AVX512DQ}),
// VPBROADCASTI64X4
vec(.VPBROADCASTI64X4, ops2(.zmm_kz, .rm_mem256), evex(.L512, ._66, ._0F38, .W1, 0x5B, tup4), .vRM, .{AVX512F}),
// VPBROADCASTM
// VPBROADCASTMB2Q
vec(.VPBROADCASTMB2Q, ops2(.xmm_kz, .rm_k), evex(.L128, ._F3, ._0F38, .W1, 0x2A, nomem), .vRM, .{ AVX512VL, AVX512CD }),
vec(.VPBROADCASTMB2Q, ops2(.ymm_kz, .rm_k), evex(.L256, ._F3, ._0F38, .W1, 0x2A, nomem), .vRM, .{ AVX512VL, AVX512CD }),
vec(.VPBROADCASTMB2Q, ops2(.zmm_kz, .rm_k), evex(.L512, ._F3, ._0F38, .W1, 0x2A, nomem), .vRM, .{AVX512CD}),
// VPBROADCASTMW2D
vec(.VPBROADCASTMW2D, ops2(.xmm_kz, .rm_k), evex(.L128, ._F3, ._0F38, .W0, 0x3A, nomem), .vRM, .{ AVX512VL, AVX512CD }),
vec(.VPBROADCASTMW2D, ops2(.ymm_kz, .rm_k), evex(.L256, ._F3, ._0F38, .W0, 0x3A, nomem), .vRM, .{ AVX512VL, AVX512CD }),
vec(.VPBROADCASTMW2D, ops2(.zmm_kz, .rm_k), evex(.L512, ._F3, ._0F38, .W0, 0x3A, nomem), .vRM, .{AVX512CD}),
// VPCMPB / VPCMPUB
// VPCMPB
vec(.VPCMPB, ops4(.reg_k_k, .xmm, .xmm_m128, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x3F, fmem), .RVMI, .{ AVX512VL, AVX512BW }),
vec(.VPCMPB, ops4(.reg_k_k, .ymm, .ymm_m256, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x3F, fmem), .RVMI, .{ AVX512VL, AVX512BW }),
vec(.VPCMPB, ops4(.reg_k_k, .zmm, .zmm_m512, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x3F, fmem), .RVMI, .{AVX512BW}),
// VPCMPUB
vec(.VPCMPUB, ops4(.reg_k_k, .xmm, .xmm_m128, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x3E, fmem), .RVMI, .{ AVX512VL, AVX512BW }),
vec(.VPCMPUB, ops4(.reg_k_k, .ymm, .ymm_m256, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x3E, fmem), .RVMI, .{ AVX512VL, AVX512BW }),
vec(.VPCMPUB, ops4(.reg_k_k, .zmm, .zmm_m512, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x3E, fmem), .RVMI, .{AVX512BW}),
// VPCMPD / VPCMPUD
// VPCMPD
vec(.VPCMPD, ops4(.reg_k_k, .xmm, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x1F, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VPCMPD, ops4(.reg_k_k, .ymm, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x1F, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VPCMPD, ops4(.reg_k_k, .zmm, .zmm_m512_m32bcst, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x1F, full), .RVMI, .{AVX512F}),
// VPCMPUD
vec(.VPCMPUD, ops4(.reg_k_k, .xmm, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x1E, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VPCMPUD, ops4(.reg_k_k, .ymm, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x1E, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VPCMPUD, ops4(.reg_k_k, .zmm, .zmm_m512_m32bcst, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x1E, full), .RVMI, .{AVX512F}),
// VPCMPQ / VPCMPUQ
// VPCMPQ
vec(.VPCMPQ, ops4(.reg_k_k, .xmm, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x1F, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VPCMPQ, ops4(.reg_k_k, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x1F, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VPCMPQ, ops4(.reg_k_k, .zmm, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x1F, full), .RVMI, .{AVX512F}),
// VPCMPUQ
vec(.VPCMPUQ, ops4(.reg_k_k, .xmm, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x1E, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VPCMPUQ, ops4(.reg_k_k, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x1E, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VPCMPUQ, ops4(.reg_k_k, .zmm, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x1E, full), .RVMI, .{AVX512F}),
// VPCMPW / VPCMPUW
// VPCMPW
vec(.VPCMPW, ops4(.reg_k_k, .xmm, .xmm_m128, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x3F, fmem), .RVMI, .{ AVX512VL, AVX512BW }),
vec(.VPCMPW, ops4(.reg_k_k, .ymm, .ymm_m256, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x3F, fmem), .RVMI, .{ AVX512VL, AVX512BW }),
vec(.VPCMPW, ops4(.reg_k_k, .zmm, .zmm_m512, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x3F, fmem), .RVMI, .{AVX512BW}),
// VPCMPUW
vec(.VPCMPUW, ops4(.reg_k_k, .xmm, .xmm_m128, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x3E, fmem), .RVMI, .{ AVX512VL, AVX512BW }),
vec(.VPCMPUW, ops4(.reg_k_k, .ymm, .ymm_m256, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x3E, fmem), .RVMI, .{ AVX512VL, AVX512BW }),
vec(.VPCMPUW, ops4(.reg_k_k, .zmm, .zmm_m512, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x3E, fmem), .RVMI, .{AVX512BW}),
// VPCOMPRESSB / VPCOMPRESSW
// VPCOMPRESSB
vec(.VPCOMPRESSB, ops2(.rm_mem128_k, .xmm), evex(.L128, ._66, ._0F38, .W0, 0x63, t1s), .vMR, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPCOMPRESSB, ops2(.rm_xmm_kz, .xmm), evex(.L128, ._66, ._0F38, .W0, 0x63, nomem), .vMR, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPCOMPRESSB, ops2(.rm_mem256_k, .ymm), evex(.L256, ._66, ._0F38, .W0, 0x63, t1s), .vMR, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPCOMPRESSB, ops2(.rm_ymm_kz, .ymm), evex(.L256, ._66, ._0F38, .W0, 0x63, nomem), .vMR, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPCOMPRESSB, ops2(.rm_mem512_k, .zmm), evex(.L512, ._66, ._0F38, .W0, 0x63, t1s), .vMR, .{AVX512_VBMI2}),
vec(.VPCOMPRESSB, ops2(.rm_zmm_kz, .zmm), evex(.L512, ._66, ._0F38, .W0, 0x63, nomem), .vMR, .{AVX512_VBMI2}),
// VPCOMPRESSW
vec(.VPCOMPRESSW, ops2(.rm_mem128_k, .xmm), evex(.L128, ._66, ._0F38, .W1, 0x63, t1s), .vMR, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPCOMPRESSW, ops2(.rm_xmm_kz, .xmm), evex(.L128, ._66, ._0F38, .W1, 0x63, nomem), .vMR, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPCOMPRESSW, ops2(.rm_mem256_k, .ymm), evex(.L256, ._66, ._0F38, .W1, 0x63, t1s), .vMR, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPCOMPRESSW, ops2(.rm_ymm_kz, .ymm), evex(.L256, ._66, ._0F38, .W1, 0x63, nomem), .vMR, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPCOMPRESSW, ops2(.rm_mem512_k, .zmm), evex(.L512, ._66, ._0F38, .W1, 0x63, t1s), .vMR, .{AVX512_VBMI2}),
vec(.VPCOMPRESSW, ops2(.rm_zmm_kz, .zmm), evex(.L512, ._66, ._0F38, .W1, 0x63, nomem), .vMR, .{AVX512_VBMI2}),
// VPCOMPRESSD
vec(.VPCOMPRESSD, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._66, ._0F38, .W0, 0x8B, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPCOMPRESSD, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._66, ._0F38, .W0, 0x8B, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPCOMPRESSD, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._66, ._0F38, .W0, 0x8B, t1s), .vMR, .{AVX512F}),
// VPCOMPRESSQ
vec(.VPCOMPRESSQ, ops2(.xmm_m128_kz, .xmm), evex(.L128, ._66, ._0F38, .W1, 0x8B, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPCOMPRESSQ, ops2(.ymm_m256_kz, .ymm), evex(.L256, ._66, ._0F38, .W1, 0x8B, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPCOMPRESSQ, ops2(.zmm_m512_kz, .zmm), evex(.L512, ._66, ._0F38, .W1, 0x8B, t1s), .vMR, .{AVX512F}),
// VPCONFLICTD / VPCONFLICTQ
// VPCONFLICTD
vec(.VPCONFLICTD, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0xC4, full), .vRM, .{ AVX512VL, AVX512CD }),
vec(.VPCONFLICTD, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0xC4, full), .vRM, .{ AVX512VL, AVX512CD }),
vec(.VPCONFLICTD, ops2(.zmm_kz, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0xC4, full), .vRM, .{AVX512CD}),
// VPCONFLICTQ
vec(.VPCONFLICTQ, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xC4, full), .vRM, .{ AVX512VL, AVX512CD }),
vec(.VPCONFLICTQ, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xC4, full), .vRM, .{ AVX512VL, AVX512CD }),
vec(.VPCONFLICTQ, ops2(.zmm_kz, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0xC4, full), .vRM, .{AVX512CD}),
// VPDPBUSD
vec(.VPDPBUSD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x50, full), .RVM, .{ AVX512VL, AVX512_VNNI }),
vec(.VPDPBUSD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x50, full), .RVM, .{ AVX512VL, AVX512_VNNI }),
vec(.VPDPBUSD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x50, full), .RVM, .{AVX512_VNNI}),
// VPDPBUSDS
vec(.VPDPBUSDS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x51, full), .RVM, .{ AVX512VL, AVX512_VNNI }),
vec(.VPDPBUSDS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x51, full), .RVM, .{ AVX512VL, AVX512_VNNI }),
vec(.VPDPBUSDS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x51, full), .RVM, .{AVX512_VNNI}),
// VPDPWSSD
vec(.VPDPWSSD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x52, full), .RVM, .{ AVX512VL, AVX512_VNNI }),
vec(.VPDPWSSD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x52, full), .RVM, .{ AVX512VL, AVX512_VNNI }),
vec(.VPDPWSSD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x52, full), .RVM, .{AVX512_VNNI}),
// VPDPWSSDS
vec(.VPDPWSSDS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x53, full), .RVM, .{ AVX512VL, AVX512_VNNI }),
vec(.VPDPWSSDS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x53, full), .RVM, .{ AVX512VL, AVX512_VNNI }),
vec(.VPDPWSSDS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x53, full), .RVM, .{AVX512_VNNI}),
// VPERM2F128
vec(.VPERM2F128, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .W0, 0x06), .RVMI, .{AVX}),
// VPERM2I128
vec(.VPERM2I128, ops4(.ymml, .ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .W0, 0x46), .RVMI, .{AVX2}),
// VPERMB
vec(.VPERMB, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W0, 0x8D, fmem), .RVM, .{ AVX512VL, AVX512_VBMI }),
vec(.VPERMB, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W0, 0x8D, fmem), .RVM, .{ AVX512VL, AVX512_VBMI }),
vec(.VPERMB, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W0, 0x8D, fmem), .RVM, .{AVX512_VBMI}),
// VPERMD / VPERMW
// VPERMD
vec(.VPERMD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x36), .RVM, .{AVX2}),
vec(.VPERMD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x36, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x36, full), .RVM, .{AVX512F}),
// VPERMW
vec(.VPERMW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x8D, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPERMW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x8D, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPERMW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x8D, fmem), .RVM, .{AVX512BW}),
// VPERMI2B
vec(.VPERMI2B, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W0, 0x75, fmem), .RVM, .{ AVX512VL, AVX512_VBMI }),
vec(.VPERMI2B, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W0, 0x75, fmem), .RVM, .{ AVX512VL, AVX512_VBMI }),
vec(.VPERMI2B, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W0, 0x75, fmem), .RVM, .{AVX512_VBMI}),
// VPERMI2W / VPERMI2D / VPERMI2Q / VPERMI2PS / VPERMI2PD
// VPERMI2W
vec(.VPERMI2W, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x75, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPERMI2W, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x75, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPERMI2W, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x75, fmem), .RVM, .{AVX512BW}),
// VPERMI2D
// VPERMI2D is an AVX512F instruction (not AVX512BW) — see Intel SDM Vol. 2;
// consistent with the sibling VPERMI2Q / VPERMI2PS / VPERMI2PD entries below.
vec(.VPERMI2D, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x76, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMI2D, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x76, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMI2D, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x76, full), .RVM, .{AVX512F}),
// VPERMI2Q
vec(.VPERMI2Q, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x76, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMI2Q, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x76, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMI2Q, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x76, full), .RVM, .{AVX512F}),
// VPERMI2PS
vec(.VPERMI2PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x77, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMI2PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x77, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMI2PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x77, full), .RVM, .{AVX512F}),
// VPERMI2PD
vec(.VPERMI2PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x77, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMI2PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x77, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMI2PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x77, full), .RVM, .{AVX512F}),
// VPERMILPD
vec(.VPERMILPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0x0D), .RVM, .{AVX}),
vec(.VPERMILPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x0D), .RVM, .{AVX}),
vec(.VPERMILPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x0D, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMILPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x0D, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMILPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x0D, full), .RVM, .{AVX512F}),
vec(.VPERMILPD, ops3(.xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .W0, 0x05), .vRMI, .{AVX}),
vec(.VPERMILPD, ops3(.ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .W0, 0x05), .vRMI, .{AVX}),
vec(.VPERMILPD, ops3(.xmm_kz, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x05, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VPERMILPD, ops3(.ymm_kz, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x05, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VPERMILPD, ops3(.zmm_kz, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x05, full), .vRMI, .{AVX512F}),
// VPERMILPS
vec(.VPERMILPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0x0C), .RVM, .{AVX}),
vec(.VPERMILPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x0C), .RVM, .{AVX}),
vec(.VPERMILPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x0C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMILPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x0C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMILPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x0C, full), .RVM, .{AVX512F}),
vec(.VPERMILPS, ops3(.xmml, .xmml_m128, .imm8), vex(.L128, ._66, ._0F3A, .W0, 0x04), .vRMI, .{AVX}),
vec(.VPERMILPS, ops3(.ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .W0, 0x04), .vRMI, .{AVX}),
vec(.VPERMILPS, ops3(.xmm_kz, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x04, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VPERMILPS, ops3(.ymm_kz, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x04, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VPERMILPS, ops3(.zmm_kz, .zmm_m512_m32bcst, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x04, full), .vRMI, .{AVX512F}),
// VPERMPD
vec(.VPERMPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x16, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x16, full), .RVM, .{AVX512F}),
vec(.VPERMPD, ops3(.ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .W1, 0x01), .vRMI, .{AVX2}),
vec(.VPERMPD, ops3(.ymm_kz, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x01, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VPERMPD, ops3(.zmm_kz, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x01, full), .vRMI, .{AVX512F}),
// VPERMPS
vec(.VPERMPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x16), .RVM, .{AVX2}),
vec(.VPERMPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x16, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x16, full), .RVM, .{AVX512F}),
// VPERMQ
vec(.VPERMQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x36, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x36, full), .RVM, .{AVX512F}),
// VEX-encoded VPERMQ requires AVX2, not AVX (Intel SDM Vol. 2) —
// matches the parallel VPERMPD imm8 form above.
vec(.VPERMQ, ops3(.ymml, .ymml_m256, .imm8), vex(.L256, ._66, ._0F3A, .W1, 0x00), .vRMI, .{AVX2}),
vec(.VPERMQ, ops3(.ymm_kz, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x00, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VPERMQ, ops3(.zmm_kz, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x00, full), .vRMI, .{AVX512F}),
// VPERMT2B
vec(.VPERMT2B, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W0, 0x7D, fmem), .RVM, .{ AVX512VL, AVX512_VBMI }),
vec(.VPERMT2B, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W0, 0x7D, fmem), .RVM, .{ AVX512VL, AVX512_VBMI }),
vec(.VPERMT2B, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W0, 0x7D, fmem), .RVM, .{AVX512_VBMI}),
// VPERMT2W / VPERMT2D / VPERMT2Q / VPERMT2PS / VPERMT2PD
// VPERMT2W
vec(.VPERMT2W, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x7D, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPERMT2W, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x7D, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPERMT2W, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x7D, fmem), .RVM, .{AVX512BW}),
// VPERMT2D
vec(.VPERMT2D, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x7E, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMT2D, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x7E, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMT2D, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x7E, full), .RVM, .{AVX512F}),
// VPERMT2Q
vec(.VPERMT2Q, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x7E, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMT2Q, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x7E, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMT2Q, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x7E, full), .RVM, .{AVX512F}),
// VPERMT2PS
vec(.VPERMT2PS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x7F, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMT2PS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x7F, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMT2PS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x7F, full), .RVM, .{AVX512F}),
// VPERMT2PD
vec(.VPERMT2PD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x7F, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMT2PD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x7F, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPERMT2PD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x7F, full), .RVM, .{AVX512F}),
// VPEXPANDB / VPEXPANDW
// VPEXPANDB
vec(.VPEXPANDB, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F38, .W0, 0x62, t1s), .vRM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPEXPANDB, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F38, .W0, 0x62, t1s), .vRM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPEXPANDB, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F38, .W0, 0x62, t1s), .vRM, .{AVX512_VBMI2}),
// VPEXPANDW
vec(.VPEXPANDW, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x62, t1s), .vRM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPEXPANDW, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x62, t1s), .vRM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPEXPANDW, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x62, t1s), .vRM, .{AVX512_VBMI2}),
// VPEXPANDD
vec(.VPEXPANDD, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F38, .W0, 0x89, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPEXPANDD, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F38, .W0, 0x89, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPEXPANDD, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F38, .W0, 0x89, t1s), .vRM, .{AVX512F}),
// VPEXPANDQ
vec(.VPEXPANDQ, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x89, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPEXPANDQ, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x89, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPEXPANDQ, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x89, t1s), .vRM, .{AVX512F}),
// VPGATHERDD / VPGATHERQD / VPGATHERDQ / VPGATHERQQ
// VPGATHERDD
vec(.VPGATHERDD, ops3(.xmml, .vm32xl, .xmml), vex(.L128, ._66, ._0F38, .W0, 0x90), .RMV, .{AVX2}),
vec(.VPGATHERDD, ops3(.ymml, .vm32yl, .ymml), vex(.L256, ._66, ._0F38, .W0, 0x90), .RMV, .{AVX2}),
vec(.VPGATHERDD, ops2(.xmm_k, .vm32x), evex(.L128, ._66, ._0F38, .W0, 0x90, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPGATHERDD, ops2(.ymm_k, .vm32y), evex(.L256, ._66, ._0F38, .W0, 0x90, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPGATHERDD, ops2(.zmm_k, .vm32z), evex(.L512, ._66, ._0F38, .W0, 0x90, t1s), .vRM, .{AVX512F}),
// VPGATHERDQ
vec(.VPGATHERDQ, ops3(.xmml, .vm32xl, .xmml), vex(.L128, ._66, ._0F38, .W1, 0x90), .RMV, .{AVX2}),
vec(.VPGATHERDQ, ops3(.ymml, .vm32xl, .ymml), vex(.L256, ._66, ._0F38, .W1, 0x90), .RMV, .{AVX2}),
vec(.VPGATHERDQ, ops2(.xmm_k, .vm32x), evex(.L128, ._66, ._0F38, .W1, 0x90, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPGATHERDQ, ops2(.ymm_k, .vm32x), evex(.L256, ._66, ._0F38, .W1, 0x90, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPGATHERDQ, ops2(.zmm_k, .vm32y), evex(.L512, ._66, ._0F38, .W1, 0x90, t1s), .vRM, .{AVX512F}),
// VPGATHERQD
vec(.VPGATHERQD, ops3(.xmml, .vm64xl, .xmml), vex(.L128, ._66, ._0F38, .W0, 0x91), .RMV, .{AVX2}),
vec(.VPGATHERQD, ops3(.xmml, .vm64yl, .xmml), vex(.L256, ._66, ._0F38, .W0, 0x91), .RMV, .{AVX2}),
vec(.VPGATHERQD, ops2(.xmm_k, .vm64x), evex(.L128, ._66, ._0F38, .W0, 0x91, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPGATHERQD, ops2(.xmm_k, .vm64y), evex(.L256, ._66, ._0F38, .W0, 0x91, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPGATHERQD, ops2(.ymm_k, .vm64z), evex(.L512, ._66, ._0F38, .W0, 0x91, t1s), .vRM, .{AVX512F}),
// VPGATHERQQ
vec(.VPGATHERQQ, ops3(.xmml, .vm64xl, .xmml), vex(.L128, ._66, ._0F38, .W1, 0x91), .RMV, .{AVX2}),
vec(.VPGATHERQQ, ops3(.ymml, .vm64yl, .ymml), vex(.L256, ._66, ._0F38, .W1, 0x91), .RMV, .{AVX2}),
vec(.VPGATHERQQ, ops2(.xmm_k, .vm64x), evex(.L128, ._66, ._0F38, .W1, 0x91, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPGATHERQQ, ops2(.ymm_k, .vm64y), evex(.L256, ._66, ._0F38, .W1, 0x91, t1s), .vRM, .{ AVX512VL, AVX512F }),
vec(.VPGATHERQQ, ops2(.zmm_k, .vm64z), evex(.L512, ._66, ._0F38, .W1, 0x91, t1s), .vRM, .{AVX512F}),
// VPLZCNTD / VPLZCNTQ
// VPLZCNTD
vec(.VPLZCNTD, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x44, full), .vRM, .{ AVX512VL, AVX512CD }),
vec(.VPLZCNTD, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x44, full), .vRM, .{ AVX512VL, AVX512CD }),
vec(.VPLZCNTD, ops2(.zmm_kz, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x44, full), .vRM, .{AVX512CD}),
// VPLZCNTQ
vec(.VPLZCNTQ, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x44, full), .vRM, .{ AVX512VL, AVX512CD }),
vec(.VPLZCNTQ, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x44, full), .vRM, .{ AVX512VL, AVX512CD }),
vec(.VPLZCNTQ, ops2(.zmm_kz, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x44, full), .vRM, .{AVX512CD}),
// VPMADD52HUQ
vec(.VPMADD52HUQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xB5, full), .RVM, .{ AVX512VL, AVX512_IFMA }),
vec(.VPMADD52HUQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xB5, full), .RVM, .{ AVX512VL, AVX512_IFMA }),
vec(.VPMADD52HUQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0xB5, full), .RVM, .{AVX512_IFMA}),
// VPMADD52LUQ
vec(.VPMADD52LUQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0xB4, full), .RVM, .{ AVX512VL, AVX512_IFMA }),
vec(.VPMADD52LUQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0xB4, full), .RVM, .{ AVX512VL, AVX512_IFMA }),
vec(.VPMADD52LUQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0xB4, full), .RVM, .{AVX512_IFMA}),
// VPMASKMOV
// VMASKMOVD
vec(.VMASKMOVD, ops3(.xmml, .xmml, .rm_mem128), vex(.L128, ._66, ._0F38, .W0, 0x8C), .RVM, .{AVX2}),
vec(.VMASKMOVD, ops3(.ymml, .ymml, .rm_mem256), vex(.L256, ._66, ._0F38, .W0, 0x8C), .RVM, .{AVX2}),
vec(.VMASKMOVD, ops3(.rm_mem128, .xmml, .xmml), vex(.L128, ._66, ._0F38, .W0, 0x8E), .MVR, .{AVX2}),
vec(.VMASKMOVD, ops3(.rm_mem256, .ymml, .ymml), vex(.L256, ._66, ._0F38, .W0, 0x8E), .MVR, .{AVX2}),
// VMASKMOVQ
vec(.VMASKMOVQ, ops3(.xmml, .xmml, .rm_mem128), vex(.L128, ._66, ._0F38, .W1, 0x8C), .RVM, .{AVX2}),
vec(.VMASKMOVQ, ops3(.ymml, .ymml, .rm_mem256), vex(.L256, ._66, ._0F38, .W1, 0x8C), .RVM, .{AVX2}),
vec(.VMASKMOVQ, ops3(.rm_mem128, .xmml, .xmml), vex(.L128, ._66, ._0F38, .W1, 0x8E), .MVR, .{AVX2}),
vec(.VMASKMOVQ, ops3(.rm_mem256, .ymml, .ymml), vex(.L256, ._66, ._0F38, .W1, 0x8E), .MVR, .{AVX2}),
// VPMOVB2M / VPMOVW2M / VPMOVD2M / VPMOVQ2M
// VPMOVB2M
vec(.VPMOVB2M, ops2(.reg_k, .rm_xmm), evex(.L128, ._F3, ._0F38, .W0, 0x29, nomem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPMOVB2M, ops2(.reg_k, .rm_ymm), evex(.L256, ._F3, ._0F38, .W0, 0x29, nomem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPMOVB2M, ops2(.reg_k, .rm_zmm), evex(.L512, ._F3, ._0F38, .W0, 0x29, nomem), .vRM, .{AVX512BW}),
// VPMOVW2M
vec(.VPMOVW2M, ops2(.reg_k, .rm_xmm), evex(.L128, ._F3, ._0F38, .W1, 0x29, nomem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPMOVW2M, ops2(.reg_k, .rm_ymm), evex(.L256, ._F3, ._0F38, .W1, 0x29, nomem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPMOVW2M, ops2(.reg_k, .rm_zmm), evex(.L512, ._F3, ._0F38, .W1, 0x29, nomem), .vRM, .{AVX512BW}),
// VPMOVD2M
vec(.VPMOVD2M, ops2(.reg_k, .rm_xmm), evex(.L128, ._F3, ._0F38, .W0, 0x39, nomem), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VPMOVD2M, ops2(.reg_k, .rm_ymm), evex(.L256, ._F3, ._0F38, .W0, 0x39, nomem), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VPMOVD2M, ops2(.reg_k, .rm_zmm), evex(.L512, ._F3, ._0F38, .W0, 0x39, nomem), .vRM, .{AVX512DQ}),
// VPMOVQ2M
vec(.VPMOVQ2M, ops2(.reg_k, .rm_xmm), evex(.L128, ._F3, ._0F38, .W1, 0x39, nomem), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VPMOVQ2M, ops2(.reg_k, .rm_ymm), evex(.L256, ._F3, ._0F38, .W1, 0x39, nomem), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VPMOVQ2M, ops2(.reg_k, .rm_zmm), evex(.L512, ._F3, ._0F38, .W1, 0x39, nomem), .vRM, .{AVX512DQ}),
// VPMOVDB / VPMOVSDB / VPMOVUSDB
// VPMOVDB
vec(.VPMOVDB, ops2(.xmm_m32_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x31, qmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVDB, ops2(.xmm_m64_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x31, qmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVDB, ops2(.xmm_m128_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x31, qmem), .vMR, .{AVX512F}),
// VPMOVSDB
vec(.VPMOVSDB, ops2(.xmm_m32_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x21, qmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVSDB, ops2(.xmm_m64_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x21, qmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVSDB, ops2(.xmm_m128_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x21, qmem), .vMR, .{AVX512F}),
// VPMOVUSDB
vec(.VPMOVUSDB, ops2(.xmm_m32_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x11, qmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVUSDB, ops2(.xmm_m64_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x11, qmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVUSDB, ops2(.xmm_m128_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x11, qmem), .vMR, .{AVX512F}),
// VPMOVDW / VPMOVSDW / VPMOVUSDW
// VPMOVDW
vec(.VPMOVDW, ops2(.xmm_m64_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x33, hmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVDW, ops2(.xmm_m128_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x33, hmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVDW, ops2(.ymm_m256_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x33, hmem), .vMR, .{AVX512F}),
// VPMOVSDW
vec(.VPMOVSDW, ops2(.xmm_m64_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x23, hmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVSDW, ops2(.xmm_m128_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x23, hmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVSDW, ops2(.ymm_m256_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x23, hmem), .vMR, .{AVX512F}),
// VPMOVUSDW
vec(.VPMOVUSDW, ops2(.xmm_m64_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x13, hmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVUSDW, ops2(.xmm_m128_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x13, hmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVUSDW, ops2(.ymm_m256_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x13, hmem), .vMR, .{AVX512F}),
// VPMOVM2B / VPMOVM2W / VPMOVM2D / VPMOVM2Q
// VPMOVM2B
vec(.VPMOVM2B, ops2(.xmm, .rm_k), evex(.L128, ._F3, ._0F38, .W0, 0x28, nomem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPMOVM2B, ops2(.ymm, .rm_k), evex(.L256, ._F3, ._0F38, .W0, 0x28, nomem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPMOVM2B, ops2(.zmm, .rm_k), evex(.L512, ._F3, ._0F38, .W0, 0x28, nomem), .vRM, .{AVX512BW}),
// VPMOVM2W
vec(.VPMOVM2W, ops2(.xmm, .rm_k), evex(.L128, ._F3, ._0F38, .W1, 0x28, nomem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPMOVM2W, ops2(.ymm, .rm_k), evex(.L256, ._F3, ._0F38, .W1, 0x28, nomem), .vRM, .{ AVX512VL, AVX512BW }),
vec(.VPMOVM2W, ops2(.zmm, .rm_k), evex(.L512, ._F3, ._0F38, .W1, 0x28, nomem), .vRM, .{AVX512BW}),
// VPMOVM2D
vec(.VPMOVM2D, ops2(.xmm, .rm_k), evex(.L128, ._F3, ._0F38, .W0, 0x38, nomem), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VPMOVM2D, ops2(.ymm, .rm_k), evex(.L256, ._F3, ._0F38, .W0, 0x38, nomem), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VPMOVM2D, ops2(.zmm, .rm_k), evex(.L512, ._F3, ._0F38, .W0, 0x38, nomem), .vRM, .{AVX512DQ}),
// VPMOVM2Q
vec(.VPMOVM2Q, ops2(.xmm, .rm_k), evex(.L128, ._F3, ._0F38, .W1, 0x38, nomem), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VPMOVM2Q, ops2(.ymm, .rm_k), evex(.L256, ._F3, ._0F38, .W1, 0x38, nomem), .vRM, .{ AVX512VL, AVX512DQ }),
vec(.VPMOVM2Q, ops2(.zmm, .rm_k), evex(.L512, ._F3, ._0F38, .W1, 0x38, nomem), .vRM, .{AVX512DQ}),
// VPMOVQB / VPMOVSQB / VPMOVUSQB
// VPMOVQB
vec(.VPMOVQB, ops2(.xmm_m16_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x32, emem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVQB, ops2(.xmm_m32_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x32, emem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVQB, ops2(.xmm_m64_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x32, emem), .vMR, .{AVX512F}),
// VPMOVSQB
vec(.VPMOVSQB, ops2(.xmm_m16_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x22, emem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVSQB, ops2(.xmm_m32_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x22, emem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVSQB, ops2(.xmm_m64_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x22, emem), .vMR, .{AVX512F}),
// VPMOVUSQB
vec(.VPMOVUSQB, ops2(.xmm_m16_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x12, emem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVUSQB, ops2(.xmm_m32_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x12, emem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVUSQB, ops2(.xmm_m64_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x12, emem), .vMR, .{AVX512F}),
// VPMOVQD / VPMOVSQD / VPMOVUSQD
// VPMOVQD
vec(.VPMOVQD, ops2(.xmm_m64_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x35, hmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVQD, ops2(.xmm_m128_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x35, hmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVQD, ops2(.ymm_m256_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x35, hmem), .vMR, .{AVX512F}),
// VPMOVSQD
vec(.VPMOVSQD, ops2(.xmm_m64_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x25, hmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVSQD, ops2(.xmm_m128_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x25, hmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVSQD, ops2(.ymm_m256_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x25, hmem), .vMR, .{AVX512F}),
// VPMOVUSQD
vec(.VPMOVUSQD, ops2(.xmm_m64_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x15, hmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVUSQD, ops2(.xmm_m128_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x15, hmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVUSQD, ops2(.ymm_m256_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x15, hmem), .vMR, .{AVX512F}),
// VPMOVQW / VPMOVSQW / VPMOVUSQW
// VPMOVQW
vec(.VPMOVQW, ops2(.xmm_m32_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x34, qmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVQW, ops2(.xmm_m64_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x34, qmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVQW, ops2(.xmm_m128_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x34, qmem), .vMR, .{AVX512F}),
// VPMOVSQW
vec(.VPMOVSQW, ops2(.xmm_m32_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x24, qmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVSQW, ops2(.xmm_m64_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x24, qmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVSQW, ops2(.xmm_m128_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x24, qmem), .vMR, .{AVX512F}),
// VPMOVUSQW
vec(.VPMOVUSQW, ops2(.xmm_m32_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x14, qmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVUSQW, ops2(.xmm_m64_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x14, qmem), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPMOVUSQW, ops2(.xmm_m128_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x14, qmem), .vMR, .{AVX512F}),
// VPMOVWB / VPMOVSWB / VPMOVUSWB
// VPMOVWB
vec(.VPMOVWB, ops2(.xmm_m64_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x30, hmem), .vMR, .{ AVX512VL, AVX512BW }),
vec(.VPMOVWB, ops2(.xmm_m128_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x30, hmem), .vMR, .{ AVX512VL, AVX512BW }),
vec(.VPMOVWB, ops2(.ymm_m256_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x30, hmem), .vMR, .{AVX512BW}),
// VPMOVSWB
vec(.VPMOVSWB, ops2(.xmm_m64_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x20, hmem), .vMR, .{ AVX512VL, AVX512BW }),
vec(.VPMOVSWB, ops2(.xmm_m128_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x20, hmem), .vMR, .{ AVX512VL, AVX512BW }),
vec(.VPMOVSWB, ops2(.ymm_m256_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x20, hmem), .vMR, .{AVX512BW}),
// VPMOVUSWB
vec(.VPMOVUSWB, ops2(.xmm_m64_kz, .xmm), evex(.L128, ._F3, ._0F38, .W0, 0x10, hmem), .vMR, .{ AVX512VL, AVX512BW }),
vec(.VPMOVUSWB, ops2(.xmm_m128_kz, .ymm), evex(.L256, ._F3, ._0F38, .W0, 0x10, hmem), .vMR, .{ AVX512VL, AVX512BW }),
vec(.VPMOVUSWB, ops2(.ymm_m256_kz, .zmm), evex(.L512, ._F3, ._0F38, .W0, 0x10, hmem), .vMR, .{AVX512BW}),
// VPMULTISHIFTQB
// VPMULTISHIFTQB
vec(.VPMULTISHIFTQB, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x83, full), .RVM, .{ AVX512VL, AVX512_VBMI }),
vec(.VPMULTISHIFTQB, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x83, full), .RVM, .{ AVX512VL, AVX512_VBMI }),
vec(.VPMULTISHIFTQB, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x83, full), .RVM, .{AVX512_VBMI}),
// VPOPCNT
// VPOPCNTB
vec(.VPOPCNTB, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F38, .W0, 0x54, fmem), .vRM, .{ AVX512VL, AVX512_BITALG }),
vec(.VPOPCNTB, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F38, .W0, 0x54, fmem), .vRM, .{ AVX512VL, AVX512_BITALG }),
vec(.VPOPCNTB, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F38, .W0, 0x54, fmem), .vRM, .{AVX512_BITALG}),
// VPOPCNTW
vec(.VPOPCNTW, ops2(.xmm_kz, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x54, fmem), .vRM, .{ AVX512VL, AVX512_BITALG }),
vec(.VPOPCNTW, ops2(.ymm_kz, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x54, fmem), .vRM, .{ AVX512VL, AVX512_BITALG }),
vec(.VPOPCNTW, ops2(.zmm_kz, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x54, fmem), .vRM, .{AVX512_BITALG}),
// VPOPCNTD
vec(.VPOPCNTD, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x55, full), .vRM, .{ AVX512VL, AVX512_VPOPCNTDQ }),
vec(.VPOPCNTD, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x55, full), .vRM, .{ AVX512VL, AVX512_VPOPCNTDQ }),
vec(.VPOPCNTD, ops2(.zmm_kz, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x55, full), .vRM, .{AVX512_VPOPCNTDQ}),
// VPOPCNTQ
vec(.VPOPCNTQ, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x55, full), .vRM, .{ AVX512VL, AVX512_VPOPCNTDQ }),
vec(.VPOPCNTQ, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x55, full), .vRM, .{ AVX512VL, AVX512_VPOPCNTDQ }),
vec(.VPOPCNTQ, ops2(.zmm_kz, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x55, full), .vRM, .{AVX512_VPOPCNTDQ}),
// VPROLD / VPROLVD / VPROLQ / VPROLVQ
// VPROLD
vec(.VPROLD, ops3(.xmm_kz, .xmm_m128_m32bcst, .imm8), evexr(.L128, ._66, ._0F, .W0, 0x72, 1, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPROLD, ops3(.ymm_kz, .ymm_m256_m32bcst, .imm8), evexr(.L256, ._66, ._0F, .W0, 0x72, 1, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPROLD, ops3(.zmm_kz, .zmm_m512_m32bcst, .imm8), evexr(.L512, ._66, ._0F, .W0, 0x72, 1, full), .VMI, .{AVX512F}),
// VPROLVD
vec(.VPROLVD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x15, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPROLVD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x15, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPROLVD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x15, full), .RVM, .{AVX512F}),
// VPROLQ
vec(.VPROLQ, ops3(.xmm_kz, .xmm_m128_m64bcst, .imm8), evexr(.L128, ._66, ._0F, .W1, 0x72, 1, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPROLQ, ops3(.ymm_kz, .ymm_m256_m64bcst, .imm8), evexr(.L256, ._66, ._0F, .W1, 0x72, 1, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPROLQ, ops3(.zmm_kz, .zmm_m512_m64bcst, .imm8), evexr(.L512, ._66, ._0F, .W1, 0x72, 1, full), .VMI, .{AVX512F}),
// VPROLVQ
vec(.VPROLVQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x15, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPROLVQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x15, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPROLVQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x15, full), .RVM, .{AVX512F}),
// VPRORD / VPRORVD / VPRORQ / VPRORVQ
// VPRORD
vec(.VPRORD, ops3(.xmm_kz, .xmm_m128_m32bcst, .imm8), evexr(.L128, ._66, ._0F, .W0, 0x72, 0, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPRORD, ops3(.ymm_kz, .ymm_m256_m32bcst, .imm8), evexr(.L256, ._66, ._0F, .W0, 0x72, 0, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPRORD, ops3(.zmm_kz, .zmm_m512_m32bcst, .imm8), evexr(.L512, ._66, ._0F, .W0, 0x72, 0, full), .VMI, .{AVX512F}),
// VPRORVD
vec(.VPRORVD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x14, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPRORVD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x14, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPRORVD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x14, full), .RVM, .{AVX512F}),
// VPRORQ
vec(.VPRORQ, ops3(.xmm_kz, .xmm_m128_m64bcst, .imm8), evexr(.L128, ._66, ._0F, .W1, 0x72, 0, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPRORQ, ops3(.ymm_kz, .ymm_m256_m64bcst, .imm8), evexr(.L256, ._66, ._0F, .W1, 0x72, 0, full), .VMI, .{ AVX512VL, AVX512F }),
vec(.VPRORQ, ops3(.zmm_kz, .zmm_m512_m64bcst, .imm8), evexr(.L512, ._66, ._0F, .W1, 0x72, 0, full), .VMI, .{AVX512F}),
// VPRORVQ
vec(.VPRORVQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x14, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPRORVQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x14, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPRORVQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x14, full), .RVM, .{AVX512F}),
// VPSCATTERDD / VPSCATTERDQ / VPSCATTERQD / VPSCATTERQQ
// VPSCATTERDD
vec(.VPSCATTERDD, ops2(.vm32x_k, .xmm), evex(.L128, ._66, ._0F38, .W0, 0xA0, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPSCATTERDD, ops2(.vm32y_k, .ymm), evex(.L256, ._66, ._0F38, .W0, 0xA0, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPSCATTERDD, ops2(.vm32z_k, .zmm), evex(.L512, ._66, ._0F38, .W0, 0xA0, t1s), .vMR, .{AVX512F}),
// VPSCATTERDQ
vec(.VPSCATTERDQ, ops2(.vm32x_k, .xmm), evex(.L128, ._66, ._0F38, .W1, 0xA0, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPSCATTERDQ, ops2(.vm32x_k, .ymm), evex(.L256, ._66, ._0F38, .W1, 0xA0, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPSCATTERDQ, ops2(.vm32y_k, .zmm), evex(.L512, ._66, ._0F38, .W1, 0xA0, t1s), .vMR, .{AVX512F}),
// VPSCATTERQD
vec(.VPSCATTERQD, ops2(.vm64x_k, .xmm), evex(.L128, ._66, ._0F38, .W0, 0xA1, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPSCATTERQD, ops2(.vm64y_k, .xmm), evex(.L256, ._66, ._0F38, .W0, 0xA1, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPSCATTERQD, ops2(.vm64z_k, .ymm), evex(.L512, ._66, ._0F38, .W0, 0xA1, t1s), .vMR, .{AVX512F}),
// VPSCATTERQQ
vec(.VPSCATTERQQ, ops2(.vm64x_k, .xmm), evex(.L128, ._66, ._0F38, .W1, 0xA1, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPSCATTERQQ, ops2(.vm64y_k, .ymm), evex(.L256, ._66, ._0F38, .W1, 0xA1, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VPSCATTERQQ, ops2(.vm64z_k, .zmm), evex(.L512, ._66, ._0F38, .W1, 0xA1, t1s), .vMR, .{AVX512F}),
// VPSHLD
// VPSHLDW
vec(.VPSHLDW, ops4(.xmm_kz, .xmm, .xmm_m128, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x70, fmem), .RVMI, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHLDW, ops4(.ymm_kz, .ymm, .ymm_m256, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x70, fmem), .RVMI, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHLDW, ops4(.zmm_kz, .zmm, .zmm_m512, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x70, fmem), .RVMI, .{AVX512_VBMI2}),
// VPSHLDD
vec(.VPSHLDD, ops4(.xmm_kz, .xmm, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x71, full), .RVMI, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHLDD, ops4(.ymm_kz, .ymm, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x71, full), .RVMI, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHLDD, ops4(.zmm_kz, .zmm, .zmm_m512_m32bcst, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x71, full), .RVMI, .{AVX512_VBMI2}),
// VPSHLDQ
vec(.VPSHLDQ, ops4(.xmm_kz, .xmm, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x71, full), .RVMI, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHLDQ, ops4(.ymm_kz, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x71, full), .RVMI, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHLDQ, ops4(.zmm_kz, .zmm, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x71, full), .RVMI, .{AVX512_VBMI2}),
// VPSHLDV
// VPSHLDVW
vec(.VPSHLDVW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x70, fmem), .RVM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHLDVW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x70, fmem), .RVM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHLDVW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x70, fmem), .RVM, .{AVX512_VBMI2}),
// VPSHLDVD
vec(.VPSHLDVD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x71, full), .RVM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHLDVD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x71, full), .RVM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHLDVD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x71, full), .RVM, .{AVX512_VBMI2}),
// VPSHLDVQ
vec(.VPSHLDVQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x71, full), .RVM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHLDVQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x71, full), .RVM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHLDVQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x71, full), .RVM, .{AVX512_VBMI2}),
// VPSHRD
// VPSHRDW
vec(.VPSHRDW, ops4(.xmm_kz, .xmm, .xmm_m128, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x72, fmem), .RVMI, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHRDW, ops4(.ymm_kz, .ymm, .ymm_m256, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x72, fmem), .RVMI, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHRDW, ops4(.zmm_kz, .zmm, .zmm_m512, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x72, fmem), .RVMI, .{AVX512_VBMI2}),
// VPSHRDD
vec(.VPSHRDD, ops4(.xmm_kz, .xmm, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x73, full), .RVMI, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHRDD, ops4(.ymm_kz, .ymm, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x73, full), .RVMI, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHRDD, ops4(.zmm_kz, .zmm, .zmm_m512_m32bcst, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x73, full), .RVMI, .{AVX512_VBMI2}),
// VPSHRDQ
vec(.VPSHRDQ, ops4(.xmm_kz, .xmm, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x73, full), .RVMI, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHRDQ, ops4(.ymm_kz, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x73, full), .RVMI, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHRDQ, ops4(.zmm_kz, .zmm, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x73, full), .RVMI, .{AVX512_VBMI2}),
// VPSHRDV
// VPSHRDVW
vec(.VPSHRDVW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x72, fmem), .RVM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHRDVW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x72, fmem), .RVM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHRDVW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x72, fmem), .RVM, .{AVX512_VBMI2}),
// VPSHRDVD
vec(.VPSHRDVD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x73, full), .RVM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHRDVD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x73, full), .RVM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHRDVD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x73, full), .RVM, .{AVX512_VBMI2}),
// VPSHRDVQ
vec(.VPSHRDVQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x73, full), .RVM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHRDVQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x73, full), .RVM, .{ AVX512VL, AVX512_VBMI2 }),
vec(.VPSHRDVQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x73, full), .RVM, .{AVX512_VBMI2}),
// VPSHUFBITQMB
vec(.VPSHUFBITQMB, ops3(.reg_k_k, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W0, 0x8F, fmem), .RVM, .{ AVX512VL, AVX512_BITALG }),
vec(.VPSHUFBITQMB, ops3(.reg_k_k, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W0, 0x8F, fmem), .RVM, .{ AVX512VL, AVX512_BITALG }),
vec(.VPSHUFBITQMB, ops3(.reg_k_k, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W0, 0x8F, fmem), .RVM, .{AVX512_BITALG}),
// VPSLLVW / VPSLLVD / VPSLLVQ
// VPSLLVW
vec(.VPSLLVW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x12, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSLLVW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x12, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSLLVW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x12, fmem), .RVM, .{AVX512BW}),
// VPSLLVD
vec(.VPSLLVD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0x47), .RVM, .{AVX2}),
vec(.VPSLLVD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x47), .RVM, .{AVX2}),
vec(.VPSLLVD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x47, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSLLVD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x47, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSLLVD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x47, full), .RVM, .{AVX512F}),
// VPSLLVQ
vec(.VPSLLVQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0x47), .RVM, .{AVX2}),
vec(.VPSLLVQ, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0x47), .RVM, .{AVX2}),
vec(.VPSLLVQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x47, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSLLVQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x47, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSLLVQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x47, full), .RVM, .{AVX512F}),
// VPSRAVW / VPSRAVD / VPSRAVQ
// VPSRAVW
vec(.VPSRAVW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x11, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSRAVW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x11, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSRAVW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x11, fmem), .RVM, .{AVX512BW}),
// VPSRAVD
vec(.VPSRAVD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0x46), .RVM, .{AVX2}),
vec(.VPSRAVD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x46), .RVM, .{AVX2}),
vec(.VPSRAVD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x46, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRAVD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x46, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRAVD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x46, full), .RVM, .{AVX512F}),
// VPSRAVQ
vec(.VPSRAVQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x46, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRAVQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x46, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRAVQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x46, full), .RVM, .{AVX512F}),
// VPSRLVW / VPSRLVD / VPSRLVQ
// VPSRLVW
vec(.VPSRLVW, ops3(.xmm_kz, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x10, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSRLVW, ops3(.ymm_kz, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x10, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPSRLVW, ops3(.zmm_kz, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x10, fmem), .RVM, .{AVX512BW}),
// VPSRLVD
vec(.VPSRLVD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0x45), .RVM, .{AVX2}),
vec(.VPSRLVD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x45), .RVM, .{AVX2}),
vec(.VPSRLVD, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x45, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRLVD, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x45, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRLVD, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x45, full), .RVM, .{AVX512F}),
// VPSRLVQ
vec(.VPSRLVQ, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W1, 0x45), .RVM, .{AVX2}),
vec(.VPSRLVQ, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W1, 0x45), .RVM, .{AVX2}),
vec(.VPSRLVQ, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x45, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRLVQ, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x45, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPSRLVQ, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x45, full), .RVM, .{AVX512F}),
// VPTERNLOGD / VPTERNLOGQ
// VPTERNLOGD
vec(.VPTERNLOGD, ops4(.xmm_kz, .xmm, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x25, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VPTERNLOGD, ops4(.ymm_kz, .ymm, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x25, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VPTERNLOGD, ops4(.zmm_kz, .zmm, .zmm_m512_m32bcst, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x25, full), .RVMI, .{AVX512F}),
// VPTERNLOGQ
vec(.VPTERNLOGQ, ops4(.xmm_kz, .xmm, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x25, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VPTERNLOGQ, ops4(.ymm_kz, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x25, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VPTERNLOGQ, ops4(.zmm_kz, .zmm, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x25, full), .RVMI, .{AVX512F}),
// VPTESTMB / VPTESTMW / VPTESTMD / VPTESTMQ
// VPTESTMB
vec(.VPTESTMB, ops3(.reg_k_k, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W0, 0x26, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPTESTMB, ops3(.reg_k_k, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W0, 0x26, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPTESTMB, ops3(.reg_k_k, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W0, 0x26, fmem), .RVM, .{AVX512BW}),
// VPTESTMW
vec(.VPTESTMW, ops3(.reg_k_k, .xmm, .xmm_m128), evex(.L128, ._66, ._0F38, .W1, 0x26, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPTESTMW, ops3(.reg_k_k, .ymm, .ymm_m256), evex(.L256, ._66, ._0F38, .W1, 0x26, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPTESTMW, ops3(.reg_k_k, .zmm, .zmm_m512), evex(.L512, ._66, ._0F38, .W1, 0x26, fmem), .RVM, .{AVX512BW}),
// VPTESTMD
vec(.VPTESTMD, ops3(.reg_k_k, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x27, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPTESTMD, ops3(.reg_k_k, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x27, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPTESTMD, ops3(.reg_k_k, .zmm, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x27, full), .RVM, .{AVX512F}),
// VPTESTMQ
vec(.VPTESTMQ, ops3(.reg_k_k, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x27, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPTESTMQ, ops3(.reg_k_k, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x27, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPTESTMQ, ops3(.reg_k_k, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x27, full), .RVM, .{AVX512F}),
// VPTESTNMB / VPTESTNMW / VPTESTNMD / VPTESTNMQ
// VPTESTNMB
vec(.VPTESTNMB, ops3(.reg_k_k, .xmm, .xmm_m128), evex(.L128, ._F3, ._0F38, .W0, 0x26, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPTESTNMB, ops3(.reg_k_k, .ymm, .ymm_m256), evex(.L256, ._F3, ._0F38, .W0, 0x26, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPTESTNMB, ops3(.reg_k_k, .zmm, .zmm_m512), evex(.L512, ._F3, ._0F38, .W0, 0x26, fmem), .RVM, .{AVX512BW}),
// VPTESTNMW
vec(.VPTESTNMW, ops3(.reg_k_k, .xmm, .xmm_m128), evex(.L128, ._F3, ._0F38, .W1, 0x26, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPTESTNMW, ops3(.reg_k_k, .ymm, .ymm_m256), evex(.L256, ._F3, ._0F38, .W1, 0x26, fmem), .RVM, .{ AVX512VL, AVX512BW }),
vec(.VPTESTNMW, ops3(.reg_k_k, .zmm, .zmm_m512), evex(.L512, ._F3, ._0F38, .W1, 0x26, fmem), .RVM, .{AVX512BW}),
// VPTESTNMD
vec(.VPTESTNMD, ops3(.reg_k_k, .xmm, .xmm_m128_m32bcst), evex(.L128, ._F3, ._0F38, .W0, 0x27, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPTESTNMD, ops3(.reg_k_k, .ymm, .ymm_m256_m32bcst), evex(.L256, ._F3, ._0F38, .W0, 0x27, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPTESTNMD, ops3(.reg_k_k, .zmm, .zmm_m512_m32bcst), evex(.L512, ._F3, ._0F38, .W0, 0x27, full), .RVM, .{AVX512F}),
// VPTESTNMQ
vec(.VPTESTNMQ, ops3(.reg_k_k, .xmm, .xmm_m128_m64bcst), evex(.L128, ._F3, ._0F38, .W1, 0x27, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPTESTNMQ, ops3(.reg_k_k, .ymm, .ymm_m256_m64bcst), evex(.L256, ._F3, ._0F38, .W1, 0x27, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VPTESTNMQ, ops3(.reg_k_k, .zmm, .zmm_m512_m64bcst), evex(.L512, ._F3, ._0F38, .W1, 0x27, full), .RVM, .{AVX512F}),
// VRANGEPD / VRANGEPS / VRANGESD / VRANGESS
// VRANGEPD
vec(.VRANGEPD, ops4(.xmm_kz, .xmm, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x50, full), .RVMI, .{ AVX512VL, AVX512DQ }),
vec(.VRANGEPD, ops4(.ymm_kz, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x50, full), .RVMI, .{ AVX512VL, AVX512DQ }),
vec(.VRANGEPD, ops4(.zmm_kz, .zmm, .zmm_m512_m64bcst_sae, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x50, full), .RVMI, .{AVX512DQ}),
// VRANGEPS
vec(.VRANGEPS, ops4(.xmm_kz, .xmm, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x50, full), .RVMI, .{ AVX512VL, AVX512DQ }),
vec(.VRANGEPS, ops4(.ymm_kz, .ymm, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x50, full), .RVMI, .{ AVX512VL, AVX512DQ }),
vec(.VRANGEPS, ops4(.zmm_kz, .zmm, .zmm_m512_m32bcst_sae, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x50, full), .RVMI, .{AVX512DQ}),
// VRANGESD
vec(.VRANGESD, ops4(.xmm_kz, .xmm, .xmm_m64_sae, .imm8), evex(.LIG, ._66, ._0F3A, .W1, 0x51, t1s), .RVMI, .{AVX512DQ}),
// VRANGESS
vec(.VRANGESS, ops4(.xmm_kz, .xmm, .xmm_m32_sae, .imm8), evex(.LIG, ._66, ._0F3A, .W0, 0x51, t1s), .RVMI, .{AVX512DQ}),
// VRCP14PD / VRCP14PS / VRCP14SD / VRCP14SS
// VRCP14PD
vec(.VRCP14PD, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x4C, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VRCP14PD, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x4C, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VRCP14PD, ops2(.zmm_kz, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x4C, full), .vRM, .{AVX512F}),
// VRCP14PS
vec(.VRCP14PS, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x4C, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VRCP14PS, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x4C, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VRCP14PS, ops2(.zmm_kz, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x4C, full), .vRM, .{AVX512F}),
// VRCP14SD
vec(.VRCP14SD, ops3(.xmm_kz, .xmm, .xmm_m64), evex(.LIG, ._66, ._0F38, .W1, 0x4D, t1s), .RVM, .{AVX512F}),
// VRCP14SS
vec(.VRCP14SS, ops3(.xmm_kz, .xmm, .xmm_m32), evex(.LIG, ._66, ._0F38, .W0, 0x4D, t1s), .RVM, .{AVX512F}),
// VREDUCEPD / VREDUCEPS / VREDUCESD / VREDUCESS
// VREDUCEPD
vec(.VREDUCEPD, ops3(.xmm_kz, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x56, full), .vRMI, .{ AVX512VL, AVX512DQ }),
vec(.VREDUCEPD, ops3(.ymm_kz, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x56, full), .vRMI, .{ AVX512VL, AVX512DQ }),
vec(.VREDUCEPD, ops3(.zmm_kz, .zmm_m512_m64bcst_sae, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x56, full), .vRMI, .{AVX512DQ}),
// VREDUCEPS
vec(.VREDUCEPS, ops3(.xmm_kz, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x56, full), .vRMI, .{ AVX512VL, AVX512DQ }),
vec(.VREDUCEPS, ops3(.ymm_kz, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x56, full), .vRMI, .{ AVX512VL, AVX512DQ }),
vec(.VREDUCEPS, ops3(.zmm_kz, .zmm_m512_m32bcst_sae, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x56, full), .vRMI, .{AVX512DQ}),
// VREDUCESD
vec(.VREDUCESD, ops4(.xmm_kz, .xmm, .xmm_m64_sae, .imm8), evex(.LIG, ._66, ._0F3A, .W1, 0x57, t1s), .RVMI, .{AVX512DQ}),
// VREDUCESS
vec(.VREDUCESS, ops4(.xmm_kz, .xmm, .xmm_m32_sae, .imm8), evex(.LIG, ._66, ._0F3A, .W0, 0x57, t1s), .RVMI, .{AVX512DQ}),
// VRNDSCALEPD / VRNDSCALEPS / VRNDSCALESD / VRNDSCALESS
// VRNDSCALEPD
vec(.VRNDSCALEPD, ops3(.xmm_kz, .xmm_m128_m64bcst, .imm8), evex(.L128, ._66, ._0F3A, .W1, 0x09, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VRNDSCALEPD, ops3(.ymm_kz, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x09, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VRNDSCALEPD, ops3(.zmm_kz, .zmm_m512_m64bcst_sae, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x09, full), .vRMI, .{AVX512F}),
// VRNDSCALEPS
vec(.VRNDSCALEPS, ops3(.xmm_kz, .xmm_m128_m32bcst, .imm8), evex(.L128, ._66, ._0F3A, .W0, 0x08, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VRNDSCALEPS, ops3(.ymm_kz, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x08, full), .vRMI, .{ AVX512VL, AVX512F }),
vec(.VRNDSCALEPS, ops3(.zmm_kz, .zmm_m512_m32bcst_sae, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x08, full), .vRMI, .{AVX512F}),
// VRNDSCALESD
vec(.VRNDSCALESD, ops4(.xmm_kz, .xmm, .xmm_m64_sae, .imm8), evex(.LIG, ._66, ._0F3A, .W1, 0x0B, t1s), .RVMI, .{AVX512F}),
// VRNDSCALESS
vec(.VRNDSCALESS, ops4(.xmm_kz, .xmm, .xmm_m32_sae, .imm8), evex(.LIG, ._66, ._0F3A, .W0, 0x0A, t1s), .RVMI, .{AVX512F}),
// VRSQRT14PD / VRSQRT14PS / VRSQRT14SD / VRSQRT14SS
// VRSQRT14PD
vec(.VRSQRT14PD, ops2(.xmm_kz, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x4E, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VRSQRT14PD, ops2(.ymm_kz, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x4E, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VRSQRT14PD, ops2(.zmm_kz, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F38, .W1, 0x4E, full), .vRM, .{AVX512F}),
// VRSQRT14PS
vec(.VRSQRT14PS, ops2(.xmm_kz, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x4E, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VRSQRT14PS, ops2(.ymm_kz, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x4E, full), .vRM, .{ AVX512VL, AVX512F }),
vec(.VRSQRT14PS, ops2(.zmm_kz, .zmm_m512_m32bcst), evex(.L512, ._66, ._0F38, .W0, 0x4E, full), .vRM, .{AVX512F}),
// VRSQRT14SD
vec(.VRSQRT14SD, ops3(.xmm_kz, .xmm, .xmm_m64), evex(.LIG, ._66, ._0F38, .W1, 0x4F, t1s), .RVM, .{AVX512F}),
// VRSQRT14SS
vec(.VRSQRT14SS, ops3(.xmm_kz, .xmm, .xmm_m32), evex(.LIG, ._66, ._0F38, .W0, 0x4F, t1s), .RVM, .{AVX512F}),
// VSCALEFPD / VSCALEFPS / VSCALEFSD / VSCALEFSS
// NOTE: these are 3-operand `/r` encodings with no immediate, so the
// encoding mode is .RVM (reg, vvvv, r/m), not .RVMI — matching the other
// ops3 EVEX entries such as VRCP14SD / VRSQRT14SD above.
// VSCALEFPD
vec(.VSCALEFPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F38, .W1, 0x2C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VSCALEFPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F38, .W1, 0x2C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VSCALEFPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst_er), evex(.L512, ._66, ._0F38, .W1, 0x2C, full), .RVM, .{AVX512F}),
// VSCALEFPS
vec(.VSCALEFPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._66, ._0F38, .W0, 0x2C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VSCALEFPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._66, ._0F38, .W0, 0x2C, full), .RVM, .{ AVX512VL, AVX512F }),
vec(.VSCALEFPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst_er), evex(.L512, ._66, ._0F38, .W0, 0x2C, full), .RVM, .{AVX512F}),
// VSCALEFSD
vec(.VSCALEFSD, ops3(.xmm_kz, .xmm, .xmm_m64_er), evex(.LIG, ._66, ._0F38, .W1, 0x2D, t1s), .RVM, .{AVX512F}),
// VSCALEFSS
vec(.VSCALEFSS, ops3(.xmm_kz, .xmm, .xmm_m32_er), evex(.LIG, ._66, ._0F38, .W0, 0x2D, t1s), .RVM, .{AVX512F}),
// VSCATTERDPS / VSCATTERDPD / VSCATTERQPS / VSCATTERQPD
// VSCATTERDPS
vec(.VSCATTERDPS, ops2(.vm32x_k, .xmm), evex(.L128, ._66, ._0F38, .W0, 0xA2, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VSCATTERDPS, ops2(.vm32y_k, .ymm), evex(.L256, ._66, ._0F38, .W0, 0xA2, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VSCATTERDPS, ops2(.vm32z_k, .zmm), evex(.L512, ._66, ._0F38, .W0, 0xA2, t1s), .vMR, .{AVX512F}),
// VSCATTERDPD
vec(.VSCATTERDPD, ops2(.vm32x_k, .xmm), evex(.L128, ._66, ._0F38, .W1, 0xA2, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VSCATTERDPD, ops2(.vm32x_k, .ymm), evex(.L256, ._66, ._0F38, .W1, 0xA2, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VSCATTERDPD, ops2(.vm32y_k, .zmm), evex(.L512, ._66, ._0F38, .W1, 0xA2, t1s), .vMR, .{AVX512F}),
// VSCATTERQPS
vec(.VSCATTERQPS, ops2(.vm64x_k, .xmm), evex(.L128, ._66, ._0F38, .W0, 0xA3, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VSCATTERQPS, ops2(.vm64y_k, .xmm), evex(.L256, ._66, ._0F38, .W0, 0xA3, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VSCATTERQPS, ops2(.vm64z_k, .ymm), evex(.L512, ._66, ._0F38, .W0, 0xA3, t1s), .vMR, .{AVX512F}),
// VSCATTERQPD
vec(.VSCATTERQPD, ops2(.vm64x_k, .xmm), evex(.L128, ._66, ._0F38, .W1, 0xA3, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VSCATTERQPD, ops2(.vm64y_k, .ymm), evex(.L256, ._66, ._0F38, .W1, 0xA3, t1s), .vMR, .{ AVX512VL, AVX512F }),
vec(.VSCATTERQPD, ops2(.vm64z_k, .zmm), evex(.L512, ._66, ._0F38, .W1, 0xA3, t1s), .vMR, .{AVX512F}),
// VSHUFF32X4 / VSHUFF64X2 / VSHUFI32X4 / VSHUFI64X2
// VSHUFF32X4
vec(.VSHUFF32X4, ops4(.ymm_kz, .ymm, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x23, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VSHUFF32X4, ops4(.zmm_kz, .zmm, .zmm_m512_m32bcst, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x23, full), .RVMI, .{AVX512F}),
// VSHUFF64X2
vec(.VSHUFF64X2, ops4(.ymm_kz, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x23, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VSHUFF64X2, ops4(.zmm_kz, .zmm, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x23, full), .RVMI, .{AVX512F}),
// VSHUFI32X4
vec(.VSHUFI32X4, ops4(.ymm_kz, .ymm, .ymm_m256_m32bcst, .imm8), evex(.L256, ._66, ._0F3A, .W0, 0x43, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VSHUFI32X4, ops4(.zmm_kz, .zmm, .zmm_m512_m32bcst, .imm8), evex(.L512, ._66, ._0F3A, .W0, 0x43, full), .RVMI, .{AVX512F}),
// VSHUFI64X2
vec(.VSHUFI64X2, ops4(.ymm_kz, .ymm, .ymm_m256_m64bcst, .imm8), evex(.L256, ._66, ._0F3A, .W1, 0x43, full), .RVMI, .{ AVX512VL, AVX512F }),
vec(.VSHUFI64X2, ops4(.zmm_kz, .zmm, .zmm_m512_m64bcst, .imm8), evex(.L512, ._66, ._0F3A, .W1, 0x43, full), .RVMI, .{AVX512F}),
// VTESTPD / VTESTPS
// VTESTPS
vec(.VTESTPS, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0x0E), .vRM, .{AVX}),
vec(.VTESTPS, ops2(.ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x0E), .vRM, .{AVX}),
// VTESTPD
vec(.VTESTPD, ops2(.xmml, .xmml_m128), vex(.L128, ._66, ._0F38, .W0, 0x0F), .vRM, .{AVX}),
vec(.VTESTPD, ops2(.ymml, .ymml_m256), vex(.L256, ._66, ._0F38, .W0, 0x0F), .vRM, .{AVX}),
// VXORPD
vec(.VXORPD, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._66, ._0F, .WIG, 0x57), .RVM, .{AVX}),
vec(.VXORPD, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._66, ._0F, .WIG, 0x57), .RVM, .{AVX}),
vec(.VXORPD, ops3(.xmm_kz, .xmm, .xmm_m128_m64bcst), evex(.L128, ._66, ._0F, .W1, 0x57, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VXORPD, ops3(.ymm_kz, .ymm, .ymm_m256_m64bcst), evex(.L256, ._66, ._0F, .W1, 0x57, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VXORPD, ops3(.zmm_kz, .zmm, .zmm_m512_m64bcst), evex(.L512, ._66, ._0F, .W1, 0x57, full), .RVM, .{AVX512DQ}),
// VXORPS
vec(.VXORPS, ops3(.xmml, .xmml, .xmml_m128), vex(.L128, ._NP, ._0F, .WIG, 0x57), .RVM, .{AVX}),
vec(.VXORPS, ops3(.ymml, .ymml, .ymml_m256), vex(.L256, ._NP, ._0F, .WIG, 0x57), .RVM, .{AVX}),
vec(.VXORPS, ops3(.xmm_kz, .xmm, .xmm_m128_m32bcst), evex(.L128, ._NP, ._0F, .W0, 0x57, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VXORPS, ops3(.ymm_kz, .ymm, .ymm_m256_m32bcst), evex(.L256, ._NP, ._0F, .W0, 0x57, full), .RVM, .{ AVX512VL, AVX512DQ }),
vec(.VXORPS, ops3(.zmm_kz, .zmm, .zmm_m512_m32bcst), evex(.L512, ._NP, ._0F, .W0, 0x57, full), .RVM, .{AVX512DQ}),
// VZEROALL
vec(.VZEROALL, ops0(), vex(.L256, ._NP, ._0F, .WIG, 0x77), .vZO, .{AVX}),
// VZEROUPPER
vec(.VZEROUPPER, ops0(), vex(.L128, ._NP, ._0F, .WIG, 0x77), .vZO, .{AVX}),
//
// AVX512 mask register instructions
//
// KADDW / KADDB / KADDD / KADDQ
vec(.KADDB, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._66, ._0F, .W0, 0x4A), .RVM, .{AVX512DQ}),
vec(.KADDW, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W0, 0x4A), .RVM, .{AVX512DQ}),
vec(.KADDD, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._66, ._0F, .W1, 0x4A), .RVM, .{AVX512BW}),
vec(.KADDQ, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W1, 0x4A), .RVM, .{AVX512BW}),
// KANDW / KANDB / KANDD / KANDQ
vec(.KANDB, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._66, ._0F, .W0, 0x41), .RVM, .{AVX512DQ}),
vec(.KANDW, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W0, 0x41), .RVM, .{AVX512F}),
vec(.KANDD, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._66, ._0F, .W1, 0x41), .RVM, .{AVX512BW}),
vec(.KANDQ, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W1, 0x41), .RVM, .{AVX512BW}),
// KANDNW / KANDNB / KANDND / KANDNQ
vec(.KANDNB, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._66, ._0F, .W0, 0x42), .RVM, .{AVX512DQ}),
vec(.KANDNW, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W0, 0x42), .RVM, .{AVX512F}),
vec(.KANDND, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._66, ._0F, .W1, 0x42), .RVM, .{AVX512BW}),
vec(.KANDNQ, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W1, 0x42), .RVM, .{AVX512BW}),
// KNOTW / KNOTB / KNOTD / KNOTQ
vec(.KNOTB, ops2(.reg_k, .reg_k), vex(.LZ, ._66, ._0F, .W0, 0x44), .vRM, .{AVX512DQ}),
vec(.KNOTW, ops2(.reg_k, .reg_k), vex(.LZ, ._NP, ._0F, .W0, 0x44), .vRM, .{AVX512F}),
vec(.KNOTD, ops2(.reg_k, .reg_k), vex(.LZ, ._66, ._0F, .W1, 0x44), .vRM, .{AVX512BW}),
vec(.KNOTQ, ops2(.reg_k, .reg_k), vex(.LZ, ._NP, ._0F, .W1, 0x44), .vRM, .{AVX512BW}),
// KORW / KORB / KORD / KORQ
vec(.KORB, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._66, ._0F, .W0, 0x45), .RVM, .{AVX512DQ}),
vec(.KORW, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W0, 0x45), .RVM, .{AVX512F}),
vec(.KORD, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._66, ._0F, .W1, 0x45), .RVM, .{AVX512BW}),
vec(.KORQ, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W1, 0x45), .RVM, .{AVX512BW}),
// KORTESTW / KORTESTB / KORTESTD / KORTESTQ
vec(.KORTESTB, ops2(.reg_k, .reg_k), vex(.LZ, ._66, ._0F, .W0, 0x98), .vRM, .{AVX512DQ}),
vec(.KORTESTW, ops2(.reg_k, .reg_k), vex(.LZ, ._NP, ._0F, .W0, 0x98), .vRM, .{AVX512F}),
vec(.KORTESTD, ops2(.reg_k, .reg_k), vex(.LZ, ._66, ._0F, .W1, 0x98), .vRM, .{AVX512BW}),
vec(.KORTESTQ, ops2(.reg_k, .reg_k), vex(.LZ, ._NP, ._0F, .W1, 0x98), .vRM, .{AVX512BW}),
// KMOVW / KMOVB / KMOVD / KMOVQ
vec(.KMOVB, ops2(.reg_k, .k_m8), vex(.LZ, ._66, ._0F, .W0, 0x90), .vRM, .{AVX512DQ}),
vec(.KMOVB, ops2(.rm_mem8, .reg_k), vex(.LZ, ._66, ._0F, .W0, 0x91), .vMR, .{AVX512DQ}),
vec(.KMOVB, ops2(.reg_k, .reg32), vex(.LZ, ._66, ._0F, .W0, 0x92), .vRM, .{AVX512DQ}),
vec(.KMOVB, ops2(.reg32, .reg_k), vex(.LZ, ._66, ._0F, .W0, 0x93), .vRM, .{AVX512DQ}),
//
vec(.KMOVW, ops2(.reg_k, .k_m16), vex(.LZ, ._NP, ._0F, .W0, 0x90), .vRM, .{AVX512F}),
vec(.KMOVW, ops2(.rm_mem16, .reg_k), vex(.LZ, ._NP, ._0F, .W0, 0x91), .vMR, .{AVX512F}),
vec(.KMOVW, ops2(.reg_k, .reg32), vex(.LZ, ._NP, ._0F, .W0, 0x92), .vRM, .{AVX512F}),
vec(.KMOVW, ops2(.reg32, .reg_k), vex(.LZ, ._NP, ._0F, .W0, 0x93), .vRM, .{AVX512F}),
//
vec(.KMOVD, ops2(.reg_k, .k_m32), vex(.LZ, ._66, ._0F, .W1, 0x90), .vRM, .{AVX512BW}),
vec(.KMOVD, ops2(.rm_mem32, .reg_k), vex(.LZ, ._66, ._0F, .W1, 0x91), .vMR, .{AVX512BW}),
vec(.KMOVD, ops2(.reg_k, .reg32), vex(.LZ, ._F2, ._0F, .W0, 0x92), .vRM, .{AVX512BW}),
vec(.KMOVD, ops2(.reg32, .reg_k), vex(.LZ, ._F2, ._0F, .W0, 0x93), .vRM, .{AVX512BW}),
//
vec(.KMOVQ, ops2(.reg_k, .k_m64), vex(.LZ, ._NP, ._0F, .W1, 0x90), .vRM, .{AVX512BW}),
vec(.KMOVQ, ops2(.rm_mem64, .reg_k), vex(.LZ, ._NP, ._0F, .W1, 0x91), .vMR, .{AVX512BW}),
vec(.KMOVQ, ops2(.reg_k, .reg64), vex(.LZ, ._F2, ._0F, .W1, 0x92), .vRM, .{ AVX512BW, No32 }),
vec(.KMOVQ, ops2(.reg64, .reg_k), vex(.LZ, ._F2, ._0F, .W1, 0x93), .vRM, .{ AVX512BW, No32 }),
// KSHIFTLW / KSHIFTLB / KSHIFTLD / KSHIFTLQ
vec(.KSHIFTLB, ops3(.reg_k, .reg_k, .imm8), vex(.LZ, ._66, ._0F3A, .W0, 0x32), .vRMI, .{AVX512DQ}),
vec(.KSHIFTLW, ops3(.reg_k, .reg_k, .imm8), vex(.LZ, ._66, ._0F3A, .W1, 0x32), .vRMI, .{AVX512F}),
vec(.KSHIFTLD, ops3(.reg_k, .reg_k, .imm8), vex(.LZ, ._66, ._0F3A, .W0, 0x33), .vRMI, .{AVX512BW}),
vec(.KSHIFTLQ, ops3(.reg_k, .reg_k, .imm8), vex(.LZ, ._66, ._0F3A, .W1, 0x33), .vRMI, .{AVX512BW}),
// KSHIFTRW / KSHIFTRB / KSHIFTRD / KSHIFTRQ
vec(.KSHIFTRB, ops3(.reg_k, .reg_k, .imm8), vex(.LZ, ._66, ._0F3A, .W0, 0x30), .vRMI, .{AVX512DQ}),
vec(.KSHIFTRW, ops3(.reg_k, .reg_k, .imm8), vex(.LZ, ._66, ._0F3A, .W1, 0x30), .vRMI, .{AVX512F}),
vec(.KSHIFTRD, ops3(.reg_k, .reg_k, .imm8), vex(.LZ, ._66, ._0F3A, .W0, 0x31), .vRMI, .{AVX512BW}),
vec(.KSHIFTRQ, ops3(.reg_k, .reg_k, .imm8), vex(.LZ, ._66, ._0F3A, .W1, 0x31), .vRMI, .{AVX512BW}),
// KTESTW / KTESTB / KTESTD / KTESTQ
vec(.KTESTB, ops2(.reg_k, .reg_k), vex(.LZ, ._66, ._0F, .W0, 0x99), .vRM, .{AVX512DQ}),
vec(.KTESTW, ops2(.reg_k, .reg_k), vex(.LZ, ._NP, ._0F, .W0, 0x99), .vRM, .{AVX512DQ}),
vec(.KTESTD, ops2(.reg_k, .reg_k), vex(.LZ, ._66, ._0F, .W1, 0x99), .vRM, .{AVX512BW}),
vec(.KTESTQ, ops2(.reg_k, .reg_k), vex(.LZ, ._NP, ._0F, .W1, 0x99), .vRM, .{AVX512BW}),
// KUNPCKBW / KUNPCKWD / KUNPCKDQ
vec(.KUNPCKBW, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._66, ._0F, .W0, 0x4B), .RVM, .{AVX512F}),
vec(.KUNPCKWD, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W0, 0x4B), .RVM, .{AVX512BW}),
vec(.KUNPCKDQ, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W1, 0x4B), .RVM, .{AVX512BW}),
// KXNORW / KXNORB / KXNORD / KXNORQ
vec(.KXNORB, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._66, ._0F, .W0, 0x46), .RVM, .{AVX512DQ}),
vec(.KXNORW, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W0, 0x46), .RVM, .{AVX512F}),
vec(.KXNORD, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._66, ._0F, .W1, 0x46), .RVM, .{AVX512BW}),
vec(.KXNORQ, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W1, 0x46), .RVM, .{AVX512BW}),
// KXORW / KXORB / KXORD / KXORQ
vec(.KXORB, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._66, ._0F, .W0, 0x47), .RVM, .{AVX512DQ}),
vec(.KXORW, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W0, 0x47), .RVM, .{AVX512F}),
vec(.KXORD, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._66, ._0F, .W1, 0x47), .RVM, .{AVX512BW}),
vec(.KXORQ, ops3(.reg_k, .reg_k, .reg_k), vex(.L1, ._NP, ._0F, .W1, 0x47), .RVM, .{AVX512BW}),
//
// Xeon Phi
//
// PREFETCHWT1
instr(.PREFETCHWT1, ops1(.rm_mem8), Op2r(0x0F, 0x0D, 2), .M, .ZO, .{cpu.PREFETCHWT1}),
//
// Undefined/Reserved Opcodes
//
// Unused opcodes are reserved for future instruction extensions, and most
// encodings will generate (#UD) on earlier processors. However, for
// compatibility with older processors, some reserved opcodes do not generate
// #UD but behave like other instructions or have unique behavior.
//
// SALC
// - Set AL to Carry flag. IF (CF=1), AL=FF, ELSE, AL=0 (#UD in 64-bit mode)
instr(.SALC, ops0(), Op1(0xD6), .ZO, .ZO, .{ _8086, No64 }),
//
// Immediate Group 1
//
// Same behavior as corresponding instruction with Opcode Op1r(0x80, x)
//
instr(.RESRV_ADD, ops2(.rm8, .imm8), Op1r(0x82, 0), .MI, .ZO, .{ _8086, No64 }),
instr(.RESRV_OR, ops2(.rm8, .imm8), Op1r(0x82, 1), .MI, .ZO, .{ _8086, No64 }),
instr(.RESRV_ADC, ops2(.rm8, .imm8), Op1r(0x82, 2), .MI, .ZO, .{ _8086, No64 }),
instr(.RESRV_SBB, ops2(.rm8, .imm8), Op1r(0x82, 3), .MI, .ZO, .{ _8086, No64 }),
instr(.RESRV_AND, ops2(.rm8, .imm8), Op1r(0x82, 4), .MI, .ZO, .{ _8086, No64 }),
instr(.RESRV_SUB, ops2(.rm8, .imm8), Op1r(0x82, 5), .MI, .ZO, .{ _8086, No64 }),
instr(.RESRV_XOR, ops2(.rm8, .imm8), Op1r(0x82, 6), .MI, .ZO, .{ _8086, No64 }),
instr(.RESRV_CMP, ops2(.rm8, .imm8), Op1r(0x82, 7), .MI, .ZO, .{ _8086, No64 }),
//
// Shift Group 2 /6
//
// Same behavior as corresponding instruction with Opcode Op1r(x, 4)
//
instr(.RESRV_SAL, ops2(.rm8, .imm_1), Op1r(0xD0, 6), .M, .ZO, .{_8086}),
instr(.RESRV_SAL, ops2(.rm8, .reg_cl), Op1r(0xD2, 6), .M, .ZO, .{_8086}),
instr(.RESRV_SAL, ops2(.rm8, .imm8), Op1r(0xC0, 6), .MI, .ZO, .{_186}),
instr(.RESRV_SAL, ops2(.rm16, .imm_1), Op1r(0xD1, 6), .M, .Op16, .{_8086}),
instr(.RESRV_SAL, ops2(.rm32, .imm_1), Op1r(0xD1, 6), .M, .Op32, .{_386}),
instr(.RESRV_SAL, ops2(.rm64, .imm_1), Op1r(0xD1, 6), .M, .REX_W, .{x86_64}),
instr(.RESRV_SAL, ops2(.rm16, .imm8), Op1r(0xC1, 6), .MI, .Op16, .{_186}),
instr(.RESRV_SAL, ops2(.rm32, .imm8), Op1r(0xC1, 6), .MI, .Op32, .{_386}),
instr(.RESRV_SAL, ops2(.rm64, .imm8), Op1r(0xC1, 6), .MI, .REX_W, .{x86_64}),
instr(.RESRV_SAL, ops2(.rm16, .reg_cl), Op1r(0xD3, 6), .M, .Op16, .{_8086}),
instr(.RESRV_SAL, ops2(.rm32, .reg_cl), Op1r(0xD3, 6), .M, .Op32, .{_386}),
instr(.RESRV_SAL, ops2(.rm64, .reg_cl), Op1r(0xD3, 6), .M, .REX_W, .{x86_64}),
//
instr(.RESRV_SHL, ops2(.rm8, .imm_1), Op1r(0xD0, 6), .M, .ZO, .{_8086}),
instr(.RESRV_SHL, ops2(.rm8, .reg_cl), Op1r(0xD2, 6), .M, .ZO, .{_8086}),
instr(.RESRV_SHL, ops2(.rm8, .imm8), Op1r(0xC0, 6), .MI, .ZO, .{_186}),
instr(.RESRV_SHL, ops2(.rm16, .imm_1), Op1r(0xD1, 6), .M, .Op16, .{_8086}),
instr(.RESRV_SHL, ops2(.rm32, .imm_1), Op1r(0xD1, 6), .M, .Op32, .{_386}),
instr(.RESRV_SHL, ops2(.rm64, .imm_1), Op1r(0xD1, 6), .M, .REX_W, .{x86_64}),
instr(.RESRV_SHL, ops2(.rm16, .imm8), Op1r(0xC1, 6), .MI, .Op16, .{_186}),
instr(.RESRV_SHL, ops2(.rm32, .imm8), Op1r(0xC1, 6), .MI, .Op32, .{_386}),
instr(.RESRV_SHL, ops2(.rm64, .imm8), Op1r(0xC1, 6), .MI, .REX_W, .{x86_64}),
instr(.RESRV_SHL, ops2(.rm16, .reg_cl), Op1r(0xD3, 6), .M, .Op16, .{_8086}),
instr(.RESRV_SHL, ops2(.rm32, .reg_cl), Op1r(0xD3, 6), .M, .Op32, .{_386}),
instr(.RESRV_SHL, ops2(.rm64, .reg_cl), Op1r(0xD3, 6), .M, .REX_W, .{x86_64}),
//
// Unary Group 3 /1
//
instr(.RESRV_TEST, ops2(.rm8, .imm8), Op1r(0xF6, 1), .MI, .ZO, .{_8086}),
instr(.RESRV_TEST, ops2(.rm16, .imm16), Op1r(0xF7, 1), .MI, .Op16, .{_8086}),
instr(.RESRV_TEST, ops2(.rm32, .imm32), Op1r(0xF7, 1), .MI, .Op32, .{_386}),
instr(.RESRV_TEST, ops2(.rm64, .imm32), Op1r(0xF7, 1), .MI, .REX_W, .{x86_64}),
//
// x87
//
// DCD0 - DCD7 (same as FCOM D8D0-D8D7)
instr(.RESRV_FCOM, ops2(.reg_st0, .reg_st), Op2(0xDC, 0xD0), .O2, .ZO, .{_087}),
instr(.RESRV_FCOM, ops1(.reg_st), Op2(0xDC, 0xD0), .O, .ZO, .{_087}),
instr(.RESRV_FCOM, ops0(), Op2(0xDC, 0xD1), .ZO, .ZO, .{_087}),
// DCD8 - DCDF (same as FCOMP D8D8-D8DF)
instr(.RESRV_FCOMP, ops2(.reg_st0, .reg_st), Op2(0xDC, 0xD8), .O2, .ZO, .{_087}),
instr(.RESRV_FCOMP, ops1(.reg_st), Op2(0xDC, 0xD8), .O, .ZO, .{_087}),
instr(.RESRV_FCOMP, ops0(), Op2(0xDC, 0xD9), .ZO, .ZO, .{_087}),
// DED0 - DED7 (same as FCOMP D8D8-D8DF)
instr(.RESRV_FCOMP2, ops2(.reg_st0, .reg_st), Op2(0xDE, 0xD0), .O2, .ZO, .{_087}),
instr(.RESRV_FCOMP2, ops1(.reg_st), Op2(0xDE, 0xD0), .O, .ZO, .{_087}),
instr(.RESRV_FCOMP2, ops0(), Op2(0xDE, 0xD1), .ZO, .ZO, .{_087}),
// DDC8 - DDCF (same as FXCH D9C8-D9CF)
// NOTE: 0xD0 is not an x87 escape byte (x87 uses D8-DF; `D0 C8` would be
// `ROR AL, 1`); the undocumented FXCH alias ("FXCH4") is encoded DD C8+i,
// complementing RESRV_FXCH2 ("FXCH7") at DF C8+i below.
instr(.RESRV_FXCH, ops2(.reg_st0, .reg_st), Op2(0xDD, 0xC8), .O2, .ZO, .{_087}),
instr(.RESRV_FXCH, ops1(.reg_st), Op2(0xDD, 0xC8), .O, .ZO, .{_087}),
instr(.RESRV_FXCH, ops0(), Op2(0xDD, 0xC9), .ZO, .ZO, .{_087}),
// DFC8 - DFCF (same as FXCH D9C8-D9CF)
instr(.RESRV_FXCH2, ops2(.reg_st0, .reg_st), Op2(0xDF, 0xC8), .O2, .ZO, .{_087}),
instr(.RESRV_FXCH2, ops1(.reg_st), Op2(0xDF, 0xC8), .O, .ZO, .{_087}),
instr(.RESRV_FXCH2, ops0(), Op2(0xDF, 0xC9), .ZO, .ZO, .{_087}),
// DFD0 - DFD7 (same as FSTP DDD8-DDDF)
instr(.RESRV_FSTP, ops1(.reg_st), Op2(0xDF, 0xD0), .O, .ZO, .{_087}),
instr(.RESRV_FSTP, ops2(.reg_st0, .reg_st), Op2(0xDF, 0xD0), .O2, .ZO, .{_087}),
// DFD8 - DFDF (same as FSTP DDD8-DDDF)
instr(.RESRV_FSTP2, ops1(.reg_st), Op2(0xDF, 0xD8), .O, .ZO, .{_087}),
instr(.RESRV_FSTP2, ops2(.reg_st0, .reg_st), Op2(0xDF, 0xD8), .O2, .ZO, .{_087}),
// DFC0 - DFC7 (same as FFREE with addition of an x87 POP)
instr(.FFREEP, ops1(.reg_st), Op2(0xDF, 0xC0), .O, .ZO, .{_287}),
// D9D8 - D9DF (same as FSTP DDD8-DDDF but won't cause a stack underflow exception)
// NOTE(review): the opcode ranges in the two comments above/below were swapped,
// and FSTPNOUFLOW was encoded as DD D8+i — that is the *documented* FSTP
// encoding. The no-underflow alias ("FSTP1"/FSTPNCE) is D9 D8+i.
instr(.FSTPNOUFLOW, ops1(.reg_st), Op2(0xD9, 0xD8), .O, .ZO, .{_087}),
instr(.FSTPNOUFLOW, ops2(.reg_st0, .reg_st), Op2(0xD9, 0xD8), .O2, .ZO, .{_087}),
//
// Reserved NOP
// - Reserved instructions which have no defined impact on existing architectural state.
// - All opcodes `0F 0D` and opcodes in range `0F 18` to `0F 1F` that are not defined above
// NOP - 0F 0D
// PREFETCH Op2r(0x0F, 0x0D, 0)
instr(.RESRV_NOP_0F0D_0, ops1(.rm16), Op2r(0x0F, 0x0D, 0), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F0D_0, ops1(.rm32), Op2r(0x0F, 0x0D, 0), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F0D_0, ops1(.rm64), Op2r(0x0F, 0x0D, 0), .M, .REX_W, .{x86_64}),
// PREFETCHW Op2r(0x0F, 0x0D, 1)
instr(.RESRV_NOP_0F0D_1, ops1(.rm16), Op2r(0x0F, 0x0D, 1), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F0D_1, ops1(.rm32), Op2r(0x0F, 0x0D, 1), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F0D_1, ops1(.rm64), Op2r(0x0F, 0x0D, 1), .M, .REX_W, .{x86_64}),
// PREFETCHWT1 Op2r(0x0F, 0x0D, 2)
instr(.RESRV_NOP_0F0D_2, ops1(.rm16), Op2r(0x0F, 0x0D, 2), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F0D_2, ops1(.rm32), Op2r(0x0F, 0x0D, 2), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F0D_2, ops1(.rm64), Op2r(0x0F, 0x0D, 2), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F0D_3, ops1(.rm16), Op2r(0x0F, 0x0D, 3), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F0D_3, ops1(.rm32), Op2r(0x0F, 0x0D, 3), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F0D_3, ops1(.rm64), Op2r(0x0F, 0x0D, 3), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F0D_4, ops1(.rm16), Op2r(0x0F, 0x0D, 4), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F0D_4, ops1(.rm32), Op2r(0x0F, 0x0D, 4), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F0D_4, ops1(.rm64), Op2r(0x0F, 0x0D, 4), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F0D_5, ops1(.rm16), Op2r(0x0F, 0x0D, 5), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F0D_5, ops1(.rm32), Op2r(0x0F, 0x0D, 5), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F0D_5, ops1(.rm64), Op2r(0x0F, 0x0D, 5), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F0D_6, ops1(.rm16), Op2r(0x0F, 0x0D, 6), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F0D_6, ops1(.rm32), Op2r(0x0F, 0x0D, 6), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F0D_6, ops1(.rm64), Op2r(0x0F, 0x0D, 6), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F0D_7, ops1(.rm16), Op2r(0x0F, 0x0D, 7), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F0D_7, ops1(.rm32), Op2r(0x0F, 0x0D, 7), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F0D_7, ops1(.rm64), Op2r(0x0F, 0x0D, 7), .M, .REX_W, .{x86_64}),
// NOP - 0F 18
// PREFETCHNTA Op2r(0x0F, 0x18, 0)
instr(.RESRV_NOP_0F18_0, ops1(.rm16), Op2r(0x0F, 0x18, 0), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F18_0, ops1(.rm32), Op2r(0x0F, 0x18, 0), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F18_0, ops1(.rm64), Op2r(0x0F, 0x18, 0), .M, .REX_W, .{x86_64}),
// PREFETCHT0 Op2r(0x0F, 0x18, 1)
instr(.RESRV_NOP_0F18_1, ops1(.rm16), Op2r(0x0F, 0x18, 1), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F18_1, ops1(.rm32), Op2r(0x0F, 0x18, 1), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F18_1, ops1(.rm64), Op2r(0x0F, 0x18, 1), .M, .REX_W, .{x86_64}),
// PREFETCHT1 Op2r(0x0F, 0x18, 2)
instr(.RESRV_NOP_0F18_2, ops1(.rm16), Op2r(0x0F, 0x18, 2), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F18_2, ops1(.rm32), Op2r(0x0F, 0x18, 2), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F18_2, ops1(.rm64), Op2r(0x0F, 0x18, 2), .M, .REX_W, .{x86_64}),
// PREFETCHT2 Op2r(0x0F, 0x18, 3)
instr(.RESRV_NOP_0F18_3, ops1(.rm16), Op2r(0x0F, 0x18, 3), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F18_3, ops1(.rm32), Op2r(0x0F, 0x18, 3), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F18_3, ops1(.rm64), Op2r(0x0F, 0x18, 3), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F18_4, ops1(.rm16), Op2r(0x0F, 0x18, 4), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F18_4, ops1(.rm32), Op2r(0x0F, 0x18, 4), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F18_4, ops1(.rm64), Op2r(0x0F, 0x18, 4), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F18_5, ops1(.rm16), Op2r(0x0F, 0x18, 5), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F18_5, ops1(.rm32), Op2r(0x0F, 0x18, 5), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F18_5, ops1(.rm64), Op2r(0x0F, 0x18, 5), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F18_6, ops1(.rm16), Op2r(0x0F, 0x18, 6), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F18_6, ops1(.rm32), Op2r(0x0F, 0x18, 6), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F18_6, ops1(.rm64), Op2r(0x0F, 0x18, 6), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F18_7, ops1(.rm16), Op2r(0x0F, 0x18, 7), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F18_7, ops1(.rm32), Op2r(0x0F, 0x18, 7), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F18_7, ops1(.rm64), Op2r(0x0F, 0x18, 7), .M, .REX_W, .{x86_64}),
// NOP - 0F 19
instr(.RESRV_NOP_0F19_0, ops1(.rm16), Op2r(0x0F, 0x19, 0), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F19_0, ops1(.rm32), Op2r(0x0F, 0x19, 0), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F19_0, ops1(.rm64), Op2r(0x0F, 0x19, 0), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F19_1, ops1(.rm16), Op2r(0x0F, 0x19, 1), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F19_1, ops1(.rm32), Op2r(0x0F, 0x19, 1), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F19_1, ops1(.rm64), Op2r(0x0F, 0x19, 1), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F19_2, ops1(.rm16), Op2r(0x0F, 0x19, 2), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F19_2, ops1(.rm32), Op2r(0x0F, 0x19, 2), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F19_2, ops1(.rm64), Op2r(0x0F, 0x19, 2), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F19_3, ops1(.rm16), Op2r(0x0F, 0x19, 3), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F19_3, ops1(.rm32), Op2r(0x0F, 0x19, 3), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F19_3, ops1(.rm64), Op2r(0x0F, 0x19, 3), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F19_4, ops1(.rm16), Op2r(0x0F, 0x19, 4), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F19_4, ops1(.rm32), Op2r(0x0F, 0x19, 4), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F19_4, ops1(.rm64), Op2r(0x0F, 0x19, 4), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F19_5, ops1(.rm16), Op2r(0x0F, 0x19, 5), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F19_5, ops1(.rm32), Op2r(0x0F, 0x19, 5), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F19_5, ops1(.rm64), Op2r(0x0F, 0x19, 5), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F19_6, ops1(.rm16), Op2r(0x0F, 0x19, 6), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F19_6, ops1(.rm32), Op2r(0x0F, 0x19, 6), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F19_6, ops1(.rm64), Op2r(0x0F, 0x19, 6), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F19_7, ops1(.rm16), Op2r(0x0F, 0x19, 7), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F19_7, ops1(.rm32), Op2r(0x0F, 0x19, 7), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F19_7, ops1(.rm64), Op2r(0x0F, 0x19, 7), .M, .REX_W, .{x86_64}),
// NOP - 0F 1A
instr(.RESRV_NOP_0F1A_0, ops1(.rm16), Op2r(0x0F, 0x1A, 0), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1A_0, ops1(.rm32), Op2r(0x0F, 0x1A, 0), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1A_0, ops1(.rm64), Op2r(0x0F, 0x1A, 0), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1A_1, ops1(.rm16), Op2r(0x0F, 0x1A, 1), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1A_1, ops1(.rm32), Op2r(0x0F, 0x1A, 1), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1A_1, ops1(.rm64), Op2r(0x0F, 0x1A, 1), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1A_2, ops1(.rm16), Op2r(0x0F, 0x1A, 2), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1A_2, ops1(.rm32), Op2r(0x0F, 0x1A, 2), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1A_2, ops1(.rm64), Op2r(0x0F, 0x1A, 2), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1A_3, ops1(.rm16), Op2r(0x0F, 0x1A, 3), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1A_3, ops1(.rm32), Op2r(0x0F, 0x1A, 3), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1A_3, ops1(.rm64), Op2r(0x0F, 0x1A, 3), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1A_4, ops1(.rm16), Op2r(0x0F, 0x1A, 4), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1A_4, ops1(.rm32), Op2r(0x0F, 0x1A, 4), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1A_4, ops1(.rm64), Op2r(0x0F, 0x1A, 4), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1A_5, ops1(.rm16), Op2r(0x0F, 0x1A, 5), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1A_5, ops1(.rm32), Op2r(0x0F, 0x1A, 5), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1A_5, ops1(.rm64), Op2r(0x0F, 0x1A, 5), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1A_6, ops1(.rm16), Op2r(0x0F, 0x1A, 6), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1A_6, ops1(.rm32), Op2r(0x0F, 0x1A, 6), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1A_6, ops1(.rm64), Op2r(0x0F, 0x1A, 6), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1A_7, ops1(.rm16), Op2r(0x0F, 0x1A, 7), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1A_7, ops1(.rm32), Op2r(0x0F, 0x1A, 7), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1A_7, ops1(.rm64), Op2r(0x0F, 0x1A, 7), .M, .REX_W, .{x86_64}),
// NOP - 0F 1B
instr(.RESRV_NOP_0F1B_0, ops1(.rm16), Op2r(0x0F, 0x1B, 0), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1B_0, ops1(.rm32), Op2r(0x0F, 0x1B, 0), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1B_0, ops1(.rm64), Op2r(0x0F, 0x1B, 0), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1B_1, ops1(.rm16), Op2r(0x0F, 0x1B, 1), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1B_1, ops1(.rm32), Op2r(0x0F, 0x1B, 1), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1B_1, ops1(.rm64), Op2r(0x0F, 0x1B, 1), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1B_2, ops1(.rm16), Op2r(0x0F, 0x1B, 2), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1B_2, ops1(.rm32), Op2r(0x0F, 0x1B, 2), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1B_2, ops1(.rm64), Op2r(0x0F, 0x1B, 2), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1B_3, ops1(.rm16), Op2r(0x0F, 0x1B, 3), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1B_3, ops1(.rm32), Op2r(0x0F, 0x1B, 3), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1B_3, ops1(.rm64), Op2r(0x0F, 0x1B, 3), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1B_4, ops1(.rm16), Op2r(0x0F, 0x1B, 4), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1B_4, ops1(.rm32), Op2r(0x0F, 0x1B, 4), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1B_4, ops1(.rm64), Op2r(0x0F, 0x1B, 4), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1B_5, ops1(.rm16), Op2r(0x0F, 0x1B, 5), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1B_5, ops1(.rm32), Op2r(0x0F, 0x1B, 5), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1B_5, ops1(.rm64), Op2r(0x0F, 0x1B, 5), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1B_6, ops1(.rm16), Op2r(0x0F, 0x1B, 6), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1B_6, ops1(.rm32), Op2r(0x0F, 0x1B, 6), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1B_6, ops1(.rm64), Op2r(0x0F, 0x1B, 6), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1B_7, ops1(.rm16), Op2r(0x0F, 0x1B, 7), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1B_7, ops1(.rm32), Op2r(0x0F, 0x1B, 7), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1B_7, ops1(.rm64), Op2r(0x0F, 0x1B, 7), .M, .REX_W, .{x86_64}),
// NOP - 0F 1C
// CLDEMOTE preOp2r(._NP, 0x0F, 0x1C, 0)
instr(.RESRV_NOP_0F1C_0, ops1(.rm16), Op2r(0x0F, 0x1C, 0), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1C_0, ops1(.rm32), Op2r(0x0F, 0x1C, 0), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1C_0, ops1(.rm64), Op2r(0x0F, 0x1C, 0), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1C_1, ops1(.rm16), Op2r(0x0F, 0x1C, 1), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1C_1, ops1(.rm32), Op2r(0x0F, 0x1C, 1), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1C_1, ops1(.rm64), Op2r(0x0F, 0x1C, 1), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1C_2, ops1(.rm16), Op2r(0x0F, 0x1C, 2), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1C_2, ops1(.rm32), Op2r(0x0F, 0x1C, 2), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1C_2, ops1(.rm64), Op2r(0x0F, 0x1C, 2), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1C_3, ops1(.rm16), Op2r(0x0F, 0x1C, 3), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1C_3, ops1(.rm32), Op2r(0x0F, 0x1C, 3), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1C_3, ops1(.rm64), Op2r(0x0F, 0x1C, 3), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1C_4, ops1(.rm16), Op2r(0x0F, 0x1C, 4), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1C_4, ops1(.rm32), Op2r(0x0F, 0x1C, 4), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1C_4, ops1(.rm64), Op2r(0x0F, 0x1C, 4), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1C_5, ops1(.rm16), Op2r(0x0F, 0x1C, 5), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1C_5, ops1(.rm32), Op2r(0x0F, 0x1C, 5), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1C_5, ops1(.rm64), Op2r(0x0F, 0x1C, 5), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1C_6, ops1(.rm16), Op2r(0x0F, 0x1C, 6), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1C_6, ops1(.rm32), Op2r(0x0F, 0x1C, 6), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1C_6, ops1(.rm64), Op2r(0x0F, 0x1C, 6), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1C_7, ops1(.rm16), Op2r(0x0F, 0x1C, 7), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1C_7, ops1(.rm32), Op2r(0x0F, 0x1C, 7), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1C_7, ops1(.rm64), Op2r(0x0F, 0x1C, 7), .M, .REX_W, .{x86_64}),
// NOP - 0F 1D
instr(.RESRV_NOP_0F1D_0, ops1(.rm16), Op2r(0x0F, 0x1D, 0), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1D_0, ops1(.rm32), Op2r(0x0F, 0x1D, 0), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1D_0, ops1(.rm64), Op2r(0x0F, 0x1D, 0), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1D_1, ops1(.rm16), Op2r(0x0F, 0x1D, 1), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1D_1, ops1(.rm32), Op2r(0x0F, 0x1D, 1), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1D_1, ops1(.rm64), Op2r(0x0F, 0x1D, 1), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1D_2, ops1(.rm16), Op2r(0x0F, 0x1D, 2), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1D_2, ops1(.rm32), Op2r(0x0F, 0x1D, 2), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1D_2, ops1(.rm64), Op2r(0x0F, 0x1D, 2), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1D_3, ops1(.rm16), Op2r(0x0F, 0x1D, 3), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1D_3, ops1(.rm32), Op2r(0x0F, 0x1D, 3), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1D_3, ops1(.rm64), Op2r(0x0F, 0x1D, 3), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1D_4, ops1(.rm16), Op2r(0x0F, 0x1D, 4), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1D_4, ops1(.rm32), Op2r(0x0F, 0x1D, 4), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1D_4, ops1(.rm64), Op2r(0x0F, 0x1D, 4), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1D_5, ops1(.rm16), Op2r(0x0F, 0x1D, 5), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1D_5, ops1(.rm32), Op2r(0x0F, 0x1D, 5), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1D_5, ops1(.rm64), Op2r(0x0F, 0x1D, 5), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1D_6, ops1(.rm16), Op2r(0x0F, 0x1D, 6), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1D_6, ops1(.rm32), Op2r(0x0F, 0x1D, 6), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1D_6, ops1(.rm64), Op2r(0x0F, 0x1D, 6), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1D_7, ops1(.rm16), Op2r(0x0F, 0x1D, 7), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1D_7, ops1(.rm32), Op2r(0x0F, 0x1D, 7), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1D_7, ops1(.rm64), Op2r(0x0F, 0x1D, 7), .M, .REX_W, .{x86_64}),
// NOP - 0F 1E
instr(.RESRV_NOP_0F1E_0, ops1(.rm16), Op2r(0x0F, 0x1E, 0), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1E_0, ops1(.rm32), Op2r(0x0F, 0x1E, 0), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1E_0, ops1(.rm64), Op2r(0x0F, 0x1E, 0), .M, .REX_W, .{x86_64}),
// RDSSPD preOp2r(._F3, 0x0F, 0x1E, 1)
// RDSSPQ preOp2r(._F3, 0x0F, 0x1E, 1)
instr(.RESRV_NOP_0F1E_1, ops1(.rm16), Op2r(0x0F, 0x1E, 1), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1E_1, ops1(.rm32), Op2r(0x0F, 0x1E, 1), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1E_1, ops1(.rm64), Op2r(0x0F, 0x1E, 1), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1E_2, ops1(.rm16), Op2r(0x0F, 0x1E, 2), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1E_2, ops1(.rm32), Op2r(0x0F, 0x1E, 2), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1E_2, ops1(.rm64), Op2r(0x0F, 0x1E, 2), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1E_3, ops1(.rm16), Op2r(0x0F, 0x1E, 3), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1E_3, ops1(.rm32), Op2r(0x0F, 0x1E, 3), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1E_3, ops1(.rm64), Op2r(0x0F, 0x1E, 3), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1E_4, ops1(.rm16), Op2r(0x0F, 0x1E, 4), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1E_4, ops1(.rm32), Op2r(0x0F, 0x1E, 4), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1E_4, ops1(.rm64), Op2r(0x0F, 0x1E, 4), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1E_5, ops1(.rm16), Op2r(0x0F, 0x1E, 5), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1E_5, ops1(.rm32), Op2r(0x0F, 0x1E, 5), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1E_5, ops1(.rm64), Op2r(0x0F, 0x1E, 5), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1E_6, ops1(.rm16), Op2r(0x0F, 0x1E, 6), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1E_6, ops1(.rm32), Op2r(0x0F, 0x1E, 6), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1E_6, ops1(.rm64), Op2r(0x0F, 0x1E, 6), .M, .REX_W, .{x86_64}),
// ENDBR32 preOp3(._F3, 0x0F, 0x1E, 0xFB)
// ENDBR64 preOp3(._F3, 0x0F, 0x1E, 0xFA)
instr(.RESRV_NOP_0F1E_7, ops1(.rm16), Op2r(0x0F, 0x1E, 7), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1E_7, ops1(.rm32), Op2r(0x0F, 0x1E, 7), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1E_7, ops1(.rm64), Op2r(0x0F, 0x1E, 7), .M, .REX_W, .{x86_64}),
// NOP - 0F 1F
instr(.RESRV_NOP_0F1F_0, ops1(.rm16), Op2r(0x0F, 0x1F, 0), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1F_0, ops1(.rm32), Op2r(0x0F, 0x1F, 0), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1F_0, ops1(.rm64), Op2r(0x0F, 0x1F, 0), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1F_1, ops1(.rm16), Op2r(0x0F, 0x1F, 1), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1F_1, ops1(.rm32), Op2r(0x0F, 0x1F, 1), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1F_1, ops1(.rm64), Op2r(0x0F, 0x1F, 1), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1F_2, ops1(.rm16), Op2r(0x0F, 0x1F, 2), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1F_2, ops1(.rm32), Op2r(0x0F, 0x1F, 2), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1F_2, ops1(.rm64), Op2r(0x0F, 0x1F, 2), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1F_3, ops1(.rm16), Op2r(0x0F, 0x1F, 3), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1F_3, ops1(.rm32), Op2r(0x0F, 0x1F, 3), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1F_3, ops1(.rm64), Op2r(0x0F, 0x1F, 3), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1F_4, ops1(.rm16), Op2r(0x0F, 0x1F, 4), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1F_4, ops1(.rm32), Op2r(0x0F, 0x1F, 4), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1F_4, ops1(.rm64), Op2r(0x0F, 0x1F, 4), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1F_5, ops1(.rm16), Op2r(0x0F, 0x1F, 5), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1F_5, ops1(.rm32), Op2r(0x0F, 0x1F, 5), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1F_5, ops1(.rm64), Op2r(0x0F, 0x1F, 5), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1F_6, ops1(.rm16), Op2r(0x0F, 0x1F, 6), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1F_6, ops1(.rm32), Op2r(0x0F, 0x1F, 6), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1F_6, ops1(.rm64), Op2r(0x0F, 0x1F, 6), .M, .REX_W, .{x86_64}),
instr(.RESRV_NOP_0F1F_7, ops1(.rm16), Op2r(0x0F, 0x1F, 7), .M, .Op16, .{P6}),
instr(.RESRV_NOP_0F1F_7, ops1(.rm32), Op2r(0x0F, 0x1F, 7), .M, .Op32, .{P6}),
instr(.RESRV_NOP_0F1F_7, ops1(.rm64), Op2r(0x0F, 0x1F, 7), .M, .REX_W, .{x86_64}),
//
// Legacy, obsolete and undocumented Opcodes
//
// STOREALL
// see: https://web.archive.org/save/http://www.vcfed.org/forum/showthread.php?70386-I-found-the-SAVEALL-opcode/page2
instr(.STOREALL, ops0(), Op2(0x0F, 0x04), .ZO, .ZO, .{ cpu._286_Legacy, No64 }),
// LOADALL
instr(.LOADALL, ops0(), Op2(0x0F, 0x05), .ZO, .ZO, .{ cpu._286_Legacy, No64 }),
instr(.LOADALL, ops0(), Op2(0x0F, 0x07), .ZO, .ZO, .{ cpu._386_Legacy, No64 }),
instr(.LOADALLD, ops0(), Op2(0x0F, 0x07), .ZO, .ZO, .{ cpu._386_Legacy, No64 }),
// UMOV
instr(.UMOV, ops2(.rm8, .reg8), Op2(0x0F, 0x10), .MR, .ZO, .{ _386_Legacy, No64 }),
instr(.UMOV, ops2(.rm16, .reg16), Op2(0x0F, 0x11), .MR, .Op16, .{ _386_Legacy, No64 }),
instr(.UMOV, ops2(.rm32, .reg32), Op2(0x0F, 0x11), .MR, .Op32, .{ _386_Legacy, No64 }),
//
instr(.UMOV, ops2(.reg8, .rm8), Op2(0x0F, 0x12), .RM, .ZO, .{ _386_Legacy, No64 }),
instr(.UMOV, ops2(.reg16, .rm16), Op2(0x0F, 0x13), .RM, .Op32, .{ _386_Legacy, No64 }),
instr(.UMOV, ops2(.reg32, .rm32), Op2(0x0F, 0x13), .RM, .Op32, .{ _386_Legacy, No64 }),
//
instr(.UMOV, ops2(.rm8, .reg8), Op2(0x0F, 0x10), .MR, .ZO, .{ _486_Legacy, No64 }),
instr(.UMOV, ops2(.rm16, .reg16), Op2(0x0F, 0x11), .MR, .Op16, .{ _486_Legacy, No64 }),
instr(.UMOV, ops2(.rm32, .reg32), Op2(0x0F, 0x11), .MR, .Op32, .{ _486_Legacy, No64 }),
//
instr(.UMOV, ops2(.reg8, .rm8), Op2(0x0F, 0x12), .RM, .ZO, .{ _486_Legacy, No64 }),
instr(.UMOV, ops2(.reg16, .rm16), Op2(0x0F, 0x13), .RM, .Op32, .{ _486_Legacy, No64 }),
instr(.UMOV, ops2(.reg32, .rm32), Op2(0x0F, 0x13), .RM, .Op32, .{ _486_Legacy, No64 }),
// Dummy sigil value that marks the end of the table, use this to avoid
// extra bounds checking when scanning this table.
instr(._mnemonic_final, ops0(), Opcode{}, .ZO, .ZO, .{}),
};
|
src/x86/database.zig
|
const std = @import("std");
const time = std.time;
const Timer = time.Timer;
const hash_map = std.hash_map;
// Shared monotonic timer used by beginMeasure/endMeasure; started once in main().
var timer: Timer = undefined;
/// Benchmarks StringHashMap insertion throughput: for a geometrically growing
/// number of elements, inserts random string keys and prints CSV rows of
/// `num_elements,nanoseconds_per_element` (best of 5 attempts) to stdout.
pub fn main() !void {
    const stdout = std.io.getStdOut().outStream();
    timer = try Timer.start();
    const allocator = std.heap.c_allocator;
    const buf_size = 256;
    var buf: [buf_size]u8 = undefined;
    var numInsertions: usize = 1;
    // Upper bound for the benchmark; the loop stops before exceeding it.
    const maxInsertions: usize = 10 * 1000 * 1000;
    try stdout.print("num_elements,nanoseconds_per_element\n", .{});
    while (numInsertions < maxInsertions) {
        var fastest: u64 = std.math.maxInt(u64);
        var i: usize = 0;
        // Take the best of several attempts to reduce scheduling noise.
        const numAttempts = 5;
        while (i < numAttempts) : (i += 1) {
            var map = hash_map.StringHashMap(i32).init(allocator);
            defer map.deinit();
            // Fixed seed so every attempt/run inserts the same key sequence.
            var r = std.rand.DefaultPrng.init(213);
            // Allocate all the strings upfront so the allocation doesn't get included in the benchmark
            var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
            defer arena.deinit();
            var strings: [][]const u8 = try arena.allocator.alloc([]const u8, numInsertions);
            var n: usize = 0;
            while (n < numInsertions) : (n += 1) {
                r.random.bytes(&buf);
                // Keys are 1..buf_size-1 random bytes long.
                const end_i = r.random.intRangeLessThan(usize, 1, buf_size);
                const str = try arena.allocator.dupe(u8, buf[0..end_i]);
                strings[n] = str;
            }
            beginMeasure();
            n = 0;
            while (n < numInsertions) : (n += 1) {
                // Values are irrelevant for this benchmark; only key insertion is timed.
                _ = try map.put(strings[n], undefined);
            }
            const ns_per_element = endMeasure(numInsertions);
            if (ns_per_element < fastest) {
                fastest = ns_per_element;
            }
        }
        try stdout.print("{},{}\n", .{ numInsertions, fastest });
        numInsertions = nextInterval(numInsertions);
    }
}
/// Restarts the global timer; pair with endMeasure() to time a code section.
fn beginMeasure() void {
    timer.reset();
}
/// Ends the measurement started by beginMeasure() and returns the average
/// number of nanoseconds per iteration (integer division, truncating).
fn endMeasure(iterations: usize) u64 {
    return timer.read() / iterations;
}
/// Grows the element count for the next benchmark round by roughly 25%
/// (computed in floating point, truncated back to an integer).
fn nextInterval(x: usize) usize {
    const grown = @intToFloat(f64, x + 1) * 1.25;
    return @floatToInt(usize, grown);
}
|
insert-strings.zig
|
const std = @import("std");
const clap = @import("clap");
const fs = std.fs;
const Elf = @import("Elf.zig");
// Process-wide general purpose allocator; `gpa` is the *Allocator handed to Elf.
var global_alloc = std.heap.GeneralPurposeAllocator(.{}){};
const gpa = &global_alloc.allocator;
/// Entry point of `zelf`: parses the command line, opens the given ELF file
/// and prints the sections requested by the flags (or usage on bad input).
pub fn main() anyerror!void {
    const stderr = std.io.getStdErr().writer();
    const stdout = std.io.getStdOut().writer();
    // Command-line interface definition, parsed at comptime by zig-clap.
    const params = comptime [_]clap.Param(clap.Help){
        clap.parseParam("--help Display this help and exit") catch unreachable,
        clap.parseParam("-a, --all Equivalent to having all flags on") catch unreachable,
        clap.parseParam("-h, --file-header Display the ELF file header") catch unreachable,
        clap.parseParam("-l, --program-headers Display the programs' headers") catch unreachable,
        clap.parseParam("-S, --section-headers Display the sections' header") catch unreachable,
        clap.parseParam("-s, --symbols Display the symbol table") catch unreachable,
        clap.parseParam("-r, --relocs Display the relocations (if present)") catch unreachable,
        clap.parseParam("<FILE>") catch unreachable,
    };
    var args = try clap.parse(clap.Help, &params, .{});
    defer args.deinit();
    if (args.flag("--help")) {
        return printUsageWithHelp(&params, stderr);
    }
    const positionals = args.positionals();
    if (positionals.len == 0) {
        return stderr.print("missing positional argument <FILE>...\n", .{});
    }
    // Only the first positional is used; extra positionals are ignored.
    const filename = positionals[0];
    const file = try fs.cwd().openFile(filename, .{});
    defer file.close();
    var elf = Elf.init(gpa, file);
    defer elf.deinit();
    try elf.parseMetadata();
    // Flag dispatch: --all wins over individual flags; with several individual
    // flags only the first match in this chain is printed.
    if (args.flag("--all")) {
        try elf.printHeader(stdout);
        try stdout.writeAll("\n");
        try elf.printShdrs(stdout);
        try stdout.writeAll("\n");
        try elf.printPhdrs(stdout);
        try stdout.writeAll("\n");
        try elf.printRelocs(stdout);
        try stdout.writeAll("\n");
        try elf.printSymtabs(stdout);
    } else if (args.flag("--file-header")) {
        try elf.printHeader(stdout);
    } else if (args.flag("--section-headers")) {
        try elf.printShdrs(stdout);
    } else if (args.flag("--program-headers")) {
        try elf.printPhdrs(stdout);
    } else if (args.flag("--relocs")) {
        try elf.printRelocs(stdout);
    } else if (args.flag("--symbols")) {
        try elf.printSymtabs(stdout);
    } else {
        // No flag given: show usage.
        return printUsageWithHelp(&params, stderr);
    }
}
/// Writes the program usage line followed by the full option help text.
fn printUsageWithHelp(comptime params: []const clap.Param(clap.Help), writer: anytype) !void {
    try writer.writeAll("zelf ");
    try clap.usage(writer, params);
    try writer.writeAll("\n");
    try clap.help(writer, params);
}
|
src/main.zig
|
const builtin = @import("builtin");
const clap = @import("clap");
const folders = @import("folders");
const std = @import("std");
const debug = std.debug;
const fmt = std.fmt;
const fs = std.fs;
const heap = std.heap;
const log = std.log;
const math = std.math;
const mem = std.mem;
const os = std.os;
pub const bit = @import("bit.zig");
pub const escape = @import("escape.zig");
pub const glob = @import("glob.zig");
pub const io = @import("io.zig");
pub const random = @import("random.zig");
pub const set = @import("set.zig");
pub const testing = @import("testing.zig");
pub const unicode = @import("unicode.zig");
pub const unsafe = @import("unsafe.zig");
// Reference all public declarations so their tests are compiled and run.
test {
    std.testing.refAllDecls(@This());
}
/// Returns the seed given via `--seed`, parsed as base-10 u64.
/// When the option is absent, a cryptographically random seed is returned.
/// Returns error.InvalidSeed (after logging) when the value does not parse.
pub fn getSeed(args: anytype) !u64 {
    if (args.option("--seed")) |seed_str| {
        return fmt.parseUnsigned(u64, seed_str, 10) catch |err| {
            log.err("'{s}' could not be parsed as a number to --seed: {}", .{ seed_str, err });
            return error.InvalidSeed;
        };
    }
    return std.crypto.random.int(u64);
}
/// Generates a `main` function for a CLI `Program` type. The Program must
/// declare `params` (clap params), `version`, `description`, `init` and `run`.
/// The generated main parses arguments, handles --help/--version, and then
/// runs the program with buffered stdout.
pub fn generateMain(comptime Program: type) fn () anyerror!void {
    return struct {
        fn main() anyerror!void {
            // No need to deinit arena. The program will exit when this function
            // ends and all the memory will be freed by the os. This saves a bit
            // of shutdown time.
            var arena = heap.ArenaAllocator.init(heap.page_allocator);
            var diag = clap.Diagnostic{};
            var args = clap.parse(clap.Help, Program.params, .{ .diagnostic = &diag }) catch |err| {
                // Report the parse error plus usage on stderr, then fail.
                var stderr = std.io.bufferedWriter(std.io.getStdErr().writer());
                diag.report(stderr.writer(), err) catch {};
                usage(stderr.writer()) catch {};
                stderr.flush() catch {};
                return error.InvalidArgument;
            };
            if (args.flag("--help")) {
                var stdout = std.io.bufferedWriter(std.io.getStdOut().writer());
                try usage(stdout.writer());
                try stdout.flush();
                return;
            }
            if (args.flag("--version")) {
                var stdout = std.io.bufferedWriter(std.io.getStdOut().writer());
                try stdout.writer().writeAll(Program.version);
                try stdout.writer().writeAll("\n");
                try stdout.flush();
                return;
            }
            var stdout = std.io.bufferedWriter(std.io.getStdOut().writer());
            var program = try Program.init(arena.allocator(), args);
            // The program reads stdin and writes to the buffered stdout.
            try program.run(std.fs.File.Reader, @TypeOf(stdout.writer()), .{
                .in = std.io.getStdIn().reader(),
                .out = stdout.writer(),
            });
            try stdout.flush();
        }
        // Prints "Usage: <Program> <clap usage>", the description and the option help.
        fn usage(writer: anytype) !void {
            try writer.writeAll("Usage: ");
            try writer.writeAll(@typeName(Program));
            try writer.writeAll(" ");
            try clap.usage(writer, Program.params);
            try writer.writeAll("\n");
            try writer.writeAll(Program.description);
            try writer.writeAll("\nOptions:\n");
            try clap.help(writer, Program.params);
        }
    }.main;
}
/// Returns a struct bundling an input reader and an output writer,
/// re-exporting their types as `Reader`/`Writer` for generic code.
pub fn CustomStdIoStreams(comptime _Reader: type, comptime _Writer: type) type {
    return struct {
        pub const Reader = _Reader;
        pub const Writer = _Writer;
        // stdin-like stream
        in: Reader,
        // stdout-like stream
        out: Writer,
    };
}
/// Returns the index that `ptr` occupies within `slice`.
/// `ptr` must point into `slice`; asserted in safe builds.
pub fn indexOfPtr(comptime T: type, slice: []const T, ptr: *const T) usize {
    const offset_bytes = @ptrToInt(ptr) - @ptrToInt(slice.ptr);
    const index = @divExact(offset_bytes, @sizeOf(T));
    debug.assert(index < slice.len);
    return index;
}
// Checks that every element's address maps back to its own index.
test "indexOfPtr" {
    const arr = "abcde";
    for (arr) |*item, i| {
        try std.testing.expectEqual(i, indexOfPtr(u8, arr, item));
    }
}
/// A datastructure representing an array that is either terminated at
/// `sentinel` or at `n`.
pub fn TerminatedArray(comptime n: usize, comptime T: type, comptime sentinel: T) type {
    return extern struct {
        // Fixed-size backing storage; a `sentinel` element, if present,
        // marks the logical end of the data.
        data: [n]T,
        /// Returns the slice of `data` up to (excluding) the first `sentinel`,
        /// or all `n` elements when no sentinel is present.
        pub fn span(array: anytype) mem.Span(@TypeOf(&array.data)) {
            const i = mem.indexOfScalar(T, &array.data, sentinel) orelse return &array.data;
            return array.data[0..i];
        }
    };
}
/// A fixed-size, stack-allocated buffer big enough to hold any OS path.
pub const Path = std.BoundedArray(u8, fs.MAX_PATH_BYTES);

/// Path helpers that produce stack-allocated `Path`s instead of heap allocations.
pub const path = struct {
    /// Joins `paths` with the platform separator into a stack-allocated `Path`.
    /// The failing allocator guarantees the result fits in MAX_PATH_BYTES.
    pub fn join(paths: []const []const u8) Path {
        var res: Path = undefined;
        // FixedBufferAllocator + FailingAllocator are used here to ensure that a max
        // of MAX_PATH_BYTES is allocated, and that only one allocation occures. This
        // ensures that only a valid path has been allocated into res.
        var fba = heap.FixedBufferAllocator.init(&res.buffer);
        var failing = std.testing.FailingAllocator.init(fba.allocator(), 1);
        const res_slice = fs.path.join(failing.allocator(), paths) catch unreachable;
        res.len = res_slice.len;
        return res;
    }
    /// Resolves `paths` (like `fs.path.resolve`) into a stack-allocated `Path`.
    /// Errors other than OutOfMemory (e.g. from querying the cwd) are returned.
    pub fn resolve(paths: []const []const u8) !Path {
        var res: Path = undefined;
        // FixedBufferAllocator + FailingAllocator are used here to ensure that a max
        // of MAX_PATH_BYTES is allocated, and that only one allocation occures. This
        // ensures that only a valid path has been allocated into res.
        var fba = heap.FixedBufferAllocator.init(&res.buffer);
        // Fixed: previously used `debug.FailingAllocator.init(&fba.allocator, 1)`,
        // which is the wrong namespace (FailingAllocator lives in std.testing) and
        // the stale field-style allocator access; now matches `join` above.
        var failing = std.testing.FailingAllocator.init(fba.allocator(), 1);
        const res_slice = fs.path.resolve(failing.allocator(), paths) catch |err| switch (err) {
            error.OutOfMemory => unreachable,
            else => |e| return e,
        };
        res.len = res_slice.len;
        return res;
    }
};
/// Directory helpers returning stack-allocated `Path`s.
pub const dir = struct {
    /// Returns the directory containing the running executable.
    pub fn selfExeDir() !Path {
        var res: Path = undefined;
        const res_slice = try fs.selfExeDirPath(&res.buffer);
        res.len = res_slice.len;
        return res;
    }
    /// Returns the current working directory.
    pub fn cwd() !Path {
        var res: Path = undefined;
        const res_slice = try os.getcwd(&res.buffer);
        res.len = res_slice.len;
        return res;
    }
    /// Returns the location of the known folder `f` as a `Path`.
    /// Returns error.NotAvailable when the folder is undefined on this system.
    pub fn folder(f: folders.KnownFolder) !Path {
        // Scratch buffer for the lookup; presumably 2x MAX_PATH_BYTES so the
        // lookup can make an intermediate allocation besides the result — TODO confirm.
        var buf: [fs.MAX_PATH_BYTES * 2]u8 = undefined;
        var fba = heap.FixedBufferAllocator.init(&buf);
        const res = (try folders.getPath(fba.allocator(), f)) orelse
            return error.NotAvailable;
        // Copy out of the scratch buffer into a stack-allocated Path.
        return Path.fromSlice(res);
    }
};
|
src/common/util.zig
|
const std = @import("std");
const Type = @import("Type.zig");
const Tokenizer = @import("Tokenizer.zig");
const Compilation = @import("Compilation.zig");
const Source = @import("Source.zig");
const Tree = @This();
pub const Token = struct {
    /// Kind of the token (re-exported from the tokenizer).
    id: Id,
    /// This location contains the actual token slice which might be generated.
    /// If it is generated then the next location will be the location of the concatenation.
    /// Any subsequent locations mark where the token was expanded from.
    loc: Source.Location,
    /// Struct-of-arrays storage for tokens.
    pub const List = std.MultiArrayList(Token);
    pub const Id = Tokenizer.Token.Id;
};
/// Index into the token list.
pub const TokenIndex = u32;
/// Index into `nodes`; index 0 is the `invalid` node.
pub const NodeIndex = u32;
// The compilation this tree belongs to; owns the allocator used by `deinit`.
comp: *Compilation,
// Arena holding memory whose lifetime matches the tree.
arena: std.heap.ArenaAllocator,
// Source bytes generated during preprocessing (referenced by token locations).
generated: []const u8,
tokens: Token.List.Slice,
nodes: Node.List.Slice,
// Extra child-node storage referenced by nodes (see the per-tag doc comments).
data: []const NodeIndex,
// Top-level declarations of the translation unit.
root_decls: []const NodeIndex,
/// Frees all memory owned by the tree; the tree is invalid afterwards.
pub fn deinit(tree: *Tree) void {
    tree.comp.gpa.free(tree.root_decls);
    tree.comp.gpa.free(tree.data);
    tree.nodes.deinit(tree.comp.gpa);
    tree.arena.deinit();
}
/// A generic struct capable of representing all Decl, Stmt and Expr.
pub const Node = struct {
    tag: Tag,
    // Type of the node; statements/declarations default to void.
    ty: Type = .{ .specifier = .void },
    data: Data = .{},
    /// Two generic operands whose meaning depends on `tag`
    /// (see the per-tag doc comments on `Tag`).
    pub const Data = struct {
        first: NodeIndex = 0,
        second: NodeIndex = 0,
    };
    /// Struct-of-arrays storage for nodes.
    pub const List = std.MultiArrayList(Node);
};
pub const Tag = enum(u8) {
    /// Only appears at index 0 and reaching it is always a result of a bug.
    invalid,
    // ====== Decl ======
    // function prototype
    fn_proto,
    static_fn_proto,
    inline_fn_proto,
    inline_static_fn_proto,
    noreturn_fn_proto,
    noreturn_static_fn_proto,
    noreturn_inline_fn_proto,
    noreturn_inline_static_fn_proto,
    // function definition
    fn_def,
    static_fn_def,
    inline_fn_def,
    inline_static_fn_def,
    noreturn_fn_def,
    noreturn_static_fn_def,
    noreturn_inline_fn_def,
    noreturn_inline_static_fn_def,
    // a parameter
    param_decl,
    register_param_decl,
    // variable declaration
    @"var",
    extern_var,
    static_var,
    register_var,
    threadlocal_var,
    threadlocal_extern_var,
    threadlocal_static_var,
    // typedef declaration
    typedef,
    // container type forward declarations
    struct_forward,
    union_forward,
    // NOTE(review): misspelled tag name kept as-is; renaming the public tag
    // would break external users switching on it.
    enum_forawrd,
    // container type definitions
    struct_def,
    union_def,
    enum_def,
    // ====== Stmt ======
    labeled_stmt,
    /// { first; second; } first and second may be null
    compound_stmt_two,
    /// { data }
    compound_stmt,
    /// if (first) data[second] else data[second+1];
    if_then_else_stmt,
    /// if (first); else second;
    if_else_stmt,
    /// if (first) second; second may be null
    if_then_stmt,
    /// switch (first) second
    switch_stmt,
    /// case first: second
    case_stmt,
    /// default: first
    default_stmt,
    /// while (first) second
    while_stmt,
    /// do second while(first);
    do_while_stmt,
    /// for (data[..]; data[len-3]; data[len-2]) data[len-1]
    for_decl_stmt,
    /// for (;;;) first
    forever_stmt,
    /// for (data[first]; data[first+1]; data[first+2]) second
    for_stmt,
    /// goto first;
    goto_stmt,
    // continue; first and second unused
    continue_stmt,
    // break; first and second unused
    break_stmt,
    /// return first; first may be null
    return_stmt,
    // ====== Expr ======
    /// lhs , rhs
    comma_expr,
    /// lhs ?: rhs
    binary_cond_expr,
    /// lhs ? data[0] : data[1]
    cond_expr,
    /// lhs = rhs
    assign_expr,
    /// lhs *= rhs
    mul_assign_expr,
    /// lhs /= rhs
    div_assign_expr,
    /// lhs %= rhs
    mod_assign_expr,
    /// lhs += rhs
    add_assign_expr,
    /// lhs -= rhs
    sub_assign_expr,
    /// lhs <<= rhs
    shl_assign_expr,
    /// lhs >>= rhs
    shr_assign_expr,
    /// lhs &= rhs
    and_assign_expr,
    /// lhs ^= rhs
    xor_assign_expr,
    /// lhs |= rhs
    or_assign_expr,
    /// lhs || rhs
    bool_or_expr,
    /// lhs && rhs
    bool_and_expr,
    /// lhs | rhs
    bit_or_expr,
    /// lhs ^ rhs
    bit_xor_expr,
    /// lhs & rhs
    bit_and_expr,
    /// lhs == rhs
    equal_expr,
    /// lhs != rhs
    not_equal_expr,
    /// lhs < rhs
    less_than_expr,
    /// lhs <= rhs
    less_than_equal_expr,
    /// lhs > rhs
    greater_than_expr,
    /// lhs >= rhs
    greater_than_equal_expr,
    /// lhs << rhs
    shl_expr,
    /// lhs >> rhs
    shr_expr,
    /// lhs + rhs
    add_expr,
    /// lhs - rhs
    sub_expr,
    /// lhs * rhs
    mul_expr,
    /// lhs / rhs
    div_expr,
    /// lhs % rhs
    mod_expr,
    /// Explicit (type)lhs
    cast_expr,
    /// &lhs
    addr_of_expr,
    /// *lhs
    deref_expr,
    /// +lhs
    plus_expr,
    /// -lhs
    negate_expr,
    /// ~lhs
    bit_not_expr,
    /// !lhs
    bool_not_expr,
    /// ++lhs
    pre_inc_expr,
    /// --lhs
    pre_dec_expr,
    /// lhs[rhs] lhs is pointer/array type, rhs is integer type
    array_access_expr,
    /// first(second) second may be 0
    call_expr_one,
    /// data[0](data[1..])
    call_expr,
    /// lhs.rhs rhs is a TokenIndex of the identifier
    member_access_expr,
    /// lhs->rhs rhs is a TokenIndex of the identifier
    member_access_ptr_expr,
    /// lhs++
    post_inc_expr,
    /// lhs--
    post_dec_expr,
    /// lhs is a TokenIndex of the identifier
    decl_ref_expr,
    /// integer literal with 64 bits, split in first and second, check node.ty for signedness
    int_literal,
    /// f32 literal stored in first
    float_literal,
    /// f64 literal split in first and second
    double_literal,
    /// data[first][0..second]
    string_literal_expr,
    /// TODO
    compound_literal_expr,
    // ====== Implicit casts ======
    /// convert T[] to T *
    array_to_pointer,
    /// same as deref
    lval_to_rval,
    /// Returns the expanded payload type for `tag` (see Decl/Stmt/Expr below),
    /// or null when the tag carries no payload.
    ///
    /// Fixed: the switch previously named tags that are not declared in this
    /// enum (.extern_fn_proto, .inline_extern_fn_proto, .extern_fn_def,
    /// .inline_extern_fn_def, .auto_var, .threadlocal_auto_var,
    /// .threadlocal_register_var, .typdef, .field_decl, .if_stmt), which cannot
    /// compile; the prongs now cover the tags declared above.
    pub fn Type(comptime tag: Tag) ?type {
        return switch (tag) {
            .invalid => unreachable,
            // All function-prototype variants share the FnProto payload.
            .fn_proto,
            .static_fn_proto,
            .inline_fn_proto,
            .inline_static_fn_proto,
            .noreturn_fn_proto,
            .noreturn_static_fn_proto,
            .noreturn_inline_fn_proto,
            .noreturn_inline_static_fn_proto,
            => Decl.FnProto,
            // All function-definition variants share the FnDef payload.
            .fn_def,
            .static_fn_def,
            .inline_fn_def,
            .inline_static_fn_def,
            .noreturn_fn_def,
            .noreturn_static_fn_def,
            .noreturn_inline_fn_def,
            .noreturn_inline_static_fn_def,
            => Decl.FnDef,
            .param_decl, .register_param_decl => Decl.Param,
            .@"var",
            .extern_var,
            .static_var,
            .register_var,
            .threadlocal_var,
            .threadlocal_extern_var,
            .threadlocal_static_var,
            => Decl.Var,
            .typedef => Decl.Typedef,
            .struct_forward, .union_forward => Decl.RecordForward,
            .enum_forawrd => Decl.EnumForward,
            .struct_def, .union_def => Decl.Record,
            .enum_def => Decl.Enum,
            // NOTE(review): there is no field_decl tag; Decl.Field is only
            // reachable through Decl.Record.fields.
            .labeled_stmt => Stmt.Labeled,
            .compound_stmt => Stmt.Compound,
            // All three if forms expand to Stmt.If (unused parts stay 0/null).
            .if_then_else_stmt, .if_else_stmt, .if_then_stmt => Stmt.If,
            .switch_stmt => Stmt.Switch,
            .do_while_stmt, .while_stmt => Stmt.While,
            .for_stmt => Stmt.For,
            .goto_stmt => Stmt.Goto,
            .continue_stmt, .break_stmt => null,
            .return_stmt => Stmt.Return,
            .cond_expr => Expr.Conditional,
            .comma_expr,
            .binary_cond_expr,
            .assign_expr,
            .mul_assign_expr,
            .div_assign_expr,
            .mod_assign_expr,
            .add_assign_expr,
            .sub_assign_expr,
            .shl_assign_expr,
            .shr_assign_expr,
            .and_assign_expr,
            .xor_assign_expr,
            .or_assign_expr,
            .bool_or_expr,
            .bool_and_expr,
            .bit_or_expr,
            .bit_xor_expr,
            .bit_and_expr,
            .equal_expr,
            .not_equal_expr,
            .less_than_expr,
            .less_than_equal_expr,
            .greater_than_expr,
            .greater_than_equal_expr,
            .shl_expr,
            .shr_expr,
            .add_expr,
            .sub_expr,
            .mul_expr,
            .div_expr,
            .mod_expr,
            => Expr.BinOp,
            else => @panic("TODO Tag.Type()"),
        };
    }
};
// Represents a declaration expanded from a Node.
pub const Decl = struct {
    /// Payload of the *_fn_proto tags.
    pub const FnProto = struct {
        name: []const u8,
        ty: Type,
        is_static: bool,
        is_inline: bool,
        // Points to the definition if one exists, null otherwise.
        definition: ?*FnDef,
    };
    /// Payload of the *_fn_def tags.
    pub const FnDef = struct {
        name: []const u8,
        ty: Type,
        is_static: bool,
        is_inline: bool,
        body: NodeIndex,
    };
    /// Payload of param_decl/register_param_decl.
    pub const Param = struct {
        // identifier or first token after declSpec
        name_tok: TokenIndex,
        ty: Type,
    };
    /// Payload of the *_var tags.
    pub const Var = struct {
        name: []const u8,
        ty: Type,
        storage_class: enum {
            none,
            static,
            @"extern",
            register,
        },
        is_threadlocal: bool,
        // Initializer expression, if any.
        init: ?NodeIndex,
    };
    pub const Typedef = struct {
        name: []const u8,
        ty: Type,
    };
    /// Forward declaration of a struct/union.
    pub const RecordForward = struct {
        name: []const u8,
        definition: ?*Record,
    };
    /// Definition of a struct/union.
    pub const Record = struct {
        // identifier or keyword_struct/union if anonymous
        name_tok: TokenIndex,
        fields: []Field,
    };
    pub const Field = struct {
        // identifier or keyword_struct/union if anonymous
        name: TokenIndex,
        ty: Type,
        // TODO bit field stuff
    };
    /// Forward declaration of an enum.
    pub const EnumForward = struct {
        name: []const u8,
        definition: ?*Enum,
    };
    pub const Enum = struct {
        // identifier or keyword_enum if anonymous
        tag_type: Type,
        name_tok: TokenIndex,
    };
};
/// Represents a statement expanded from a Node.
pub const Stmt = struct {
/// `label: stmt`
pub const Labeled = struct {
label: []const u8,
stmt: NodeIndex,
};
/// `{ ... }` block.
pub const Compound = struct {
stmts: NodeIndex,
};
/// `if (cond) then else @"else"`
pub const If = struct {
cond: NodeIndex,
then: NodeIndex,
@"else": NodeIndex,
};
pub const Switch = struct { cond: NodeIndex, stmt: NodeIndex };
/// Shared by `while` and `do ... while`.
pub const While = struct {
cond: NodeIndex,
stmt: NodeIndex,
};
/// `for (init; cond; inc) stmt`
pub const For = struct {
init: NodeIndex,
cond: NodeIndex,
inc: NodeIndex,
stmt: NodeIndex,
};
/// `goto label;`
pub const Goto = struct {
label: []const u8,
};
/// `return stmt;`
pub const Return = struct {
stmt: NodeIndex,
};
};
/// Represents an expression expanded from a Node.
pub const Expr = struct {
pub const Float = f32;
pub const Double = f64;
/// String literal; adjacent literal tokens are concatenated.
pub const String = struct {
tokens: []TokenIndex,
};
// pub const Number = struct {
// ty: Type,
// };
/// Any two-operand expression (arithmetic, comparison, assignment, comma).
pub const BinOp = struct {
ty: Type,
lhs: NodeIndex,
rhs: NodeIndex,
};
/// Any one-operand expression (casts, inc/dec, deref, etc.).
pub const UnOp = struct {
ty: Type,
operand: NodeIndex,
};
/// `cond ? then : @"else"`
pub const Conditional = struct {
ty: Type,
cond: NodeIndex,
then: NodeIndex,
@"else": NodeIndex,
};
};
/// Returns the source bytes for a token. Tokens with a fixed spelling
/// (keywords, punctuation) are answered from the token id's lexeme table;
/// everything else is re-lexed from its source buffer at its byte offset.
pub fn tokSlice(tree: Tree, tok_i: TokenIndex) []const u8 {
if (tree.tokens.items(.id)[tok_i].lexeme()) |some| return some;
const loc = tree.tokens.items(.loc)[tok_i];
var tmp_tokenizer = Tokenizer{
.buf = if (loc.id == .generated)
tree.generated
else
tree.comp.getSource(loc.id).buf,
.index = loc.byte_offset,
// NOTE(review): `.source` is always `.generated` even when the token
// comes from a real source file — presumably only `.buf`/`.index`
// matter for this throwaway re-lex; confirm against Tokenizer.
.source = .generated,
};
const tok = tmp_tokenizer.next();
return tmp_tokenizer.buf[tok.start..tok.end];
}
/// Pretty-prints every top-level declaration of the tree to `writer`,
/// separating declarations with a blank line.
pub fn dump(tree: Tree, writer: anytype) @TypeOf(writer).Error!void {
    for (tree.root_decls) |decl| {
        try tree.dumpNode(decl, 0, writer);
        try writer.writeByte('\n');
    }
}
/// Recursively pretty-prints one node at the given indentation `level`,
/// using ANSI color escapes for tags, types, names and literals.
/// Child nodes are found via the per-node `data` payload: either two
/// direct indices (`first`/`second`) or a `first..second` range into
/// `tree.data` for variable-length children.
fn dumpNode(tree: Tree, node: NodeIndex, level: u32, w: anytype) @TypeOf(w).Error!void {
// Indent step per nesting level; `half` indents field labels.
const delta = 2;
const half = delta / 2;
// ANSI SGR color codes.
const TYPE = "\x1b[35;1m";
const TAG = "\x1b[36;1m";
const NAME = "\x1b[91;1m";
const LITERAL = "\x1b[32;1m";
const RESET = "\x1b[0m";
const tag = tree.nodes.items(.tag)[node];
// Header line: "tag_name: 'type'".
try w.writeByteNTimes(' ', level);
try w.print(TAG ++ "{s}: " ++ TYPE ++ "'", .{@tagName(tag)});
try tree.nodes.items(.ty)[node].dump(tree, w);
try w.writeAll("'\n" ++ RESET);
switch (tag) {
.invalid => unreachable,
// NOTE(review): these tag sets differ from Tag.Type() above
// (noreturn_* here vs extern_* there) — confirm against the Tag enum.
.fn_proto,
.static_fn_proto,
.inline_fn_proto,
.inline_static_fn_proto,
.noreturn_fn_proto,
.noreturn_static_fn_proto,
.noreturn_inline_fn_proto,
.noreturn_inline_static_fn_proto,
=> {
// data.first: name token.
try w.writeByteNTimes(' ', level + half)
try w.print("name: " ++ NAME ++ "{s}\n" ++ RESET, .{tree.tokSlice(tree.nodes.items(.data)[node].first)});
},
.fn_def,
.static_fn_def,
.inline_fn_def,
.inline_static_fn_def,
.noreturn_fn_def,
.noreturn_static_fn_def,
.noreturn_inline_fn_def,
.noreturn_inline_static_fn_def,
=> {
// data.first: name token; data.second: body node.
const data = tree.nodes.items(.data)[node];
try w.writeByteNTimes(' ', level + half);
try w.print("name: " ++ NAME ++ "{s}\n" ++ RESET, .{tree.tokSlice(data.first)});
try w.writeByteNTimes(' ', level + half);
try w.writeAll("body:\n");
try tree.dumpNode(data.second, level + delta, w);
},
.typedef,
.@"var",
.extern_var,
.static_var,
.register_var,
.threadlocal_var,
.threadlocal_extern_var,
.threadlocal_static_var,
=> {
// data.first: name token; data.second: optional initializer (0 = none).
const data = tree.nodes.items(.data)[node];
try w.writeByteNTimes(' ', level + half);
try w.print("name: " ++ NAME ++ "{s}\n" ++ RESET, .{tree.tokSlice(data.first)});
if (data.second != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("init:\n");
try tree.dumpNode(data.second, level + delta, w);
}
},
.compound_stmt => {
// data is a first..second range of child statements in tree.data.
const range = tree.nodes.items(.data)[node];
for (tree.data[range.first..range.second]) |stmt, i| {
if (i != 0) try w.writeByte('\n');
try tree.dumpNode(stmt, level + delta, w);
}
},
.compound_stmt_two => {
// Small-block optimization: up to two inline children (0 = absent).
const data = tree.nodes.items(.data)[node];
if (data.first != 0) try tree.dumpNode(data.first, level + delta, w);
if (data.second != 0) try tree.dumpNode(data.second, level + delta, w);
},
.labeled_stmt => {
const data = tree.nodes.items(.data)[node];
try w.writeByteNTimes(' ', level + half);
try w.print("label: " ++ LITERAL ++ "{s}\n" ++ RESET, .{tree.tokSlice(data.first)});
if (data.second != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("stmt:\n");
try tree.dumpNode(data.second, level + delta, w);
}
},
.case_stmt => {
// data.first: case value expression; data.second: optional statement.
const data = tree.nodes.items(.data)[node];
try w.writeByteNTimes(' ', level + half);
try w.writeAll("value:\n");
try tree.dumpNode(data.first, level + delta, w);
if (data.second != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("stmt:\n");
try tree.dumpNode(data.second, level + delta, w);
}
},
.default_stmt => {
const stmt = tree.nodes.items(.data)[node].first;
if (stmt != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("stmt:\n");
try tree.dumpNode(stmt, level + delta, w);
}
},
.if_then_else_stmt => {
// data.first: cond; then/else node indices live at
// tree.data[data.second] and tree.data[data.second + 1].
const data = tree.nodes.items(.data)[node];
try w.writeByteNTimes(' ', level + half);
try w.writeAll("cond:\n");
try tree.dumpNode(data.first, level + delta, w);
try w.writeByteNTimes(' ', level + half);
try w.writeAll("then:\n");
try tree.dumpNode(tree.data[data.second], level + delta, w);
try w.writeByteNTimes(' ', level + half);
try w.writeAll("else:\n");
try tree.dumpNode(tree.data[data.second + 1], level + delta, w);
},
.if_else_stmt => {
const data = tree.nodes.items(.data)[node];
try w.writeByteNTimes(' ', level + half);
try w.writeAll("cond:\n");
try tree.dumpNode(data.first, level + delta, w);
try w.writeByteNTimes(' ', level + half);
try w.writeAll("else:\n");
try tree.dumpNode(data.second, level + delta, w);
},
.if_then_stmt => {
const data = tree.nodes.items(.data)[node];
try w.writeByteNTimes(' ', level + half);
try w.writeAll("cond:\n");
try tree.dumpNode(data.first, level + delta, w);
if (data.second != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("then:\n");
try tree.dumpNode(data.second, level + delta, w);
}
},
.switch_stmt, .while_stmt, .do_while_stmt => {
// data.first: controlling expression; data.second: optional body.
const data = tree.nodes.items(.data)[node];
try w.writeByteNTimes(' ', level + half);
try w.writeAll("cond:\n");
try tree.dumpNode(data.first, level + delta, w);
if (data.second != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("body:\n");
try tree.dumpNode(data.second, level + delta, w);
}
},
.for_decl_stmt => {
// Range layout in tree.data: decls..., cond, incr, body (last 3 fixed).
const range = tree.nodes.items(.data)[node];
const items = tree.data[range.first..range.second];
const decls = items[0 .. items.len - 3];
try w.writeByteNTimes(' ', level + half);
try w.writeAll("decl:\n");
for (decls) |decl| {
try tree.dumpNode(decl, level + delta, w);
try w.writeByte('\n');
}
const cond = items[items.len - 3];
if (cond != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("cond:\n");
try tree.dumpNode(cond, level + delta, w);
}
const incr = items[items.len - 2];
if (incr != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("incr:\n");
try tree.dumpNode(incr, level + delta, w);
}
const body = items[items.len - 1];
if (body != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("body:\n");
try tree.dumpNode(body, level + delta, w);
}
},
.forever_stmt => {
// `for (;;)` — only an optional body.
const body = tree.nodes.items(.data)[node].first;
if (body != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("body:\n");
try tree.dumpNode(body, level + delta, w);
}
},
.for_stmt => {
// data.first: start of [init, cond, incr] triple in tree.data;
// data.second: optional body.
const data = tree.nodes.items(.data)[node];
const start = tree.data[data.first..];
const init = start[0];
if (init != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("init:\n");
try tree.dumpNode(init, level + delta, w);
}
const cond = start[1];
if (cond != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("cond:\n");
try tree.dumpNode(cond, level + delta, w);
}
const incr = start[2];
if (incr != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("incr:\n");
try tree.dumpNode(incr, level + delta, w);
}
if (data.second != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("body:\n");
try tree.dumpNode(data.second, level + delta, w);
}
},
.goto_stmt => {
try w.writeByteNTimes(' ', level + half);
try w.print("label: " ++ LITERAL ++ "{s}\n" ++ RESET, .{tree.tokSlice(tree.nodes.items(.data)[node].first)});
},
// No payload — header line already printed above is sufficient.
.continue_stmt, .break_stmt => {},
.return_stmt => {
const expr = tree.nodes.items(.data)[node].first;
if (expr != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("expr:\n");
try tree.dumpNode(expr, level + delta, w);
}
},
.string_literal_expr => {
// The string's pointer is stored as two consecutive u32 words in
// tree.data starting at data.first; data.second is length + 1
// (presumably including the NUL terminator — confirm).
const data = tree.nodes.items(.data)[node];
const ptr = @intToPtr([*]const u8, @bitCast(usize, tree.data[data.first..][0..2].*));
try w.writeByteNTimes(' ', level + half);
try w.print("data: " ++ LITERAL ++ "\"{s}\"\n" ++ RESET, .{ptr[0 .. data.second - 1]});
},
.call_expr => {
// Range layout: callee at tree.data[range.first], args follow.
const range = tree.nodes.items(.data)[node];
try w.writeByteNTimes(' ', level + half);
try w.writeAll("lhs:\n");
try tree.dumpNode(tree.data[range.first], level + delta, w);
try w.writeByteNTimes(' ', level + half);
try w.writeAll("args:\n");
for (tree.data[range.first + 1 .. range.second]) |stmt| try tree.dumpNode(stmt, level + delta, w);
},
.call_expr_one => {
// Small-call optimization: callee + at most one argument.
const data = tree.nodes.items(.data)[node];
try w.writeByteNTimes(' ', level + half);
try w.writeAll("lhs:\n");
try tree.dumpNode(data.first, level + delta, w);
if (data.second != 0) {
try w.writeByteNTimes(' ', level + half);
try w.writeAll("arg:\n");
try tree.dumpNode(data.second, level + delta, w);
}
},
// All binary operators: data.first = lhs, data.second = rhs.
.comma_expr,
.binary_cond_expr,
.assign_expr,
.mul_assign_expr,
.div_assign_expr,
.mod_assign_expr,
.add_assign_expr,
.sub_assign_expr,
.shl_assign_expr,
.shr_assign_expr,
.and_assign_expr,
.xor_assign_expr,
.or_assign_expr,
.bool_or_expr,
.bool_and_expr,
.bit_or_expr,
.bit_xor_expr,
.bit_and_expr,
.equal_expr,
.not_equal_expr,
.less_than_expr,
.less_than_equal_expr,
.greater_than_expr,
.greater_than_equal_expr,
.shl_expr,
.shr_expr,
.add_expr,
.sub_expr,
.mul_expr,
.div_expr,
.mod_expr,
=> {
const data = tree.nodes.items(.data)[node];
try w.writeByteNTimes(' ', level + 1);
try w.writeAll("lhs:\n");
try tree.dumpNode(data.first, level + delta, w);
try w.writeByteNTimes(' ', level + 1);
try w.writeAll("rhs:\n");
try tree.dumpNode(data.second, level + delta, w);
},
// All unary operators: data.first = operand.
.cast_expr,
.addr_of_expr,
.deref_expr,
.plus_expr,
.negate_expr,
.bit_not_expr,
.bool_not_expr,
.pre_inc_expr,
.pre_dec_expr,
.post_inc_expr,
.post_dec_expr,
.array_to_pointer,
.lval_to_rval,
=> {
try w.writeByteNTimes(' ', level + 1);
try w.writeAll("operand:\n");
try tree.dumpNode(tree.nodes.items(.data)[node].first, level + delta, w);
},
.decl_ref_expr => {
try w.writeByteNTimes(' ', level + 1);
try w.print("name: " ++ NAME ++ "{s}\n" ++ RESET, .{tree.tokSlice(tree.nodes.items(.data)[node].first)});
},
.int_literal => {
// Whole data payload reinterpreted as a 64-bit integer; signedness
// of the display follows the node's type.
try w.writeByteNTimes(' ', level + 1);
if (tree.nodes.items(.ty)[node].isUnsignedInt(tree.comp)) {
try w.print("value: " ++ LITERAL ++ "{d}\n" ++ RESET, .{@bitCast(u64, tree.nodes.items(.data)[node])});
} else {
try w.print("value: " ++ LITERAL ++ "{d}\n" ++ RESET, .{@bitCast(i64, tree.nodes.items(.data)[node])});
}
},
.float_literal => {
// f32 stored in data.first's bits.
try w.writeByteNTimes(' ', level + 1);
try w.print("value: " ++ LITERAL ++ "{d}\n" ++ RESET, .{@bitCast(f32, tree.nodes.items(.data)[node].first)});
},
.double_literal => {
// f64 stored across the whole data payload's bits.
try w.writeByteNTimes(' ', level + 1);
try w.print("value: " ++ LITERAL ++ "{d}\n" ++ RESET, .{@bitCast(f64, tree.nodes.items(.data)[node])});
},
else => {},
}
}
|
src/Tree.zig
|
const std = @import("std");
const WavContents = @import("read_wav.zig").WavContents;
const Span = @import("basics.zig").Span;
// FIXME - no effort at all has been made to optimize the sampler module
// FIXME - use a better resampling filter
// TODO - more complex looping schemes
// TODO - allow sample_rate to be controlled
/// PCM sample encodings understood by the sampler. Multi-byte formats are
/// little-endian signed integers (see decodeSigned/getSample below).
pub const SampleFormat = enum {
unsigned8,
signed16_lsb,
signed24_lsb,
signed32_lsb,
};
/// Raw PCM sample data plus the metadata needed to interpret it.
/// `data` holds interleaved channel frames encoded per `format`.
pub const Sample = struct {
num_channels: usize,
sample_rate: usize,
format: SampleFormat,
data: []const u8,
};
/// Decodes one little-endian signed PCM value of `byte_count` bytes from
/// `slice` at value index `index`, normalized to roughly [-1.0, 1.0).
fn decodeSigned(
    comptime byte_count: u16,
    slice: []const u8,
    index: usize,
) f32 {
    // Signed integer type wide enough for one encoded value.
    const Int = std.meta.IntType(true, byte_count * 8);
    const offset = index * byte_count;
    const raw = std.mem.readIntSliceLittle(Int, slice[offset .. offset + byte_count]);
    // Divide by 2^(bits-1) so the most negative value maps to -1.0.
    const divisor = 1 << @as(u32, byte_count * 8 - 1);
    return @intToFloat(f32, raw) / @intToFloat(f32, divisor);
}
/// Fetches one decoded sample for the configured channel at frame `index1`.
/// When looping, the index wraps via @mod; otherwise out-of-range frames
/// yield silence (0.0).
fn getSample(params: Sampler.Params, index1: i32) f32 {
    const sample_width: usize = switch (params.sample.format) {
        .unsigned8 => 1,
        .signed16_lsb => 2,
        .signed24_lsb => 3,
        .signed32_lsb => 4,
    };
    const frame_count = @intCast(i32,
        params.sample.data.len / sample_width / params.sample.num_channels);
    const frame = if (params.loop) @mod(index1, frame_count) else index1;
    if (frame < 0 or frame >= frame_count) {
        return 0.0;
    }
    // Index of this channel's value within the interleaved frame.
    const value_index = @intCast(usize, frame) * params.sample.num_channels +
        params.channel;
    return switch (params.sample.format) {
        // 8-bit unsigned PCM is centered at 127.5.
        .unsigned8 =>
            (@intToFloat(f32, params.sample.data[value_index]) - 127.5) / 127.5,
        .signed16_lsb => decodeSigned(2, params.sample.data, value_index),
        .signed24_lsb => decodeSigned(3, params.sample.data, value_index),
        .signed32_lsb => decodeSigned(4, params.sample.data, value_index),
    };
}
/// Plays back a PCM Sample, resampling with linear interpolation when the
/// sample's rate differs from the output rate.
pub const Sampler = struct {
pub const num_outputs = 1;
pub const num_temps = 0;
pub const Params = struct {
// Output sample rate in Hz.
sample_rate: f32,
sample: Sample,
// Which channel of the (possibly multi-channel) sample to play.
channel: usize,
loop: bool,
};
// Playback position in sample frames (fractional while resampling).
t: f32,
pub fn init() Sampler {
return .{
.t = 0.0,
};
}
/// Additively renders the sample into outputs[0] over `span`.
/// Restarts playback from frame 0 when the note id changes.
pub fn paint(
self: *Sampler,
span: Span,
outputs: [num_outputs][]f32,
temps: [num_temps][]f32,
note_id_changed: bool,
params: Params,
) void {
// Requested channel doesn't exist — emit nothing.
if (params.channel >= params.sample.num_channels) {
return;
}
if (note_id_changed) {
self.t = 0.0;
}
const out = outputs[0][span.start..span.end];
// Source frames advanced per output frame.
const ratio =
@intToFloat(f32, params.sample.sample_rate) / params.sample_rate;
if (ratio < 0.0 and !params.loop) {
// i don't think it makes sense to play backwards without looping
return;
}
// FIXME - pulled these epsilon values out of my ass
if (ratio > 0.9999 and ratio < 1.0001) {
// no resampling needed
const t = @floatToInt(i32, std.math.round(self.t));
var i: u31 = 0; while (i < out.len) : (i += 1) {
out[i] += getSample(params, t + @as(i32, i));
}
self.t += @intToFloat(f32, out.len);
} else {
// resample
var i: u31 = 0; while (i < out.len) : (i += 1) {
// Linear interpolation between the two neighboring frames.
const t0 = @floatToInt(i32, std.math.floor(self.t));
const t1 = t0 + 1;
const tfrac = @intToFloat(f32, t1) - self.t;
const s0 = getSample(params, t0);
const s1 = getSample(params, t1);
const s = s0 * (1.0 - tfrac) + s1 * tfrac;
out[i] += s;
self.t += ratio;
}
}
// NOTE(review): this compares t (frames) against data.len (bytes).
// Harmless for keeping t bounded, since data.len is a whole multiple
// of the frame count and getSample @mod-wraps anyway — but it looks
// unintended; confirm.
if (self.t >= @intToFloat(f32, params.sample.data.len) and params.loop) {
self.t -= @intToFloat(f32, params.sample.data.len);
}
}
};
|
src/zang/mod_sampler.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
const expect = std.testing.expect;
const test_allocator = std.testing.allocator;
/// Duration of one beat in seconds at the given tempo (beats per minute).
pub fn beatToSecs(bpm: f64) callconv(.Inline) f64 {
    const secs_per_minute: f64 = 60.0;
    return secs_per_minute / bpm;
}
/// Duration of a single tick in seconds, where `resolution` is the number
/// of ticks per beat.
pub fn tickInSecs(bpm: f64, resolution: i64) callconv(.Inline) f64 {
    const ticks_per_beat = @intToFloat(f64, resolution);
    return beatToSecs(bpm) / ticks_per_beat;
}
/// Number of ticks that elapse in `secs` seconds at the given tempo and
/// resolution (ticks per beat).
pub fn ticksFromSecs(secs: f64, bpm: f64, resolution: u32) callconv(.Inline) f64 {
    const one_tick = tickInSecs(bpm, resolution);
    return secs / one_tick;
}
/// Time in seconds spanned by `ticks` ticks at the given tempo and
/// resolution (ticks per beat).
pub fn secsFromTicks(ticks: i64, bpm: f64, resolution: u32) callconv(.Inline) f64 {
    const one_tick = tickInSecs(bpm, resolution);
    return one_tick * @intToFloat(f64, ticks);
}
/// A tempo change taking effect at a given tick.
const BpmChange = struct {
bpm: f64,
tick: i64,
};
/// A time-signature change (n/d) taking effect at a given measure.
const TimeSigChange = struct {
n: u32,
d: u32,
measure: i64,
};
/// Errors raised by MusicTime.validate when required data is missing.
pub const MusicError = error{
InvalidResolution,
NoBPM,
NoTimeSig,
};
/// A structure for calculating time in a song that contains bpm and time signature changes.
/// Currently the functions for this structure assumes that the bpms and timeSigs lists are sorted at all times.
/// A structure for calculating time in a song that contains bpm and time
/// signature changes.
/// Currently the functions for this structure assume that the bpms and
/// timeSigs lists are sorted (by tick / measure) at all times.
pub const MusicTime = struct {
    // Tempo changes, sorted by tick; must hold at least one entry.
    bpms: std.ArrayList(BpmChange),
    // Time-signature changes, sorted by measure; must hold at least one entry.
    timeSigs: std.ArrayList(TimeSigChange),
    // Ticks per beat; must be non-zero.
    resolution: u32,

    /// Converts tick # to discrete time in seconds.
    /// Fixed: this was declared as returning a bare `f64` while using `try`
    /// and falling off the end without a return value, which cannot compile.
    /// It now returns an error union and delegates to timeAtTick.
    pub fn tickToTime(self: *MusicTime, tick: u64) !f64 {
        return self.timeAtTick(@intCast(i64, tick));
    }

    // Checks the invariants shared by all query functions.
    fn validate(self: *MusicTime) MusicError!void {
        if (self.resolution == 0) {
            return error.InvalidResolution;
        }
        if (self.timeSigs.items.len == 0) {
            return error.NoTimeSig;
        }
        if (self.bpms.items.len == 0) {
            return error.NoBPM;
        }
    }

    /// Creates a MusicTime seeded with a starting tempo and a time signature
    /// of n/d at tick/measure 0. Caller must call deinit.
    pub fn init(startBpm: f64, d: u32, n: u32, resolution: u32, allocator: *Allocator) !MusicTime {
        var newTime = MusicTime{ .resolution = resolution, .bpms = std.ArrayList(BpmChange).init(allocator), .timeSigs = std.ArrayList(TimeSigChange).init(allocator) };
        try newTime.bpms.append(BpmChange{
            .bpm = startBpm,
            .tick = 0,
        });
        try newTime.timeSigs.append(TimeSigChange{ .d = d, .n = n, .measure = 0 });
        return newTime;
    }

    /// Frees both change lists.
    pub fn deinit(self: *MusicTime) void {
        self.bpms.deinit();
        self.timeSigs.deinit();
    }

    /// Converts tick # to discrete time in seconds.
    pub fn timeAtTick(self: *MusicTime, tick: i64) !f64 {
        try self.validate();
        var result: f64 = 0.0;
        var lastBpm = self.bpms.items[0];
        // Sum the duration of every fully elapsed tempo segment.
        for (self.bpms.items[1..self.bpms.items.len]) |bpm| {
            if (bpm.tick > tick) {
                break;
            }
            result += secsFromTicks(bpm.tick - lastBpm.tick, lastBpm.bpm, self.resolution);
            lastBpm = bpm;
        }
        // Remaining ticks fall inside the segment that starts at lastBpm.
        result += secsFromTicks(tick - lastBpm.tick, lastBpm.bpm, self.resolution);
        return result;
    }

    /// Converts discrete time in seconds to tick #.
    pub fn tickAtTime(self: *MusicTime, time: f64) !f64 {
        try self.validate();
        var remaining = time;
        var result: i64 = 0;
        var prev = self.bpms.items[0];
        // Find the last tempo change that begins at or before `time`.
        for (self.bpms.items) |bpm| {
            const new_time = try self.timeAtTick(bpm.tick); //bad time complexity
            if (new_time > time) {
                break;
            }
            result = bpm.tick;
            remaining = time - new_time;
            prev = bpm;
        }
        return @intToFloat(f64, result) + ticksFromSecs(remaining, prev.bpm, self.resolution);
    }

    /// Get the measure # for any given tick #
    pub fn measureAtTick(self: *MusicTime, tick: i64) !i64 {
        try self.validate();
        var result: i64 = 0;
        var remainingTicks: i64 = tick;
        const firstSig = self.timeSigs.items[0];
        var prevMeasure = firstSig.measure;
        // Ticks per measure = resolution * 4 * n / d (resolution is per
        // quarter note). A zero value would divide by zero below, so bail.
        var prevTicksPerMeasure: i64 = self.resolution * 4 * firstSig.n / firstSig.d;
        if (prevTicksPerMeasure == 0) {
            return result;
        }
        for (self.timeSigs.items[1..self.timeSigs.items.len]) |current_sig| {
            const measureCount = current_sig.measure - prevMeasure;
            const tickCount = measureCount * prevTicksPerMeasure;
            if (tickCount > remainingTicks) {
                break;
            }
            result += measureCount;
            remainingTicks -= tickCount;
            prevMeasure = current_sig.measure;
            prevTicksPerMeasure = self.resolution * 4 * current_sig.n / current_sig.d;
            if (prevTicksPerMeasure == 0) {
                return result;
            }
        }
        result += @divFloor(remainingTicks, prevTicksPerMeasure);
        return result;
    }

    /// Get the tick # at the start of any given measure #
    pub fn tickAtMeasure(self: *MusicTime, measure: i64) !i64 {
        try self.validate();
        var result: i64 = 0;
        var remainingMeasures: i64 = measure;
        const firstSig = self.timeSigs.items[0];
        var prevMeasure = firstSig.measure;
        var prevTicksPerMeasure = self.resolution * 4 * firstSig.n / firstSig.d;
        for (self.timeSigs.items[1..self.timeSigs.items.len]) |currentSig| {
            const measureCount = currentSig.measure - prevMeasure;
            if (measureCount > remainingMeasures) {
                break;
            }
            result += measureCount * prevTicksPerMeasure;
            remainingMeasures -= measureCount;
            prevMeasure = currentSig.measure;
            prevTicksPerMeasure = self.resolution * 4 * currentSig.n / currentSig.d;
        }
        result += remainingMeasures * prevTicksPerMeasure;
        return result;
    }

    /// Get the BPM at any given tick #
    pub fn bpmAtTick(self: *MusicTime, tick: i64) MusicError!f64 {
        if (self.bpms.items.len == 0) {
            return error.NoBPM;
        }
        var prev = self.bpms.items[0];
        // Walk forward until the first change strictly after `tick`.
        for (self.bpms.items[1..self.bpms.items.len]) |b| {
            if (b.tick > tick) {
                break;
            }
            prev = b;
        }
        return prev.bpm;
    }
};
// NOTE(review): the `expect(...)` results below are not `try`-ed; this
// assumes an older std where expect panicked instead of returning an
// error union — confirm against the targeted Zig version.
test "tick #240 @ 120bpm" {
var times = try MusicTime.init(120.0, 4, 4, 240, test_allocator);
defer times.deinit();
const calculatedTime = try times.timeAtTick(240);
// 240 ticks = one beat; one beat at 120bpm = 0.5s.
expect(calculatedTime == 0.5);
}
test "time 0.5s @ 120bpm" {
var times = try MusicTime.init(120.0, 4, 4, 240, test_allocator);
defer times.deinit();
const calculatedTick = try times.tickAtTime(0.5);
expect(calculatedTick == 240.0);
}
// Round-trip: tick -> time -> tick should be the identity.
test "tick conversion equality" {
var times = try MusicTime.init(120.0, 4, 4, 240, test_allocator);
defer times.deinit();
const test_tick = 123456;
const calculatedTime = try times.timeAtTick(test_tick);
const calculatedTick = try times.tickAtTime(calculatedTime);
expect(@floatToInt(i64, calculatedTick) == test_tick);
}
test "tick /w bpm changes" {
var times = try MusicTime.init(120.0, 4, 4, 240, test_allocator);
defer times.deinit();
try times.bpms.append(BpmChange{ .bpm = 240.0, .tick = 4 * 240 });
// 4 beats at 120bpm (2s) + 2 beats at 240bpm (0.5s) = 2.5s.
const calculatedTime = try times.timeAtTick(240 * 6);
expect(calculatedTime == 2.5);
}
test "time /w bpm changes" {
var times = try MusicTime.init(120.0, 4, 4, 240, test_allocator);
defer times.deinit();
try times.bpms.append(BpmChange{ .bpm = 240.0, .tick = 4 * 240 });
const calculatedTick = try times.tickAtTime(2.5);
expect(calculatedTick == 240.0 * 6.0);
}
// Round-trip through multiple tempo changes, rounded to absorb float error.
test "tick conversion equality /w bpm changes" {
var times = try MusicTime.init(120.0, 4, 4, 240, test_allocator);
defer times.deinit();
try times.bpms.append(BpmChange{ .bpm = 124.0, .tick = 682 });
try times.bpms.append(BpmChange{ .bpm = 182.0, .tick = 900 });
try times.bpms.append(BpmChange{ .bpm = 100.0, .tick = 2400 });
const test_tick = 123456;
const calculatedTime = try times.timeAtTick(test_tick);
const calculatedTick = try times.tickAtTime(calculatedTime);
expect(@floatToInt(i64, @round(calculatedTick)) == test_tick);
}
test "tick to measure, simple" {
var times = try MusicTime.init(120.0, 4, 4, 240, test_allocator);
defer times.deinit();
const calculatedMeasure = try times.measureAtTick(240 * (4 * 512 + 1));
expect(calculatedMeasure == 512);
}
test "measure to tick, simple" {
var times = try MusicTime.init(120.0, 4, 4, 240, test_allocator);
defer times.deinit();
const calculatedTick = try times.tickAtMeasure(50);
expect(calculatedTick == 240 * 4 * 50);
}
test "bpm@tick /w bpm changes" {
var times = try MusicTime.init(120.0, 4, 4, 240, test_allocator);
defer times.deinit();
try times.bpms.append(BpmChange{ .bpm = 124.0, .tick = 682 });
try times.bpms.append(BpmChange{ .bpm = 182.0, .tick = 900 });
try times.bpms.append(BpmChange{ .bpm = 100.0, .tick = 2400 });
expect((try times.bpmAtTick(0)) == 120.0);
expect((try times.bpmAtTick(690)) == 124.0);
expect((try times.bpmAtTick(901)) == 182.0);
expect((try times.bpmAtTick(2500)) == 100.0);
}
|
src/muzig-time.zig
|
// DFS (Distributed File System) constants — presumably generated from the
// Win32 API metadata (lmdfs.h); confirm values against that header.
pub const FSCTL_DFS_BASE = @as(u32, 6);
// Volume state mask and values.
pub const DFS_VOLUME_STATES = @as(u32, 15);
pub const DFS_VOLUME_STATE_OK = @as(u32, 1);
pub const DFS_VOLUME_STATE_INCONSISTENT = @as(u32, 2);
pub const DFS_VOLUME_STATE_OFFLINE = @as(u32, 3);
pub const DFS_VOLUME_STATE_ONLINE = @as(u32, 4);
pub const DFS_VOLUME_STATE_RESYNCHRONIZE = @as(u32, 16);
pub const DFS_VOLUME_STATE_STANDBY = @as(u32, 32);
pub const DFS_VOLUME_STATE_FORCE_SYNC = @as(u32, 64);
// Volume flavor mask and values.
pub const DFS_VOLUME_FLAVORS = @as(u32, 768);
pub const DFS_VOLUME_FLAVOR_UNUSED1 = @as(u32, 0);
pub const DFS_VOLUME_FLAVOR_STANDALONE = @as(u32, 256);
pub const DFS_VOLUME_FLAVOR_AD_BLOB = @as(u32, 512);
pub const DFS_STORAGE_FLAVOR_UNUSED2 = @as(u32, 768);
// Storage (link target) state mask and values.
pub const DFS_STORAGE_STATES = @as(u32, 15);
pub const DFS_STORAGE_STATE_OFFLINE = @as(u32, 1);
pub const DFS_STORAGE_STATE_ONLINE = @as(u32, 2);
pub const DFS_STORAGE_STATE_ACTIVE = @as(u32, 4);
// Namespace property flag bits (see DFS_INFO_5 and later).
pub const DFS_PROPERTY_FLAG_INSITE_REFERRALS = @as(u32, 1);
pub const DFS_PROPERTY_FLAG_ROOT_SCALABILITY = @as(u32, 2);
pub const DFS_PROPERTY_FLAG_SITE_COSTING = @as(u32, 4);
pub const DFS_PROPERTY_FLAG_TARGET_FAILBACK = @as(u32, 8);
pub const DFS_PROPERTY_FLAG_CLUSTER_ENABLED = @as(u32, 16);
pub const DFS_PROPERTY_FLAG_ABDE = @as(u32, 32);
pub const DFS_ADD_VOLUME = @as(u32, 1);
pub const DFS_RESTORE_VOLUME = @as(u32, 2);
pub const NET_DFS_SETDC_FLAGS = @as(u32, 0);
pub const NET_DFS_SETDC_TIMEOUT = @as(u32, 1);
pub const NET_DFS_SETDC_INITPKT = @as(u32, 2);
pub const DFS_SITE_PRIMARY = @as(u32, 1);
pub const DFS_MOVE_FLAG_REPLACE_IF_EXISTS = @as(u32, 1);
pub const DFS_FORCE_REMOVE = @as(u32, 2147483648);
pub const FSCTL_DFS_GET_PKT_ENTRY_STATE = @as(u32, 401340);
//--------------------------------------------------------------------------------
// Section: Types (35)
//--------------------------------------------------------------------------------
/// Priority class for a DFS target (see DFS_TARGET_PRIORITY below).
pub const DFS_TARGET_PRIORITY_CLASS = enum(i32) {
InvalidPriorityClass = -1,
SiteCostNormalPriorityClass = 0,
GlobalHighPriorityClass = 1,
SiteCostHighPriorityClass = 2,
SiteCostLowPriorityClass = 3,
GlobalLowPriorityClass = 4,
};
// Aliases mirroring the C enumerator names from the Win32 headers.
pub const DfsInvalidPriorityClass = DFS_TARGET_PRIORITY_CLASS.InvalidPriorityClass;
pub const DfsSiteCostNormalPriorityClass = DFS_TARGET_PRIORITY_CLASS.SiteCostNormalPriorityClass;
pub const DfsGlobalHighPriorityClass = DFS_TARGET_PRIORITY_CLASS.GlobalHighPriorityClass;
pub const DfsSiteCostHighPriorityClass = DFS_TARGET_PRIORITY_CLASS.SiteCostHighPriorityClass;
pub const DfsSiteCostLowPriorityClass = DFS_TARGET_PRIORITY_CLASS.SiteCostLowPriorityClass;
pub const DfsGlobalLowPriorityClass = DFS_TARGET_PRIORITY_CLASS.GlobalLowPriorityClass;
/// Priority of a DFS target within referral responses.
pub const DFS_TARGET_PRIORITY = extern struct {
TargetPriorityClass: DFS_TARGET_PRIORITY_CLASS,
TargetPriorityRank: u16,
Reserved: u16,
};
// DFS_INFO_N structs are the per-level payloads used by NetDfsGetInfo /
// NetDfsSetInfo / NetDfsEnum; the numeric suffix is the information level.
pub const DFS_INFO_1 = extern struct {
EntryPath: ?PWSTR,
};
pub const DFS_INFO_2 = extern struct {
EntryPath: ?PWSTR,
Comment: ?PWSTR,
State: u32,
NumberOfStorages: u32,
};
/// One storage (link target) of a DFS entry.
pub const DFS_STORAGE_INFO = extern struct {
State: u32,
ServerName: ?PWSTR,
ShareName: ?PWSTR,
};
/// Like DFS_STORAGE_INFO, plus the target's priority.
pub const DFS_STORAGE_INFO_1 = extern struct {
State: u32,
ServerName: ?PWSTR,
ShareName: ?PWSTR,
TargetPriority: DFS_TARGET_PRIORITY,
};
pub const DFS_INFO_3 = extern struct {
EntryPath: ?PWSTR,
Comment: ?PWSTR,
State: u32,
NumberOfStorages: u32,
// Points at an array of NumberOfStorages entries.
Storage: ?*DFS_STORAGE_INFO,
};
pub const DFS_INFO_4 = extern struct {
EntryPath: ?PWSTR,
Comment: ?PWSTR,
State: u32,
Timeout: u32,
Guid: Guid,
NumberOfStorages: u32,
Storage: ?*DFS_STORAGE_INFO,
};
pub const DFS_INFO_5 = extern struct {
EntryPath: ?PWSTR,
Comment: ?PWSTR,
State: u32,
Timeout: u32,
Guid: Guid,
PropertyFlags: u32,
MetadataSize: u32,
NumberOfStorages: u32,
};
pub const DFS_INFO_6 = extern struct {
EntryPath: ?PWSTR,
Comment: ?PWSTR,
State: u32,
Timeout: u32,
Guid: Guid,
PropertyFlags: u32,
MetadataSize: u32,
NumberOfStorages: u32,
Storage: ?*DFS_STORAGE_INFO_1,
};
pub const DFS_INFO_7 = extern struct {
GenerationGuid: Guid,
};
pub const DFS_INFO_8 = extern struct {
EntryPath: ?PWSTR,
Comment: ?PWSTR,
State: u32,
Timeout: u32,
Guid: Guid,
PropertyFlags: u32,
MetadataSize: u32,
SdLengthReserved: u32,
pSecurityDescriptor: ?*SECURITY_DESCRIPTOR,
NumberOfStorages: u32,
};
pub const DFS_INFO_9 = extern struct {
EntryPath: ?PWSTR,
Comment: ?PWSTR,
State: u32,
Timeout: u32,
Guid: Guid,
PropertyFlags: u32,
MetadataSize: u32,
SdLengthReserved: u32,
pSecurityDescriptor: ?*SECURITY_DESCRIPTOR,
NumberOfStorages: u32,
Storage: ?*DFS_STORAGE_INFO_1,
};
pub const DFS_INFO_50 = extern struct {
NamespaceMajorVersion: u32,
NamespaceMinorVersion: u32,
NamespaceCapabilities: u64,
};
// Levels 100+ carry a single mutable property each (used with SetInfo).
pub const DFS_INFO_100 = extern struct {
Comment: ?PWSTR,
};
pub const DFS_INFO_101 = extern struct {
State: u32,
};
pub const DFS_INFO_102 = extern struct {
Timeout: u32,
};
pub const DFS_INFO_103 = extern struct {
PropertyFlagMask: u32,
PropertyFlags: u32,
};
pub const DFS_INFO_104 = extern struct {
TargetPriority: DFS_TARGET_PRIORITY,
};
pub const DFS_INFO_105 = extern struct {
Comment: ?PWSTR,
State: u32,
Timeout: u32,
PropertyFlagMask: u32,
PropertyFlags: u32,
};
pub const DFS_INFO_106 = extern struct {
State: u32,
TargetPriority: DFS_TARGET_PRIORITY,
};
pub const DFS_INFO_107 = extern struct {
Comment: ?PWSTR,
State: u32,
Timeout: u32,
PropertyFlagMask: u32,
PropertyFlags: u32,
SdLengthReserved: u32,
pSecurityDescriptor: ?*SECURITY_DESCRIPTOR,
};
pub const DFS_INFO_150 = extern struct {
SdLengthReserved: u32,
pSecurityDescriptor: ?*SECURITY_DESCRIPTOR,
};
pub const DFS_INFO_200 = extern struct {
FtDfsName: ?PWSTR,
};
pub const DFS_INFO_300 = extern struct {
Flags: u32,
DfsName: ?PWSTR,
};
pub const DFS_SITENAME_INFO = extern struct {
SiteFlags: u32,
SiteName: ?PWSTR,
};
pub const DFS_SITELIST_INFO = extern struct {
cSites: u32,
// C-style variable-length trailing array — presumably cSites entries
// follow in memory; confirm against the Win32 docs before indexing.
Site: [1]DFS_SITENAME_INFO,
};
/// Where a reported namespace version number originated.
pub const DFS_NAMESPACE_VERSION_ORIGIN = enum(i32) {
COMBINED = 0,
SERVER = 1,
DOMAIN = 2,
};
pub const DFS_NAMESPACE_VERSION_ORIGIN_COMBINED = DFS_NAMESPACE_VERSION_ORIGIN.COMBINED;
pub const DFS_NAMESPACE_VERSION_ORIGIN_SERVER = DFS_NAMESPACE_VERSION_ORIGIN.SERVER;
pub const DFS_NAMESPACE_VERSION_ORIGIN_DOMAIN = DFS_NAMESPACE_VERSION_ORIGIN.DOMAIN;
pub const DFS_SUPPORTED_NAMESPACE_VERSION_INFO = extern struct {
DomainDfsMajorVersion: u32,
DomainDfsMinorVersion: u32,
DomainDfsCapabilities: u64,
StandaloneDfsMajorVersion: u32,
StandaloneDfsMinorVersion: u32,
StandaloneDfsCapabilities: u64,
};
/// Argument block for the FSCTL_DFS_GET_PKT_ENTRY_STATE ioctl; the strings
/// are packed into the trailing Buffer.
pub const DFS_GET_PKT_ENTRY_STATE_ARG = extern struct {
DfsEntryPathLen: u16,
ServerNameLen: u16,
ShareNameLen: u16,
Level: u32,
Buffer: [1]u16,
};
// 32-bit layouts of the DFS_INFO structs: pointer-sized fields are u32.
// Only defined when compiling for 64-bit targets — presumably for a 64-bit
// process interpreting data produced by a 32-bit client; confirm usage.
pub const DFS_INFO_1_32 = switch(@import("../zig.zig").arch) {
.X64, .Arm64 => extern struct {
EntryPath: u32,
},
else => usize, // NOTE: this should be a @compileError but can't because of https://github.com/ziglang/zig/issues/9682
};
pub const DFS_INFO_2_32 = switch(@import("../zig.zig").arch) {
.X64, .Arm64 => extern struct {
EntryPath: u32,
Comment: u32,
State: u32,
NumberOfStorages: u32,
},
else => usize, // NOTE: this should be a @compileError but can't because of https://github.com/ziglang/zig/issues/9682
};
pub const DFS_STORAGE_INFO_0_32 = switch(@import("../zig.zig").arch) {
.X64, .Arm64 => extern struct {
State: u32,
ServerName: u32,
ShareName: u32,
},
else => usize, // NOTE: this should be a @compileError but can't because of https://github.com/ziglang/zig/issues/9682
};
pub const DFS_INFO_3_32 = switch(@import("../zig.zig").arch) {
.X64, .Arm64 => extern struct {
EntryPath: u32,
Comment: u32,
State: u32,
NumberOfStorages: u32,
Storage: u32,
},
else => usize, // NOTE: this should be a @compileError but can't because of https://github.com/ziglang/zig/issues/9682
};
pub const DFS_INFO_4_32 = switch(@import("../zig.zig").arch) {
.X64, .Arm64 => extern struct {
EntryPath: u32,
Comment: u32,
State: u32,
Timeout: u32,
Guid: Guid,
NumberOfStorages: u32,
Storage: u32,
},
else => usize, // NOTE: this should be a @compileError but can't because of https://github.com/ziglang/zig/issues/9682
};
//--------------------------------------------------------------------------------
// Section: Functions (22)
//--------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// DFS namespace management entry points in NETAPI32: add/remove links and
// (standalone / fault-tolerant) roots. Each returns a raw u32 status code —
// presumably NET_API_STATUS with 0 meaning success; confirm against the
// Microsoft NetDfs* documentation (lmdfs.h).
//------------------------------------------------------------------------------
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsAdd(
    DfsEntryPath: ?PWSTR,
    ServerName: ?PWSTR,
    ShareName: ?PWSTR,
    Comment: ?PWSTR,
    Flags: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsAddStdRoot(
    ServerName: ?PWSTR,
    RootShare: ?PWSTR,
    Comment: ?PWSTR,
    Flags: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsRemoveStdRoot(
    ServerName: ?PWSTR,
    RootShare: ?PWSTR,
    Flags: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsAddFtRoot(
    ServerName: ?PWSTR,
    RootShare: ?PWSTR,
    FtDfsName: ?PWSTR,
    Comment: ?PWSTR,
    Flags: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsRemoveFtRoot(
    ServerName: ?PWSTR,
    RootShare: ?PWSTR,
    FtDfsName: ?PWSTR,
    Flags: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsRemoveFtRootForced(
    DomainName: ?PWSTR,
    ServerName: ?PWSTR,
    RootShare: ?PWSTR,
    FtDfsName: ?PWSTR,
    Flags: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsRemove(
    DfsEntryPath: ?PWSTR,
    ServerName: ?PWSTR,
    ShareName: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
//------------------------------------------------------------------------------
// DFS enumeration, get/set info, move, and root-target functions. The
// `Buffer: ?*?*u8` out-parameters receive a buffer allocated by the API —
// presumably to be released with NetApiBufferFree; confirm against the
// Microsoft NetDfsEnum/NetDfsGetInfo documentation.
//------------------------------------------------------------------------------
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsEnum(
    DfsName: ?PWSTR,
    Level: u32,
    PrefMaxLen: u32,
    Buffer: ?*?*u8,
    EntriesRead: ?*u32,
    ResumeHandle: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsGetInfo(
    DfsEntryPath: ?PWSTR,
    ServerName: ?PWSTR,
    ShareName: ?PWSTR,
    Level: u32,
    Buffer: ?*?*u8,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsSetInfo(
    DfsEntryPath: ?PWSTR,
    ServerName: ?PWSTR,
    ShareName: ?PWSTR,
    Level: u32,
    Buffer: ?*u8,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsGetClientInfo(
    DfsEntryPath: ?PWSTR,
    ServerName: ?PWSTR,
    ShareName: ?PWSTR,
    Level: u32,
    Buffer: ?*?*u8,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsSetClientInfo(
    DfsEntryPath: ?PWSTR,
    ServerName: ?PWSTR,
    ShareName: ?PWSTR,
    Level: u32,
    Buffer: ?*u8,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsMove(
    OldDfsEntryPath: ?PWSTR,
    NewDfsEntryPath: ?PWSTR,
    Flags: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsAddRootTarget(
    pDfsPath: ?PWSTR,
    pTargetPath: ?PWSTR,
    MajorVersion: u32,
    pComment: ?PWSTR,
    Flags: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsRemoveRootTarget(
    pDfsPath: ?PWSTR,
    pTargetPath: ?PWSTR,
    Flags: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
//------------------------------------------------------------------------------
// Security-descriptor accessors for DFS namespaces / containers, plus the
// supported-namespace-version query. The Get* variants hand back an
// API-allocated SECURITY_DESCRIPTOR via a double pointer and its size via
// lpcbSecurityDescriptor — presumably released with NetApiBufferFree; confirm
// against Microsoft Learn.
//------------------------------------------------------------------------------
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsGetSecurity(
    DfsEntryPath: ?PWSTR,
    SecurityInformation: u32,
    ppSecurityDescriptor: ?*?*SECURITY_DESCRIPTOR,
    lpcbSecurityDescriptor: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsSetSecurity(
    DfsEntryPath: ?PWSTR,
    SecurityInformation: u32,
    pSecurityDescriptor: ?*SECURITY_DESCRIPTOR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsGetStdContainerSecurity(
    MachineName: ?PWSTR,
    SecurityInformation: u32,
    ppSecurityDescriptor: ?*?*SECURITY_DESCRIPTOR,
    lpcbSecurityDescriptor: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsSetStdContainerSecurity(
    MachineName: ?PWSTR,
    SecurityInformation: u32,
    pSecurityDescriptor: ?*SECURITY_DESCRIPTOR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsGetFtContainerSecurity(
    DomainName: ?PWSTR,
    SecurityInformation: u32,
    ppSecurityDescriptor: ?*?*SECURITY_DESCRIPTOR,
    lpcbSecurityDescriptor: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsSetFtContainerSecurity(
    DomainName: ?PWSTR,
    SecurityInformation: u32,
    pSecurityDescriptor: ?*SECURITY_DESCRIPTOR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
pub extern "NETAPI32" fn NetDfsGetSupportedNamespaceVersion(
    Origin: DFS_NAMESPACE_VERSION_ORIGIN,
    pName: ?PWSTR,
    ppVersionInfo: ?*?*DFS_SUPPORTED_NAMESPACE_VERSION_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
// Module handle for the generated A/W-alias boilerplate below; unused in this
// particular module (it carries no ANSI/Wide alias pairs), kept for structural
// consistency with sibling generated modules.
const thismodule = @This();
// No unicode aliases exist here, so every unicode_mode resolves to an empty
// namespace; the test-mode branch exists so aliases never collide when all
// modules are analyzed together under `zig test`.
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
    .ansi => struct {
    },
    .wide => struct {
    },
    .unspecified => if (@import("builtin").is_test) struct {
    } else struct {
    },
};
//--------------------------------------------------------------------------------
// Section: Imports (3)
//--------------------------------------------------------------------------------
const Guid = @import("../zig.zig").Guid;
const PWSTR = @import("../foundation.zig").PWSTR;
const SECURITY_DESCRIPTOR = @import("../security.zig").SECURITY_DESCRIPTOR;
test {
    // Force semantic analysis of every public declaration in this module.
    // The inline loop performs several comptime operations per declaration,
    // so raise the eval-branch quota proportionally first.
    @setEvalBranchQuota(
        @import("std").meta.declarations(@This()).len * 3
    );
    // reference all the pub declarations
    if (!@import("builtin").is_test) return;
    inline for (@import("std").meta.declarations(@This())) |decl| {
        if (decl.is_pub) {
            // BUG FIX: `_ = decl;` only discarded the Declaration metadata
            // struct and never touched the declaration itself, so nothing was
            // actually analyzed. @field references the declaration by name,
            // which is what the comment above intends.
            _ = @field(@This(), decl.name);
        }
    }
}
// ---------------------------------------------------------------------------
// (concatenation marker) end of: win32/storage/distributed_file_system.zig
// ---------------------------------------------------------------------------
// WS-Management (WinRM) client/plugin constants — presumably generated from
// the Windows SDK wsman.h metadata; confirm values against the SDK header.
pub const WSMAN_FLAG_REQUESTED_API_VERSION_1_0 = @as(u32, 0);
pub const WSMAN_FLAG_REQUESTED_API_VERSION_1_1 = @as(u32, 1);
pub const WSMAN_OPERATION_INFOV1 = @as(u32, 0);
pub const WSMAN_OPERATION_INFOV2 = @as(u32, 2864434397); // == 0xAABBCCDD (version marker value)
pub const WSMAN_DEFAULT_TIMEOUT_MS = @as(u32, 60000);
pub const WSMAN_FLAG_RECEIVE_RESULT_NO_MORE_DATA = @as(u32, 1);
pub const WSMAN_FLAG_RECEIVE_FLUSH = @as(u32, 2);
pub const WSMAN_FLAG_RECEIVE_RESULT_DATA_BOUNDARY = @as(u32, 4);
// NOTE(review): the two WSMAN_PLUGIN_PARAMS_* runs below both use small
// sequential ids (1..6 and 1..5) — presumably distinct option namespaces
// (per-operation vs. per-host parameters); confirm against wsman.h.
pub const WSMAN_PLUGIN_PARAMS_MAX_ENVELOPE_SIZE = @as(u32, 1);
pub const WSMAN_PLUGIN_PARAMS_TIMEOUT = @as(u32, 2);
pub const WSMAN_PLUGIN_PARAMS_REMAINING_RESULT_SIZE = @as(u32, 3);
pub const WSMAN_PLUGIN_PARAMS_LARGEST_RESULT_SIZE = @as(u32, 4);
pub const WSMAN_PLUGIN_PARAMS_GET_REQUESTED_LOCALE = @as(u32, 5);
pub const WSMAN_PLUGIN_PARAMS_GET_REQUESTED_DATA_LOCALE = @as(u32, 6);
pub const WSMAN_PLUGIN_PARAMS_SHAREDHOST = @as(u32, 1);
pub const WSMAN_PLUGIN_PARAMS_RUNAS_USER = @as(u32, 2);
pub const WSMAN_PLUGIN_PARAMS_AUTORESTART = @as(u32, 3);
pub const WSMAN_PLUGIN_PARAMS_HOSTIDLETIMEOUTSECONDS = @as(u32, 4);
pub const WSMAN_PLUGIN_PARAMS_NAME = @as(u32, 5);
pub const WSMAN_PLUGIN_STARTUP_REQUEST_RECEIVED = @as(u32, 0);
pub const WSMAN_PLUGIN_STARTUP_AUTORESTARTED_REBOOT = @as(u32, 1);
pub const WSMAN_PLUGIN_STARTUP_AUTORESTARTED_CRASH = @as(u32, 2);
pub const WSMAN_PLUGIN_SHUTDOWN_SYSTEM = @as(u32, 1);
pub const WSMAN_PLUGIN_SHUTDOWN_SERVICE = @as(u32, 2);
pub const WSMAN_PLUGIN_SHUTDOWN_IISHOST = @as(u32, 3);
pub const WSMAN_PLUGIN_SHUTDOWN_IDLETIMEOUT_ELAPSED = @as(u32, 4);
pub const WSMAN_FLAG_SEND_NO_MORE_DATA = @as(u32, 1);
// WS-Management error codes: HRESULT-style values in the WinRM facility
// (2150858752 == 0x80338000); each constant in this run increments by one.
// Names are preserved exactly as in the SDK, including SDK typos such as
// INSUFFCIENT_SELECTORS and ENDTO_ADDRESSS.
pub const ERROR_WSMAN_RESOURCE_NOT_FOUND = @as(u32, 2150858752);
pub const ERROR_WSMAN_INVALID_ACTIONURI = @as(u32, 2150858753);
pub const ERROR_WSMAN_INVALID_URI = @as(u32, 2150858754);
pub const ERROR_WSMAN_PROVIDER_FAILURE = @as(u32, 2150858755);
pub const ERROR_WSMAN_BATCH_COMPLETE = @as(u32, 2150858756);
pub const ERROR_WSMAN_CONFIG_CORRUPTED = @as(u32, 2150858757);
pub const ERROR_WSMAN_PULL_IN_PROGRESS = @as(u32, 2150858758);
pub const ERROR_WSMAN_ENUMERATION_CLOSED = @as(u32, 2150858759);
pub const ERROR_WSMAN_SUBSCRIPTION_CLOSED = @as(u32, 2150858760);
pub const ERROR_WSMAN_SUBSCRIPTION_CLOSE_IN_PROGRESS = @as(u32, 2150858761);
pub const ERROR_WSMAN_SUBSCRIPTION_CLIENT_DID_NOT_CALL_WITHIN_HEARTBEAT = @as(u32, 2150858762);
pub const ERROR_WSMAN_SUBSCRIPTION_NO_HEARTBEAT = @as(u32, 2150858763);
pub const ERROR_WSMAN_UNSUPPORTED_TIMEOUT = @as(u32, 2150858764);
pub const ERROR_WSMAN_SOAP_VERSION_MISMATCH = @as(u32, 2150858765);
pub const ERROR_WSMAN_SOAP_DATA_ENCODING_UNKNOWN = @as(u32, 2150858766);
pub const ERROR_WSMAN_INVALID_MESSAGE_INFORMATION_HEADER = @as(u32, 2150858767);
pub const ERROR_WSMAN_SOAP_FAULT_MUST_UNDERSTAND = @as(u32, 2150858768);
pub const ERROR_WSMAN_MESSAGE_INFORMATION_HEADER_REQUIRED = @as(u32, 2150858769);
pub const ERROR_WSMAN_DESTINATION_UNREACHABLE = @as(u32, 2150858770);
pub const ERROR_WSMAN_ACTION_NOT_SUPPORTED = @as(u32, 2150858771);
pub const ERROR_WSMAN_ENDPOINT_UNAVAILABLE = @as(u32, 2150858772);
pub const ERROR_WSMAN_INVALID_REPRESENTATION = @as(u32, 2150858773);
pub const ERROR_WSMAN_ENUMERATE_INVALID_EXPIRATION_TIME = @as(u32, 2150858774);
pub const ERROR_WSMAN_ENUMERATE_UNSUPPORTED_EXPIRATION_TIME = @as(u32, 2150858775);
pub const ERROR_WSMAN_ENUMERATE_FILTERING_NOT_SUPPORTED = @as(u32, 2150858776);
pub const ERROR_WSMAN_ENUMERATE_FILTER_DIALECT_REQUESTED_UNAVAILABLE = @as(u32, 2150858777);
pub const ERROR_WSMAN_ENUMERATE_CANNOT_PROCESS_FILTER = @as(u32, 2150858778);
pub const ERROR_WSMAN_ENUMERATE_INVALID_ENUMERATION_CONTEXT = @as(u32, 2150858779);
pub const ERROR_WSMAN_ENUMERATE_TIMED_OUT = @as(u32, 2150858780);
pub const ERROR_WSMAN_ENUMERATE_UNABLE_TO_RENEW = @as(u32, 2150858781);
pub const ERROR_WSMAN_EVENTING_DELIVERY_MODE_REQUESTED_UNAVAILABLE = @as(u32, 2150858782);
pub const ERROR_WSMAN_EVENTING_INVALID_EXPIRATION_TIME = @as(u32, 2150858783);
pub const ERROR_WSMAN_EVENTING_UNSUPPORTED_EXPIRATION_TYPE = @as(u32, 2150858784);
pub const ERROR_WSMAN_EVENTING_FILTERING_NOT_SUPPORTED = @as(u32, 2150858785);
pub const ERROR_WSMAN_EVENTING_FILTERING_REQUESTED_UNAVAILABLE = @as(u32, 2150858786);
pub const ERROR_WSMAN_EVENTING_SOURCE_UNABLE_TO_PROCESS = @as(u32, 2150858787);
pub const ERROR_WSMAN_EVENTING_UNABLE_TO_RENEW = @as(u32, 2150858788);
pub const ERROR_WSMAN_EVENTING_INVALID_MESSAGE = @as(u32, 2150858789);
pub const ERROR_WSMAN_ENVELOPE_TOO_LARGE = @as(u32, 2150858790);
pub const ERROR_WSMAN_INVALID_SOAP_BODY = @as(u32, 2150858791);
pub const ERROR_WSMAN_INVALID_RESUMPTION_CONTEXT = @as(u32, 2150858792);
pub const ERROR_WSMAN_OPERATION_TIMEDOUT = @as(u32, 2150858793);
pub const ERROR_WSMAN_RESUMPTION_NOT_SUPPORTED = @as(u32, 2150858794);
pub const ERROR_WSMAN_RESUMPTION_TYPE_NOT_SUPPORTED = @as(u32, 2150858795);
pub const ERROR_WSMAN_UNSUPPORTED_ENCODING = @as(u32, 2150858796);
pub const ERROR_WSMAN_URI_LIMIT = @as(u32, 2150858797);
pub const ERROR_WSMAN_INVALID_PROPOSED_ID = @as(u32, 2150858798);
pub const ERROR_WSMAN_INVALID_BATCH_PARAMETER = @as(u32, 2150858799);
pub const ERROR_WSMAN_NO_ACK = @as(u32, 2150858800);
pub const ERROR_WSMAN_ACTION_MISMATCH = @as(u32, 2150858801);
pub const ERROR_WSMAN_CONCURRENCY = @as(u32, 2150858802);
pub const ERROR_WSMAN_ALREADY_EXISTS = @as(u32, 2150858803);
pub const ERROR_WSMAN_DELIVERY_REFUSED = @as(u32, 2150858804);
pub const ERROR_WSMAN_ENCODING_LIMIT = @as(u32, 2150858805);
pub const ERROR_WSMAN_FAILED_AUTHENTICATION = @as(u32, 2150858806);
pub const ERROR_WSMAN_INCOMPATIBLE_EPR = @as(u32, 2150858807);
pub const ERROR_WSMAN_INVALID_BOOKMARK = @as(u32, 2150858808);
pub const ERROR_WSMAN_INVALID_OPTIONS = @as(u32, 2150858809);
pub const ERROR_WSMAN_INVALID_PARAMETER = @as(u32, 2150858810);
pub const ERROR_WSMAN_INVALID_RESOURCE_URI = @as(u32, 2150858811);
pub const ERROR_WSMAN_INVALID_SYSTEM = @as(u32, 2150858812);
pub const ERROR_WSMAN_INVALID_SELECTORS = @as(u32, 2150858813);
pub const ERROR_WSMAN_METADATA_REDIRECT = @as(u32, 2150858814);
pub const ERROR_WSMAN_QUOTA_LIMIT = @as(u32, 2150858815);
pub const ERROR_WSMAN_RENAME_FAILURE = @as(u32, 2150858816);
pub const ERROR_WSMAN_SCHEMA_VALIDATION_ERROR = @as(u32, 2150858817);
pub const ERROR_WSMAN_UNSUPPORTED_FEATURE = @as(u32, 2150858818);
pub const ERROR_WSMAN_INVALID_XML = @as(u32, 2150858819);
pub const ERROR_WSMAN_INVALID_KEY = @as(u32, 2150858820);
pub const ERROR_WSMAN_DELIVER_IN_PROGRESS = @as(u32, 2150858821);
pub const ERROR_WSMAN_SYSTEM_NOT_FOUND = @as(u32, 2150858822);
pub const ERROR_WSMAN_MAX_ENVELOPE_SIZE = @as(u32, 2150858823);
pub const ERROR_WSMAN_MAX_ENVELOPE_SIZE_EXCEEDED = @as(u32, 2150858824);
pub const ERROR_WSMAN_SERVER_ENVELOPE_LIMIT = @as(u32, 2150858825);
pub const ERROR_WSMAN_SELECTOR_LIMIT = @as(u32, 2150858826);
pub const ERROR_WSMAN_OPTION_LIMIT = @as(u32, 2150858827);
pub const ERROR_WSMAN_CHARACTER_SET = @as(u32, 2150858828);
pub const ERROR_WSMAN_UNREPORTABLE_SUCCESS = @as(u32, 2150858829);
pub const ERROR_WSMAN_WHITESPACE = @as(u32, 2150858830);
pub const ERROR_WSMAN_FILTERING_REQUIRED = @as(u32, 2150858831);
pub const ERROR_WSMAN_BOOKMARK_EXPIRED = @as(u32, 2150858832);
pub const ERROR_WSMAN_OPTIONS_NOT_SUPPORTED = @as(u32, 2150858833);
pub const ERROR_WSMAN_OPTIONS_INVALID_NAME = @as(u32, 2150858834);
pub const ERROR_WSMAN_OPTIONS_INVALID_VALUE = @as(u32, 2150858835);
pub const ERROR_WSMAN_PARAMETER_TYPE_MISMATCH = @as(u32, 2150858836);
pub const ERROR_WSMAN_INVALID_PARAMETER_NAME = @as(u32, 2150858837);
pub const ERROR_WSMAN_INVALID_XML_VALUES = @as(u32, 2150858838);
pub const ERROR_WSMAN_INVALID_XML_MISSING_VALUES = @as(u32, 2150858839);
pub const ERROR_WSMAN_INVALID_XML_NAMESPACE = @as(u32, 2150858840);
pub const ERROR_WSMAN_INVALID_XML_FRAGMENT = @as(u32, 2150858841);
pub const ERROR_WSMAN_INSUFFCIENT_SELECTORS = @as(u32, 2150858842);
pub const ERROR_WSMAN_UNEXPECTED_SELECTORS = @as(u32, 2150858843);
pub const ERROR_WSMAN_SELECTOR_TYPEMISMATCH = @as(u32, 2150858844);
pub const ERROR_WSMAN_INVALID_SELECTOR_VALUE = @as(u32, 2150858845);
pub const ERROR_WSMAN_AMBIGUOUS_SELECTORS = @as(u32, 2150858846);
pub const ERROR_WSMAN_DUPLICATE_SELECTORS = @as(u32, 2150858847);
pub const ERROR_WSMAN_INVALID_TARGET_SELECTORS = @as(u32, 2150858848);
pub const ERROR_WSMAN_INVALID_TARGET_RESOURCEURI = @as(u32, 2150858849);
pub const ERROR_WSMAN_INVALID_TARGET_SYSTEM = @as(u32, 2150858850);
pub const ERROR_WSMAN_TARGET_ALREADY_EXISTS = @as(u32, 2150858851);
pub const ERROR_WSMAN_AUTHORIZATION_MODE_NOT_SUPPORTED = @as(u32, 2150858852);
pub const ERROR_WSMAN_ACK_NOT_SUPPORTED = @as(u32, 2150858853);
pub const ERROR_WSMAN_OPERATION_TIMEOUT_NOT_SUPPORTED = @as(u32, 2150858854);
pub const ERROR_WSMAN_LOCALE_NOT_SUPPORTED = @as(u32, 2150858855);
pub const ERROR_WSMAN_EXPIRATION_TIME_NOT_SUPPORTED = @as(u32, 2150858856);
pub const ERROR_WSMAN_DELIVERY_RETRIES_NOT_SUPPORTED = @as(u32, 2150858857);
pub const ERROR_WSMAN_HEARTBEATS_NOT_SUPPORTED = @as(u32, 2150858858);
pub const ERROR_WSMAN_BOOKMARKS_NOT_SUPPORTED = @as(u32, 2150858859);
pub const ERROR_WSMAN_MAXITEMS_NOT_SUPPORTED = @as(u32, 2150858860);
pub const ERROR_WSMAN_MAXTIME_NOT_SUPPORTED = @as(u32, 2150858861);
pub const ERROR_WSMAN_MAXENVELOPE_SIZE_NOT_SUPPORTED = @as(u32, 2150858862);
pub const ERROR_WSMAN_MAXENVELOPE_POLICY_NOT_SUPPORTED = @as(u32, 2150858863);
pub const ERROR_WSMAN_FILTERING_REQUIRED_NOT_SUPPORTED = @as(u32, 2150858864);
pub const ERROR_WSMAN_INSECURE_ADDRESS_NOT_SUPPORTED = @as(u32, 2150858865);
pub const ERROR_WSMAN_FORMAT_MISMATCH_NOT_SUPPORTED = @as(u32, 2150858866);
pub const ERROR_WSMAN_FORMAT_SECURITY_TOKEN_NOT_SUPPORTED = @as(u32, 2150858867);
pub const ERROR_WSMAN_BAD_METHOD = @as(u32, 2150858868);
pub const ERROR_WSMAN_UNSUPPORTED_MEDIA = @as(u32, 2150858869);
pub const ERROR_WSMAN_UNSUPPORTED_ADDRESSING_MODE = @as(u32, 2150858870);
pub const ERROR_WSMAN_FRAGMENT_TRANSFER_NOT_SUPPORTED = @as(u32, 2150858871);
pub const ERROR_WSMAN_ENUMERATION_INITIALIZING = @as(u32, 2150858872);
pub const ERROR_WSMAN_CONNECTOR_GET = @as(u32, 2150858873);
pub const ERROR_WSMAN_URI_QUERY_STRING_SYNTAX_ERROR = @as(u32, 2150858874);
pub const ERROR_WSMAN_INEXISTENT_MAC_ADDRESS = @as(u32, 2150858875);
pub const ERROR_WSMAN_NO_UNICAST_ADDRESSES = @as(u32, 2150858876);
pub const ERROR_WSMAN_NO_DHCP_ADDRESSES = @as(u32, 2150858877);
pub const ERROR_WSMAN_MIN_ENVELOPE_SIZE = @as(u32, 2150858878);
pub const ERROR_WSMAN_EPR_NESTING_EXCEEDED = @as(u32, 2150858879);
pub const ERROR_WSMAN_REQUEST_INIT_ERROR = @as(u32, 2150858880);
pub const ERROR_WSMAN_INVALID_TIMEOUT_HEADER = @as(u32, 2150858881);
pub const ERROR_WSMAN_CERT_NOT_FOUND = @as(u32, 2150858882);
pub const ERROR_WSMAN_PLUGIN_FAILED = @as(u32, 2150858883);
pub const ERROR_WSMAN_ENUMERATION_INVALID = @as(u32, 2150858884);
pub const ERROR_WSMAN_CONFIG_CANNOT_CHANGE_MUTUAL = @as(u32, 2150858885);
pub const ERROR_WSMAN_ENUMERATION_MODE_UNSUPPORTED = @as(u32, 2150858886);
pub const ERROR_WSMAN_MUSTUNDERSTAND_ON_LOCALE_UNSUPPORTED = @as(u32, 2150858887);
pub const ERROR_WSMAN_POLICY_CORRUPTED = @as(u32, 2150858888);
pub const ERROR_WSMAN_LISTENER_ADDRESS_INVALID = @as(u32, 2150858889);
pub const ERROR_WSMAN_CONFIG_CANNOT_CHANGE_GPO_CONTROLLED_SETTING = @as(u32, 2150858890);
pub const ERROR_WSMAN_EVENTING_CONCURRENT_CLIENT_RECEIVE = @as(u32, 2150858891);
pub const ERROR_WSMAN_EVENTING_FAST_SENDER = @as(u32, 2150858892);
pub const ERROR_WSMAN_EVENTING_INSECURE_PUSHSUBSCRIPTION_CONNECTION = @as(u32, 2150858893);
pub const ERROR_WSMAN_EVENTING_INVALID_EVENTSOURCE = @as(u32, 2150858894);
pub const ERROR_WSMAN_EVENTING_NOMATCHING_LISTENER = @as(u32, 2150858895);
pub const ERROR_WSMAN_FRAGMENT_DIALECT_REQUESTED_UNAVAILABLE = @as(u32, 2150858896);
pub const ERROR_WSMAN_MISSING_FRAGMENT_PATH = @as(u32, 2150858897);
pub const ERROR_WSMAN_INVALID_FRAGMENT_DIALECT = @as(u32, 2150858898);
pub const ERROR_WSMAN_INVALID_FRAGMENT_PATH = @as(u32, 2150858899);
pub const ERROR_WSMAN_EVENTING_INCOMPATIBLE_BATCHPARAMS_AND_DELIVERYMODE = @as(u32, 2150858900);
pub const ERROR_WSMAN_EVENTING_LOOPBACK_TESTFAILED = @as(u32, 2150858901);
pub const ERROR_WSMAN_EVENTING_INVALID_ENDTO_ADDRESSS = @as(u32, 2150858902);
pub const ERROR_WSMAN_EVENTING_INVALID_INCOMING_EVENT_PACKET_HEADER = @as(u32, 2150858903);
pub const ERROR_WSMAN_SESSION_ALREADY_CLOSED = @as(u32, 2150858904);
pub const ERROR_WSMAN_SUBSCRIPTION_LISTENER_NOLONGERVALID = @as(u32, 2150858905);
pub const ERROR_WSMAN_PROVIDER_LOAD_FAILED = @as(u32, 2150858906);
pub const ERROR_WSMAN_EVENTING_SUBSCRIPTIONCLOSED_BYREMOTESERVICE = @as(u32, 2150858907);
pub const ERROR_WSMAN_EVENTING_DELIVERYFAILED_FROMSOURCE = @as(u32, 2150858908);
pub const ERROR_WSMAN_SECURITY_UNMAPPED = @as(u32, 2150858909);
pub const ERROR_WSMAN_EVENTING_SUBSCRIPTION_CANCELLED_BYSOURCE = @as(u32, 2150858910);
pub const ERROR_WSMAN_INVALID_HOSTNAME_PATTERN = @as(u32, 2150858911);
pub const ERROR_WSMAN_EVENTING_MISSING_NOTIFYTO = @as(u32, 2150858912);
pub const ERROR_WSMAN_EVENTING_MISSING_NOTIFYTO_ADDRESSS = @as(u32, 2150858913);
pub const ERROR_WSMAN_EVENTING_INVALID_NOTIFYTO_ADDRESSS = @as(u32, 2150858914);
pub const ERROR_WSMAN_EVENTING_INVALID_LOCALE_IN_DELIVERY = @as(u32, 2150858915);
pub const ERROR_WSMAN_EVENTING_INVALID_HEARTBEAT = @as(u32, 2150858916);
pub const ERROR_WSMAN_MACHINE_OPTION_REQUIRED = @as(u32, 2150858917);
pub const ERROR_WSMAN_UNSUPPORTED_FEATURE_OPTIONS = @as(u32, 2150858918);
pub const ERROR_WSMAN_BATCHSIZE_TOO_SMALL = @as(u32, 2150858919);
pub const ERROR_WSMAN_EVENTING_DELIVERY_MODE_REQUESTED_INVALID = @as(u32, 2150858920);
pub const ERROR_WSMAN_PROVSYS_NOT_SUPPORTED = @as(u32, 2150858921);
pub const ERROR_WSMAN_PUSH_SUBSCRIPTION_CONFIG_INVALID = @as(u32, 2150858922);
pub const ERROR_WSMAN_CREDS_PASSED_WITH_NO_AUTH_FLAG = @as(u32, 2150858923);
// WS-Management error codes, continued: mostly client-side, WinRS (remote
// shell), and certificate-mapping errors. The facility run continues
// unbroken (2150858924 == 0x803380AC), still incrementing by one per entry.
pub const ERROR_WSMAN_CLIENT_INVALID_FLAG = @as(u32, 2150858924);
pub const ERROR_WSMAN_CLIENT_MULTIPLE_AUTH_FLAGS = @as(u32, 2150858925);
pub const ERROR_WSMAN_CLIENT_SPN_WRONG_AUTH = @as(u32, 2150858926);
pub const ERROR_WSMAN_CLIENT_CERT_UNNEEDED_CREDS = @as(u32, 2150858927);
pub const ERROR_WSMAN_CLIENT_USERNAME_PASSWORD_NEEDED = @as(u32, 2150858928);
pub const ERROR_WSMAN_CLIENT_CERT_UNNEEDED_USERNAME = @as(u32, 2150858929);
pub const ERROR_WSMAN_CLIENT_CREDENTIALS_NEEDED = @as(u32, 2150858930);
pub const ERROR_WSMAN_CLIENT_CREDENTIALS_FLAG_NEEDED = @as(u32, 2150858931);
pub const ERROR_WSMAN_CLIENT_CERT_NEEDED = @as(u32, 2150858932);
pub const ERROR_WSMAN_CLIENT_CERT_UNKNOWN_TYPE = @as(u32, 2150858933);
pub const ERROR_WSMAN_CLIENT_CERT_UNKNOWN_LOCATION = @as(u32, 2150858934);
pub const ERROR_WSMAN_CLIENT_INVALID_CERT = @as(u32, 2150858935);
pub const ERROR_WSMAN_CLIENT_LOCAL_INVALID_CREDS = @as(u32, 2150858936);
pub const ERROR_WSMAN_CLIENT_LOCAL_INVALID_CONNECTION_OPTIONS = @as(u32, 2150858937);
pub const ERROR_WSMAN_CLIENT_CREATESESSION_NULL_PARAM = @as(u32, 2150858938);
pub const ERROR_WSMAN_CLIENT_ENUMERATE_NULL_PARAM = @as(u32, 2150858939);
pub const ERROR_WSMAN_CLIENT_SUBSCRIBE_NULL_PARAM = @as(u32, 2150858940);
pub const ERROR_WSMAN_CLIENT_NULL_RESULT_PARAM = @as(u32, 2150858941);
pub const ERROR_WSMAN_CLIENT_NO_HANDLE = @as(u32, 2150858942);
pub const ERROR_WSMAN_CLIENT_BLANK_URI = @as(u32, 2150858943);
pub const ERROR_WSMAN_CLIENT_INVALID_RESOURCE_LOCATOR = @as(u32, 2150858944);
pub const ERROR_WSMAN_CLIENT_BLANK_INPUT_XML = @as(u32, 2150858945);
pub const ERROR_WSMAN_CLIENT_BATCH_ITEMS_TOO_SMALL = @as(u32, 2150858946);
pub const ERROR_WSMAN_CLIENT_MAX_CHARS_TOO_SMALL = @as(u32, 2150858947);
pub const ERROR_WSMAN_CLIENT_BLANK_ACTION_URI = @as(u32, 2150858948);
pub const ERROR_WSMAN_CLIENT_ZERO_HEARTBEAT = @as(u32, 2150858949);
pub const ERROR_WSMAN_CLIENT_MULTIPLE_DELIVERY_MODES = @as(u32, 2150858950);
pub const ERROR_WSMAN_CLIENT_MULTIPLE_ENVELOPE_POLICIES = @as(u32, 2150858951);
pub const ERROR_WSMAN_CLIENT_UNKNOWN_EXPIRATION_TYPE = @as(u32, 2150858952);
pub const ERROR_WSMAN_CLIENT_MISSING_EXPIRATION = @as(u32, 2150858953);
pub const ERROR_WSMAN_CLIENT_PULL_INVALID_FLAGS = @as(u32, 2150858954);
pub const ERROR_WSMAN_CLIENT_PUSH_UNSUPPORTED_TRANSPORT = @as(u32, 2150858955);
pub const ERROR_WSMAN_CLIENT_PUSH_HOST_TOO_LONG = @as(u32, 2150858956);
pub const ERROR_WSMAN_CLIENT_COMPRESSION_INVALID_OPTION = @as(u32, 2150858957);
pub const ERROR_WSMAN_CLIENT_DELIVERENDSUBSCRIPTION_NULL_PARAM = @as(u32, 2150858958);
pub const ERROR_WSMAN_CLIENT_DELIVEREVENTS_NULL_PARAM = @as(u32, 2150858959);
pub const ERROR_WSMAN_CLIENT_GETBOOKMARK_NULL_PARAM = @as(u32, 2150858960);
pub const ERROR_WSMAN_CLIENT_DECODEOBJECT_NULL_PARAM = @as(u32, 2150858961);
pub const ERROR_WSMAN_CLIENT_ENCODEOBJECT_NULL_PARAM = @as(u32, 2150858962);
pub const ERROR_WSMAN_CLIENT_ENUMERATORADDOBJECT_NULL_PARAM = @as(u32, 2150858963);
pub const ERROR_WSMAN_CLIENT_ENUMERATORNEXTOBJECT_NULL_PARAM = @as(u32, 2150858964);
pub const ERROR_WSMAN_CLIENT_CONSTRUCTERROR_NULL_PARAM = @as(u32, 2150858965);
pub const ERROR_WSMAN_SERVER_NONPULLSUBSCRIBE_NULL_PARAM = @as(u32, 2150858966);
pub const ERROR_WSMAN_CLIENT_UNENCRYPTED_HTTP_ONLY = @as(u32, 2150858967);
pub const ERROR_WSMAN_CANNOT_USE_CERTIFICATES_FOR_HTTP = @as(u32, 2150858968);
pub const ERROR_WSMAN_CONNECTIONSTR_INVALID = @as(u32, 2150858969);
pub const ERROR_WSMAN_TRANSPORT_NOT_SUPPORTED = @as(u32, 2150858970);
pub const ERROR_WSMAN_PORT_INVALID = @as(u32, 2150858971);
pub const ERROR_WSMAN_CONFIG_PORT_INVALID = @as(u32, 2150858972);
pub const ERROR_WSMAN_SENDHEARBEAT_EMPTY_ENUMERATOR = @as(u32, 2150858973);
pub const ERROR_WSMAN_CLIENT_UNENCRYPTED_DISABLED = @as(u32, 2150858974);
pub const ERROR_WSMAN_CLIENT_BASIC_AUTHENTICATION_DISABLED = @as(u32, 2150858975);
pub const ERROR_WSMAN_CLIENT_DIGEST_AUTHENTICATION_DISABLED = @as(u32, 2150858976);
pub const ERROR_WSMAN_CLIENT_NEGOTIATE_AUTHENTICATION_DISABLED = @as(u32, 2150858977);
pub const ERROR_WSMAN_CLIENT_KERBEROS_AUTHENTICATION_DISABLED = @as(u32, 2150858978);
pub const ERROR_WSMAN_CLIENT_CERTIFICATES_AUTHENTICATION_DISABLED = @as(u32, 2150858979);
pub const ERROR_WSMAN_SERVER_NOT_TRUSTED = @as(u32, 2150858980);
pub const ERROR_WSMAN_EXPLICIT_CREDENTIALS_REQUIRED = @as(u32, 2150858981);
pub const ERROR_WSMAN_CERT_THUMBPRINT_NOT_BLANK = @as(u32, 2150858982);
pub const ERROR_WSMAN_CERT_THUMBPRINT_BLANK = @as(u32, 2150858983);
pub const ERROR_WSMAN_CONFIG_CANNOT_SHARE_SSL_CONFIG = @as(u32, 2150858984);
pub const ERROR_WSMAN_CONFIG_CERT_CN_DOES_NOT_MATCH_HOSTNAME = @as(u32, 2150858985);
pub const ERROR_WSMAN_CONFIG_HOSTNAME_CHANGE_WITHOUT_CERT = @as(u32, 2150858986);
pub const ERROR_WSMAN_CONFIG_THUMBPRINT_SHOULD_BE_EMPTY = @as(u32, 2150858987);
pub const ERROR_WSMAN_INVALID_IPFILTER = @as(u32, 2150858988);
pub const ERROR_WSMAN_CANNOT_CHANGE_KEYS = @as(u32, 2150858989);
pub const ERROR_WSMAN_CERT_INVALID_USAGE = @as(u32, 2150858990);
pub const ERROR_WSMAN_RESPONSE_NO_RESULTS = @as(u32, 2150858991);
pub const ERROR_WSMAN_CREATE_RESPONSE_NO_EPR = @as(u32, 2150858992);
pub const ERROR_WSMAN_RESPONSE_INVALID_ENUMERATION_CONTEXT = @as(u32, 2150858993);
pub const ERROR_WSMAN_RESPONSE_NO_XML_FRAGMENT_WRAPPER = @as(u32, 2150858994);
pub const ERROR_WSMAN_RESPONSE_INVALID_MESSAGE_INFORMATION_HEADER = @as(u32, 2150858995);
pub const ERROR_WSMAN_RESPONSE_NO_SOAP_HEADER_BODY = @as(u32, 2150858996);
pub const ERROR_WSMAN_HTTP_NO_RESPONSE_DATA = @as(u32, 2150858997);
pub const ERROR_WSMAN_RESPONSE_INVALID_SOAP_FAULT = @as(u32, 2150858998);
pub const ERROR_WSMAN_HTTP_INVALID_CONTENT_TYPE_IN_RESPONSE_DATA = @as(u32, 2150858999);
pub const ERROR_WSMAN_HTTP_CONTENT_TYPE_MISSMATCH_RESPONSE_DATA = @as(u32, 2150859000);
pub const ERROR_WSMAN_CANNOT_DECRYPT = @as(u32, 2150859001);
pub const ERROR_WSMAN_INVALID_URI_WMI_SINGLETON = @as(u32, 2150859002);
pub const ERROR_WSMAN_INVALID_URI_WMI_ENUM_WQL = @as(u32, 2150859003);
pub const ERROR_WSMAN_NO_IDENTIFY_FOR_LOCAL_SESSION = @as(u32, 2150859004);
pub const ERROR_WSMAN_NO_PUSH_SUBSCRIPTION_FOR_LOCAL_SESSION = @as(u32, 2150859005);
pub const ERROR_WSMAN_INVALID_SUBSCRIPTION_MANAGER = @as(u32, 2150859006);
pub const ERROR_WSMAN_NON_PULL_SUBSCRIPTION_NOT_SUPPORTED = @as(u32, 2150859007);
pub const ERROR_WSMAN_WMI_MAX_NESTED = @as(u32, 2150859008);
pub const ERROR_WSMAN_REMOTE_CIMPATH_NOT_SUPPORTED = @as(u32, 2150859009);
pub const ERROR_WSMAN_WMI_PROVIDER_NOT_CAPABLE = @as(u32, 2150859010);
pub const ERROR_WSMAN_WMI_INVALID_VALUE = @as(u32, 2150859011);
pub const ERROR_WSMAN_WMI_SVC_ACCESS_DENIED = @as(u32, 2150859012);
pub const ERROR_WSMAN_WMI_PROVIDER_ACCESS_DENIED = @as(u32, 2150859013);
pub const ERROR_WSMAN_WMI_CANNOT_CONNECT_ACCESS_DENIED = @as(u32, 2150859014);
pub const ERROR_WSMAN_INVALID_FILTER_XML = @as(u32, 2150859015);
pub const ERROR_WSMAN_ENUMERATE_WMI_INVALID_KEY = @as(u32, 2150859016);
pub const ERROR_WSMAN_INVALID_FRAGMENT_PATH_BLANK = @as(u32, 2150859017);
pub const ERROR_WSMAN_INVALID_CHARACTERS_IN_RESPONSE = @as(u32, 2150859018);
pub const ERROR_WSMAN_KERBEROS_IPADDRESS = @as(u32, 2150859019);
pub const ERROR_WSMAN_CLIENT_WORKGROUP_NO_KERBEROS = @as(u32, 2150859020);
pub const ERROR_WSMAN_INVALID_BATCH_SETTINGS_PARAMETER = @as(u32, 2150859021);
pub const ERROR_WSMAN_SERVER_DESTINATION_LOCALHOST = @as(u32, 2150859022);
pub const ERROR_WSMAN_UNKNOWN_HTTP_STATUS_RETURNED = @as(u32, 2150859023);
pub const ERROR_WSMAN_UNSUPPORTED_HTTP_STATUS_REDIRECT = @as(u32, 2150859024);
pub const ERROR_WSMAN_HTTP_REQUEST_TOO_LARGE_STATUS = @as(u32, 2150859025);
pub const ERROR_WSMAN_HTTP_SERVICE_UNAVAILABLE_STATUS = @as(u32, 2150859026);
pub const ERROR_WSMAN_HTTP_NOT_FOUND_STATUS = @as(u32, 2150859027);
pub const ERROR_WSMAN_EVENTING_MISSING_LOCALE_IN_DELIVERY = @as(u32, 2150859028);
pub const ERROR_WSMAN_QUICK_CONFIG_FAILED_CERT_REQUIRED = @as(u32, 2150859029);
pub const ERROR_WSMAN_QUICK_CONFIG_FIREWALL_EXCEPTIONS_DISALLOWED = @as(u32, 2150859030);
pub const ERROR_WSMAN_QUICK_CONFIG_LOCAL_POLICY_CHANGE_DISALLOWED = @as(u32, 2150859031);
pub const ERROR_WSMAN_INVALID_SELECTOR_NAME = @as(u32, 2150859032);
pub const ERROR_WSMAN_ENCODING_TYPE = @as(u32, 2150859033);
pub const ERROR_WSMAN_ENDPOINT_UNAVAILABLE_INVALID_VALUE = @as(u32, 2150859034);
pub const ERROR_WSMAN_INVALID_HEADER = @as(u32, 2150859035);
pub const ERROR_WSMAN_ENUMERATE_UNSUPPORTED_EXPIRATION_TYPE = @as(u32, 2150859036);
pub const ERROR_WSMAN_MAX_ELEMENTS_NOT_SUPPORTED = @as(u32, 2150859037);
pub const ERROR_WSMAN_WMI_PROVIDER_INVALID_PARAMETER = @as(u32, 2150859038);
pub const ERROR_WSMAN_CLIENT_MULTIPLE_ENUM_MODE_FLAGS = @as(u32, 2150859039);
pub const ERROR_WINRS_CLIENT_INVALID_FLAG = @as(u32, 2150859040);
pub const ERROR_WINRS_CLIENT_NULL_PARAM = @as(u32, 2150859041);
pub const ERROR_WSMAN_CANNOT_PROCESS_FILTER = @as(u32, 2150859042);
pub const ERROR_WSMAN_CLIENT_ENUMERATORADDEVENT_NULL_PARAM = @as(u32, 2150859043);
pub const ERROR_WSMAN_ADDOBJECT_MISSING_OBJECT = @as(u32, 2150859044);
pub const ERROR_WSMAN_ADDOBJECT_MISSING_EPR = @as(u32, 2150859045);
pub const ERROR_WSMAN_NETWORK_TIMEDOUT = @as(u32, 2150859046);
pub const ERROR_WINRS_RECEIVE_IN_PROGRESS = @as(u32, 2150859047);
pub const ERROR_WINRS_RECEIVE_NO_RESPONSE_DATA = @as(u32, 2150859048);
pub const ERROR_WINRS_CLIENT_CREATESHELL_NULL_PARAM = @as(u32, 2150859049);
pub const ERROR_WINRS_CLIENT_CLOSESHELL_NULL_PARAM = @as(u32, 2150859050);
pub const ERROR_WINRS_CLIENT_FREECREATESHELLRESULT_NULL_PARAM = @as(u32, 2150859051);
pub const ERROR_WINRS_CLIENT_RUNCOMMAND_NULL_PARAM = @as(u32, 2150859052);
pub const ERROR_WINRS_CLIENT_FREERUNCOMMANDRESULT_NULL_PARAM = @as(u32, 2150859053);
pub const ERROR_WINRS_CLIENT_SIGNAL_NULL_PARAM = @as(u32, 2150859054);
pub const ERROR_WINRS_CLIENT_RECEIVE_NULL_PARAM = @as(u32, 2150859055);
pub const ERROR_WINRS_CLIENT_FREEPULLRESULT_NULL_PARAM = @as(u32, 2150859056);
pub const ERROR_WINRS_CLIENT_PULL_NULL_PARAM = @as(u32, 2150859057);
pub const ERROR_WINRS_CLIENT_CLOSERECEIVEHANDLE_NULL_PARAM = @as(u32, 2150859058);
pub const ERROR_WINRS_CLIENT_SEND_NULL_PARAM = @as(u32, 2150859059);
pub const ERROR_WINRS_CLIENT_PUSH_NULL_PARAM = @as(u32, 2150859060);
pub const ERROR_WINRS_CLIENT_CLOSESENDHANDLE_NULL_PARAM = @as(u32, 2150859061);
pub const ERROR_WINRS_CLIENT_GET_NULL_PARAM = @as(u32, 2150859062);
pub const ERROR_WSMAN_POLYMORPHISM_MODE_UNSUPPORTED = @as(u32, 2150859063);
pub const ERROR_WSMAN_REQUEST_NOT_SUPPORTED_AT_SERVICE = @as(u32, 2150859064);
pub const ERROR_WSMAN_URI_NON_DMTF_CLASS = @as(u32, 2150859065);
pub const ERROR_WSMAN_URI_WRONG_DMTF_VERSION = @as(u32, 2150859066);
pub const ERROR_WSMAN_DIFFERENT_CIM_SELECTOR = @as(u32, 2150859067);
pub const ERROR_WSMAN_PUSHSUBSCRIPTION_INVALIDUSERACCOUNT = @as(u32, 2150859068);
pub const ERROR_WSMAN_EVENTING_NONDOMAINJOINED_PUBLISHER = @as(u32, 2150859069);
pub const ERROR_WSMAN_EVENTING_NONDOMAINJOINED_COLLECTOR = @as(u32, 2150859070);
pub const ERROR_WSMAN_CONFIG_READONLY_PROPERTY = @as(u32, 2150859071);
pub const ERROR_WINRS_CODE_PAGE_NOT_SUPPORTED = @as(u32, 2150859072);
pub const ERROR_WSMAN_CLIENT_DISABLE_LOOPBACK_WITH_EXPLICIT_CREDENTIALS = @as(u32, 2150859073);
pub const ERROR_WSMAN_CLIENT_INVALID_DISABLE_LOOPBACK = @as(u32, 2150859074);
pub const ERROR_WSMAN_CLIENT_ENUM_RECEIVED_TOO_MANY_ITEMS = @as(u32, 2150859075);
pub const ERROR_WSMAN_MULTIPLE_CREDENTIALS = @as(u32, 2150859076);
pub const ERROR_WSMAN_AUTHENTICATION_INVALID_FLAG = @as(u32, 2150859077);
pub const ERROR_WSMAN_CLIENT_CREDENTIALS_FOR_DEFAULT_AUTHENTICATION = @as(u32, 2150859078);
pub const ERROR_WSMAN_CLIENT_USERNAME_AND_PASSWORD_NEEDED = @as(u32, 2150859079);
pub const ERROR_WSMAN_CLIENT_INVALID_CERT_DNS_OR_UPN = @as(u32, 2150859080);
pub const ERROR_WSMAN_CREATESHELL_NULL_ENVIRONMENT_VARIABLE_NAME = @as(u32, 2150859081);
pub const ERROR_WSMAN_SHELL_ALREADY_CLOSED = @as(u32, 2150859082);
pub const ERROR_WSMAN_CREATESHELL_NULL_STREAMID = @as(u32, 2150859083);
pub const ERROR_WSMAN_SHELL_INVALID_SHELL_HANDLE = @as(u32, 2150859084);
pub const ERROR_WSMAN_SHELL_INVALID_COMMAND_HANDLE = @as(u32, 2150859085);
pub const ERROR_WSMAN_RUNSHELLCOMMAND_NULL_ARGUMENT = @as(u32, 2150859086);
pub const ERROR_WSMAN_COMMAND_ALREADY_CLOSED = @as(u32, 2150859087);
pub const ERROR_WSMAN_SENDSHELLINPUT_INVALID_STREAMID_INDEX = @as(u32, 2150859088);
pub const ERROR_WSMAN_SHELL_SYNCHRONOUS_NOT_SUPPORTED = @as(u32, 2150859089);
pub const ERROR_WSMAN_NO_CERTMAPPING_OPERATION_FOR_LOCAL_SESSION = @as(u32, 2150859090);
pub const ERROR_WSMAN_CERTMAPPING_CONFIGLIMIT_EXCEEDED = @as(u32, 2150859091);
pub const ERROR_WSMAN_CERTMAPPING_INVALIDUSERCREDENTIALS = @as(u32, 2150859092);
pub const ERROR_WSMAN_CERT_INVALID_USAGE_CLIENT = @as(u32, 2150859093);
pub const ERROR_WSMAN_CERT_MISSING_AUTH_FLAG = @as(u32, 2150859094);
pub const ERROR_WSMAN_CERT_MULTIPLE_CREDENTIALS_FLAG = @as(u32, 2150859095);
pub const ERROR_WSMAN_CONFIG_SHELL_URI_INVALID = @as(u32, 2150859096);
pub const ERROR_WSMAN_CONFIG_SHELL_URI_CMDSHELLURI_NOTPERMITTED = @as(u32, 2150859097);
pub const ERROR_WSMAN_CONFIG_SHELLURI_INVALID_PROCESSPATH = @as(u32, 2150859098);
pub const ERROR_WINRS_SHELL_URI_INVALID = @as(u32, 2150859099);
pub const ERROR_WSMAN_INVALID_SECURITY_DESCRIPTOR = @as(u32, 2150859100);
pub const ERROR_WSMAN_POLICY_TOO_COMPLEX = @as(u32, 2150859101);
pub const ERROR_WSMAN_POLICY_CANNOT_COMPLY = @as(u32, 2150859102);
pub const ERROR_WSMAN_INVALID_CONNECTIONRETRY = @as(u32, 2150859103);
pub const ERROR_WSMAN_URISECURITY_INVALIDURIKEY = @as(u32, 2150859104);
pub const ERROR_WSMAN_CERTMAPPING_INVALIDSUBJECTKEY = @as(u32, 2150859105);
pub const ERROR_WSMAN_CERTMAPPING_INVALIDISSUERKEY = @as(u32, 2150859106);
pub const ERROR_WSMAN_INVALID_PUBLISHERS_TYPE = @as(u32, 2150859107);
pub const ERROR_WSMAN_CLIENT_INVALID_DELIVERY_RETRY = @as(u32, 2150859108);
pub const ERROR_WSMAN_CLIENT_NULL_PUBLISHERS = @as(u32, 2150859109);
pub const ERROR_WSMAN_CLIENT_NULL_ISSUERS = @as(u32, 2150859110);
pub const ERROR_WSMAN_CLIENT_NO_SOURCES = @as(u32, 2150859111);
pub const ERROR_WSMAN_INVALID_SUBSCRIBE_OBJECT = @as(u32, 2150859112);
pub const ERROR_WSMAN_PUBLIC_FIREWALL_PROFILE_ACTIVE = @as(u32, 2150859113);
pub const ERROR_WSMAN_CERTMAPPING_PASSWORDTOOLONG = @as(u32, 2150859114);
pub const ERROR_WSMAN_CERTMAPPING_PASSWORDBLANK = @as(u32, 2150859115);
pub const ERROR_WSMAN_CERTMAPPING_PASSWORDUSERTUPLE = @as(u32, 2150859116);
pub const ERROR_WSMAN_INVALID_PROVIDER_RESPONSE = @as(u32, 2150859117);
pub const ERROR_WSMAN_SHELL_NOT_INITIALIZED = @as(u32, 2150859118);
pub const ERROR_WSMAN_CONFIG_SHELLURI_INVALID_OPERATION_ON_KEY = @as(u32, 2150859119);
pub const ERROR_WSMAN_HTTP_STATUS_SERVER_ERROR = @as(u32, 2150859120);
pub const ERROR_WSMAN_HTTP_STATUS_BAD_REQUEST = @as(u32, 2150859121);
pub const ERROR_WSMAN_CONFIG_CANNOT_CHANGE_CERTMAPPING_KEYS = @as(u32, 2150859122);
pub const ERROR_WSMAN_HTML_ERROR = @as(u32, 2150859123);
pub const ERROR_WSMAN_CLIENT_INITIALIZE_NULL_PARAM = @as(u32, 2150859124);
pub const ERROR_WSMAN_CLIENT_INVALID_INIT_APPLICATION_FLAG = @as(u32, 2150859125);
pub const ERROR_WSMAN_CLIENT_INVALID_DEINIT_APPLICATION_FLAG = @as(u32, 2150859126);
pub const ERROR_WSMAN_CLIENT_SETSESSIONOPTION_NULL_PARAM = @as(u32, 2150859127);
pub const ERROR_WSMAN_CLIENT_SETSESSIONOPTION_INVALID_PARAM = @as(u32, 2150859128);
pub const ERROR_WSMAN_CLIENT_GETSESSIONOPTION_INVALID_PARAM = @as(u32, 2150859129);
pub const ERROR_WSMAN_CLIENT_CREATESHELL_NULL_PARAM = @as(u32, 2150859130);
pub const ERROR_WSMAN_CLIENT_INVALID_CREATE_SHELL_FLAG = @as(u32, 2150859131);
pub const ERROR_WSMAN_CLIENT_INVALID_CLOSE_SHELL_FLAG = @as(u32, 2150859132);
pub const ERROR_WSMAN_CLIENT_INVALID_CLOSE_COMMAND_FLAG = @as(u32, 2150859133);
pub const ERROR_WSMAN_CLIENT_CLOSESHELL_NULL_PARAM = @as(u32, 2150859134);
pub const ERROR_WSMAN_CLIENT_CLOSECOMMAND_NULL_PARAM = @as(u32, 2150859135);
pub const ERROR_WSMAN_CLIENT_RUNCOMMAND_NULL_PARAM = @as(u32, 2150859136);
pub const ERROR_WSMAN_CLIENT_INVALID_RUNCOMMAND_FLAG = @as(u32, 2150859137);
pub const ERROR_WSMAN_CLIENT_RUNCOMMAND_NOTCOMPLETED = @as(u32, 2150859138);
pub const ERROR_WSMAN_NO_COMMAND_RESPONSE = @as(u32, 2150859139);
pub const ERROR_WSMAN_INVALID_OPTIONSET = @as(u32, 2150859140);
pub const ERROR_WSMAN_NO_COMMANDID = @as(u32, 2150859141);
pub const ERROR_WSMAN_CLIENT_SIGNAL_NULL_PARAM = @as(u32, 2150859142);
pub const ERROR_WSMAN_CLIENT_INVALID_SIGNAL_SHELL_FLAG = @as(u32, 2150859143);
pub const ERROR_WSMAN_CLIENT_SEND_NULL_PARAM = @as(u32, 2150859144);
pub const ERROR_WSMAN_CLIENT_INVALID_SEND_SHELL_FLAG = @as(u32, 2150859145);
pub const ERROR_WSMAN_CLIENT_INVALID_SEND_SHELL_PARAMETER = @as(u32, 2150859146);
pub const ERROR_WSMAN_SHELL_INVALID_INPUT_STREAM = @as(u32, 2150859147);
pub const ERROR_WSMAN_CLIENT_RECEIVE_NULL_PARAM = @as(u32, 2150859148);
pub const ERROR_WSMAN_SHELL_INVALID_DESIRED_STREAMS = @as(u32, 2150859149);
pub const ERROR_WSMAN_CLIENT_INVALID_RECEIVE_SHELL_FLAG = @as(u32, 2150859150);
pub const ERROR_WSMAN_NO_RECEIVE_RESPONSE = @as(u32, 2150859151);
pub const ERROR_WSMAN_PLUGIN_CONFIGURATION_CORRUPTED = @as(u32, 2150859152);
pub const ERROR_WSMAN_INVALID_FILEPATH = @as(u32, 2150859153);
pub const ERROR_WSMAN_FILE_NOT_PRESENT = @as(u32, 2150859154);
pub const ERROR_WSMAN_IISCONFIGURATION_READ_FAILED = @as(u32, 2150859155);
pub const ERROR_WSMAN_CLIENT_INVALID_LOCALE = @as(u32, 2150859156);
pub const ERROR_WSMAN_CLIENT_INVALID_UI_LANGUAGE = @as(u32, 2150859157);
pub const ERROR_WSMAN_CLIENT_GETERRORMESSAGE_NULL_PARAM = @as(u32, 2150859158);
pub const ERROR_WSMAN_CLIENT_INVALID_LANGUAGE_CODE = @as(u32, 2150859159);
pub const ERROR_WSMAN_CLIENT_INVALID_GETERRORMESSAGE_FLAG = @as(u32, 2150859160);
pub const ERROR_WSMAN_REDIRECT_REQUESTED = @as(u32, 2150859161);
pub const ERROR_WSMAN_PROXY_AUTHENTICATION_INVALID_FLAG = @as(u32, 2150859162);
pub const ERROR_WSMAN_CLIENT_CREDENTIALS_FOR_PROXY_AUTHENTICATION = @as(u32, 2150859163);
pub const ERROR_WSMAN_PROXY_ACCESS_TYPE = @as(u32, 2150859164);
pub const ERROR_WSMAN_INVALID_OPTION_NO_PROXY_SERVER = @as(u32, 2150859165);
pub const ERROR_WSMAN_CLIENT_GETSESSIONOPTION_DWORD_NULL_PARAM = @as(u32, 2150859166);
pub const ERROR_WSMAN_CLIENT_GETSESSIONOPTION_DWORD_INVALID_PARAM = @as(u32, 2150859167);
pub const ERROR_WSMAN_CLIENT_GETSESSIONOPTION_STRING_INVALID_PARAM = @as(u32, 2150859168);
pub const ERROR_WSMAN_CREDSSP_USERNAME_PASSWORD_NEEDED = @as(u32, 2150859169);
pub const ERROR_WSMAN_CLIENT_CREDSSP_AUTHENTICATION_DISABLED = @as(u32, 2150859170);
pub const ERROR_WSMAN_CLIENT_ALLOWFRESHCREDENTIALS = @as(u32, 2150859171);
pub const ERROR_WSMAN_CLIENT_ALLOWFRESHCREDENTIALS_NTLMONLY = @as(u32, 2150859172);
pub const ERROR_WSMAN_QUOTA_MAX_SHELLS = @as(u32, 2150859173);
pub const ERROR_WSMAN_QUOTA_MAX_OPERATIONS = @as(u32, 2150859174);
pub const ERROR_WSMAN_QUOTA_USER = @as(u32, 2150859175);
pub const ERROR_WSMAN_QUOTA_SYSTEM = @as(u32, 2150859176);
pub const ERROR_WSMAN_DIFFERENT_AUTHZ_TOKEN = @as(u32, 2150859177);
pub const ERROR_WSMAN_REDIRECT_LOCATION_NOT_AVAILABLE = @as(u32, 2150859178);
pub const ERROR_WSMAN_QUOTA_MAX_SHELLUSERS = @as(u32, 2150859179);
pub const ERROR_WSMAN_REMOTESHELLS_NOT_ALLOWED = @as(u32, 2150859180);
pub const ERROR_WSMAN_PULL_PARAMS_NOT_SAME_AS_ENUM = @as(u32, 2150859181);
pub const ERROR_WSMAN_DEPRECATED_CONFIG_SETTING = @as(u32, 2150859182);
pub const ERROR_WSMAN_URI_SECURITY_URI = @as(u32, 2150859183);
pub const ERROR_WSMAN_CANNOT_USE_ALLOW_NEGOTIATE_IMPLICIT_CREDENTIALS_FOR_HTTP = @as(u32, 2150859184);
pub const ERROR_WSMAN_CANNOT_USE_PROXY_SETTINGS_FOR_HTTP = @as(u32, 2150859185);
pub const ERROR_WSMAN_CANNOT_USE_PROXY_SETTINGS_FOR_KERBEROS = @as(u32, 2150859186);
pub const ERROR_WSMAN_CANNOT_USE_PROXY_SETTINGS_FOR_CREDSSP = @as(u32, 2150859187);
pub const ERROR_WSMAN_CLIENT_MULTIPLE_PROXY_AUTH_FLAGS = @as(u32, 2150859188);
pub const ERROR_WSMAN_INVALID_REDIRECT_ERROR = @as(u32, 2150859189);
pub const ERROR_REDIRECT_LOCATION_TOO_LONG = @as(u32, 2150859190);
pub const ERROR_REDIRECT_LOCATION_INVALID = @as(u32, 2150859191);
pub const ERROR_SERVICE_CBT_HARDENING_INVALID = @as(u32, 2150859192);
pub const ERROR_WSMAN_NAME_NOT_RESOLVED = @as(u32, 2150859193);
pub const ERROR_WSMAN_SSL_CONNECTION_ABORTED = @as(u32, 2150859194);
pub const ERROR_WSMAN_DEFAULTAUTH_IPADDRESS = @as(u32, 2150859195);
pub const ERROR_WSMAN_CUSTOMREMOTESHELL_DEPRECATED = @as(u32, 2150859196);
pub const ERROR_WSMAN_FEATURE_DEPRECATED = @as(u32, 2150859197);
pub const ERROR_WSMAN_INVALID_USESSL_PARAM = @as(u32, 2150859198);
pub const ERROR_WSMAN_INVALID_CONFIGSDDL_URL = @as(u32, 2150859199);
pub const ERROR_WSMAN_ENUMERATE_SHELLCOMAMNDS_FILTER_EXPECTED = @as(u32, 2150859200);
pub const ERROR_WSMAN_ENUMERATE_SHELLCOMMANDS_EPRS_NOTSUPPORTED = @as(u32, 2150859201);
pub const ERROR_WSMAN_CLIENT_CREATESHELL_NAME_INVALID = @as(u32, 2150859202);
pub const ERROR_WSMAN_RUNAS_INVALIDUSERCREDENTIALS = @as(u32, 2150859203);
pub const ERROR_WINRS_SHELL_DISCONNECTED = @as(u32, 2150859204);
pub const ERROR_WINRS_SHELL_DISCONNECT_NOT_SUPPORTED = @as(u32, 2150859205);
pub const ERROR_WINRS_SHELL_CLIENTSESSIONID_MISMATCH = @as(u32, 2150859206);
pub const ERROR_WSMAN_CLIENT_DISCONNECTSHELL_NULL_PARAM = @as(u32, 2150859207);
pub const ERROR_WSMAN_CLIENT_RECONNECTSHELL_NULL_PARAM = @as(u32, 2150859208);
pub const ERROR_WSMAN_CLIENT_CONNECTSHELL_NULL_PARAM = @as(u32, 2150859209);
pub const ERROR_WSMAN_CLIENT_CONNECTCOMMAND_NULL_PARAM = @as(u32, 2150859210);
pub const ERROR_WINRS_CONNECT_RESPONSE_BAD_BODY = @as(u32, 2150859211);
pub const ERROR_WSMAN_COMMAND_TERMINATED = @as(u32, 2150859212);
pub const ERROR_WINRS_SHELL_CONNECTED_TO_DIFFERENT_CLIENT = @as(u32, 2150859213);
pub const ERROR_WINRS_SHELL_DISCONNECT_OPERATION_NOT_GRACEFUL = @as(u32, 2150859214);
pub const ERROR_WINRS_SHELL_DISCONNECT_OPERATION_NOT_VALID = @as(u32, 2150859215);
pub const ERROR_WINRS_SHELL_RECONNECT_OPERATION_NOT_VALID = @as(u32, 2150859216);
pub const ERROR_WSMAN_CONFIG_GROUP_POLICY_CHANGE_NOTIFICATION_SUBSCRIPTION_FAILED = @as(u32, 2150859217);
pub const ERROR_WSMAN_CLIENT_RECONNECTSHELLCOMMAND_NULL_PARAM = @as(u32, 2150859218);
pub const ERROR_WINRS_SHELLCOMMAND_RECONNECT_OPERATION_NOT_VALID = @as(u32, 2150859219);
pub const ERROR_WINRS_SHELLCOMMAND_CLIENTID_NOT_VALID = @as(u32, 2150859220);
pub const ERROR_WINRS_SHELL_CLIENTID_NOT_VALID = @as(u32, 2150859221);
pub const ERROR_WINRS_SHELLCOMMAND_CLIENTID_RESOURCE_CONFLICT = @as(u32, 2150859222);
pub const ERROR_WINRS_SHELL_CLIENTID_RESOURCE_CONFLICT = @as(u32, 2150859223);
pub const ERROR_WINRS_SHELLCOMMAND_DISCONNECT_OPERATION_NOT_VALID = @as(u32, 2150859224);
pub const ERROR_WSMAN_SUBSCRIBE_WMI_INVALID_KEY = @as(u32, 2150859225);
pub const ERROR_WSMAN_CLIENT_INVALID_DISCONNECT_SHELL_FLAG = @as(u32, 2150859226);
pub const ERROR_WSMAN_CLIENT_INVALID_SHELL_COMMAND_PAIR = @as(u32, 2150859227);
pub const ERROR_WSMAN_SEMANTICCALLBACK_TIMEDOUT = @as(u32, 2150859228);
pub const ERROR_WSMAN_SERVICE_REMOTE_ACCESS_DISABLED = @as(u32, 2150859229);
pub const ERROR_WSMAN_SERVICE_STREAM_DISCONNECTED = @as(u32, 2150859230);
pub const ERROR_WSMAN_CREATESHELL_RUNAS_FAILED = @as(u32, 2150859231);
pub const ERROR_WSMAN_INVALID_XML_RUNAS_DISABLED = @as(u32, 2150859232);
pub const ERROR_WSMAN_WRONG_METADATA = @as(u32, 2150859233);
pub const ERROR_WSMAN_UNSUPPORTED_TYPE = @as(u32, 2150859234);
pub const ERROR_WSMAN_REMOTE_CONNECTION_NOT_ALLOWED = @as(u32, 2150859235);
pub const ERROR_WSMAN_QUOTA_MAX_SHELLS_PPQ = @as(u32, 2150859236);
pub const ERROR_WSMAN_QUOTA_MAX_USERS_PPQ = @as(u32, 2150859237);
pub const ERROR_WSMAN_QUOTA_MAX_PLUGINSHELLS_PPQ = @as(u32, 2150859238);
pub const ERROR_WSMAN_QUOTA_MAX_PLUGINOPERATIONS_PPQ = @as(u32, 2150859239);
pub const ERROR_WSMAN_QUOTA_MAX_OPERATIONS_USER_PPQ = @as(u32, 2150859240);
pub const ERROR_WSMAN_QUOTA_MAX_COMMANDS_PER_SHELL_PPQ = @as(u32, 2150859241);
pub const ERROR_WSMAN_QUOTA_MIN_REQUIREMENT_NOT_AVAILABLE_PPQ = @as(u32, 2150859242);
pub const ERROR_WSMAN_NEW_DESERIALIZER = @as(u32, 2150859243);
pub const ERROR_WSMAN_DESERIALIZE_CLASS = @as(u32, 2150859244);
pub const ERROR_WSMAN_GETCLASS = @as(u32, 2150859245);
pub const ERROR_WSMAN_NEW_SESSION = @as(u32, 2150859246);
pub const ERROR_WSMAN_NULL_KEY = @as(u32, 2150859247);
pub const ERROR_WSMAN_MUTUAL_AUTH_FAILED = @as(u32, 2150859248);
pub const ERROR_WSMAN_UNSUPPORTED_OCTETTYPE = @as(u32, 2150859249);
pub const ERROR_WINRS_IDLETIMEOUT_OUTOFBOUNDS = @as(u32, 2150859250);
pub const ERROR_WSMAN_INSUFFICIENT_METADATA_FOR_BASIC = @as(u32, 2150859251);
pub const ERROR_WSMAN_INVALID_LITERAL_URI = @as(u32, 2150859252);
pub const ERROR_WSMAN_OBJECTONLY_INVALID = @as(u32, 2150859253);
pub const ERROR_WSMAN_MISSING_CLASSNAME = @as(u32, 2150859254);
pub const ERROR_WSMAN_EVENTING_INVALID_ENCODING_IN_DELIVERY = @as(u32, 2150859255);
pub const ERROR_WSMAN_DESTINATION_INVALID = @as(u32, 2150859256);
pub const ERROR_WSMAN_UNSUPPORTED_FEATURE_IDENTIFY = @as(u32, 2150859257);
pub const ERROR_WSMAN_CLIENT_SESSION_UNUSABLE = @as(u32, 2150859258);
pub const ERROR_WSMAN_VIRTUALACCOUNT_NOTSUPPORTED = @as(u32, 2150859259);
pub const ERROR_WSMAN_VIRTUALACCOUNT_NOTSUPPORTED_DOWNLEVEL = @as(u32, 2150859260);
pub const ERROR_WSMAN_RUNASUSER_MANAGEDACCOUNT_LOGON_FAILED = @as(u32, 2150859261);
pub const ERROR_WSMAN_CERTMAPPING_CREDENTIAL_MANAGEMENT_FAILIED = @as(u32, 2150859262);
//--------------------------------------------------------------------------------
// Section: Types (76)
//--------------------------------------------------------------------------------
// Core payload and credential types for the WSMan C API.
// All structs are `extern` so the field layout matches the Windows headers.

/// Length-prefixed text payload; `buffer` is a u16 (wide-character,
/// presumably UTF-16) pointer and `bufferLength` counts elements.
pub const WSMAN_DATA_TEXT = extern struct {
    bufferLength: u32,
    buffer: ?[*:0]const u16,
};
/// Raw binary payload: `data` points at `dataLength` bytes.
pub const WSMAN_DATA_BINARY = extern struct {
    dataLength: u32,
    data: ?*u8,
};
/// Discriminant for the union inside WSMAN_DATA.
pub const WSManDataType = enum(i32) {
    NONE = 0,
    TYPE_TEXT = 1,
    TYPE_BINARY = 2,
    TYPE_DWORD = 4,
};
pub const WSMAN_DATA_NONE = WSManDataType.NONE;
pub const WSMAN_DATA_TYPE_TEXT = WSManDataType.TYPE_TEXT;
pub const WSMAN_DATA_TYPE_BINARY = WSManDataType.TYPE_BINARY;
pub const WSMAN_DATA_TYPE_DWORD = WSManDataType.TYPE_DWORD;
/// Tagged payload: `type` selects which member of `Anonymous` is valid
/// (text / binaryData / number). The tag is NOT enforced by the type system.
pub const WSMAN_DATA = extern struct {
    type: WSManDataType,
    Anonymous: extern union {
        text: WSMAN_DATA_TEXT,
        binaryData: WSMAN_DATA_BINARY,
        number: u32,
    },
};
/// Error details returned by WSMan operations; all strings are
/// null-terminated wide-character pointers owned by the API.
pub const WSMAN_ERROR = extern struct {
    code: u32,
    errorDetail: ?[*:0]const u16,
    language: ?[*:0]const u16,
    machineName: ?[*:0]const u16,
    pluginName: ?[*:0]const u16,
};
/// Explicit username/password credential pair.
pub const WSMAN_USERNAME_PASSWORD_CREDS = extern struct {
    username: ?[*:0]const u16,
    password: ?[*:0]const u16,
};
/// Bit flags choosing the authentication mechanism (values are powers of
/// two except AUTH_CREDSSP = 128, matching the Windows headers).
pub const WSManAuthenticationFlags = enum(i32) {
    DEFAULT_AUTHENTICATION = 0,
    NO_AUTHENTICATION = 1,
    AUTH_DIGEST = 2,
    AUTH_NEGOTIATE = 4,
    AUTH_BASIC = 8,
    AUTH_KERBEROS = 16,
    AUTH_CREDSSP = 128,
    AUTH_CLIENT_CERTIFICATE = 32,
};
pub const WSMAN_FLAG_DEFAULT_AUTHENTICATION = WSManAuthenticationFlags.DEFAULT_AUTHENTICATION;
pub const WSMAN_FLAG_NO_AUTHENTICATION = WSManAuthenticationFlags.NO_AUTHENTICATION;
pub const WSMAN_FLAG_AUTH_DIGEST = WSManAuthenticationFlags.AUTH_DIGEST;
pub const WSMAN_FLAG_AUTH_NEGOTIATE = WSManAuthenticationFlags.AUTH_NEGOTIATE;
pub const WSMAN_FLAG_AUTH_BASIC = WSManAuthenticationFlags.AUTH_BASIC;
pub const WSMAN_FLAG_AUTH_KERBEROS = WSManAuthenticationFlags.AUTH_KERBEROS;
pub const WSMAN_FLAG_AUTH_CREDSSP = WSManAuthenticationFlags.AUTH_CREDSSP;
pub const WSMAN_FLAG_AUTH_CLIENT_CERTIFICATE = WSManAuthenticationFlags.AUTH_CLIENT_CERTIFICATE;
/// Credentials plus the mechanism they are for; `authenticationMechanism`
/// selects which union member applies (user/password vs. cert thumbprint).
pub const WSMAN_AUTHENTICATION_CREDENTIALS = extern struct {
    authenticationMechanism: u32,
    Anonymous: extern union {
        userAccount: WSMAN_USERNAME_PASSWORD_CREDS,
        certificateThumbprint: ?[*:0]const u16,
    },
};
// Option / selector / filter aggregates passed into WSMan operations.
// `count + single-item pointer` pairs represent C arrays (pointer to the
// first element of `count` items).

/// A single name/value option; `mustComply` marks it as mandatory.
pub const WSMAN_OPTION = extern struct {
    name: ?[*:0]const u16,
    value: ?[*:0]const u16,
    mustComply: BOOL,
};
/// Array of WSMAN_OPTION (`options` points at `optionsCount` items).
pub const WSMAN_OPTION_SET = extern struct {
    optionsCount: u32,
    options: ?*WSMAN_OPTION,
    optionsMustUnderstand: BOOL,
};
/// Extended option set: adds a parallel `optionTypes` string array.
pub const WSMAN_OPTION_SETEX = extern struct {
    optionsCount: u32,
    options: ?*WSMAN_OPTION,
    optionsMustUnderstand: BOOL,
    optionTypes: ?*?PWSTR,
};
/// One selector key/value pair.
pub const WSMAN_KEY = extern struct {
    key: ?[*:0]const u16,
    value: ?[*:0]const u16,
};
/// Array of WSMAN_KEY selectors (`keys` points at `numberKeys` items).
pub const WSMAN_SELECTOR_SET = extern struct {
    numberKeys: u32,
    keys: ?*WSMAN_KEY,
};
/// Fragment path plus the dialect it is expressed in.
pub const WSMAN_FRAGMENT = extern struct {
    path: ?[*:0]const u16,
    dialect: ?[*:0]const u16,
};
/// Filter expression plus its dialect.
pub const WSMAN_FILTER = extern struct {
    filter: ?[*:0]const u16,
    dialect: ?[*:0]const u16,
};
/// Per-operation metadata bundle (fragment, filter, selectors, options).
/// Note `reserved`/`version` trail the struct; see also the EX variant below.
pub const WSMAN_OPERATION_INFO = extern struct {
    fragment: WSMAN_FRAGMENT,
    filter: WSMAN_FILTER,
    selectorSet: WSMAN_SELECTOR_SET,
    optionSet: WSMAN_OPTION_SET,
    reserved: ?*anyopaque,
    version: u32,
};
/// Extended operation info: uses WSMAN_OPTION_SETEX and carries locales.
pub const WSMAN_OPERATION_INFOEX = extern struct {
    fragment: WSMAN_FRAGMENT,
    filter: WSMAN_FILTER,
    selectorSet: WSMAN_SELECTOR_SET,
    optionSet: WSMAN_OPTION_SETEX,
    version: u32,
    uiLocale: ?[*:0]const u16,
    dataLocale: ?[*:0]const u16,
};
/// Opaque handle type — only ever used behind a pointer; the placeholder
/// field exists because the metadata declares an empty struct.
pub const WSMAN_API = extern struct {
    placeholder: usize, // TODO: why is this type empty?
};
/// Proxy configuration source selector (bit-flag values).
pub const WSManProxyAccessType = enum(i32) {
    IE_PROXY_CONFIG = 1,
    WINHTTP_PROXY_CONFIG = 2,
    AUTO_DETECT = 4,
    NO_PROXY_SERVER = 8,
};
pub const WSMAN_OPTION_PROXY_IE_PROXY_CONFIG = WSManProxyAccessType.IE_PROXY_CONFIG;
pub const WSMAN_OPTION_PROXY_WINHTTP_PROXY_CONFIG = WSManProxyAccessType.WINHTTP_PROXY_CONFIG;
pub const WSMAN_OPTION_PROXY_AUTO_DETECT = WSManProxyAccessType.AUTO_DETECT;
pub const WSMAN_OPTION_PROXY_NO_PROXY_SERVER = WSManProxyAccessType.NO_PROXY_SERVER;
/// Proxy access type (a WSManProxyAccessType value as u32) plus the
/// credentials to authenticate to the proxy with.
pub const WSMAN_PROXY_INFO = extern struct {
    accessType: u32,
    authenticationCredentials: WSMAN_AUTHENTICATION_CREDENTIALS,
};
/// Opaque session handle (see WSMAN_API above).
pub const WSMAN_SESSION = extern struct {
    placeholder: usize, // TODO: why is this type empty?
};
/// Keys for WSManSetSessionOption/WSManGetSessionOption. Values are not
/// contiguous; gaps mirror the Windows SDK. The INTEARACTIVE spelling is
/// the SDK metadata's own typo — do not rename.
pub const WSManSessionOption = enum(i32) {
    DEFAULT_OPERATION_TIMEOUTMS = 1,
    MAX_RETRY_TIME = 11,
    TIMEOUTMS_CREATE_SHELL = 12,
    TIMEOUTMS_RUN_SHELL_COMMAND = 13,
    TIMEOUTMS_RECEIVE_SHELL_OUTPUT = 14,
    TIMEOUTMS_SEND_SHELL_INPUT = 15,
    TIMEOUTMS_SIGNAL_SHELL = 16,
    TIMEOUTMS_CLOSE_SHELL = 17,
    SKIP_CA_CHECK = 18,
    SKIP_CN_CHECK = 19,
    UNENCRYPTED_MESSAGES = 20,
    UTF16 = 21,
    ENABLE_SPN_SERVER_PORT = 22,
    MACHINE_ID = 23,
    LOCALE = 25,
    UI_LANGUAGE = 26,
    MAX_ENVELOPE_SIZE_KB = 28,
    SHELL_MAX_DATA_SIZE_PER_MESSAGE_KB = 29,
    REDIRECT_LOCATION = 30,
    SKIP_REVOCATION_CHECK = 31,
    ALLOW_NEGOTIATE_IMPLICIT_CREDENTIALS = 32,
    USE_SSL = 33,
    USE_INTEARACTIVE_TOKEN = 34,
};
pub const WSMAN_OPTION_DEFAULT_OPERATION_TIMEOUTMS = WSManSessionOption.DEFAULT_OPERATION_TIMEOUTMS;
pub const WSMAN_OPTION_MAX_RETRY_TIME = WSManSessionOption.MAX_RETRY_TIME;
pub const WSMAN_OPTION_TIMEOUTMS_CREATE_SHELL = WSManSessionOption.TIMEOUTMS_CREATE_SHELL;
pub const WSMAN_OPTION_TIMEOUTMS_RUN_SHELL_COMMAND = WSManSessionOption.TIMEOUTMS_RUN_SHELL_COMMAND;
pub const WSMAN_OPTION_TIMEOUTMS_RECEIVE_SHELL_OUTPUT = WSManSessionOption.TIMEOUTMS_RECEIVE_SHELL_OUTPUT;
pub const WSMAN_OPTION_TIMEOUTMS_SEND_SHELL_INPUT = WSManSessionOption.TIMEOUTMS_SEND_SHELL_INPUT;
pub const WSMAN_OPTION_TIMEOUTMS_SIGNAL_SHELL = WSManSessionOption.TIMEOUTMS_SIGNAL_SHELL;
pub const WSMAN_OPTION_TIMEOUTMS_CLOSE_SHELL = WSManSessionOption.TIMEOUTMS_CLOSE_SHELL;
pub const WSMAN_OPTION_SKIP_CA_CHECK = WSManSessionOption.SKIP_CA_CHECK;
pub const WSMAN_OPTION_SKIP_CN_CHECK = WSManSessionOption.SKIP_CN_CHECK;
pub const WSMAN_OPTION_UNENCRYPTED_MESSAGES = WSManSessionOption.UNENCRYPTED_MESSAGES;
pub const WSMAN_OPTION_UTF16 = WSManSessionOption.UTF16;
pub const WSMAN_OPTION_ENABLE_SPN_SERVER_PORT = WSManSessionOption.ENABLE_SPN_SERVER_PORT;
pub const WSMAN_OPTION_MACHINE_ID = WSManSessionOption.MACHINE_ID;
pub const WSMAN_OPTION_LOCALE = WSManSessionOption.LOCALE;
pub const WSMAN_OPTION_UI_LANGUAGE = WSManSessionOption.UI_LANGUAGE;
pub const WSMAN_OPTION_MAX_ENVELOPE_SIZE_KB = WSManSessionOption.MAX_ENVELOPE_SIZE_KB;
pub const WSMAN_OPTION_SHELL_MAX_DATA_SIZE_PER_MESSAGE_KB = WSManSessionOption.SHELL_MAX_DATA_SIZE_PER_MESSAGE_KB;
pub const WSMAN_OPTION_REDIRECT_LOCATION = WSManSessionOption.REDIRECT_LOCATION;
pub const WSMAN_OPTION_SKIP_REVOCATION_CHECK = WSManSessionOption.SKIP_REVOCATION_CHECK;
pub const WSMAN_OPTION_ALLOW_NEGOTIATE_IMPLICIT_CREDENTIALS = WSManSessionOption.ALLOW_NEGOTIATE_IMPLICIT_CREDENTIALS;
pub const WSMAN_OPTION_USE_SSL = WSManSessionOption.USE_SSL;
pub const WSMAN_OPTION_USE_INTEARACTIVE_TOKEN = WSManSessionOption.USE_INTEARACTIVE_TOKEN;
/// Opaque operation handle (see WSMAN_API above).
pub const WSMAN_OPERATION = extern struct {
    placeholder: usize, // TODO: why is this type empty?
};
/// Bit flags delivered to WSMAN_SHELL_COMPLETION_FUNCTION describing why
/// the callback fired (values are individual bits; gaps mirror the SDK).
pub const WSManCallbackFlags = enum(i32) {
    END_OF_OPERATION = 1,
    END_OF_STREAM = 8,
    SHELL_SUPPORTS_DISCONNECT = 32,
    SHELL_AUTODISCONNECTED = 64,
    NETWORK_FAILURE_DETECTED = 256,
    RETRYING_AFTER_NETWORK_FAILURE = 512,
    RECONNECTED_AFTER_NETWORK_FAILURE = 1024,
    SHELL_AUTODISCONNECTING = 2048,
    RETRY_ABORTED_DUE_TO_INTERNAL_ERROR = 4096,
    RECEIVE_DELAY_STREAM_REQUEST_PROCESSED = 8192,
};
pub const WSMAN_FLAG_CALLBACK_END_OF_OPERATION = WSManCallbackFlags.END_OF_OPERATION;
pub const WSMAN_FLAG_CALLBACK_END_OF_STREAM = WSManCallbackFlags.END_OF_STREAM;
pub const WSMAN_FLAG_CALLBACK_SHELL_SUPPORTS_DISCONNECT = WSManCallbackFlags.SHELL_SUPPORTS_DISCONNECT;
pub const WSMAN_FLAG_CALLBACK_SHELL_AUTODISCONNECTED = WSManCallbackFlags.SHELL_AUTODISCONNECTED;
pub const WSMAN_FLAG_CALLBACK_NETWORK_FAILURE_DETECTED = WSManCallbackFlags.NETWORK_FAILURE_DETECTED;
pub const WSMAN_FLAG_CALLBACK_RETRYING_AFTER_NETWORK_FAILURE = WSManCallbackFlags.RETRYING_AFTER_NETWORK_FAILURE;
pub const WSMAN_FLAG_CALLBACK_RECONNECTED_AFTER_NETWORK_FAILURE = WSManCallbackFlags.RECONNECTED_AFTER_NETWORK_FAILURE;
pub const WSMAN_FLAG_CALLBACK_SHELL_AUTODISCONNECTING = WSManCallbackFlags.SHELL_AUTODISCONNECTING;
pub const WSMAN_FLAG_CALLBACK_RETRY_ABORTED_DUE_TO_INTERNAL_ERROR = WSManCallbackFlags.RETRY_ABORTED_DUE_TO_INTERNAL_ERROR;
pub const WSMAN_FLAG_CALLBACK_RECEIVE_DELAY_STREAM_REQUEST_PROCESSED = WSManCallbackFlags.RECEIVE_DELAY_STREAM_REQUEST_PROCESSED;
/// Opaque shell handle.
pub const WSMAN_SHELL = extern struct {
    placeholder: usize, // TODO: why is this type empty?
};
/// Opaque command handle.
pub const WSMAN_COMMAND = extern struct {
    placeholder: usize, // TODO: why is this type empty?
};
/// Array of stream-id strings (`streamIDs` points at `streamIDsCount` items).
pub const WSMAN_STREAM_ID_SET = extern struct {
    streamIDsCount: u32,
    streamIDs: ?*?PWSTR,
};
/// One environment variable name/value pair.
pub const WSMAN_ENVIRONMENT_VARIABLE = extern struct {
    name: ?[*:0]const u16,
    value: ?[*:0]const u16,
};
/// Array of WSMAN_ENVIRONMENT_VARIABLE (`vars` points at `varsCount` items).
pub const WSMAN_ENVIRONMENT_VARIABLE_SET = extern struct {
    varsCount: u32,
    vars: ?*WSMAN_ENVIRONMENT_VARIABLE,
};
/// Shell creation parameters, v1.0 layout.
pub const WSMAN_SHELL_STARTUP_INFO_V10 = extern struct {
    inputStreamSet: ?*WSMAN_STREAM_ID_SET,
    outputStreamSet: ?*WSMAN_STREAM_ID_SET,
    idleTimeoutMs: u32,
    workingDirectory: ?[*:0]const u16,
    variableSet: ?*WSMAN_ENVIRONMENT_VARIABLE_SET,
};
/// v1.1 layout: embeds V10 as an unnamed base (the generated field name
/// encodes the C header location) and appends a shell name.
pub const WSMAN_SHELL_STARTUP_INFO_V11 = extern struct {
    __AnonymousBase_wsman_L665_C48: WSMAN_SHELL_STARTUP_INFO_V10,
    name: ?[*:0]const u16,
};
/// Idle timeout (milliseconds) used when disconnecting a shell.
pub const WSMAN_SHELL_DISCONNECT_INFO = extern struct {
    idleTimeoutMs: u32,
};
/// Bit flags controlling shell create/disconnect/receive behavior.
pub const WSManShellFlag = enum(i32) {
    NO_COMPRESSION = 1,
    DELETE_SERVER_SESSION = 2,
    SERVER_BUFFERING_MODE_DROP = 4,
    SERVER_BUFFERING_MODE_BLOCK = 8,
    RECEIVE_DELAY_OUTPUT_STREAM = 16,
};
pub const WSMAN_FLAG_NO_COMPRESSION = WSManShellFlag.NO_COMPRESSION;
pub const WSMAN_FLAG_DELETE_SERVER_SESSION = WSManShellFlag.DELETE_SERVER_SESSION;
pub const WSMAN_FLAG_SERVER_BUFFERING_MODE_DROP = WSManShellFlag.SERVER_BUFFERING_MODE_DROP;
pub const WSMAN_FLAG_SERVER_BUFFERING_MODE_BLOCK = WSManShellFlag.SERVER_BUFFERING_MODE_BLOCK;
pub const WSMAN_FLAG_RECEIVE_DELAY_OUTPUT_STREAM = WSManShellFlag.RECEIVE_DELAY_OUTPUT_STREAM;
/// One chunk of command output: which stream it came from, the data,
/// the command state string, and the exit code.
pub const WSMAN_RECEIVE_DATA_RESULT = extern struct {
    streamId: ?[*:0]const u16,
    streamData: WSMAN_DATA,
    commandState: ?[*:0]const u16,
    exitCode: u32,
};
pub const WSMAN_CONNECT_DATA = extern struct {
    data: WSMAN_DATA,
};
pub const WSMAN_CREATE_SHELL_DATA = extern struct {
    data: WSMAN_DATA,
};
/// Untagged union — which member is valid depends on which async operation
/// completed (receive vs. connect vs. create).
pub const WSMAN_RESPONSE_DATA = extern union {
    receiveData: WSMAN_RECEIVE_DATA_RESULT,
    connectData: WSMAN_CONNECT_DATA,
    createData: WSMAN_CREATE_SHELL_DATA,
};
/// Completion callback invoked by the WSMan client API when an async shell
/// operation finishes; `flags` is a WSManCallbackFlags bitmask. Bare `fn`
/// type (not `*const fn`) matches the pre-0.11 Zig this file targets.
pub const WSMAN_SHELL_COMPLETION_FUNCTION = fn(
    operationContext: ?*anyopaque,
    flags: u32,
    @"error": ?*WSMAN_ERROR,
    shell: ?*WSMAN_SHELL,
    command: ?*WSMAN_COMMAND,
    operationHandle: ?*WSMAN_OPERATION,
    data: ?*WSMAN_RESPONSE_DATA,
) callconv(@import("std").os.windows.WINAPI) void;
/// User context pointer plus the completion callback it is passed to.
pub const WSMAN_SHELL_ASYNC = extern struct {
    operationContext: ?*anyopaque,
    completionFunction: ?WSMAN_SHELL_COMPLETION_FUNCTION,
};
// Server-side plugin API: request metadata plus the entry-point function
// types a WSMan plugin DLL exports. All are bare `fn` types with the
// WINAPI calling convention, matching the pre-0.11 Zig this file targets.

/// Array of command-line argument strings (`args` points at `argsCount` items).
pub const WSMAN_COMMAND_ARG_SET = extern struct {
    argsCount: u32,
    args: ?*?PWSTR,
};
/// Details of the client certificate used for authentication.
pub const WSMAN_CERTIFICATE_DETAILS = extern struct {
    subject: ?[*:0]const u16,
    issuerName: ?[*:0]const u16,
    issuerThumbprint: ?[*:0]const u16,
    subjectName: ?[*:0]const u16,
};
/// Identity of the remote caller as seen by the service/plugin.
pub const WSMAN_SENDER_DETAILS = extern struct {
    senderName: ?[*:0]const u16,
    authenticationMechanism: ?[*:0]const u16,
    certificateDetails: ?*WSMAN_CERTIFICATE_DETAILS,
    clientToken: ?HANDLE,
    httpURL: ?[*:0]const u16,
};
/// Per-request context handed to every plugin entry point; the plugin
/// reports results back against this request.
pub const WSMAN_PLUGIN_REQUEST = extern struct {
    senderDetails: ?*WSMAN_SENDER_DETAILS,
    locale: ?[*:0]const u16,
    resourceUri: ?[*:0]const u16,
    operationInfo: ?*WSMAN_OPERATION_INFO,
    shutdownNotification: i32,
    shutdownNotificationHandle: ?HANDLE,
    dataLocale: ?[*:0]const u16,
};
/// Release the plugin's shell context (cleanup hook).
pub const WSMAN_PLUGIN_RELEASE_SHELL_CONTEXT = fn(
    shellContext: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) void;
/// Release the plugin's command context (cleanup hook).
pub const WSMAN_PLUGIN_RELEASE_COMMAND_CONTEXT = fn(
    shellContext: ?*anyopaque,
    commandContext: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) void;
/// Plugin initialization; returns a u32 status and yields the plugin's
/// context through the out-parameter.
pub const WSMAN_PLUGIN_STARTUP = fn(
    flags: u32,
    applicationIdentification: ?[*:0]const u16,
    extraInfo: ?[*:0]const u16,
    pluginContext: ?*?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
/// Plugin teardown counterpart of WSMAN_PLUGIN_STARTUP.
pub const WSMAN_PLUGIN_SHUTDOWN = fn(
    pluginContext: ?*anyopaque,
    flags: u32,
    reason: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
/// Create a shell inside the plugin.
pub const WSMAN_PLUGIN_SHELL = fn(
    pluginContext: ?*anyopaque,
    requestDetails: ?*WSMAN_PLUGIN_REQUEST,
    flags: u32,
    startupInfo: ?*WSMAN_SHELL_STARTUP_INFO_V11,
    inboundShellInformation: ?*WSMAN_DATA,
) callconv(@import("std").os.windows.WINAPI) void;
/// Start a command in an existing plugin shell.
pub const WSMAN_PLUGIN_COMMAND = fn(
    requestDetails: ?*WSMAN_PLUGIN_REQUEST,
    flags: u32,
    shellContext: ?*anyopaque,
    commandLine: ?[*:0]const u16,
    arguments: ?*WSMAN_COMMAND_ARG_SET,
) callconv(@import("std").os.windows.WINAPI) void;
/// Deliver inbound stream data to a shell/command.
pub const WSMAN_PLUGIN_SEND = fn(
    requestDetails: ?*WSMAN_PLUGIN_REQUEST,
    flags: u32,
    shellContext: ?*anyopaque,
    commandContext: ?*anyopaque,
    stream: ?[*:0]const u16,
    inboundData: ?*WSMAN_DATA,
) callconv(@import("std").os.windows.WINAPI) void;
/// Request outbound data for the given stream set.
pub const WSMAN_PLUGIN_RECEIVE = fn(
    requestDetails: ?*WSMAN_PLUGIN_REQUEST,
    flags: u32,
    shellContext: ?*anyopaque,
    commandContext: ?*anyopaque,
    streamSet: ?*WSMAN_STREAM_ID_SET,
) callconv(@import("std").os.windows.WINAPI) void;
/// Deliver a signal (e.g. terminate) code to a shell/command.
pub const WSMAN_PLUGIN_SIGNAL = fn(
    requestDetails: ?*WSMAN_PLUGIN_REQUEST,
    flags: u32,
    shellContext: ?*anyopaque,
    commandContext: ?*anyopaque,
    code: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) void;
/// Reconnect to a previously disconnected shell/command.
pub const WSMAN_PLUGIN_CONNECT = fn(
    requestDetails: ?*WSMAN_PLUGIN_REQUEST,
    flags: u32,
    shellContext: ?*anyopaque,
    commandContext: ?*anyopaque,
    inboundConnectInformation: ?*WSMAN_DATA,
) callconv(@import("std").os.windows.WINAPI) void;
/// Quota limits an authorization plugin can impose per user.
pub const WSMAN_AUTHZ_QUOTA = extern struct {
    maxAllowedConcurrentShells: u32,
    maxAllowedConcurrentOperations: u32,
    timeslotSize: u32,
    maxAllowedOperationsPerTimeslot: u32,
};
/// Authorize a user for access.
pub const WSMAN_PLUGIN_AUTHORIZE_USER = fn(
    pluginContext: ?*anyopaque,
    senderDetails: ?*WSMAN_SENDER_DETAILS,
    flags: u32,
) callconv(@import("std").os.windows.WINAPI) void;
/// Authorize one specific operation against a resource URI.
pub const WSMAN_PLUGIN_AUTHORIZE_OPERATION = fn(
    pluginContext: ?*anyopaque,
    senderDetails: ?*WSMAN_SENDER_DETAILS,
    flags: u32,
    operation: u32,
    action: ?[*:0]const u16,
    resourceUri: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) void;
/// Query the quota to apply for this sender.
pub const WSMAN_PLUGIN_AUTHORIZE_QUERY_QUOTA = fn(
    pluginContext: ?*anyopaque,
    senderDetails: ?*WSMAN_SENDER_DETAILS,
    flags: u32,
) callconv(@import("std").os.windows.WINAPI) void;
/// Release the authorization context created by the plugin.
pub const WSMAN_PLUGIN_AUTHORIZE_RELEASE_CONTEXT = fn(
    userAuthorizationContext: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) void;
// COM class IDs for the scripting (IDispatch) layer; exported as pointers
// to file-local GUID values, per this file's convention.
const CLSID_WSMan_Value = Guid.initString("bced617b-ec03-420b-8508-977dc7a686bd");
pub const CLSID_WSMan = &CLSID_WSMan_Value;
const CLSID_WSManInternal_Value = Guid.initString("7de087a5-5dcb-4df7-bb12-0924ad8fbd9a");
pub const CLSID_WSManInternal = &CLSID_WSManInternal_Value;
/// Session-creation flags for the WSMan scripting API (wsmandisp.h).
/// Values are single bits: 0x1 (UTF8) through 0x8000000 (UseSsl).
pub const WSManSessionFlags = enum(i32) {
    UTF8 = 1,
    /// 0x1000 — credentials are an explicit username/password pair
    /// (WSManFlagCredUsernamePassword). The generated source contained a
    /// redaction placeholder ("<PASSWORD>") here, which did not compile;
    /// restored to the documented SDK value.
    CredUsernamePassword = 4096,
    SkipCACheck = 8192,
    SkipCNCheck = 16384,
    UseNoAuthentication = 32768,
    UseDigest = 65536,
    UseNegotiate = 131072,
    UseBasic = 262144,
    UseKerberos = 524288,
    NoEncryption = 1048576,
    UseClientCertificate = 2097152,
    EnableSPNServerPort = 4194304,
    UTF16 = 8388608,
    UseCredSsp = 16777216,
    SkipRevocationCheck = 33554432,
    AllowNegotiateImplicitCredentials = 67108864,
    UseSsl = 134217728,
};
// Flat WSManFlag* aliases for the WSManSessionFlags members, mirroring the
// names scripting clients use.
pub const WSManFlagUTF8 = WSManSessionFlags.UTF8;
pub const WSManFlagCredUsernamePassword = WSManSessionFlags.CredUsernamePassword;
pub const WSManFlagSkipCACheck = WSManSessionFlags.SkipCACheck;
pub const WSManFlagSkipCNCheck = WSManSessionFlags.SkipCNCheck;
pub const WSManFlagUseNoAuthentication = WSManSessionFlags.UseNoAuthentication;
pub const WSManFlagUseDigest = WSManSessionFlags.UseDigest;
pub const WSManFlagUseNegotiate = WSManSessionFlags.UseNegotiate;
pub const WSManFlagUseBasic = WSManSessionFlags.UseBasic;
pub const WSManFlagUseKerberos = WSManSessionFlags.UseKerberos;
pub const WSManFlagNoEncryption = WSManSessionFlags.NoEncryption;
pub const WSManFlagUseClientCertificate = WSManSessionFlags.UseClientCertificate;
pub const WSManFlagEnableSPNServerPort = WSManSessionFlags.EnableSPNServerPort;
pub const WSManFlagUTF16 = WSManSessionFlags.UTF16;
pub const WSManFlagUseCredSsp = WSManSessionFlags.UseCredSsp;
pub const WSManFlagSkipRevocationCheck = WSManSessionFlags.SkipRevocationCheck;
pub const WSManFlagAllowNegotiateImplicitCredentials = WSManSessionFlags.AllowNegotiateImplicitCredentials;
pub const WSManFlagUseSsl = WSManSessionFlags.UseSsl;
/// Enumeration-mode flags. Several SDK names share the value 0 (see the
/// generator's comments below); their aliases therefore point at
/// ReturnObject rather than a member of their own.
pub const WSManEnumFlags = enum(i32) {
    NonXmlText = 1,
    ReturnObject = 0,
    ReturnEPR = 2,
    ReturnObjectAndEPR = 4,
    // HierarchyDeep = 0, this enum value conflicts with ReturnObject
    HierarchyShallow = 32,
    HierarchyDeepBasePropsOnly = 64,
    // AssociatedInstance = 0, this enum value conflicts with ReturnObject
    AssociationInstance = 128,
};
pub const WSManFlagNonXmlText = WSManEnumFlags.NonXmlText;
pub const WSManFlagReturnObject = WSManEnumFlags.ReturnObject;
pub const WSManFlagReturnEPR = WSManEnumFlags.ReturnEPR;
pub const WSManFlagReturnObjectAndEPR = WSManEnumFlags.ReturnObjectAndEPR;
pub const WSManFlagHierarchyDeep = WSManEnumFlags.ReturnObject;
pub const WSManFlagHierarchyShallow = WSManEnumFlags.HierarchyShallow;
pub const WSManFlagHierarchyDeepBasePropsOnly = WSManEnumFlags.HierarchyDeepBasePropsOnly;
pub const WSManFlagAssociatedInstance = WSManEnumFlags.ReturnObject;
pub const WSManFlagAssociationInstance = WSManEnumFlags.AssociationInstance;
/// Scripting-layer duplicate of WSManProxyAccessType (same bit values).
pub const WSManProxyAccessTypeFlags = enum(i32) {
    IEConfig = 1,
    WinHttpConfig = 2,
    AutoDetect = 4,
    NoProxyServer = 8,
};
pub const WSManProxyIEConfig = WSManProxyAccessTypeFlags.IEConfig;
pub const WSManProxyWinHttpConfig = WSManProxyAccessTypeFlags.WinHttpConfig;
pub const WSManProxyAutoDetect = WSManProxyAccessTypeFlags.AutoDetect;
pub const WSManProxyNoProxyServer = WSManProxyAccessTypeFlags.NoProxyServer;
/// Authentication schemes usable against a proxy.
pub const WSManProxyAuthenticationFlags = enum(i32) {
    Negotiate = 1,
    Basic = 2,
    Digest = 4,
};
pub const WSManFlagProxyAuthenticationUseNegotiate = WSManProxyAuthenticationFlags.Negotiate;
pub const WSManFlagProxyAuthenticationUseBasic = WSManProxyAuthenticationFlags.Basic;
pub const WSManFlagProxyAuthenticationUseDigest = WSManProxyAuthenticationFlags.Digest;
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IWSMan_Value = Guid.initString("190d8637-5cd3-496d-ad24-69636bb5a3b5");
pub const IID_IWSMan = &IID_IWSMan_Value;
/// COM interface IWSMan (extends IDispatch). The extern struct holds only
/// the vtable pointer; methods are exposed via the generated MethodMixin,
/// which forwards each call through the vtable with the receiver cast back
/// to *const IWSMan. Method names are prefixed with the interface name to
/// avoid collisions when several interfaces are mixed into one type.
pub const IWSMan = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // Creates a session (returned via the `session` out-parameter as an
        // IDispatch); `flags` presumably takes WSManSessionFlags values —
        // confirm against the IWSMan documentation.
        CreateSession: fn(
            self: *const IWSMan,
            connection: ?BSTR,
            flags: i32,
            connectionOptions: ?*IDispatch,
            session: ?*?*IDispatch,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateConnectionOptions: fn(
            self: *const IWSMan,
            connectionOptions: ?*?*IDispatch,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        // Property getter: CommandLine.
        get_CommandLine: fn(
            self: *const IWSMan,
            value: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        // Property getter: Error.
        get_Error: fn(
            self: *const IWSMan,
            value: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSMan_CreateSession(self: *const T, connection: ?BSTR, flags: i32, connectionOptions: ?*IDispatch, session: ?*?*IDispatch) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSMan.VTable, self.vtable).CreateSession(@ptrCast(*const IWSMan, self), connection, flags, connectionOptions, session);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSMan_CreateConnectionOptions(self: *const T, connectionOptions: ?*?*IDispatch) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSMan.VTable, self.vtable).CreateConnectionOptions(@ptrCast(*const IWSMan, self), connectionOptions);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSMan_get_CommandLine(self: *const T, value: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSMan.VTable, self.vtable).get_CommandLine(@ptrCast(*const IWSMan, self), value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSMan_get_Error(self: *const T, value: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSMan.VTable, self.vtable).get_Error(@ptrCast(*const IWSMan, self), value);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IWSManEx_Value = Guid.initString("2d53bdaa-798e-49e6-a1aa-74d01256f411");
pub const IID_IWSManEx = &IID_IWSManEx_Value;
/// Extends IWSMan with resource-locator creation, error-message lookup
/// (GetErrorMessage), and a family of accessors that each return one
/// session/enumeration flag value through an out parameter (`flags: ?*i32`).
/// COM layout: the vtable embeds IWSMan.VTable first; field order below is
/// ABI-critical and must match the type library's method order.
pub const IWSManEx = extern struct {
    pub const VTable = extern struct {
        base: IWSMan.VTable,
        CreateResourceLocator: fn(
            self: *const IWSManEx,
            strResourceLocator: ?BSTR,
            newResourceLocator: ?*?*IDispatch,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // The SessionFlag*/EnumerationFlag* entries below all share the same
        // shape: write a single flag value to `flags` and return an HRESULT.
        SessionFlagUTF8: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagCredUsernamePassword: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagSkipCACheck: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagSkipCNCheck: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagUseDigest: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagUseNegotiate: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagUseBasic: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagUseKerberos: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagNoEncryption: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagEnableSPNServerPort: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagUseNoAuthentication: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumerationFlagNonXmlText: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumerationFlagReturnEPR: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumerationFlagReturnObjectAndEPR: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // Looks up the message text for an error code; caller receives a BSTR.
        GetErrorMessage: fn(
            self: *const IWSManEx,
            errorNumber: u32,
            errorMessage: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumerationFlagHierarchyDeep: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumerationFlagHierarchyShallow: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumerationFlagHierarchyDeepBasePropsOnly: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumerationFlagReturnObject: fn(
            self: *const IWSManEx,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Injects IWSManEx wrappers (and, transitively, IWSMan and IDispatch
    /// methods) into any type T with a layout-compatible `vtable` field.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IWSMan.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_CreateResourceLocator(self: *const T, strResourceLocator: ?BSTR, newResourceLocator: ?*?*IDispatch) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).CreateResourceLocator(@ptrCast(*const IWSManEx, self), strResourceLocator, newResourceLocator);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_SessionFlagUTF8(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).SessionFlagUTF8(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_SessionFlagCredUsernamePassword(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).SessionFlagCredUsernamePassword(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_SessionFlagSkipCACheck(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).SessionFlagSkipCACheck(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_SessionFlagSkipCNCheck(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).SessionFlagSkipCNCheck(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_SessionFlagUseDigest(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).SessionFlagUseDigest(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_SessionFlagUseNegotiate(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).SessionFlagUseNegotiate(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_SessionFlagUseBasic(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).SessionFlagUseBasic(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_SessionFlagUseKerberos(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).SessionFlagUseKerberos(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_SessionFlagNoEncryption(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).SessionFlagNoEncryption(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_SessionFlagEnableSPNServerPort(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).SessionFlagEnableSPNServerPort(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_SessionFlagUseNoAuthentication(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).SessionFlagUseNoAuthentication(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_EnumerationFlagNonXmlText(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).EnumerationFlagNonXmlText(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_EnumerationFlagReturnEPR(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).EnumerationFlagReturnEPR(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_EnumerationFlagReturnObjectAndEPR(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).EnumerationFlagReturnObjectAndEPR(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_GetErrorMessage(self: *const T, errorNumber: u32, errorMessage: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).GetErrorMessage(@ptrCast(*const IWSManEx, self), errorNumber, errorMessage);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_EnumerationFlagHierarchyDeep(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).EnumerationFlagHierarchyDeep(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_EnumerationFlagHierarchyShallow(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).EnumerationFlagHierarchyShallow(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_EnumerationFlagHierarchyDeepBasePropsOnly(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).EnumerationFlagHierarchyDeepBasePropsOnly(@ptrCast(*const IWSManEx, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx_EnumerationFlagReturnObject(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx.VTable, self.vtable).EnumerationFlagReturnObject(@ptrCast(*const IWSManEx, self), flags);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IWSManEx2_Value = Guid.initString("1d1b5ae0-42d9-4021-8261-3987619512e9");
pub const IID_IWSManEx2 = &IID_IWSManEx2_Value;

/// Extends IWSManEx with a single accessor that writes the
/// use-client-certificate session flag value through an out parameter.
/// COM layout: the vtable embeds IWSManEx.VTable first; field order is
/// ABI-critical.
pub const IWSManEx2 = extern struct {
    pub const VTable = extern struct {
        base: IWSManEx.VTable,
        SessionFlagUseClientCertificate: fn(self: *const IWSManEx2, flags: ?*i32) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Injects the IWSManEx2 wrapper (plus all inherited methods) into T.
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IWSManEx.MethodMixin(T);
            // Method name carries the interface prefix to avoid collisions.
            pub fn IWSManEx2_SessionFlagUseClientCertificate(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
                const vt = @ptrCast(*const IWSManEx2.VTable, self.vtable);
                return vt.SessionFlagUseClientCertificate(@ptrCast(*const IWSManEx2, self), flags);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.1'
const IID_IWSManEx3_Value = Guid.initString("6400e966-011d-4eac-8474-049e0848afad")
pub const IID_IWSManEx3 = &IID_IWSManEx3_Value;
/// Extends IWSManEx2 with additional flag accessors (UTF16, CredSSP,
/// association/associated-instance enumeration, revocation-check skip,
/// implicit Negotiate credentials, SSL). Each writes one flag value through
/// `flags: ?*i32` and returns an HRESULT.
/// COM layout: the vtable embeds IWSManEx2.VTable first; field order below is
/// ABI-critical.
pub const IWSManEx3 = extern struct {
    pub const VTable = extern struct {
        base: IWSManEx2.VTable,
        SessionFlagUTF16: fn(
            self: *const IWSManEx3,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagUseCredSsp: fn(
            self: *const IWSManEx3,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumerationFlagAssociationInstance: fn(
            self: *const IWSManEx3,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumerationFlagAssociatedInstance: fn(
            self: *const IWSManEx3,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagSkipRevocationCheck: fn(
            self: *const IWSManEx3,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagAllowNegotiateImplicitCredentials: fn(
            self: *const IWSManEx3,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SessionFlagUseSsl: fn(
            self: *const IWSManEx3,
            flags: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Injects the IWSManEx3 wrappers (plus all inherited methods) into T.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IWSManEx2.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx3_SessionFlagUTF16(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx3.VTable, self.vtable).SessionFlagUTF16(@ptrCast(*const IWSManEx3, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx3_SessionFlagUseCredSsp(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx3.VTable, self.vtable).SessionFlagUseCredSsp(@ptrCast(*const IWSManEx3, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx3_EnumerationFlagAssociationInstance(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx3.VTable, self.vtable).EnumerationFlagAssociationInstance(@ptrCast(*const IWSManEx3, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx3_EnumerationFlagAssociatedInstance(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx3.VTable, self.vtable).EnumerationFlagAssociatedInstance(@ptrCast(*const IWSManEx3, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx3_SessionFlagSkipRevocationCheck(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx3.VTable, self.vtable).SessionFlagSkipRevocationCheck(@ptrCast(*const IWSManEx3, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx3_SessionFlagAllowNegotiateImplicitCredentials(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx3.VTable, self.vtable).SessionFlagAllowNegotiateImplicitCredentials(@ptrCast(*const IWSManEx3, self), flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManEx3_SessionFlagUseSsl(self: *const T, flags: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManEx3.VTable, self.vtable).SessionFlagUseSsl(@ptrCast(*const IWSManEx3, self), flags);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IWSManConnectionOptions_Value = Guid.initString("f704e861-9e52-464f-b786-da5eb2320fdd");
pub const IID_IWSManConnectionOptions = &IID_IWSManConnectionOptions_Value;

/// Dispatch interface holding connection credentials: a readable/writable
/// UserName property plus a write-only Password property (no getter is
/// exposed for the password in this interface).
/// COM layout: the vtable embeds IDispatch.VTable first; field order is
/// ABI-critical.
pub const IWSManConnectionOptions = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // Property accessors ("SpecialName" methods in the type library).
        get_UserName: fn(self: *const IWSManConnectionOptions, name: ?*?BSTR) callconv(@import("std").os.windows.WINAPI) HRESULT,
        put_UserName: fn(self: *const IWSManConnectionOptions, name: ?BSTR) callconv(@import("std").os.windows.WINAPI) HRESULT,
        put_Password: fn(self: *const IWSManConnectionOptions, password: ?BSTR) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Injects the wrapper methods below (plus all IDispatch methods) into T.
    /// Method names carry the interface prefix to avoid collisions.
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IDispatch.MethodMixin(T);
            pub fn IWSManConnectionOptions_get_UserName(self: *const T, name: ?*?BSTR) callconv(.Inline) HRESULT {
                const vt = @ptrCast(*const IWSManConnectionOptions.VTable, self.vtable);
                return vt.get_UserName(@ptrCast(*const IWSManConnectionOptions, self), name);
            }
            pub fn IWSManConnectionOptions_put_UserName(self: *const T, name: ?BSTR) callconv(.Inline) HRESULT {
                const vt = @ptrCast(*const IWSManConnectionOptions.VTable, self.vtable);
                return vt.put_UserName(@ptrCast(*const IWSManConnectionOptions, self), name);
            }
            pub fn IWSManConnectionOptions_put_Password(self: *const T, password: ?BSTR) callconv(.Inline) HRESULT {
                const vt = @ptrCast(*const IWSManConnectionOptions.VTable, self.vtable);
                return vt.put_Password(@ptrCast(*const IWSManConnectionOptions, self), password);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IWSManConnectionOptionsEx_Value = Guid.initString("ef43edf7-2a48-4d93-9526-8bd6ab6d4a6b");
pub const IID_IWSManConnectionOptionsEx = &IID_IWSManConnectionOptionsEx_Value;

/// Extends IWSManConnectionOptions with a readable/writable
/// CertificateThumbprint property (BSTR in/out).
/// COM layout: the vtable embeds IWSManConnectionOptions.VTable first; field
/// order is ABI-critical.
pub const IWSManConnectionOptionsEx = extern struct {
    pub const VTable = extern struct {
        base: IWSManConnectionOptions.VTable,
        // Property accessors ("SpecialName" methods in the type library).
        get_CertificateThumbprint: fn(self: *const IWSManConnectionOptionsEx, thumbprint: ?*?BSTR) callconv(@import("std").os.windows.WINAPI) HRESULT,
        put_CertificateThumbprint: fn(self: *const IWSManConnectionOptionsEx, thumbprint: ?BSTR) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Injects the wrappers below (plus all inherited methods) into T.
    /// Method names carry the interface prefix to avoid collisions.
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IWSManConnectionOptions.MethodMixin(T);
            pub fn IWSManConnectionOptionsEx_get_CertificateThumbprint(self: *const T, thumbprint: ?*?BSTR) callconv(.Inline) HRESULT {
                const vt = @ptrCast(*const IWSManConnectionOptionsEx.VTable, self.vtable);
                return vt.get_CertificateThumbprint(@ptrCast(*const IWSManConnectionOptionsEx, self), thumbprint);
            }
            pub fn IWSManConnectionOptionsEx_put_CertificateThumbprint(self: *const T, thumbprint: ?BSTR) callconv(.Inline) HRESULT {
                const vt = @ptrCast(*const IWSManConnectionOptionsEx.VTable, self.vtable);
                return vt.put_CertificateThumbprint(@ptrCast(*const IWSManConnectionOptionsEx, self), thumbprint);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.1'
const IID_IWSManConnectionOptionsEx2_Value = Guid.initString("f500c9ec-24ee-48ab-b38d-fc9a164c658e");
pub const IID_IWSManConnectionOptionsEx2 = &IID_IWSManConnectionOptionsEx2_Value;
/// Extends IWSManConnectionOptionsEx with proxy configuration: SetProxy takes
/// an access type and authentication mechanism (see the
/// WSManProxyAccessTypeFlags / WSManProxyAuthenticationFlags values declared
/// above) plus optional credentials; the remaining Proxy* methods each write
/// one proxy constant through `value: ?*i32`.
/// COM layout: the vtable embeds IWSManConnectionOptionsEx.VTable first;
/// field order below is ABI-critical.
pub const IWSManConnectionOptionsEx2 = extern struct {
    pub const VTable = extern struct {
        base: IWSManConnectionOptionsEx.VTable,
        SetProxy: fn(
            self: *const IWSManConnectionOptionsEx2,
            accessType: i32,
            authenticationMechanism: i32,
            userName: ?BSTR,
            password: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ProxyIEConfig: fn(
            self: *const IWSManConnectionOptionsEx2,
            value: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ProxyWinHttpConfig: fn(
            self: *const IWSManConnectionOptionsEx2,
            value: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ProxyAutoDetect: fn(
            self: *const IWSManConnectionOptionsEx2,
            value: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ProxyNoProxyServer: fn(
            self: *const IWSManConnectionOptionsEx2,
            value: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ProxyAuthenticationUseNegotiate: fn(
            self: *const IWSManConnectionOptionsEx2,
            value: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ProxyAuthenticationUseBasic: fn(
            self: *const IWSManConnectionOptionsEx2,
            value: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ProxyAuthenticationUseDigest: fn(
            self: *const IWSManConnectionOptionsEx2,
            value: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Injects the wrappers below (plus all inherited methods) into T.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IWSManConnectionOptionsEx.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManConnectionOptionsEx2_SetProxy(self: *const T, accessType: i32, authenticationMechanism: i32, userName: ?BSTR, password: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManConnectionOptionsEx2.VTable, self.vtable).SetProxy(@ptrCast(*const IWSManConnectionOptionsEx2, self), accessType, authenticationMechanism, userName, password);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManConnectionOptionsEx2_ProxyIEConfig(self: *const T, value: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManConnectionOptionsEx2.VTable, self.vtable).ProxyIEConfig(@ptrCast(*const IWSManConnectionOptionsEx2, self), value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManConnectionOptionsEx2_ProxyWinHttpConfig(self: *const T, value: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManConnectionOptionsEx2.VTable, self.vtable).ProxyWinHttpConfig(@ptrCast(*const IWSManConnectionOptionsEx2, self), value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManConnectionOptionsEx2_ProxyAutoDetect(self: *const T, value: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManConnectionOptionsEx2.VTable, self.vtable).ProxyAutoDetect(@ptrCast(*const IWSManConnectionOptionsEx2, self), value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManConnectionOptionsEx2_ProxyNoProxyServer(self: *const T, value: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManConnectionOptionsEx2.VTable, self.vtable).ProxyNoProxyServer(@ptrCast(*const IWSManConnectionOptionsEx2, self), value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManConnectionOptionsEx2_ProxyAuthenticationUseNegotiate(self: *const T, value: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManConnectionOptionsEx2.VTable, self.vtable).ProxyAuthenticationUseNegotiate(@ptrCast(*const IWSManConnectionOptionsEx2, self), value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManConnectionOptionsEx2_ProxyAuthenticationUseBasic(self: *const T, value: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManConnectionOptionsEx2.VTable, self.vtable).ProxyAuthenticationUseBasic(@ptrCast(*const IWSManConnectionOptionsEx2, self), value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManConnectionOptionsEx2_ProxyAuthenticationUseDigest(self: *const T, value: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManConnectionOptionsEx2.VTable, self.vtable).ProxyAuthenticationUseDigest(@ptrCast(*const IWSManConnectionOptionsEx2, self), value);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IWSManSession_Value = Guid.initString("fc84fc58-1286-40c4-9da0-c8ef6ec241e0");
pub const IID_IWSManSession = &IID_IWSManSession_Value;
/// A WinRM session: the core resource operations (Get/Put/Create/Delete/
/// Invoke/Enumerate/Identify) plus the Error (read-only), BatchItems and
/// Timeout properties. Resource URIs are passed as VARIANTs; XML payloads and
/// results travel as BSTRs, and Enumerate yields an enumerator via IDispatch.
/// COM layout: the vtable embeds IDispatch.VTable first; field order below is
/// ABI-critical.
pub const IWSManSession = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        Get: fn(
            self: *const IWSManSession,
            resourceUri: VARIANT,
            flags: i32,
            resource: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Put: fn(
            self: *const IWSManSession,
            resourceUri: VARIANT,
            resource: ?BSTR,
            flags: i32,
            resultResource: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Create: fn(
            self: *const IWSManSession,
            resourceUri: VARIANT,
            resource: ?BSTR,
            flags: i32,
            newUri: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Delete: fn(
            self: *const IWSManSession,
            resourceUri: VARIANT,
            flags: i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Invoke: fn(
            self: *const IWSManSession,
            actionUri: ?BSTR,
            resourceUri: VARIANT,
            parameters: ?BSTR,
            flags: i32,
            result: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Enumerate: fn(
            self: *const IWSManSession,
            resourceUri: VARIANT,
            filter: ?BSTR,
            dialect: ?BSTR,
            flags: i32,
            resultSet: ?*?*IDispatch,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Identify: fn(
            self: *const IWSManSession,
            flags: i32,
            result: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Error: fn(
            self: *const IWSManSession,
            value: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_BatchItems: fn(
            self: *const IWSManSession,
            value: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_BatchItems: fn(
            self: *const IWSManSession,
            value: i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Timeout: fn(
            self: *const IWSManSession,
            value: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Timeout: fn(
            self: *const IWSManSession,
            value: i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Injects the wrappers below (plus all IDispatch methods) into T.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManSession_Get(self: *const T, resourceUri: VARIANT, flags: i32, resource: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManSession.VTable, self.vtable).Get(@ptrCast(*const IWSManSession, self), resourceUri, flags, resource);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManSession_Put(self: *const T, resourceUri: VARIANT, resource: ?BSTR, flags: i32, resultResource: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManSession.VTable, self.vtable).Put(@ptrCast(*const IWSManSession, self), resourceUri, resource, flags, resultResource);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManSession_Create(self: *const T, resourceUri: VARIANT, resource: ?BSTR, flags: i32, newUri: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManSession.VTable, self.vtable).Create(@ptrCast(*const IWSManSession, self), resourceUri, resource, flags, newUri);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManSession_Delete(self: *const T, resourceUri: VARIANT, flags: i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManSession.VTable, self.vtable).Delete(@ptrCast(*const IWSManSession, self), resourceUri, flags);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManSession_Invoke(self: *const T, actionUri: ?BSTR, resourceUri: VARIANT, parameters: ?BSTR, flags: i32, result: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManSession.VTable, self.vtable).Invoke(@ptrCast(*const IWSManSession, self), actionUri, resourceUri, parameters, flags, result);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManSession_Enumerate(self: *const T, resourceUri: VARIANT, filter: ?BSTR, dialect: ?BSTR, flags: i32, resultSet: ?*?*IDispatch) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManSession.VTable, self.vtable).Enumerate(@ptrCast(*const IWSManSession, self), resourceUri, filter, dialect, flags, resultSet);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManSession_Identify(self: *const T, flags: i32, result: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManSession.VTable, self.vtable).Identify(@ptrCast(*const IWSManSession, self), flags, result);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManSession_get_Error(self: *const T, value: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManSession.VTable, self.vtable).get_Error(@ptrCast(*const IWSManSession, self), value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManSession_get_BatchItems(self: *const T, value: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManSession.VTable, self.vtable).get_BatchItems(@ptrCast(*const IWSManSession, self), value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManSession_put_BatchItems(self: *const T, value: i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManSession.VTable, self.vtable).put_BatchItems(@ptrCast(*const IWSManSession, self), value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManSession_get_Timeout(self: *const T, value: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManSession.VTable, self.vtable).get_Timeout(@ptrCast(*const IWSManSession, self), value);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManSession_put_Timeout(self: *const T, value: i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManSession.VTable, self.vtable).put_Timeout(@ptrCast(*const IWSManSession, self), value);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IWSManEnumerator_Value = Guid.initString("f3457ca9-abb9-4fa5-b850-90e8ca300e7f");
pub const IID_IWSManEnumerator = &IID_IWSManEnumerator_Value;

/// Stream-style enumerator over results of IWSManSession.Enumerate: ReadItem
/// fetches the next item as a BSTR, AtEndOfStream reports exhaustion
/// (VARIANT_BOOL-style i16 out parameter), and Error is a read-only BSTR
/// property.
/// COM layout: the vtable embeds IDispatch.VTable first; field order is
/// ABI-critical.
pub const IWSManEnumerator = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        ReadItem: fn(self: *const IWSManEnumerator, resource: ?*?BSTR) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // Property accessors ("SpecialName" methods in the type library).
        get_AtEndOfStream: fn(self: *const IWSManEnumerator, eos: ?*i16) callconv(@import("std").os.windows.WINAPI) HRESULT,
        get_Error: fn(self: *const IWSManEnumerator, value: ?*?BSTR) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Injects the wrappers below (plus all IDispatch methods) into T.
    /// Method names carry the interface prefix to avoid collisions.
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IDispatch.MethodMixin(T);
            pub fn IWSManEnumerator_ReadItem(self: *const T, resource: ?*?BSTR) callconv(.Inline) HRESULT {
                const vt = @ptrCast(*const IWSManEnumerator.VTable, self.vtable);
                return vt.ReadItem(@ptrCast(*const IWSManEnumerator, self), resource);
            }
            pub fn IWSManEnumerator_get_AtEndOfStream(self: *const T, eos: ?*i16) callconv(.Inline) HRESULT {
                const vt = @ptrCast(*const IWSManEnumerator.VTable, self.vtable);
                return vt.get_AtEndOfStream(@ptrCast(*const IWSManEnumerator, self), eos);
            }
            pub fn IWSManEnumerator_get_Error(self: *const T, value: ?*?BSTR) callconv(.Inline) HRESULT {
                const vt = @ptrCast(*const IWSManEnumerator.VTable, self.vtable);
                return vt.get_Error(@ptrCast(*const IWSManEnumerator, self), value);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IWSManResourceLocator_Value = Guid.initString("a7a1ba28-de41-466a-ad0a-c4059ead7428");
pub const IID_IWSManResourceLocator = &IID_IWSManResourceLocator_Value;
// COM interface for addressing a WSMan resource: a resource URI plus optional
// selectors, fragment path/dialect and request options. Generated binding;
// do not reorder fields — the VTable layout is the Windows ABI.
pub const IWSManResourceLocator = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_ResourceURI: fn(
            self: *const IWSManResourceLocator,
            uri: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ResourceURI: fn(
            self: *const IWSManResourceLocator,
            uri: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddSelector: fn(
            self: *const IWSManResourceLocator,
            resourceSelName: ?BSTR,
            selValue: VARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ClearSelectors: fn(
            self: *const IWSManResourceLocator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_FragmentPath: fn(
            self: *const IWSManResourceLocator,
            text: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_FragmentPath: fn(
            self: *const IWSManResourceLocator,
            text: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_FragmentDialect: fn(
            self: *const IWSManResourceLocator,
            text: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_FragmentDialect: fn(
            self: *const IWSManResourceLocator,
            text: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddOption: fn(
            self: *const IWSManResourceLocator,
            OptionName: ?BSTR,
            OptionValue: VARIANT,
            mustComply: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_MustUnderstandOptions: fn(
            self: *const IWSManResourceLocator,
            mustUnderstand: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_MustUnderstandOptions: fn(
            self: *const IWSManResourceLocator,
            mustUnderstand: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ClearOptions: fn(
            self: *const IWSManResourceLocator,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Error: fn(
            self: *const IWSManResourceLocator,
            value: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Convenience wrappers over the vtable; `T` is the embedding COM type.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManResourceLocator_put_ResourceURI(self: *const T, uri: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManResourceLocator.VTable, self.vtable).put_ResourceURI(@ptrCast(*const IWSManResourceLocator, self), uri);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManResourceLocator_get_ResourceURI(self: *const T, uri: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManResourceLocator.VTable, self.vtable).get_ResourceURI(@ptrCast(*const IWSManResourceLocator, self), uri);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManResourceLocator_AddSelector(self: *const T, resourceSelName: ?BSTR, selValue: VARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManResourceLocator.VTable, self.vtable).AddSelector(@ptrCast(*const IWSManResourceLocator, self), resourceSelName, selValue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManResourceLocator_ClearSelectors(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManResourceLocator.VTable, self.vtable).ClearSelectors(@ptrCast(*const IWSManResourceLocator, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManResourceLocator_get_FragmentPath(self: *const T, text: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManResourceLocator.VTable, self.vtable).get_FragmentPath(@ptrCast(*const IWSManResourceLocator, self), text);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManResourceLocator_put_FragmentPath(self: *const T, text: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManResourceLocator.VTable, self.vtable).put_FragmentPath(@ptrCast(*const IWSManResourceLocator, self), text);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManResourceLocator_get_FragmentDialect(self: *const T, text: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManResourceLocator.VTable, self.vtable).get_FragmentDialect(@ptrCast(*const IWSManResourceLocator, self), text);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManResourceLocator_put_FragmentDialect(self: *const T, text: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManResourceLocator.VTable, self.vtable).put_FragmentDialect(@ptrCast(*const IWSManResourceLocator, self), text);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManResourceLocator_AddOption(self: *const T, OptionName: ?BSTR, OptionValue: VARIANT, mustComply: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManResourceLocator.VTable, self.vtable).AddOption(@ptrCast(*const IWSManResourceLocator, self), OptionName, OptionValue, mustComply);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManResourceLocator_put_MustUnderstandOptions(self: *const T, mustUnderstand: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManResourceLocator.VTable, self.vtable).put_MustUnderstandOptions(@ptrCast(*const IWSManResourceLocator, self), mustUnderstand);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManResourceLocator_get_MustUnderstandOptions(self: *const T, mustUnderstand: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManResourceLocator.VTable, self.vtable).get_MustUnderstandOptions(@ptrCast(*const IWSManResourceLocator, self), mustUnderstand);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManResourceLocator_ClearOptions(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManResourceLocator.VTable, self.vtable).ClearOptions(@ptrCast(*const IWSManResourceLocator, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManResourceLocator_get_Error(self: *const T, value: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManResourceLocator.VTable, self.vtable).get_Error(@ptrCast(*const IWSManResourceLocator, self), value);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
const IID_IWSManResourceLocatorInternal_Value = Guid.initString("effaead7-7ec8-4716-b9be-f2e7e9fb4adb");
pub const IID_IWSManResourceLocatorInternal = &IID_IWSManResourceLocatorInternal_Value;
// Marker COM interface: declares no methods beyond the IUnknown base.
pub const IWSManResourceLocatorInternal = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
    };}
    pub usingnamespace MethodMixin(@This());
};
const IID_IWSManInternal_Value = Guid.initString("04ae2b1d-9954-4d99-94a9-a961e72c3a13");
pub const IID_IWSManInternal = &IID_IWSManInternal_Value;
// Internal WSMan COM interface exposing a single SDDL-configuration call.
// Generated binding; VTable layout is the Windows ABI — do not reorder.
pub const IWSManInternal = extern struct {
    pub const VTable = extern struct {
        base: IDispatch.VTable,
        ConfigSDDL: fn(
            self: *const IWSManInternal,
            session: ?*IDispatch,
            resourceUri: VARIANT,
            flags: i32,
            resource: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IDispatch.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWSManInternal_ConfigSDDL(self: *const T, session: ?*IDispatch, resourceUri: VARIANT, flags: i32, resource: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWSManInternal.VTable, self.vtable).ConfigSDDL(@ptrCast(*const IWSManInternal, self), session, resourceUri, flags, resource);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
//--------------------------------------------------------------------------------
// Section: Functions (33)
//--------------------------------------------------------------------------------
// NOTE(review): the comments below are derived from the declaration names and
// signatures only — confirm behavioral details against the wsman.h docs.
// API lifecycle: handle out-param, released with WSManDeinitialize.
// TODO: this type is limited to platform 'windows8.0'
pub extern "WsmSvc" fn WSManInitialize(
    flags: u32,
    apiHandle: ?*?*WSMAN_API,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManDeinitialize(
    apiHandle: ?*WSMAN_API,
    flags: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// Formats an error code into `message`; `messageLengthUsed` reports the size.
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManGetErrorMessage(
    apiHandle: ?*WSMAN_API,
    flags: u32,
    languageCode: ?[*:0]const u16,
    errorCode: u32,
    messageLength: u32,
    message: ?[*:0]u16,
    messageLengthUsed: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// Session lifecycle: `session` out-param, released with WSManCloseSession.
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManCreateSession(
    apiHandle: ?*WSMAN_API,
    connection: ?[*:0]const u16,
    flags: u32,
    serverAuthenticationCredentials: ?*WSMAN_AUTHENTICATION_CREDENTIALS,
    proxyInfo: ?*WSMAN_PROXY_INFO,
    session: ?*?*WSMAN_SESSION,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManCloseSession(
    session: ?*WSMAN_SESSION,
    flags: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// Session option accessors (set / get-as-dword / get-as-string).
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManSetSessionOption(
    session: ?*WSMAN_SESSION,
    option: WSManSessionOption,
    data: ?*WSMAN_DATA,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManGetSessionOptionAsDword(
    session: ?*WSMAN_SESSION,
    option: WSManSessionOption,
    value: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManGetSessionOptionAsString(
    session: ?*WSMAN_SESSION,
    option: WSManSessionOption,
    stringLength: u32,
    string: ?[*:0]u16,
    stringLengthUsed: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManCloseOperation(
    operationHandle: ?*WSMAN_OPERATION,
    flags: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// Shell API (asynchronous): these return void and take a WSMAN_SHELL_ASYNC
// callback struct; results are presumably delivered via that callback —
// confirm against the Windows WSMan documentation.
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManCreateShell(
    session: ?*WSMAN_SESSION,
    flags: u32,
    resourceUri: ?[*:0]const u16,
    startupInfo: ?*WSMAN_SHELL_STARTUP_INFO_V11,
    options: ?*WSMAN_OPTION_SET,
    createXml: ?*WSMAN_DATA,
    @"async": ?*WSMAN_SHELL_ASYNC,
    shell: ?*?*WSMAN_SHELL,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManRunShellCommand(
    shell: ?*WSMAN_SHELL,
    flags: u32,
    commandLine: ?[*:0]const u16,
    args: ?*WSMAN_COMMAND_ARG_SET,
    options: ?*WSMAN_OPTION_SET,
    @"async": ?*WSMAN_SHELL_ASYNC,
    command: ?*?*WSMAN_COMMAND,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManSignalShell(
    shell: ?*WSMAN_SHELL,
    command: ?*WSMAN_COMMAND,
    flags: u32,
    code: ?[*:0]const u16,
    @"async": ?*WSMAN_SHELL_ASYNC,
    signalOperation: ?*?*WSMAN_OPERATION,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManReceiveShellOutput(
    shell: ?*WSMAN_SHELL,
    command: ?*WSMAN_COMMAND,
    flags: u32,
    desiredStreamSet: ?*WSMAN_STREAM_ID_SET,
    @"async": ?*WSMAN_SHELL_ASYNC,
    receiveOperation: ?*?*WSMAN_OPERATION,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManSendShellInput(
    shell: ?*WSMAN_SHELL,
    command: ?*WSMAN_COMMAND,
    flags: u32,
    streamId: ?[*:0]const u16,
    streamData: ?*WSMAN_DATA,
    endOfStream: BOOL,
    @"async": ?*WSMAN_SHELL_ASYNC,
    sendOperation: ?*?*WSMAN_OPERATION,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManCloseCommand(
    commandHandle: ?*WSMAN_COMMAND,
    flags: u32,
    @"async": ?*WSMAN_SHELL_ASYNC,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManCloseShell(
    shellHandle: ?*WSMAN_SHELL,
    flags: u32,
    @"async": ?*WSMAN_SHELL_ASYNC,
) callconv(@import("std").os.windows.WINAPI) void;
// Windows 8 shell extensions: caller-supplied shell/command IDs plus
// disconnect/reconnect support (same async callback convention as above).
// TODO: this type is limited to platform 'windows8.0'
pub extern "WsmSvc" fn WSManCreateShellEx(
    session: ?*WSMAN_SESSION,
    flags: u32,
    resourceUri: ?[*:0]const u16,
    shellId: ?[*:0]const u16,
    startupInfo: ?*WSMAN_SHELL_STARTUP_INFO_V11,
    options: ?*WSMAN_OPTION_SET,
    createXml: ?*WSMAN_DATA,
    @"async": ?*WSMAN_SHELL_ASYNC,
    shell: ?*?*WSMAN_SHELL,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows8.0'
pub extern "WsmSvc" fn WSManRunShellCommandEx(
    shell: ?*WSMAN_SHELL,
    flags: u32,
    commandId: ?[*:0]const u16,
    commandLine: ?[*:0]const u16,
    args: ?*WSMAN_COMMAND_ARG_SET,
    options: ?*WSMAN_OPTION_SET,
    @"async": ?*WSMAN_SHELL_ASYNC,
    command: ?*?*WSMAN_COMMAND,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows8.0'
pub extern "WsmSvc" fn WSManDisconnectShell(
    shell: ?*WSMAN_SHELL,
    flags: u32,
    disconnectInfo: ?*WSMAN_SHELL_DISCONNECT_INFO,
    @"async": ?*WSMAN_SHELL_ASYNC,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows8.0'
pub extern "WsmSvc" fn WSManReconnectShell(
    shell: ?*WSMAN_SHELL,
    flags: u32,
    @"async": ?*WSMAN_SHELL_ASYNC,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows8.0'
pub extern "WsmSvc" fn WSManReconnectShellCommand(
    commandHandle: ?*WSMAN_COMMAND,
    flags: u32,
    @"async": ?*WSMAN_SHELL_ASYNC,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows8.0'
pub extern "WsmSvc" fn WSManConnectShell(
    session: ?*WSMAN_SESSION,
    flags: u32,
    resourceUri: ?[*:0]const u16,
    shellID: ?[*:0]const u16,
    options: ?*WSMAN_OPTION_SET,
    connectXml: ?*WSMAN_DATA,
    @"async": ?*WSMAN_SHELL_ASYNC,
    shell: ?*?*WSMAN_SHELL,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows8.0'
pub extern "WsmSvc" fn WSManConnectShellCommand(
    shell: ?*WSMAN_SHELL,
    flags: u32,
    commandID: ?[*:0]const u16,
    options: ?*WSMAN_OPTION_SET,
    connectXml: ?*WSMAN_DATA,
    @"async": ?*WSMAN_SHELL_ASYNC,
    command: ?*?*WSMAN_COMMAND,
) callconv(@import("std").os.windows.WINAPI) void;
// Plugin/provider-side API: called by plugins hosted in the WSMan service to
// report results back for a WSMAN_PLUGIN_REQUEST. Return value is a Win32
// error code (u32).
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManPluginReportContext(
    requestDetails: ?*WSMAN_PLUGIN_REQUEST,
    flags: u32,
    context: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManPluginReceiveResult(
    requestDetails: ?*WSMAN_PLUGIN_REQUEST,
    flags: u32,
    stream: ?[*:0]const u16,
    streamResult: ?*WSMAN_DATA,
    commandState: ?[*:0]const u16,
    exitCode: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManPluginOperationComplete(
    requestDetails: ?*WSMAN_PLUGIN_REQUEST,
    flags: u32,
    errorCode: u32,
    extendedInformation: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManPluginGetOperationParameters(
    requestDetails: ?*WSMAN_PLUGIN_REQUEST,
    flags: u32,
    data: ?*WSMAN_DATA,
) callconv(@import("std").os.windows.WINAPI) u32;
pub extern "WsmSvc" fn WSManPluginGetConfiguration(
    pluginContext: ?*anyopaque,
    flags: u32,
    data: ?*WSMAN_DATA,
) callconv(@import("std").os.windows.WINAPI) u32;
pub extern "WsmSvc" fn WSManPluginReportCompletion(
    pluginContext: ?*anyopaque,
    flags: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManPluginFreeRequestDetails(
    requestDetails: ?*WSMAN_PLUGIN_REQUEST,
) callconv(@import("std").os.windows.WINAPI) u32;
// Authorization-plugin completion callbacks.
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManPluginAuthzUserComplete(
    senderDetails: ?*WSMAN_SENDER_DETAILS,
    flags: u32,
    userAuthorizationContext: ?*anyopaque,
    impersonationToken: ?HANDLE,
    userIsAdministrator: BOOL,
    errorCode: u32,
    extendedErrorInformation: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManPluginAuthzOperationComplete(
    senderDetails: ?*WSMAN_SENDER_DETAILS,
    flags: u32,
    userAuthorizationContext: ?*anyopaque,
    errorCode: u32,
    extendedErrorInformation: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.1'
pub extern "WsmSvc" fn WSManPluginAuthzQueryQuotaComplete(
    senderDetails: ?*WSMAN_SENDER_DETAILS,
    flags: u32,
    quota: ?*WSMAN_AUTHZ_QUOTA,
    errorCode: u32,
    extendedErrorInformation: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) u32;
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
// This module declares no A/W function pairs, so every unicode mode maps to an
// empty namespace; the switch is kept for structural consistency with the
// other generated win32 modules.
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
    .ansi => struct {
    },
    .wide => struct {
    },
    .unspecified => if (@import("builtin").is_test) struct {
    } else struct {
    },
};
//--------------------------------------------------------------------------------
// Section: Imports (9)
//--------------------------------------------------------------------------------
const Guid = @import("../zig.zig").Guid;
const BOOL = @import("../foundation.zig").BOOL;
const BSTR = @import("../foundation.zig").BSTR;
const HANDLE = @import("../foundation.zig").HANDLE;
const HRESULT = @import("../foundation.zig").HRESULT;
const IDispatch = @import("../system/com.zig").IDispatch;
const IUnknown = @import("../system/com.zig").IUnknown;
const PWSTR = @import("../foundation.zig").PWSTR;
const VARIANT = @import("../system/com.zig").VARIANT;
// Generated smoke test: forces semantic analysis of every declaration in this
// module so that type errors in the bindings surface under `zig test`.
test {
    // The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476
    if (@hasDecl(@This(), "WSMAN_SHELL_COMPLETION_FUNCTION")) { _ = WSMAN_SHELL_COMPLETION_FUNCTION; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_RELEASE_SHELL_CONTEXT")) { _ = WSMAN_PLUGIN_RELEASE_SHELL_CONTEXT; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_RELEASE_COMMAND_CONTEXT")) { _ = WSMAN_PLUGIN_RELEASE_COMMAND_CONTEXT; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_STARTUP")) { _ = WSMAN_PLUGIN_STARTUP; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_SHUTDOWN")) { _ = WSMAN_PLUGIN_SHUTDOWN; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_SHELL")) { _ = WSMAN_PLUGIN_SHELL; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_COMMAND")) { _ = WSMAN_PLUGIN_COMMAND; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_SEND")) { _ = WSMAN_PLUGIN_SEND; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_RECEIVE")) { _ = WSMAN_PLUGIN_RECEIVE; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_SIGNAL")) { _ = WSMAN_PLUGIN_SIGNAL; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_CONNECT")) { _ = WSMAN_PLUGIN_CONNECT; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_AUTHORIZE_USER")) { _ = WSMAN_PLUGIN_AUTHORIZE_USER; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_AUTHORIZE_OPERATION")) { _ = WSMAN_PLUGIN_AUTHORIZE_OPERATION; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_AUTHORIZE_QUERY_QUOTA")) { _ = WSMAN_PLUGIN_AUTHORIZE_QUERY_QUOTA; }
    if (@hasDecl(@This(), "WSMAN_PLUGIN_AUTHORIZE_RELEASE_CONTEXT")) { _ = WSMAN_PLUGIN_AUTHORIZE_RELEASE_CONTEXT; }
    // The inline-for below analyzes one branch per declaration; raise the
    // branch quota proportionally so large modules do not hit the default cap.
    @setEvalBranchQuota(
        @import("std").meta.declarations(@This()).len * 3
    );
    // reference all the pub declarations
    if (!@import("builtin").is_test) return;
    inline for (@import("std").meta.declarations(@This())) |decl| {
        if (decl.is_pub) {
            _ = decl;
        }
    }
}
|
win32/system/remote_management.zig
|
const std = @import("std");
const sort = std.sort;
const Allocator = std.mem.Allocator;
const PriorityQueue = std.PriorityQueue;
const testing = std.testing;
const DefaultPrng = std.rand.DefaultPrng;
const ArrayList = std.ArrayList;
// Ascending comparator for i32 (drives the min-heap `hi`).
fn less_than(a: i32, b: i32) std.math.Order {
    if (a < b) return std.math.Order.lt;
    if (a > b) return std.math.Order.gt;
    return std.math.Order.eq;
}
// Descending comparator for i32 (drives the max-heap `lo`).
fn greater_than(a: i32, b: i32) std.math.Order {
    return switch (std.math.order(a, b)) {
        .lt => .gt,
        .eq => .eq,
        .gt => .lt,
    };
}
// Streaming (running) median over pushed i32 values.
//
// Invariants after every `push`:
//   * `lo` (max-heap) holds the smaller half, `hi` (min-heap) the larger half;
//   * lo.count() == hi.count() or lo.count() == hi.count() + 1;
// so the lower median is always lo.peek().
const Median = struct {
    const Self = @This();
    lo: PriorityQueue(i32), // a max-heap of small items
    hi: PriorityQueue(i32), // a min-heap of large items
    fn new(allocator: *Allocator) Self {
        return Median{
            .lo = PriorityQueue(i32).init(allocator, greater_than),
            .hi = PriorityQueue(i32).init(allocator, less_than),
        };
    }
    // Releases both heaps.
    fn deinit(self: *Self) void {
        self.lo.deinit();
        self.hi.deinit();
    }
    // Lower median of everything pushed so far, or null if nothing was pushed.
    fn get_median(self: *Self) ?i32 {
        return self.lo.peek();
    }
    // Inserts `value` into the half it belongs to, then restores the
    // size invariant.
    fn push(self: *Self, value: i32) !void {
        if (self.lo.peek()) |mid| {
            if (value <= mid) {
                try self.lo.add(value);
            } else {
                try self.hi.add(value);
            }
        } else {
            // First element ever pushed: it is trivially the median.
            try self.lo.add(value);
        }
        try self.rebalance();
    }
    // Moves elements between the heaps until `lo` has either the same number
    // of items as `hi`, or exactly one more.
    fn rebalance(self: *Self) !void {
        while (self.hi.count() > self.lo.count()) {
            try self.lo.add(self.hi.remove());
        }
        while (self.lo.count() > self.hi.count() + 1) {
            try self.hi.add(self.lo.remove());
        }
    }
};
// Basic behavior: median after a few pushes, including duplicates.
test "smoke test" {
    var m = Median.new(testing.allocator);
    defer m.deinit();
    try m.push(3);
    try m.push(1);
    try m.push(4);
    try testing.expectEqual(m.get_median(), 3);
    try m.push(1);
    try m.push(5);
    try m.push(9);
    // [3, 1, 4, 1, 5, 9] --> [1, 1, 3, 4, 5, 9]
    try testing.expectEqual(m.get_median(), 3);
}
// Pushing 0..9 in order: lower median of 0..i is i/2 (truncated).
test "sorted asc" {
    var m = Median.new(testing.allocator);
    defer m.deinit();
    var i: i32 = 0;
    while (i < 10) : (i += 1) {
        try m.push(i);
        try testing.expectEqual(m.get_median(), @divTrunc(i, 2));
    }
}
// Cross-check the streaming median against a sort-based oracle on
// deterministic pseudo-random input (fixed seed 42).
test "psuedo-random" {
    var m = Median.new(testing.allocator);
    defer m.deinit();
    var buf = ArrayList(i32).init(testing.allocator);
    defer buf.deinit();
    var prng = DefaultPrng.init(42);
    var i: i32 = 0;
    while (i < 1_000) : (i += 1) {
        const x = prng.random.int(i32);
        try m.push(x);
        try buf.append(x);
        try testing.expectEqual(m.get_median(), brute_force_median(buf.items));
    }
}
// Returns the lower median of `xs`, or null for an empty slice.
// NOTE: sorts `xs` in place as a side effect.
fn brute_force_median(xs: []i32) ?i32 {
    // Guard: `xs.len - 1` on a usize underflows for an empty slice, and the
    // ?i32 return type already provides a sensible "no median" value.
    if (xs.len == 0) return null;
    sort.sort(i32, xs, {}, comptime sort.asc(i32));
    return xs[@divTrunc(xs.len - 1, 2)];
}
|
streaming-median/src/main.zig
|
const std = @import("std");
const gk = @import("gamekit");
const math = gk.math;
const gfx = gk.gfx;
const draw = gfx.draw;
// File-global PRNG; the fixed seed here is replaced from the clock in `main`.
var rng = std.rand.DefaultPrng.init(0x12345678);
/// Pseudo-random value in [at_least, less_than) from the file-global `rng`.
/// Supports integer and float `T`; any other type is unreachable.
pub fn range(comptime T: type, at_least: T, less_than: T) T {
    return switch (@typeInfo(T)) {
        .Int => rng.random().intRangeLessThanBiased(T, at_least, less_than),
        .Float => at_least + rng.random().float(T) * (less_than - at_least),
        else => unreachable,
    };
}
/// Random light tint: each channel is 200..254, packed with r in bits 0-7,
/// g in 8-15, b in 16-23 and full alpha (255) in 24-31.
pub fn randomColor() u32 {
    const red = 200 + range(u8, 0, 55);
    const green = 200 + range(u8, 0, 55);
    const blue = 200 + range(u8, 0, 55);
    var packed_color: u32 = @as(u32, 255) << 24;
    packed_color |= @as(u32, blue) << 16;
    packed_color |= @as(u32, green) << 8;
    packed_color |= red;
    return packed_color;
}
// A moving sprite: texture, position, velocity and packed tint color.
const Thing = struct {
    texture: gfx.Texture,
    pos: math.Vec2,
    vel: math.Vec2,
    col: u32,
    // Spawns at a random position near the top edge with a random
    // y-velocity and a random light tint.
    pub fn init(tex: gfx.Texture) Thing {
        return .{
            .texture = tex,
            .pos = .{
                .x = range(f32, 0, 750),
                .y = range(f32, 0, 50),
            },
            .vel = .{
                // range over [0, 0) collapses to 0: no horizontal motion.
                .x = range(f32, 0, 0),
                .y = range(f32, 0, 50),
            },
            .col = randomColor(),
        };
    }
};
// Render resources and scene state; all initialized in `init`.
var texture: gfx.Texture = undefined;
var checker_tex: gfx.Texture = undefined;
var white_tex: gfx.Texture = undefined;
var things: []Thing = undefined;
// Offscreen render target and the (animated) position it is blitted to.
var pass: gfx.OffscreenPass = undefined;
var rt_pos: math.Vec2 = .{};
var camera: gk.utils.Camera = undefined;
// Entry point: reseeds the global RNG from the wall clock (so each run
// differs) and hands control to the gamekit loop with our callbacks.
pub fn main() !void {
    rng.seed(@intCast(u64, std.time.milliTimestamp()));
    try gk.run(.{
        .init = init,
        .update = update,
        .render = render,
    });
}
// One-time setup: camera, textures, sprites and the offscreen pass.
fn init() !void {
    camera = gk.utils.Camera.init();
    const size = gk.window.size();
    // Center the camera on the window.
    camera.pos = .{ .x = @intToFloat(f32, size.w) * 0.5, .y = @intToFloat(f32, size.h) * 0.5 };
    texture = gfx.Texture.initFromFile(std.heap.c_allocator, "examples/assets/textures/bee-8.png", .nearest) catch unreachable;
    checker_tex = gfx.Texture.initCheckerTexture();
    white_tex = gfx.Texture.initSingleColor(0xFFFFFFFF);
    things = makeThings(12, texture);
    // 300x200 offscreen render target, drawn into each frame in `render`.
    pass = gfx.OffscreenPass.init(300, 200);
}
// Per-frame simulation: advance the sprites, drift the offscreen blit
// position, and pan the camera with WASD.
fn update() !void {
    // Frame-rate independent movement via dt.
    for (things) |*thing| {
        thing.pos.x += thing.vel.x * gk.time.dt();
        thing.pos.y += thing.vel.y * gk.time.dt();
    }
    rt_pos.x += 0.5;
    rt_pos.y += 0.5;
    // NOTE(review): `a` increases camera.pos.x while `d` decreases it — the
    // camera position moves opposite to the key's direction, which scrolls
    // the view; confirm this inversion is intended.
    if (gk.input.keyDown(.a)) {
        camera.pos.x += 100 * gk.time.dt();
    } else if (gk.input.keyDown(.d)) {
        camera.pos.x -= 100 * gk.time.dt();
    }
    if (gk.input.keyDown(.w)) {
        camera.pos.y -= 100 * gk.time.dt();
    } else if (gk.input.keyDown(.s)) {
        camera.pos.y += 100 * gk.time.dt();
    }
}
// Two-pass frame: draw jittering sprites into the offscreen target, then
// composite it plus the scene into the backbuffer under the camera transform.
fn render() !void {
    // offscreen rendering
    gk.gfx.beginPass(.{ .color = math.Color.purple, .pass = pass });
    // Four copies of the sprite with per-frame horizontal jitter.
    draw.tex(texture, .{ .x = 10 + range(f32, -5, 5) });
    draw.tex(texture, .{ .x = 50 + range(f32, -5, 5) });
    draw.tex(texture, .{ .x = 90 + range(f32, -5, 5) });
    draw.tex(texture, .{ .x = 130 + range(f32, -5, 5) });
    gk.gfx.endPass();
    // backbuffer rendering
    gk.gfx.beginPass(.{
        .color = math.Color{ .value = randomColor() },
        .trans_mat = camera.transMat(),
    });
    // render the offscreen texture to the backbuffer
    draw.tex(pass.color_texture, rt_pos);
    for (things) |thing| {
        draw.tex(thing.texture, thing.pos);
    }
    draw.texScale(checker_tex, .{ .x = 350, .y = 50 }, 12);
    draw.point(.{ .x = 400, .y = 300 }, 20, math.Color.orange);
    // Corner markers (positions assume an 800x600 window).
    draw.texScale(checker_tex, .{ .x = 0, .y = 0 }, 12.5); // bl
    draw.texScale(checker_tex, .{ .x = 800 - 50, .y = 0 }, 12.5); // br
    draw.texScale(checker_tex, .{ .x = 800 - 50, .y = 600 - 50 }, 12.5); // tr
    draw.texScale(checker_tex, .{ .x = 0, .y = 600 - 50 }, 12.5); // tl
    gk.gfx.endPass();
}
/// Allocates and initializes `n` Things that share `tex`. Memory comes from
/// the C allocator; the caller owns the returned slice.
fn makeThings(n: usize, tex: gfx.Texture) []Thing {
    const allocated = std.heap.c_allocator.alloc(Thing, n) catch unreachable;
    var i: usize = 0;
    while (i < allocated.len) : (i += 1) {
        allocated[i] = Thing.init(tex);
    }
    return allocated;
}
|
examples/offscreen.zig
|
const std = @import("std");
const math = @import("kira/math/common.zig");
const UniqueFixedList = @import("kira/utils.zig").UniqueFixedList;
usingnamespace @import("sharedtypes.zig");
// Generic ECS object: a component struct of type `complist` plus up to
// `maxtag` tags marking which components are present/active.
// `Error` comes from the usingnamespace'd sharedtypes module.
pub fn ObjectGeneric(comptime maxtag: u64, comptime complist: type) type {
    return struct {
        const Self = @This();
        pub const ComponentList = complist;
        pub const MaxTag = maxtag;
        pub const FixedList = UniqueFixedList(u64, MaxTag);
        id: u64 = undefined,
        tags: FixedList = undefined,
        components: ComponentList = undefined,
        /// Does the tag exists?
        pub fn hasTag(self: Self, tag: u64) bool {
            return self.tags.isExists(tag);
        }
        /// Clears the tags
        pub fn clearTags(self: *Self) void {
            self.tags.clear();
        }
        /// Inserts exactly MaxTag tags; duplicates surface as an Error from insert.
        pub fn addTags(self: *Self, tags: [MaxTag]u64) Error!void {
            var i: u64 = 0;
            while (i < MaxTag) : (i += 1) {
                _ = try self.tags.insert(tags[i]);
            }
        }
        /// Adds a component to the object
        /// Stores `component` into the field `compname` and registers `tag`.
        pub fn addComponent(self: *Self, comptime comptype: type, comptime compname: []const u8, component: comptype, tag: u64) Error!void {
            comptime {
                // Field existence is validated at compile time.
                if (!@hasField(ComponentList, compname)) @compileError("Unknown component type " ++ compname ++ "!");
            }
            _ = try self.tags.insert(tag);
            @field(self.components, compname) = component;
        }
        /// Removes a component to the object
        /// Note: only the tag is removed; the component field keeps its value.
        pub fn removeComponent(self: *Self, tag: u64) Error!void {
            try self.tags.remove(tag);
        }
        /// Gets a component to the object
        /// Returns Error.Unknown if `tag` is not registered on this object.
        pub fn getComponent(self: Self, comptime comptype: type, comptime compname: []const u8, tag: u64) Error!comptype {
            comptime {
                if (!@hasField(ComponentList, compname)) @compileError("Unknown component type " ++ compname ++ "!");
            }
            if (!self.tags.isExists(tag)) return Error.Unknown;
            return @field(self.components, compname);
        }
        /// Replaces a component
        /// Like addComponent, but requires `tag` to already exist.
        pub fn replaceComponent(self: *Self, comptime comptype: type, comptime compname: []const u8, component: comptype, tag: u64) Error!void {
            comptime {
                if (!@hasField(ComponentList, compname)) @compileError("Unknown component type " ++ compname ++ "!");
            }
            if (!self.tags.isExists(tag)) return Error.Unknown;
            @field(self.components, compname) = component;
        }
    };
}
// Generic ECS world: fixed-capacity storage for up to `max_object` objects,
// a parallel filtered view (pointers into objectlist), and up to `max_filter`
// active tag filters. `Error` comes from the usingnamespace'd sharedtypes.
pub fn WorldGeneric(comptime max_object: u64, comptime max_filter: u64, comptime object: type) type {
    return struct {
        const Self = @This();
        pub const Object = object;
        pub const MaxObject = max_object;
        pub const MaxFilter = max_filter;
        objectlist: [MaxObject]Object = undefined,
        objectidlist: UniqueFixedList(u64, MaxObject) = undefined,
        // Filtered view: pointers into objectlist, kept index-parallel
        // with filteredidlist.
        filteredlist: [MaxObject]*Object = undefined,
        filteredidlist: UniqueFixedList(u64, MaxObject) = undefined,
        filters: UniqueFixedList(u64, MaxFilter) = undefined,
        /// Does the object exists?
        pub fn hasObject(self: Self, id: u64) bool {
            return self.objectidlist.isExists(id);
        }
        /// Does the object exists in the filters?
        pub fn hasObjectInFilters(self: Self, id: u64) bool {
            return self.filteredidlist.isExists(id);
        }
        /// Does the filter exists?
        pub fn hasFilter(self: Self, filter: u64) bool {
            return self.filters.isExists(filter);
        }
        /// Does the filters exists?
        /// True only if every one of the `max` tags is an active filter.
        pub fn hasFilters(self: *Self, comptime max: u64, tags: [max]u64) bool {
            var i: u64 = 0;
            while (i < max) : (i += 1) {
                if (!self.filters.isExists(tags[i])) return false;
            }
            return true;
        }
        /// Clears the objects
        pub fn clearObjects(self: *Self) void {
            self.objectidlist.clear();
            self.filteredidlist.clear();
            self.objectlist = undefined;
            self.filteredlist = undefined;
        }
        /// Clears the filtered objects
        pub fn clearFilteredObjects(self: *Self) void {
            self.filteredidlist.clear();
            self.filteredlist = undefined;
        }
        /// Clears the filters
        pub fn clearFilters(self: *Self) void {
            self.filters.clear();
        }
        /// Adds an object
        /// Copies `obj` into the slot chosen by the id list; duplicate ids fail.
        pub fn addObject(self: *Self, obj: Object) Error!void {
            if (self.objectidlist.isExists(obj.id)) return Error.Duplicate;
            const i = try self.objectidlist.insert(obj.id);
            self.objectlist[i] = obj;
        }
        /// Force pushes the object in to the filtered list
        pub fn forceObjectToFilter(self: *Self, id: u64) Error!void {
            if (!self.objectidlist.isExists(id)) return Error.Unknown;
            if (self.filteredidlist.isExists(id)) return Error.Duplicate;
            const index = try self.objectidlist.getIndex(id);
            var ptr = &self.objectlist[index];
            const i = try self.filteredidlist.insert(id);
            self.filteredlist[i] = ptr;
        }
        /// Adds a filter
        pub fn addFilter(self: *Self, filter: u64) Error!void {
            _ = try self.filters.insert(filter);
        }
        /// Filter the objects
        /// Rebuilds the filtered view: an object qualifies only if it carries
        /// every active filter tag. Returns Error.FailedToAdd if none match.
        pub fn filterObjects(self: *Self) Error!void {
            self.clearFilteredObjects();
            var i: u64 = 0;
            while (i < Self.MaxObject) : (i += 1) {
                if (self.objectidlist.items[i].is_exists) {
                    var obj = &self.objectlist[i];
                    var j: u64 = 0;
                    var yes = true;
                    while (j < Self.MaxFilter) : (j += 1) {
                        if (self.filters.items[j].is_exists) {
                            const filter = self.filters.items[j].data;
                            if (!obj.hasTag(filter)) yes = false;
                        }
                    }
                    if (yes) {
                        try self.forceObjectToFilter(obj.id);
                    }
                }
            }
            if (self.filteredidlist.count == 0) return Error.FailedToAdd;
        }
    };
}
|
src/kiragine/ecs.zig
|
/// Volatile MMIO pointer into the CPUx PIO register block (base 0x01C2_0800).
pub fn portio_cpux(offset: usize) *volatile u32 {
    const pio_base: usize = 0x01C2_0800;
    return @intToPtr(*volatile u32, pio_base + offset);
}
/// Volatile MMIO pointer into the CPUs (always-on) PIO block (base 0x01F0_2C00).
pub fn portio_cpus(offset: usize) *volatile u32 {
    const pio_base: usize = 0x01F0_2C00;
    return @intToPtr(*volatile u32, pio_base + offset);
}
/// Compile-time check that `pin` exists on `port`; emits a compile error for
/// an unknown port or an out-of-range pin. Pin counts per port: B=10, C=17,
/// D=25, E=18, F=7, G=14, H=12, L=13.
fn verify_port_pin(comptime port: u8, comptime pin: u5) void {
    const pin_count = switch (port) {
        'B' => 10,
        'C' => 17,
        'D' => 25,
        'E' => 18,
        'F' => 7,
        'G' => 14,
        'H' => 12,
        'L' => 13,
        else => @compileError("Unknown port!"),
    };
    if (pin >= pin_count) @compileError("Pin out of range!");
}
/// Volatile pointer to the configuration register covering `pin` on `port`.
/// Eight pins share each 32-bit config register (4-bit field per pin), so
/// successive groups of eight pins are 4 bytes apart.
fn get_config(comptime port: u8, comptime pin: u5) *volatile u32 {
    const group_offset = @as(u16, @divTrunc(pin, 8) * 4);
    const port_base: u16 = switch (port) {
        'B' => 0x0024,
        'C' => 0x0048,
        'D' => 0x006C,
        'E' => 0x0090,
        'F' => 0x00B4,
        'G' => 0x00D8,
        'H' => 0x00FC,
        'L' => 0x0000,
        else => @compileError("Unknown port!"),
    };
    // Port L lives in the CPUs (always-on) PIO block; all others in CPUx.
    return if (port == 'L')
        portio_cpus(port_base + group_offset)
    else
        portio_cpux(port_base + group_offset);
}
/// Returns the DATA register for `port` (one bit per pin).
fn get_data(comptime port: u8) *volatile u32 {
    // Port L lives in the CPUs domain; everything else is CPUx.
    if (port == 'L') return portio_cpus(0x0010);
    return portio_cpux(switch (port) {
        'B' => @as(u16, 0x0034),
        'C' => 0x0058,
        'D' => 0x007C,
        'E' => 0x00A0,
        'F' => 0x00C4,
        'G' => 0x00E8,
        'H' => 0x010C,
        else => @compileError("Unknown port!"),
    });
}
/// Configures a GPIO pin as a plain input or output by rewriting its
/// 3-bit function-select field in the port's CFG register.
pub fn configure_port(comptime port: u8, comptime pin: u5, io: enum { Input, Output }) void {
    comptime verify_port_pin(port, pin);
    const cfg = comptime get_config(port, pin);
    // Four bits per pin within one CFG register.
    const shift = comptime @as(u8, (pin % 8)) * 4;
    // Function select: 0b000 = input, 0b001 = output.
    const mode: u32 = switch (io) {
        .Input => 0b000,
        .Output => 0b001,
    };
    cfg.* = (cfg.* & ~@as(u32, 0x7 << shift)) | (mode << shift);
}
// Sets a port to output and writes the value to the pin.
// Convenience wrapper: configure then drive in one call.
pub fn output_port(comptime port: u8, comptime pin: u5, value: bool) void {
    configure_port(port, pin, .Output);
    write_port(port, pin, value);
}
// Sets a port to input and reads the value from the pin.
// Convenience wrapper: configure then sample in one call.
pub fn input_port(comptime port: u8, comptime pin: u5) bool {
    configure_port(port, pin, .Input);
    return read_port(port, pin);
}
/// Drives an (already configured) output pin high (`true`) or low
/// (`false`) via a read-modify-write of the port's DATA register.
pub fn write_port(comptime port: u8, comptime pin: u5, value: bool) void {
    comptime verify_port_pin(port, pin);
    const mask: u32 = 1 << pin;
    const data = comptime get_data(port);
    data.* = if (value) data.* | mask else data.* & ~mask;
}
/// Samples the pin's bit in the port's DATA register.
pub fn read_port(comptime port: u8, comptime pin: u5) bool {
    comptime verify_port_pin(port, pin);
    const mask: u32 = 1 << pin;
    const data = comptime get_data(port);
    return (data.* & mask) != 0;
}
|
src/platform/pine_aarch64/regs.zig
|
const std = @import("std");
const assert = std.debug.assert;
const Camera = @import("Camera.zig");
const Material = @import("Material.zig");
const zp = @import("../../zplay.zig");
const drawcall = zp.graphics.common.drawcall;
const Context = zp.graphics.common.Context;
const ShaderProgram = zp.graphics.common.ShaderProgram;
const VertexArray = zp.graphics.common.VertexArray;
const alg = zp.deps.alg;
const Vec2 = alg.Vec2;
const Vec3 = alg.Vec3;
const Vec4 = alg.Vec4;
const Mat4 = alg.Mat4;
const Self = @This();
/// vertex attribute locations
pub const ATTRIB_LOCATION_POS = 0;

/// Vertex shader: the raw cube position doubles as the cubemap sampling
/// direction (`v_tex`); `gl_Position = pos.xyww` pins depth to w so the
/// skybox rasterizes at the far plane.
const vs =
    \\#version 330 core
    \\layout (location = 0) in vec3 a_pos;
    \\
    \\uniform mat4 u_view;
    \\uniform mat4 u_project;
    \\
    \\out vec3 v_tex;
    \\
    \\void main()
    \\{
    \\    vec4 pos = u_project * u_view * vec4(a_pos, 1.0);
    \\    gl_Position = pos.xyww;
    \\    v_tex = a_pos;
    \\}
;

/// Fragment shader: samples the cubemap with the interpolated direction.
const fs =
    \\#version 330 core
    \\out vec4 frag_color;
    \\
    \\in vec3 v_tex;
    \\
    \\uniform samplerCube u_texture;
    \\
    \\void main()
    \\{
    \\    frag_color = texture(u_texture, v_tex);
    \\}
;

/// shader program used to render the skybox
program: ShaderProgram,
/// unit cube
vertex_array: VertexArray,
/// create a simple renderer
/// Compiles the skybox shader program and uploads the 36 vertices
/// (12 triangles, 6 faces) of a unit cube into vertex buffer 0, bound to
/// attribute location 0.  Caller must `deinit()` to release resources.
pub fn init() Self {
    var self = Self{
        .program = ShaderProgram.init(vs, fs),
        .vertex_array = VertexArray.init(1),
    };
    self.vertex_array.use();
    defer self.vertex_array.disuse();
    // Cube corner positions; each group of six rows is one face
    // (two triangles).
    self.vertex_array.bufferData(
        0,
        f32,
        &[_]f32{
            -1.0, 1.0,  -1.0,
            -1.0, -1.0, -1.0,
            1.0,  -1.0, -1.0,
            1.0,  -1.0, -1.0,
            1.0,  1.0,  -1.0,
            -1.0, 1.0,  -1.0,
            -1.0, -1.0, 1.0,
            -1.0, -1.0, -1.0,
            -1.0, 1.0,  -1.0,
            -1.0, 1.0,  -1.0,
            -1.0, 1.0,  1.0,
            -1.0, -1.0, 1.0,
            1.0,  -1.0, -1.0,
            1.0,  -1.0, 1.0,
            1.0,  1.0,  1.0,
            1.0,  1.0,  1.0,
            1.0,  1.0,  -1.0,
            1.0,  -1.0, -1.0,
            -1.0, -1.0, 1.0,
            -1.0, 1.0,  1.0,
            1.0,  1.0,  1.0,
            1.0,  1.0,  1.0,
            1.0,  -1.0, 1.0,
            -1.0, -1.0, 1.0,
            -1.0, 1.0,  -1.0,
            1.0,  1.0,  -1.0,
            1.0,  1.0,  1.0,
            1.0,  1.0,  1.0,
            -1.0, 1.0,  1.0,
            -1.0, 1.0,  -1.0,
            -1.0, -1.0, -1.0,
            -1.0, -1.0, 1.0,
            1.0,  -1.0, -1.0,
            1.0,  -1.0, -1.0,
            -1.0, -1.0, 1.0,
            1.0,  -1.0, 1.0,
        },
        .array_buffer,
        .static_draw,
    );
    // Attribute 0: 3 floats per vertex, tightly packed, not normalized.
    self.vertex_array.setAttribute(
        0,
        ATTRIB_LOCATION_POS,
        3,
        f32,
        false,
        0,
        0,
    );
    return self;
}
/// free resources
/// Releases the shader program and the cube's vertex array; the renderer
/// must not be used afterwards.
pub fn deinit(self: Self) void {
    self.program.deinit();
    self.vertex_array.deinit();
}
/// draw skybox
/// Renders the skybox cube with the given projection and camera,
/// restoring the context's previous polygon-mode and depth state via
/// defers.  `material` must hold a single cubemap (asserted below).
pub fn draw(
    self: *Self,
    graphics_context: *Context,
    projection: Mat4,
    camera: Camera,
    material: Material,
) void {
    // Always rasterize the skybox filled, regardless of the current mode.
    const old_polygon_mode = graphics_context.polygon_mode;
    graphics_context.setPolygonMode(.fill);
    defer graphics_context.setPolygonMode(old_polygon_mode);
    // less_or_equal so fragments at maximum depth still pass the test.
    const old_depth_option = graphics_context.depth_option;
    graphics_context.setDepthOption(.{ .test_func = .less_or_equal });
    defer graphics_context.setDepthOption(old_depth_option);
    self.program.use();
    defer self.program.disuse();
    self.vertex_array.use();
    defer self.vertex_array.disuse();
    // set uniforms
    var view = camera.getViewMatrix();
    // Zero data[3][0..2] — presumably the translation part of the view
    // matrix — so the cube stays centered on the camera.
    // NOTE(review): confirm against alg.Mat4's storage layout.
    view.data[3][0] = 0;
    view.data[3][1] = 0;
    view.data[3][2] = 0;
    self.program.setUniformByName("u_view", view);
    self.program.setUniformByName("u_project", projection);
    assert(material.data == .single_cubemap);
    self.program.setUniformByName(
        "u_texture",
        material.data.single_cubemap.tex.getTextureUnit(),
    );
    // issue draw call: 36 vertices = the whole cube
    drawcall.drawBuffer(
        .triangles,
        0,
        36,
        null,
    );
}
|
src/graphics/3d/Skybox.zig
|
const std = @import("std");
const builtin = @import("builtin");
const build_options = @import("build_options");
const data = @import("../common/data.zig");
const assert = std.debug.assert;
const print = std.debug.print;
const trace = build_options.trace;
const Player = data.Player;
const ID = data.ID;
/// Single-byte message tags of the binary battle protocol, grouped by the
/// generation that introduced each message type.
pub const ArgType = enum(u8) {
    None,

    // Special
    LastStill,
    LastMiss,

    // Gen 1
    Move,
    Switch,
    Cant,
    Faint,
    Turn,
    Win,
    Tie,
    Damage,
    Heal,
    Status,
    CureStatus,
    Boost,
    Unboost,
    ClearAllBoost,
    Fail,
    Miss,
    HitCount,
    Prepare,
    MustRecharge,
    Activate,
    FieldActivate,
    Start,
    End,
    OHKO,
    Crit,
    SuperEffective,
    Resisted,
    Immune,
    Transform,

    // Gen 2
    Drag,
    Item,
    EndItem,
    CureTeam,
    SetHP,
    SetBoost,
    CopyBoost,
    SideStart,
    SideEnd,
    SingleMove,
    SingleTurn,
    Weather,

    // Gen 3
    Block,
    Ability,
    EndAbility,
    ClearNegativeBoost,
    FormeChange,
    NoTarget,

    // Gen 4
    SwapBoost,
    FieldStart,
    FieldEnd,
    DetailsChange,

    // Gen 5
    ClearPoke,
    Poke,
    TeamPreview,
    Center,
    Swap,
    Combine,
    Waiting,
    Replace,
    ClearBoost,

    // Gen 6
    Mega,
    Primal,
    InvertBoost,

    // Gen 7
    ZBroken,
    ZPower,
    Burst,
    ClearPositiveBoost,

    // Gen 8
    CanDynamax,
    SwapSideConditions,
};
/// Trailer of a |move| message: either no qualifier or "from <move>".
pub const Move = enum(u8) {
    None,
    From,
};

/// Reasons a Pokémon could not act (|cant|).
pub const Cant = enum(u8) {
    Sleep,
    Freeze,
    Paralysis,
    Trapped,
    Flinch,
    Disable,
    Recharge,
    PP,
};

/// Qualifiers for |-heal|.
pub const Heal = enum(u8) {
    None,
    Silent,
    Drain,
};

/// Causes attached to |-damage|.
pub const Damage = enum(u8) {
    None,
    Poison,
    Burn,
    Confusion,
    LeechSeed,
    RecoilOf,
};

/// Qualifiers for |-status|.
pub const Status = enum(u8) {
    None,
    Silent,
    From,
};

/// Qualifiers for |-curestatus|.
pub const CureStatus = enum(u8) {
    Message,
    Silent,
};

/// Stats (plus Rage) that |-boost|/|-unboost| may modify.
pub const Boost = enum(u8) {
    Rage,
    Attack,
    Defense,
    Speed,
    SpecialAttack,
    SpecialDefense,
    Accuracy,
    Evasion,
};

/// Reasons attached to |-fail|.
pub const Fail = enum(u8) {
    None,
    Sleep,
    Poison,
    Burn,
    Freeze,
    Paralysis,
    Toxic,
    Substitute,
    Weak,
};

/// Effects reported via |-activate|.
pub const Activate = enum(u8) {
    Bide,
    Confusion,
    Haze,
    Mist,
    Struggle,
    Substitute,
    Splash,
};

/// Effects reported via |-start|; entries after TypeChange carry an
/// extra move byte (see Log.startEffect / format).
pub const Start = enum(u8) {
    Bide,
    Confusion,
    ConfusionSilent,
    FocusEnergy,
    LeechSeed,
    LightScreen,
    Mist,
    Reflect,
    Substitute,
    TypeChange,
    Disable,
    Mimic,
};

/// Effects reported via |-end|.
pub const End = enum(u8) {
    Disable,
    Confusion,
    Bide,
    Substitute,

    // Silent
    DisableSilent,
    ConfusionSilent,
    Mist,
    FocusEnergy,
    LeechSeed,
    LightScreen,
    Reflect,
};

/// Qualifiers for |-immune|.
pub const Immune = enum(u8) {
    None,
    OHKO,
};

/// A logger that discards everything it is given.
pub const NULL = Log(@TypeOf(std.io.null_writer)){ .writer = std.io.null_writer };
/// Returns a binary protocol logger that serializes battle events to
/// `Writer`.  Every method is a no-op unless the `trace` build option is
/// enabled.  Identifiers (`ID`) and enum reasons are each encoded as one
/// byte; HP values are written as native-endian u16s.  The exact order of
/// `writeAll`/`writeInt` calls IS the wire format — do not reorder.
pub fn Log(comptime Writer: type) type {
    return struct {
        const Self = @This();

        writer: Writer,

        pub const Error = Writer.Error;

        /// |move|: source used `m` against target.  `from` may be null, an
        /// optional, or a move — selecting a Move.None or Move.From trailer.
        pub fn move(self: Self, source: ID, m: anytype, target: ID, from: anytype) Error!void {
            if (!trace) return;
            assert(m != .None);
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Move),
                @bitCast(u8, source),
                @enumToInt(m),
                @bitCast(u8, target),
            });
            const none = &[_]u8{@enumToInt(Move.None)};
            try self.writer.writeAll(switch (@typeInfo(@TypeOf(from))) {
                .Null => none,
                .Optional => if (from) |f| &[_]u8{ @enumToInt(Move.From), @enumToInt(f) } else none,
                else => &[_]u8{ @enumToInt(Move.From), @enumToInt(from) },
            });
        }

        /// |switch|: ident plus species, level, current/max HP and status.
        pub fn switched(self: Self, ident: ID, pokemon: anytype) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{ @enumToInt(ArgType.Switch), @bitCast(u8, ident) });
            try self.writer.writeAll(&.{ @enumToInt(pokemon.species), pokemon.level });
            try self.writer.writeIntNative(u16, pokemon.hp);
            try self.writer.writeIntNative(u16, pokemon.stats.hp);
            try self.writer.writeAll(&.{pokemon.status});
        }

        /// |cant| for every reason except Disable (which needs a move byte —
        /// use `disabled`).
        pub fn cant(self: Self, ident: ID, reason: Cant) Error!void {
            if (!trace) return;
            assert(reason != .Disable);
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Cant),
                @bitCast(u8, ident),
                @enumToInt(reason),
            });
        }

        /// |cant| with reason Disable plus the disabled move `m`.
        pub fn disabled(self: Self, ident: ID, m: anytype) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Cant),
                @bitCast(u8, ident),
                @enumToInt(Cant.Disable),
                @enumToInt(m),
            });
        }

        /// |faint|; `done` appends the ArgType.None terminator.
        pub fn faint(self: Self, ident: ID, done: bool) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{ @enumToInt(ArgType.Faint), @bitCast(u8, ident) });
            if (done) try self.writer.writeByte(@enumToInt(ArgType.None));
        }

        /// |turn| with the turn number, terminated by ArgType.None.
        pub fn turn(self: Self, num: u16) Error!void {
            if (!trace) return;
            try self.writer.writeByte(@enumToInt(ArgType.Turn));
            try self.writer.writeIntNative(u16, num);
            try self.writer.writeByte(@enumToInt(ArgType.None));
        }

        /// |win| for `player`, terminated by ArgType.None.
        pub fn win(self: Self, player: Player) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Win),
                @enumToInt(player),
                @enumToInt(ArgType.None),
            });
        }

        /// |tie|, terminated by ArgType.None.
        pub fn tie(self: Self) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{ @enumToInt(ArgType.Tie), @enumToInt(ArgType.None) });
        }

        /// |-damage| for reasons up to LeechSeed; RecoilOf needs a source —
        /// use `damageOf`.
        pub fn damage(self: Self, ident: ID, pokemon: anytype, reason: Damage) Error!void {
            if (!trace) return;
            assert(@enumToInt(reason) <= @enumToInt(Damage.LeechSeed));
            try self.writer.writeAll(&.{ @enumToInt(ArgType.Damage), @bitCast(u8, ident) });
            try self.writer.writeIntNative(u16, pokemon.hp);
            try self.writer.writeIntNative(u16, pokemon.stats.hp);
            try self.writer.writeAll(&.{ pokemon.status, @enumToInt(reason) });
        }

        /// |-damage| with reason RecoilOf and the `source` ident appended.
        pub fn damageOf(
            self: Self,
            ident: ID,
            pokemon: anytype,
            reason: Damage,
            source: ID,
        ) Error!void {
            if (!trace) return;
            assert(@enumToInt(reason) == @enumToInt(Damage.RecoilOf));
            try self.writer.writeAll(&.{ @enumToInt(ArgType.Damage), @bitCast(u8, ident) });
            try self.writer.writeIntNative(u16, pokemon.hp);
            try self.writer.writeIntNative(u16, pokemon.stats.hp);
            try self.writer.writeAll(&.{
                pokemon.status,
                @enumToInt(reason),
                @bitCast(u8, source),
            });
        }

        /// |-heal| for reasons other than Drain (which needs a target —
        /// use `drain`).
        pub fn heal(self: Self, ident: ID, pokemon: anytype, reason: Heal) Error!void {
            if (!trace) return;
            assert(reason != .Drain);
            try self.writer.writeAll(&.{ @enumToInt(ArgType.Heal), @bitCast(u8, ident) });
            try self.writer.writeIntNative(u16, pokemon.hp);
            try self.writer.writeIntNative(u16, pokemon.stats.hp);
            try self.writer.writeAll(&.{ pokemon.status, @enumToInt(reason) });
        }

        /// |-heal| with reason Drain and the drained `target` appended.
        pub fn drain(self: Self, source: ID, pokemon: anytype, target: ID) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{ @enumToInt(ArgType.Heal), @bitCast(u8, source) });
            try self.writer.writeIntNative(u16, pokemon.hp);
            try self.writer.writeIntNative(u16, pokemon.stats.hp);
            try self.writer.writeAll(&.{
                pokemon.status,
                @enumToInt(Heal.Drain),
                @bitCast(u8, target),
            });
        }

        /// |-status| for reasons other than From (which needs a move byte —
        /// use `statusFrom`).  `value` is the raw status byte.
        pub fn status(self: Self, ident: ID, value: u8, reason: Status) Error!void {
            if (!trace) return;
            assert(reason != .From);
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Status),
                @bitCast(u8, ident),
                value,
                @enumToInt(reason),
            });
        }

        /// |-status| with reason From and the causing move `m` appended.
        pub fn statusFrom(self: Self, ident: ID, value: u8, m: anytype) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Status),
                @bitCast(u8, ident),
                value,
                @enumToInt(Status.From),
                @enumToInt(m),
            });
        }

        /// |-curestatus|; `value` is the raw status byte being cured.
        pub fn curestatus(self: Self, ident: ID, value: u8, reason: CureStatus) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.CureStatus),
                @bitCast(u8, ident),
                value,
                @enumToInt(reason),
            });
        }

        /// |-boost| of `reason` by `num` stages.
        pub fn boost(self: Self, ident: ID, reason: Boost, num: u8) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Boost),
                @bitCast(u8, ident),
                @enumToInt(reason),
                num,
            });
        }

        /// |-unboost| of `reason` by `num` stages (Rage cannot be unboosted).
        pub fn unboost(self: Self, ident: ID, reason: Boost, num: u8) Error!void {
            if (!trace) return;
            assert(reason != .Rage);
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Unboost),
                @bitCast(u8, ident),
                @enumToInt(reason),
                num,
            });
        }

        /// |-clearallboost| (no payload).
        pub fn clearallboost(self: Self) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{@enumToInt(ArgType.ClearAllBoost)});
        }

        /// |-fail| with a reason.
        pub fn fail(self: Self, ident: ID, reason: Fail) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Fail),
                @bitCast(u8, ident),
                @enumToInt(reason),
            });
        }

        /// |-miss| by `source`.
        pub fn miss(self: Self, source: ID) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{ @enumToInt(ArgType.Miss), @bitCast(u8, source) });
        }

        /// |-hitcount|: the move hit `num` times.
        pub fn hitcount(self: Self, ident: ID, num: u8) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{ @enumToInt(ArgType.HitCount), @bitCast(u8, ident), num });
        }

        /// |-prepare|: `source` is charging move `m`.
        pub fn prepare(self: Self, source: ID, m: anytype) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Prepare),
                @bitCast(u8, source),
                @enumToInt(m),
            });
        }

        /// |-mustrecharge|.
        pub fn mustrecharge(self: Self, ident: ID) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{ @enumToInt(ArgType.MustRecharge), @bitCast(u8, ident) });
        }

        /// |-activate| with a reason.
        pub fn activate(self: Self, ident: ID, reason: Activate) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Activate),
                @bitCast(u8, ident),
                @enumToInt(reason),
            });
        }

        /// |-fieldactivate| (no payload).
        pub fn fieldactivate(self: Self) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{@enumToInt(ArgType.FieldActivate)});
        }

        /// |-start| for reasons before TypeChange (no extra payload byte).
        pub fn start(self: Self, ident: ID, reason: Start) Error!void {
            if (!trace) return;
            assert(@enumToInt(reason) < @enumToInt(Start.TypeChange));
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Start),
                @bitCast(u8, ident),
                @enumToInt(reason),
            });
        }

        /// |-start| TypeChange, carrying the bit-packed `types` byte.
        pub fn typechange(self: Self, ident: ID, types: anytype) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Start),
                @bitCast(u8, ident),
                @enumToInt(Start.TypeChange),
                @bitCast(u8, types),
            });
        }

        /// |-start| for reasons after TypeChange, carrying the move `m`.
        pub fn startEffect(self: Self, ident: ID, reason: Start, m: anytype) Error!void {
            if (!trace) return;
            assert(@enumToInt(reason) > @enumToInt(Start.TypeChange));
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Start),
                @bitCast(u8, ident),
                @enumToInt(reason),
                @enumToInt(m),
            });
        }

        /// |-end| with a reason.
        pub fn end(self: Self, ident: ID, reason: End) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.End),
                @bitCast(u8, ident),
                @enumToInt(reason),
            });
        }

        /// |-ohko| (no payload).
        pub fn ohko(self: Self) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{@enumToInt(ArgType.OHKO)});
        }

        /// |-crit|.
        pub fn crit(self: Self, ident: ID) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{ @enumToInt(ArgType.Crit), @bitCast(u8, ident) });
        }

        /// |-supereffective|.
        pub fn supereffective(self: Self, ident: ID) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{ @enumToInt(ArgType.SuperEffective), @bitCast(u8, ident) });
        }

        /// |-resisted|.
        pub fn resisted(self: Self, ident: ID) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{ @enumToInt(ArgType.Resisted), @bitCast(u8, ident) });
        }

        /// |-immune| with a reason.
        pub fn immune(self: Self, ident: ID, reason: Immune) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Immune),
                @bitCast(u8, ident),
                @enumToInt(reason),
            });
        }

        /// |-transform|: `source` transformed into `target`.
        pub fn transform(self: Self, source: ID, target: ID) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{
                @enumToInt(ArgType.Transform),
                @bitCast(u8, source),
                @bitCast(u8, target),
            });
        }

        /// |[still] marker for the preceding |move|.
        pub fn laststill(self: Self) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{@enumToInt(ArgType.LastStill)});
        }

        /// |[miss] marker for the preceding |move|.
        pub fn lastmiss(self: Self) Error!void {
            if (!trace) return;
            try self.writer.writeAll(&.{@enumToInt(ArgType.LastMiss)});
        }
    };
}
/// Log specialization writing into a fixed in-memory buffer (used by the
/// tests below).
pub const FixedLog = Log(std.io.FixedBufferStream([]u8).Writer);

/// Category of raw protocol byte a Formatter is asked to render.
pub const Kind = enum { Move, Species, Type, Status };

/// Maps a raw protocol byte of the given Kind to a human-readable name.
pub const Formatter = fn (Kind, u8) []const u8;
// @test-only
/// Pretty-prints the binary protocol buffer `a` to stderr, one message per
/// line.  When `b` is provided, bytes of `a` that differ from `b` are
/// highlighted by `printc` (when `color` is set).  `formatter` maps raw
/// Move/Species/Type/Status bytes to names.  The byte layout consumed here
/// must mirror exactly what `Log`'s methods emit.
pub fn format(formatter: Formatter, a: []const u8, b: ?[]const u8, color: bool) void {
    print("\n", .{});
    var i: usize = 0;
    while (i < a.len) {
        const arg = @intToEnum(ArgType, a[i]);
        // Human-readable message name; only the message types that Log can
        // currently emit are handled.
        const name = switch (arg) {
            .None => if (color) "\x1b[2m-\x1b[0m" else "-",
            .LastStill => "|[still]",
            .LastMiss => "|[miss]",
            .Move => "|move|",
            .Switch => "|switch|",
            .Cant => "|cant|",
            .Faint => "|faint|",
            .Turn => "|turn|",
            .Win => "|win|",
            .Tie => "|tie|",
            .Damage => "|-damage|",
            .Heal => "|-heal|",
            .Status => "|-status|",
            .CureStatus => "|-curestatus|",
            .Boost => "|-boost|",
            .Unboost => "|-unboost|",
            .ClearAllBoost => "|-clearallboost|",
            .Fail => "|-fail|",
            .Miss => "|-miss|",
            .HitCount => "|-hitcount|",
            .Prepare => "|-prepare|",
            .MustRecharge => "|-mustrecharge|",
            .Activate => "|-activate|",
            .FieldActivate => "|-fieldactivate|",
            .Start => "|-start|",
            .End => "|-end|",
            .OHKO => "|-ohko|",
            .Crit => "|-crit|",
            .SuperEffective => "|-supereffective|",
            .Resisted => "|-resisted|",
            .Immune => "|-immune|",
            .Transform => "|-transform|",
            else => unreachable,
        };
        printc("{s}", .{name}, a, b, &i, 1, color);
        // Decode the payload; each printc advances `i` past the bytes read.
        switch (arg) {
            .None,
            .LastStill,
            .LastMiss,
            .Tie,
            .ClearAllBoost,
            .FieldActivate,
            .OHKO,
            => {},
            .Move => {
                const source = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(source.player), source.id }, a, b, &i, 1, color);
                printc(" {s}", .{formatter(.Move, a[i])}, a, b, &i, 1, color);
                const target = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(target.player), target.id }, a, b, &i, 1, color);
                const reason = @intToEnum(Move, a[i]);
                printc(" {s}", .{@tagName(reason)}, a, b, &i, 1, color);
                if (reason == .From) printc(" {s}", .{formatter(.Move, a[i])}, a, b, &i, 1, color);
            },
            .Switch => {
                const id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                printc(" {s}", .{formatter(.Species, a[i])}, a, b, &i, 1, color);
                printc(" L{d}", .{a[i]}, a, b, &i, 1, color);
                // HP fields were written native-endian; reassemble the same way.
                switch (endian) {
                    .Big => {
                        var hp = @as(u16, a[i]) << 8 | @as(u16, a[i + 1]);
                        printc(" {d}", .{hp}, a, b, &i, 2, color);
                        hp = @as(u16, a[i]) << 8 | @as(u16, a[i + 1]);
                        printc("/{d}", .{hp}, a, b, &i, 2, color);
                    },
                    .Little => {
                        var hp = @as(u16, a[i + 1]) << 8 | @as(u16, a[i]);
                        printc(" {d}", .{hp}, a, b, &i, 2, color);
                        hp = @as(u16, a[i + 1]) << 8 | @as(u16, a[i]);
                        printc("/{d}", .{hp}, a, b, &i, 2, color);
                    },
                }
                printc(" {s}", .{formatter(.Status, a[i])}, a, b, &i, 1, color);
            },
            .Cant => {
                const id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                const reason = @intToEnum(Cant, a[i]);
                printc(" {s}", .{@tagName(reason)}, a, b, &i, 1, color);
                // Disable carries the disabled move as an extra byte.
                if (reason == .Disable) {
                    printc(" {s}", .{formatter(.Move, a[i])}, a, b, &i, 1, color);
                }
            },
            // All of these carry only a single ident byte.
            .Faint,
            .Miss,
            .MustRecharge,
            .Crit,
            .SuperEffective,
            .Resisted,
            => {
                const id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
            },
            .Turn => {
                const turn = switch (endian) {
                    .Big => @as(u16, a[i]) << 8 | @as(u16, a[i + 1]),
                    .Little => @as(u16, a[i + 1]) << 8 | @as(u16, a[i]),
                };
                printc(" {d}", .{turn}, a, b, &i, 2, color);
            },
            .Win => printc(" {s}", .{@tagName(@intToEnum(Player, a[i]))}, a, b, &i, 1, color),
            .Damage, .Heal => {
                var id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                switch (endian) {
                    .Big => {
                        var hp = @as(u16, a[i]) << 8 | @as(u16, a[i + 1]);
                        printc(" {d}", .{hp}, a, b, &i, 2, color);
                        hp = @as(u16, a[i]) << 8 | @as(u16, a[i + 1]);
                        printc("/{d}", .{hp}, a, b, &i, 2, color);
                    },
                    .Little => {
                        var hp = @as(u16, a[i + 1]) << 8 | @as(u16, a[i]);
                        printc(" {d}", .{hp}, a, b, &i, 2, color);
                        hp = @as(u16, a[i + 1]) << 8 | @as(u16, a[i]);
                        printc("/{d}", .{hp}, a, b, &i, 2, color);
                    },
                }
                printc(" {s}", .{formatter(.Status, a[i])}, a, b, &i, 1, color);
                if (arg == .Damage) {
                    const reason = a[i];
                    printc(" {s}", .{@tagName(@intToEnum(Damage, reason))}, a, b, &i, 1, color);
                    // RecoilOf carries the recoil source as an extra ident.
                    if (reason == @enumToInt(Damage.RecoilOf)) {
                        id = ID.from(@truncate(u4, a[i]));
                        printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                    }
                } else {
                    const reason = @intToEnum(Heal, a[i]);
                    printc(" {s}", .{@tagName(reason)}, a, b, &i, 1, color);
                    // Drain carries the drained target as an extra ident.
                    if (reason == .Drain) {
                        id = ID.from(@truncate(u4, a[i]));
                        printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                    }
                }
            },
            .Status => {
                const id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                printc(" {s}", .{formatter(.Status, a[i])}, a, b, &i, 1, color);
                const reason = @intToEnum(Status, a[i]);
                printc(" {s}", .{@tagName(reason)}, a, b, &i, 1, color);
                if (reason == .From) printc(" {s}", .{formatter(.Move, a[i])}, a, b, &i, 1, color);
            },
            .CureStatus => {
                const id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                printc(" {s}", .{formatter(.Status, a[i])}, a, b, &i, 1, color);
                printc(" {s}", .{@tagName(@intToEnum(CureStatus, a[i]))}, a, b, &i, 1, color);
            },
            .Boost, .Unboost => {
                const id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                printc(" {s}", .{@tagName(@intToEnum(Boost, a[i]))}, a, b, &i, 1, color);
                printc(" {d}", .{a[i]}, a, b, &i, 1, color);
            },
            .Fail => {
                const id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                printc(" {s}", .{@tagName(@intToEnum(Fail, a[i]))}, a, b, &i, 1, color);
            },
            .HitCount => {
                const id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                printc(" {d}", .{a[i]}, a, b, &i, 1, color);
            },
            .Prepare => {
                const id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                printc(" {s}", .{formatter(.Move, a[i])}, a, b, &i, 1, color);
            },
            .Activate => {
                const id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                printc(" {s}", .{@tagName(@intToEnum(Activate, a[i]))}, a, b, &i, 1, color);
            },
            .Start => {
                const id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                const reason = a[i];
                printc(" {s}", .{@tagName(@intToEnum(Start, reason))}, a, b, &i, 1, color);
                // TypeChange packs both types in one byte; reasons at or
                // beyond Disable carry a move byte instead.
                if (@intToEnum(Start, reason) == .TypeChange) {
                    const types = @bitCast(gen1.Types, a[i]);
                    const args = .{
                        formatter(.Type, @enumToInt(types.type1)),
                        formatter(.Type, @enumToInt(types.type2)),
                    };
                    printc(" {s}/{s}", args, a, b, &i, 1, color);
                } else if (reason >= @enumToInt(Start.Disable)) {
                    printc(" {s}", .{formatter(.Move, a[i])}, a, b, &i, 1, color);
                }
            },
            .End => {
                const id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                printc(" {s}", .{@tagName(@intToEnum(End, a[i]))}, a, b, &i, 1, color);
            },
            .Immune => {
                const id = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(id.player), id.id }, a, b, &i, 1, color);
                printc(" {s}", .{@tagName(@intToEnum(Immune, a[i]))}, a, b, &i, 1, color);
            },
            .Transform => {
                const source = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(source.player), source.id }, a, b, &i, 1, color);
                const target = ID.from(@truncate(u4, a[i]));
                printc(" {s}({d})", .{ @tagName(target.player), target.id }, a, b, &i, 1, color);
            },
            else => unreachable,
        }
        print("\n", .{});
    }
    print("\n", .{});
}
/// Prints one formatted protocol field to stderr, coloring it red when the
/// next `n` bytes of `a` differ from the corresponding bytes of `b` (when
/// `b` is given and `color` is set), then advances `i` past those bytes.
fn printc(
    comptime fmt: []const u8,
    args: anytype,
    a: []const u8,
    b: ?[]const u8,
    i: *usize,
    n: usize,
    color: bool,
) void {
    const highlight = color and mismatch: {
        const other = b orelse break :mismatch false;
        const end = i.* + n;
        // Running past either buffer also counts as a mismatch.
        if (end > a.len or end > other.len) break :mismatch true;
        for (a[i.*..end]) |byte, k| {
            if (byte != other[i.* + k]) break :mismatch true;
        }
        break :mismatch false;
    };
    if (highlight) print("\x1b[31m", .{});
    print(fmt, args);
    if (highlight) print("\x1b[0m", .{});
    i.* += n;
}
// @test-only
/// Compares two protocol buffers byte-for-byte; on mismatch, pretty-prints
/// both (highlighting the diff in `actual`) before re-raising the error.
/// Color is forced on by ZIG_DEBUG_COLOR, off by NO_COLOR, and otherwise
/// follows stderr's ANSI capability.
pub fn expectLog(formatter: Formatter, expected: []const u8, actual: []const u8) !void {
    if (!trace) return;
    const color = if (std.process.hasEnvVarConstant("ZIG_DEBUG_COLOR"))
        true
    else if (std.process.hasEnvVarConstant("NO_COLOR"))
        false
    else
        std.io.getStdErr().supportsAnsiEscapeCodes();
    expectEqualBytes(expected, actual) catch |err| {
        if (err == error.TestExpectedEqual) {
            format(formatter, expected, null, color);
            format(formatter, actual, expected, color);
        }
        return err;
    };
}
/// Byte-wise slice comparison: reports the first mismatching index, then a
/// length mismatch, returning error.TestExpectedEqual in either case.
fn expectEqualBytes(expected: []const u8, actual: []const u8) !void {
    const shared = @minimum(expected.len, actual.len);
    for (expected[0..shared]) |byte, idx| {
        if (byte == actual[idx]) continue;
        print(
            "index {} incorrect. expected 0x{X:0>2}, found 0x{X:0>2}\n",
            .{ idx, byte, actual[idx] },
        );
        return error.TestExpectedEqual;
    }
    if (expected.len == actual.len) return;
    print(
        "slice lengths differ. expected {d}, found {d}\n",
        .{ expected.len, actual.len },
    );
    return error.TestExpectedEqual;
}
// Host byte order: multi-byte protocol fields are written native-endian.
const endian = builtin.target.cpu.arch.endian();

/// Shorthand: the integer value of any enum (used heavily by the tests below).
fn N(e: anytype) u8 {
    return @enumToInt(e);
}

const p1 = Player.P1;
const p2 = Player.P2;

// Gen I data plus its test helpers, re-exported under one namespace.
const gen1 = struct {
    usingnamespace @import("../gen1/data.zig");
    pub const helpers = @import("../gen1/helpers.zig");
};

// Shared fixed buffer/stream/log, reused (and reset) by every test below.
var buf: [gen1.LOG_SIZE]u8 = undefined;
var stream = std.io.fixedBufferStream(&buf);
var log: FixedLog = .{ .writer = stream.writer() };

const M = gen1.Move;
const S = gen1.Species;

/// expectLog specialized with the Gen I formatter.
fn expectLog1(expected: []const u8, actual: []const u8) !void {
    return expectLog(gen1Formatter, expected, actual);
}

/// Renders a raw Gen I protocol byte as the matching enum/status name.
fn gen1Formatter(kind: Kind, byte: u8) []const u8 {
    return switch (kind) {
        .Move => @tagName(@intToEnum(M, byte)),
        .Species => @tagName(@intToEnum(S, byte)),
        .Type => @tagName(@intToEnum(gen1.Type, byte)),
        .Status => gen1.Status.name(byte),
    };
}
// Byte-for-byte checks of the wire format each Log method emits, written
// into the shared `buf` via `log` and reset between cases.
test "|move|" {
    try log.move(p2.ident(4), M.Thunderbolt, p1.ident(5), null);
    try expectLog1(
        &.{ N(ArgType.Move), 0b1100, N(M.Thunderbolt), 0b0101, N(Move.None) },
        buf[0..5],
    );
    stream.reset();
    // `from` given as a move appends a Move.From trailer plus the move byte.
    try log.move(p2.ident(4), M.Wrap, p1.ident(5), M.Wrap);
    const wrap = N(M.Wrap);
    try expectLog1(
        &.{ N(ArgType.Move), 0b1100, wrap, 0b0101, N(Move.From), wrap },
        buf[0..6],
    );
    stream.reset();
    try log.move(p2.ident(4), M.SkullBash, .{}, null);
    try log.laststill();
    try expectLog1(
        &.{
            N(ArgType.Move),
            0b1100,
            N(M.SkullBash),
            0,
            N(Move.None),
            N(ArgType.LastStill),
        },
        buf[0..6],
    );
    stream.reset();
    try log.move(p2.ident(4), M.Tackle, p1.ident(5), null);
    try log.lastmiss();
    try expectLog1(
        &.{
            N(ArgType.Move),
            0b1100,
            N(M.Tackle),
            0b0101,
            N(Move.None),
            N(ArgType.LastMiss),
        },
        buf[0..6],
    );
    stream.reset();
}

test "|switch|" {
    var snorlax = gen1.helpers.Pokemon.init(.{ .species = .Snorlax, .moves = &.{.Splash} });
    snorlax.level = 91;
    snorlax.hp = 200;
    snorlax.stats.hp = 400;
    snorlax.status = gen1.Status.init(.PAR);
    try log.switched(p2.ident(3), &snorlax);
    const par = 0b1000000;
    // HP u16s are native-endian, hence the per-endianness expectations.
    var expected: []const u8 = switch (endian) {
        .Big => &.{ N(ArgType.Switch), 0b1011, N(S.Snorlax), 91, 0, 200, 1, 144, par },
        .Little => &.{ N(ArgType.Switch), 0b1011, N(S.Snorlax), 91, 200, 0, 144, 1, par },
    };
    try expectLog1(expected, buf[0..9]);
    stream.reset();
    snorlax.level = 100;
    snorlax.hp = 0;
    snorlax.status = 0;
    try log.switched(p2.ident(3), &snorlax);
    expected = switch (endian) {
        .Big => &.{ N(ArgType.Switch), 0b1011, N(S.Snorlax), 100, 0, 0, 1, 144, 0 },
        .Little => &.{ N(ArgType.Switch), 0b1011, N(S.Snorlax), 100, 0, 0, 144, 1, 0 },
    };
    try expectLog1(expected, buf[0..9]);
    stream.reset();
    snorlax.hp = 400;
    try log.switched(p2.ident(3), &snorlax);
    expected = switch (endian) {
        .Big => &.{ N(ArgType.Switch), 0b1011, N(S.Snorlax), 100, 1, 144, 1, 144, 0 },
        .Little => &.{ N(ArgType.Switch), 0b1011, N(S.Snorlax), 100, 144, 1, 144, 1, 0 },
    };
    try expectLog1(expected, buf[0..9]);
    stream.reset();
}

test "|cant|" {
    try log.cant(p2.ident(6), .Trapped);
    try expectLog1(&.{ N(ArgType.Cant), 0b1110, N(Cant.Trapped) }, buf[0..3]);
    stream.reset();
    // Disable goes through the dedicated `disabled` helper.
    try log.disabled(p1.ident(2), M.Earthquake);
    try expectLog1(&.{ N(ArgType.Cant), 2, N(Cant.Disable), N(M.Earthquake) }, buf[0..4]);
    stream.reset();
}

test "|faint|" {
    try log.faint(p2.ident(2), false);
    try expectLog1(&.{ N(ArgType.Faint), 0b1010 }, buf[0..2]);
    stream.reset();
    // done = true appends the ArgType.None terminator.
    try log.faint(p2.ident(2), true);
    try expectLog1(&.{ N(ArgType.Faint), 0b1010, N(ArgType.None) }, buf[0..3]);
    stream.reset();
}

test "|turn|" {
    try log.turn(42);
    var expected = switch (endian) {
        .Big => &.{ N(ArgType.Turn), 0, 42, N(ArgType.None) },
        .Little => &.{ N(ArgType.Turn), 42, 0, N(ArgType.None) },
    };
    try expectLog1(expected, buf[0..4]);
    stream.reset();
}

test "|win|" {
    try log.win(.P2);
    try expectLog1(&.{ N(ArgType.Win), 1, N(ArgType.None) }, buf[0..3]);
    stream.reset();
}

test "|tie|" {
    try log.tie();
    try expectLog1(&.{ N(ArgType.Tie), N(ArgType.None) }, buf[0..2]);
    stream.reset();
}

test "|-damage|" {
    var chansey = gen1.helpers.Pokemon.init(.{ .species = .Chansey, .moves = &.{.Splash} });
    chansey.hp = 612;
    chansey.status = gen1.Status.slp(1);
    try log.damage(p2.ident(2), &chansey, .None);
    var expected: []const u8 = switch (endian) {
        .Big => &.{ N(ArgType.Damage), 0b1010, 2, 100, 2, 191, 1, N(Damage.None) },
        .Little => &.{ N(ArgType.Damage), 0b1010, 100, 2, 191, 2, 1, N(Damage.None) },
    };
    try expectLog1(expected, buf[0..8]);
    stream.reset();
    chansey.hp = 100;
    chansey.stats.hp = 256;
    chansey.status = 0;
    try log.damage(p2.ident(2), &chansey, .Confusion);
    expected = switch (endian) {
        .Big => &.{ N(ArgType.Damage), 0b1010, 0, 100, 1, 0, 0, N(Damage.Confusion) },
        .Little => &.{ N(ArgType.Damage), 0b1010, 100, 0, 0, 1, 0, N(Damage.Confusion) },
    };
    try expectLog1(expected, buf[0..8]);
    stream.reset();
    // RecoilOf appends the source ident as a trailing byte.
    chansey.status = gen1.Status.init(.PSN);
    try log.damageOf(p2.ident(2), &chansey, .RecoilOf, p1.ident(1));
    expected = switch (endian) {
        .Big => &.{ N(ArgType.Damage), 0b1010, 0, 100, 1, 0, 0b1000, N(Damage.RecoilOf), 1 },
        .Little => &.{ N(ArgType.Damage), 0b1010, 100, 0, 0, 1, 0b1000, N(Damage.RecoilOf), 1 },
    };
    try expectLog1(expected, buf[0..9]);
    stream.reset();
}

test "|-heal|" {
    var chansey = gen1.helpers.Pokemon.init(.{ .species = .Chansey, .moves = &.{.Splash} });
    chansey.hp = 612;
    chansey.status = gen1.Status.slp(1);
    try log.heal(p2.ident(2), &chansey, .None);
    var expected: []const u8 = switch (endian) {
        .Big => &.{ N(ArgType.Heal), 0b1010, 2, 100, 2, 191, 1, N(Heal.None) },
        .Little => &.{ N(ArgType.Heal), 0b1010, 100, 2, 191, 2, 1, N(Heal.None) },
    };
    try expectLog1(expected, buf[0..8]);
    stream.reset();
    chansey.hp = 100;
    chansey.stats.hp = 256;
    chansey.status = 0;
    try log.heal(p2.ident(2), &chansey, .Silent);
    expected = switch (endian) {
        .Big => &.{ N(ArgType.Heal), 0b1010, 0, 100, 1, 0, 0, N(Heal.Silent) },
        .Little => &.{ N(ArgType.Heal), 0b1010, 100, 0, 0, 1, 0, N(Heal.Silent) },
    };
    try expectLog1(expected, buf[0..8]);
    stream.reset();
    // Drain appends the drained target's ident as a trailing byte.
    try log.drain(p2.ident(2), &chansey, p1.ident(1));
    expected = switch (endian) {
        .Big => &.{ N(ArgType.Heal), 0b1010, 0, 100, 1, 0, 0, N(Heal.Drain), 1 },
        .Little => &.{ N(ArgType.Heal), 0b1010, 100, 0, 0, 1, 0, N(Heal.Drain), 1 },
    };
    try expectLog1(expected, buf[0..9]);
    stream.reset();
}

test "|-status|" {
    try log.status(p2.ident(6), gen1.Status.init(.BRN), .None);
    try expectLog1(&.{ N(ArgType.Status), 0b1110, 0b10000, N(Status.None) }, buf[0..4]);
    stream.reset();
    try log.status(p1.ident(2), gen1.Status.init(.FRZ), .Silent);
    try expectLog1(&.{ N(ArgType.Status), 0b0010, 0b100000, N(Status.Silent) }, buf[0..4]);
    stream.reset();
    // From appends the causing move as a trailing byte.
    try log.statusFrom(p1.ident(1), gen1.Status.init(.PAR), M.BodySlam);
    try expectLog1(
        &.{ N(ArgType.Status), 0b0001, 0b1000000, N(Status.From), N(M.BodySlam) },
        buf[0..5],
    );
    stream.reset();
}

test "|-curestatus|" {
    try log.curestatus(p2.ident(6), gen1.Status.slp(7), .Message);
    try expectLog1(&.{ N(ArgType.CureStatus), 0b1110, 0b111, N(CureStatus.Message) }, buf[0..4]);
    stream.reset();
    try log.curestatus(p1.ident(2), gen1.Status.init(.PSN), .Silent);
    try expectLog1(&.{ N(ArgType.CureStatus), 0b0010, 0b1000, N(CureStatus.Silent) }, buf[0..4]);
    stream.reset();
}
test "|-boost|" {
try log.boost(p2.ident(6), .Speed, 2);
try expectLog1(&.{ N(ArgType.Boost), 0b1110, N(Boost.Speed), 2 }, buf[0..4]);
stream.reset();
try log.boost(p1.ident(2), .Rage, 1);
try expectLog1(&.{ N(ArgType.Boost), 0b0010, N(Boost.Rage), 1 }, buf[0..4]);
stream.reset();
}
test "|-unboost|" {
try log.unboost(p2.ident(3), .Defense, 2);
try expectLog1(&.{ N(ArgType.Unboost), 0b1011, N(Boost.Defense), 2 }, buf[0..4]);
stream.reset();
}
test "|-clearallboost|" {
try log.clearallboost();
try expectLog1(&.{N(ArgType.ClearAllBoost)}, buf[0..1]);
stream.reset();
}
test "|-fail|" {
try log.fail(p2.ident(6), .None);
try expectLog1(&.{ N(ArgType.Fail), 0b1110, N(Fail.None) }, buf[0..3]);
stream.reset();
try log.fail(p2.ident(6), .Sleep);
try expectLog1(&.{ N(ArgType.Fail), 0b1110, N(Fail.Sleep) }, buf[0..3]);
stream.reset();
try log.fail(p2.ident(6), .Substitute);
try expectLog1(&.{ N(ArgType.Fail), 0b1110, N(Fail.Substitute) }, buf[0..3]);
stream.reset();
try log.fail(p2.ident(6), .Weak);
try expectLog1(&.{ N(ArgType.Fail), 0b1110, N(Fail.Weak) }, buf[0..3]);
stream.reset();
}
test "|-miss|" {
try log.miss(p2.ident(4));
try expectLog1(&.{ N(ArgType.Miss), 0b1100 }, buf[0..2]);
stream.reset();
}
test "|-hitcount|" {
try log.hitcount(p2.ident(1), 5);
try expectLog1(&.{ N(ArgType.HitCount), 0b1001, 5 }, buf[0..3]);
stream.reset();
}
test "|-prepare|" {
try log.prepare(p2.ident(2), M.Dig);
try expectLog1(&.{ N(ArgType.Prepare), 0b1010, N(M.Dig) }, buf[0..3]);
stream.reset();
}
test "|-mustrecharge|" {
try log.mustrecharge(p1.ident(6));
try expectLog1(&.{ N(ArgType.MustRecharge), 0b0110 }, buf[0..2]);
stream.reset();
}
test "|-activate|" {
try log.activate(p1.ident(2), .Struggle);
try expectLog1(&.{ N(ArgType.Activate), 0b0010, N(Activate.Struggle) }, buf[0..3]);
stream.reset();
try log.activate(p2.ident(6), .Substitute);
try expectLog1(&.{ N(ArgType.Activate), 0b1110, N(Activate.Substitute) }, buf[0..3]);
stream.reset();
try log.activate(p1.ident(2), .Splash);
try expectLog1(&.{ N(ArgType.Activate), 0b0010, N(Activate.Splash) }, buf[0..3]);
stream.reset();
}
test "|-fieldactivate|" {
try log.fieldactivate();
try expectLog1(&.{N(ArgType.FieldActivate)}, buf[0..1]);
stream.reset();
}
test "|-start|" {
try log.start(p2.ident(6), .Bide);
try expectLog1(&.{ N(ArgType.Start), 0b1110, N(Start.Bide) }, buf[0..3]);
stream.reset();
try log.start(p1.ident(2), .ConfusionSilent);
try expectLog1(&.{ N(ArgType.Start), 0b0010, N(Start.ConfusionSilent) }, buf[0..3]);
stream.reset();
try log.typechange(p2.ident(6), gen1.Types{ .type1 = .Fire, .type2 = .Fire });
try expectLog1(&.{ N(ArgType.Start), 0b1110, N(Start.TypeChange), 0b1000_1000 }, buf[0..4]);
stream.reset();
try log.typechange(p1.ident(2), gen1.Types{ .type1 = .Bug, .type2 = .Poison });
try expectLog1(&.{ N(ArgType.Start), 0b0010, N(Start.TypeChange), 0b0011_0110 }, buf[0..4]);
stream.reset();
try log.startEffect(p1.ident(2), .Disable, M.Surf);
try expectLog1(&.{ N(ArgType.Start), 0b0010, N(Start.Disable), N(M.Surf) }, buf[0..4]);
stream.reset();
try log.startEffect(p1.ident(2), .Mimic, M.Surf);
try expectLog1(&.{ N(ArgType.Start), 0b0010, N(Start.Mimic), N(M.Surf) }, buf[0..4]);
stream.reset();
}
test "|-end|" {
try log.end(p2.ident(6), .Bide);
try expectLog1(&.{ N(ArgType.End), 0b1110, N(End.Bide) }, buf[0..3]);
stream.reset();
try log.end(p1.ident(2), .ConfusionSilent);
try expectLog1(&.{ N(ArgType.End), 0b0010, N(End.ConfusionSilent) }, buf[0..3]);
stream.reset();
}
test "|-ohko|" {
try log.ohko();
try expectLog1(&.{N(ArgType.OHKO)}, buf[0..1]);
stream.reset();
}
test "|-crit|" {
try log.crit(p2.ident(5));
try expectLog1(&.{ N(ArgType.Crit), 0b1101 }, buf[0..2]);
stream.reset();
}
test "|-supereffective|" {
try log.supereffective(p1.ident(1));
try expectLog1(&.{ N(ArgType.SuperEffective), 0b0001 }, buf[0..2]);
stream.reset();
}
test "|-resisted|" {
try log.resisted(p2.ident(2));
try expectLog1(&.{ N(ArgType.Resisted), 0b1010 }, buf[0..2]);
stream.reset();
}
test "|-immune|" {
try log.immune(p1.ident(3), .None);
try expectLog1(&.{ N(ArgType.Immune), 0b0011, N(Immune.None) }, buf[0..3]);
stream.reset();
try log.immune(p2.ident(2), .OHKO);
try expectLog1(&.{ N(ArgType.Immune), 0b1010, N(Immune.OHKO) }, buf[0..3]);
stream.reset();
}
test "|-transform|" {
try log.transform(p2.ident(4), p1.ident(5));
try expectLog1(&.{ N(ArgType.Transform), 0b1100, 0b0101 }, buf[0..3]);
stream.reset();
}
|
src/lib/common/protocol.zig
|
const std = @import("std");
const dast = @import("main.zig");
const testing = std.testing;
// Tests for the slice/array helpers exported by main.zig (`dast`).
// `slice(T, arr, start, end)`: bounds-clamped sub-slice; `end == null` means
// "to the end", and out-of-range starts yield an empty slice.
test "slice()" {
const arr = "hello";
try testing.expectEqualStrings("hell", try dast.slice(u8, arr, 0, 4));
try testing.expectEqualStrings("hello", try dast.slice(u8, arr, 0, null));
try testing.expectEqualStrings("o", try dast.slice(u8, arr, 4, 5));
try testing.expectEqualStrings("", try dast.slice(u8, arr, 5, 5));
try testing.expectEqualStrings("", try dast.slice(u8, arr, 6, 7));
}
test "includes()" {
const arr = &[_]u8{ 1, 2, 3 };
try testing.expect(dast.includes(u8, arr, 2));
try testing.expect(!dast.includes(u8, arr, 0));
}
// diff() returns the symmetric difference ({1,2,3} vs {2,3,4} -> {1,4});
// the result is allocated and must be freed by the caller.
test "diff()" {
const arr1 = &[_]u8{ 1, 2, 3 };
const arr2 = &[_]u8{ 2, 3, 4 };
const out = (try dast.diff(u8, testing.allocator, arr1, arr2)).?;
try testing.expectEqualStrings(out, &[_]u8{ 1, 4 });
testing.allocator.free(out);
}
// rm() deletes the element at an index in place, shifting the tail left;
// the backing array keeps its length, so only the prefix is compared.
test "rm()" {
{
var arr = "good".*;
dast.rm(&arr, 1);
try testing.expectEqualStrings("god", arr[0..3]);
}
{
var arr = [_]u8{ 0, 1, 2, 0, 3, 0 };
dast.rm(&arr, 0);
dast.rm(&arr, 2);
try testing.expectEqualStrings(&[_]u8{ 1, 2, 3 }, arr[0..3]);
}
}
// compact() allocates a copy with all zero elements removed.
test "compact()" {
const arr = &[_]u8{ 0, 1, 2, 0, 3, 0, 0 };
const out = try dast.compact(u8, testing.allocator, arr);
try testing.expectEqualSlices(u8, &[_]u8{ 1, 2, 3 }, out);
testing.allocator.free(out);
}
// chunk() splits into groups of at most `n`; n == 0 yields no chunks, and a
// short final chunk is kept.
test "chunk()" {
{
// {}
const out = try dast.chunk(u8, testing.allocator, "abc", 0);
try testing.expect(out.len == 0);
}
{
// { { a }, { b }, { c } }
var out = try dast.chunk(u8, testing.allocator, "abc", 1);
try testing.expectEqualStrings("a", out[0]);
try testing.expectEqualStrings("b", out[1]);
try testing.expectEqualStrings("c", out[2]);
testing.allocator.free(out);
}
{
// { { a, b }, { c } }
var out = try dast.chunk(u8, testing.allocator, "abc", 2);
try testing.expectEqualStrings("ab", out[0]);
try testing.expectEqualStrings("c", out[1]);
testing.allocator.free(out);
}
}
// filter() allocates a copy keeping only elements for which the predicate
// (called with element and its index) returns true.
test "filter()" {
const predFn = struct {
fn call(el: u8, _: usize) bool {
return el > 2;
}
}.call;
const arr = &[_]u8{ 1, 2, 3, 4 };
const out = try dast.filter(u8, std.testing.allocator, arr, predFn);
try testing.expectEqualSlices(u8, &[_]u8{ 3, 4 }, out);
testing.allocator.free(out);
}
|
src/test.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
const testing = std.testing;
const ColorBitmap = @import("ColorBitmap.zig");
// The file itself is the struct (`IndexedBitmap = @This()`): a bitmap of
// 8-bit palette indices stored row-major (`width * height` bytes).
width: u32,
height: u32,
// Row-major index data; owned by the bitmap and released in `deinit`.
indices: []u8,
const IndexedBitmap = @This();
/// Allocate a bitmap with an *uninitialized* index buffer.
/// Caller must eventually call `deinit` with the same allocator.
pub fn init(allocator: Allocator, width: u32, height: u32) !IndexedBitmap {
var self = IndexedBitmap{
.width = width,
.height = height,
.indices = undefined,
};
self.indices = try allocator.alloc(u8, self.width * self.height);
return self;
}
/// Free the index buffer. Must be called with the allocator used in `init`.
pub fn deinit(self: IndexedBitmap, allocator: Allocator) void {
allocator.free(self.indices);
}
/// Deep copy; caller owns the returned bitmap and must `deinit` it.
pub fn clone(self: IndexedBitmap, allocator: Allocator) !IndexedBitmap {
return IndexedBitmap{
.width = self.width,
.height = self.height,
.indices = try allocator.dupe(u8, self.indices),
};
}
/// Returns true when `bitmap` has the same dimensions and identical
/// per-pixel palette indices as `self`.
pub fn eql(self: IndexedBitmap, bitmap: IndexedBitmap) bool {
    // Fix: the original compared `width` twice and never compared `height`,
    // so two bitmaps of different heights (e.g. 6x2 vs 3x4 with the same
    // backing bytes) would incorrectly compare equal.
    return self.width == bitmap.width and
        self.height == bitmap.height and
        std.mem.eql(u8, self.indices, bitmap.indices);
}
/// Expand the indexed bitmap into an RGBA `ColorBitmap` using `colormap`,
/// which holds 4 bytes (RGBA) per palette entry, indexed by `4 * index`.
/// Caller owns the returned bitmap.
pub fn convertToTruecolor(self: IndexedBitmap, allocator: Allocator, colormap: []const u8) !ColorBitmap {
const color_bitmap = try ColorBitmap.init(allocator, self.width, self.height);
const pixel_count = color_bitmap.width * color_bitmap.height;
var i: usize = 0;
while (i < pixel_count) : (i += 1) {
const index = @as(usize, self.indices[i]);
// Copy the 4-byte palette entry into the i-th RGBA pixel.
const pixel = colormap[4 * index ..][0..4];
color_bitmap.pixels[4 * i + 0] = pixel[0];
color_bitmap.pixels[4 * i + 1] = pixel[1];
color_bitmap.pixels[4 * i + 2] = pixel[2];
color_bitmap.pixels[4 * i + 3] = pixel[3];
}
return color_bitmap;
}
/// Bounds-checked write: stores `index` at (x, y) and returns true, or
/// returns false (no write) when the coordinates fall outside the bitmap.
pub fn setIndex(self: IndexedBitmap, x: i32, y: i32, index: u8) bool {
if (x >= 0 and y >= 0) {
const ux = @intCast(u32, x);
const uy = @intCast(u32, y);
if (ux < self.width and uy < self.height) {
self.setIndexUnchecked(ux, uy, index);
return true;
}
}
return false;
}
/// Write `index` at (x, y) with no runtime bounds check in release builds.
/// Debug/safe builds assert that both coordinates are in range.
pub fn setIndexUnchecked(self: IndexedBitmap, x: u32, y: u32, index: u8) void {
    std.debug.assert(x < self.width);
    // Fix: the original only asserted `x`; an out-of-range `y` would silently
    // index past the end of `indices` (or into the wrong row) even in debug.
    std.debug.assert(y < self.height);
    const i = y * self.width + x;
    self.indices[i] = index;
}
/// Bounds-checked read: returns the index at (x, y), or null when the
/// coordinates fall outside the bitmap.
pub fn getIndex(self: IndexedBitmap, x: i32, y: i32) ?u8 {
if (x >= 0 and y >= 0) {
const ux = @intCast(u32, x);
const uy = @intCast(u32, y);
if (ux < self.width and uy < self.height) {
return self.getIndexUnchecked(ux, uy);
}
}
return null;
}
/// Read the index at (x, y) with no runtime bounds check in release builds.
/// Debug/safe builds assert that both coordinates are in range.
pub fn getIndexUnchecked(self: IndexedBitmap, x: u32, y: u32) u8 {
    std.debug.assert(x < self.width);
    // Fix: mirror the x-assert for y; the original never validated `y`, so an
    // out-of-range row read went undetected even in debug builds.
    std.debug.assert(y < self.height);
    const i = y * self.width + x;
    return self.indices[i];
}
/// Copy the index at (x, y) from `self` into `dst` at the same position.
/// Both bitmaps must contain (x, y); no bounds checks are performed.
pub fn copyIndexUnchecked(self: IndexedBitmap, dst: IndexedBitmap, x: u32, y: u32) void {
const src_index = self.getIndexUnchecked(x, y);
dst.setIndexUnchecked(x, y, src_index);
}
/// Draw a line from (x0, y0) to (x1, y1) with Bresenham's integer line walk.
/// Pixels outside the bitmap are silently clipped (setIndex returns false).
/// `skip_first` suppresses plotting the start pixel (useful when chaining
/// segments so shared endpoints are not drawn twice).
pub fn drawLine(self: IndexedBitmap, x0: i32, y0: i32, x1: i32, y1: i32, index: u8, skip_first: bool) void {
const dx = std.math.absInt(x1 - x0) catch unreachable;
const sx: i32 = if (x0 < x1) 1 else -1;
// dy is negated so a single error term (err = dx + dy) serves both axes.
const dy = -(std.math.absInt(y1 - y0) catch unreachable);
const sy: i32 = if (y0 < y1) 1 else -1;
var err = dx + dy;
if (!skip_first) {
_ = self.setIndex(x0, y0, index);
}
var x = x0;
var y = y0;
while (x != x1 or y != y1) {
const e2 = 2 * err;
if (e2 >= dy) {
err += dy;
x += sx;
}
if (e2 <= dx) {
err += dx;
y += sy;
}
_ = self.setIndex(x, y, index);
}
}
/// Walk the same Bresenham line, copying each in-bounds pixel of `self`
/// into `dst` at the same coordinates (unlike drawLine, the start pixel is
/// always processed).
pub fn copyLine(self: IndexedBitmap, dst: IndexedBitmap, x0: i32, y0: i32, x1: i32, y1: i32) void {
const dx = std.math.absInt(x1 - x0) catch unreachable;
const sx: i32 = if (x0 < x1) 1 else -1;
const dy = -(std.math.absInt(y1 - y0) catch unreachable);
const sy: i32 = if (y0 < y1) 1 else -1;
var err = dx + dy;
var x = x0;
var y = y0;
while (true) {
// getIndex bounds-checks against `self`; NOTE(review): the unchecked
// write assumes `dst` is at least as large as `self` — confirm callers.
if (self.getIndex(x, y)) |src_index| {
dst.setIndexUnchecked(@intCast(u32, x), @intCast(u32, y), src_index);
}
if (x == x1 and y == y1) break;
const e2 = 2 * err;
if (e2 >= dy) {
err += dy;
x += sx;
}
if (e2 <= dx) {
err += dx;
y += sy;
}
}
}
/// Set every pixel to palette index 0.
pub fn clear(self: IndexedBitmap) void {
std.mem.set(u8, self.indices, 0);
}
/// Set every pixel to `index`.
pub fn fill(self: IndexedBitmap, index: u8) void {
std.mem.set(u8, self.indices, index);
}
/// 4-connected flood fill starting at (x, y), replacing the region's current
/// index with `index`. Out-of-bounds start or no-op fills return immediately.
/// Cells are recolored *before* being pushed, so each is enqueued at most once.
pub fn floodFill(self: *IndexedBitmap, allocator: Allocator, x: i32, y: i32, index: u8) !void {
const old_index = self.getIndex(x, y) orelse return;
if (old_index == index) return;
const start_coords = .{ .x = @intCast(u32, x), .y = @intCast(u32, y) };
self.setIndexUnchecked(start_coords.x, start_coords.y, index);
var stack = std.ArrayList(struct { x: u32, y: u32 }).init(allocator);
// NOTE(review): capacity heuristic of w*h/2 feeds appendAssumeCapacity
// below; verify the live stack can never exceed half the pixel count.
try stack.ensureTotalCapacity(self.width * self.height / 2);
defer stack.deinit();
try stack.append(start_coords);
while (stack.items.len > 0) {
const coords = stack.pop();
if (coords.y > 0) {
const new_coords = .{ .x = coords.x, .y = coords.y - 1 };
if (self.getIndexUnchecked(new_coords.x, new_coords.y) == old_index) {
self.setIndexUnchecked(new_coords.x, new_coords.y, index);
stack.appendAssumeCapacity(new_coords);
}
}
if (coords.y < self.height - 1) {
const new_coords = .{ .x = coords.x, .y = coords.y + 1 };
if (self.getIndexUnchecked(new_coords.x, new_coords.y) == old_index) {
self.setIndexUnchecked(new_coords.x, new_coords.y, index);
stack.appendAssumeCapacity(new_coords);
}
}
if (coords.x > 0) {
const new_coords = .{ .x = coords.x - 1, .y = coords.y };
if (self.getIndexUnchecked(new_coords.x, new_coords.y) == old_index) {
self.setIndexUnchecked(new_coords.x, new_coords.y, index);
stack.appendAssumeCapacity(new_coords);
}
}
if (coords.x < self.width - 1) {
const new_coords = .{ .x = coords.x + 1, .y = coords.y };
if (self.getIndexUnchecked(new_coords.x, new_coords.y) == old_index) {
self.setIndexUnchecked(new_coords.x, new_coords.y, index);
stack.appendAssumeCapacity(new_coords);
}
}
}
}
/// Flip in place around the vertical axis (swap columns row by row).
/// NOTE(review): `self.width - 1` underflows u32 for a zero-width bitmap.
pub fn mirrorHorizontally(self: IndexedBitmap) void {
var y: u32 = 0;
while (y < self.height) : (y += 1) {
var x0: u32 = 0;
var x1: u32 = self.width - 1;
while (x0 < x1) {
const index0 = self.getIndexUnchecked(x0, y);
const index1 = self.getIndexUnchecked(x1, y);
self.setIndexUnchecked(x0, y, index1);
self.setIndexUnchecked(x1, y, index0);
x0 += 1;
x1 -= 1;
}
}
}
/// Flip in place around the horizontal axis (swap rows).
/// NOTE(review): `self.height - 1` underflows u32 for a zero-height bitmap.
pub fn mirrorVertically(self: IndexedBitmap) void {
var y0: u32 = 0;
var y1: u32 = self.height - 1;
while (y0 < y1) {
var x: u32 = 0;
while (x < self.width) : (x += 1) {
const index0 = self.getIndexUnchecked(x, y0);
const index1 = self.getIndexUnchecked(x, y1);
self.setIndexUnchecked(x, y0, index1);
self.setIndexUnchecked(x, y1, index0);
}
y0 += 1;
y1 -= 1;
}
}
/// Rotate 90 degrees in place (dimensions swap). Reads from a temporary
/// clone holding the old layout; loops run over the *old* dimensions
/// (y < new width == old height, x < new height == old width).
pub fn rotate(self: *IndexedBitmap, allocator: Allocator, clockwise: bool) !void {
const tmp_bitmap = try self.clone(allocator);
defer tmp_bitmap.deinit(allocator);
std.mem.swap(u32, &self.width, &self.height);
var y: u32 = 0;
while (y < self.width) : (y += 1) {
var x: u32 = 0;
while (x < self.height) : (x += 1) {
const index = tmp_bitmap.getIndexUnchecked(x, y);
if (clockwise) {
self.setIndexUnchecked(self.width - 1 - y, x, index);
} else {
self.setIndexUnchecked(y, self.height - 1 - x, index);
}
}
}
}
|
src/IndexedBitmap.zig
|
const std = @import("std");
const api = @import("buzz_api.zig");
/// Native `print` builtin: writes the string on top of the VM stack to
/// stdout followed by a newline. Returns 0 (no values pushed) on success,
/// -1 on a write failure (currently without throwing a VM error).
export fn print(vm: *api.VM) c_int {
_ = std.io.getStdOut().write(std.mem.sliceTo(vm.bz_peek(0).bz_valueToString().?, 0)) catch {
// TODO: throw something?
return -1;
};
_ = std.io.getStdOut().write("\n") catch {
// TODO: throw something?
return -1;
};
return 0;
}
/// Native `parseNumber` builtin: parses the string on top of the stack as an
/// f64 and pushes the result, or pushes null when parsing fails.
/// Always returns 1 (one value pushed).
export fn parseNumber(vm: *api.VM) c_int {
const string = std.mem.sliceTo(vm.bz_peek(0).bz_valueToString().?, 0);
const number: f64 = std.fmt.parseFloat(f64, string) catch {
vm.bz_pushNull();
return 1;
};
vm.bz_pushNum(number);
return 1;
}
/// Native `runFile(filename)` builtin: reads the file whose path is on top of
/// the stack, compiles it in a fresh VM, and interprets it.
/// Returns 0 on success, or -1 after throwing a buzz string error.
export fn runFile(self: *api.VM) c_int {
    // Read file
    const filename: [*:0]const u8 = api.Value.bz_valueToString(self.bz_peek(0)) orelse {
        self.bz_throwString("Could not get filename");
        return -1;
    };
    const filename_slice: []const u8 = std.mem.sliceTo(filename, 0);

    var file: std.fs.File = (if (std.fs.path.isAbsolute(filename_slice))
        std.fs.openFileAbsolute(filename_slice, .{})
    else
        std.fs.cwd().openFile(filename_slice, .{})) catch {
        self.bz_throwString("Could not open file");
        return -1;
    };
    defer file.close();

    const source = api.VM.allocator.alloc(u8, (file.stat() catch {
        self.bz_throwString("Could not read file");
        return -1;
    }).size) catch {
        self.bz_throwString("Could not read file");
        return -1;
    };
    // Fix: register the free immediately after the allocation. The original
    // declared `defer free(source)` only *after* readAll/dupeZ, leaking
    // `source` whenever either of those calls failed.
    defer api.VM.allocator.free(source);

    _ = file.readAll(source) catch {
        self.bz_throwString("Could not read file");
        return -1;
    };

    // Fix: dupeZ returns an error union, never null — the original's
    // `?[]u8` wrapper and null check were dead code. The sentinel-terminated
    // copy is what the compiler needs ([*:0]u8 via `.ptr`).
    const source_z = api.VM.allocator.dupeZ(u8, source) catch {
        self.bz_throwString("Could not read file");
        return -1;
    };
    defer api.VM.allocator.free(source_z);

    // Init new VM
    var vm = self.bz_newVM();
    defer vm.bz_deinitVM();

    // Compile
    var function = vm.bz_compile(source_z.ptr, filename) orelse {
        self.bz_throwString("File does not compile");
        return -1;
    };

    // Run
    if (!vm.bz_interpret(function)) {
        self.bz_throwString("Error while running file");
        return -1;
    }

    return 0;
}
|
lib/buzz_std.zig
|
const std = @import("../std.zig");
const crypto = std.crypto;
const debug = std.debug;
const mem = std.mem;
// Ready-made HMAC instantiations over the std hash functions.
pub const HmacMd5 = Hmac(crypto.hash.Md5);
pub const HmacSha1 = Hmac(crypto.hash.Sha1);
pub const sha2 = struct {
pub const HmacSha224 = Hmac(crypto.hash.sha2.Sha224);
pub const HmacSha256 = Hmac(crypto.hash.sha2.Sha256);
pub const HmacSha384 = Hmac(crypto.hash.sha2.Sha384);
pub const HmacSha512 = Hmac(crypto.hash.sha2.Sha512);
};
/// Generic HMAC (keyed-hash message authentication code) construction over
/// any std-style hash type exposing `block_length`, `digest_length`, `init`,
/// `update`, `final`, and a one-shot `hash`.
pub fn Hmac(comptime Hash: type) type {
return struct {
const Self = @This();
pub const mac_length = Hash.digest_length;
pub const key_length_min = 0;
pub const key_length = 32; // recommended key length
// Outer pad, kept so `final` can run the outer hash; inner hash streams.
o_key_pad: [Hash.block_length]u8,
hash: Hash,
// HMAC(k, m) = H(o_key_pad || H(i_key_pad || msg)) where || is concatenation
/// One-shot convenience: MAC `msg` under `key` into `out`.
pub fn create(out: *[mac_length]u8, msg: []const u8, key: []const u8) void {
var ctx = Self.init(key);
ctx.update(msg);
ctx.final(out);
}
/// Begin a streaming HMAC computation keyed with `key` (any length;
/// longer-than-block keys are hashed down, shorter are zero-padded).
pub fn init(key: []const u8) Self {
var ctx: Self = undefined;
var scratch: [Hash.block_length]u8 = undefined;
var i_key_pad: [Hash.block_length]u8 = undefined;
// Normalize key length to block size of hash
if (key.len > Hash.block_length) {
Hash.hash(key, scratch[0..mac_length], .{});
mem.set(u8, scratch[mac_length..Hash.block_length], 0);
} else if (key.len < Hash.block_length) {
mem.copy(u8, scratch[0..key.len], key);
mem.set(u8, scratch[key.len..Hash.block_length], 0);
} else {
mem.copy(u8, scratch[0..], key);
}
// Derive the outer (0x5c) and inner (0x36) pads from the normalized key.
for (ctx.o_key_pad) |*b, i| {
b.* = scratch[i] ^ 0x5c;
}
for (i_key_pad) |*b, i| {
b.* = scratch[i] ^ 0x36;
}
ctx.hash = Hash.init(.{});
ctx.hash.update(&i_key_pad);
return ctx;
}
/// Absorb more message bytes.
pub fn update(ctx: *Self, msg: []const u8) void {
ctx.hash.update(msg);
}
/// Finish: inner digest, then hash o_key_pad || inner digest into `out`.
pub fn final(ctx: *Self, out: *[mac_length]u8) void {
var scratch: [mac_length]u8 = undefined;
ctx.hash.final(&scratch);
var ohash = Hash.init(.{});
ohash.update(&ctx.o_key_pad);
ohash.update(&scratch);
ohash.final(out);
}
};
}
const htest = @import("test.zig");
// Known-answer tests: empty key/message and the classic "quick brown fox"
// HMAC example vectors, compared as lowercase hex via htest.
test "hmac md5" {
var out: [HmacMd5.mac_length]u8 = undefined;
HmacMd5.create(out[0..], "", "");
htest.assertEqual("74e6f7298a9c2d168935f58c001bad88", out[0..]);
HmacMd5.create(out[0..], "The quick brown fox jumps over the lazy dog", "key");
htest.assertEqual("80070713463e7749b90c2dc24911e275", out[0..]);
}
test "hmac sha1" {
var out: [HmacSha1.mac_length]u8 = undefined;
HmacSha1.create(out[0..], "", "");
htest.assertEqual("fbdb1d1b18aa6c08324b7d64b71fb76370690e1d", out[0..]);
HmacSha1.create(out[0..], "The quick brown fox jumps over the lazy dog", "key");
htest.assertEqual("de7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9", out[0..]);
}
test "hmac sha256" {
var out: [sha2.HmacSha256.mac_length]u8 = undefined;
sha2.HmacSha256.create(out[0..], "", "");
htest.assertEqual("b613679a0814d9ec772f95d778c35fc5ff1697c493715653c6c712144292c5ad", out[0..]);
sha2.HmacSha256.create(out[0..], "The quick brown fox jumps over the lazy dog", "key");
htest.assertEqual("f7bc83f430538424b13298e6aa6fb143ef4d59a14946175997479dbc2d1a3cd8", out[0..]);
}
|
lib/std/crypto/hmac.zig
|
const std = @import("../../std.zig");
const os = std.os;
const mem = std.mem;
const net = std.net;
const time = std.time;
const builtin = std.builtin;
const testing = std.testing;
const Socket = @This();
/// A socket-address pair.
pub const Connection = struct {
socket: Socket,
address: net.Address,
};
/// The underlying handle of a socket.
fd: os.socket_t,
/// Open a new socket.
pub fn init(domain: u32, socket_type: u32, protocol: u32) !Socket {
return Socket{ .fd = try os.socket(domain, socket_type, protocol) };
}
/// Closes the socket.
pub fn deinit(self: Socket) void {
os.closeSocket(self.fd);
}
/// Shutdown either the read side, or write side, or the entirety of a socket.
pub fn shutdown(self: Socket, how: os.ShutdownHow) !void {
return os.shutdown(self.fd, how);
}
/// Binds the socket to an address.
pub fn bind(self: Socket, address: net.Address) !void {
return os.bind(self.fd, &address.any, address.getOsSockLen())
}
/// Start listening for incoming connections on the socket.
pub fn listen(self: Socket, max_backlog_size: u31) !void {
return os.listen(self.fd, max_backlog_size);
}
/// Have the socket attempt to the connect to an address.
pub fn connect(self: Socket, address: net.Address) !void {
return os.connect(self.fd, &address.any, address.getOsSockLen());
}
/// Accept a pending incoming connection queued to the kernel backlog
/// of the socket. `flags` are passed through to accept4-style flags
/// (e.g. SOCK_CLOEXEC / SOCK_NONBLOCK).
pub fn accept(self: Socket, flags: u32) !Socket.Connection {
// address_len is in/out: the kernel writes back the actual address size.
var address: os.sockaddr = undefined;
var address_len: u32 = @sizeOf(os.sockaddr);
const fd = try os.accept(self.fd, &address, &address_len, flags);
return Connection{
.socket = Socket{ .fd = fd },
.address = net.Address.initPosix(@alignCast(4, &address)),
};
}
/// Read data from the socket into the buffer provided. It returns the
/// number of bytes read into the buffer provided.
pub fn read(self: Socket, buf: []u8) !usize {
return os.read(self.fd, buf);
}
/// Read data from the socket into the buffer provided with a set of flags
/// specified. It returns the number of bytes read into the buffer provided.
pub fn recv(self: Socket, buf: []u8, flags: u32) !usize {
return os.recv(self.fd, buf, flags);
}
/// Write a buffer of data provided to the socket. It returns the number
/// of bytes that are written to the socket.
pub fn write(self: Socket, buf: []const u8) !usize {
return os.write(self.fd, buf);
}
/// Writes multiple I/O vectors to the socket. It returns the number
/// of bytes that are written to the socket.
pub fn writev(self: Socket, buffers: []const os.iovec_const) !usize {
return os.writev(self.fd, buffers);
}
/// Write a buffer of data provided to the socket with a set of flags specified.
/// It returns the number of bytes that are written to the socket.
pub fn send(self: Socket, buf: []const u8, flags: u32) !usize {
return os.send(self.fd, buf, flags);
}
/// Writes multiple I/O vectors with a prepended message header to the socket
/// with a set of flags specified. It returns the number of bytes that are
/// written to the socket.
pub fn sendmsg(self: Socket, msg: os.msghdr_const, flags: u32) !usize {
return os.sendmsg(self.fd, msg, flags);
}
/// Query the address that the socket is locally bounded to.
pub fn getLocalAddress(self: Socket) !net.Address {
var address: os.sockaddr = undefined;
var address_len: u32 = @sizeOf(os.sockaddr);
try os.getsockname(self.fd, &address, &address_len);
return net.Address.initPosix(@alignCast(4, &address));
}
/// Query and return the latest cached error on the socket.
pub fn getError(self: Socket) !void {
return os.getsockoptError(self.fd);
}
/// Query the read buffer size of the socket.
pub fn getReadBufferSize(self: Socket) !u32 {
var value: u32 = undefined;
var value_len: u32 = @sizeOf(u32);
const rc = os.system.getsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVBUF, mem.asBytes(&value), &value_len);
// errno 0 is success; everything else maps to a typed error.
return switch (os.errno(rc)) {
0 => value,
os.EBADF => error.BadFileDescriptor,
os.EFAULT => error.InvalidAddressSpace,
os.EINVAL => error.InvalidSocketOption,
os.ENOPROTOOPT => error.UnknownSocketOption,
os.ENOTSOCK => error.NotASocket,
else => |err| os.unexpectedErrno(err),
};
}
/// Query the write buffer size of the socket.
pub fn getWriteBufferSize(self: Socket) !u32 {
var value: u32 = undefined;
var value_len: u32 = @sizeOf(u32);
const rc = os.system.getsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDBUF, mem.asBytes(&value), &value_len);
return switch (os.errno(rc)) {
0 => value,
os.EBADF => error.BadFileDescriptor,
os.EFAULT => error.InvalidAddressSpace,
os.EINVAL => error.InvalidSocketOption,
os.ENOPROTOOPT => error.UnknownSocketOption,
os.ENOTSOCK => error.NotASocket,
else => |err| os.unexpectedErrno(err),
};
}
/// Allow multiple sockets on the same host to listen on the same address. It returns `error.UnsupportedSocketOption` if
/// the host does not support sockets listening the same address.
pub fn setReuseAddress(self: Socket, enabled: bool) !void {
if (comptime @hasDecl(os, "SO_REUSEADDR")) {
return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_REUSEADDR, mem.asBytes(&@as(usize, @boolToInt(enabled))));
}
return error.UnsupportedSocketOption;
}
/// Allow multiple sockets on the same host to listen on the same port. It returns `error.UnsupportedSocketOption` if
/// the host does not supports sockets listening on the same port.
pub fn setReusePort(self: Socket, enabled: bool) !void {
if (comptime @hasDecl(os, "SO_REUSEPORT")) {
return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_REUSEPORT, mem.asBytes(&@as(usize, @boolToInt(enabled))));
}
return error.UnsupportedSocketOption;
}
/// Disable Nagle's algorithm on a TCP socket. It returns `error.UnsupportedSocketOption` if the host does not support
/// sockets disabling Nagle's algorithm.
pub fn setNoDelay(self: Socket, enabled: bool) !void {
if (comptime @hasDecl(os, "TCP_NODELAY")) {
return os.setsockopt(self.fd, os.IPPROTO_TCP, os.TCP_NODELAY, mem.asBytes(&@as(usize, @boolToInt(enabled))));
}
return error.UnsupportedSocketOption;
}
/// Enables TCP Fast Open (RFC 7413) on a TCP socket. It returns `error.UnsupportedSocketOption` if the host does not
/// support TCP Fast Open.
pub fn setFastOpen(self: Socket, enabled: bool) !void {
if (comptime @hasDecl(os, "TCP_FASTOPEN")) {
return os.setsockopt(self.fd, os.IPPROTO_TCP, os.TCP_FASTOPEN, mem.asBytes(&@as(usize, @boolToInt(enabled))));
}
return error.UnsupportedSocketOption;
}
/// Enables TCP Quick ACK on a TCP socket to immediately send rather than delay ACKs when necessary. It returns
/// `error.UnsupportedSocketOption` if the host does not support TCP Quick ACK.
pub fn setQuickACK(self: Socket, enabled: bool) !void {
if (comptime @hasDecl(os, "TCP_QUICKACK")) {
return os.setsockopt(self.fd, os.IPPROTO_TCP, os.TCP_QUICKACK, mem.asBytes(&@as(usize, @boolToInt(enabled))));
}
return error.UnsupportedSocketOption;
}
/// Set the write buffer size of the socket.
pub fn setWriteBufferSize(self: Socket, size: u32) !void {
return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDBUF, mem.asBytes(&size));
}
/// Set the read buffer size of the socket.
pub fn setReadBufferSize(self: Socket, size: u32) !void {
return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVBUF, mem.asBytes(&size));
}
/// Set a timeout on the socket that is to occur if no messages are successfully written
/// to its bound destination after a specified number of milliseconds. A subsequent write
/// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded.
pub fn setWriteTimeout(self: Socket, milliseconds: usize) !void {
// Split milliseconds into the timeval's seconds/microseconds fields.
const timeout = os.timeval{
.tv_sec = @intCast(isize, milliseconds / time.ms_per_s),
.tv_usec = @intCast(isize, (milliseconds % time.ms_per_s) * time.us_per_ms),
};
return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDTIMEO, mem.asBytes(&timeout));
}
/// Set a timeout on the socket that is to occur if no messages are successfully read
/// from its bound destination after a specified number of milliseconds. A subsequent
/// read from the socket will thereafter return `error.WouldBlock` should the timeout be
/// exceeded.
pub fn setReadTimeout(self: Socket, milliseconds: usize) !void {
const timeout = os.timeval{
.tv_sec = @intCast(isize, milliseconds / time.ms_per_s),
.tv_usec = @intCast(isize, (milliseconds % time.ms_per_s) * time.us_per_ms),
};
return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVTIMEO, mem.asBytes(&timeout));
}
// Reference all declarations so nested tests are compiled and run.
test {
testing.refAllDecls(@This());
}
// Bind to port 0 (OS picks an ephemeral port), connect a second socket to
// the reported local address, then verify a 1ms read timeout surfaces as
// error.WouldBlock on the blocking socket.
// NOTE(review): `testing.expectError` is not `try`'d — valid only for the
// older std API where it panicked instead of returning an error union.
test "socket/linux: set read timeout of 1 millisecond on blocking socket" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
const a = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
defer a.deinit();
try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0));
try a.listen(128);
const binded_address = try a.getLocalAddress();
const b = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
defer b.deinit();
try b.connect(binded_address);
try b.setReadTimeout(1);
const ab = try a.accept(os.SOCK_CLOEXEC);
defer ab.socket.deinit();
var buf: [1]u8 = undefined;
testing.expectError(error.WouldBlock, b.read(&buf));
}
test "socket/linux: create non-blocking socket pair" {
if (builtin.os.tag != .linux) return error.SkipZigTest;
const a = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
defer a.deinit();
try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0));
try a.listen(128);
const binded_address = try a.getLocalAddress();
const b = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
defer b.deinit();
// Non-blocking connect is expected to return WouldBlock immediately;
// getError() then confirms the connection completed without error.
testing.expectError(error.WouldBlock, b.connect(binded_address));
try b.getError();
const ab = try a.accept(os.SOCK_NONBLOCK | os.SOCK_CLOEXEC);
defer ab.socket.deinit();
}
|
lib/std/x/os/Socket.zig
|
const std = @import("std");
const bld = std.build;
const Builder = bld.Builder;
const Step = bld.Step;
// Compiler flag sets shared by the C and C++ sources of sokol-shdc.
const common_flags = [_][]const u8 {
"-fstrict-aliasing",
};
const common_c_flags = common_flags;
const common_cpp_flags = common_flags ++ [_][]const u8{
"-fno-rtti",
"-fno-exceptions",
};
// Flags consumers of SPIRV-Cross headers must also compile with.
const spvcross_public_cpp_flags = [_][]const u8 {
"-DSPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS",
};
/// Default build entry point: builds sokol-shdc in-tree (empty prefix path).
pub fn build(b: *bld.Builder) void {
_ = buildShdcExe(b, b.standardTargetOptions(.{}), b.standardReleaseOptions(), "");
}
/// Build the sokol-shdc executable. `prefix_path` is a comptime path prefix
/// so this build script can be @import'ed and driven from another project's
/// build.zig (see ShdcStep usage example below). The lib_* helpers are
/// project functions building the vendored ext/ dependencies (defined
/// elsewhere in this file, outside this excerpt).
pub fn buildShdcExe(b: *bld.Builder, target: std.zig.CrossTarget, mode: std.builtin.Mode, comptime prefix_path: []const u8) *bld.LibExeObjStep {
const dir = prefix_path ++ "src/shdc/";
const sources = [_][]const u8 {
"args.cc",
"bare.cc",
"bytecode.cc",
"input.cc",
"main.cc",
"sokol.cc",
"sokolzig.cc",
"spirv.cc",
"spirvcross.cc",
"util.cc"
};
const incl_dirs = [_][] const u8 {
"ext/fmt/include",
"ext/SPIRV-Cross",
"ext/pystring",
"ext/getopt/include",
"ext/glslang",
"ext/glslang/glslang/Public",
"ext/glslang/glslang/Include",
"ext/glslang/SPIRV",
"ext/SPIRV-Tools/include"
};
const flags = common_cpp_flags ++ spvcross_public_cpp_flags;
const exe = b.addExecutable("sokol-shdc", null);
exe.setTarget(target);
exe.setBuildMode(mode);
exe.linkSystemLibrary("c");
exe.linkSystemLibrary("c++");
exe.linkLibrary(lib_fmt(b, target, mode, prefix_path));
exe.linkLibrary(lib_getopt(b, target, mode, prefix_path));
exe.linkLibrary(lib_pystring(b,target, mode, prefix_path));
exe.linkLibrary(lib_spirvcross(b, target, mode,prefix_path));
exe.linkLibrary(lib_spirvtools(b, target, mode, prefix_path));
exe.linkLibrary(lib_glslang(b, target, mode, prefix_path));
// inline for: incl_dirs/sources are comptime arrays concatenated with
// the comptime prefix_path.
inline for (incl_dirs) |incl_dir| {
exe.addIncludeDir(prefix_path ++ incl_dir);
}
inline for (sources) |src| {
exe.addCSourceFile(dir ++ src, &flags);
}
exe.install();
return exe;
}
/// Utility std.build.step for compiling shader within zig buildsystem
/// usage example (build.zig):
/// const sokol_tools = @import("path_to/sokol-tools/build.zig");
/// const native_target: std.zig.CrossTarget = .{}; // even when cross compiling we want to run this now on this machine
/// const shdc_exe = sokol_tools.buildShdcExe(builder, native_target, .ReleaseFast, "path_to/sokol-tools/");
/// const my_shader = sokol_tools.ShdcStep.create(builder, shdc_exe, "shaders/texcube.glsl", "shaders/texcube.zig", .{});
/// my_lib_or_exe.dependOn(&my_shader.step);
pub const ShdcStep = struct {
    /// args passed to sokol-shdc, build and use `zig-out/bin/sokol-shdc --help` for more info
    pub const Args = struct {
        /// shdc output file format
        format: []const u8 = "sokol_zig",
        /// one or more shading lang
        slangs: []const []const u8 = &.{ "glsl330", "glsl100", "glsl300es", "hlsl5", "metal_macos", "metal_ios", "wgpu" },
        // Optional module name (`-m`); omitted from the command line when null.
        module: ?[]const u8 = null,
        // When true, pass `-r` (reflection) to sokol-shdc.
        reflection: bool = true,
        // When true, pass `-b` (bytecode) to sokol-shdc.
        bytecode: bool = true,
        /// any other extra args not listed here
        extra: []const []const u8 = &.{},
    };
    args: Args = .{},
    builder: *Builder,
    step: Step,
    // Path of the compiled sokol-shdc binary; resolved lazily in make().
    exe: std.build.FileSource,
    // Input shader source path, relative to the build root.
    src: []const u8,
    // Output file path written by sokol-shdc.
    dest: []const u8,
    // Cap for captured stdout AND stderr; readAllAlloc errors above this.
    max_stdout_size: usize = 4096,
    check_modified: bool = false, // can be enabled if shaders don't depend on any other files etc.
    src_mtime: i128 = 0, // set in make() if check_modified is on
    /// Allocate and wire up a ShdcStep that compiles `src_file_path` into
    /// `dest_file_path` using the given (already configured) shdc executable.
    /// The step depends on the exe's build step so the compiler exists first.
    pub fn create(builder: *Builder, shdc_exe: *std.build.LibExeObjStep, src_file_path: []const u8, dest_file_path: []const u8, args: Args) *ShdcStep {
        const self = builder.allocator.create(ShdcStep) catch unreachable;
        self.* = ShdcStep{
            .args = args,
            .builder = builder,
            .step = Step.init(.custom, "ShdcStep", builder.allocator, make),
            .exe = shdc_exe.getOutputSource(),
            .src = src_file_path,
            .dest = dest_file_path,
        };
        self.step.dependOn(&shdc_exe.step);
        return self;
    }
    // Step callback: optionally skip unmodified sources, then spawn sokol-shdc
    // and fail on a non-zero exit code or unclean termination.
    fn make(step: *Step) !void {
        const self = @fieldParentPtr(ShdcStep, "step", step);
        // Best-effort mtime comparison: any failure (missing dest, stat error)
        // breaks out of the labelled block and recompiles unconditionally.
        if (self.check_modified) chk_mod: {
            const f_src = std.fs.cwd().openFile(self.src, .{}) catch break :chk_mod;
            defer f_src.close();
            const f_dst = std.fs.cwd().openFile(self.dest, .{}) catch break :chk_mod;
            defer f_dst.close();
            const src_stat = f_src.stat() catch break :chk_mod;
            const dst_stat = f_dst.stat() catch break :chk_mod;
            self.src_mtime = src_stat.mtime;
            if (src_stat.mtime <= dst_stat.mtime) {
                if (self.builder.verbose) std.debug.print("ShdcStep: skip `{s}` - not modified\n", .{self.src});
                return;
            }
        }
        if (self.builder.verbose) std.debug.print("ShdcStep: compiling `{s}` -> `{s}`\n", .{ self.src, self.dest });
        // attempt to make dest directory tree in case it doesn't exist
        self.builder.makePath(std.fs.path.dirname(self.dest).?) catch {};
        // Assemble the sokol-shdc command line from Args.
        var argv = std.ArrayList([]const u8).init(self.builder.allocator);
        defer argv.deinit();
        argv.append(self.exe.getPath(self.builder)) catch unreachable;
        argv.append("-f") catch unreachable;
        argv.append(self.args.format) catch unreachable;
        argv.append("-l") catch unreachable;
        argv.append(std.mem.join(self.builder.allocator, ":", self.args.slangs) catch unreachable) catch unreachable;
        if (self.args.module) |m| {
            try argv.append("-m");
            try argv.append(m);
        }
        if (self.args.reflection) try argv.append("-r");
        if (self.args.bytecode) try argv.append("-b");
        try argv.append("-i");
        try argv.append(self.src);
        try argv.append("-o");
        try argv.append(self.dest);
        for (self.args.extra) |arg| {
            try argv.append(arg);
        }
        const child = try std.ChildProcess.init(argv.items, self.builder.allocator);
        defer child.deinit();
        child.cwd = self.builder.build_root;
        child.stdin_behavior = .Ignore;
        child.stdout_behavior = .Pipe;
        child.stderr_behavior = .Pipe;
        child.spawn() catch |err| {
            std.debug.print("ShdcStep: Unable to spawn {s}: {s}\n", .{ argv.items, @errorName(err) });
            return err;
        };
        // Drain both pipes before wait(); buffers are capped by max_stdout_size.
        var stdout: []const u8 = try child.stdout.?.reader().readAllAlloc(self.builder.allocator, self.max_stdout_size);
        defer self.builder.allocator.free(stdout);
        var stderr: []const u8 = try child.stderr.?.reader().readAllAlloc(self.builder.allocator, self.max_stdout_size);
        defer self.builder.allocator.free(stderr);
        const term = child.wait() catch |err| {
            std.debug.print("Unable to spawn {s}: {s}\n", .{ argv.items[0], @errorName(err) });
            return err;
        };
        var rc: ?u8 = null; // return code; stays null if the child did not exit normally
        if (term == .Exited) rc = term.Exited;
        // shdc should return 0 and its stdout/err usually should be empty - print this debug otherwise
        if (rc == null or rc.? != 0 or stdout.len > 0 or stderr.len > 0) {
            std.debug.print(
                \\ShdcStep command: {s}
                \\========= stdout: ====================
                \\{s}
                \\========= stderr: ====================
                \\{s}
                \\======================================
                \\
            , .{ argv.items, stdout, stderr });
        }
        if (rc) |c| {
            if (c != 0) {
                std.debug.print("ShdcStep: bad exit code {}:\n", .{c});
                return error.UnexpectedExitCode;
            }
        } else {
            std.debug.print("ShdcStep: command terminated unexpectedly:\n", .{});
            return error.UncleanExit;
        }
    }
};
/// Static library wrapping the bundled getopt implementation (plain C).
fn lib_getopt(b: *bld.Builder, target: std.zig.CrossTarget, mode: std.builtin.Mode, comptime prefix_path: []const u8) *bld.LibExeObjStep {
    const lib = b.addStaticLibrary("getopt", null);
    lib.setTarget(target);
    lib.setBuildMode(mode);
    lib.linkSystemLibrary("c");
    lib.addIncludeDir(prefix_path ++ "ext/getopt/include");
    lib.addCSourceFile(prefix_path ++ "ext/getopt/src/getopt.c", &common_c_flags);
    return lib;
}
/// Static library wrapping the single-file pystring C++ helper library.
fn lib_pystring(b: *bld.Builder, target : std.zig.CrossTarget, mode : std.builtin.Mode, comptime prefix_path: []const u8) *bld.LibExeObjStep {
    const lib = b.addStaticLibrary("pystring", null);
    lib.setTarget(target);
    lib.setBuildMode(mode);
    // pystring is C++, so link both runtimes.
    lib.linkSystemLibrary("c");
    lib.linkSystemLibrary("c++");
    lib.addCSourceFile(prefix_path ++ "ext/pystring/pystring.cpp", &common_cpp_flags);
    return lib;
}
/// Static library for the {fmt} formatting library.
fn lib_fmt(b: *bld.Builder, target : std.zig.CrossTarget, mode : std.builtin.Mode, comptime prefix_path: []const u8) *bld.LibExeObjStep {
    const lib = b.addStaticLibrary("fmt", null);
    lib.setTarget(target);
    lib.setBuildMode(mode);
    lib.linkSystemLibrary("c");
    lib.linkSystemLibrary("c++");
    lib.addIncludeDir(prefix_path ++ "ext/fmt/include");
    const src_dir = prefix_path ++ "ext/fmt/src/";
    // inline for: paths are built with comptime `++` concatenation.
    inline for ([_][]const u8{ "format.cc", "os.cc" }) |name| {
        lib.addCSourceFile(src_dir ++ name, &common_cpp_flags);
    }
    return lib;
}
/// Static library for SPIRV-Cross (SPIR-V to GLSL/MSL/HLSL translation).
/// `prefix_path` is prepended to all paths so this works when invoked from a
/// parent project's build.zig.
fn lib_spirvcross(b: *bld.Builder, target : std.zig.CrossTarget, mode : std.builtin.Mode, comptime prefix_path: []const u8) *bld.LibExeObjStep {
    const dir = prefix_path ++ "ext/SPIRV-Cross/";
    const sources = [_][]const u8 {
        "spirv_cross.cpp",
        "spirv_parser.cpp",
        "spirv_cross_parsed_ir.cpp",
        "spirv_cfg.cpp",
        "spirv_glsl.cpp",
        "spirv_msl.cpp",
        "spirv_hlsl.cpp",
        "spirv_reflect.cpp",
        "spirv_cross_util.cpp",
    };
    const flags = common_cpp_flags ++ spvcross_public_cpp_flags;
    const lib = b.addStaticLibrary("spirvcross", null);
    lib.setTarget(target);
    lib.setBuildMode(mode);
    lib.linkSystemLibrary("c");
    lib.linkSystemLibrary("c++");
    // Fix: the include path must honor prefix_path (every sibling lib_* does),
    // otherwise builds invoked from a parent project fail to find the headers.
    lib.addIncludeDir(prefix_path ++ "ext/SPIRV-Cross");
    inline for (sources) |src| {
        lib.addCSourceFile(dir ++ src, &flags);
    }
    return lib;
}
/// Static library for glslang (GLSL frontend and SPIR-V backend).
/// Selects OS-dependent defines and sources for the requested target.
fn lib_glslang(b: *bld.Builder, target : std.zig.CrossTarget, mode : std.builtin.Mode, comptime prefix_path: []const u8) *bld.LibExeObjStep {
    const lib = b.addStaticLibrary("glslang", null);
    // Fix: configure the target BEFORE inspecting lib.target below. The
    // original computed `flags` from lib.target prior to setTarget(), so the
    // -DGLSLANG_OSINCLUDE_* define was chosen from the default (native) target
    // while the OS-specific sources (checked after setTarget) used the
    // requested one — inconsistent when cross-compiling to/from Windows.
    lib.setTarget(target);
    lib.setBuildMode(mode);
    const dir = prefix_path ++ "ext/glslang/";
    const sources = [_][]const u8 {
        "OGLCompilersDLL/InitializeDll.cpp",
        "glslang/CInterface/glslang_c_interface.cpp",
        "glslang/GenericCodeGen/CodeGen.cpp",
        "glslang/GenericCodeGen/Link.cpp",
        "glslang/MachineIndependent/preprocessor/PpAtom.cpp",
        "glslang/MachineIndependent/preprocessor/PpContext.cpp",
        "glslang/MachineIndependent/preprocessor/Pp.cpp",
        "glslang/MachineIndependent/preprocessor/PpScanner.cpp",
        "glslang/MachineIndependent/preprocessor/PpTokens.cpp",
        "glslang/MachineIndependent/attribute.cpp",
        "glslang/MachineIndependent/Constant.cpp",
        "glslang/MachineIndependent/glslang_tab.cpp",
        "glslang/MachineIndependent/InfoSink.cpp",
        "glslang/MachineIndependent/Initialize.cpp",
        "glslang/MachineIndependent/Intermediate.cpp",
        "glslang/MachineIndependent/intermOut.cpp",
        "glslang/MachineIndependent/IntermTraverse.cpp",
        "glslang/MachineIndependent/iomapper.cpp",
        "glslang/MachineIndependent/limits.cpp",
        "glslang/MachineIndependent/linkValidate.cpp",
        "glslang/MachineIndependent/parseConst.cpp",
        "glslang/MachineIndependent/ParseContextBase.cpp",
        "glslang/MachineIndependent/ParseHelper.cpp",
        "glslang/MachineIndependent/PoolAlloc.cpp",
        "glslang/MachineIndependent/propagateNoContraction.cpp",
        "glslang/MachineIndependent/reflection.cpp",
        "glslang/MachineIndependent/RemoveTree.cpp",
        "glslang/MachineIndependent/Scan.cpp",
        "glslang/MachineIndependent/ShaderLang.cpp",
        "glslang/MachineIndependent/SymbolTable.cpp",
        "glslang/MachineIndependent/Versions.cpp",
        "glslang/MachineIndependent/SpirvIntrinsics.cpp",
        "SPIRV/disassemble.cpp",
        "SPIRV/doc.cpp",
        "SPIRV/GlslangToSpv.cpp",
        "SPIRV/InReadableOrder.cpp",
        "SPIRV/Logger.cpp",
        "SPIRV/SpvBuilder.cpp",
        "SPIRV/SpvPostProcess.cpp",
        "SPIRV/SPVRemapper.cpp",
        "SPIRV/SpvTools.cpp",
    };
    const incl_dirs = [_][]const u8 {
        "ext/generated",
        "ext/glslang",
        "ext/glslang/glslang",
        "ext/glslang/glslang/Public",
        "ext/glslang/glslang/Include",
        "ext/glslang/SPIRV",
        "ext/SPIRV-Tools/include"
    };
    const win_sources = [_][]const u8 {
        "glslang/OSDependent/Windows/ossource.cpp"
    };
    const unix_sources = [_][]const u8 {
        "glslang/OSDependent/Unix/ossource.cpp"
    };
    const cmn_flags = common_cpp_flags ++ [_][]const u8{ "-DENABLE_OPT=1" };
    const win_flags = cmn_flags ++ [_][]const u8{ "-DGLSLANG_OSINCLUDE_WIN32" };
    const unx_flags = cmn_flags ++ [_][]const u8{ "-DGLSLANG_OSINCLUDE_UNIX" };
    // Now consistent with the OS-specific source selection below.
    const flags = if (lib.target.isWindows()) win_flags else unx_flags;
    lib.linkSystemLibrary("c");
    lib.linkSystemLibrary("c++");
    inline for (incl_dirs) |incl_dir| {
        lib.addIncludeDir(prefix_path ++ incl_dir);
    }
    inline for (sources) |src| {
        lib.addCSourceFile(dir ++ src, &flags);
    }
    if (lib.target.isWindows()) {
        inline for (win_sources) |src| {
            lib.addCSourceFile(dir ++ src, &flags);
        }
    } else {
        inline for (unix_sources) |src| {
            lib.addCSourceFile(dir ++ src, &flags);
        }
    }
    return lib;
}
/// Static library for SPIRV-Tools (assembler/disassembler, validator and
/// optimizer), used by glslang when built with -DENABLE_OPT=1.
fn lib_spirvtools(b: *bld.Builder, target: std.zig.CrossTarget, mode: std.builtin.Mode, comptime prefix_path: []const u8) *bld.LibExeObjStep {
    const dir = prefix_path ++ "ext/SPIRV-Tools/source/";
    const sources = [_][]const u8 {
        // Core (assembler, disassembler, tables):
        "assembly_grammar.cpp",
        "binary.cpp",
        "diagnostic.cpp",
        "disassemble.cpp",
        "enum_string_mapping.cpp",
        "extensions.cpp",
        "ext_inst.cpp",
        "libspirv.cpp",
        "name_mapper.cpp",
        "opcode.cpp",
        "operand.cpp",
        "parsed_operand.cpp",
        "pch_source.cpp",
        "print.cpp",
        "software_version.cpp",
        "spirv_endian.cpp",
        "spirv_fuzzer_options.cpp",
        "spirv_optimizer_options.cpp",
        "spirv_reducer_options.cpp",
        "spirv_target_env.cpp",
        "spirv_validator_options.cpp",
        "table.cpp",
        "text.cpp",
        "text_handler.cpp",
        // util/ helpers:
        "util/bit_vector.cpp",
        "util/parse_number.cpp",
        "util/string_utils.cpp",
        "util/timer.cpp",
        // val/ (validator):
        "val/basic_block.cpp",
        "val/construct.cpp",
        "val/function.cpp",
        "val/instruction.cpp",
        "val/validate_adjacency.cpp",
        "val/validate_annotation.cpp",
        "val/validate_arithmetics.cpp",
        "val/validate_atomics.cpp",
        "val/validate_barriers.cpp",
        "val/validate_bitwise.cpp",
        "val/validate_builtins.cpp",
        "val/validate_capability.cpp",
        "val/validate_cfg.cpp",
        "val/validate_composites.cpp",
        "val/validate_constants.cpp",
        "val/validate_conversion.cpp",
        "val/validate.cpp",
        "val/validate_debug.cpp",
        "val/validate_decorations.cpp",
        "val/validate_derivatives.cpp",
        "val/validate_execution_limitations.cpp",
        "val/validate_extensions.cpp",
        "val/validate_function.cpp",
        "val/validate_id.cpp",
        "val/validate_image.cpp",
        "val/validate_instruction.cpp",
        "val/validate_interfaces.cpp",
        "val/validate_layout.cpp",
        "val/validate_literals.cpp",
        "val/validate_logicals.cpp",
        "val/validate_memory.cpp",
        "val/validate_memory_semantics.cpp",
        "val/validate_misc.cpp",
        "val/validate_mode_setting.cpp",
        "val/validate_non_uniform.cpp",
        "val/validate_primitives.cpp",
        "val/validate_scopes.cpp",
        "val/validate_small_type_uses.cpp",
        "val/validate_type.cpp",
        "val/validation_state.cpp",
        // opt/ (optimizer passes):
        "opt/aggressive_dead_code_elim_pass.cpp",
        "opt/amd_ext_to_khr.cpp",
        "opt/basic_block.cpp",
        "opt/block_merge_pass.cpp",
        "opt/block_merge_util.cpp",
        "opt/build_module.cpp",
        "opt/ccp_pass.cpp",
        "opt/cfg_cleanup_pass.cpp",
        "opt/cfg.cpp",
        "opt/code_sink.cpp",
        "opt/combine_access_chains.cpp",
        "opt/compact_ids_pass.cpp",
        "opt/composite.cpp",
        "opt/constants.cpp",
        "opt/const_folding_rules.cpp",
        "opt/convert_to_half_pass.cpp",
        "opt/copy_prop_arrays.cpp",
        "opt/dead_branch_elim_pass.cpp",
        "opt/dead_insert_elim_pass.cpp",
        "opt/dead_variable_elimination.cpp",
        "opt/decoration_manager.cpp",
        "opt/def_use_manager.cpp",
        "opt/desc_sroa.cpp",
        "opt/dominator_analysis.cpp",
        "opt/dominator_tree.cpp",
        "opt/eliminate_dead_constant_pass.cpp",
        "opt/eliminate_dead_functions_pass.cpp",
        "opt/eliminate_dead_functions_util.cpp",
        "opt/eliminate_dead_members_pass.cpp",
        "opt/feature_manager.cpp",
        "opt/fix_storage_class.cpp",
        "opt/flatten_decoration_pass.cpp",
        "opt/fold.cpp",
        "opt/folding_rules.cpp",
        "opt/fold_spec_constant_op_and_composite_pass.cpp",
        "opt/freeze_spec_constant_value_pass.cpp",
        "opt/function.cpp",
        "opt/graphics_robust_access_pass.cpp",
        "opt/if_conversion.cpp",
        "opt/inline_exhaustive_pass.cpp",
        "opt/inline_opaque_pass.cpp",
        "opt/inline_pass.cpp",
        "opt/inst_bindless_check_pass.cpp",
        "opt/inst_buff_addr_check_pass.cpp",
        "opt/instruction.cpp",
        "opt/instruction_list.cpp",
        "opt/instrument_pass.cpp",
        "opt/ir_context.cpp",
        "opt/ir_loader.cpp",
        "opt/licm_pass.cpp",
        "opt/local_access_chain_convert_pass.cpp",
        "opt/local_redundancy_elimination.cpp",
        "opt/local_single_block_elim_pass.cpp",
        "opt/local_single_store_elim_pass.cpp",
        "opt/loop_dependence.cpp",
        "opt/loop_dependence_helpers.cpp",
        "opt/loop_descriptor.cpp",
        "opt/loop_fission.cpp",
        "opt/loop_fusion.cpp",
        "opt/loop_fusion_pass.cpp",
        "opt/loop_peeling.cpp",
        "opt/loop_unroller.cpp",
        "opt/loop_unswitch_pass.cpp",
        "opt/loop_utils.cpp",
        "opt/mem_pass.cpp",
        "opt/merge_return_pass.cpp",
        "opt/module.cpp",
        "opt/optimizer.cpp",
        "opt/pass.cpp",
        "opt/pass_manager.cpp",
        "opt/pch_source_opt.cpp",
        "opt/private_to_local_pass.cpp",
        "opt/propagator.cpp",
        "opt/reduce_load_size.cpp",
        "opt/redundancy_elimination.cpp",
        "opt/register_pressure.cpp",
        "opt/relax_float_ops_pass.cpp",
        "opt/remove_duplicates_pass.cpp",
        "opt/replace_invalid_opc.cpp",
        "opt/scalar_analysis.cpp",
        "opt/scalar_analysis_simplification.cpp",
        "opt/scalar_replacement_pass.cpp",
        "opt/set_spec_constant_default_value_pass.cpp",
        "opt/simplification_pass.cpp",
        "opt/ssa_rewrite_pass.cpp",
        "opt/strength_reduction_pass.cpp",
        "opt/strip_debug_info_pass.cpp",
        "opt/struct_cfg_analysis.cpp",
        "opt/type_manager.cpp",
        "opt/types.cpp",
        "opt/unify_const_pass.cpp",
        "opt/upgrade_memory_model.cpp",
        "opt/value_number_table.cpp",
        "opt/vector_dce.cpp",
        "opt/workaround1209.cpp",
        "opt/wrap_opkill.cpp",
        "opt/strip_nonsemantic_info_pass.cpp",
        "opt/convert_to_sampled_image_pass.cpp",
        "opt/interp_fixup_pass.cpp",
        "opt/inst_debug_printf_pass.cpp",
        "opt/remove_unused_interface_variables_pass.cpp",
        "opt/debug_info_manager.cpp",
        "opt/replace_desc_array_access_using_var_index.cpp",
        "opt/desc_sroa_util.cpp",
    };
    // "ext/generated" holds pre-generated grammar/header files normally
    // produced by SPIRV-Tools' CMake build.
    const incl_dirs = [_][]const u8 {
        "ext/generated",
        "ext/SPIRV-Tools",
        "ext/SPIRV-Tools/include",
        "ext/SPIRV-Headers/include",
    };
    const flags = common_cpp_flags;
    const lib = b.addStaticLibrary("spirvtools", null);
    lib.setTarget(target);
    lib.setBuildMode(mode);
    lib.linkSystemLibrary("c");
    lib.linkSystemLibrary("c++");
    inline for (incl_dirs) |incl_dir| {
        lib.addIncludeDir(prefix_path ++ incl_dir);
    }
    inline for (sources) |src| {
        lib.addCSourceFile(dir ++ src, &flags);
    }
    return lib;
}
// ===========================================================================
// End of file: build.zig (sokol-tools). A separate file (VR replication
// module) follows below.
// ===========================================================================
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.vr);
pub const log_level: std.log.Level = .debug;
const config = @import("config.zig");
const MessageBus = @import("message_bus.zig").MessageBusReplica;
const Message = @import("message_bus.zig").Message;
const ConcurrentRanges = @import("concurrent_ranges.zig").ConcurrentRanges;
const Range = @import("concurrent_ranges.zig").Range;
const Operation = @import("state_machine.zig").Operation;
const StateMachine = @import("state_machine.zig").StateMachine;
/// Replica status as defined by the Viewstamped Replication protocol.
pub const Status = enum {
    normal,
    view_change,
    recovering,
};
// TODO Command for client to fetch its latest request number from the cluster.
/// Viewstamped Replication protocol commands:
// A packed enum(u8): the variant order fixes each command's numeric value,
// so this doubles as the on-wire/on-disk encoding. Do not reorder variants.
pub const Command = packed enum(u8) {
    reserved,
    ping,
    pong,
    request,
    prepare,
    prepare_ok,
    reply,
    commit,
    start_view_change,
    do_view_change,
    start_view,
    request_start_view,
    request_headers,
    request_prepare,
    headers,
    nack_prepare,
};
/// Network message and journal entry header:
/// We reuse the same header for both so that prepare messages from the leader can simply be
/// journalled as is by the followers without requiring any further modification.
pub const Header = packed struct {
    comptime {
        // Packed layout is load-bearing: exactly 128 bytes, written to disk
        // and sent over the network as-is.
        assert(@sizeOf(Header) == 128);
    }
    /// A checksum covering only the rest of this header (but including checksum_body):
    /// This enables the header to be trusted without having to recv() or read() associated body.
    /// This checksum is enough to uniquely identify a network message or journal entry.
    checksum: u128 = 0,
    /// A checksum covering only associated body.
    checksum_body: u128 = 0,
    /// The checksum of the message to which this message refers, or a unique recovery nonce:
    /// We use this nonce in various ways, for example:
    /// * A prepare sets nonce to the checksum of the prior prepare to create a hash chain.
    /// * A prepare_ok sets nonce to the checksum of the prepare it wants to ack.
    /// * A commit sets nonce to the checksum of the latest committed op.
    /// This adds an additional cryptographic safety control beyond VR's op and commit numbers.
    nonce: u128 = 0,
    /// Each client records its own client id and a current request number. A client is allowed to
    /// have just one outstanding request at a time.
    client: u128 = 0,
    /// The cluster id binds intention into the header, so that a client or replica can indicate
    /// which cluster it thinks it's speaking to, instead of accidentally talking to the wrong
    /// cluster (for example, staging vs production).
    cluster: u128,
    /// Every message sent from one replica to another contains the sending replica's current view:
    view: u64 = 0,
    /// The op number:
    op: u64 = 0,
    /// The commit number:
    commit: u64 = 0,
    /// The journal offset to which this message relates:
    /// This enables direct access to a prepare, without requiring previous variable-length entries.
    /// While we use fixed-size data structures, a batch will contain a variable amount of them.
    offset: u64 = 0,
    /// The size of this message header and any associated body:
    /// This must be 0 for an empty header with command == .reserved.
    size: u32 = @sizeOf(Header),
    /// The cluster reconfiguration epoch number (for future use):
    epoch: u32 = 0,
    /// Each request is given a number by the client and later requests must have larger numbers
    /// than earlier ones. The request number is used by the replicas to avoid running requests more
    /// than once; it is also used by the client to discard duplicate responses to its requests.
    request: u32 = 0,
    /// The index of the replica in the cluster configuration array that originated this message:
    /// This only identifies the ultimate author because messages may be forwarded amongst replicas.
    replica: u16 = 0,
    /// The VR protocol command for this message:
    command: Command,
    /// The state machine operation to apply:
    operation: Operation = .reserved,
    /// Blake3 hash of all header bytes after the checksum field (this covers
    /// checksum_body), truncated to 128 bits.
    pub fn calculate_checksum(self: *const Header) u128 {
        const checksum_size = @sizeOf(@TypeOf(self.checksum));
        assert(checksum_size == 16);
        var target: [32]u8 = undefined;
        std.crypto.hash.Blake3.hash(std.mem.asBytes(self)[checksum_size..], target[0..], .{});
        return @bitCast(u128, target[0..checksum_size].*);
    }
    /// Blake3 hash of the associated body only, truncated to 128 bits.
    /// Asserts that `size` is consistent with the given body length.
    pub fn calculate_checksum_body(self: *const Header, body: []const u8) u128 {
        assert(self.size == @sizeOf(Header) + body.len);
        const checksum_size = @sizeOf(@TypeOf(self.checksum_body));
        assert(checksum_size == 16);
        var target: [32]u8 = undefined;
        std.crypto.hash.Blake3.hash(body[0..], target[0..], .{});
        return @bitCast(u128, target[0..checksum_size].*);
    }
    /// This must be called only after set_checksum_body() so that checksum_body is also covered:
    pub fn set_checksum(self: *Header) void {
        self.checksum = self.calculate_checksum();
    }
    pub fn set_checksum_body(self: *Header, body: []const u8) void {
        self.checksum_body = self.calculate_checksum_body(body);
    }
    /// Verifies the stored header checksum against the current contents.
    pub fn valid_checksum(self: *const Header) bool {
        return self.checksum == self.calculate_checksum();
    }
    /// Verifies the stored body checksum against the given body.
    pub fn valid_checksum_body(self: *const Header, body: []const u8) bool {
        return self.checksum_body == self.calculate_checksum_body(body);
    }
    /// Returns null if all fields are set correctly according to the command, or else a warning.
    /// This does not verify that checksum is valid, and expects that this has already been done.
    pub fn invalid(self: *const Header) ?[]const u8 {
        if (self.size < @sizeOf(Header)) return "size < @sizeOf(Header)";
        if (self.epoch != 0) return "epoch != 0";
        return switch (self.command) {
            .reserved => self.invalid_reserved(),
            .request => self.invalid_request(),
            .prepare => self.invalid_prepare(),
            .prepare_ok => self.invalid_prepare_ok(),
            else => return null, // TODO Add validators for all commands.
        };
    }
    // A .reserved header must be all zeroes apart from size/command/checksums.
    fn invalid_reserved(self: *const Header) ?[]const u8 {
        assert(self.command == .reserved);
        if (self.nonce != 0) return "nonce != 0";
        if (self.client != 0) return "client != 0";
        if (self.cluster != 0) return "cluster != 0";
        if (self.view != 0) return "view != 0";
        if (self.op != 0) return "op != 0";
        if (self.commit != 0) return "commit != 0";
        if (self.offset != 0) return "offset != 0";
        if (self.request != 0) return "request != 0";
        if (self.replica != 0) return "replica != 0";
        if (self.operation != .reserved) return "operation != .reserved";
        return null;
    }
    // A client .request must carry client/cluster/request ids and nothing
    // replica-assigned (view/op/commit/offset/replica must be zero).
    fn invalid_request(self: *const Header) ?[]const u8 {
        assert(self.command == .request);
        if (self.nonce != 0) return "nonce != 0";
        if (self.client == 0) return "client == 0";
        if (self.cluster == 0) return "cluster == 0";
        if (self.view != 0) return "view != 0";
        if (self.op != 0) return "op != 0";
        if (self.commit != 0) return "commit != 0";
        if (self.offset != 0) return "offset != 0";
        if (self.request == 0) return "request == 0";
        if (self.replica != 0) return "replica != 0";
        if (self.operation == .reserved) return "operation == .reserved";
        return null;
    }
    // .prepare rules differ for the special .init operation (all-zero fields)
    // versus ordinary operations (client-originated, op > commit).
    fn invalid_prepare(self: *const Header) ?[]const u8 {
        assert(self.command == .prepare);
        switch (self.operation) {
            .reserved => return "operation == .reserved",
            .init => {
                if (self.nonce != 0) return "init: nonce != 0";
                if (self.client != 0) return "init: client != 0";
                if (self.cluster == 0) return "init: cluster == 0";
                if (self.view != 0) return "init: view != 0";
                if (self.op != 0) return "init: op != 0";
                if (self.commit != 0) return "init: commit != 0";
                if (self.offset != 0) return "init: offset != 0";
                if (self.size != @sizeOf(Header)) return "init: size != @sizeOf(Header)";
                if (self.request != 0) return "init: request != 0";
                if (self.replica != 0) return "init: replica != 0";
            },
            else => {
                if (self.client == 0) return "client == 0";
                if (self.cluster == 0) return "cluster == 0";
                if (self.op == 0) return "op == 0";
                if (self.op <= self.commit) return "op <= commit";
                if (self.request == 0) return "request == 0";
            },
        }
        return null;
    }
    // A .prepare_ok mirrors the prepare it acks; it never carries a body.
    fn invalid_prepare_ok(self: *const Header) ?[]const u8 {
        assert(self.command == .prepare_ok);
        if (self.size != @sizeOf(Header)) return "size != @sizeOf(Header)";
        if (self.cluster == 0) return "cluster == 0";
        switch (self.operation) {
            .reserved => return "operation == .reserved",
            .init => {
                if (self.nonce != 0) return "init: nonce != 0";
                if (self.client != 0) return "init: client != 0";
                if (self.view != 0) return "init: view != 0";
                if (self.op != 0) return "init: op != 0";
                if (self.commit != 0) return "init: commit != 0";
                if (self.offset != 0) return "init: offset != 0";
                if (self.request != 0) return "init: request != 0";
                if (self.replica != 0) return "init: replica != 0";
            },
            else => {
                if (self.client == 0) return "client == 0";
                if (self.op == 0) return "op == 0";
                if (self.op <= self.commit) return "op <= commit";
                if (self.request == 0) return "request == 0";
            },
        }
        return null;
    }
    /// Returns whether the immediate sender is a replica or client (if this can be determined).
    /// Some commands such as .request or .prepare may be forwarded on to other replicas so that
    /// Header.replica or Header.client only identifies the ultimate origin, not the latest peer.
    pub fn peer_type(self: *const Header) enum { unknown, replica, client } {
        switch (self.command) {
            .reserved => unreachable,
            // These messages cannot identify the peer as they may have been forwarded:
            .request, .prepare => return .unknown,
            // These messages identify the peer as either a replica or a client:
            .ping, .pong => {
                if (self.client > 0) {
                    assert(self.replica == 0);
                    return .client;
                } else {
                    return .replica;
                }
            },
            // All other messages identify the peer as a replica:
            else => return .replica,
        }
    }
    /// Returns a fully checksummed, zeroed .reserved header (used elsewhere in
    /// this file to initialize journal header slots).
    pub fn reserved() Header {
        var header = Header{ .command = .reserved, .cluster = 0 };
        header.set_checksum_body(&[0]u8{});
        header.set_checksum();
        assert(header.invalid() == null);
        return header;
    }
};
// Placeholder for per-client session state (currently empty).
const Client = struct {};
// TODO Client table should warn if the client's request number has wrapped past 32 bits.
// This is easy to detect.
// If a client has done a few billion requests, we don't expect to see request 0 come through.
// Placeholder for the table of client sessions (currently empty).
const ClientTable = struct {};
/// TODO Use IO and callbacks:
pub const Storage = struct {
    allocator: *Allocator,
    // Sector-aligned in-memory backing store standing in for a disk.
    memory: []u8 align(config.sector_size),
    size: u64,
    /// Allocates a zeroed, sector-aligned memory region of `size` bytes.
    /// Caller must call deinit() to release it.
    pub fn init(allocator: *Allocator, size: u64) !Storage {
        var memory = try allocator.allocAdvanced(u8, config.sector_size, size, .exact);
        errdefer allocator.free(memory);
        std.mem.set(u8, memory, 0);
        return Storage{
            .allocator = allocator,
            .memory = memory,
            .size = size,
        };
    }
    // Fix: the original signature was `deinit() void` with no parameter, yet
    // the body referenced `self` — a compile error and unusable as a method.
    pub fn deinit(self: *Storage) void {
        self.allocator.free(self.memory);
    }
    /// Detects whether the underlying file system for a given directory fd supports Direct I/O.
    /// Not all Linux file systems support `O_DIRECT`, e.g. a shared macOS volume.
    pub fn fs_supports_direct_io(dir_fd: std.os.fd_t) !bool {
        if (!@hasDecl(std.os, "O_DIRECT")) return false;
        const os = std.os;
        // Fix: `linux` and `fs` were not in scope in this file; qualify both.
        const linux = os.linux;
        const path = "fs_supports_direct_io";
        const dir = std.fs.Dir{ .fd = dir_fd };
        const fd = try os.openatZ(dir_fd, path, os.O_CLOEXEC | os.O_CREAT | os.O_TRUNC, 0o666);
        defer os.close(fd);
        defer dir.deleteFile(path) catch {};
        while (true) {
            const res = os.system.openat(dir_fd, path, os.O_CLOEXEC | os.O_RDONLY | os.O_DIRECT, 0);
            switch (linux.getErrno(res)) {
                0 => {
                    os.close(@intCast(os.fd_t, res));
                    return true;
                },
                linux.EINTR => continue,
                // EINVAL here means the file system rejected O_DIRECT.
                linux.EINVAL => return false,
                else => |err| return os.unexpectedErrno(err),
            }
        }
    }
    /// Reads exactly buffer.len bytes at offset. On EIO the read is subdivided
    /// into sectors to read around faulty sectors; unreadable sectors are
    /// zeroed so they surface as checksum failures rather than crashes.
    pub fn read(self: *Storage, buffer: []u8, offset: u64) void {
        self.assert_bounds_and_alignment(buffer, offset);
        if (self.read_all(buffer, offset)) |bytes_read| {
            if (bytes_read != buffer.len) {
                assert(bytes_read < buffer.len);
                log.emerg("short read: bytes_read={} buffer_len={} offset={}", .{
                    bytes_read,
                    buffer.len,
                    offset,
                });
                @panic("fs corruption: file inode size truncated");
            }
        } else |err| switch (err) {
            error.InputOutput => {
                // The disk was unable to read some sectors (an internal CRC or hardware failure):
                if (buffer.len > config.sector_size) {
                    log.err("latent sector error: offset={}, subdividing read...", .{offset});
                    // Subdivide the read into sectors to read around the faulty sector(s):
                    // This is considerably slower than doing a bulk read.
                    // By now we might have also experienced the disk's read timeout (in seconds).
                    // TODO Docs should instruct on why and how to reduce disk firmware timeouts.
                    // Fix: a bare `var x = 0;` is a comptime_int var (compile
                    // error); the offset needs an explicit runtime type.
                    var buffer_offset: u64 = 0;
                    while (buffer_offset < buffer.len) : (buffer_offset += config.sector_size) {
                        self.read(
                            buffer[buffer_offset..][0..config.sector_size],
                            offset + buffer_offset,
                        );
                    }
                    // buffer.len is a sector multiple (asserted above), so the
                    // loop lands exactly on the end.
                    assert(buffer_offset == buffer.len);
                } else {
                    // Zero any remaining sectors that cannot be read:
                    // We treat these EIO errors the same as a checksum failure.
                    log.err("latent sector error: offset={}, zeroing buffer sector...", .{offset});
                    assert(buffer.len == config.sector_size);
                    // Fix: `mem` was not in scope in this file; use std.mem.
                    std.mem.set(u8, buffer, 0);
                }
            },
            else => {
                // Fix: `buffer_len` was an undefined identifier; log buffer.len.
                log.emerg("impossible read: buffer_len={} offset={} error={}", .{
                    buffer.len,
                    offset,
                    err,
                });
                @panic("impossible read");
            },
        }
    }
    /// Writes buffer at offset; any unrecoverable disk error is fatal.
    pub fn write(self: *Storage, buffer: []const u8, offset: u64) void {
        self.assert_bounds_and_alignment(buffer, offset);
        self.write_all(buffer, offset) catch |err| switch (err) {
            // We assume that the disk will attempt to reallocate a spare sector for any LSE.
            // TODO What if we receive an EIO error because of a faulty cable?
            error.InputOutput => @panic("latent sector error: no spare sectors to reallocate"),
            else => {
                log.emerg("write: buffer.len={} offset={} error={}", .{ buffer.len, offset, err });
                @panic("unrecoverable disk error");
            },
        };
    }
    fn assert_bounds_and_alignment(self: *Storage, buffer: []const u8, offset: u64) void {
        assert(buffer.len > 0);
        assert(offset + buffer.len <= self.size);
        // Ensure that the read or write is aligned correctly for Direct I/O:
        // If this is not the case, the underlying syscall will return EINVAL.
        assert(@mod(@ptrToInt(buffer.ptr), config.sector_size) == 0);
        assert(@mod(buffer.len, config.sector_size) == 0);
        assert(@mod(offset, config.sector_size) == 0);
    }
    // In-memory stand-in for a pread loop; always returns a full read.
    fn read_all(self: *Storage, buffer: []u8, offset: u64) !u64 {
        std.mem.copy(u8, buffer, self.memory[offset .. offset + buffer.len]);
        return buffer.len;
    }
    // In-memory stand-in for a pwrite loop.
    fn write_all(self: *Storage, buffer: []const u8, offset: u64) !void {
        std.mem.copy(u8, self.memory[offset .. offset + buffer.len], buffer);
    }
};
pub const Journal = struct {
allocator: *Allocator,
storage: *Storage,
replica: u16,
size: u64,
size_headers: u64,
size_circular_buffer: u64,
headers: []Header align(config.sector_size),
/// Whether an entry is in memory only and needs to be written or is being written:
/// We use this in the same sense as a dirty bit in the kernel page cache.
/// A dirty bit means that we have not yet prepared the entry, or need to repair a faulty entry.
dirty: BitSet,
/// Whether an entry was written to disk and this write was subsequently lost due to:
/// * corruption,
/// * a misdirected write (or a misdirected read, we do not distinguish), or else
/// * a latent sector error, where the sector can no longer be read.
/// A faulty bit means that we prepared and then lost the entry.
/// A faulty bit requires the dirty bit to also be set so that functions need not check both.
/// A faulty bit is used then only to qualify the severity of the dirty bit.
faulty: BitSet,
/// We copy-on-write to this buffer when writing, as in-memory headers may change concurrently:
write_headers_buffer: []u8 align(config.sector_size),
/// Apart from the header written with the entry, we also store two redundant copies of each
/// header at different locations on disk, and we alternate between these for each append.
/// This tracks which version (0 or 1) should be written to next:
write_headers_version: u1 = 0,
/// These serialize concurrent writes but only for overlapping ranges:
writing_headers: ConcurrentRanges = .{ .name = "write_headers" },
writing_sectors: ConcurrentRanges = .{ .name = "write_sectors" },
    /// Allocates and validates all journal buffers: the in-memory header
    /// array (initialized to .reserved), the dirty/faulty bit sets, and the
    /// copy-on-write header serialization buffer. Caller owns the result.
    pub fn init(
        allocator: *Allocator,
        storage: *Storage,
        replica: u16,
        size: u64,
        headers_count: u32,
    ) !Journal {
        if (@mod(size, config.sector_size) != 0) return error.SizeMustBeAMultipleOfSectorSize;
        if (!std.math.isPowerOfTwo(headers_count)) return error.HeadersCountMustBeAPowerOfTwo;
        assert(storage.size == size);
        // Headers are written a sector at a time, so at least one sector's
        // worth of headers is required.
        const headers_per_sector = @divExact(config.sector_size, @sizeOf(Header));
        assert(headers_per_sector > 0);
        assert(headers_count >= headers_per_sector);
        var headers = try allocator.allocAdvanced(
            Header,
            config.sector_size,
            headers_count,
            .exact,
        );
        errdefer allocator.free(headers);
        std.mem.set(Header, headers, Header.reserved());
        var dirty = try BitSet.init(allocator, headers.len);
        errdefer dirty.deinit();
        var faulty = try BitSet.init(allocator, headers.len);
        errdefer faulty.deinit();
        var write_headers_buffer = try allocator.allocAdvanced(
            u8,
            config.sector_size,
            @sizeOf(Header) * headers.len,
            .exact,
        );
        errdefer allocator.free(write_headers_buffer);
        std.mem.set(u8, write_headers_buffer, 0);
        // Disk layout: two redundant copies of the headers region, then the
        // circular buffer of prepares in the remaining space.
        const header_copies = 2;
        const size_headers = headers.len * @sizeOf(Header);
        const size_headers_copies = size_headers * header_copies;
        if (size_headers_copies >= size) return error.SizeTooSmallForHeadersCount;
        const size_circular_buffer = size - size_headers_copies;
        if (size_circular_buffer < 64 * 1024 * 1024) return error.SizeTooSmallForCircularBuffer;
        log.debug("{}: journal: size={} headers_len={} headers={} circular_buffer={}", .{
            replica,
            std.fmt.fmtIntSizeBin(size),
            headers.len,
            std.fmt.fmtIntSizeBin(size_headers),
            std.fmt.fmtIntSizeBin(size_circular_buffer),
        });
        var self = Journal{
            .allocator = allocator,
            .storage = storage,
            .replica = replica,
            .size = size,
            .size_headers = size_headers,
            .size_circular_buffer = size_circular_buffer,
            .headers = headers,
            .dirty = dirty,
            .faulty = faulty,
            .write_headers_buffer = write_headers_buffer,
        };
        assert(@mod(self.size_circular_buffer, config.sector_size) == 0);
        assert(@mod(@ptrToInt(&self.headers[0]), config.sector_size) == 0);
        assert(self.dirty.bits.len == self.headers.len);
        assert(self.faulty.bits.len == self.headers.len);
        assert(self.write_headers_buffer.len == @sizeOf(Header) * self.headers.len);
        return self;
    }
pub fn deinit(self: *Journal) void {}
/// Asserts that headers are .reserved (zeroed) from `op_min` (inclusive) to the end.
pub fn assert_headers_reserved_from(self: *Journal, op_min: u64) void {
    // TODO Snapshots
    var op = op_min;
    while (op < self.headers.len) : (op += 1) {
        assert(self.headers[op].command == .reserved);
    }
}
/// Returns any existing entry at the location indicated by header.op.
/// This existing entry may have an older or newer op number.
pub fn entry(self: *Journal, header: *const Header) ?*const Header {
    assert(header.command == .prepare);
    const op = header.op;
    return self.entry_for_op(op);
}
/// We use the op number directly to index into the headers array and locate ops without a scan.
/// Op numbers cycle through the headers array and do not wrap when offsets wrap. The reason for
/// this is to prevent variable offsets from impacting the location of an op. Otherwise, the
/// same op number but for different views could exist at multiple locations in the journal.
pub fn entry_for_op(self: *Journal, op: u64) ?*const Header {
    // TODO Snapshots
    const slot = &self.headers[op];
    return switch (slot.command) {
        .reserved => null,
        .prepare => slot,
        // Only .reserved and .prepare headers may ever occupy a journal slot:
        else => unreachable,
    };
}
/// Returns the entry at `@mod(op)` location, but only if `entry.op == op`, else `null`.
/// Be careful of using this without considering that there may still be an existing op.
pub fn entry_for_op_exact(self: *Journal, op: u64) ?*const Header {
    const existing = self.entry_for_op(op) orelse return null;
    return if (existing.op == op) existing else null;
}
/// As per `entry_for_op_exact()`, but only if there is an optional checksum match.
/// A `null` checksum matches any entry at the exact op.
pub fn entry_for_op_exact_with_checksum(
    self: *Journal,
    op: u64,
    checksum: ?u128,
) ?*const Header {
    const existing = self.entry_for_op_exact(op) orelse return null;
    assert(existing.op == op);
    const required = checksum orelse return existing;
    return if (existing.checksum == required) existing else null;
}
/// Returns the entry in the slot before `header`, or null at the start of the journal.
pub fn previous_entry(self: *Journal, header: *const Header) ?*const Header {
    // TODO Snapshots
    return if (header.op == 0) null else self.entry_for_op(header.op - 1);
}
/// Returns the entry in the slot after `header`, or null at the end of the journal.
pub fn next_entry(self: *Journal, header: *const Header) ?*const Header {
    // TODO Snapshots
    const next_op = header.op + 1;
    if (next_op == self.headers.len) return null;
    return self.entry_for_op(next_op);
}
/// Returns the first circular-buffer offset past `header`'s sector-padded entry.
pub fn next_offset(self: *Journal, header: *const Header) u64 {
    // TODO Snapshots
    assert(header.command == .prepare);
    const entry_size_padded = Journal.sector_ceil(header.size);
    return header.offset + entry_size_padded;
}
/// Whether the journal currently holds exactly this entry (matched by checksum) in its slot.
pub fn has(self: *Journal, header: *const Header) bool {
    // TODO Snapshots
    const existing = &self.headers[header.op];
    if (existing.command == .reserved) {
        // A reserved slot must be fully zeroed:
        assert(existing.checksum == 0);
        assert(existing.checksum_body == 0);
        assert(existing.offset == 0);
        assert(existing.size == 0);
        return false;
    }
    if (existing.checksum != header.checksum) return false;
    // A checksum match implies the body checksum and op also match:
    assert(existing.checksum_body == header.checksum_body);
    assert(existing.op == header.op);
    return true;
}
/// Whether the journal holds this exact entry and it has been durably written (not dirty).
pub fn has_clean(self: *Journal, header: *const Header) bool {
    // TODO Snapshots
    if (!self.has(header)) return false;
    return !self.dirty.bit(header.op);
}
/// Whether the journal holds this exact entry but it is still awaiting a durable write.
pub fn has_dirty(self: *Journal, header: *const Header) bool {
    // TODO Snapshots
    if (!self.has(header)) return false;
    return self.dirty.bit(header.op);
}
/// Copies latest headers between `op_min` and `op_max` (both inclusive) as will fit in `dest`.
/// Reverses the order when copying so that latest headers are copied first, which also protects
/// against the callsite slicing the buffer the wrong way and incorrectly.
/// Skips .reserved headers (gaps between headers).
/// Zeroes the `dest` buffer in case the copy would underflow and leave a buffer bleed.
/// Returns the number of headers actually copied.
pub fn copy_latest_headers_between(
    self: *Journal,
    op_min: u64,
    op_max: u64,
    dest: []Header,
) usize {
    assert(op_min <= op_max);
    assert(dest.len > 0);

    std.mem.set(Header, dest, Header.reserved());

    var count: usize = 0;
    // We start at op_max + 1 but front-load the decrement to avoid overflow when op_min == 0:
    var op = op_max + 1;
    while (op > op_min) {
        op -= 1;
        const header = self.entry_for_op_exact(op) orelse continue;
        dest[count] = header.*;
        assert(dest[count].invalid() == null);
        count += 1;
    }

    return count;
}
/// An inclusive range of op numbers identifying a break in the journal's hash chain.
const HeaderRange = struct { op_min: u64, op_max: u64 };

/// Finds the latest break in headers, searching between `op_min` and `op_max` (both inclusive).
/// A break is a missing header or a header not connected to the next header (by hash chain).
/// Upon finding the highest break, extends the range downwards to cover as much as possible.
///
/// For example: If ops 3, 9 and 10 are missing, returns: `{ .op_min = 9, .op_max = 10 }`.
///
/// Another example: If op 17 is disconnected from op 18, 16 is connected to 17, and 12-15 are
/// missing, returns: `{ .op_min = 12, .op_max = 17 }`.
pub fn find_latest_headers_break_between(
    self: *Journal,
    op_min: u64,
    op_max: u64,
) ?HeaderRange {
    assert(op_min <= op_max);
    var range: ?HeaderRange = null;

    // We iterate from highest to lowest op, examining each adjacent pair (A, B) where
    // A is the entry at `op` and B is the entry at `op + 1` from the previous iteration.
    // We set B, the op after op_max, to null because we only examine breaks <= op_max:
    // In other words, we may report a missing header for op_max itself but not a broken chain.
    var B: ?*const Header = null;

    // Front-load the decrement to avoid overflow when op_min == 0:
    var op = op_max + 1;
    while (op > op_min) {
        op -= 1;

        // Get the entry at @mod(op) location, but only if entry.op == op, else null:
        var A = self.entry_for_op_exact(op);
        if (A) |a| {
            if (B) |b| {
                // If A was reordered then A may have a newer op than B (but an older view).
                // However, here we use entry_for_op_exact() so we can assert a.op + 1 == b.op:
                assert(a.op + 1 == b.op);
                // Further, while repair_header() should never put an older view to the right
                // of a newer view, it may put a newer view to the left of an older view.
                // We therefore do not assert a.view <= b.view unless the hash chain is intact.

                // A exists and B exists:
                if (range) |*r| {
                    assert(b.op == r.op_min);
                    if (a.checksum == b.nonce) {
                        // A is connected to B, but B is disconnected, add A to range:
                        assert(a.view <= b.view);
                        r.op_min = a.op;
                    } else if (a.view < b.view) {
                        // A is not connected to B, and A is older than B, add A to range:
                        r.op_min = a.op;
                    } else if (a.view > b.view) {
                        // A is not connected to B, but A is newer than B, close range:
                        break;
                    } else {
                        // Op numbers in the same view must be connected.
                        unreachable;
                    }
                } else if (a.checksum == b.nonce) {
                    // A is connected to B, and B is connected or B is op_max.
                    assert(a.view <= b.view);
                } else if (a.view < b.view) {
                    // A is not connected to B, and A is older than B, open range:
                    range = .{ .op_min = a.op, .op_max = a.op };
                } else if (a.view > b.view) {
                    // A is not connected to B, but A is newer than B, open and close range:
                    // TODO Add unit test especially for this.
                    // This is important if we see `self.op < self.commit_max` then request
                    // prepares and then later receive a newer view to the left of `self.op`.
                    // We must then repair `self.op` which was reordered through a view change.
                    range = .{ .op_min = b.op, .op_max = b.op };
                    break;
                } else {
                    // Op numbers in the same view must be connected.
                    unreachable;
                }
            } else {
                // A exists and B does not exist (or B has a lower op number):
                if (range) |r| {
                    // We therefore cannot compare A to B, A may be older/newer, close range:
                    assert(r.op_min == op + 1);
                    break;
                } else {
                    // We expect a range if B does not exist, unless:
                    assert(a.op == op_max);
                }
            }
        } else {
            // A does not exist (or A has a lower op number):
            // (An entry may occupy this slot with a wrapped, lower op number.)
            if (self.entry_for_op(op)) |wrapped_a| assert(wrapped_a.op < op);

            if (range) |*r| {
                // Add A to range:
                assert(r.op_min == op + 1);
                r.op_min = op;
            } else {
                // Open range:
                assert(B != null or op == op_max);
                range = .{ .op_min = op, .op_max = op };
            }
        }

        // Slide the window down: the current A becomes the next iteration's B.
        B = A;
    }

    return range;
}
/// Reads `buffer.len` bytes from `offset` on the underlying storage.
fn read_sectors(self: *Journal, buffer: []u8, offset: u64) void {
    // Memory must not be owned by self.headers as self.headers may be modified concurrently:
    const buffer_address = @ptrToInt(buffer.ptr);
    const headers_address = @ptrToInt(self.headers.ptr);
    assert(buffer_address < headers_address or
        buffer_address > headers_address + self.size_headers);

    log.debug("{}: journal: read_sectors: offset={} len={}", .{
        self.replica,
        offset,
        buffer.len,
    });

    self.storage.read(buffer, offset);
}
/// A safe way of removing an entry, where the header must match the current entry to succeed.
fn remove_entry(self: *Journal, header: *const Header) void {
    // Copy the header.op by value to avoid a reset() followed by undefined header.op usage:
    // (`header` may alias `self.headers[op]`, which we are about to overwrite.)
    const op = header.op;
    log.debug("{}: journal: remove_entry: op={}", .{ self.replica, op });

    assert(self.entry(header).?.checksum == header.checksum);
    assert(self.headers[op].checksum == header.checksum); // TODO Snapshots

    self.dirty.clear(op);
    self.faulty.clear(op);
    self.headers[op] = Header.reserved();
}
/// Removes entries from `op_min` (inclusive) onwards.
/// This is used after a view change to remove uncommitted entries discarded by the new leader.
pub fn remove_entries_from(self: *Journal, op_min: u64) void {
    // TODO Snapshots
    // TODO Optimize to jump directly to op:
    assert(op_min > 0);
    log.debug("{}: journal: remove_entries_from: op_min={}", .{ self.replica, op_min });
    for (self.headers) |*header| {
        const removable = header.command == .prepare and header.op >= op_min;
        if (removable) self.remove_entry(header);
    }
    self.assert_headers_reserved_from(op_min);
    // TODO At startup we need to handle entries that may have been removed but now reappear.
    // This is because we do not call `write_headers_between()` here.
}
/// Installs `header` into its journal slot and marks it dirty (pending a durable write).
pub fn set_entry_as_dirty(self: *Journal, header: *const Header) void {
    log.debug("{}: journal: set_entry_as_dirty: op={} checksum={}", .{
        self.replica,
        header.op,
        header.checksum,
    });
    // Replacing a different entry invalidates any faulty state recorded for the old entry:
    if (self.entry(header)) |existing| {
        const replacing_different_entry = existing.checksum != header.checksum;
        if (replacing_different_entry) self.faulty.clear(header.op);
    }
    self.headers[header.op] = header.*;
    self.dirty.set(header.op);
    // Do not clear any faulty bit for the same entry.
}
/// Durably writes a prepare `message` to the journal: first the message sectors into the
/// circular buffer, then the header sector(s), marking the entry clean only if the slot still
/// holds this exact entry afterwards (it may be replaced concurrently, e.g. by a view change).
pub fn write(self: *Journal, message: *const Message) void {
    assert(message.header.command == .prepare);
    assert(message.header.size >= @sizeOf(Header));
    assert(message.header.size <= message.buffer.len);

    // The underlying header memory must be owned by the buffer and not by self.headers:
    // Otherwise, concurrent writes may modify the memory of the pointer while we write.
    assert(@ptrToInt(message.header) == @ptrToInt(message.buffer.ptr));

    // There should be no concurrency between setting an entry as dirty and deciding to write:
    assert(self.has_dirty(message.header));

    const sectors = message.buffer[0..Journal.sector_ceil(message.header.size)];
    assert(message.header.offset + sectors.len <= self.size_circular_buffer);

    if (std.builtin.mode == .Debug) {
        // Assert that any sector padding has already been zeroed:
        var sum_of_sector_padding_bytes: u32 = 0;
        for (sectors[message.header.size..]) |byte| sum_of_sector_padding_bytes += byte;
        assert(sum_of_sector_padding_bytes == 0);
    }

    self.write_debug(message.header, "starting");

    self.write_sectors(message.buffer, self.offset_in_circular_buffer(message.header.offset));
    // Re-check after each (potentially suspending) write: if the slot no longer holds this
    // entry then another actor replaced it and we must not mark the new entry clean.
    if (!self.has(message.header)) {
        self.write_debug(message.header, "entry changed while writing sectors");
        return;
    }

    // TODO Snapshots
    self.write_headers_between(message.header.op, message.header.op);
    if (!self.has(message.header)) {
        self.write_debug(message.header, "entry changed while writing headers");
        return;
    }

    self.write_debug(message.header, "complete, marking clean");
    // TODO Snapshots
    self.dirty.clear(message.header.op);
    self.faulty.clear(message.header.op);
}
/// Logs the progress (`status`) of a write() for the entry identified by `header`.
fn write_debug(self: *Journal, header: *const Header, status: []const u8) void {
    const h = header;
    log.debug("{}: journal: write: view={} op={} offset={} len={}: {} {s}", .{
        self.replica,
        h.view,
        h.op,
        h.offset,
        h.size,
        h.checksum,
        status,
    });
}
/// Translates a logical circular-buffer offset into a physical disk offset
/// (the circular buffer starts immediately after headers copy 0).
fn offset_in_circular_buffer(self: *Journal, offset: u64) u64 {
    assert(offset < self.size_circular_buffer);
    const circular_buffer_base = self.size_headers;
    return circular_buffer_base + offset;
}
/// Translates a logical header offset into a physical disk offset for the given redundant
/// copy: copy 0 lives at the start of the disk, copy 1 after the circular buffer.
fn offset_in_headers_version(self: *Journal, offset: u64, version: u1) u64 {
    assert(offset < self.size_headers);
    if (version == 0) return offset;
    return self.size_headers + self.size_circular_buffer + offset;
}
/// Writes headers between `op_min` and `op_max` (both inclusive).
/// Copies the affected header sectors into `write_headers_buffer` (so that concurrent header
/// mutations cannot corrupt the in-flight write) and writes one or both redundant copies.
fn write_headers_between(self: *Journal, op_min: u64, op_max: u64) void {
    // TODO Snapshots
    assert(op_min <= op_max);

    // Round the byte range out to whole sectors, since disk writes are sector-granular:
    const offset = Journal.sector_floor(op_min * @sizeOf(Header));
    const len = Journal.sector_ceil((op_max - op_min + 1) * @sizeOf(Header));
    assert(len > 0);

    // We must acquire the concurrent range using the sector offset and len:
    // Different headers may share the same sector without any op_min or op_max overlap.
    // TODO Use a callback to acquire the range instead of suspend/resume:
    var range = Range{ .offset = offset, .len = len };
    self.writing_headers.acquire(&range);
    defer self.writing_headers.release(&range);

    // Copy-on-write: snapshot the headers into the staging buffer before writing.
    const source = std.mem.sliceAsBytes(self.headers)[offset .. offset + len];
    var slice = self.write_headers_buffer[offset .. offset + len];
    assert(slice.len == source.len);
    assert(slice.len == len);
    std.mem.copy(u8, slice, source);

    log.debug("{}: journal: write_headers: op_min={} op_max={} sectors[{}..{}]", .{
        self.replica,
        op_min,
        op_max,
        offset,
        offset + len,
    });

    // Versions must be incremented upfront:
    // write_headers_to_version() will block while other calls may proceed concurrently.
    // If we don't increment upfront we could end up writing to the same copy twice.
    // We would then lose the redundancy required to locate headers or overwrite all copies.
    // TODO Snapshots
    if (self.write_headers_once(self.headers[op_min .. op_max + 1])) {
        // One copy suffices: the other copy can be recovered from the journal body.
        const version_a = self.write_headers_increment_version();
        self.write_headers_to_version(version_a, slice, offset);
    } else {
        // Both copies must be written to preserve redundancy (see write_headers_once()).
        const version_a = self.write_headers_increment_version();
        const version_b = self.write_headers_increment_version();
        self.write_headers_to_version(version_a, slice, offset);
        self.write_headers_to_version(version_b, slice, offset);
    }
}
/// Advances to the next redundant header copy (alternating 0 and 1) and returns it.
fn write_headers_increment_version(self: *Journal) u1 {
    const next_version = self.write_headers_version +% 1;
    self.write_headers_version = next_version;
    return next_version;
}
/// Since we allow gaps in the journal, we may have to write our headers twice.
/// If a dirty header is being written as reserved (empty) then write twice to make this clear.
/// If a dirty header has no previous clean chained entry to give its offset then write twice.
/// Otherwise, we only need to write the headers once because their other copy can be located in
/// the body of the journal (using the previous entry's offset and size).
fn write_headers_once(self: *Journal, headers: []const Header) bool {
    for (headers) |*header| {
        // TODO Snapshots
        // We must use header.op and not the loop index as we are working from a slice:
        if (!self.dirty.bit(header.op)) continue;

        if (header.command == .reserved) {
            log.debug("{}: journal: write_headers_once: dirty reserved header", .{
                self.replica,
            });
            return false;
        }

        const previous = self.previous_entry(header) orelse {
            log.debug("{}: journal: write_headers_once: no previous entry", .{
                self.replica,
            });
            return false;
        };
        assert(previous.command == .prepare);

        if (previous.checksum != header.nonce) {
            log.debug("{}: journal: write_headers_once: no hash chain", .{
                self.replica,
            });
            return false;
        }

        // TODO Add is_dirty(header)
        // TODO Snapshots
        if (self.dirty.bit(previous.op)) {
            log.debug("{}: journal: write_headers_once: previous entry is dirty", .{
                self.replica,
            });
            return false;
        }
    }
    return true;
}
/// Writes a header sector `buffer` to the redundant header copy selected by `version`.
fn write_headers_to_version(self: *Journal, version: u1, buffer: []const u8, offset: u64) void {
    log.debug("{}: journal: write_headers_to_version: version={} offset={} len={}", .{
        self.replica,
        version,
        offset,
        buffer.len,
    });
    assert(offset + buffer.len <= self.size_headers);
    const physical_offset = self.offset_in_headers_version(offset, version);
    self.write_sectors(buffer, physical_offset);
}
/// Writes `buffer` to `offset` on the underlying storage, serializing overlapping writes.
fn write_sectors(self: *Journal, buffer: []const u8, offset: u64) void {
    // Memory must not be owned by self.headers as self.headers may be modified concurrently:
    const buffer_address = @ptrToInt(buffer.ptr);
    const headers_address = @ptrToInt(self.headers.ptr);
    assert(buffer_address < headers_address or
        buffer_address > headers_address + self.size_headers);

    // TODO We can move this range queuing right into Storage and remove write_sectors entirely.
    // Our ConcurrentRange structure would also need to be weaned off of async/await but at
    // least then we can manage this all in one place (i.e. in Storage).
    var range = Range{ .offset = offset, .len = buffer.len };
    self.writing_sectors.acquire(&range);
    defer self.writing_sectors.release(&range);

    log.debug("{}: journal: write_sectors: offset={} len={}", .{
        self.replica,
        offset,
        buffer.len,
    });

    self.storage.write(buffer, offset);
}
/// Rounds `offset` down to the nearest sector boundary.
pub fn sector_floor(offset: u64) u64 {
    // Unsigned division truncates toward zero, which is exactly a floor here:
    const whole_sectors = offset / config.sector_size;
    return whole_sectors * config.sector_size;
}
/// Rounds `offset` up to the nearest sector boundary.
pub fn sector_ceil(offset: u64) u64 {
    // divCeil cannot fail here: the divisor is a nonzero comptime constant.
    const sectors = std.math.divCeil(u64, offset, config.sector_size) catch unreachable;
    return config.sector_size * sectors;
}
};
// TODO Snapshots
/// A simple bitset over op numbers, tracking how many bits are currently set.
/// Backed by one bool per bit for simplicity (not space efficiency).
pub const BitSet = struct {
    allocator: *Allocator,
    bits: []bool,

    /// The number of bits set (updated incrementally as bits are set or cleared):
    len: u64 = 0,

    /// Allocates a bitset of `count` bits, all initially cleared.
    fn init(allocator: *Allocator, count: u64) !BitSet {
        const bits = try allocator.alloc(bool, count);
        errdefer allocator.free(bits);
        for (bits) |*b| b.* = false;
        return BitSet{
            .allocator = allocator,
            .bits = bits,
        };
    }

    fn deinit(self: *BitSet) void {
        self.allocator.free(self.bits);
    }

    /// Clear the bit for an op (idempotent):
    pub fn clear(self: *BitSet, op: u64) void {
        if (!self.bits[op]) return;
        self.bits[op] = false;
        self.len -= 1;
    }

    /// Whether the bit for an op is set:
    pub fn bit(self: *BitSet, op: u64) bool {
        return self.bits[op];
    }

    /// Set the bit for an op (idempotent):
    pub fn set(self: *BitSet, op: u64) void {
        if (self.bits[op]) return;
        self.bits[op] = true;
        self.len += 1;
        assert(self.len <= self.bits.len);
    }
};
/// A logical-tick timeout: started/stopped explicitly, advanced by tick(), and considered
/// fired once exactly `after` ticks have elapsed while ticking.
pub const Timeout = struct {
    name: []const u8,
    /// TODO: get rid of this field as this is used by Client as well
    replica: u16,
    after: u64,
    ticks: u64 = 0,
    ticking: bool = false,

    /// It's important to check that when fired() is acted on that the timeout is stopped/started,
    /// otherwise further ticks around the event loop may trigger a thundering herd of messages.
    pub fn fired(self: *Timeout) bool {
        if (!self.ticking) return false;
        if (self.ticks < self.after) return false;
        log.debug("{}: {s} fired", .{ self.replica, self.name });
        if (self.ticks > self.after) {
            // A fired timeout that is neither reset nor stopped is a logic error:
            log.emerg("{}: {s} is firing every tick", .{ self.replica, self.name });
            @panic("timeout was not reset correctly");
        }
        return true;
    }

    /// Restarts the countdown; only valid while ticking.
    pub fn reset(self: *Timeout) void {
        assert(self.ticking);
        self.ticks = 0;
        log.debug("{}: {s} reset", .{ self.replica, self.name });
    }

    /// Begins (or restarts) the countdown from zero.
    pub fn start(self: *Timeout) void {
        self.ticks = 0;
        self.ticking = true;
        log.debug("{}: {s} started", .{ self.replica, self.name });
    }

    /// Halts the countdown and resets it to zero.
    pub fn stop(self: *Timeout) void {
        self.ticks = 0;
        self.ticking = false;
        log.debug("{}: {s} stopped", .{ self.replica, self.name });
    }

    /// Advances logical time by one tick; a no-op unless ticking.
    pub fn tick(self: *Timeout) void {
        if (!self.ticking) return;
        self.ticks += 1;
    }
};
/// A single replica in the viewstamped-replication cluster: owns the journal, drives the
/// normal/view-change/recovery protocols, and exchanges messages via the MessageBus.
pub const Replica = struct {
    allocator: *Allocator,

    /// The id of the cluster to which this replica belongs:
    cluster: u128,

    /// The number of replicas in the cluster:
    replica_count: u16,

    /// The index of this replica's address in the configuration array held by the MessageBus:
    replica: u16,

    /// The maximum number of replicas that may be faulty:
    /// (Derived in init() as the largest f with 2f + 1 <= replica_count.)
    f: u16,

    /// The persistent log of hash-chained journal entries:
    journal: *Journal,

    /// An abstraction to send messages from the replica to itself or another replica or client.
    /// The recipient replica or client may be a local in-memory pointer or network-addressable.
    /// The message bus will also deliver messages to this replica by calling Replica.on_message().
    message_bus: *MessageBus,

    /// For executing service up-calls after an operation has been committed:
    state_machine: *StateMachine,

    /// The current view, initially 0:
    view: u64,

    /// Whether we have experienced a view jump:
    /// If this is true then we must request a start_view message from the leader before committing.
    /// This prevents us from committing ops that may have been reordered through a view change.
    view_jump_barrier: bool = false,

    /// The current status, either normal, view_change, or recovering:
    /// TODO Don't default to normal, set the starting status according to the journal's health.
    status: Status = .normal,

    /// The op number assigned to the most recently prepared operation:
    op: u64,

    /// The op number of the latest committed and executed operation (according to the replica):
    /// The replica may have to wait for repairs to complete before commit_min reaches commit_max.
    commit_min: u64,

    /// The op number of the latest committed operation (according to the cluster):
    /// This is the commit number in terms of the VRR paper.
    commit_max: u64,

    /// The current request's checksum (used for now to enforce one-at-a-time request processing):
    request_checksum: ?u128 = null,

    /// The current prepare message (used to cross-check prepare_ok messages, and for resending):
    prepare_message: ?*Message = null,
    prepare_attempt: u64 = 0,

    // In-flight async journal writes: one slot for appending our own prepares,
    // and one for repair writes, each with its own async frame storage.
    appending: bool = false,
    appending_frame: @Frame(write_to_journal) = undefined,

    repairing: bool = false,
    repairing_frame: @Frame(write_to_journal) = undefined,

    committing: bool = false,

    sending_prepare: bool = false,
    sending_prepare_frame: @Frame(send_prepare_to_replica) = undefined,

    /// TODO Size repair_queue_max according to a reasonable bandwidth-delay product:
    repair_queue: ?*Message = null,
    repair_queue_len: usize = 0,
    repair_queue_max: usize = 3,

    // Quorum-counting arrays, one ?*Message slot per replica (allocated in init()):

    /// Unique prepare_ok messages for the same view, op number and checksum from ALL replicas:
    prepare_ok_from_all_replicas: []?*Message,

    /// Unique start_view_change messages for the same view from OTHER replicas (excluding ourself):
    start_view_change_from_other_replicas: []?*Message,

    /// Unique do_view_change messages for the same view from ALL replicas (including ourself):
    do_view_change_from_all_replicas: []?*Message,

    /// Unique nack_prepare messages for the same view from OTHER replicas (excluding ourself):
    nack_prepare_from_other_replicas: []?*Message,

    /// Whether a replica has received a quorum of start_view_change messages for the view change:
    start_view_change_quorum: bool = false,

    /// Whether the leader has received a quorum of do_view_change messages for the view change:
    /// Determines whether the leader may effect repairs according to the CTRL protocol.
    do_view_change_quorum: bool = false,

    /// Whether the leader is expecting to receive a nack_prepare and for which op:
    nack_prepare_op: ?u64 = null,

    /// The number of ticks before a leader or follower broadcasts a ping to the other replicas:
    /// TODO Explain why we need this (MessageBus handshaking, leapfrogging faulty replicas,
    /// deciding whether starting a view change would be detrimental under some network partitions).
    ping_timeout: Timeout,

    /// The number of ticks without enough prepare_ok's before the leader resends a prepare:
    /// TODO Adjust this dynamically to match sliding window EWMA of recent network latencies.
    prepare_timeout: Timeout,

    /// The number of ticks before the leader sends a commit heartbeat:
    /// The leader always sends a commit heartbeat irrespective of when it last sent a prepare.
    /// This improves liveness when prepare messages cannot be replicated fully due to partitions.
    commit_timeout: Timeout,

    /// The number of ticks without hearing from the leader before a follower starts a view change:
    /// This transitions from .normal status to .view_change status.
    normal_timeout: Timeout,

    /// The number of ticks before a view change is timed out:
    /// This transitions from `view_change` status to `view_change` status but for a newer view.
    view_change_timeout: Timeout,

    /// The number of ticks before resending a `start_view_change` or `do_view_change` message:
    view_change_message_timeout: Timeout,

    /// The number of ticks before repairing missing/disconnected headers and/or dirty entries:
    repair_timeout: Timeout,

    /// Used to provide deterministic entropy to `choose_any_other_replica()`.
    /// Incremented whenever `choose_any_other_replica()` is called.
    choose_any_other_replica_ticks: u64 = 0,
/// Initializes a Replica: allocates the per-replica quorum-counting arrays, seeds the journal
/// with the cluster's initial prepare at op 0, and starts the timeouts appropriate to the
/// replica's role. Caller owns the returned Replica and must call deinit().
pub fn init(
    allocator: *Allocator,
    cluster: u128,
    replica_count: u16,
    replica: u16,
    journal: *Journal,
    message_bus: *MessageBus,
    state_machine: *StateMachine,
) !Replica {
    // The smallest f such that 2f + 1 is less than or equal to the number of replicas.
    const f = (replica_count - 1) / 2;

    assert(cluster > 0);
    assert(replica_count > 0);
    assert(replica_count > f);
    assert(replica < replica_count);
    assert(f > 0 or replica_count <= 2);

    // One ?*Message slot per replica for each quorum-counting array:
    var prepare_ok = try allocator.alloc(?*Message, replica_count);
    errdefer allocator.free(prepare_ok);
    std.mem.set(?*Message, prepare_ok, null);

    var start_view_change = try allocator.alloc(?*Message, replica_count);
    errdefer allocator.free(start_view_change);
    std.mem.set(?*Message, start_view_change, null);

    var do_view_change = try allocator.alloc(?*Message, replica_count);
    errdefer allocator.free(do_view_change);
    std.mem.set(?*Message, do_view_change, null);

    var nack_prepare = try allocator.alloc(?*Message, replica_count);
    errdefer allocator.free(nack_prepare);
    std.mem.set(?*Message, nack_prepare, null);

    // TODO Initialize the journal when initializing the cluster:
    // Every replica starts from the same well-known op 0 prepare so hash chains agree.
    var init_prepare = Header{
        .nonce = 0,
        .client = 0,
        .cluster = cluster,
        .view = 0,
        .op = 0,
        .commit = 0,
        .offset = 0,
        .size = @sizeOf(Header),
        .epoch = 0,
        .request = 0,
        .replica = 0,
        .command = .prepare,
        .operation = .init,
    };
    // Body checksum must be set before the header checksum, which covers it:
    init_prepare.set_checksum_body(&[0]u8{});
    init_prepare.set_checksum();
    assert(init_prepare.valid_checksum());
    assert(init_prepare.invalid() == null);
    journal.headers[0] = init_prepare;
    journal.assert_headers_reserved_from(init_prepare.op + 1);

    var self = Replica{
        .allocator = allocator,
        .cluster = cluster,
        .replica_count = replica_count,
        .replica = replica,
        .f = f,
        .journal = journal,
        .message_bus = message_bus,
        .state_machine = state_machine,
        .view = init_prepare.view,
        .op = init_prepare.op,
        .commit_min = init_prepare.commit,
        .commit_max = init_prepare.commit,
        .prepare_ok_from_all_replicas = prepare_ok,
        .start_view_change_from_other_replicas = start_view_change,
        .do_view_change_from_all_replicas = do_view_change,
        .nack_prepare_from_other_replicas = nack_prepare,
        .ping_timeout = Timeout{
            .name = "ping_timeout",
            .replica = replica,
            .after = 100,
        },
        .prepare_timeout = Timeout{
            .name = "prepare_timeout",
            .replica = replica,
            .after = 50,
        },
        .commit_timeout = Timeout{
            .name = "commit_timeout",
            .replica = replica,
            .after = 100,
        },
        .normal_timeout = Timeout{
            .name = "normal_timeout",
            .replica = replica,
            .after = 500,
        },
        .view_change_timeout = Timeout{
            .name = "view_change_timeout",
            .replica = replica,
            .after = 500,
        },
        .view_change_message_timeout = Timeout{
            .name = "view_change_message_timeout",
            .replica = replica,
            .after = 50,
        },
        .repair_timeout = Timeout{
            .name = "repair_timeout",
            .replica = replica,
            .after = 50,
        },
    };

    // We must initialize timeouts here, not in tick() on the first tick, because on_message()
    // can race with tick()... before timeouts have been initialized:
    assert(self.status == .normal);
    if (self.leader()) {
        log.debug("{}: init: leader", .{self.replica});
        self.ping_timeout.start();
        self.commit_timeout.start();
        self.repair_timeout.start();
    } else {
        log.debug("{}: init: follower", .{self.replica});
        self.ping_timeout.start();
        self.normal_timeout.start();
        self.repair_timeout.start();
    }

    return self;
}
/// Frees the quorum-counting arrays allocated by init().
pub fn deinit(self: *Replica) void {
    const allocator = self.allocator;
    allocator.free(self.nack_prepare_from_other_replicas);
    allocator.free(self.do_view_change_from_all_replicas);
    allocator.free(self.start_view_change_from_other_replicas);
    allocator.free(self.prepare_ok_from_all_replicas);
}
/// Returns whether the replica is a follower for the current view.
/// This may be used only when the replica status is normal.
pub fn follower(self: *Replica) bool {
    const is_leader = self.leader();
    return !is_leader;
}
/// Returns whether the replica is the leader for the current view.
/// This may be used only when the replica status is normal.
pub fn leader(self: *Replica) bool {
    assert(self.status == .normal);
    const leader_replica = self.leader_index(self.view);
    return leader_replica == self.replica;
}
/// Returns the index into the configuration of the leader for a given view
/// (round-robin across replicas as views advance).
pub fn leader_index(self: *Replica, view: u64) u16 {
    const index = @mod(view, self.replica_count);
    return @intCast(u16, index);
}
/// Time is measured in logical ticks that are incremented on every call to tick().
/// This eliminates a dependency on the system time and enables deterministic testing.
pub fn tick(self: *Replica) void {
    // Advance all timeouts first so every fired() check below observes the same logical time:
    inline for (.{
        "ping_timeout",
        "prepare_timeout",
        "commit_timeout",
        "normal_timeout",
        "view_change_timeout",
        "view_change_message_timeout",
        "repair_timeout",
    }) |timeout_field| @field(self, timeout_field).tick();

    if (self.ping_timeout.fired()) self.on_ping_timeout();
    if (self.prepare_timeout.fired()) self.on_prepare_timeout();
    if (self.commit_timeout.fired()) self.on_commit_timeout();
    if (self.normal_timeout.fired()) self.on_normal_timeout();
    if (self.view_change_timeout.fired()) self.on_view_change_timeout();
    if (self.view_change_message_timeout.fired()) self.on_view_change_message_timeout();
    if (self.repair_timeout.fired()) self.on_repair_timeout();

    self.repair_last_queued_message_if_any();
}
/// Called by the MessageBus to deliver a message to the replica.
/// Validates the header and cluster id before dispatching on the message command.
pub fn on_message(self: *Replica, message: *Message) void {
    log.debug("{}:", .{self.replica});
    log.debug("{}: on_message: view={} status={s} {}", .{
        self.replica,
        self.view,
        @tagName(self.status),
        message.header,
    });
    // Drop structurally invalid messages before looking at the command:
    if (message.header.invalid()) |reason| {
        log.debug("{}: on_message: invalid ({s})", .{ self.replica, reason });
        return;
    }
    // Drop messages from other clusters (e.g. a misconfigured peer):
    if (message.header.cluster != self.cluster) {
        log.warn("{}: on_message: wrong cluster (message.header.cluster={} instead of {})", .{
            self.replica,
            message.header.cluster,
            self.cluster,
        });
        return;
    }
    assert(message.header.replica < self.replica_count);
    switch (message.header.command) {
        .ping => self.on_ping(message),
        .pong => self.on_pong(message),
        .request => self.on_request(message),
        .prepare => self.on_prepare(message),
        .prepare_ok => self.on_prepare_ok(message),
        .commit => self.on_commit(message),
        .start_view_change => self.on_start_view_change(message),
        .do_view_change => self.on_do_view_change(message),
        .start_view => self.on_start_view(message),
        .request_start_view => self.on_request_start_view(message),
        .request_prepare => self.on_request_prepare(message),
        .request_headers => self.on_request_headers(message),
        .headers => self.on_headers(message),
        .nack_prepare => self.on_nack_prepare(message),
        else => unreachable,
    }
}
/// Replies to a ping with a pong, addressed to the originating client or replica.
fn on_ping(self: *Replica, message: *const Message) void {
    // Pings are only answered while in normal or view_change status:
    switch (self.status) {
        .normal, .view_change => {},
        else => return,
    }

    // TODO Drop pings that were not addressed to us.

    var pong = Header{
        .command = .pong,
        .cluster = self.cluster,
        .replica = self.replica,
        .view = self.view,
    };

    if (message.header.client > 0) {
        assert(message.header.replica == 0);

        // 4.5 Client Recovery
        // If a client crashes and recovers it must start up with a request number larger than
        // what it had before it failed. It fetches its latest number from the replicas and adds
        // 2 to this value to be sure the new request number is big enough. Adding 2 ensures
        // that its next request will have a unique number even in the odd case where the latest
        // request it sent before it failed is still in transit (since that request will have as
        // its request number the number the client learns plus 1).
        //
        // TODO Lookup latest request number from client table:
        pong.request = 0;
        self.message_bus.send_header_to_client(message.header.client, pong);
        return;
    }

    if (message.header.replica == self.replica) {
        log.warn("{}: on_ping: ignoring (self)", .{self.replica});
        return;
    }

    self.message_bus.send_header_to_replica(message.header.replica, pong);
}
/// Handles a `pong` reply to one of our pings. Currently a no-op.
fn on_pong(self: *Replica, message: *const Message) void {
    // TODO Update cluster connectivity stats.
}
/// The primary advances op-number, adds the request to the end of the log, and updates the
/// information for this client in the client-table to contain the new request number, s.
/// Then it sends a ⟨PREPARE v, m, n, k⟩ message to the other replicas, where v is the current
/// view-number, m is the message it received from the client, n is the op-number it assigned to
/// the request, and k is the commit-number.
fn on_request(self: *Replica, message: *Message) void {
    if (self.status != .normal) {
        log.debug("{}: on_request: ignoring ({})", .{ self.replica, self.status });
        return;
    }
    if (message.header.view > self.view) {
        log.debug("{}: on_request: ignoring (newer view)", .{self.replica});
        return;
    }
    if (self.follower()) {
        // TODO Re-enable this dead branch when the Client starts pinging the cluster.
        // Otherwise, we will trip our one-request-at-a-time limit.
        if (message.header.view < self.view and false) {
            log.debug("{}: on_request: forwarding (follower)", .{self.replica});
            self.send_message_to_replica(self.leader_index(self.view), message);
        } else {
            // The message has the same view, but was routed to the wrong replica.
            // Don't amplify traffic, let the client retry to another replica.
            log.warn("{}: on_request: ignoring (follower)", .{self.replica});
        }
        return;
    }
    assert(self.status == .normal);
    assert(self.leader());
    // TODO Check the client table to see if this is a duplicate request and reply if so.
    // TODO If request is pending then this will also reflect in client table and we can ignore.
    // TODO Add client information to client table.
    // A matching checksum means this is a retransmit of the request we are already preparing:
    if (self.request_checksum) |request_checksum| {
        assert(message.header.command == .request);
        if (message.header.checksum == request_checksum) {
            log.debug("{}: on_request: ignoring (already preparing)", .{self.replica});
            return;
        }
    }
    // TODO Queue (or drop client requests after a limit) to handle one request at a time:
    // TODO Clear this queue if we lose our leadership (critical for correctness).
    assert(self.commit_min == self.commit_max and self.commit_max == self.op);
    assert(self.request_checksum == null);
    self.request_checksum = message.header.checksum;
    log.debug("{}: on_request: request {}", .{ self.replica, message.header.checksum });
    var body = message.buffer[@sizeOf(Header)..message.header.size];
    self.state_machine.prepare(message.header.operation, body);
    var latest_entry = self.journal.entry_for_op_exact(self.op).?;
    // Convert the request into a prepare in place: rewrite the header fields and
    // hash-chain it onto the latest journal entry via the nonce:
    message.header.nonce = latest_entry.checksum;
    message.header.view = self.view;
    message.header.op = self.op + 1;
    message.header.commit = self.commit_max;
    message.header.offset = self.journal.next_offset(latest_entry);
    message.header.replica = self.replica;
    message.header.command = .prepare;
    // Recompute checksums last, after all header mutations above:
    message.header.set_checksum_body(body);
    message.header.set_checksum();
    assert(message.header.checksum != self.request_checksum.?);
    log.debug("{}: on_request: prepare {}", .{ self.replica, message.header.checksum });
    assert(self.prepare_message == null);
    assert(self.prepare_attempt == 0);
    for (self.prepare_ok_from_all_replicas) |received| assert(received == null);
    assert(self.prepare_timeout.ticking == false);
    self.prepare_message = message.ref();
    self.prepare_attempt = 0;
    self.prepare_timeout.start();
    // Use the same replication code path for the leader and followers:
    self.send_message_to_replica(self.replica, message);
}
/// Replication is simple, with a single code path for the leader and followers:
///
/// The leader starts by sending a prepare message to itself.
///
/// Each replica (including the leader) then forwards this prepare message to the next replica
/// in the configuration, in parallel to writing to its own journal, closing the circle until
/// the next replica is back to the leader, in which case the replica does not forward.
///
/// This keeps the leader's outgoing bandwidth limited (one-for-one) to incoming bandwidth,
/// since the leader need only replicate to the next replica. Otherwise, the leader would need
/// to replicate to multiple followers, dividing available bandwidth.
///
/// This does not impact latency, since with Flexible Paxos we need only one remote prepare_ok.
/// It is ideal if this synchronous replication to one remote replica is to the next replica,
/// since that is the replica next in line to be leader, which will need to be up-to-date before
/// it can start the next view.
///
/// At the same time, asynchronous replication keeps going, so that if our local disk is slow
/// then any latency spike will be masked by more remote prepare_ok messages as they come in.
/// This gives automatic tail latency tolerance for storage latency spikes.
///
/// The remaining problem then is tail latency tolerance for network latency spikes.
/// If the next replica is down or partitioned, then the leader's prepare timeout will fire,
/// and the leader will resend but to another replica, until it receives enough prepare_ok's.
fn on_prepare(self: *Replica, message: *Message) void {
    self.view_jump(message.header);
    // Prepares at or below our op are repairs, not new replication traffic:
    if (self.is_repair(message)) {
        log.debug("{}: on_prepare: ignoring (repair)", .{self.replica});
        self.on_repair(message);
        return;
    }
    if (self.status != .normal) {
        log.debug("{}: on_prepare: ignoring ({})", .{ self.replica, self.status });
        return;
    }
    assert(self.status == .normal);
    assert(message.header.view == self.view);
    assert(self.leader() or self.follower());
    assert(message.header.replica == self.leader_index(message.header.view));
    assert(message.header.op > self.op);
    assert(message.header.op > self.commit_min);
    if (self.follower()) self.normal_timeout.reset();
    if (message.header.op > self.op + 1) {
        log.debug("{}: on_prepare: newer op", .{self.replica});
        self.jump_to_newer_op_in_normal_status(message.header);
    }
    if (self.journal.previous_entry(message.header)) |previous| {
        // Any previous entry may be a whole journal's worth of ops behind due to wrapping.
        // We therefore do not do any further op, offset or checksum assertions beyond this:
        self.panic_if_hash_chain_would_break_in_the_same_view(previous, message.header);
    }
    // We must advance our op and set the header as dirty before replicating and journalling.
    // The leader needs this before its journal is outrun by any prepare_ok quorum:
    log.debug("{}: on_prepare: advancing: op={}..{} checksum={}..{}", .{
        self.replica,
        self.op,
        message.header.op,
        message.header.nonce,
        message.header.checksum,
    });
    assert(message.header.op == self.op + 1);
    self.op = message.header.op;
    self.journal.set_entry_as_dirty(message.header);
    // We have the latest op from the leader and have therefore cleared the view jump barrier:
    if (self.view_jump_barrier) {
        self.view_jump_barrier = false;
        log.notice("{}: on_prepare: cleared view jump barrier", .{self.replica});
    }
    // TODO Update client's information in the client table.
    // Forward to the next replica and write to our own journal in parallel:
    self.replicate(message);
    self.append(message);
    if (self.follower()) {
        // A prepare may already be committed if requested by repair() so take the max:
        self.commit_ops_through(std.math.max(message.header.commit, self.commit_max));
    }
}
/// The leader collects `prepare_ok` acks for the in-flight prepare and commits the
/// op once a quorum of `f + 1` (including itself) has been received exactly once.
fn on_prepare_ok(self: *Replica, message: *Message) void {
    if (self.status != .normal) {
        log.warn("{}: on_prepare_ok: ignoring ({})", .{ self.replica, self.status });
        return;
    }
    if (message.header.view < self.view) {
        log.debug("{}: on_prepare_ok: ignoring (older view)", .{self.replica});
        return;
    }
    if (message.header.view > self.view) {
        // Another replica is treating us as the leader for a view we do not know about.
        // This may be caused by a fault in the network topology.
        log.warn("{}: on_prepare_ok: ignoring (newer view)", .{self.replica});
        return;
    }
    if (self.follower()) {
        // This may be caused by a fault in the network topology.
        log.warn("{}: on_prepare_ok: ignoring (follower)", .{self.replica});
        return;
    }
    // The nonce of a prepare_ok carries the checksum of the prepare being acked:
    if (self.prepare_message) |prepare_message| {
        if (message.header.nonce != prepare_message.header.checksum) {
            log.debug("{}: on_prepare_ok: ignoring (different nonce)", .{self.replica});
            return;
        }
    } else {
        log.debug("{}: on_prepare_ok: ignoring (not preparing)", .{self.replica});
        return;
    }
    assert(self.status == .normal);
    assert(message.header.view == self.view);
    assert(self.leader());
    assert(message.header.command == .prepare_ok);
    // The ack must agree with our in-flight prepare on every header field:
    assert(message.header.nonce == self.prepare_message.?.header.checksum);
    assert(message.header.client == self.prepare_message.?.header.client);
    assert(message.header.cluster == self.prepare_message.?.header.cluster);
    assert(message.header.view == self.prepare_message.?.header.view);
    assert(message.header.op == self.prepare_message.?.header.op);
    assert(message.header.commit == self.prepare_message.?.header.commit);
    assert(message.header.offset == self.prepare_message.?.header.offset);
    assert(message.header.epoch == self.prepare_message.?.header.epoch);
    assert(message.header.request == self.prepare_message.?.header.request);
    assert(message.header.operation == self.prepare_message.?.header.operation);
    assert(message.header.op == self.op);
    assert(message.header.op == self.commit_min + 1);
    assert(message.header.op == self.commit_max + 1);
    // Wait until we have `f + 1` messages (including ourself) for quorum:
    const threshold = self.f + 1;
    const count = self.add_message_and_receive_quorum_exactly_once(
        self.prepare_ok_from_all_replicas,
        message,
        threshold,
    ) orelse return;
    assert(count == threshold);
    log.debug("{}: on_prepare_ok: quorum received", .{self.replica});
    self.commit_op(self.prepare_message.?);
    assert(self.commit_min == self.op);
    assert(self.commit_max == self.op);
    self.reset_quorum_prepare();
}
/// A follower advances its commit number on receiving the leader's periodic
/// `commit` heartbeat, after verifying the commit checksum where possible.
fn on_commit(self: *Replica, message: *const Message) void {
    self.view_jump(message.header);
    if (self.status != .normal) {
        log.debug("{}: on_commit: ignoring ({})", .{ self.replica, self.status });
        return;
    }
    if (message.header.view < self.view) {
        log.debug("{}: on_commit: ignoring (older view)", .{self.replica});
        return;
    }
    if (self.leader()) {
        log.warn("{}: on_commit: ignoring (leader)", .{self.replica});
        return;
    }
    assert(self.status == .normal);
    assert(message.header.view == self.view);
    assert(self.follower());
    assert(message.header.replica == self.leader_index(message.header.view));
    // We may not always have the latest commit entry but if we do these checksums must match:
    if (self.journal.entry_for_op_exact(message.header.commit)) |commit_entry| {
        if (commit_entry.checksum == message.header.nonce) {
            log.debug("{}: on_commit: verified commit checksum", .{self.replica});
        } else {
            // A mismatch implies divergent logs, which must never happen:
            @panic("commit checksum verification failed");
        }
    }
    // The commit heartbeat also serves as a liveness signal from the leader:
    self.normal_timeout.reset();
    self.commit_ops_through(message.header.commit);
}
/// Handles a `prepare` received as a repair (i.e. at or below our current op),
/// writing it to the journal if its header survives the repair checks.
/// Repairs may never advance `self.op`.
fn on_repair(self: *Replica, message: *Message) void {
    assert(message.header.command == .prepare);
    if (self.status != .normal and self.status != .view_change) {
        log.debug("{}: on_repair: ignoring ({})", .{ self.replica, self.status });
        return;
    }
    if (message.header.view > self.view) {
        log.debug("{}: on_repair: ignoring (newer view)", .{self.replica});
        return;
    }
    if (self.status == .view_change and message.header.view == self.view) {
        log.debug("{}: on_repair: ignoring (view started)", .{self.replica});
        return;
    }
    if (self.status == .view_change and self.leader_index(self.view) != self.replica) {
        log.debug("{}: on_repair: ignoring (view change, follower)", .{self.replica});
        return;
    }
    if (self.status == .view_change and !self.do_view_change_quorum) {
        log.debug("{}: on_repair: ignoring (view change, waiting for quorum)", .{self.replica});
        return;
    }
    if (message.header.op > self.op) {
        assert(message.header.view < self.view);
        log.debug("{}: on_repair: ignoring (would advance self.op)", .{self.replica});
        return;
    }
    assert(self.status == .normal or self.status == .view_change);
    assert(self.repairs_allowed());
    assert(message.header.view <= self.view);
    assert(message.header.op <= self.op); // Repairs may never advance `self.op`.
    // If the journal already has this prepare clean, re-ack so the sender can make progress:
    if (self.journal.has_clean(message.header)) {
        log.debug("{}: on_repair: duplicate", .{self.replica});
        self.send_prepare_ok(message.header);
        return;
    }
    if (self.repair_header(message.header)) {
        assert(self.journal.has_dirty(message.header));
        // If this repairs the op we were collecting nacks for, stop nacking it:
        if (self.nack_prepare_op) |nack_prepare_op| {
            if (nack_prepare_op == message.header.op) {
                log.debug("{}: on_repair: repairing uncommitted op={}", .{
                    self.replica,
                    message.header.op,
                });
                self.reset_quorum_nack_prepare();
            }
        }
        // Only one repair write may be in flight at a time; queue the rest:
        if (self.repairing) return self.repair_later(message);
        log.debug("{}: on_repair: repairing journal", .{self.replica});
        self.repairing_frame = async self.write_to_journal(message, &self.repairing);
    }
}
/// Collects `start_view_change` messages from other replicas and, once `f` have
/// been received for our view, sends a `do_view_change` to the new view's leader.
fn on_start_view_change(self: *Replica, message: *Message) void {
    if (self.ignore_view_change_message(message)) return;
    assert(self.status == .normal or self.status == .view_change);
    assert(message.header.view >= self.view);
    assert(message.header.replica != self.replica);
    self.view_jump(message.header);
    assert(!self.view_jump_barrier);
    assert(self.status == .view_change);
    assert(message.header.view == self.view);
    // Wait until we have `f` messages (excluding ourself) for quorum:
    assert(self.replica_count > 1);
    assert(self.f > 0 or self.replica_count == 2);
    // With replica_count == 2, f is 0, so we still require one other replica:
    const threshold = std.math.max(1, self.f);
    const count = self.add_message_and_receive_quorum_exactly_once(
        self.start_view_change_from_other_replicas,
        message,
        threshold,
    ) orelse return;
    assert(count == threshold);
    assert(self.start_view_change_from_other_replicas[self.replica] == null);
    log.debug("{}: on_start_view_change: quorum received", .{self.replica});
    assert(!self.start_view_change_quorum);
    assert(!self.do_view_change_quorum);
    self.start_view_change_quorum = true;
    // When replica i receives start_view_change messages for its view from f other replicas,
    // it sends a ⟨do_view_change v, l, v’, n, k, i⟩ message to the node that will be the
    // primary in the new view. Here v is its view, l is its log, v′ is the view number of the
    // latest view in which its status was normal, n is the op number, and k is the commit
    // number.
    self.send_do_view_change();
}
/// When the new primary receives f + 1 do_view_change messages from different replicas
/// (including itself), it sets its view number to that in the messages and selects as the
/// new log the one contained in the message with the largest v′; if several messages have
/// the same v′ it selects the one among them with the largest n. It sets its op number to
/// that of the topmost entry in the new log, sets its commit number to the largest such
/// number it received in the do_view_change messages, changes its status to normal, and
/// informs the other replicas of the completion of the view change by sending
/// ⟨start_view v, l, n, k⟩ messages to the other replicas, where l is the new log, n is the
/// op number, and k is the commit number.
fn on_do_view_change(self: *Replica, message: *Message) void {
    if (self.ignore_view_change_message(message)) return;
    assert(self.status == .normal or self.status == .view_change);
    assert(message.header.view >= self.view);
    assert(self.leader_index(message.header.view) == self.replica);
    self.view_jump(message.header);
    assert(!self.view_jump_barrier);
    assert(self.status == .view_change);
    assert(message.header.view == self.view);
    // We may receive a `do_view_change` quorum from other replicas, which already have a
    // `start_view_change_quorum`, before we receive a `start_view_change_quorum`:
    if (!self.start_view_change_quorum) {
        log.notice("{}: on_do_view_change: waiting for start_view_change quorum", .{
            self.replica,
        });
        return;
    }
    // Wait until we have `f + 1` messages (including ourself) for quorum:
    const threshold = self.f + 1;
    const count = self.add_message_and_receive_quorum_exactly_once(
        self.do_view_change_from_all_replicas,
        message,
        threshold,
    ) orelse return;
    assert(count == threshold);
    assert(self.do_view_change_from_all_replicas[self.replica] != null);
    log.debug("{}: on_do_view_change: quorum received", .{self.replica});
    // Select the latest log header and the largest commit number across the quorum:
    var latest = Header.reserved();
    var k: ?u64 = null;
    for (self.do_view_change_from_all_replicas) |received, replica| {
        if (received) |m| {
            assert(m.header.command == .do_view_change);
            assert(m.header.cluster == self.cluster);
            assert(m.header.replica == replica);
            assert(m.header.view == self.view);
            if (k == null or m.header.commit > k.?) k = m.header.commit;
            self.set_latest_header(self.message_body_as_headers(m), &latest);
        }
    }
    self.set_latest_op_and_k(&latest, k.?, "on_do_view_change");
    // Now that we have the latest op in place, repair any other headers:
    for (self.do_view_change_from_all_replicas) |received| {
        if (received) |m| {
            for (self.message_body_as_headers(m)) |*h| {
                _ = self.repair_header(h);
            }
        }
    }
    // Verify that the repairs above have not replaced or advanced the latest op:
    assert(self.journal.entry_for_op_exact(self.op).?.checksum == latest.checksum);
    assert(self.start_view_change_quorum);
    assert(!self.do_view_change_quorum);
    self.do_view_change_quorum = true;
    // Start repairs according to the CTRL protocol:
    assert(!self.repair_timeout.ticking);
    self.repair_timeout.start();
    self.repair();
}
/// When other replicas receive the start_view message, they replace their log with the one
/// in the message, set their op number to that of the latest entry in the log, set their
/// view number to the view number in the message, change their status to normal, and update
/// the information in their client table. If there are non-committed operations in the log,
/// they send a ⟨prepare_ok v, n, i⟩ message to the primary; here n is the op-number. Then
/// they execute all operations known to be committed that they haven’t executed previously,
/// advance their commit number, and update the information in their client table.
fn on_start_view(self: *Replica, message: *const Message) void {
    if (self.ignore_view_change_message(message)) return;
    assert(self.status == .normal or self.status == .view_change);
    assert(message.header.view >= self.view);
    assert(message.header.replica != self.replica);
    assert(message.header.replica == self.leader_index(message.header.view));
    self.view_jump(message.header);
    assert(!self.view_jump_barrier or self.status == .normal);
    assert(self.status == .view_change or self.view_jump_barrier);
    assert(message.header.view == self.view);
    // Install the leader's latest op and commit number from the message body:
    var latest = Header.reserved();
    self.set_latest_header(self.message_body_as_headers(message), &latest);
    assert(latest.op == message.header.op);
    self.set_latest_op_and_k(&latest, message.header.commit, "on_start_view");
    // Now that we have the latest op in place, repair any other headers:
    for (self.message_body_as_headers(message)) |*h| {
        _ = self.repair_header(h);
    }
    // Verify that the repairs above have not replaced or advanced the latest op:
    assert(self.journal.entry_for_op_exact(self.op).?.checksum == latest.checksum);
    if (self.view_jump_barrier) {
        // We requested this start_view to resolve a view jump while in normal status:
        assert(self.status == .normal);
        self.view_jump_barrier = false;
        log.notice("{}: on_start_view: resolved view jump barrier", .{self.replica});
    } else {
        assert(self.status == .view_change);
        self.transition_to_normal_status(message.header.view);
    }
    assert(!self.view_jump_barrier);
    assert(self.status == .normal);
    assert(message.header.view == self.view);
    assert(self.follower());
    // TODO Send prepare_ok messages for uncommitted ops.
    self.commit_ops_through(self.commit_max);
    self.repair();
}
/// The leader replies to a `request_start_view` with a `start_view` message so
/// that the requesting replica can catch up to the current view.
fn on_request_start_view(self: *Replica, message: *const Message) void {
    if (self.ignore_repair_message(message)) return;
    assert(self.status == .normal);
    assert(message.header.view == self.view);
    assert(message.header.replica != self.replica);
    assert(self.leader());
    // Message allocation may fail; the requester will simply retry:
    const start_view = self.create_do_view_change_or_start_view_message(.start_view) orelse {
        log.debug("{}: on_request_start_view: dropping start_view, no message available", .{
            self.replica,
        });
        return;
    };
    defer self.message_bus.unref(start_view);
    assert(start_view.references == 1);
    assert(start_view.header.command == .start_view);
    assert(start_view.header.view == self.view);
    assert(start_view.header.op == self.op);
    assert(start_view.header.commit == self.commit_max);
    self.send_message_to_replica(message.header.replica, start_view);
}
/// Replies to a `request_prepare` by resending the requested prepare if we have
/// it clean, or by nacking it during a view change when it is safe to do so.
fn on_request_prepare(self: *Replica, message: *const Message) void {
    if (self.ignore_repair_message(message)) return;
    assert(self.status == .normal or self.status == .view_change);
    assert(message.header.view == self.view);
    assert(message.header.replica != self.replica);
    const op = message.header.op;
    var checksum: ?u128 = message.header.nonce;
    // The leader may be asked for an op by number only (nonce zero means any checksum):
    if (self.leader_index(self.view) == self.replica and message.header.nonce == 0) {
        checksum = null;
    }
    if (self.journal.entry_for_op_exact_with_checksum(op, checksum)) |entry| {
        assert(entry.op == op);
        if (!self.journal.dirty.bit(op)) {
            assert(!self.journal.faulty.bit(op));
            // Only one prepare read may be in flight at a time:
            if (self.sending_prepare) return;
            self.sending_prepare_frame = async self.send_prepare_to_replica(
                message.header.replica,
                op,
                checksum,
            );
            // We have guaranteed the prepare and our copy is clean (not safe to nack).
            return;
        } else if (self.journal.faulty.bit(op)) {
            // We have guaranteed the prepare but our copy is faulty (not safe to nack).
            return;
        }
        // We know of the prepare but we have yet to write or guarantee it (safe to nack).
        // Continue through below...
    }
    if (self.status == .view_change) {
        assert(message.header.replica == self.leader_index(self.view));
        assert(checksum != null);
        if (self.journal.entry_for_op_exact_with_checksum(op, checksum) != null) {
            assert(self.journal.dirty.bit(op) and !self.journal.faulty.bit(op));
        }
        self.send_header_to_replica(message.header.replica, .{
            .command = .nack_prepare,
            .cluster = self.cluster,
            .replica = self.replica,
            .view = self.view,
            .op = op,
            .nonce = checksum.?,
        });
    }
}
/// Replies to a `request_headers` with a `headers` message containing up to 64
/// of our latest journal headers in the inclusive op range [commit, op].
fn on_request_headers(self: *Replica, message: *const Message) void {
    if (self.ignore_repair_message(message)) return;
    assert(self.status == .normal or self.status == .view_change);
    assert(message.header.view == self.view);
    assert(message.header.replica != self.replica);
    // The requested range is encoded in the commit (op_min) and op (op_max) fields:
    const op_min = message.header.commit;
    const op_max = message.header.op;
    assert(op_max >= op_min);
    // We must add 1 because op_max and op_min are both inclusive:
    const count_max = @intCast(u32, std.math.min(64, op_max - op_min + 1));
    assert(count_max > 0);
    const size_max = @sizeOf(Header) + @sizeOf(Header) * count_max;
    const response = self.message_bus.get_message() orelse {
        log.debug("{}: on_request_headers: dropping response, no message available", .{
            self.replica,
        });
        return;
    };
    defer self.message_bus.unref(response);
    response.header.* = .{
        .command = .headers,
        // We echo the nonce back to the replica so that they can match up our response:
        .nonce = message.header.nonce,
        .cluster = self.cluster,
        .replica = self.replica,
        .view = self.view,
    };
    const count = self.journal.copy_latest_headers_between(
        op_min,
        op_max,
        std.mem.bytesAsSlice(Header, response.buffer[@sizeOf(Header)..size_max]),
    );
    // Size the response to the headers actually copied, then checksum last:
    response.header.size = @intCast(u32, @sizeOf(Header) + @sizeOf(Header) * count);
    const body = response.buffer[@sizeOf(Header)..response.header.size];
    response.header.set_checksum_body(body);
    response.header.set_checksum();
    self.send_message_to_replica(message.header.replica, response);
}
/// The new leader, during a view change, collects `nack_prepare` messages for an
/// uncommitted op and, once a quorum of nacks is received, discards that op and
/// all ops after it from the journal.
fn on_nack_prepare(self: *Replica, message: *Message) void {
    if (self.ignore_repair_message(message)) return;
    assert(self.status == .view_change);
    assert(message.header.view == self.view);
    assert(message.header.replica != self.replica);
    assert(self.leader_index(self.view) == self.replica);
    assert(self.do_view_change_quorum);
    assert(self.repairs_allowed());
    if (self.nack_prepare_op == null) {
        log.debug("{}: on_nack_prepare: ignoring (no longer expected)", .{self.replica});
        return;
    }
    const op = self.nack_prepare_op.?;
    const checksum = self.journal.entry_for_op_exact(op).?.checksum;
    if (message.header.op != op) {
        log.debug("{}: on_nack_prepare: ignoring (repairing another op)", .{self.replica});
        return;
    }
    // Followers may not send a `nack_prepare` for a different checksum:
    assert(message.header.nonce == checksum);
    // We require a `nack_prepare` from a majority of followers if our op is faulty:
    // Otherwise, we know we do not have the op and need only `f` other nacks.
    assert(self.replica_count > 1);
    assert(self.f > 0 or self.replica_count == 2);
    assert(self.f + 1 == (self.replica_count - 1) / 2 + 1);
    const threshold = if (self.journal.faulty.bit(op)) self.f + 1 else std.math.max(1, self.f);
    // Wait until we have `threshold` messages for quorum:
    const count = self.add_message_and_receive_quorum_exactly_once(
        self.nack_prepare_from_other_replicas,
        message,
        threshold,
    ) orelse return;
    assert(count == threshold);
    assert(self.nack_prepare_from_other_replicas[self.replica] == null);
    log.debug("{}: on_nack_prepare: quorum received", .{self.replica});
    assert(self.valid_hash_chain("on_nack_prepare"));
    // Only uncommitted ops within our log may be discarded:
    assert(op > self.commit_max);
    assert(op <= self.op);
    assert(self.journal.entry_for_op_exact_with_checksum(op, checksum) != null);
    assert(self.journal.dirty.bit(op));
    log.debug("{}: on_nack_prepare: discarding uncommitted ops={}..{}", .{
        self.replica,
        op,
        self.op,
    });
    self.journal.remove_entries_from(op);
    self.op = op - 1;
    assert(self.journal.entry_for_op(op) == null);
    assert(!self.journal.dirty.bit(op));
    assert(!self.journal.faulty.bit(op));
    // We require that `self.op` always exists. Rewinding `self.op` could change that.
    // However, we do this only as the leader within a view change, with all headers intact.
    assert(self.journal.entry_for_op_exact(self.op) != null);
    self.reset_quorum_nack_prepare();
    self.repair();
}
/// Handles a `headers` response to one of our `request_headers`, repairing each
/// received header into our journal and then continuing the repair protocol.
fn on_headers(self: *Replica, message: *const Message) void {
    if (self.ignore_repair_message(message)) return;
    assert(self.status == .normal or self.status == .view_change);
    assert(message.header.view == self.view);
    assert(message.header.replica != self.replica);
    // Track the op range seen, only to assert below that the body was non-empty:
    var op_min: ?u64 = null;
    var op_max: ?u64 = null;
    for (self.message_body_as_headers(message)) |*h| {
        if (op_min == null or h.op < op_min.?) op_min = h.op;
        if (op_max == null or h.op > op_max.?) op_max = h.op;
        _ = self.repair_header(h);
    }
    assert(op_max.? >= op_min.?);
    self.repair();
}
/// Periodic heartbeat: broadcast a ping carrying our current view to all other
/// replicas.
fn on_ping_timeout(self: *Replica) void {
    self.ping_timeout.reset();
    // TODO We may want to ping for connectivity during a view change.
    assert(self.status == .normal);
    assert(self.leader() or self.follower());
    self.send_header_to_other_replicas(.{
        .command = .ping,
        .cluster = self.cluster,
        .replica = self.replica,
        .view = self.view,
    });
}
/// Fires while the leader is waiting for a `prepare_ok` quorum for the in-flight
/// prepare.
///
/// Resends the prepare to one of the remote replicas that has not yet acked,
/// rotating the choice across attempts so that a single down or partitioned
/// replica cannot stall replication. If all remote replicas have acked, we are
/// waiting only on our own journal write and there is nothing to resend.
fn on_prepare_timeout(self: *Replica) void {
    // TODO Exponential backoff.
    // TODO Prevent flooding the network due to multiple concurrent rounds of replication.
    self.prepare_timeout.reset();
    self.prepare_attempt += 1;
    assert(self.status == .normal);
    assert(self.leader());
    assert(self.request_checksum != null);
    assert(self.prepare_message != null);
    // `message` is never reassigned below, so declare it const:
    const message = self.prepare_message.?;
    assert(message.header.view == self.view);
    // The list of remote replicas yet to send a prepare_ok:
    var waiting: [32]u16 = undefined;
    var waiting_len: usize = 0;
    for (self.prepare_ok_from_all_replicas) |received, replica| {
        if (received == null and replica != self.replica) {
            waiting[waiting_len] = @intCast(u16, replica);
            waiting_len += 1;
            // Guard against overflowing the fixed-size buffer:
            if (waiting_len == waiting.len) break;
        }
    }
    if (waiting_len == 0) {
        // Every remote replica has acked; only our own journal write is outstanding:
        log.debug("{}: on_prepare_timeout: waiting for journal", .{self.replica});
        assert(self.prepare_ok_from_all_replicas[self.replica] == null);
        return;
    }
    for (waiting[0..waiting_len]) |replica| {
        log.debug("{}: on_prepare_timeout: waiting for replica {}", .{ self.replica, replica });
    }
    // Cycle through the list for each attempt to reach live replicas and get around partitions:
    // If only the first replica in the list was chosen... liveness would suffer if it was down!
    const replica = waiting[@mod(self.prepare_attempt, waiting_len)];
    assert(replica != self.replica);
    log.debug("{}: on_prepare_timeout: replicating to replica {}", .{ self.replica, replica });
    self.send_message_to_replica(replica, message);
}
/// Periodic heartbeat from the leader: broadcast the latest commit number (and
/// its entry checksum as the nonce) so followers can advance their commits
/// between prepares.
fn on_commit_timeout(self: *Replica) void {
    self.commit_timeout.reset();
    assert(self.status == .normal);
    assert(self.leader());
    assert(self.commit_min == self.commit_max);
    // TODO Snapshots: Use snapshot checksum if commit is no longer in journal.
    const entry = self.journal.entry_for_op_exact(self.commit_max).?;
    const commit = Header{
        .command = .commit,
        .nonce = entry.checksum,
        .cluster = self.cluster,
        .replica = self.replica,
        .view = self.view,
        .commit = self.commit_max,
    };
    self.send_header_to_other_replicas(commit);
}
/// A follower has not heard from the leader within the timeout:
/// suspect the leader and initiate a change to the next view.
fn on_normal_timeout(self: *Replica) void {
    assert(self.status == .normal);
    assert(self.follower());
    const next_view = self.view + 1;
    self.transition_to_view_change_status(next_view);
}
/// The current view change has stalled: escalate by starting a change to the
/// view after this one.
fn on_view_change_timeout(self: *Replica) void {
    assert(self.status == .view_change);
    const next_view = self.view + 1;
    self.transition_to_view_change_status(next_view);
}
/// Periodically retransmits view change messages, since they may be dropped:
/// `start_view_change` to everyone, and `do_view_change` to the new leader.
fn on_view_change_message_timeout(self: *Replica) void {
    self.view_change_message_timeout.reset();
    assert(self.status == .view_change);
    // Keep sending `start_view_change` messages:
    // We may have a `start_view_change_quorum` but other replicas may not.
    // However, the leader may stop sending once it has a `do_view_change_quorum`.
    if (!self.do_view_change_quorum) self.send_start_view_change();
    // It is critical that a `do_view_change` message implies a `start_view_change_quorum`:
    if (self.start_view_change_quorum) {
        // The leader need not retry to send a `do_view_change` message to itself:
        // We assume the MessageBus will not drop messages sent by a replica to itself.
        if (self.leader_index(self.view) != self.replica) self.send_do_view_change();
    }
}
/// Periodically drives the repair protocol for as long as repairs are allowed.
fn on_repair_timeout(self: *Replica) void {
    const repair_allowed = self.status == .normal or self.status == .view_change;
    assert(repair_allowed);
    self.repair();
}
/// Records `message` in the per-replica `messages` slots and counts unique
/// messages received so far.
///
/// Returns the count exactly once — on the receipt that first reaches
/// `threshold` — so the caller's quorum state transition runs exactly once.
/// Returns null for duplicates, sub-threshold counts, and any receipt after
/// quorum was already reached.
fn add_message_and_receive_quorum_exactly_once(
    self: *Replica,
    messages: []?*Message,
    message: *Message,
    threshold: u32,
) ?usize {
    assert(messages.len == self.replica_count);
    assert(message.header.cluster == self.cluster);
    assert(message.header.view == self.view);
    // Each quorum type is only valid in a specific status/role:
    switch (message.header.command) {
        .prepare_ok => {
            assert(self.status == .normal);
            assert(self.leader());
            assert(message.header.nonce == self.prepare_message.?.header.checksum);
        },
        .start_view_change => assert(self.status == .view_change),
        .do_view_change, .nack_prepare => {
            assert(self.status == .view_change);
            assert(self.leader_index(self.view) == self.replica);
        },
        else => unreachable,
    }
    assert(threshold >= 1);
    assert(threshold <= self.replica_count);
    const command: []const u8 = @tagName(message.header.command);
    // Do not allow duplicate messages to trigger multiple passes through a state transition:
    if (messages[message.header.replica]) |m| {
        // Assert that this truly is a duplicate message and not a different message:
        assert(m.header.command == message.header.command);
        assert(m.header.replica == message.header.replica);
        assert(m.header.view == message.header.view);
        assert(m.header.op == message.header.op);
        assert(m.header.commit == message.header.commit);
        assert(m.header.checksum_body == message.header.checksum_body);
        assert(m.header.checksum == message.header.checksum);
        log.debug("{}: on_{s}: ignoring (duplicate message)", .{ self.replica, command });
        return null;
    }
    // Record the first receipt of this message:
    assert(messages[message.header.replica] == null);
    messages[message.header.replica] = message.ref();
    // Count the number of unique messages now received:
    const count = self.count_quorum(messages, message.header.command, message.header.nonce);
    log.debug("{}: on_{s}: {} message(s)", .{ self.replica, command, count });
    // Wait until we have exactly `threshold` messages for quorum:
    if (count < threshold) {
        log.debug("{}: on_{s}: waiting for quorum", .{ self.replica, command });
        return null;
    }
    // This is not the first time we have had quorum, the state transition has already happened:
    if (count > threshold) {
        log.debug("{}: on_{s}: ignoring (quorum received already)", .{ self.replica, command });
        return null;
    }
    assert(count == threshold);
    return count;
}
/// Starts an asynchronous journal write for `message`, a prepare for the current view/op.
/// If a write is already in flight, the message is diverted to the repair path instead.
fn append(self: *Replica, message: *Message) void {
assert(self.status == .normal);
assert(message.header.command == .prepare);
assert(message.header.view == self.view);
assert(message.header.op == self.op);
// Only one append may be in flight at a time, guarded by `self.appending`:
if (self.appending) {
log.debug("{}: append: skipping (slow journal outrun by quorum)", .{self.replica});
self.repair_later(message);
return;
}
log.debug("{}: append: appending to journal", .{self.replica});
// write_to_journal() receives &self.appending, presumably to set/clear the in-flight
// flag around the async write — confirm against write_to_journal's definition.
self.appending_frame = async self.write_to_journal(message, &self.appending);
}
/// Returns whether `b` succeeds `a` by having a newer view, or the same view and a newer op.
fn ascending_viewstamps(
    self: *Replica,
    a: *const Header,
    b: *const Header,
) bool {
    assert(a.command == .prepare);
    assert(b.command == .prepare);
    // When the views differ we make no claim about the relative order of the ops,
    // since ops may be reordered during a view change:
    if (a.view != b.view) return a.view < b.view;
    // Within the same view, two distinct prepares must have distinct ops:
    assert(a.op != b.op);
    return a.op < b.op;
}
/// Choose a different replica each time if possible (excluding ourself).
/// The choice of replica is a deterministic function of:
/// 1. `choose_any_other_replica_ticks`, and
/// 2. whether the replica is connected and ready for sending in the MessageBus.
fn choose_any_other_replica(self: *Replica) ?u16 {
    var attempts: usize = self.replica_count;
    while (attempts > 0) : (attempts -= 1) {
        // Advance the tick counter so that successive calls rotate round-robin:
        self.choose_any_other_replica_ticks += 1;
        const candidate = @mod(
            self.replica + self.choose_any_other_replica_ticks,
            self.replica_count,
        );
        // Never choose ourself:
        if (candidate == self.replica) continue;
        // TODO if (!MessageBus.can_send_to_replica(candidate)) continue;
        return @intCast(u16, candidate);
    }
    return null;
}
/// Commits ops in order up to `commit` (or as far as the journal currently allows),
/// advancing `commit_min` one op at a time and raising `commit_max` if `commit` is ahead.
/// Idempotent and re-entrancy-guarded: concurrent invocations are coalesced.
fn commit_ops_through(self: *Replica, commit: u64) void {
// TODO Restrict `view_change` status only to the leader purely as defense-in-depth.
// Be careful of concurrency when doing this, as successive view changes can happen quickly.
assert(self.status == .normal or self.status == .view_change);
assert(self.commit_min <= self.commit_max);
assert(self.commit_min <= self.op);
// The next two asserts are deliberate tautologies: they document that `commit_max` and
// `commit` may each be on either side of `self.op` (we may hear of commits we lack).
assert(self.commit_max <= self.op or self.commit_max > self.op);
assert(commit <= self.op or commit > self.op);
// We have already committed this far:
if (commit <= self.commit_min) return;
// Guard against multiple concurrent invocations of commit_ops_through():
if (self.committing) {
log.debug("{}: commit_ops_through: already committing...", .{self.replica});
return;
}
self.committing = true;
defer self.committing = false;
if (commit > self.commit_max) {
log.debug("{}: commit_ops_through: advancing commit_max={}..{}", .{
self.replica,
self.commit_max,
commit,
});
self.commit_max = commit;
}
if (!self.valid_hash_chain("commit_ops_through")) return;
assert(!self.view_jump_barrier);
assert(self.op >= self.commit_max);
// We may receive commit numbers for ops we do not yet have (`commit_max > self.op`):
// Even a naive state transfer may fail to correct for this.
while (self.commit_min < self.commit_max and self.commit_min < self.op) {
const op = self.commit_min + 1;
const checksum = self.journal.entry_for_op_exact(op).?.checksum;
if (self.create_prepare_message(op, checksum)) |prepare| {
defer self.message_bus.unref(prepare);
// Things change quickly when we're reading from disk:
if (self.status != .normal and self.status != .view_change) return;
// Guard against any re-entrancy concurrent to reading this prepare from disk:
assert(op == self.commit_min + 1);
assert(prepare.header.op == op);
assert(prepare.header.checksum == checksum);
const commit_min = self.commit_min;
// commit_op() must advance commit_min by exactly one, checked below:
self.commit_op(prepare);
assert(self.commit_min == commit_min + 1);
assert(self.commit_min <= self.op);
} else {
// The prepare is not yet available (dirty, faulty, or no message) — retry later.
return;
}
}
// This is an optimization to expedite the view change without waiting for `repair_timeout`:
if (self.status == .view_change and self.repairs_allowed()) self.repair();
}
/// Executes a single prepare against the state machine, increments `commit_min`, and
/// (when we are the leader of the current view) sends the reply to the client.
/// Precondition: `prepare` is exactly the next op after `commit_min`.
fn commit_op(self: *Replica, prepare: *const Message) void {
assert(self.status == .normal or self.status == .view_change);
assert(prepare.header.command == .prepare);
assert(prepare.header.op == self.commit_min + 1);
assert(prepare.header.op <= self.op);
if (!self.valid_hash_chain("commit_op")) return;
assert(!self.view_jump_barrier);
assert(self.op >= self.commit_max);
// Acquire a reply message up front; if none is available, bail without committing so the
// caller's `commit_min` assertion path is never entered half-done:
const reply = self.message_bus.get_message() orelse {
log.debug("{}: commit_op: waiting for a message", .{self.replica});
return;
};
defer self.message_bus.unref(reply);
// Apply the operation: input is the prepare body, output is written into the reply body.
const reply_body_size = @intCast(u32, self.state_machine.commit(
prepare.header.operation,
prepare.buffer[@sizeOf(Header)..prepare.header.size],
reply.buffer[@sizeOf(Header)..],
));
log.debug("{}: commit_op: executing op={} checksum={} ({s})", .{
self.replica,
prepare.header.op,
prepare.header.checksum,
@tagName(prepare.header.operation),
});
self.commit_min += 1;
assert(self.commit_min == prepare.header.op);
if (self.commit_min > self.commit_max) self.commit_max = self.commit_min;
// The reply's nonce carries the prepare's checksum, linking reply to request:
reply.header.* = .{
.command = .reply,
.operation = prepare.header.operation,
.nonce = prepare.header.checksum,
.client = prepare.header.client,
.request = prepare.header.request,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.op = prepare.header.op,
.commit = prepare.header.op,
.size = @sizeOf(Header) + reply_body_size,
};
assert(reply.header.offset == 0);
assert(reply.header.epoch == 0);
// Checksum the body first: the header checksum covers `checksum_body` and must come last.
reply.header.set_checksum_body(reply.buffer[@sizeOf(Header)..reply.header.size]);
reply.header.set_checksum();
// TODO Add reply to the client table to answer future duplicate requests idempotently.
// Lookup client table entry using client id.
// If client's last request id is <= this request id, then update client table entry.
// Otherwise the client is already ahead of us, and we don't need to update the entry.
if (self.leader_index(self.view) == self.replica) {
log.debug("{}: commit_op: replying to client: {}", .{ self.replica, reply.header });
self.message_bus.send_message_to_client(reply.header.client, reply);
}
}
/// Counts the non-null quorum slots in `messages`, asserting that every stored message
/// matches the expected command, nonce, cluster, view, and sending replica.
fn count_quorum(self: *Replica, messages: []?*Message, command: Command, nonce: u128) usize {
    assert(messages.len == self.replica_count);
    var total: usize = 0;
    for (messages) |slot, replica| {
        const m = slot orelse continue;
        assert(m.header.command == command);
        assert(m.header.nonce == nonce);
        assert(m.header.cluster == self.cluster);
        assert(m.header.replica == replica);
        assert(m.header.view == self.view);
        switch (command) {
            .prepare_ok => {},
            // We never send start_view_change or nack_prepare to ourself:
            .start_view_change => assert(m.header.replica != self.replica),
            .do_view_change => {},
            .nack_prepare => {
                assert(m.header.replica != self.replica);
                assert(m.header.op == self.nack_prepare_op.?);
            },
            else => unreachable,
        }
        total += 1;
    }
    return total;
}
/// The caller owns the returned message, if any, which has exactly 1 reference.
/// Builds a `do_view_change` or `start_view` message whose body is the journal's latest
/// headers (up to 7 of them, given the size cap below) for ops 0..self.op.
fn create_do_view_change_or_start_view_message(self: *Replica, command: Command) ?*Message {
assert(command == .do_view_change or command == .start_view);
// We may also send a start_view message in normal status to resolve a follower's view jump:
assert(self.status == .normal or self.status == .view_change);
// Cap the message at 8 headers total: 1 message header + up to 7 body headers.
const size_max = @sizeOf(Header) * 8;
const message = self.message_bus.get_message() orelse return null;
// The deferred unref is balanced by the ref() returned below, leaving exactly 1 reference.
defer self.message_bus.unref(message);
message.header.* = .{
.command = command,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.op = self.op,
.commit = self.commit_max,
};
var dest = std.mem.bytesAsSlice(Header, message.buffer[@sizeOf(Header)..size_max]);
const count = self.journal.copy_latest_headers_between(0, self.op, dest);
// At minimum the header for `self.op` must exist in the journal:
assert(count > 0);
message.header.size = @intCast(u32, @sizeOf(Header) + @sizeOf(Header) * count);
const body = message.buffer[@sizeOf(Header)..message.header.size];
// Checksum the body before the header, since the header checksum covers the body checksum:
message.header.set_checksum_body(body);
message.header.set_checksum();
return message.ref();
}
/// The caller owns the returned message, if any, which has exactly 1 reference.
/// Reconstructs the prepare for `op` (optionally pinned to `checksum`) from the journal,
/// reading from disk if the entry is larger than a bare header. Returns null (with a
/// logged notice) whenever the prepare cannot currently be produced.
fn create_prepare_message(self: *Replica, op: u64, checksum: ?u128) ?*Message {
if (op > self.op) {
self.create_prepare_message_notice(op, checksum, "beyond self.op");
return null;
}
const exact = self.journal.entry_for_op_exact_with_checksum(op, checksum);
if (exact == null) {
self.create_prepare_message_notice(op, checksum, "no entry exactly");
return null;
}
// Dirty or faulty entries have no trustworthy body on disk yet:
if (self.journal.faulty.bit(op)) {
self.create_prepare_message_notice(op, checksum, "faulty");
return null;
}
if (self.journal.dirty.bit(op)) {
self.create_prepare_message_notice(op, checksum, "dirty");
return null;
}
// Do not use this pointer beyond the read() below, as the header memory may then change:
const entry = exact.?;
const sector_size = @intCast(u32, Journal.sector_ceil(entry.size));
assert(sector_size >= entry.size);
const message = self.message_bus.get_message() orelse {
self.create_prepare_message_notice(op, checksum, "no message available");
return null;
};
// Balanced by the ref() on each success path, leaving exactly 1 reference for the caller.
defer self.message_bus.unref(message);
// Skip the disk read if the header is all we need:
if (entry.size == @sizeOf(Header)) {
message.header.* = entry.*;
return message.ref();
}
assert(entry.offset + sector_size <= self.journal.size_circular_buffer);
self.journal.read_sectors(
message.buffer[0..sector_size],
self.journal.offset_in_circular_buffer(entry.offset),
);
// Re-validate everything after the read, since the journal may have changed meanwhile:
if (!message.header.valid_checksum()) {
self.create_prepare_message_notice(op, checksum, "corrupt header after read");
return null;
}
const body = message.buffer[@sizeOf(Header)..message.header.size];
if (!message.header.valid_checksum_body(body)) {
self.create_prepare_message_notice(op, checksum, "corrupt body after read");
return null;
}
if (message.header.op != op) {
self.create_prepare_message_notice(op, checksum, "op changed during read");
return null;
}
if (checksum != null and message.header.checksum != checksum.?) {
self.create_prepare_message_notice(op, checksum, "checksum changed during read");
return null;
}
return message.ref();
}
/// Logs why create_prepare_message() could not produce the prepare for `op`/`checksum`.
fn create_prepare_message_notice(
self: *Replica,
op: u64,
checksum: ?u128,
notice: []const u8,
) void {
log.notice("{}: create_prepare_message: op={} checksum={}: {s}", .{
self.replica,
op,
checksum,
notice,
});
}
/// Drains the repair queue, unlinking and unreferencing every queued message.
fn discard_repair_queue(self: *Replica) void {
    while (self.repair_queue) |message| {
        assert(self.repair_queue_len > 0);
        log.notice("{}: discard_repair_queue: op={}", .{ self.replica, message.header.op });
        // Unlink before unref so the queue never points at a released message:
        self.repair_queue = message.next;
        message.next = null;
        self.repair_queue_len -= 1;
        self.message_bus.unref(message);
    }
    assert(self.repair_queue_len == 0);
}
/// Returns true if this repair-protocol message should be dropped (logging the reason).
/// Filters by status, view, sender, and leader/follower role before any repair handler runs.
fn ignore_repair_message(self: *Replica, message: *const Message) bool {
assert(message.header.command == .request_start_view or
message.header.command == .request_headers or
message.header.command == .request_prepare or
message.header.command == .headers or
message.header.command == .nack_prepare);
const command: []const u8 = @tagName(message.header.command);
if (self.status != .normal and self.status != .view_change) {
log.debug("{}: on_{s}: ignoring ({})", .{ self.replica, command, self.status });
return true;
}
if (message.header.view < self.view) {
log.debug("{}: on_{s}: ignoring (older view)", .{ self.replica, command });
return true;
}
// We should never view jump unless we know what our status should be after the jump:
// Otherwise we may be normal before the leader, or in a view change that has completed.
if (message.header.view > self.view) {
log.debug("{}: on_{s}: ignoring (newer view)", .{ self.replica, command });
return true;
}
if (self.ignore_repair_message_during_view_change(message)) return true;
if (message.header.replica == self.replica) {
log.warn("{}: on_{s}: ignoring (self)", .{ self.replica, command });
return true;
}
if (self.leader_index(self.view) != self.replica) {
switch (message.header.command) {
// Only the leader may receive these messages:
.request_start_view, .nack_prepare => {
log.warn("{}: on_{s}: ignoring (follower)", .{ self.replica, command });
return true;
},
// Only the leader may answer a request for a prepare without a nonce:
.request_prepare => if (message.header.nonce == 0) {
log.warn("{}: on_{s}: ignoring (no nonce)", .{ self.replica, command });
return true;
},
else => {},
}
}
// A nack_prepare in normal status is stale: nacks only matter during a view change.
if (message.header.command == .nack_prepare and self.status == .normal) {
log.debug("{}: on_{s}: ignoring (view started)", .{ self.replica, command });
return true;
}
// Only allow repairs for same view as defense-in-depth:
assert(message.header.view == self.view);
return false;
}
/// Returns true if this repair message should be dropped specifically because we are in a
/// view change: requests may only come from the new leader, and replies may only be
/// accepted by the new leader once it has its do_view_change quorum.
fn ignore_repair_message_during_view_change(self: *Replica, message: *const Message) bool {
if (self.status != .view_change) return false;
const command: []const u8 = @tagName(message.header.command);
switch (message.header.command) {
.request_start_view => {
log.debug("{}: on_{s}: ignoring (view change)", .{ self.replica, command });
return true;
},
.request_headers, .request_prepare => {
// During a view change, only the new leader may request repairs from us:
if (self.leader_index(self.view) != message.header.replica) {
log.debug("{}: on_{s}: ignoring (view change, requested by follower)", .{
self.replica,
command,
});
return true;
}
},
.headers, .nack_prepare => {
if (self.leader_index(self.view) != self.replica) {
log.debug("{}: on_{s}: ignoring (view change, received by follower)", .{
self.replica,
command,
});
return true;
} else if (!self.do_view_change_quorum) {
// The new leader must first establish its starting point from the
// do_view_change quorum before accepting repair replies:
log.debug("{}: on_{s}: ignoring (view change, waiting for quorum)", .{
self.replica,
command,
});
return true;
}
},
else => unreachable,
}
return false;
}
/// Returns true if this view-change-protocol message should be dropped (logging why).
/// Filters recovering replicas, stale views, already-started views, and role mismatches.
fn ignore_view_change_message(self: *Replica, message: *const Message) bool {
assert(message.header.command == .start_view_change or
message.header.command == .do_view_change or
message.header.command == .start_view);
assert(message.header.view > 0); // The initial view is already zero.
const command: []const u8 = @tagName(message.header.command);
// 4.3 Recovery
// While a replica's status is recovering it does not participate in either the request
// processing protocol or the view change protocol.
// This is critical for correctness (to avoid data loss):
if (self.status == .recovering) {
log.debug("{}: on_{s}: ignoring (recovering)", .{ self.replica, command });
return true;
}
if (message.header.view < self.view) {
log.debug("{}: on_{s}: ignoring (older view)", .{ self.replica, command });
return true;
}
// Exception: a start_view for our current view is accepted while a view jump barrier is
// in force, since that is exactly what resolves the barrier.
if (message.header.view == self.view and self.status == .normal) {
if (message.header.command != .start_view or !self.view_jump_barrier) {
log.debug("{}: on_{s}: ignoring (view started)", .{ self.replica, command });
return true;
}
}
// These may be caused by faults in the network topology.
switch (message.header.command) {
.start_view_change, .start_view => {
if (message.header.replica == self.replica) {
log.warn("{}: on_{s}: ignoring (self)", .{ self.replica, command });
return true;
}
},
.do_view_change => {
// do_view_change is addressed to the leader of the view named in the message:
if (self.leader_index(message.header.view) != self.replica) {
log.warn("{}: on_{s}: ignoring (follower)", .{ self.replica, command });
return true;
}
},
else => unreachable,
}
return false;
}
/// Returns whether this prepare is a repair of an older viewstamp, as opposed to a new
/// prepare that would advance our op.
fn is_repair(self: *Replica, message: *const Message) bool {
    assert(message.header.command == .prepare);
    switch (self.status) {
        .normal => {
            if (message.header.view < self.view) return true;
            if (message.header.view == self.view and message.header.op <= self.op) return true;
        },
        .view_change => {
            if (message.header.view < self.view) return true;
            // The view has already started or is newer.
        },
        else => {},
    }
    return false;
}
/// Advances `op` to where we need to be before `header` can be processed as a prepare:
fn jump_to_newer_op_in_normal_status(self: *Replica, header: *const Header) void {
assert(self.status == .normal);
assert(self.follower());
assert(header.view == self.view);
// A gap of at least one op must exist, otherwise no jump is needed:
assert(header.op > self.op + 1);
// We may have learned of a higher `commit_max` through a commit message before jumping to a
// newer op that is less than `commit_max` but greater than `commit_min`:
assert(header.op > self.commit_min);
log.debug("{}: jump_to_newer_op: advancing: op={}..{} checksum={}..{}", .{
self.replica,
self.op,
header.op - 1,
self.journal.entry_for_op_exact(self.op).?.checksum,
header.nonce,
});
// Set op to the predecessor of header.op so that on_prepare() can process header next:
self.op = header.op - 1;
assert(self.op >= self.commit_min);
assert(self.op + 1 == header.op);
}
/// Reinterprets the message body (everything after the message header) as a slice of Headers.
fn message_body_as_headers(self: *Replica, message: *const Message) []Header {
    // TODO Assert message commands that we expect this to be called for.
    assert(message.header.size > @sizeOf(Header)); // Body must contain at least one header.
    const body = message.buffer[@sizeOf(Header)..message.header.size];
    return std.mem.bytesAsSlice(Header, body);
}
/// Panics if immediate neighbors in the same view would have a broken hash chain.
/// Assumes gaps and does not require that a precedes b.
fn panic_if_hash_chain_would_break_in_the_same_view(
    self: *Replica,
    a: *const Header,
    b: *const Header,
) void {
    assert(a.command == .prepare);
    assert(b.command == .prepare);
    assert(a.cluster == b.cluster);
    // Only immediate neighbors within the same view are required to chain:
    if (a.view != b.view) return;
    if (a.op + 1 != b.op) return;
    // The chain holds when `b.nonce` carries `a.checksum`:
    if (a.checksum == b.nonce) return;
    assert(a.valid_checksum());
    assert(b.valid_checksum());
    log.emerg("{}: panic_if_hash_chain_would_break: a: {}", .{ self.replica, a });
    log.emerg("{}: panic_if_hash_chain_would_break: b: {}", .{ self.replica, b });
    @panic("hash chain would break");
}
/// Starting from the latest journal entry, backfill any missing or disconnected headers.
/// A header is disconnected if it breaks the hash chain with its newer neighbor to the right.
/// Since we work backwards from the latest entry, we should always be able to fix the chain.
/// Once headers are connected, backfill any dirty or faulty prepares.
fn repair(self: *Replica) void {
self.repair_timeout.reset();
assert(self.status == .normal or self.status == .view_change);
assert(self.repairs_allowed());
assert(self.commit_min <= self.op);
assert(self.commit_min <= self.commit_max);
// TODO Handle case where we are requesting reordered headers that no longer exist.
// We expect these always to exist:
assert(self.journal.entry_for_op_exact(self.commit_min) != null);
assert(self.journal.entry_for_op_exact(self.op) != null);
// Resolve any view jump by requesting the leader's latest op:
if (self.view_jump_barrier) {
assert(self.status == .normal);
assert(self.follower());
log.notice("{}: repair: resolving view jump barrier", .{self.replica});
self.send_header_to_replica(self.leader_index(self.view), .{
.command = .request_start_view,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
});
return;
}
// Request outstanding committed prepares to advance our op number:
// This handles the case of an idle cluster, where a follower will not otherwise advance.
// This is not required for correctness, but for durability.
if (self.op < self.commit_max) {
// If the leader repairs during a view change, it will have already advanced `self.op`
// to the latest op according to the quorum of `do_view_change` messages received, so we
// must therefore be in normal status:
assert(self.status == .normal);
assert(self.follower());
log.notice("{}: repair: op={} < commit_max={}", .{
self.replica,
self.op,
self.commit_max,
});
// We need to advance our op number and therefore have to `request_prepare`,
// since only `on_prepare()` can do this, not `repair_header()` in `on_headers()`.
self.send_header_to_replica(self.leader_index(self.view), .{
.command = .request_prepare,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.op = self.commit_max,
// We cannot yet know the nonce so we set it to 0:
// The nonce is optional when requesting from the leader but required otherwise.
.nonce = 0,
});
return;
}
// Request any missing or disconnected headers:
// TODO Snapshots: Ensure that self.commit_min op always exists in the journal.
var broken = self.journal.find_latest_headers_break_between(self.commit_min, self.op);
if (broken) |range| {
log.notice("{}: repair: latest break: {}", .{ self.replica, range });
assert(range.op_min > self.commit_min);
assert(range.op_max < self.op);
// A range of `op_min=0` or `op_max=0` should be impossible as a header break:
// This is the init op that is prepared when the cluster is initialized.
assert(range.op_min > 0);
assert(range.op_max > 0);
// Rotate across replicas to spread repair load; if none is available, retry on the
// next repair timeout:
if (self.choose_any_other_replica()) |replica| {
self.send_header_to_replica(replica, .{
.command = .request_headers,
.cluster = self.cluster,
.replica = self.replica,
.view = self.view,
.commit = range.op_min,
.op = range.op_max,
});
}
return;
}
// Assert that all headers are now present and connected with a perfect hash chain:
assert(!self.view_jump_barrier);
assert(self.op >= self.commit_max);
assert(self.valid_hash_chain_between(self.commit_min, self.op));
// Request and repair any dirty or faulty prepares:
if (self.journal.dirty.len > 0) return self.repair_prepares();
// Commit ops, which may in turn discover faulty prepares and drive more repairs:
if (self.commit_min < self.commit_max) return self.commit_ops_through(self.commit_max);
if (self.status == .view_change and self.leader_index(self.view) == self.replica) {
// TODO Drive uncommitted ops to completion through replication to a majority:
if (self.commit_max < self.op) {
log.debug("{}: repair: waiting for uncomitted ops to replicate", .{self.replica});
return;
}
// Start the view as the new leader:
self.start_view_as_the_new_leader();
}
}
/// Decide whether or not to insert or update a header:
///
/// A repair may never advance or replace `self.op` (critical for correctness):
///
/// Repairs must always backfill in behind `self.op` but may never advance `self.op`.
/// Otherwise, a split-brain leader may reapply an op that was removed through a view
/// change, which could be committed by a higher `commit_max` number in a commit message.
///
/// See this commit message for an example:
/// https://github.com/coilhq/tigerbeetle/commit/6119c7f759f924d09c088422d5c60ac6334d03de
///
/// Our guiding principles around repairs in general:
///
/// * The latest op makes sense of everything else and must not be replaced with a different op
/// or advanced except by the leader in the current view.
///
/// * Do not jump to a view in normal status without imposing a view jump barrier.
///
/// * Do not commit before resolving the view jump barrier with the leader.
///
/// * Do not commit until the hash chain between `self.commit_min` and `self.op` is fully
/// connected, to ensure that all the ops in this range are correct.
///
/// * Ensure that `self.commit_max` is never advanced for a newer view without first imposing a
/// view jump barrier, otherwise `self.commit_max` may again refer to different ops.
///
/// * Ensure that `self.op` is never advanced by a repair since repairs may occur in a view
/// change where the view has not yet started.
///
/// * Do not assume that an existing op with a older viewstamp can be replaced by an op with a
/// newer viewstamp, but only compare ops in the same view or with reference to the hash chain.
/// See Figure 3.7 on page 41 in Diego Ongaro's Raft thesis for an example of where an op with
/// an older view number may be committed instead of an op with a newer view number:
/// http://web.stanford.edu/~ouster/cgi-bin/papers/OngaroPhD.pdf.
///
/// Returns true if the header was accepted and marked dirty for a later prepare repair.
fn repair_header(self: *Replica, header: *const Header) bool {
assert(header.valid_checksum());
assert(header.invalid() == null);
assert(header.command == .prepare);
switch (self.status) {
.normal => assert(header.view <= self.view),
.view_change => assert(header.view < self.view),
else => unreachable,
}
if (header.op > self.op) {
log.debug("{}: repair_header: ignoring (would advance self.op)", .{self.replica});
return false;
} else if (header.op == self.op) {
// At self.op, only the exact header we already have may be repaired (e.g. re-marked
// dirty); any different header would replace self.op, which is forbidden:
if (self.journal.entry_for_op_exact_with_checksum(self.op, header.checksum) == null) {
log.debug("{}: repair_header: ignoring (would replace self.op)", .{self.replica});
return false;
}
}
if (self.journal.entry(header)) |existing| {
// Do not replace any existing op lightly as doing so may impair durability and even
// violate correctness by undoing a prepare already acknowledged to the leader:
if (existing.checksum == header.checksum) {
if (self.journal.dirty.bit(header.op)) {
// We may safely replace this existing op (with hash chain and overlap caveats):
log.debug("{}: repair_header: exists (dirty checksum)", .{self.replica});
} else {
log.debug("{}: repair_header: ignoring (clean checksum)", .{self.replica});
return false;
}
} else if (existing.view == header.view) {
// We expect that the same view and op must have the same checksum:
assert(existing.op != header.op);
// The journal must have wrapped:
if (existing.op < header.op) {
// We may safely replace this existing op (with hash chain and overlap caveats):
log.debug("{}: repair_header: exists (same view, older op)", .{self.replica});
} else if (existing.op > header.op) {
log.debug("{}: repair_header: ignoring (same view, newer op)", .{self.replica});
return false;
} else {
unreachable;
}
} else {
assert(existing.view != header.view);
// Deliberate tautology: documents that the existing op may or may not equal header.op.
assert(existing.op == header.op or existing.op != header.op);
if (self.repair_header_would_connect_hash_chain(header)) {
// We may safely replace this existing op:
log.debug("{}: repair_header: exists (hash chain break)", .{self.replica});
} else {
// We cannot replace this existing op until we are sure that doing so would not
// violate any prior commitments made to the leader.
log.debug("{}: repair_header: ignoring (hash chain doubt)", .{self.replica});
return false;
}
}
} else {
// We may repair the gap (with hash chain and overlap caveats):
log.debug("{}: repair_header: gap", .{self.replica});
}
// Caveat: Do not repair an existing op or gap if doing so would break the hash chain:
if (self.repair_header_would_break_hash_chain_with_next_entry(header)) {
log.debug("{}: repair_header: ignoring (would break hash chain)", .{self.replica});
return false;
}
// Caveat: Do not repair an existing op or gap if doing so would overlap another:
if (self.repair_header_would_overlap_another(header)) {
if (self.repair_header_would_connect_hash_chain(header)) {
// We may overlap previous entries in order to connect the hash chain:
log.debug("{}: repair_header: overlap (would connect hash chain)", .{self.replica});
} else {
log.debug("{}: repair_header: ignoring (would overlap another)", .{self.replica});
return false;
}
}
// TODO Snapshots: Skip if this header is already snapshotted.
assert(header.op < self.op or
self.journal.entry_for_op_exact(self.op).?.checksum == header.checksum);
self.journal.set_entry_as_dirty(header);
return true;
}
/// If we repair this header, then would this break the hash chain only to our immediate right?
/// This offers a weak guarantee compared to `repair_header_would_connect_hash_chain()` below.
/// However, this is useful for allowing repairs when the hash chain is sparse.
fn repair_header_would_break_hash_chain_with_next_entry(
self: *Replica,
header: *const Header,
) bool {
// Sanity-check against both neighbors: a same-view adjacency mismatch is fatal.
if (self.journal.previous_entry(header)) |previous| {
self.panic_if_hash_chain_would_break_in_the_same_view(previous, header);
}
if (self.journal.next_entry(header)) |next| {
self.panic_if_hash_chain_would_break_in_the_same_view(header, next);
if (header.checksum == next.nonce) {
assert(header.view <= next.view);
assert(header.op + 1 == next.op);
// We don't break with `next` but this is no guarantee that `next` does not break.
return false;
} else {
// If the journal has wrapped, then err in favor of a break regardless of op order:
return true;
}
}
// We are not completely sure since there is no entry to the immediate right:
return false;
}
/// If we repair this header, then would this connect the hash chain through to the latest op?
/// This offers a strong guarantee that may be used to replace or overlap an existing op.
///
/// Here is an example of what could go wrong if we did not check for complete connection:
///
/// 1. We do a prepare that's going to be committed.
/// 2. We do a stale prepare to the right of this, ignoring the hash chain break to the left.
/// 3. We do another stale prepare that replaces the first op because it connects to the second.
///
/// This would violate our quorum replication commitment to the leader.
/// The mistake in this example was not that we ignored the break to the left, which we must do
/// to repair reordered ops, but that we did not check for complete connection to the right.
fn repair_header_would_connect_hash_chain(self: *Replica, header: *const Header) bool {
    // Walk right from `header` toward `self.op`, following nonce links:
    var entry = header;
    while (entry.op < self.op) {
        const next = self.journal.next_entry(entry) orelse return false;
        // A missing link or nonce mismatch means the chain would not connect:
        if (entry.checksum != next.nonce) return false;
        assert(entry.view <= next.view);
        assert(entry.op + 1 == next.op);
        entry = next;
    }
    assert(entry.op == self.op);
    assert(entry.checksum == self.journal.entry_for_op_exact(self.op).?.checksum);
    return true;
}
/// If we repair this header, then would this overlap and overwrite part of another batch?
/// Journal entries have variable-sized batches that may overlap if entries are disconnected.
fn repair_header_would_overlap_another(self: *Replica, header: *const Header) bool {
// TODO Snapshots: Handle journal wrap around.
{
// Look behind this entry for any preceeding entry that this would overlap:
// Note the `break`: only the nearest existing predecessor is checked, since entries
// for ops further back cannot extend past it without overlapping it first.
var op: u64 = header.op;
while (op > 0) {
op -= 1;
if (self.journal.entry_for_op(op)) |neighbor| {
if (self.journal.next_offset(neighbor) > header.offset) return true;
break;
}
}
}
{
// Look beyond this entry for any succeeding entry that this would overlap:
// Again only the nearest existing successor is checked (see note above).
var op: u64 = header.op + 1;
while (op <= self.op) : (op += 1) {
if (self.journal.entry_for_op(op)) |neighbor| {
if (self.journal.next_offset(header) > neighbor.offset) return true;
break;
}
}
}
return false;
}
/// Pops and repairs queued messages (most recently queued first) while no journal repair
/// write is in flight. Stops as soon as `self.repairing` is set or the queue is empty.
fn repair_last_queued_message_if_any(self: *Replica) void {
if (self.status != .normal and self.status != .view_change) return;
if (!self.repairs_allowed()) return;
// on_repair() may set `self.repairing`, which re-checks at the top of each iteration:
while (!self.repairing) {
if (self.repair_queue) |message| {
// Balance the ref() taken when the message was queued in repair_later():
defer self.message_bus.unref(message);
assert(self.repair_queue_len > 0);
self.repair_queue = message.next;
self.repair_queue_len -= 1;
message.next = null;
self.on_repair(message);
assert(self.repair_queue != message); // Catch an accidental requeue by on_repair().
} else {
assert(self.repair_queue_len == 0);
break;
}
}
}
/// Repairs `message` immediately if no journal repair write is in flight, otherwise pushes
/// it onto the (bounded) repair queue, dropping the message if the queue is full.
fn repair_later(self: *Replica, message: *Message) void {
assert(self.repairs_allowed());
assert(self.appending or self.repairing);
assert(message.references > 0);
assert(message.header.command == .prepare);
assert(message.next == null);
if (!self.repairing) {
log.debug("{}: repair_later: repairing immediately", .{self.replica});
self.on_repair(message);
return;
}
log.debug("{}: repair_later: {} message(s)", .{ self.replica, self.repair_queue_len });
// Dropping is safe: the prepare will be requested again by repair_prepares().
if (self.repair_queue_len == self.repair_queue_max) {
log.debug("{}: repair_later: dropping", .{self.replica});
return;
}
log.debug("{}: repair_later: queueing", .{self.replica});
assert(self.repair_queue_len < self.repair_queue_max);
// Push onto the head of the intrusive list, taking a reference for the queue:
message.next = self.repair_queue;
self.repair_queue = message.ref();
self.repair_queue_len += 1;
}
/// Requests prepares for dirty journal entries, scanning from the latest op backwards,
/// with at most `repair_queue_max - repair_queue_len` requests in flight at once.
/// Stops early if an uncommitted op enters the nack_prepare protocol (see repair_prepare()).
fn repair_prepares(self: *Replica) void {
assert(self.status == .normal or self.status == .view_change);
assert(self.repairs_allowed());
assert(self.journal.dirty.len > 0);
if (self.repair_queue_len == self.repair_queue_max) {
log.debug("{}: repair_prepares: waiting for repair queue to drain", .{self.replica});
return;
}
// We may be appending to or repairing the journal concurrently.
// We do not want to re-request any of these prepares unnecessarily.
// TODO Add journal.writing bits to clear this up (and needed anyway).
if (self.appending or self.repairing) {
log.debug("{}: repair_prepares: waiting for dirty bits to settle", .{self.replica});
return;
}
// Request enough prepares to fill the repair queue:
var budget = self.repair_queue_max - self.repair_queue_len;
assert(budget > 0);
// Scan from the newest op downwards so the most recent dirty ops are repaired first:
var op = self.op + 1;
while (op > 0) {
op -= 1;
if (self.journal.dirty.bit(op)) {
// If this is an uncommitted op, and we are the leader in `view_change` status, then
// we will `request_prepare` from the rest of the cluster, set `nack_prepare_op`,
// and stop repairing any further prepares:
// This will also rebroadcast any `request_prepare` every `repair_timeout` tick.
self.repair_prepare(op);
if (self.nack_prepare_op) |nack_prepare_op| {
assert(nack_prepare_op == op);
assert(self.status == .view_change);
assert(self.leader_index(self.view) == self.replica);
assert(op > self.commit_max);
return;
}
// Otherwise, we continue to request prepares until our budget is used up:
budget -= 1;
if (budget == 0) {
log.debug("{}: repair_prepares: request budget used up", .{self.replica});
return;
}
} else {
// A faulty bit should always imply a dirty bit:
assert(!self.journal.faulty.bit(op));
}
}
}
/// During a view change, for uncommitted ops, which may be one or two, we optimize for latency:
///
/// * request a `prepare` or `nack_prepare` from all followers in parallel,
/// * repair as soon as we receive a `prepare`, or
/// * discard as soon as we receive a majority of `nack_prepare` messages for the same checksum.
///
/// For committed ops, which likely represent the bulk of repairs, we optimize for throughput:
///
/// * have multiple requests in flight to prime the repair queue,
/// * rotate these requests across the cluster round-robin,
/// * to spread the load across connected peers,
/// * to take advantage of each peer's outgoing bandwidth, and
/// * to parallelize disk seeks and disk read bandwidth.
///
/// This is effectively "many-to-one" repair, where a single replica recovers using the
/// resources of many replicas, for faster recovery.
fn repair_prepare(self: *Replica, op: u64) void {
    assert(self.status == .normal or self.status == .view_change);
    assert(self.repairs_allowed());
    assert(self.journal.dirty.bit(op));

    const request_prepare = Header{
        .command = .request_prepare,
        .cluster = self.cluster,
        .replica = self.replica,
        .view = self.view,
        .op = op,
        // If we request a prepare from a follower, as below, it is critical to pass a checksum:
        // Otherwise we could receive different prepares for the same op number.
        .nonce = self.journal.entry_for_op_exact(op).?.checksum,
    };

    if (self.status == .view_change and op > self.commit_max) {
        // Only the leader is allowed to do repairs in a view change:
        assert(self.leader_index(self.view) == self.replica);

        // Initialize the `nack_prepare` quorum counter for this uncommitted op:
        // It is also possible that we may start repairing a lower uncommitted op, having
        // initialized `nack_prepare_op` before we learn of a higher uncommitted dirty op,
        // in which case we also want to reset the quorum counter.
        if (self.nack_prepare_op) |nack_prepare_op| {
            assert(nack_prepare_op <= op);
            if (nack_prepare_op != op) {
                self.nack_prepare_op = op;
                self.reset_quorum_counter(self.nack_prepare_from_other_replicas, .nack_prepare);
            }
        } else {
            self.nack_prepare_op = op;
            self.reset_quorum_counter(self.nack_prepare_from_other_replicas, .nack_prepare);
        }

        // Uncommitted ops are requested from all other replicas in parallel (latency path):
        log.debug("{}: repair_prepare: requesting uncommitted op={}", .{ self.replica, op });
        assert(self.nack_prepare_op.? == op);
        assert(request_prepare.nonce != 0);
        self.send_header_to_other_replicas(request_prepare);
    } else {
        // Committed ops are requested from a single rotating replica (throughput path):
        log.debug("{}: repair_prepare: requesting committed op={}", .{ self.replica, op });
        // We expect that `repair_prepare()` is called in reverse chronological order:
        // Any uncommitted ops should have already been dealt with.
        // We never roll back committed ops, and thus never regard any `nack_prepare` responses.
        assert(self.nack_prepare_op == null);
        assert(request_prepare.nonce != 0);
        if (self.choose_any_other_replica()) |replica| {
            self.send_header_to_replica(replica, request_prepare);
        }
    }
}
/// Returns whether this replica may currently issue repairs:
/// always in normal status, and during a view change only once the do_view_change quorum
/// has been reached (in which case we must be the new leader).
fn repairs_allowed(self: *Replica) bool {
    switch (self.status) {
        .normal => return true,
        .view_change => {
            if (!self.do_view_change_quorum) return false;
            assert(self.leader_index(self.view) == self.replica);
            return true;
        },
        else => return false,
    }
}
/// Replicates to the next replica in the configuration (until we get back to the leader):
/// Replication starts and ends with the leader, we never forward back to the leader.
/// Does not flood the network with prepares that have already committed.
/// TODO Use recent heartbeat data for next replica to leapfrog if faulty.
fn replicate(self: *Replica, message: *Message) void {
    assert(self.status == .normal);
    assert(message.header.command == .prepare);
    assert(message.header.view == self.view);
    assert(message.header.op == self.op);

    // A prepare that has already committed needs no further replication:
    if (message.header.op <= self.commit_max) {
        log.debug("{}: replicate: not replicating (committed)", .{self.replica});
        return;
    }

    // Our ring successor, wrapping around the configuration:
    const successor = @mod(self.replica + 1, @intCast(u16, self.replica_count));
    if (successor == self.leader_index(message.header.view)) {
        // The prepare has travelled the full ring back to the leader:
        log.debug("{}: replicate: not replicating (completed)", .{self.replica});
        return;
    }

    log.debug("{}: replicate: replicating to replica {}", .{ self.replica, successor });
    self.send_message_to_replica(successor, message);
}
/// Unrefs and clears every quorum message of the given `command`, in place.
/// Used when entering a new view or status so that stale messages from an older view can
/// never be counted towards a quorum in the new view.
fn reset_quorum_counter(self: *Replica, messages: []?*Message, command: Command) void {
    var count: usize = 0;
    for (messages) |*received, replica| {
        if (received.*) |message| {
            // Each slot may only ever hold a matching message from the replica it indexes:
            assert(message.header.command == command);
            assert(message.header.replica == replica);
            assert(message.header.view <= self.view);
            self.message_bus.unref(message);
            count += 1;
        }
        received.* = null;
    }
    log.debug("{}: reset {} {s} message(s)", .{ self.replica, count, @tagName(command) });
}
/// Clears all received do_view_change messages and the corresponding quorum flag.
fn reset_quorum_do_view_change(self: *Replica) void {
    self.reset_quorum_counter(self.do_view_change_from_all_replicas, .do_view_change);
    self.do_view_change_quorum = false;
}
/// Clears all received nack_prepare messages and the op they were being counted for.
fn reset_quorum_nack_prepare(self: *Replica) void {
    self.reset_quorum_counter(self.nack_prepare_from_other_replicas, .nack_prepare);
    self.nack_prepare_op = null;
}
/// Clears all state for the in-flight prepare, if any: the prepare message itself, its
/// attempt counter and timeout, and any prepare_ok messages received so far.
fn reset_quorum_prepare(self: *Replica) void {
    if (self.prepare_message) |message| {
        // NOTE(review): `request_checksum` is only reset when a prepare was in flight;
        // the unconditional assert below implies it is otherwise already null — confirm.
        self.request_checksum = null;
        self.message_bus.unref(message);
        self.prepare_message = null;
        self.prepare_attempt = 0;
        self.prepare_timeout.stop();
        self.reset_quorum_counter(self.prepare_ok_from_all_replicas, .prepare_ok);
    }
    assert(self.request_checksum == null);
    assert(self.prepare_message == null);
    assert(self.prepare_attempt == 0);
    assert(self.prepare_timeout.ticking == false);
    for (self.prepare_ok_from_all_replicas) |received| assert(received == null);
}
/// Clears all received start_view_change messages and the corresponding quorum flag.
fn reset_quorum_start_view_change(self: *Replica) void {
    self.reset_quorum_counter(self.start_view_change_from_other_replicas, .start_view_change);
    self.start_view_change_quorum = false;
}
/// Sends a prepare_ok for `header` to the leader of our *current* view, provided that we
/// are in normal status, the prepare is cleanly journalled, it has not yet committed, and
/// our hash chain is valid. Otherwise, does nothing (the caller may retry later).
fn send_prepare_ok(self: *Replica, header: *const Header) void {
    assert(header.command == .prepare);
    assert(header.cluster == self.cluster);
    assert(header.replica == self.leader_index(header.view));
    assert(header.view <= self.view);
    assert(header.op <= self.op or header.view < self.view);

    if (self.status != .normal) {
        log.debug("{}: send_prepare_ok: not sending ({})", .{ self.replica, self.status });
        return;
    }

    if (header.op > self.op) {
        assert(header.view < self.view);
        // An op may be reordered concurrently through a view change while being journalled:
        log.debug("{}: send_prepare_ok: not sending (reordered)", .{self.replica});
        return;
    }

    assert(self.status == .normal);
    // After a view change followers must send prepare_oks for uncommitted ops with older views:
    // However, we will only ever send to the leader of our current view.
    assert(header.view <= self.view);
    assert(header.op <= self.op);

    // Committed ops no longer need a prepare_ok:
    if (header.op <= self.commit_max) {
        log.debug("{}: send_prepare_ok: not sending (committed)", .{self.replica});
        return;
    }

    // TODO Think through a scenario of where not doing this would be wrong.
    if (!self.valid_hash_chain("send_prepare_ok")) return;
    assert(!self.view_jump_barrier);
    assert(self.op >= self.commit_max);

    if (self.journal.has_clean(header)) {
        // It is crucial that replicas stop accepting prepare messages from earlier views once
        // they start the view change protocol. Without this constraint, the system could get
        // into a state in which there are two active primaries: the old one, which hasn't
        // failed but is merely slow or not well connected to the network, and the new one. If a
        // replica sent a prepare_ok message to the old primary after sending its log to the new
        // one, the old primary might commit an operation that the new primary doesn't learn
        // about in the do_view_change messages.
        // We therefore only send to the leader of the current view, never to the leader of the
        // prepare header's view:
        // TODO We could surprise the new leader with this, if it is preparing a different op.
        self.send_header_to_replica(self.leader_index(self.view), .{
            .command = .prepare_ok,
            .nonce = header.checksum,
            .client = header.client,
            .cluster = self.cluster,
            .replica = self.replica,
            .view = header.view,
            .op = header.op,
            .commit = header.commit,
            .offset = header.offset,
            .epoch = header.epoch,
            .request = header.request,
            .operation = header.operation,
        });
    } else {
        log.debug("{}: send_prepare_ok: not sending (dirty)", .{self.replica});
        return;
    }
}
/// Sends a `prepare_ok` for each op from `op` up to (but not including) `self.op`, for
/// ops above `commit_max`.
/// NOTE(review): the upper bound excludes `self.op` itself — presumably because the
/// prepare_ok for the latest op is sent directly by the journal write path (see
/// `write_to_journal()`); confirm against callers.
fn send_prepare_oks_through(self: *Replica, op: u64) void {
    assert(self.status == .normal);
    assert(op >= self.commit_max);
    assert(op <= self.op);

    if (!self.valid_hash_chain("send_prepare_oks_through")) return;
    assert(!self.view_jump_barrier);
    assert(self.op >= self.commit_max);

    // Zig function parameters are immutable, so `op` itself cannot be advanced:
    // iterate with a local cursor instead (the previous `op += 1` on the parameter
    // was a compile error).
    var next = op;
    while (next > self.commit_max and next < self.op) : (next += 1) {
        const header = self.journal.entry_for_op_exact(next).?;
        assert(header.op == next);
        assert(header.operation != .init);

        self.send_prepare_ok(header);
    }
}
/// Sends the prepare for `op` (optionally pinned to an exact `checksum`) to `replica`,
/// if the prepare message can be created from our journal.
fn send_prepare_to_replica(self: *Replica, replica: u16, op: u64, checksum: ?u128) void {
    assert(self.status == .normal or self.status == .view_change);
    assert(replica != self.replica);

    // Guard against re-entrancy while the prepare message is created and sent:
    assert(!self.sending_prepare);
    self.sending_prepare = true;
    defer self.sending_prepare = false;

    const message = self.create_prepare_message(op, checksum) orelse return;
    defer self.message_bus.unref(message);

    assert(message.header.op == op);
    assert(checksum == null or message.header.checksum == checksum.?);
    self.send_message_to_replica(replica, message);
}
/// Broadcasts a start_view_change for the current view.
/// Sent only to the other replicas (and not to ourself) to avoid a quorum off-by-one error:
/// This could happen if the replica mistakenly counts its own message in the quorum.
fn send_start_view_change(self: *Replica) void {
    assert(self.status == .view_change);
    assert(!self.do_view_change_quorum);

    const header = Header{
        .command = .start_view_change,
        .cluster = self.cluster,
        .replica = self.replica,
        .view = self.view,
    };
    self.send_header_to_other_replicas(header);
}
/// Sends our do_view_change message (carrying our latest op and commit state) to the new
/// leader of the current view, once our start_view_change quorum has been reached.
fn send_do_view_change(self: *Replica) void {
    assert(self.status == .view_change);
    assert(self.start_view_change_quorum);
    assert(!self.do_view_change_quorum);

    const count_start_view_change = self.count_quorum(
        self.start_view_change_from_other_replicas,
        .start_view_change,
        0,
    );
    // We count only messages from other replicas, hence a quorum of f (not f + 1):
    assert(count_start_view_change >= self.f);

    const message = self.create_do_view_change_or_start_view_message(.do_view_change) orelse {
        log.warn("{}: send_do_view_change: dropping do_view_change, no message available", .{
            self.replica,
        });
        return;
    };
    defer self.message_bus.unref(message);

    assert(message.references == 1);
    assert(message.header.command == .do_view_change);
    assert(message.header.view == self.view);
    assert(message.header.op == self.op);
    assert(message.header.commit == self.commit_max);
    // TODO Assert that latest header in message body matches self.op.

    self.send_message_to_replica(self.leader_index(self.view), message);
}
/// Sends `header` to every replica in the configuration except ourself.
fn send_header_to_other_replicas(self: *Replica, header: Header) void {
    var other: u16 = 0;
    while (other < self.replica_count) : (other += 1) {
        if (other == self.replica) continue;
        self.send_header_to_replica(other, header);
    }
}
// TODO Work out the maximum number of messages a replica may output per tick() or on_message().
/// Sends a header-only message (no body) to the given replica, with debug logging.
/// Unlike full messages, headers are never forwarded, so they must always be our own
/// and carry our current view.
fn send_header_to_replica(self: *Replica, replica: u16, header: Header) void {
    log.debug("{}: sending {s} to replica {}: {}", .{
        self.replica,
        @tagName(header.command),
        replica,
        header,
    });
    assert(header.replica == self.replica);
    assert(header.view == self.view);
    self.message_bus.send_header_to_replica(replica, header);
}
/// Sends `message` to every replica in the configuration except ourself.
fn send_message_to_other_replicas(self: *Replica, message: *Message) void {
    var other: u16 = 0;
    while (other < self.replica_count) : (other += 1) {
        if (other == self.replica) continue;
        self.send_message_to_replica(other, message);
    }
}
/// Sends a full message to the given replica, first asserting per-command invariants
/// relating our status and view to the message being sent.
fn send_message_to_replica(self: *Replica, replica: u16, message: *Message) void {
    log.debug("{}: sending {s} to replica {}: {}", .{
        self.replica,
        @tagName(message.header.command),
        replica,
        message.header,
    });
    switch (message.header.command) {
        .request => {
            // We do not assert message.header.replica as we would for send_header_to_replica()
            // because we may forward .request or .prepare messages.
            assert(self.status == .normal);
            assert(message.header.view <= self.view);
        },
        .prepare => {
            switch (self.status) {
                .normal => assert(message.header.view <= self.view),
                // During a view change we may only re-send prepares from older views:
                .view_change => assert(message.header.view < self.view),
                else => unreachable,
            }
        },
        .do_view_change => {
            assert(self.status == .view_change);
            assert(self.start_view_change_quorum);
            assert(!self.do_view_change_quorum);
            assert(message.header.view == self.view);
        },
        .start_view => switch (self.status) {
            .normal => {
                // A follower may ask the leader to resend the start_view message.
                assert(!self.start_view_change_quorum);
                assert(!self.do_view_change_quorum);
                assert(message.header.view == self.view);
            },
            .view_change => {
                assert(self.start_view_change_quorum);
                assert(self.do_view_change_quorum);
                assert(message.header.view == self.view);
            },
            else => unreachable,
        },
        .headers => {
            assert(self.status == .normal or self.status == .view_change);
            assert(message.header.view == self.view);
            assert(message.header.replica == self.replica);
        },
        else => unreachable,
    }
    assert(message.header.cluster == self.cluster);
    self.message_bus.send_message_to_replica(replica, message);
}
/// Folds `headers` into `latest`, keeping whichever prepare has the highest view,
/// breaking ties by the highest op. A `latest` with command `.reserved` acts as
/// "none yet" and is replaced by the first header seen.
fn set_latest_header(self: *Replica, headers: []Header, latest: *Header) void {
    switch (latest.command) {
        .reserved, .prepare => assert(latest.valid_checksum()),
        else => unreachable,
    }

    for (headers) |header| {
        assert(header.valid_checksum());
        assert(header.invalid() == null);
        assert(header.command == .prepare);

        const newer = latest.command == .reserved or
            header.view > latest.view or
            (header.view == latest.view and header.op > latest.op);
        if (newer) latest.* = header;
    }
}
/// During a view change, installs the canonical `latest` op and commit number `k`
/// (derived from a do_view_change or start_view) as our own `op` and `commit_max`,
/// marking the latest op dirty for repair if we do not already have it exactly, and
/// truncating any superseded newer entries from the journal.
fn set_latest_op_and_k(self: *Replica, latest: *const Header, k: u64, method: []const u8) void {
    assert(self.status == .view_change);
    assert(latest.valid_checksum());
    assert(latest.invalid() == null);
    assert(latest.command == .prepare);
    assert(latest.cluster == self.cluster);
    assert(latest.view < self.view); // Latest normal view before this view change.
    // Ops may be rewound through a view change so we use `self.commit_max` and not `self.op`:
    assert(latest.op >= self.commit_max);
    // We expect that `commit_min` may be greater than `latest.commit` because the latter is
    // only the commit number at the time the latest op was prepared (but not committed).
    // We therefore only assert `latest.commit` against `commit_max` above and `k` below.
    assert(k >= latest.commit);
    assert(k >= self.commit_max);

    log.debug("{}: {s}: view={} op={}..{} commit={}..{} checksum={} offset={}", .{
        self.replica,
        method,
        self.view,
        self.op,
        latest.op,
        self.commit_max,
        k,
        latest.checksum,
        latest.offset,
    });

    self.op = latest.op;
    self.commit_max = k;

    // Do not set the latest op as dirty if we already have it exactly:
    // Otherwise, this would trigger a repair and delay the view change.
    if (self.journal.entry_for_op_exact_with_checksum(latest.op, latest.checksum) == null) {
        self.journal.set_entry_as_dirty(latest);
    } else {
        log.debug("{}: {s}: latest op exists exactly", .{ self.replica, method });
    }

    assert(self.op == latest.op);
    // Any entries beyond the new latest op are superseded and must be removed:
    self.journal.remove_entries_from(self.op + 1);
    assert(self.journal.entry_for_op_exact(self.op).?.checksum == latest.checksum);
}
/// Completes a view change as the new leader: once the journal is fully repaired (no
/// dirty or faulty entries) and commit_min == commit_max == op, transitions to normal
/// status and broadcasts start_view so that followers can adopt the new view.
fn start_view_as_the_new_leader(self: *Replica) void {
    assert(self.status == .view_change);
    assert(self.leader_index(self.view) == self.replica);
    assert(self.do_view_change_quorum);

    // TODO Do one last count of our do_view_change quorum messages.

    assert(!self.view_jump_barrier);
    assert(self.commit_min == self.op);
    assert(self.commit_max == self.op);
    assert(self.valid_hash_chain_between(self.commit_min, self.op));

    assert(self.journal.dirty.len == 0);
    assert(self.journal.faulty.len == 0);
    assert(self.nack_prepare_op == null);

    const start_view = self.create_do_view_change_or_start_view_message(.start_view) orelse {
        // No message available right now; retried later (view change state is unchanged):
        log.warn("{}: start_view_as_the_new_leader: waiting for a message", .{self.replica});
        return;
    };
    defer self.message_bus.unref(start_view);

    self.transition_to_normal_status(self.view);

    assert(self.status == .normal);
    assert(self.leader());

    assert(start_view.references == 1);
    assert(start_view.header.command == .start_view);
    assert(start_view.header.view == self.view);
    assert(start_view.header.op == self.op);
    assert(start_view.header.commit == self.commit_max);

    self.send_message_to_other_replicas(start_view);
}
/// Enters normal status for `new_view`: starts/stops the timeouts appropriate for our
/// role (leader or follower) and resets all quorum state carried over from any view
/// change, so no stale quorum messages survive into the new view.
fn transition_to_normal_status(self: *Replica, new_view: u64) void {
    log.debug("{}: transition_to_normal_status: view={}", .{ self.replica, new_view });
    // In the VRR paper it's possible to transition from .normal to .normal for the same view.
    // For example, this could happen after a state transfer triggered by an op jump.
    assert(new_view >= self.view);
    self.view = new_view;
    self.status = .normal;

    if (self.leader()) {
        log.debug("{}: transition_to_normal_status: leader", .{self.replica});

        self.ping_timeout.start();
        self.commit_timeout.start();
        self.normal_timeout.stop();
        self.view_change_timeout.stop();
        self.view_change_message_timeout.stop();
        self.repair_timeout.start();
    } else {
        log.debug("{}: transition_to_normal_status: follower", .{self.replica});

        self.ping_timeout.start();
        self.commit_timeout.stop();
        self.normal_timeout.start();
        self.view_change_timeout.stop();
        self.view_change_message_timeout.stop();
        self.repair_timeout.start();
    }

    self.reset_quorum_prepare();
    self.reset_quorum_start_view_change();
    self.reset_quorum_do_view_change();
    self.reset_quorum_nack_prepare();

    assert(self.start_view_change_quorum == false);
    assert(self.do_view_change_quorum == false);
    assert(self.nack_prepare_op == null);
}
/// A replica i that notices the need for a view change advances its view, sets its status to
/// view_change, and sends a ⟨start_view_change v, i⟩ message to all the other replicas,
/// where v identifies the new view. A replica notices the need for a view change either based
/// on its own timer, or because it receives a start_view_change or do_view_change message for
/// a view with a larger number than its own view.
fn transition_to_view_change_status(self: *Replica, new_view: u64) void {
    log.debug("{}: transition_to_view_change_status: view={}", .{ self.replica, new_view });
    assert(new_view > self.view);
    self.view = new_view;
    self.status = .view_change;

    // Stop the normal-status timeouts and start those that drive the view change:
    self.ping_timeout.stop();
    self.commit_timeout.stop();
    self.normal_timeout.stop();
    self.view_change_timeout.start();
    self.view_change_message_timeout.start();
    self.repair_timeout.stop();

    // Do not reset quorum counters only on entering a view, assuming that the view will be
    // followed only by a single subsequent view change to the next view, because multiple
    // successive view changes can fail, e.g. after a view change timeout.
    // We must therefore reset our counters here to avoid counting messages from an older view,
    // which would violate the quorum intersection property essential for correctness.
    self.reset_quorum_prepare();
    self.reset_quorum_start_view_change();
    self.reset_quorum_do_view_change();
    self.reset_quorum_nack_prepare();

    assert(self.start_view_change_quorum == false);
    assert(self.do_view_change_quorum == false);
    assert(self.nack_prepare_op == null);

    self.send_start_view_change();
}
/// Whether it is safe to commit or send prepare_ok messages.
/// Returns true if the hash chain is valid and up to date for the current view.
/// This is a stronger guarantee than `valid_hash_chain_between()` below.
/// `method` is used only to label the log output with the caller's name.
fn valid_hash_chain(self: *Replica, method: []const u8) bool {
    // If we know we have uncommitted ops that may have been reordered through a view change
    // then wait until the latest of these has been resolved with the leader:
    if (self.view_jump_barrier) {
        log.notice("{}: {s}: waiting to resolve view jump barrier", .{ self.replica, method });
        return false;
    }

    // If we know we could validate the hash chain even further, then wait until we can:
    // This is partial defense-in-depth in case `self.op` is ever advanced by a reordered op.
    if (self.op < self.commit_max) {
        log.notice("{}: {s}: waiting for repair (op={} < commit={})", .{
            self.replica,
            method,
            self.op,
            self.commit_max,
        });
        return false;
    }

    // We must validate the hash chain as far as possible, since `self.op` may disclose a fork:
    if (!self.valid_hash_chain_between(self.commit_min, self.op)) {
        log.notice("{}: {s}: waiting for repair (hash chain)", .{ self.replica, method });
        return false;
    }

    return true;
}
/// Returns true if all operations are present, correctly ordered and connected by hash chain,
/// between `op_min` and `op_max` (both inclusive).
fn valid_hash_chain_between(self: *Replica, op_min: u64, op_max: u64) bool {
    assert(op_min <= op_max);
    // If we use anything less than self.op then we may commit ops for a forked hash chain that
    // have since been reordered by a new leader.
    assert(op_max == self.op);

    // Walk backward from `op_max`, checking that each entry's checksum matches the
    // `nonce` of its successor, i.e. that `a` hash-chains into `b`:
    var b = self.journal.entry_for_op_exact(op_max).?;

    var op = op_max;
    while (op > op_min) {
        op -= 1;

        if (self.journal.entry_for_op_exact(op)) |a| {
            assert(a.op + 1 == b.op);
            if (a.checksum == b.nonce) {
                assert(self.ascending_viewstamps(a, b));
                b = a;
            } else {
                // The chain is broken between these two adjacent entries:
                log.notice("{}: valid_hash_chain_between: break: A: {}", .{ self.replica, a });
                log.notice("{}: valid_hash_chain_between: break: B: {}", .{ self.replica, b });
                return false;
            }
        } else {
            log.notice("{}: valid_hash_chain_between: missing op={}", .{ self.replica, op });
            return false;
        }
    }

    // The walk reached `op_min` with every link verified:
    assert(b.op == op_min);
    return true;
}
/// Decides, from an incoming header's command and view, whether to jump to a newer view
/// (entering normal or view_change status) or to ignore the header. When jumping into
/// normal status with uncommitted ops still outstanding, imposes a view jump barrier to
/// block commits until our hash chain head is confirmed against the new leader.
fn view_jump(self: *Replica, header: *const Header) void {
    const to_status: Status = switch (header.command) {
        .prepare, .commit => .normal,
        .start_view_change, .do_view_change, .start_view => .view_change,
        else => unreachable,
    };

    if (self.status != .normal and self.status != .view_change) return;

    // If this is for an older view, then ignore:
    if (header.view < self.view) return;

    // Compare status transitions and decide whether to view jump or ignore:
    switch (self.status) {
        .normal => switch (to_status) {
            // If the transition is to `.normal`, then ignore if this is for the same view:
            .normal => if (header.view == self.view) return,
            // If the transition is to `.view_change`, then ignore if the view has started:
            .view_change => if (header.view == self.view) return,
            else => unreachable,
        },
        .view_change => switch (to_status) {
            // This is an interesting special case:
            // If the transition is to `.normal` in the same view, then we missed the
            // `start_view` message and we must also consider this a view jump:
            // If we don't view jump here, then our `view_change_timeout` will fire and we will
            // disrupt the cluster by starting another view change for a newer view.
            .normal => {},
            // If the transition is to `.view_change`, then ignore if this is for the same view:
            .view_change => if (header.view == self.view) return,
            else => unreachable,
        },
        else => unreachable,
    }

    if (to_status == .normal) {
        assert(header.view >= self.view);

        // (The previous unused local `command` was removed here: it was dead code and a
        // compile error under Zig's unused-local check.)
        if (header.view == self.view) {
            assert(self.status == .view_change and to_status == .normal);
            log.debug("{}: view_jump: exiting view change and starting view", .{self.replica});
        } else {
            log.debug("{}: view_jump: jumping to newer view", .{self.replica});
        }

        if (self.op > self.commit_max) {
            // We have uncommitted ops, and these may have been removed or replaced by the new
            // leader through a view change in which we were not involved.
            //
            // In Section 5.2, the VR paper simply removes these uncommitted ops and does a
            // state transfer. However, while strictly safe, this impairs safety in terms of
            // durability, and adds unnecessary repair overhead if the ops were committed.
            //
            // We rather impose a view jump barrier to keep `commit_ops_through()` from
            // committing. This preserves and maximizes durability and minimizes repair traffic.
            //
            // This view jump barrier is cleared or may be resolved, respectively, as soon as:
            // 1. we receive a new prepare from the leader that advances our latest op, or
            // 2. we request and receive a `start_view` message from the leader for this view.
            //
            // This is safe because advancing our latest op in the current view or receiving the
            // latest op from the leader both ensure that we have the latest hash chain head.
            log.notice("{}: view_jump: imposing view jump barrier", .{self.replica});
            self.view_jump_barrier = true;
        } else {
            assert(self.op == self.commit_max);

            // We may still need to resolve any prior view jump barrier:
            // For example, if we jump to view 3 and jump again to view 7 both in normal status.
            assert(self.view_jump_barrier == true or self.view_jump_barrier == false);
        }
    } else if (to_status == .view_change) {
        assert(header.view > self.view);

        // The view change will set the latest op in on_do_view_change() or on_start_view():
        // There is no need to impose a view jump barrier and any existing barrier is cleared.
        // We only need to transition to view change status.
        if (self.view_jump_barrier) {
            log.notice("{}: view_jump: clearing view jump barrier", .{self.replica});
            self.view_jump_barrier = false;
        }
    } else {
        unreachable;
    }

    switch (to_status) {
        .normal => self.transition_to_normal_status(header.view),
        .view_change => self.transition_to_view_change_status(header.view),
        else => unreachable,
    }
}
/// Writes a prepare to the journal under a write lock (`lock` is `&self.repairing` for
/// the repair path — see the final check below — and otherwise presumably
/// `&self.appending` for new prepares), then sends a prepare_ok if the entry is (or was
/// already) clean. The repair path immediately continues repairing afterwards.
fn write_to_journal(self: *Replica, message: *Message, lock: *bool) void {
    assert(lock.* == false);
    lock.* = true;
    defer lock.* = false;

    assert(message.references > 0);
    assert(message.header.command == .prepare);
    assert(message.header.view <= self.view);
    assert(message.header.op <= self.op);

    // The journal entry may have been replaced (e.g. through a view change) since this
    // write was initiated, in which case the message is stale:
    if (!self.journal.has(message.header)) {
        log.debug("{}: write_to_journal: ignoring (header changed)", .{self.replica});
        return;
    }

    if (self.journal.dirty.bit(message.header.op)) {
        self.journal.write(message);
    } else {
        // Any function that sets the faulty bit should also set the dirty bit:
        assert(!self.journal.faulty.bit(message.header.op));
        log.debug("{}: write_to_journal: skipping (clean)", .{self.replica});
        // Continue through below to send a prepare_ok message if necessary.
    }

    self.send_prepare_ok(message.header);

    // If this was a repair, continue immediately to repair the next prepare:
    // This is an optimization to eliminate waiting until the next repair timeout.
    if (lock == &self.repairing) self.repair();
}
};
/// Returns an array containing the remote or local addresses of each of the 2f + 1 replicas:
/// Unlike the VRR paper, we do not sort the array but leave the order explicitly to the user.
/// There are several advantages to this:
/// * The operator may deploy a cluster with proximity in mind since replication follows order.
/// * A replica's IP address may be changed without reconfiguration.
/// This does require that the user specify the same order to all replicas.
/// The caller owns the memory of the returned slice of addresses.
/// TODO Unit tests.
/// TODO Integrate into `src/cli.zig`.
pub fn parse_configuration(allocator: *std.mem.Allocator, raw: []const u8) ![]std.net.Address {
    var addresses = try allocator.alloc(std.net.Address, config.replicas_max);
    errdefer allocator.free(addresses);

    var index: usize = 0;
    var comma_iterator = std.mem.split(raw, ",");
    while (comma_iterator.next()) |raw_address| : (index += 1) {
        if (raw_address.len == 0) return error.AddressHasTrailingComma;
        if (index == config.replicas_max) return error.AddressLimitExceeded;

        var colon_iterator = std.mem.split(raw_address, ":");
        // The split iterator will always return non-null once, even if the delimiter is not found:
        const raw_ipv4 = colon_iterator.next().?;

        if (colon_iterator.next()) |raw_port| {
            if (colon_iterator.next() != null) return error.AddressHasMoreThanOneColon;
            const port = std.fmt.parseUnsigned(u16, raw_port, 10) catch |err| switch (err) {
                error.Overflow => return error.PortOverflow,
                error.InvalidCharacter => return error.PortInvalid,
            };
            addresses[index] = std.net.Address.parseIp4(raw_ipv4, port) catch {
                return error.AddressInvalid;
            };
        } else {
            // There was no colon in the address so there are now two cases:
            // 1. an IPv4 address with the default port, or
            // 2. a port with the default IPv4 address.
            // Let's try parsing as a port first:
            if (std.fmt.parseUnsigned(u16, raw_address, 10)) |port| {
                addresses[index] = std.net.Address.parseIp4(config.address, port) catch unreachable;
            } else |err| switch (err) {
                error.Overflow => return error.PortOverflow,
                error.InvalidCharacter => {
                    // Something was not a digit, let's try parsing as an IPv4 instead:
                    addresses[index] = std.net.Address.parseIp4(raw_address, config.port) catch {
                        return error.AddressInvalid;
                    };
                },
            }
        }
    }

    // Shrink the allocation to the number of addresses actually parsed, so that the
    // slice handed to the caller matches its underlying allocation:
    // Returning `addresses[0..index]` would hand the caller a slice whose length differs
    // from the allocated length, so freeing it would be invalid, and would leak the tail.
    return allocator.shrink(addresses, index);
}
|
src/vr.zig
|
const utils = @import("utils");
const georgios = @import("georgios");
const Info = georgios.ProcessInfo;
const platform = @import("platform.zig");
const pthreading = platform.impl.threading;
const kernel = @import("kernel.zig");
const memory = @import("memory.zig");
const print = @import("print.zig");
const MappedList = @import("mapped_list.zig").MappedList;
// Module debug switch (its use is not visible in this chunk — presumably gates extra
// logging; confirm against the rest of the file).
pub const debug = false;

// Re-export the shared threading error set from the georgios userspace library.
pub const Error = georgios.threading.Error;
/// A single kernel-scheduled thread, possibly belonging to a `Process`.
pub const Thread = struct {
    pub const Id = u32;

    pub const State = enum {
        Run,
        Wait,
    };

    id: Id = undefined,
    state: State = .Run,
    // TODO: Support multiple waits?
    // Thread to set back to `.Run` when this thread exits (single waiter only).
    wake_on_exit: ?*Thread = null,
    // Whether this thread runs in kernel mode; inherited from the owning process in init().
    kernel_mode: bool = false,
    // Platform-specific implementation state.
    impl: pthreading.ThreadImpl = undefined,
    // Owning process, if any.
    process: ?*Process = null,
    // Intrusive links for a system-wide thread list.
    prev_in_system: ?*Thread = null,
    next_in_system: ?*Thread = null,
    /// Address of First Instruction
    entry: usize = 0,

    /// Inherits `kernel_mode` from the owning process's info (if any), then delegates
    /// to the platform implementation.
    pub fn init(self: *Thread, boot_thread: bool) Error!void {
        if (self.process) |process| {
            if (process.info) |*info| {
                self.kernel_mode = info.kernel_mode;
            }
        }
        try self.impl.init(self, boot_thread);
    }

    /// Hands the thread to the platform scheduler.
    pub fn start(self: *Thread) Error!void {
        try self.impl.start();
    }

    /// Called when the thread exits: wakes the (single) thread waiting on us, if any.
    pub fn finish(self: *Thread) void {
        if (self.wake_on_exit) |other| {
            other.state = .Run;
        }
    }
};
/// A process: owns a main thread, a working directory, and duplicated `Info` strings.
pub const Process = struct {
    pub const Id = u32;
    // pub const OpenedFiles = MappedList(, *Thread, tid_eql, tid_cmp);

    // Process info as passed in from the caller; its strings are copied into kernel
    // memory by init() because the originals may not outlive the process.
    info: ?Info = null,
    id: Id = undefined,
    impl: pthreading.ProcessImpl = .{},
    main_thread: Thread = .{},
    /// Address of First Instruction for Main Thread
    entry: usize = 0,
    /// Current Working Directory
    cwd: ?[]const u8 = null,

    /// Duplicates `info` strings into kernel memory, inherits the cwd from `current`
    /// (or "/" for the first process), then initializes the impl and the main thread.
    pub fn init(self: *Process, current: ?*Process) Error!void {
        // TODO: Cleanup on failure
        // NOTE(review): the allocations below leak if a later step fails (no errdefer);
        // tracked by the TODO above.
        if (self.info) |*info| {
            // Duplicate info data because we can't trust it will stay.
            const path_temp = try kernel.memory_mgr.alloc.alloc_array(u8, info.path.len);
            _ = utils.memory_copy_truncate(path_temp, info.path);
            info.path = path_temp;
            if (info.name.len > 0) {
                const name_temp =
                    try kernel.memory_mgr.alloc.alloc_array(u8, info.name.len);
                _ = utils.memory_copy_truncate(name_temp, info.name);
                info.name = name_temp;
            }
            if (info.args.len > 0) {
                const args_temp =
                    try kernel.memory_mgr.alloc.alloc_array([]u8, info.args.len);
                for (info.args) |arg, i| {
                    if (arg.len > 0) {
                        args_temp[i] = try kernel.memory_mgr.alloc.alloc_array(u8, arg.len);
                        _ = utils.memory_copy_truncate(args_temp[i], arg);
                    } else {
                        // Zero-length slice with an arbitrary non-null pointer.
                        // NOTE(review): presumably 1024 just avoids address 0 — confirm.
                        args_temp[i] = utils.make_slice(u8, @intToPtr([*]u8, 1024), 0);
                    }
                }
                info.args = args_temp;
            }
        }

        // A live parent's cwd is always set by its own init, so `.?` is expected safe:
        try self.set_cwd(if (current) |c| c.cwd.? else "/");
        try self.impl.init(self);
        self.main_thread.process = self;
        try self.main_thread.init(false);
    }

    /// Starts the process by launching its main thread at `entry`.
    pub fn start(self: *Process) Error!void {
        self.main_thread.entry = self.entry;
        try self.impl.start();
    }

    /// Copies `data` into this process's address space at `address`.
    pub fn address_space_copy(self: *Process,
            address: usize, data: []const u8) memory.AllocError!void {
        try self.impl.address_space_copy(address, data);
    }

    /// Fills `len` bytes at `address` in this process's address space with `byte`.
    pub fn address_space_set(self: *Process,
            address: usize, byte: u8, len: usize) memory.AllocError!void {
        try self.impl.address_space_set(address, byte, len);
    }

    /// Replaces the current working directory with a kernel-owned copy of `dir`,
    /// freeing the previous one.
    pub fn set_cwd(self: *Process, dir: []const u8) Error!void {
        // Reuse existing memory?
        const new_cwd = try kernel.memory_mgr.alloc.alloc_array(u8, dir.len);
        _ = utils.memory_copy_truncate(new_cwd, dir);
        if (self.cwd) |cwd| {
            try kernel.memory_mgr.alloc.free_array(cwd);
        }
        self.cwd = new_cwd;
    }
};
/// Deadline queue for sleeping threads: a doubly-linked list, sorted by
/// ascending wake time, whose nodes live in a small fixed pool.
const TimeQueue = struct {
    const Element = struct {
        valid: bool,
        tid: Thread.Id,
        when: platform.Time,
        prev: ?*Element,
        next: ?*Element,
    };

    // Fixed pool of nodes; `valid == false` marks a free slot.
    elements: []Element = undefined,
    // Earliest deadline, or null when empty.
    head: ?*Element = null,
    // Latest deadline, or null when empty.
    tail: ?*Element = null,

    /// Allocate the node pool. Must be called before insert()/check().
    pub fn init(self: *TimeQueue) Error!void {
        self.elements = try kernel.alloc.alloc_array(Element, 8);
        for (self.elements) |*e| {
            e.valid = false;
        }
    }

    /// Queue thread `tid` to be woken at time `when`, keeping the list
    /// sorted by ascending deadline. Panics if the pool is exhausted.
    pub fn insert(self: *TimeQueue, tid: Thread.Id, when: platform.Time) void {
        // print.format("insert {} {}\n", .{tid, when});
        // Find a free pool slot.
        var index: usize = 0;
        while (self.elements[index].valid) {
            index += 1;
            if (index >= self.elements.len) @panic("TimeQueue Full"); // TODO
        }
        const element = &self.elements[index];
        element.valid = true;
        element.tid = tid;
        element.when = when;
        if (self.head) |head| {
            // Walk forward to the first node with a later deadline.
            var compare: ?*Element = head;
            while (compare) |cmp| {
                if (when < cmp.when) {
                    break;
                }
                compare = cmp.next;
            }
            if (compare) |cmp| { // Comes before something else
                if (cmp.prev) |prev| {
                    prev.next = element;
                } else {
                    self.head = element;
                }
                element.prev = cmp.prev;
                cmp.prev = element;
                element.next = cmp;
            } else { // Tack on end
                // BUG FIX: the old tail's `next` must be pointed at the new
                // element. Previously it was left null, so an appended
                // element was unreachable via the `next` chain and the
                // queue lost it once the head was popped in check().
                if (self.tail) |tail| {
                    tail.next = element;
                }
                element.prev = self.tail;
                element.next = null;
                self.tail = element;
            }
        } else { // Nothing in Queue
            self.head = element;
            self.tail = element;
            element.prev = null;
            element.next = null;
        }
    }

    /// If the earliest deadline has passed, pop that node (freeing its
    /// pool slot) and return its thread id; otherwise return null.
    pub fn check(self: *TimeQueue) ?Thread.Id {
        if (self.head) |head| {
            if (platform.time() >= head.when) {
                head.valid = false;
                if (head.next) |next| {
                    next.prev = null;
                }
                self.head = head.next;
                if (self.head == null) {
                    self.tail = null;
                }
                return head.tid;
            }
        }
        return null;
    }
};
/// The scheduler. Tracks every thread and process, decides which thread
/// runs next (round-robin with an idle fallback), and implements the
/// blocking primitives: waiting on thread/process exit, keyboard input,
/// and timed sleep via TimeQueue.
pub const Manager = struct {
    fn tid_eql(a: Thread.Id, b: Thread.Id) bool {
        return a == b;
    }
    fn tid_cmp(a: Thread.Id, b: Thread.Id) bool {
        return a > b;
    }
    const ThreadList = MappedList(Thread.Id, *Thread, tid_eql, tid_cmp);
    fn pid_eql(a: Process.Id, b: Process.Id) bool {
        return a == b;
    }
    fn pid_cmp(a: Process.Id, b: Process.Id) bool {
        return a > b;
    }
    const ProcessList = MappedList(Process.Id, *Process, pid_eql, pid_cmp);

    // Id -> *Thread map of all live threads.
    thread_list: ThreadList = undefined,
    // Scheduled only when nothing else is runnable.
    idle_thread: Thread = .{.kernel_mode = true, .state = .Wait},
    // Wraps the boot context so it can be scheduled like a normal thread.
    boot_thread: Thread = .{.kernel_mode = true},
    next_thread_id: Thread.Id = 0,
    current_thread: ?*Thread = null,
    // Intrusive list (next_in_system/prev_in_system) defining round-robin order.
    head_thread: ?*Thread = null,
    tail_thread: ?*Thread = null,
    process_list: ProcessList = undefined,
    next_process_id: Process.Id = 0,
    current_process: ?*Process = null,
    // At most one thread blocks on keyboard input at a time.
    waiting_for_keyboard: ?*Thread = null,
    time_queue: TimeQueue = .{},

    /// Set up the lists, the idle and boot threads, and the time queue.
    pub fn init(self: *Manager) Error!void {
        self.thread_list = .{.alloc = kernel.alloc};
        self.process_list = .{.alloc = kernel.alloc};
        try self.idle_thread.init(false);
        self.idle_thread.entry = @ptrToInt(platform.idle);
        try self.boot_thread.init(true);
        self.current_thread = &self.boot_thread;
        platform.disable_interrupts();
        try self.insert_thread(&self.idle_thread);
        try self.insert_thread(&self.boot_thread);
        try self.time_queue.init();
        platform.enable_interrupts();
    }

    /// Allocate a blank Process (info not duplicated, nothing started).
    pub fn new_process_i(self: *Manager) Error!*Process {
        _ = self;
        const p = try kernel.alloc.alloc(Process);
        p.* = .{.info = undefined};
        return p;
    }

    /// Allocate and initialize a Process from `info`, inheriting the
    /// current process' cwd.
    pub fn new_process(self: *Manager, info: *const Info) Error!*Process {
        const p = try self.new_process_i();
        p.info = info.*;
        try p.init(self.current_process);
        return p;
    }

    /// Assign an id, register the process and its main thread, then start it.
    /// NOTE(review): interrupts are disabled here and presumably re-enabled
    /// by the eventual context switch — confirm.
    pub fn start_process(self: *Manager, process: *Process) Error!void {
        platform.disable_interrupts();
        // Assign ID
        process.id = self.next_process_id;
        self.next_process_id += 1;
        // Start
        try self.process_list.push_back(process.id, process);
        try self.insert_thread(&process.main_thread);
        try process.start();
    }

    /// Give the thread an id and append it to both the round-robin list
    /// and the id map.
    pub fn insert_thread(self: *Manager, thread: *Thread) Error!void {
        // Assign ID
        thread.id = self.next_thread_id;
        self.next_thread_id += 1;
        // Insert into Thread List
        if (self.head_thread == null) {
            self.head_thread = thread;
        }
        if (self.tail_thread) |tail| {
            tail.next_in_system = thread;
        }
        thread.prev_in_system = self.tail_thread;
        thread.next_in_system = null;
        self.tail_thread = thread;
        try self.thread_list.push_back(thread.id, thread);
    }

    /// Drop the process from the map and free its memory.
    pub fn remove_process(self: *Manager, process: *Process) void {
        _ = self.process_list.find_remove(process.id)
            catch @panic("remove_process: process_list.find_remove");
        kernel.alloc.free(process) catch @panic("remove_process: free(process)");
    }

    /// Unlink the thread from the round-robin list, run its exit hook,
    /// and — if it was a process' main thread — tear the process down too.
    pub fn remove_thread(self: *Manager, thread: *Thread) void {
        if (thread.next_in_system) |nt| {
            nt.prev_in_system = thread.prev_in_system;
        }
        if (thread.prev_in_system) |pt| {
            pt.next_in_system = thread.next_in_system;
        }
        if (self.head_thread == thread) {
            self.head_thread = thread.next_in_system;
        }
        if (self.tail_thread == thread) {
            self.tail_thread = thread.prev_in_system;
        }
        thread.finish();
        if (thread.process) |process| {
            if (thread == &process.main_thread) {
                self.remove_process(process);
            }
        }
        _ = self.thread_list.find_remove(thread.id)
            catch @panic("remove_thread: thread_list.find_remove");
    }

    /// Called by an exiting thread: remove it and yield forever.
    /// Never returns.
    pub fn remove_current_thread(self: *Manager) void {
        platform.disable_interrupts();
        if (self.current_thread) |thread| {
            if (debug) print.format("Thread {} has Finished\n", .{thread.id});
            self.remove_thread(thread);
            // TODO: Cleanup Process, Memory
            while (true) {
                self.yield();
            }
        } else {
            @panic("remove_current_thread: no current thread");
        }
        @panic("remove_current_thread: reached end");
    }

    /// Next thread after `thread_maybe` in system order, wrapping to the
    /// head at the end of the list; null in, null out.
    fn next_from(self: *const Manager, thread_maybe: ?*Thread) ?*Thread {
        if (thread_maybe) |thread| {
            const nt = thread.next_in_system orelse self.head_thread;
            if (debug) print.format("+{}->{}+", .{thread.id, nt.?.id});
            return nt;
        }
        return null;
    }

    /// Pick the next thread to run: an expired sleeper first, otherwise a
    /// round-robin scan for a Run-state thread, otherwise the idle thread.
    /// Returns null to mean "stay on the current thread".
    fn next(self: *Manager) ?*Thread {
        if (self.time_queue.check()) |tid| {
            // print.uint(tid);
            if (self.thread_list.find(tid)) |thread| {
                // print.uint(thread.id);
                thread.state = .Run;
                return thread;
            }
        }
        var next_thread: ?*Thread = null;
        var thread = self.current_thread;
        while (next_thread == null) {
            thread = self.next_from(thread);
            if (thread) |t| {
                // Wrapped back to the current thread: nothing else runnable.
                if (t == self.current_thread.?) {
                    if (self.current_thread.?.state != .Run) {
                        if (debug) print.string("&I&");
                        self.idle_thread.state = .Run;
                        next_thread = &self.idle_thread;
                    } else {
                        if (debug) print.string("&C&");
                    }
                    break;
                }
                if (t.state == .Run) {
                    next_thread = t;
                }
            } else break;
        }
        if (next_thread) |nt| {
            // Leaving idle for real work: park the idle thread again.
            if (nt != &self.idle_thread and self.idle_thread.state == .Run) {
                if (debug) print.string("&i&");
                self.idle_thread.state = .Wait;
            }
        }
        if (debug) {
            if (next_thread) |nt| {
                print.format("({})", .{nt.id});
            } else {
                print.string("(null)");
            }
        }
        return next_thread;
    }

    /// Context-switch to the next runnable thread, if there is one.
    pub fn yield(self: *Manager) void {
        platform.disable_interrupts();
        if (self.next()) |next_thread| {
            next_thread.impl.switch_to();
        }
    }

    /// True while the thread id is still registered (has not exited).
    pub fn thread_is_running(self: *Manager, id: Thread.Id) bool {
        return self.thread_list.find(id) != null;
    }

    /// Block the current thread until thread `id` exits; no-op if the
    /// thread is already gone. Panics if someone else is already waiting.
    pub fn wait_for_thread(self: *Manager, id: Thread.Id) void {
        if (debug) print.format("<Wait for tid {}>\n", .{id});
        platform.disable_interrupts();
        if (self.thread_list.find(id)) |thread| {
            if (debug) print.string("<tid found>");
            if (thread.wake_on_exit != null)
                @panic("wait_for_thread: wake_on_exit not null");
            self.current_thread.?.state = .Wait;
            thread.wake_on_exit = self.current_thread;
            self.yield();
        }
        if (debug) print.format("<Wait for tid {} is done>\n", .{id});
    }

    /// True while the process id is still registered.
    pub fn process_is_running(self: *Manager, id: Process.Id) bool {
        return self.process_list.find(id) != null;
    }

    /// Block the current thread until process `id` exits (hooked through
    /// the process' main thread). Panics if someone is already waiting.
    pub fn wait_for_process(self: *Manager, id: Process.Id) void {
        if (debug) print.format("<Wait for pid {}>\n", .{id});
        platform.disable_interrupts();
        if (self.process_list.find(id)) |proc| {
            if (debug) print.string("<pid found>");
            if (proc.main_thread.wake_on_exit != null)
                @panic("wait_for_process: wake_on_exit not null");
            self.current_thread.?.state = .Wait;
            proc.main_thread.wake_on_exit = self.current_thread;
            self.yield();
        }
        if (debug) print.format("<Wait for pid {} is done>\n", .{id});
    }

    // TODO Make this and keyboard_event_occured generic
    /// Block the current thread until the next keyboard event.
    pub fn wait_for_keyboard(self: *Manager) void {
        platform.disable_interrupts();
        self.current_thread.?.state = .Wait;
        self.waiting_for_keyboard = self.current_thread;
        self.yield();
    }

    /// Wake the thread (if any) blocked in wait_for_keyboard.
    pub fn keyboard_event_occured(self: *Manager) void {
        platform.disable_interrupts();
        if (self.waiting_for_keyboard) |t| {
            t.state = .Run;
            self.waiting_for_keyboard = null;
            // TODO: yield to waiting process?
        }
    }

    /// Copy the current process' cwd into `buffer`; "/" if no process.
    pub fn get_cwd(self: *Manager, buffer: []u8) Error![]const u8 {
        if (self.current_process) |proc| {
            return buffer[0..try utils.memory_copy_error(buffer, proc.cwd.?)];
        } else {
            return "/";
        }
    }

    /// Like get_cwd, but returns a fresh kernel-heap copy the caller owns.
    pub fn get_cwd_heap(self: *Manager) Error![]const u8 {
        const cwd = if (self.current_process) |proc| proc.cwd.? else "/";
        const rv = try kernel.memory_mgr.alloc.alloc_array(u8, cwd.len);
        _ = utils.memory_copy_truncate(rv, cwd);
        return rv;
    }

    /// Resolve `dir` against the filesystem and install it as the current
    /// process' cwd. Fails with NoCurrentProcess outside a process.
    pub fn set_cwd(self: *Manager, dir: []const u8) georgios.ThreadingOrFsError!void {
        if (self.current_process) |proc| {
            const resolved = try kernel.filesystem.resolve_directory_path(dir);
            try proc.set_cwd(resolved);
            try kernel.memory_mgr.alloc.free_array(resolved);
        } else {
            return Error.NoCurrentProcess;
        }
    }

    // TODO: If this is called by shell sleep for too short a time (like less
    // than 5ms), it causes a really weird looking page fault. Might have
    // something to do with the keyboard interrupt for the enter release.
    /// Put the current thread to sleep for at least `ms` milliseconds.
    pub fn sleep_milliseconds(self: *Manager, ms: u64) void {
        if (ms == 0) return;
        platform.disable_interrupts();
        self.current_thread.?.state = .Wait;
        self.time_queue.insert(self.current_thread.?.id,
            platform.time() + platform.milliseconds_to_time(ms));
        self.yield();
    }

    /// Put the current thread to sleep for at least `s` seconds.
    pub fn sleep_seconds(self: *Manager, s: u64) void {
        if (s == 0) return;
        platform.disable_interrupts();
        self.current_thread.?.state = .Wait;
        self.time_queue.insert(self.current_thread.?.id,
            platform.time() + platform.seconds_to_time(s));
        self.yield();
    }
};
|
kernel/threading.zig
|
const std = @import("std");
const test_input = @embedFile("day-19_test-input");
/// Entry point. Only prints the banner — the actual day-19 work so far
/// lives in the test blocks below.
pub fn main() !void {
    std.debug.print("--- Day 19 ---\n", .{});
}
test "test-input" {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const scanners = try parseScanners(test_input, arena.allocator());
    // Five scanners with the per-scanner beacon counts from the example.
    try std.testing.expectEqual(@as(usize, 5), scanners.len);
    const expected_counts = [_]usize{ 25, 25, 26, 25, 26 };
    for (expected_counts) |count, i| {
        try std.testing.expectEqual(count, scanners[i].beacons.len);
    }
}
/// Split the raw puzzle input into scanners. Beacon coordinates are
/// collected into one backing list; each Scanner gets a sub-slice of it.
/// All allocations come from `allocator` (the caller's arena owns them).
fn parseScanners(input: []const u8, allocator: std.mem.Allocator) ![]Scanner {
    var scanners = std.ArrayList(Scanner).init(allocator);
    var beacons = std.ArrayList(Pos).init(allocator);
    var scanner_starts = std.ArrayList(usize).init(allocator);
    defer scanner_starts.deinit();
    // Pass 1: parse every beacon line, remembering where each scanner begins.
    var lines = std.mem.tokenize(u8, input, "\n\r");
    while (lines.next()) |line| {
        if (line.len == 0) continue;
        if (std.mem.startsWith(u8, line, "--- scanner ")) {
            try scanner_starts.append(beacons.items.len);
        } else {
            try beacons.append(try parseBeacon(line));
        }
    }
    // Pass 2: each scanner owns the beacons from its start up to the next
    // scanner's start (or the end of the list for the last one).
    var i: usize = 0;
    while (i < scanner_starts.items.len) : (i += 1) {
        const begin = scanner_starts.items[i];
        const end = if (i + 1 < scanner_starts.items.len)
            scanner_starts.items[i + 1]
        else
            beacons.items.len;
        try scanners.append(Scanner{ .beacons = beacons.items[begin..end] });
    }
    return scanners.items;
}
/// Parse one "x,y,z" line of signed decimal coordinates into a Pos.
fn parseBeacon(line: []const u8) !Pos {
    var parts = std.mem.tokenize(u8, line, ",");
    const x = try std.fmt.parseInt(i32, parts.next().?, 10);
    const y = try std.fmt.parseInt(i32, parts.next().?, 10);
    const z = try std.fmt.parseInt(i32, parts.next().?, 10);
    return Pos{ .x = x, .y = y, .z = z };
}
test "parseBeacon" {
    // Negative, positive, and zero coordinates in one sample.
    const parsed = try parseBeacon("-456,654,0");
    try std.testing.expectEqual(Pos{ .x = -456, .y = 654, .z = 0 }, parsed);
}
/// One scanner report: the beacon positions it observed (a sub-slice of
/// the shared beacon list built by parseScanners).
const Scanner = struct {
    beacons: []Pos,
};
/// A 3D integer coordinate.
const Pos = struct {
    x: i32,
    y: i32,
    z: i32,
};
|
day-19.zig
|
const std = @import("std");
const real_input = "target area: x=60..94, y=-171..-136";
/// Entry point: parse the puzzle's target area from the hard-coded input
/// and print both part answers (highest apex and total hit count).
pub fn main() !void {
    std.debug.print("--- Day 17 ---\n", .{});
    const target = try parseRect(removePrefix(real_input));
    const result = findHighestAndCount(target);
    std.debug.print("highest achievable Y is {}\n", .{ result.highest_y });
    std.debug.print("total hit count is {}\n", .{ result.count });
}
/// Brute-force every launch velocity with x in [1, 1000) and
/// y in [-1000, 1000): return the highest apex over all hits and the
/// total number of velocities that hit the target.
fn findHighestAndCount(target: Rect) struct { highest_y: i32, count: u32, } {
    const limit: i32 = 1000;
    var best_y: i32 = std.math.minInt(i32);
    var hits: u32 = 0;
    var vy: i32 = -limit;
    while (vy < limit) : (vy += 1) {
        var vx: i32 = 1;
        while (vx < limit) : (vx += 1) {
            const outcome = simulateProbe(target, .{ .x = vx, .y = vy });
            if (!outcome.is_hit) continue;
            hits += 1;
            if (outcome.highest_y > best_y) best_y = outcome.highest_y;
        }
    }
    return .{ .highest_y = best_y, .count = hits };
}
test "findHighestAndCount" {
    // Example target area from the puzzle description.
    const target = Rect { .min_x = 20, .max_x = 30, .min_y = -10, .max_y = -5 };
    const outcome = findHighestAndCount(target);
    try std.testing.expectEqual(@as(i32, 45), outcome.highest_y);
    try std.testing.expectEqual(@as(u32, 112), outcome.count);
}
/// Step the probe from the origin with `initial_velocity` until it either
/// enters the target rect (hit) or overshoots it to the right / falls
/// below it (miss). Tracks the highest y reached along the way.
fn simulateProbe(target: Rect, initial_velocity: Vector) ProbeResult {
    var probe = Probe { .vel = initial_velocity };
    var peak: i32 = probe.pos.y;
    var hit = false;
    while (probe.pos.x <= target.max_x and probe.pos.y >= target.min_y) {
        if (probe.pos.y > peak) peak = probe.pos.y;
        // Combined with the loop condition this means "inside the rect".
        if (probe.pos.x >= target.min_x and probe.pos.y <= target.max_y) {
            hit = true;
            break;
        }
        probe.step();
    }
    return .{ .highest_y = peak, .is_hit = hit };
}
/// Outcome of a single probe launch.
const ProbeResult = struct{
    /// Peak y position reached along the trajectory.
    highest_y: i32,
    /// True if the probe was inside the target rect at some step.
    is_hit: bool,
};
test "simulateProbe" {
    const target = Rect { .min_x = 20, .max_x = 30, .min_y = -10, .max_y = -5 };
    // Table of launch velocities and expected outcomes from the puzzle text.
    const cases = [_]struct { initial_velocity: Vector, expected: ProbeResult, } {
        .{ .initial_velocity = .{ .x = 7, .y = 2 }, .expected = .{ .highest_y = 3, .is_hit = true } },
        .{ .initial_velocity = .{ .x = 6, .y = 3 }, .expected = .{ .highest_y = 6, .is_hit = true } },
        .{ .initial_velocity = .{ .x = 9, .y = 0 }, .expected = .{ .highest_y = 0, .is_hit = true } },
        .{ .initial_velocity = .{ .x = 17, .y = -4 }, .expected = .{ .highest_y = 0, .is_hit = false } },
        .{ .initial_velocity = .{ .x = 6, .y = 9 }, .expected = .{ .highest_y = 45, .is_hit = true } },
    };
    for (cases) |case| {
        try std.testing.expectEqual(case.expected, simulateProbe(target, case.initial_velocity));
    }
}
/// Probe state: position and velocity, both starting from the origin.
const Probe = struct {
    pos: Vector = .{ .x = 0, .y = 0 },
    vel: Vector,

    /// Advance one time step: move by the velocity, then apply drag and
    /// gravity to the velocity.
    fn step(self: *@This()) void {
        self.pos.x += self.vel.x;
        self.pos.y += self.vel.y;
        // Drag reduces horizontal speed toward zero (x velocity starts positive).
        if (self.vel.x > 0) self.vel.x -= 1;
        // Gravity.
        self.vel.y -= 1;
    }
};
/// A 2D integer vector (used for both position and velocity).
const Vector = struct {
    x: i32,
    y: i32,
};
/// Inclusive axis-aligned target rectangle.
const Rect = struct {
    min_x: i32,
    max_x: i32,
    min_y: i32,
    max_y: i32,
};
/// Parse "x=A..B, y=C..D" into a Rect. Tokenizing on "xy=,. " leaves
/// exactly the four signed numbers, in order.
fn parseRect(string: []const u8) !Rect {
    var numbers = std.mem.tokenize(u8, string, "xy=,. ");
    const min_x = try nextCoord(&numbers);
    const max_x = try nextCoord(&numbers);
    const min_y = try nextCoord(&numbers);
    const max_y = try nextCoord(&numbers);
    return Rect{ .min_x = min_x, .max_x = max_x, .min_y = min_y, .max_y = max_y };
}
/// Pull the next token from any iterator with `next() ?[]const u8` and
/// parse it as a base-10 i32. Asserts a token is available.
fn nextCoord(iterator: anytype) !i32 {
    return std.fmt.parseInt(i32, iterator.next().?, 10);
}
test "parseRect" {
    const parsed = try parseRect("x=20..30, y=-10..-5");
    try std.testing.expectEqual(
        Rect { .min_x = 20, .max_x = 30, .min_y = -10, .max_y = -5 },
        parsed,
    );
}
/// Strip the constant "target area: " header from an input line.
/// Assumes the prefix is present; only the remainder is returned.
fn removePrefix(string: []const u8) []const u8 {
    const skip = "target area: ".len;
    return string[skip..];
}
test "removePrefix" {
    try std.testing.expectEqualStrings(
        "<this is the stuff>",
        removePrefix("target area: <this is the stuff>"),
    );
}
|
day-17.zig
|
const std = @import("std");
const Arena = std.heap.ArenaAllocator;
const expectEqual = std.testing.expectEqual;
const expectEqualStrings = std.testing.expectEqualStrings;
const yeti = @import("yeti");
const initCodebase = yeti.initCodebase;
const tokenize = yeti.tokenize;
const parse = yeti.parse;
const analyzeSemantics = yeti.analyzeSemantics;
const codegen = yeti.codegen;
const printWasm = yeti.printWasm;
const components = yeti.components;
const literalOf = yeti.query.literalOf;
const typeOf = yeti.query.typeOf;
const MockFileSystem = yeti.FileSystem;
const Entity = yeti.ecs.Entity;
// Parser-level test: `x += 1` must produce a .plus_equal AST node with
// the target symbol and the increment as its two arguments.
test "parse plus equal" {
    var arena = Arena.init(std.heap.page_allocator);
    defer arena.deinit();
    var codebase = try initCodebase(&arena);
    const module = try codebase.createEntity(.{});
    const code =
        \\start() u64 {
        \\ x = 10
        \\ x += 1
        \\ x
        \\}
    ;
    var tokens = try tokenize(module, code);
    try parse(module, &tokens);
    const top_level = module.get(components.TopLevel);
    const start = top_level.findString("start");
    const overloads = start.get(components.Overloads).slice();
    try expectEqual(overloads.len, 1);
    const body = overloads[0].get(components.Body).slice();
    try expectEqual(body.len, 3);
    // Statement 1: define x = 10.
    const define = body[0];
    try expectEqual(define.get(components.AstKind), .define);
    try expectEqualStrings(literalOf(define.get(components.Name).entity), "x");
    try expectEqualStrings(literalOf(define.get(components.Value).entity), "10");
    // Statement 2: x += 1 stays a distinct .plus_equal node at parse time.
    const plus_equal = body[1];
    try expectEqual(plus_equal.get(components.AstKind), .plus_equal);
    const arguments = plus_equal.get(components.Arguments).slice();
    try expectEqualStrings(literalOf(arguments[0]), "x");
    try expectEqualStrings(literalOf(arguments[1]), "1");
    // Statement 3: the trailing expression is the bare symbol x.
    const x = body[2];
    try expectEqual(x.get(components.AstKind), .symbol);
    try expectEqualStrings(literalOf(x), "x");
}
// Parser-level test: `x *= 1` must produce a .times_equal AST node,
// mirroring the plus-equal test above.
test "parse times equal" {
    var arena = Arena.init(std.heap.page_allocator);
    defer arena.deinit();
    var codebase = try initCodebase(&arena);
    const module = try codebase.createEntity(.{});
    const code =
        \\start() u64 {
        \\ x = 10
        \\ x *= 1
        \\ x
        \\}
    ;
    var tokens = try tokenize(module, code);
    try parse(module, &tokens);
    const top_level = module.get(components.TopLevel);
    const start = top_level.findString("start");
    const overloads = start.get(components.Overloads).slice();
    try expectEqual(overloads.len, 1);
    const body = overloads[0].get(components.Body).slice();
    try expectEqual(body.len, 3);
    // Statement 1: define x = 10.
    const define = body[0];
    try expectEqual(define.get(components.AstKind), .define);
    try expectEqualStrings(literalOf(define.get(components.Name).entity), "x");
    try expectEqualStrings(literalOf(define.get(components.Value).entity), "10");
    // Statement 2: x *= 1 parses to .times_equal with (x, 1) as arguments.
    const times_equal = body[1];
    try expectEqual(times_equal.get(components.AstKind), .times_equal);
    const arguments = times_equal.get(components.Arguments).slice();
    try expectEqualStrings(literalOf(arguments[0]), "x");
    try expectEqualStrings(literalOf(arguments[1]), "1");
    // Statement 3: the trailing expression is the bare symbol x.
    const x = body[2];
    try expectEqual(x.get(components.AstKind), .symbol);
    try expectEqualStrings(literalOf(x), "x");
}
// Semantic-analysis test: reassignment to a typed local becomes an
// .assign node bound to the same local entity as the definition.
// Driven by a (currently single-entry) table of source types.
test "analyze semantics of assignment" {
    var arena = Arena.init(std.heap.page_allocator);
    defer arena.deinit();
    var codebase = try initCodebase(&arena);
    const builtins = codebase.get(components.Builtins);
    const types = [_][]const u8{"i64"};
    const builtin_types = [_]Entity{builtins.I64};
    for (types) |type_of, i| {
        var fs = try MockFileSystem.init(&arena);
        _ = try fs.newFile("foo.yeti", try std.fmt.allocPrint(arena.allocator(),
            \\start() {s} {{
            \\ x: {s} = 10
            \\ x = 3
            \\ x
            \\}}
        , .{ type_of, type_of }));
        const module = try analyzeSemantics(codebase, fs, "foo.yeti");
        const top_level = module.get(components.TopLevel);
        const start = top_level.findString("start").get(components.Overloads).slice()[0];
        try expectEqualStrings(literalOf(start.get(components.Module).entity), "foo");
        try expectEqualStrings(literalOf(start.get(components.Name).entity), "start");
        try expectEqual(start.get(components.Parameters).len(), 0);
        try expectEqual(start.get(components.ReturnType).entity, builtin_types[i]);
        const body = start.get(components.Body).slice();
        try expectEqual(body.len, 3);
        // The define introduces the local x; capture it for identity checks.
        const x = blk: {
            const define = body[0];
            try expectEqual(define.get(components.AstKind), .define);
            try expectEqual(typeOf(define), builtins.Void);
            try expectEqualStrings(literalOf(define.get(components.Value).entity), "10");
            const local = define.get(components.Local).entity;
            try expectEqual(local.get(components.AstKind), .local);
            try expectEqualStrings(literalOf(local.get(components.Name).entity), "x");
            try expectEqual(typeOf(local), builtin_types[i]);
            break :blk local;
        };
        // The assignment must target the exact same local entity.
        const assign = body[1];
        try expectEqual(assign.get(components.AstKind), .assign);
        try expectEqual(typeOf(assign), builtins.Void);
        try expectEqual(assign.get(components.Local).entity, x);
        try expectEqualStrings(literalOf(assign.get(components.Value).entity), "3");
        try expectEqual(body[2], x);
    }
}
// Semantic-analysis test: `x = x + 1` becomes an .assign whose value is
// an .add intrinsic taking the same local x and the literal 1.
test "analyze semantics of increment" {
    var arena = Arena.init(std.heap.page_allocator);
    defer arena.deinit();
    var codebase = try initCodebase(&arena);
    const builtins = codebase.get(components.Builtins);
    var fs = try MockFileSystem.init(&arena);
    _ = try fs.newFile("foo.yeti",
        \\start() i64 {
        \\ x = 0
        \\ x = x + 1
        \\ x
        \\}
    );
    const module = try analyzeSemantics(codebase, fs, "foo.yeti");
    const top_level = module.get(components.TopLevel);
    const start = top_level.findString("start").get(components.Overloads).slice()[0];
    try expectEqualStrings(literalOf(start.get(components.Module).entity), "foo");
    try expectEqualStrings(literalOf(start.get(components.Name).entity), "start");
    try expectEqual(start.get(components.Parameters).len(), 0);
    try expectEqual(start.get(components.ReturnType).entity, builtins.I64);
    const body = start.get(components.Body).slice();
    try expectEqual(body.len, 3);
    // The define introduces local x (inferred i64 from the return type).
    const x = blk: {
        const define = body[0];
        try expectEqual(define.get(components.AstKind), .define);
        try expectEqual(typeOf(define), builtins.Void);
        try expectEqualStrings(literalOf(define.get(components.Value).entity), "0");
        const local = define.get(components.Local).entity;
        try expectEqual(local.get(components.AstKind), .local);
        try expectEqualStrings(literalOf(local.get(components.Name).entity), "x");
        try expectEqual(typeOf(local), builtins.I64);
        break :blk local;
    };
    const assign = body[1];
    try expectEqual(assign.get(components.AstKind), .assign);
    try expectEqual(typeOf(assign), builtins.Void);
    try expectEqual(assign.get(components.Local).entity, x);
    // The right-hand side lowered to an add intrinsic of (x, 1).
    const intrinsic = assign.get(components.Value).entity;
    try expectEqual(intrinsic.get(components.AstKind), .intrinsic);
    try expectEqual(intrinsic.get(components.Intrinsic), .add);
    try expectEqual(typeOf(intrinsic), builtins.I64);
    const arguments = intrinsic.get(components.Arguments).slice();
    try expectEqual(arguments.len, 2);
    try expectEqual(arguments[0], x);
    const rhs = arguments[1];
    try expectEqual(rhs.get(components.AstKind), .int);
    try expectEqualStrings(literalOf(rhs), "1");
    try expectEqual(body[2], x);
}
// Semantic-analysis test: adding a typed local (a: i64) to an inferred
// local (b) must propagate i64 onto b, and the assignment's add intrinsic
// must reference both local entities directly.
test "analyze semantics of add between typed and inferred" {
    var arena = Arena.init(std.heap.page_allocator);
    defer arena.deinit();
    var codebase = try initCodebase(&arena);
    const builtins = codebase.get(components.Builtins);
    var fs = try MockFileSystem.init(&arena);
    _ = try fs.newFile("foo.yeti",
        \\start() i64 {
        \\ a: i64 = 10
        \\ b = 0
        \\ b = a + b
        \\ b
        \\}
    );
    const module = try analyzeSemantics(codebase, fs, "foo.yeti");
    const top_level = module.get(components.TopLevel);
    const start = top_level.findString("start").get(components.Overloads).slice()[0];
    try expectEqualStrings(literalOf(start.get(components.Module).entity), "foo");
    try expectEqualStrings(literalOf(start.get(components.Name).entity), "start");
    try expectEqual(start.get(components.Parameters).len(), 0);
    try expectEqual(start.get(components.ReturnType).entity, builtins.I64);
    const body = start.get(components.Body).slice();
    try expectEqual(body.len, 4);
    // Explicitly typed local a: i64.
    const a = blk: {
        const define = body[0];
        try expectEqual(define.get(components.AstKind), .define);
        try expectEqual(typeOf(define), builtins.Void);
        try expectEqualStrings(literalOf(define.get(components.Value).entity), "10");
        const local = define.get(components.Local).entity;
        try expectEqual(local.get(components.AstKind), .local);
        try expectEqualStrings(literalOf(local.get(components.Name).entity), "a");
        try expectEqual(typeOf(local), builtins.I64);
        break :blk local;
    };
    // Inferred local b — its type must come out as i64, not a default.
    const b = blk: {
        const define = body[1];
        try expectEqual(define.get(components.AstKind), .define);
        try expectEqual(typeOf(define), builtins.Void);
        try expectEqualStrings(literalOf(define.get(components.Value).entity), "0");
        const local = define.get(components.Local).entity;
        try expectEqual(local.get(components.AstKind), .local);
        try expectEqualStrings(literalOf(local.get(components.Name).entity), "b");
        try expectEqual(typeOf(local), builtins.I64);
        break :blk local;
    };
    const assign = body[2];
    try expectEqual(assign.get(components.AstKind), .assign);
    try expectEqual(typeOf(assign), builtins.Void);
    try expectEqual(assign.get(components.Local).entity, b);
    // b = a + b lowers to an i64 add intrinsic over the two locals.
    const intrinsic = assign.get(components.Value).entity;
    try expectEqual(intrinsic.get(components.AstKind), .intrinsic);
    try expectEqual(intrinsic.get(components.Intrinsic), .add);
    try expectEqual(typeOf(intrinsic), builtins.I64);
    const arguments = intrinsic.get(components.Arguments).slice();
    try expectEqual(arguments.len, 2);
    try expectEqual(arguments[0], a);
    try expectEqual(arguments[1], b);
    try expectEqual(body[3], b);
}
// Semantic-analysis test: `x += 1` desugars to `x = x + 1` (an .assign
// whose value is an .add intrinsic over the same local).
test "analyze semantics of plus equal" {
    var arena = Arena.init(std.heap.page_allocator);
    defer arena.deinit();
    var codebase = try initCodebase(&arena);
    var fs = try MockFileSystem.init(&arena);
    const builtins = codebase.get(components.Builtins);
    _ = try fs.newFile("foo.yeti",
        \\start() i64 {
        \\ x = 0
        \\ x += 1
        \\ x
        \\}
    );
    // NOTE(review): analyzeSemantics is run twice on the same codebase —
    // presumably to check re-analysis is stable; confirm this is intentional
    // and not a leftover duplicate line.
    _ = try analyzeSemantics(codebase, fs, "foo.yeti");
    const module = try analyzeSemantics(codebase, fs, "foo.yeti");
    const top_level = module.get(components.TopLevel);
    const start = top_level.findString("start").get(components.Overloads).slice()[0];
    try expectEqualStrings(literalOf(start.get(components.Module).entity), "foo");
    try expectEqualStrings(literalOf(start.get(components.Name).entity), "start");
    try expectEqual(start.get(components.Parameters).len(), 0);
    try expectEqual(start.get(components.ReturnType).entity, builtins.I64);
    const body = start.get(components.Body).slice();
    try expectEqual(body.len, 3);
    const x = blk: {
        const define = body[0];
        try expectEqual(define.get(components.AstKind), .define);
        try expectEqual(typeOf(define), builtins.Void);
        const local = define.get(components.Local).entity;
        try expectEqual(local.get(components.AstKind), .local);
        try expectEqualStrings(literalOf(local.get(components.Name).entity), "x");
        try expectEqualStrings(literalOf(define.get(components.Value).entity), "0");
        break :blk local;
    };
    // The desugared assignment: x = add(x, 1).
    const assign = body[1];
    try expectEqual(assign.get(components.AstKind), .assign);
    try expectEqual(typeOf(assign), builtins.Void);
    try expectEqual(assign.get(components.Local).entity, x);
    const add = assign.get(components.Value).entity;
    try expectEqual(add.get(components.AstKind), .intrinsic);
    try expectEqual(add.get(components.Intrinsic), .add);
    const arguments = add.get(components.Arguments).slice();
    try expectEqual(arguments[0], x);
    try expectEqualStrings(literalOf(arguments[1]), "1");
    try expectEqual(body[2], x);
}
// Semantic-analysis test: `x *= 1` desugars to `x = x * 1` (an .assign
// whose value is a .multiply intrinsic over the same local).
test "analyze semantics of times equal" {
    var arena = Arena.init(std.heap.page_allocator);
    defer arena.deinit();
    var codebase = try initCodebase(&arena);
    var fs = try MockFileSystem.init(&arena);
    const builtins = codebase.get(components.Builtins);
    _ = try fs.newFile("foo.yeti",
        \\start() i64 {
        \\ x = 0
        \\ x *= 1
        \\ x
        \\}
    );
    // NOTE(review): analyzeSemantics is run twice on the same codebase —
    // presumably to check re-analysis is stable; confirm this is intentional
    // and not a leftover duplicate line.
    _ = try analyzeSemantics(codebase, fs, "foo.yeti");
    const module = try analyzeSemantics(codebase, fs, "foo.yeti");
    const top_level = module.get(components.TopLevel);
    const start = top_level.findString("start").get(components.Overloads).slice()[0];
    try expectEqualStrings(literalOf(start.get(components.Module).entity), "foo");
    try expectEqualStrings(literalOf(start.get(components.Name).entity), "start");
    try expectEqual(start.get(components.Parameters).len(), 0);
    try expectEqual(start.get(components.ReturnType).entity, builtins.I64);
    const body = start.get(components.Body).slice();
    try expectEqual(body.len, 3);
    const x = blk: {
        const define = body[0];
        try expectEqual(define.get(components.AstKind), .define);
        try expectEqual(typeOf(define), builtins.Void);
        const local = define.get(components.Local).entity;
        try expectEqual(local.get(components.AstKind), .local);
        try expectEqualStrings(literalOf(local.get(components.Name).entity), "x");
        try expectEqualStrings(literalOf(define.get(components.Value).entity), "0");
        break :blk local;
    };
    // The desugared assignment: x = multiply(x, 1).
    const assign = body[1];
    try expectEqual(assign.get(components.AstKind), .assign);
    try expectEqual(typeOf(assign), builtins.Void);
    try expectEqual(assign.get(components.Local).entity, x);
    const multiply = assign.get(components.Value).entity;
    try expectEqual(multiply.get(components.AstKind), .intrinsic);
    try expectEqual(multiply.get(components.Intrinsic), .multiply);
    const arguments = multiply.get(components.Arguments).slice();
    try expectEqual(arguments[0], x);
    try expectEqualStrings(literalOf(arguments[1]), "1");
    try expectEqual(body[2], x);
}
// Codegen test: define + reassign + read of a local must emit
// const / local.set / const / local.set / local.get, with the const
// instruction kind matching the source type's wasm lowering.
test "codegen assignment" {
    var arena = Arena.init(std.heap.page_allocator);
    defer arena.deinit();
    var codebase = try initCodebase(&arena);
    const types = [_][]const u8{ "i64", "i32", "u64", "u32", "f64", "f32" };
    // Expected wasm const opcode per source type (unsigned maps to signed).
    const const_kinds = [_]components.WasmInstructionKind{ .i64_const, .i32_const, .i64_const, .i32_const, .f64_const, .f32_const };
    for (types) |type_, i| {
        var fs = try MockFileSystem.init(&arena);
        _ = try fs.newFile("foo.yeti", try std.fmt.allocPrint(arena.allocator(),
            \\start() {s} {{
            \\ x: {s} = 10
            \\ x = 3
            \\ x
            \\}}
        , .{ type_, type_ }));
        const module = try analyzeSemantics(codebase, fs, "foo.yeti");
        try codegen(module);
        const top_level = module.get(components.TopLevel);
        const start = top_level.findString("start").get(components.Overloads).slice()[0];
        const start_instructions = start.get(components.WasmInstructions).slice();
        try expectEqual(start_instructions.len, 5);
        // x: T = 10  ->  const 10, local.set x
        {
            const constant = start_instructions[0];
            try expectEqual(constant.get(components.WasmInstructionKind), const_kinds[i]);
            try expectEqualStrings(literalOf(constant.get(components.Constant).entity), "10");
        }
        {
            const local_set = start_instructions[1];
            try expectEqual(local_set.get(components.WasmInstructionKind), .local_set);
            const local = local_set.get(components.Local).entity;
            try expectEqualStrings(literalOf(local.get(components.Name).entity), "x");
        }
        // x = 3  ->  const 3, local.set x
        {
            const constant = start_instructions[2];
            try expectEqual(constant.get(components.WasmInstructionKind), const_kinds[i]);
            try expectEqualStrings(literalOf(constant.get(components.Constant).entity), "3");
        }
        {
            const local_set = start_instructions[3];
            try expectEqual(local_set.get(components.WasmInstructionKind), .local_set);
            const local = local_set.get(components.Local).entity;
            try expectEqualStrings(literalOf(local.get(components.Name).entity), "x");
        }
        // Trailing expression x  ->  local.get x
        const local_get = start_instructions[4];
        try expectEqual(local_get.get(components.WasmInstructionKind), .local_get);
        const local = local_get.get(components.Local).entity;
        try expectEqualStrings(literalOf(local.get(components.Name).entity), "x");
    }
}
// End-to-end test: the printed wat output for define + reassign + read,
// checked against a template for every numeric source type (unsigned
// types print as their signed wasm equivalents).
test "print wasm assignment" {
    var arena = Arena.init(std.heap.page_allocator);
    defer arena.deinit();
    var codebase = try initCodebase(&arena);
    const types = [_][]const u8{ "i64", "i32", "u64", "u32", "f64", "f32" };
    const wasm_types = [_][]const u8{ "i64", "i32", "i64", "i32", "f64", "f32" };
    for (types) |type_of, i| {
        var fs = try MockFileSystem.init(&arena);
        _ = try fs.newFile("foo.yeti", try std.fmt.allocPrint(arena.allocator(),
            \\start() {s} {{
            \\ x: {s} = 10
            \\ x = 3
            \\ x
            \\}}
        , .{ type_of, type_of }));
        const module = try analyzeSemantics(codebase, fs, "foo.yeti");
        try codegen(module);
        const wasm = try printWasm(module);
        try expectEqualStrings(wasm, try std.fmt.allocPrint(arena.allocator(),
            \\(module
            \\
            \\ (func $foo/start (result {s})
            \\ (local $x {s})
            \\ ({s}.const 10)
            \\ (local.set $x)
            \\ ({s}.const 3)
            \\ (local.set $x)
            \\ (local.get $x))
            \\
            \\ (export "_start" (func $foo/start)))
        , .{ wasm_types[i], wasm_types[i], wasm_types[i], wasm_types[i] }));
    }
}
|
src/tests/test_assignment.zig
|
const std = @import("std");
/// A fixed-size 2D grid of `T`, stored row-major with comptime-known
/// dimensions. Out-of-range coordinates are caught by asserts.
pub fn Matrix(comptime T: type, widt: usize, heigh: usize) type {
    return packed struct {
        const Self = @This();

        pub const width = widt;
        pub const height = heigh;

        data: [height * width]T align(@alignOf(T)),
        // Mirrored as comptime fields so an instance exposes the same
        // `width`/`height` members a MatrixSlice(T) value does.
        comptime width: usize = width,
        comptime height: usize = height,

        /// View the entire backing storage as one flat slice.
        pub fn toArraySlice(self: *Self) []T {
            return self.data[0..];
        }

        /// Build a runtime-sized view over this matrix's storage.
        pub fn toSlice(self: *Self) MatrixSlice(T) {
            return MatrixSlice(T){
                .ptr = &self.data,
                .width = width,
                .height = height,
            };
        }

        /// Fill every cell with `val`.
        pub fn reset(self: *Self, val: T) void {
            std.mem.set(T, self.data[0..], val);
        }

        /// Read the cell at column `x`, row `y`.
        pub fn get(self: Self, x: usize, y: usize) T {
            return self.data[self.idx(x, y)];
        }

        /// Write `val` into the cell at column `x`, row `y`.
        pub fn set(self: *Self, x: usize, y: usize, val: T) void {
            self.data[self.idx(x, y)] = val;
        }

        /// Slice from (x, y) through the end of row `y`.
        pub fn sliceLine(self: *Self, x: usize, y: usize) []T {
            const first = self.idx(x, y);
            const remaining = self.width - (x % self.width);
            return self.data[first .. first + remaining];
        }

        // Map 2D coordinates to the flat row-major index.
        fn idx(self: Self, x: usize, y: usize) usize {
            std.debug.assert(x < width);
            std.debug.assert(y < height);
            return y * width + x;
        }
    };
}
/// A runtime-sized 2D view over externally owned row-major storage.
/// The view does not own `ptr`; the caller keeps the backing memory alive.
pub fn MatrixSlice(comptime T: type) type {
    return struct {
        const Self = @This();

        ptr: [*]T,
        width: usize,
        height: usize,

        /// Read the cell at column `x`, row `y`.
        pub fn get(self: Self, x: usize, y: usize) T {
            const i = self.idx(x, y);
            return self.ptr[i];
        }

        /// Write `val` into the cell at column `x`, row `y`.
        pub fn set(self: Self, x: usize, y: usize, val: T) void {
            const i = self.idx(x, y);
            self.ptr[i] = val;
        }

        /// Slice from (x, y) through the end of row `y`.
        /// Takes `self` by value for consistency with `get`/`set` (it only
        /// reads fields); the returned slice aliases the backing storage,
        /// which remains valid because `ptr` points outside this value.
        pub fn sliceLine(self: Self, x: usize, y: usize) []T {
            const start = self.idx(x, y);
            const len = self.width - (x % self.width);
            return self.ptr[start .. start + len];
        }

        // Map 2D coordinates to the flat row-major index, with bounds asserts.
        fn idx(self: Self, x: usize, y: usize) usize {
            std.debug.assert(x < self.width);
            std.debug.assert(y < self.height);
            return x + y * self.width;
        }
    };
}
// Adapted from https://github.com/ziglang/zig/issues/793#issuecomment-482927820
/// A dense array holding one `T` per variant of enum `E`,
/// indexed directly by the enum's integer tag value.
pub fn EnumArray(comptime E: type, comptime T: type) type {
    return extern struct {
        const Self = @This();
        const len = @typeInfo(E).Enum.fields.len;

        data: [len]T,

        /// Return the value stored for `tag`.
        pub fn get(self: Self, tag: E) T {
            return self.data[@enumToInt(tag)];
        }

        /// Store `value` for `tag`.
        pub fn set(self: *Self, tag: E, value: T) void {
            self.data[@enumToInt(tag)] = value;
        }

        /// Overwrite the entry for `dst` with the entry for `src`.
        pub fn copy(self: *Self, dst: E, src: E) void {
            self.set(dst, self.get(src));
        }
    };
}
/// Super simple "perfect hash" algorithm
/// Only really useful for switching on strings
// TODO: can we auto detect and promote the underlying type?
pub fn Swhash(comptime max_bytes: comptime_int) type {
    const T = std.meta.Int(.unsigned, max_bytes * 8);
    return struct {
        /// Hash `string` for use as a `switch` operand; strings longer
        /// than `max_bytes` collapse to `maxInt(T)`.
        pub fn match(string: []const u8) T {
            return hash(string) orelse std.math.maxInt(T);
        }

        /// Comptime hash for a `switch` prong; strings that cannot be
        /// hashed are rejected at compile time.
        pub fn case(comptime string: []const u8) T {
            return hash(string) orelse @compileError("Cannot hash '" ++ string ++ "'");
        }

        // Zero-pad the string to max_bytes and reinterpret the bytes as a T.
        fn hash(string: []const u8) ?T {
            if (string.len > max_bytes) return null;
            var padded = [_]u8{0} ** max_bytes;
            for (string) |byte, i| padded[i] = byte;
            return std.mem.readIntNative(T, &padded);
        }
    };
}
/// Upper-case every byte of `buffer` in place (ASCII only) and return
/// the same buffer so calls can be chained.
pub fn makeUpper(buffer: []u8) []u8 {
    var i: usize = 0;
    while (i < buffer.len) : (i += 1) {
        buffer[i] = std.ascii.toUpper(buffer[i]);
    }
    return buffer;
}
|
src/util.zig
|
const std = @import("std");
const print = std.debug.print;
const _chunk = @import("./chunk.zig");
const _value = @import("./value.zig");
const _obj = @import("./obj.zig");
const _vm = @import("./vm.zig");
const VM = _vm.VM;
const Chunk = _chunk.Chunk;
const OpCode = _chunk.OpCode;
const ObjFunction = _obj.ObjFunction;
/// Print a disassembly of every instruction in `chunk` to stderr,
/// preceded by a dimmed "=== name ===" header (ANSI escapes).
pub fn disassembleChunk(chunk: *Chunk, name: []const u8) !void {
    print("\u{001b}[2m", .{}); // Dimmed
    print("=== {s} ===\n", .{name});
    var offset: usize = 0;
    // Instructions are variable-width: each one reports where the next starts.
    while (offset < chunk.code.items.len) {
        offset = try disassembleInstruction(chunk, offset);
    }
    print("\u{001b}[0m", .{}); // Reset attributes
}
/// Disassemble an OP_INVOKE/OP_SUPER_INVOKE: a two-word instruction whose
/// first word carries the method-name constant index in its low 24 bits,
/// and whose second word packs arg_count (high byte) and catch_count
/// (low 24 bits). Returns the offset of the next instruction.
fn invokeInstruction(code: OpCode, chunk: *Chunk, offset: usize) !usize {
    const constant: u24 = @intCast(u24, 0x00ffffff & chunk.code.items[offset]);
    const arg_count: u8 = @intCast(u8, chunk.code.items[offset + 1] >> 24);
    // Fixed: was `@intCast(u8, ...)` on a 24-bit-masked value, which trips
    // the cast's range check (safety panic) for any catch_count > 255.
    const catch_count: u24 = @intCast(u24, 0x00ffffff & chunk.code.items[offset + 1]);
    var value_str: []const u8 = try _value.valueToString(std.heap.c_allocator, chunk.constants.items[constant]);
    defer std.heap.c_allocator.free(value_str);
    print("{}\t{s}({} args, {} catches)", .{ code, value_str, arg_count, catch_count });
    return offset + 2;
}
/// Disassemble a one-word instruction that carries no operand.
fn simpleInstruction(code: OpCode, offset: usize) usize {
    print("{}\t", .{code});
    return offset + 1;
}
/// Disassemble a one-word instruction whose low 24 bits hold a
/// slot/index operand.
fn byteInstruction(code: OpCode, chunk: *Chunk, offset: usize) usize {
    const slot: u24 = @intCast(u24, 0x00ffffff & chunk.code.items[offset]);
    print("{}\t{}", .{ code, slot });
    return offset + 1;
}
/// Disassemble a two-word instruction (used by OP_SWAP) carrying two
/// 24-bit operands, one per word.
fn bytesInstruction(code: OpCode, chunk: *Chunk, offset: usize) usize {
    const a: u24 = @intCast(u24, 0x00ffffff & chunk.code.items[offset]);
    // NOTE(review): unlike `a`, the second word is not masked with 0x00ffffff
    // before the u24 cast, so this traps if its high byte is ever nonzero —
    // confirm the compiler always emits the second word with a clear high byte.
    const b: u24 = @intCast(u24, chunk.code.items[offset + 1]);
    print("{}\t{} {}", .{ code, a, b });
    return offset + 2;
}
/// Disassemble a one-word instruction (used by OP_CALL) packing an
/// 8-bit operand in bits 16-23 and a 16-bit operand in bits 0-15.
fn triInstruction(code: OpCode, chunk: *Chunk, offset: usize) usize {
    const full_instruction: u32 = chunk.code.items[offset];
    const a: u8 = @intCast(u8, (0x00ffffff & full_instruction) >> 16);
    const b: u16 = @intCast(u16, 0x0000ffff & full_instruction);
    print("{}\t{} {}", .{ code, a, b });
    return offset + 1;
}
/// Disassemble a one-word instruction whose low 24 bits index the chunk's
/// constant table; prints both the index and the rendered constant value.
fn constantInstruction(code: OpCode, chunk: *Chunk, offset: usize) !usize {
    const constant: u24 = @intCast(u24, 0x00ffffff & chunk.code.items[offset]);
    var value_str: []const u8 = try _value.valueToString(std.heap.c_allocator, chunk.constants.items[constant]);
    defer std.heap.c_allocator.free(value_str);
    print("{}\t{} {s}", .{ code, constant, value_str });
    return offset + 1;
}
/// Disassemble a jump instruction: the low 24 bits are an offset delta
/// applied forward (`direction` true: OP_JUMP/OP_JUMP_IF_FALSE) or
/// backward (`direction` false: OP_LOOP) from the next instruction.
fn jumpInstruction(code: OpCode, chunk: *Chunk, direction: bool, offset: usize) !usize {
    const jump: u24 = @intCast(u24, 0x00ffffff & chunk.code.items[offset]);
    const destination = if (direction)
        offset + 1 + jump
    else
        offset + 1 - jump;
    print("{}\t{} -> {}", .{ code, offset, destination });
    return offset + 1;
}
/// Print the VM value stack from bottom to top (dimmed, to stderr),
/// marking the slot where the current call frame begins.
pub fn dumpStack(vm: *VM) !void {
    print("\u{001b}[2m", .{}); // Dimmed
    print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n", .{});
    // Walk raw value pointers from the stack base up to stack_top.
    var value = @ptrCast([*]_value.Value, vm.stack[0..]);
    while (@ptrToInt(value) < @ptrToInt(vm.stack_top)) {
        var value_str: []const u8 = try _value.valueToString(std.heap.c_allocator, value[0]);
        // Freed at the end of each loop iteration (defer is block-scoped).
        defer std.heap.c_allocator.free(value_str);
        // Flag the slot that is the base of the active call frame.
        if (vm.currentFrame().?.slots == value) {
            print("{*} {s} frame\n ", .{ value, value_str });
        } else {
            print("{*} {s}\n ", .{ value, value_str });
        }
        value += 1;
    }
    print("{*} top\n", .{vm.stack_top});
    print("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\n\n", .{});
    print("\u{001b}[0m", .{});
}
/// Disassemble the single instruction at `offset`: prints the offset, the
/// source line ("|" when unchanged from the previous instruction), then the
/// opcode with its operands. Returns the offset of the next instruction.
pub fn disassembleInstruction(chunk: *Chunk, offset: usize) !usize {
    print("\n{:0>3} ", .{offset});
    if (offset > 0 and chunk.lines.items[offset] == chunk.lines.items[offset - 1]) {
        print("| ", .{});
    } else {
        print("{:0>3} ", .{chunk.lines.items[offset]});
    }
    // Opcode lives in the high byte; the low 24 bits are an inline operand.
    const full_instruction: u32 = chunk.code.items[offset];
    const instruction: OpCode = @intToEnum(OpCode, @intCast(u8, full_instruction >> 24));
    const arg: u24 = @intCast(u24, 0x00ffffff & full_instruction);
    return switch (instruction) {
        // Opcodes with no operand.
        .OP_NULL,
        .OP_VOID,
        .OP_TRUE,
        .OP_FALSE,
        .OP_POP,
        .OP_EQUAL,
        .OP_IS,
        .OP_GREATER,
        .OP_LESS,
        .OP_ADD,
        .OP_SUBTRACT,
        .OP_MULTIPLY,
        .OP_DIVIDE,
        .OP_NOT,
        .OP_NEGATE,
        .OP_MOD,
        .OP_SHL,
        .OP_SHR,
        .OP_UNWRAP,
        .OP_NULL_OR,
        .OP_ENUM_CASE,
        .OP_GET_ENUM_CASE_VALUE,
        .OP_LIST_APPEND,
        .OP_SET_MAP,
        .OP_GET_SUBSCRIPT,
        .OP_SET_SUBSCRIPT,
        .OP_THROW,
        .OP_IMPORT,
        .OP_TO_STRING,
        .OP_INSTANCE,
        .OP_FOREACH,
        .OP_GET_SUPER,
        => simpleInstruction(instruction, offset),
        // Two 24-bit operands across two words.
        .OP_SWAP => bytesInstruction(instruction, chunk, offset),
        // Single slot/index operand in the low 24 bits.
        .OP_DEFINE_GLOBAL,
        .OP_GET_GLOBAL,
        .OP_SET_GLOBAL,
        .OP_GET_LOCAL,
        .OP_SET_LOCAL,
        .OP_GET_UPVALUE,
        .OP_SET_UPVALUE,
        .OP_GET_ENUM_CASE,
        .OP_MAP,
        .OP_EXPORT,
        .OP_COPY,
        .OP_CLOSE_UPVALUE,
        .OP_RETURN,
        => byteInstruction(instruction, chunk, offset),
        // Operand is an index into the chunk's constant table.
        .OP_OBJECT,
        .OP_ENUM,
        .OP_LIST,
        .OP_CLASS,
        .OP_METHOD,
        .OP_PROPERTY,
        .OP_GET_PROPERTY,
        .OP_SET_PROPERTY,
        .OP_INHERIT,
        .OP_CONSTANT,
        => try constantInstruction(instruction, chunk, offset),
        .OP_JUMP, .OP_JUMP_IF_FALSE => jumpInstruction(instruction, chunk, true, offset),
        .OP_LOOP => jumpInstruction(instruction, chunk, false, offset),
        .OP_SUPER_INVOKE, .OP_INVOKE => try invokeInstruction(instruction, chunk, offset),
        .OP_CALL => triInstruction(instruction, chunk, offset),
        // Variable-width: the function constant is followed by two trailing
        // words (is_local flag, then index) per captured upvalue.
        .OP_CLOSURE => closure: {
            var constant: u24 = arg;
            var off_offset: usize = offset + 1;
            var value_str: []const u8 = try _value.valueToString(std.heap.c_allocator, chunk.constants.items[constant]);
            defer std.heap.c_allocator.free(value_str);
            print("{}\t{} {s}", .{ instruction, constant, value_str });
            var function: *ObjFunction = ObjFunction.cast(chunk.constants.items[constant].Obj).?;
            var i: u8 = 0;
            while (i < function.upvalue_count) : (i += 1) {
                var is_local: bool = chunk.code.items[off_offset] == 1;
                off_offset += 1;
                var index: u8 = @intCast(u8, chunk.code.items[off_offset]);
                off_offset += 1;
                print("\n{:0>3} | \t{s} {}\n", .{ off_offset - 2, if (is_local) "local " else "upvalue", index });
            }
            break :closure off_offset;
        },
        .OP_PRINT => simpleInstruction(instruction, offset),
    };
}
|
src/disassembler.zig
|
const std = @import("std.zig");
const builtin = @import("builtin");
const root = @import("root");
const c = std.c;
const math = std.math;
const assert = std.debug.assert;
const os = std.os;
const fs = std.fs;
const mem = std.mem;
const meta = std.meta;
const trait = meta.trait;
const File = std.fs.File;
/// The two I/O strategies the standard library can operate under.
pub const Mode = enum {
    /// I/O operates normally, waiting for the operating system syscalls to complete.
    blocking,
    /// I/O functions are generated async and rely on a global event loop. Event-based I/O.
    evented,
};
/// The application's chosen I/O mode. This defaults to `Mode.blocking` but can be overridden
/// by declaring `root.io_mode`; declaring `root.event_loop` also implies `Mode.evented`.
pub const mode: Mode = if (@hasDecl(root, "io_mode"))
    root.io_mode
else if (@hasDecl(root, "event_loop"))
    Mode.evented
else
    Mode.blocking;
/// Whether any async I/O machinery is in play at all.
pub const is_async = mode != .blocking;
/// This is an enum value to use for I/O mode at runtime, since it takes up zero bytes at runtime,
/// and makes expressions comptime-known when `is_async` is `false`.
pub const ModeOverride = if (is_async) Mode else enum { blocking };
/// The intended I/O mode given to std handles by default: evented when async is enabled.
pub const default_mode: ModeOverride = if (is_async) Mode.evented else .blocking;
/// Return the OS handle for standard output: the PEB handle on Windows,
/// a root-provided override if one is declared, else the POSIX descriptor.
fn getStdOutHandle() os.fd_t {
    if (builtin.os.tag == .windows) {
        return os.windows.peb().ProcessParameters.hStdOutput;
    }
    if (@hasDecl(root, "os") and @hasDecl(root.os, "io") and @hasDecl(root.os.io, "getStdOutHandle")) {
        return root.os.io.getStdOutHandle();
    }
    return os.STDOUT_FILENO;
}
/// TODO: async stdout on windows without a dedicated thread.
/// https://github.com/ziglang/zig/pull/4816#issuecomment-604521023
/// Return a `File` for standard output; its intended I/O mode follows the
/// application's chosen mode, but it is only capable of blocking I/O.
pub fn getStdOut() File {
    return File{
        .handle = getStdOutHandle(),
        .capable_io_mode = .blocking,
        .intended_io_mode = default_mode,
    };
}
/// Return the OS handle for standard error: the PEB handle on Windows,
/// a root-provided override if one is declared, else the POSIX descriptor.
fn getStdErrHandle() os.fd_t {
    if (builtin.os.tag == .windows) {
        return os.windows.peb().ProcessParameters.hStdError;
    }
    if (@hasDecl(root, "os") and @hasDecl(root.os, "io") and @hasDecl(root.os.io, "getStdErrHandle")) {
        return root.os.io.getStdErrHandle();
    }
    return os.STDERR_FILENO;
}
/// This returns a `File` that is configured to block with every write, in order
/// to facilitate better debugging. This can be changed by modifying the `intended_io_mode` field.
/// This returns a `File` that is configured to block with every write, in order
/// to facilitate better debugging. This can be changed by modifying the `intended_io_mode` field.
pub fn getStdErr() File {
    return File{
        .handle = getStdErrHandle(),
        .capable_io_mode = .blocking,
        // Unlike stdout/stdin, stderr intentionally ignores the global mode.
        .intended_io_mode = .blocking,
    };
}
/// Return the OS handle for standard input: the PEB handle on Windows,
/// a root-provided override if one is declared, else the POSIX descriptor.
fn getStdInHandle() os.fd_t {
    if (builtin.os.tag == .windows) {
        return os.windows.peb().ProcessParameters.hStdInput;
    }
    if (@hasDecl(root, "os") and @hasDecl(root.os, "io") and @hasDecl(root.os.io, "getStdInHandle")) {
        return root.os.io.getStdInHandle();
    }
    return os.STDIN_FILENO;
}
/// TODO: async stdin on windows without a dedicated thread.
/// https://github.com/ziglang/zig/pull/4816#issuecomment-604521023
/// Return a `File` for standard input; its intended I/O mode follows the
/// application's chosen mode, but it is only capable of blocking I/O.
pub fn getStdIn() File {
    return File{
        .handle = getStdInHandle(),
        .capable_io_mode = .blocking,
        .intended_io_mode = default_mode,
    };
}
pub const Reader = @import("io/reader.zig").Reader;
/// Deprecated: use `Reader`
pub const InStream = Reader;
pub const Writer = @import("io/writer.zig").Writer;
/// Deprecated: use `Writer`
pub const OutStream = Writer;
pub const SeekableStream = @import("io/seekable_stream.zig").SeekableStream;
pub const BufferedWriter = @import("io/buffered_writer.zig").BufferedWriter;
pub const bufferedWriter = @import("io/buffered_writer.zig").bufferedWriter;
/// Deprecated: use `BufferedWriter`
pub const BufferedOutStream = BufferedWriter;
/// Deprecated: use `bufferedWriter`
pub const bufferedOutStream = bufferedWriter;
pub const BufferedReader = @import("io/buffered_reader.zig").BufferedReader;
pub const bufferedReader = @import("io/buffered_reader.zig").bufferedReader;
/// Deprecated: use `BufferedReader`
pub const BufferedInStream = BufferedReader;
/// Deprecated: use `bufferedReader`
pub const bufferedInStream = bufferedReader;
pub const PeekStream = @import("io/peek_stream.zig").PeekStream;
pub const peekStream = @import("io/peek_stream.zig").peekStream;
pub const FixedBufferStream = @import("io/fixed_buffer_stream.zig").FixedBufferStream;
pub const fixedBufferStream = @import("io/fixed_buffer_stream.zig").fixedBufferStream;
pub const CWriter = @import("io/c_writer.zig").CWriter;
pub const cWriter = @import("io/c_writer.zig").cWriter;
/// Deprecated: use `CWriter`
pub const COutStream = CWriter;
/// Deprecated: use `cWriter`
pub const cOutStream = cWriter;
pub const CountingWriter = @import("io/counting_writer.zig").CountingWriter;
pub const countingWriter = @import("io/counting_writer.zig").countingWriter;
pub const CountingReader = @import("io/counting_reader.zig").CountingReader;
pub const countingReader = @import("io/counting_reader.zig").countingReader;
/// Deprecated: use `CountingWriter`
pub const CountingOutStream = CountingWriter;
/// Deprecated: use `countingWriter`
pub const countingOutStream = countingWriter;
pub const MultiWriter = @import("io/multi_writer.zig").MultiWriter;
pub const multiWriter = @import("io/multi_writer.zig").multiWriter;
/// Deprecated: use `MultiWriter`
pub const MultiOutStream = MultiWriter;
/// Deprecated: use `multiWriter`
pub const multiOutStream = multiWriter;
pub const BitReader = @import("io/bit_reader.zig").BitReader;
pub const bitReader = @import("io/bit_reader.zig").bitReader;
/// Deprecated: use `BitReader`
pub const BitInStream = BitReader;
/// Deprecated: use `bitReader`
pub const bitInStream = bitReader;
pub const BitWriter = @import("io/bit_writer.zig").BitWriter;
pub const bitWriter = @import("io/bit_writer.zig").bitWriter;
/// Deprecated: use `BitWriter`
pub const BitOutStream = BitWriter;
/// Deprecated: use `bitWriter`
pub const bitOutStream = bitWriter;
pub const AutoIndentingStream = @import("io/auto_indenting_stream.zig").AutoIndentingStream;
pub const autoIndentingStream = @import("io/auto_indenting_stream.zig").autoIndentingStream;
pub const ChangeDetectionStream = @import("io/change_detection_stream.zig").ChangeDetectionStream;
pub const changeDetectionStream = @import("io/change_detection_stream.zig").changeDetectionStream;
pub const FindByteOutStream = @import("io/find_byte_out_stream.zig").FindByteOutStream;
pub const findByteOutStream = @import("io/find_byte_out_stream.zig").findByteOutStream;
pub const BufferedAtomicFile = @import("io/buffered_atomic_file.zig").BufferedAtomicFile;
pub const StreamSource = @import("io/stream_source.zig").StreamSource;
/// A Writer that doesn't write to anything.
pub const null_writer = @as(NullWriter, .{ .context = {} });
/// Deprecated: use `null_writer`
pub const null_out_stream = null_writer;
const NullWriter = Writer(void, error{}, dummyWrite);
/// Deprecated: use NullWriter
const NullOutStream = NullWriter;
// Write sink for `null_writer`: discards `data` but reports the whole
// buffer as written so callers never loop.
fn dummyWrite(context: void, data: []const u8) error{}!usize {
    return data.len;
}
test "null_writer" {
    // The empty error set makes this catch-switch exhaustive with no prongs.
    null_writer.writeAll("yay" ** 10) catch |err| switch (err) {};
}
test "" {
    // Reference the io submodules so their tests are compiled and run.
    _ = @import("io/bit_reader.zig");
    _ = @import("io/bit_writer.zig");
    _ = @import("io/buffered_atomic_file.zig");
    _ = @import("io/buffered_reader.zig");
    _ = @import("io/buffered_writer.zig");
    _ = @import("io/c_writer.zig");
    _ = @import("io/counting_writer.zig");
    _ = @import("io/fixed_buffer_stream.zig");
    _ = @import("io/reader.zig");
    _ = @import("io/writer.zig");
    _ = @import("io/peek_stream.zig");
    _ = @import("io/seekable_stream.zig");
    _ = @import("io/stream_source.zig");
    _ = @import("io/test.zig");
}
pub const writeFile = @compileError("deprecated: use std.fs.Dir.writeFile with math.maxInt(usize)");
pub const readFileAlloc = @compileError("deprecated: use std.fs.Dir.readFileAlloc");
|
lib/std/io.zig
|
const builtin = @import("builtin");
const std = @import("../../std.zig");
const maxInt = std.math.maxInt;
usingnamespace @import("../bits.zig");
pub usingnamespace switch (builtin.arch) {
.mips, .mipsel => @import("linux/errno-mips.zig"),
else => @import("linux/errno-generic.zig"),
};
pub usingnamespace switch (builtin.arch) {
.i386 => @import("linux/i386.zig"),
.x86_64 => @import("linux/x86_64.zig"),
.aarch64 => @import("linux/arm64.zig"),
.arm => @import("linux/arm-eabi.zig"),
.riscv64 => @import("linux/riscv64.zig"),
.sparcv9 => @import("linux/sparc64.zig"),
.mips, .mipsel => @import("linux/mips.zig"),
.powerpc64, .powerpc64le => @import("linux/powerpc64.zig"),
else => struct {},
};
pub usingnamespace @import("linux/netlink.zig");
pub usingnamespace @import("linux/prctl.zig");
pub usingnamespace @import("linux/securebits.zig");
const is_mips = builtin.arch.isMIPS();
const is_ppc64 = builtin.arch.isPPC64();
const is_sparc = builtin.arch.isSPARC();
pub const pid_t = i32;
pub const fd_t = i32;
pub const uid_t = u32;
pub const gid_t = u32;
pub const clock_t = isize;
pub const NAME_MAX = 255;
pub const PATH_MAX = 4096;
pub const IOV_MAX = 1024;
/// Largest hardware address length
/// e.g. a mac address is a type of hardware address
pub const MAX_ADDR_LEN = 32;
pub const STDIN_FILENO = 0;
pub const STDOUT_FILENO = 1;
pub const STDERR_FILENO = 2;
/// Special value used to indicate openat should use the current working directory
pub const AT_FDCWD = -100;
/// Do not follow symbolic links
pub const AT_SYMLINK_NOFOLLOW = 0x100;
/// Remove directory instead of unlinking file
pub const AT_REMOVEDIR = 0x200;
/// Follow symbolic links.
pub const AT_SYMLINK_FOLLOW = 0x400;
/// Suppress terminal automount traversal
pub const AT_NO_AUTOMOUNT = 0x800;
/// Allow empty relative pathname
pub const AT_EMPTY_PATH = 0x1000;
/// Type of synchronisation required from statx()
pub const AT_STATX_SYNC_TYPE = 0x6000;
/// - Do whatever stat() does
pub const AT_STATX_SYNC_AS_STAT = 0x0000;
/// - Force the attributes to be sync'd with the server
pub const AT_STATX_FORCE_SYNC = 0x2000;
/// - Don't sync attributes with the server
pub const AT_STATX_DONT_SYNC = 0x4000;
/// Apply to the entire subtree
pub const AT_RECURSIVE = 0x8000;
/// Default is extend size
pub const FALLOC_FL_KEEP_SIZE = 0x01;
/// De-allocates range
pub const FALLOC_FL_PUNCH_HOLE = 0x02;
/// Reserved codepoint
pub const FALLOC_FL_NO_HIDE_STALE = 0x04;
/// Removes a range of a file without leaving a hole in the file
pub const FALLOC_FL_COLLAPSE_RANGE = 0x08;
/// Converts a range of file to zeros preferably without issuing data IO
pub const FALLOC_FL_ZERO_RANGE = 0x10;
/// Inserts space within the file size without overwriting any existing data
pub const FALLOC_FL_INSERT_RANGE = 0x20;
/// Unshares shared blocks within the file size without overwriting any existing data
pub const FALLOC_FL_UNSHARE_RANGE = 0x40;
pub const FUTEX_WAIT = 0;
pub const FUTEX_WAKE = 1;
pub const FUTEX_FD = 2;
pub const FUTEX_REQUEUE = 3;
pub const FUTEX_CMP_REQUEUE = 4;
pub const FUTEX_WAKE_OP = 5;
pub const FUTEX_LOCK_PI = 6;
pub const FUTEX_UNLOCK_PI = 7;
pub const FUTEX_TRYLOCK_PI = 8;
pub const FUTEX_WAIT_BITSET = 9;
pub const FUTEX_PRIVATE_FLAG = 128;
pub const FUTEX_CLOCK_REALTIME = 256;
/// page can not be accessed
pub const PROT_NONE = 0x0;
/// page can be read
pub const PROT_READ = 0x1;
/// page can be written
pub const PROT_WRITE = 0x2;
/// page can be executed
pub const PROT_EXEC = 0x4;
/// page may be used for atomic ops
pub const PROT_SEM = switch (builtin.arch) {
// TODO: also xtensa
.mips, .mipsel, .mips64, .mips64el => 0x10,
else => 0x8,
};
/// mprotect flag: extend change to start of growsdown vma
pub const PROT_GROWSDOWN = 0x01000000;
/// mprotect flag: extend change to end of growsup vma
pub const PROT_GROWSUP = 0x02000000;
/// Share changes
pub const MAP_SHARED = 0x01;
/// Changes are private
pub const MAP_PRIVATE = 0x02;
/// share + validate extension flags
pub const MAP_SHARED_VALIDATE = 0x03;
/// Mask for type of mapping
pub const MAP_TYPE = 0x0f;
/// Interpret addr exactly
pub const MAP_FIXED = 0x10;
/// don't use a file
pub const MAP_ANONYMOUS = if (is_mips) 0x800 else 0x20;
// MAP_ 0x0100 - 0x4000 flags are per architecture
/// populate (prefault) pagetables
pub const MAP_POPULATE = if (is_mips) 0x10000 else 0x8000;
/// do not block on IO
pub const MAP_NONBLOCK = if (is_mips) 0x20000 else 0x10000;
/// give out an address that is best suited for process/thread stacks
pub const MAP_STACK = if (is_mips) 0x40000 else 0x20000;
/// create a huge page mapping
pub const MAP_HUGETLB = if (is_mips) 0x80000 else 0x40000;
/// perform synchronous page faults for the mapping
pub const MAP_SYNC = 0x80000;
/// MAP_FIXED which doesn't unmap underlying mapping
pub const MAP_FIXED_NOREPLACE = 0x100000;
/// For anonymous mmap, memory could be uninitialized
pub const MAP_UNINITIALIZED = 0x4000000;
pub const FD_CLOEXEC = 1;
pub const F_OK = 0;
pub const X_OK = 1;
pub const W_OK = 2;
pub const R_OK = 4;
pub const WNOHANG = 1;
pub const WUNTRACED = 2;
pub const WSTOPPED = 2;
pub const WEXITED = 4;
pub const WCONTINUED = 8;
pub const WNOWAIT = 0x1000000;
// sigaction flags and sigprocmask "how" values are architecture-specific:
// MIPS and SPARC define their own numbering; all other Linux targets share
// the generic values.
pub usingnamespace if (is_mips)
    struct {
        pub const SA_NOCLDSTOP = 1;
        pub const SA_NOCLDWAIT = 0x10000;
        pub const SA_SIGINFO = 8;
        pub const SA_RESTART = 0x10000000;
        pub const SA_RESETHAND = 0x80000000;
        pub const SA_ONSTACK = 0x08000000;
        pub const SA_NODEFER = 0x40000000;
        pub const SA_RESTORER = 0x04000000;
        pub const SIG_BLOCK = 1;
        pub const SIG_UNBLOCK = 2;
        pub const SIG_SETMASK = 3;
    }
else if (is_sparc)
    struct {
        pub const SA_NOCLDSTOP = 0x8;
        pub const SA_NOCLDWAIT = 0x100;
        pub const SA_SIGINFO = 0x200;
        pub const SA_RESTART = 0x2;
        pub const SA_RESETHAND = 0x4;
        pub const SA_ONSTACK = 0x1;
        pub const SA_NODEFER = 0x20;
        pub const SA_RESTORER = 0x04000000;
        pub const SIG_BLOCK = 1;
        pub const SIG_UNBLOCK = 2;
        pub const SIG_SETMASK = 4;
    }
else
    struct {
        pub const SA_NOCLDSTOP = 1;
        pub const SA_NOCLDWAIT = 2;
        pub const SA_SIGINFO = 4;
        pub const SA_RESTART = 0x10000000;
        pub const SA_RESETHAND = 0x80000000;
        pub const SA_ONSTACK = 0x08000000;
        pub const SA_NODEFER = 0x40000000;
        pub const SA_RESTORER = 0x04000000;
        pub const SIG_BLOCK = 0;
        pub const SIG_UNBLOCK = 1;
        pub const SIG_SETMASK = 2;
    };
pub const SIGHUP = 1;
pub const SIGINT = 2;
pub const SIGQUIT = 3;
pub const SIGILL = 4;
pub const SIGTRAP = 5;
pub const SIGABRT = 6;
pub const SIGIOT = SIGABRT;
pub const SIGBUS = 7;
pub const SIGFPE = 8;
pub const SIGKILL = 9;
pub const SIGUSR1 = 10;
pub const SIGSEGV = 11;
pub const SIGUSR2 = 12;
pub const SIGPIPE = 13;
pub const SIGALRM = 14;
pub const SIGTERM = 15;
pub const SIGSTKFLT = 16;
pub const SIGCHLD = 17;
pub const SIGCONT = 18;
pub const SIGSTOP = 19;
pub const SIGTSTP = 20;
pub const SIGTTIN = 21;
pub const SIGTTOU = 22;
pub const SIGURG = 23;
pub const SIGXCPU = 24;
pub const SIGXFSZ = 25;
pub const SIGVTALRM = 26;
pub const SIGPROF = 27;
pub const SIGWINCH = 28;
pub const SIGIO = 29;
pub const SIGPOLL = 29;
pub const SIGPWR = 30;
pub const SIGSYS = 31;
pub const SIGUNUSED = SIGSYS;
pub const O_RDONLY = 0o0;
pub const O_WRONLY = 0o1;
pub const O_RDWR = 0o2;
pub const kernel_rwf = u32;
/// high priority request, poll if possible
pub const RWF_HIPRI = kernel_rwf(0x00000001);
/// per-IO O_DSYNC
pub const RWF_DSYNC = kernel_rwf(0x00000002);
/// per-IO O_SYNC
pub const RWF_SYNC = kernel_rwf(0x00000004);
/// per-IO, return -EAGAIN if operation would block
pub const RWF_NOWAIT = kernel_rwf(0x00000008);
/// per-IO O_APPEND
pub const RWF_APPEND = kernel_rwf(0x00000010);
pub const SEEK_SET = 0;
pub const SEEK_CUR = 1;
pub const SEEK_END = 2;
pub const SHUT_RD = 0;
pub const SHUT_WR = 1;
pub const SHUT_RDWR = 2;
pub const SOCK_STREAM = if (is_mips) 2 else 1;
pub const SOCK_DGRAM = if (is_mips) 1 else 2;
pub const SOCK_RAW = 3;
pub const SOCK_RDM = 4;
pub const SOCK_SEQPACKET = 5;
pub const SOCK_DCCP = 6;
pub const SOCK_PACKET = 10;
pub const SOCK_CLOEXEC = 0o2000000;
pub const SOCK_NONBLOCK = if (is_mips) 0o200 else 0o4000;
pub const PF_UNSPEC = 0;
pub const PF_LOCAL = 1;
pub const PF_UNIX = PF_LOCAL;
pub const PF_FILE = PF_LOCAL;
pub const PF_INET = 2;
pub const PF_AX25 = 3;
pub const PF_IPX = 4;
pub const PF_APPLETALK = 5;
pub const PF_NETROM = 6;
pub const PF_BRIDGE = 7;
pub const PF_ATMPVC = 8;
pub const PF_X25 = 9;
pub const PF_INET6 = 10;
pub const PF_ROSE = 11;
pub const PF_DECnet = 12;
pub const PF_NETBEUI = 13;
pub const PF_SECURITY = 14;
pub const PF_KEY = 15;
pub const PF_NETLINK = 16;
pub const PF_ROUTE = PF_NETLINK;
pub const PF_PACKET = 17;
pub const PF_ASH = 18;
pub const PF_ECONET = 19;
pub const PF_ATMSVC = 20;
pub const PF_RDS = 21;
pub const PF_SNA = 22;
pub const PF_IRDA = 23;
pub const PF_PPPOX = 24;
pub const PF_WANPIPE = 25;
pub const PF_LLC = 26;
pub const PF_IB = 27;
pub const PF_MPLS = 28;
pub const PF_CAN = 29;
pub const PF_TIPC = 30;
pub const PF_BLUETOOTH = 31;
pub const PF_IUCV = 32;
pub const PF_RXRPC = 33;
pub const PF_ISDN = 34;
pub const PF_PHONET = 35;
pub const PF_IEEE802154 = 36;
pub const PF_CAIF = 37;
pub const PF_ALG = 38;
pub const PF_NFC = 39;
pub const PF_VSOCK = 40;
pub const PF_KCM = 41;
pub const PF_QIPCRTR = 42;
pub const PF_SMC = 43;
pub const PF_MAX = 44;
pub const AF_UNSPEC = PF_UNSPEC;
pub const AF_LOCAL = PF_LOCAL;
pub const AF_UNIX = AF_LOCAL;
pub const AF_FILE = AF_LOCAL;
pub const AF_INET = PF_INET;
pub const AF_AX25 = PF_AX25;
pub const AF_IPX = PF_IPX;
pub const AF_APPLETALK = PF_APPLETALK;
pub const AF_NETROM = PF_NETROM;
pub const AF_BRIDGE = PF_BRIDGE;
pub const AF_ATMPVC = PF_ATMPVC;
pub const AF_X25 = PF_X25;
pub const AF_INET6 = PF_INET6;
pub const AF_ROSE = PF_ROSE;
pub const AF_DECnet = PF_DECnet;
pub const AF_NETBEUI = PF_NETBEUI;
pub const AF_SECURITY = PF_SECURITY;
pub const AF_KEY = PF_KEY;
pub const AF_NETLINK = PF_NETLINK;
pub const AF_ROUTE = PF_ROUTE;
pub const AF_PACKET = PF_PACKET;
pub const AF_ASH = PF_ASH;
pub const AF_ECONET = PF_ECONET;
pub const AF_ATMSVC = PF_ATMSVC;
pub const AF_RDS = PF_RDS;
pub const AF_SNA = PF_SNA;
pub const AF_IRDA = PF_IRDA;
pub const AF_PPPOX = PF_PPPOX;
pub const AF_WANPIPE = PF_WANPIPE;
pub const AF_LLC = PF_LLC;
pub const AF_IB = PF_IB;
pub const AF_MPLS = PF_MPLS;
pub const AF_CAN = PF_CAN;
pub const AF_TIPC = PF_TIPC;
pub const AF_BLUETOOTH = PF_BLUETOOTH;
pub const AF_IUCV = PF_IUCV;
pub const AF_RXRPC = PF_RXRPC;
pub const AF_ISDN = PF_ISDN;
pub const AF_PHONET = PF_PHONET;
pub const AF_IEEE802154 = PF_IEEE802154;
pub const AF_CAIF = PF_CAIF;
pub const AF_ALG = PF_ALG;
pub const AF_NFC = PF_NFC;
pub const AF_VSOCK = PF_VSOCK;
pub const AF_KCM = PF_KCM;
pub const AF_QIPCRTR = PF_QIPCRTR;
pub const AF_SMC = PF_SMC;
pub const AF_MAX = PF_MAX;
pub usingnamespace if (!is_mips)
struct {
pub const SO_DEBUG = 1;
pub const SO_REUSEADDR = 2;
pub const SO_TYPE = 3;
pub const SO_ERROR = 4;
pub const SO_DONTROUTE = 5;
pub const SO_BROADCAST = 6;
pub const SO_SNDBUF = 7;
pub const SO_RCVBUF = 8;
pub const SO_KEEPALIVE = 9;
pub const SO_OOBINLINE = 10;
pub const SO_NO_CHECK = 11;
pub const SO_PRIORITY = 12;
pub const SO_LINGER = 13;
pub const SO_BSDCOMPAT = 14;
pub const SO_REUSEPORT = 15;
pub const SO_PASSCRED = 16;
pub const SO_PEERCRED = 17;
pub const SO_RCVLOWAT = 18;
pub const SO_SNDLOWAT = 19;
pub const SO_RCVTIMEO = 20;
pub const SO_SNDTIMEO = 21;
pub const SO_ACCEPTCONN = 30;
pub const SO_PEERSEC = 31;
pub const SO_SNDBUFFORCE = 32;
pub const SO_RCVBUFFORCE = 33;
pub const SO_PROTOCOL = 38;
pub const SO_DOMAIN = 39;
}
else
struct {};
pub const SO_SECURITY_AUTHENTICATION = 22;
pub const SO_SECURITY_ENCRYPTION_TRANSPORT = 23;
pub const SO_SECURITY_ENCRYPTION_NETWORK = 24;
pub const SO_BINDTODEVICE = 25;
pub const SO_ATTACH_FILTER = 26;
pub const SO_DETACH_FILTER = 27;
pub const SO_GET_FILTER = SO_ATTACH_FILTER;
pub const SO_PEERNAME = 28;
pub const SO_TIMESTAMP_OLD = 29;
pub const SO_PASSSEC = 34;
pub const SO_TIMESTAMPNS_OLD = 35;
pub const SO_MARK = 36;
pub const SO_TIMESTAMPING_OLD = 37;
pub const SO_RXQ_OVFL = 40;
pub const SO_WIFI_STATUS = 41;
pub const SCM_WIFI_STATUS = SO_WIFI_STATUS;
pub const SO_PEEK_OFF = 42;
pub const SO_NOFCS = 43;
pub const SO_LOCK_FILTER = 44;
pub const SO_SELECT_ERR_QUEUE = 45;
pub const SO_BUSY_POLL = 46;
pub const SO_MAX_PACING_RATE = 47;
pub const SO_BPF_EXTENSIONS = 48;
pub const SO_INCOMING_CPU = 49;
pub const SO_ATTACH_BPF = 50;
pub const SO_DETACH_BPF = SO_DETACH_FILTER;
pub const SO_ATTACH_REUSEPORT_CBPF = 51;
pub const SO_ATTACH_REUSEPORT_EBPF = 52;
pub const SO_CNX_ADVICE = 53;
pub const SCM_TIMESTAMPING_OPT_STATS = 54;
pub const SO_MEMINFO = 55;
pub const SO_INCOMING_NAPI_ID = 56;
pub const SO_COOKIE = 57;
pub const SCM_TIMESTAMPING_PKTINFO = 58;
pub const SO_PEERGROUPS = 59;
pub const SO_ZEROCOPY = 60;
pub const SO_TXTIME = 61;
pub const SCM_TXTIME = SO_TXTIME;
pub const SO_BINDTOIFINDEX = 62;
pub const SO_TIMESTAMP_NEW = 63;
pub const SO_TIMESTAMPNS_NEW = 64;
pub const SO_TIMESTAMPING_NEW = 65;
pub const SO_RCVTIMEO_NEW = 66;
pub const SO_SNDTIMEO_NEW = 67;
pub const SO_DETACH_REUSEPORT_BPF = 68;
pub const SOL_SOCKET = if (is_mips) 65535 else 1;
pub const SOL_IP = 0;
pub const SOL_IPV6 = 41;
pub const SOL_ICMPV6 = 58;
pub const SOL_RAW = 255;
pub const SOL_DECNET = 261;
pub const SOL_X25 = 262;
pub const SOL_PACKET = 263;
pub const SOL_ATM = 264;
pub const SOL_AAL = 265;
pub const SOL_IRDA = 266;
pub const SOL_NETBEUI = 267;
pub const SOL_LLC = 268;
pub const SOL_DCCP = 269;
pub const SOL_NETLINK = 270;
pub const SOL_TIPC = 271;
pub const SOL_RXRPC = 272;
pub const SOL_PPPOL2TP = 273;
pub const SOL_BLUETOOTH = 274;
pub const SOL_PNPIPE = 275;
pub const SOL_RDS = 276;
pub const SOL_IUCV = 277;
pub const SOL_CAIF = 278;
pub const SOL_ALG = 279;
pub const SOL_NFC = 280;
pub const SOL_KCM = 281;
pub const SOL_TLS = 282;
pub const SOMAXCONN = 128;
pub const MSG_OOB = 0x0001;
pub const MSG_PEEK = 0x0002;
pub const MSG_DONTROUTE = 0x0004;
pub const MSG_CTRUNC = 0x0008;
pub const MSG_PROXY = 0x0010;
pub const MSG_TRUNC = 0x0020;
pub const MSG_DONTWAIT = 0x0040;
pub const MSG_EOR = 0x0080;
pub const MSG_WAITALL = 0x0100;
pub const MSG_FIN = 0x0200;
pub const MSG_SYN = 0x0400;
pub const MSG_CONFIRM = 0x0800;
pub const MSG_RST = 0x1000;
pub const MSG_ERRQUEUE = 0x2000;
pub const MSG_NOSIGNAL = 0x4000;
pub const MSG_MORE = 0x8000;
pub const MSG_WAITFORONE = 0x10000;
pub const MSG_BATCH = 0x40000;
pub const MSG_ZEROCOPY = 0x4000000;
pub const MSG_FASTOPEN = 0x20000000;
pub const MSG_CMSG_CLOEXEC = 0x40000000;
pub const DT_UNKNOWN = 0;
pub const DT_FIFO = 1;
pub const DT_CHR = 2;
pub const DT_DIR = 4;
pub const DT_BLK = 6;
pub const DT_REG = 8;
pub const DT_LNK = 10;
pub const DT_SOCK = 12;
pub const DT_WHT = 14;
pub const TCGETS = if (is_mips) 0x540D else 0x5401;
pub const TCSETS = 0x5402;
pub const TCSETSW = 0x5403;
pub const TCSETSF = 0x5404;
pub const TCGETA = 0x5405;
pub const TCSETA = 0x5406;
pub const TCSETAW = 0x5407;
pub const TCSETAF = 0x5408;
pub const TCSBRK = 0x5409;
pub const TCXONC = 0x540A;
pub const TCFLSH = 0x540B;
pub const TIOCEXCL = 0x540C;
pub const TIOCNXCL = 0x540D;
pub const TIOCSCTTY = 0x540E;
pub const TIOCGPGRP = 0x540F;
pub const TIOCSPGRP = 0x5410;
pub const TIOCOUTQ = if (is_mips) 0x7472 else 0x5411;
pub const TIOCSTI = 0x5412;
pub const TIOCGWINSZ = if (is_mips or is_ppc64) 0x40087468 else 0x5413;
pub const TIOCSWINSZ = if (is_mips or is_ppc64) 0x80087467 else 0x5414;
pub const TIOCMGET = 0x5415;
pub const TIOCMBIS = 0x5416;
pub const TIOCMBIC = 0x5417;
pub const TIOCMSET = 0x5418;
pub const TIOCGSOFTCAR = 0x5419;
pub const TIOCSSOFTCAR = 0x541A;
pub const FIONREAD = if (is_mips) 0x467F else 0x541B;
pub const TIOCINQ = FIONREAD;
pub const TIOCLINUX = 0x541C;
pub const TIOCCONS = 0x541D;
pub const TIOCGSERIAL = 0x541E;
pub const TIOCSSERIAL = 0x541F;
pub const TIOCPKT = 0x5420;
pub const FIONBIO = 0x5421;
pub const TIOCNOTTY = 0x5422;
pub const TIOCSETD = 0x5423;
pub const TIOCGETD = 0x5424;
pub const TCSBRKP = 0x5425;
pub const TIOCSBRK = 0x5427;
pub const TIOCCBRK = 0x5428;
pub const TIOCGSID = 0x5429;
pub const TIOCGRS485 = 0x542E;
pub const TIOCSRS485 = 0x542F;
pub const TIOCGPTN = 0x80045430;
pub const TIOCSPTLCK = 0x40045431;
pub const TIOCGDEV = 0x80045432;
pub const TCGETX = 0x5432;
pub const TCSETX = 0x5433;
pub const TCSETXF = 0x5434;
pub const TCSETXW = 0x5435;
pub const TIOCSIG = 0x40045436;
pub const TIOCVHANGUP = 0x5437;
pub const TIOCGPKT = 0x80045438;
pub const TIOCGPTLCK = 0x80045439;
pub const TIOCGEXCL = 0x80045440;
pub const EPOLL_CLOEXEC = O_CLOEXEC;
pub const EPOLL_CTL_ADD = 1;
pub const EPOLL_CTL_DEL = 2;
pub const EPOLL_CTL_MOD = 3;
pub const EPOLLIN = 0x001;
pub const EPOLLPRI = 0x002;
pub const EPOLLOUT = 0x004;
pub const EPOLLRDNORM = 0x040;
pub const EPOLLRDBAND = 0x080;
pub const EPOLLWRNORM = if (is_mips) 0x004 else 0x100;
pub const EPOLLWRBAND = if (is_mips) 0x100 else 0x200;
pub const EPOLLMSG = 0x400;
pub const EPOLLERR = 0x008;
pub const EPOLLHUP = 0x010;
pub const EPOLLRDHUP = 0x2000;
pub const EPOLLEXCLUSIVE = (@as(u32, 1) << 28);
pub const EPOLLWAKEUP = (@as(u32, 1) << 29);
pub const EPOLLONESHOT = (@as(u32, 1) << 30);
pub const EPOLLET = (@as(u32, 1) << 31);
pub const CLOCK_REALTIME = 0;
pub const CLOCK_MONOTONIC = 1;
pub const CLOCK_PROCESS_CPUTIME_ID = 2;
pub const CLOCK_THREAD_CPUTIME_ID = 3;
pub const CLOCK_MONOTONIC_RAW = 4;
pub const CLOCK_REALTIME_COARSE = 5;
pub const CLOCK_MONOTONIC_COARSE = 6;
pub const CLOCK_BOOTTIME = 7;
pub const CLOCK_REALTIME_ALARM = 8;
pub const CLOCK_BOOTTIME_ALARM = 9;
pub const CLOCK_SGI_CYCLE = 10;
pub const CLOCK_TAI = 11;
pub const CSIGNAL = 0x000000ff;
pub const CLONE_VM = 0x00000100;
pub const CLONE_FS = 0x00000200;
pub const CLONE_FILES = 0x00000400;
pub const CLONE_SIGHAND = 0x00000800;
pub const CLONE_PTRACE = 0x00002000;
pub const CLONE_VFORK = 0x00004000;
pub const CLONE_PARENT = 0x00008000;
pub const CLONE_THREAD = 0x00010000;
pub const CLONE_NEWNS = 0x00020000;
pub const CLONE_SYSVSEM = 0x00040000;
pub const CLONE_SETTLS = 0x00080000;
pub const CLONE_PARENT_SETTID = 0x00100000;
pub const CLONE_CHILD_CLEARTID = 0x00200000;
pub const CLONE_DETACHED = 0x00400000;
pub const CLONE_UNTRACED = 0x00800000;
pub const CLONE_CHILD_SETTID = 0x01000000;
pub const CLONE_NEWCGROUP = 0x02000000;
pub const CLONE_NEWUTS = 0x04000000;
pub const CLONE_NEWIPC = 0x08000000;
pub const CLONE_NEWUSER = 0x10000000;
pub const CLONE_NEWPID = 0x20000000;
pub const CLONE_NEWNET = 0x40000000;
pub const CLONE_IO = 0x80000000;
// Flags for the clone3() syscall.
/// Clear any signal handler and reset to SIG_DFL.
pub const CLONE_CLEAR_SIGHAND = 0x100000000;
// cloning flags intersect with CSIGNAL so can be used with unshare and clone3 syscalls only.
/// New time namespace
pub const CLONE_NEWTIME = 0x00000080;
pub const EFD_SEMAPHORE = 1;
pub const EFD_CLOEXEC = O_CLOEXEC;
pub const EFD_NONBLOCK = O_NONBLOCK;
pub const MS_RDONLY = 1;
pub const MS_NOSUID = 2;
pub const MS_NODEV = 4;
pub const MS_NOEXEC = 8;
pub const MS_SYNCHRONOUS = 16;
pub const MS_REMOUNT = 32;
pub const MS_MANDLOCK = 64;
pub const MS_DIRSYNC = 128;
pub const MS_NOATIME = 1024;
pub const MS_NODIRATIME = 2048;
pub const MS_BIND = 4096;
pub const MS_MOVE = 8192;
pub const MS_REC = 16384;
pub const MS_SILENT = 32768;
pub const MS_POSIXACL = (1 << 16);
pub const MS_UNBINDABLE = (1 << 17);
pub const MS_PRIVATE = (1 << 18);
pub const MS_SLAVE = (1 << 19);
pub const MS_SHARED = (1 << 20);
pub const MS_RELATIME = (1 << 21);
pub const MS_KERNMOUNT = (1 << 22);
pub const MS_I_VERSION = (1 << 23);
pub const MS_STRICTATIME = (1 << 24);
pub const MS_LAZYTIME = (1 << 25);
pub const MS_NOREMOTELOCK = (1 << 27);
pub const MS_NOSEC = (1 << 28);
pub const MS_BORN = (1 << 29);
pub const MS_ACTIVE = (1 << 30);
pub const MS_NOUSER = (1 << 31);
pub const MS_RMT_MASK = (MS_RDONLY | MS_SYNCHRONOUS | MS_MANDLOCK | MS_I_VERSION | MS_LAZYTIME);
pub const MS_MGC_VAL = 0xc0ed0000;
pub const MS_MGC_MSK = 0xffff0000;
pub const MNT_FORCE = 1;
pub const MNT_DETACH = 2;
pub const MNT_EXPIRE = 4;
pub const UMOUNT_NOFOLLOW = 8;
pub const IN_CLOEXEC = O_CLOEXEC;
pub const IN_NONBLOCK = O_NONBLOCK;
pub const IN_ACCESS = 0x00000001;
pub const IN_MODIFY = 0x00000002;
pub const IN_ATTRIB = 0x00000004;
pub const IN_CLOSE_WRITE = 0x00000008;
pub const IN_CLOSE_NOWRITE = 0x00000010;
pub const IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE;
pub const IN_OPEN = 0x00000020;
pub const IN_MOVED_FROM = 0x00000040;
pub const IN_MOVED_TO = 0x00000080;
pub const IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO;
pub const IN_CREATE = 0x00000100;
pub const IN_DELETE = 0x00000200;
pub const IN_DELETE_SELF = 0x00000400;
pub const IN_MOVE_SELF = 0x00000800;
pub const IN_ALL_EVENTS = 0x00000fff;
pub const IN_UNMOUNT = 0x00002000;
pub const IN_Q_OVERFLOW = 0x00004000;
pub const IN_IGNORED = 0x00008000;
pub const IN_ONLYDIR = 0x01000000;
pub const IN_DONT_FOLLOW = 0x02000000;
pub const IN_EXCL_UNLINK = 0x04000000;
pub const IN_MASK_ADD = 0x20000000;
pub const IN_ISDIR = 0x40000000;
pub const IN_ONESHOT = 0x80000000;
pub const S_IFMT = 0o170000;
pub const S_IFDIR = 0o040000;
pub const S_IFCHR = 0o020000;
pub const S_IFBLK = 0o060000;
pub const S_IFREG = 0o100000;
pub const S_IFIFO = 0o010000;
pub const S_IFLNK = 0o120000;
pub const S_IFSOCK = 0o140000;
pub const S_ISUID = 0o4000;
pub const S_ISGID = 0o2000;
pub const S_ISVTX = 0o1000;
pub const S_IRUSR = 0o400;
pub const S_IWUSR = 0o200;
pub const S_IXUSR = 0o100;
pub const S_IRWXU = 0o700;
pub const S_IRGRP = 0o040;
pub const S_IWGRP = 0o020;
pub const S_IXGRP = 0o010;
pub const S_IRWXG = 0o070;
pub const S_IROTH = 0o004;
pub const S_IWOTH = 0o002;
pub const S_IXOTH = 0o001;
pub const S_IRWXO = 0o007;
/// True if mode `m` describes a regular file.
pub fn S_ISREG(m: u32) bool {
    return (m & S_IFMT) == S_IFREG;
}
/// True if mode `m` describes a directory.
pub fn S_ISDIR(m: u32) bool {
    return (m & S_IFMT) == S_IFDIR;
}
/// True if mode `m` describes a character device.
pub fn S_ISCHR(m: u32) bool {
    return (m & S_IFMT) == S_IFCHR;
}
/// True if mode `m` describes a block device.
pub fn S_ISBLK(m: u32) bool {
    return (m & S_IFMT) == S_IFBLK;
}
/// True if mode `m` describes a FIFO (named pipe).
pub fn S_ISFIFO(m: u32) bool {
    return (m & S_IFMT) == S_IFIFO;
}
/// True if mode `m` describes a symbolic link.
pub fn S_ISLNK(m: u32) bool {
    return (m & S_IFMT) == S_IFLNK;
}
/// True if mode `m` describes a socket.
pub fn S_ISSOCK(m: u32) bool {
    return (m & S_IFMT) == S_IFSOCK;
}
pub const UTIME_NOW = 0x3fffffff;
pub const UTIME_OMIT = 0x3ffffffe;
pub const TFD_NONBLOCK = O_NONBLOCK;
pub const TFD_CLOEXEC = O_CLOEXEC;
pub const TFD_TIMER_ABSTIME = 1;
pub const TFD_TIMER_CANCEL_ON_SET = (1 << 1);
/// Exit code of the child; only meaningful when WIFEXITED(s) is true.
pub fn WEXITSTATUS(s: u32) u32 {
    return (s & 0xff00) >> 8;
}
/// Signal number that terminated the child; only meaningful when
/// WIFSIGNALED(s) is true.
pub fn WTERMSIG(s: u32) u32 {
    return s & 0x7f;
}
/// Signal number that stopped the child; only meaningful when
/// WIFSTOPPED(s) is true. Shares the exit-status byte.
pub fn WSTOPSIG(s: u32) u32 {
    return WEXITSTATUS(s);
}
/// True if the child terminated normally (via exit/_exit/return).
pub fn WIFEXITED(s: u32) bool {
    return WTERMSIG(s) == 0;
}
/// True if the child is currently stopped (job control / ptrace).
/// Mirrors musl's branchless check. @truncate (not @intCast) is required:
/// the intermediate product does not fit in u16 for genuine stop statuses
/// (e.g. s = 0x137f yields 0x137f13), and @intCast would trip a safety
/// check; musl's C macro relies on the (short) cast truncating.
pub fn WIFSTOPPED(s: u32) bool {
    return @truncate(u16, ((s & 0xffff) *% 0x10001) >> 8) > 0x7f00;
}
/// True if the child was terminated by a signal.
pub fn WIFSIGNALED(s: u32) bool {
    return (s & 0xffff) -% 1 < 0xff;
}
pub const winsize = extern struct {
ws_row: u16,
ws_col: u16,
ws_xpixel: u16,
ws_ypixel: u16,
};
/// NSIG is the total number of signals defined.
/// As signal numbers are sequential, NSIG is one greater than the largest defined signal number.
pub const NSIG = if (is_mips) 128 else 65;
pub const sigset_t = [1024 / 32]u32;
pub const all_mask: sigset_t = [_]u32{0xffffffff} ** sigset_t.len;
pub const app_mask: sigset_t = [2]u32{ 0xfffffffc, 0x7fffffff } ++ [_]u32{0xffffffff} ** 30;
pub const k_sigaction = if (is_mips)
extern struct {
flags: usize,
sigaction: ?fn (i32, *siginfo_t, ?*c_void) callconv(.C) void,
mask: [4]u32,
restorer: fn () callconv(.C) void,
}
else
extern struct {
sigaction: ?fn (i32, *siginfo_t, ?*c_void) callconv(.C) void,
flags: usize,
restorer: fn () callconv(.C) void,
mask: [2]u32,
};
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
pub const sigaction_fn = fn (i32, *siginfo_t, ?*c_void) callconv(.C) void;
sigaction: ?sigaction_fn,
mask: sigset_t,
flags: u32,
restorer: ?fn () callconv(.C) void = null,
};
pub const SIG_ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
pub const SIG_DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
pub const SIG_IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
pub const empty_sigset = [_]u32{0} ** @typeInfo(sigset_t).Array.len;
pub const signalfd_siginfo = extern struct {
signo: u32,
errno: i32,
code: i32,
pid: u32,
uid: uid_t,
fd: i32,
tid: u32,
band: u32,
overrun: u32,
trapno: u32,
status: i32,
int: i32,
ptr: u64,
utime: u64,
stime: u64,
addr: u64,
addr_lsb: u16,
__pad2: u16,
syscall: i32,
call_addr: u64,
arch: u32,
__pad: [28]u8,
};
pub const in_port_t = u16;
pub const sa_family_t = u16;
pub const socklen_t = u32;
pub const sockaddr = extern struct {
family: sa_family_t,
data: [14]u8,
};
/// IPv4 socket address
pub const sockaddr_in = extern struct {
family: sa_family_t = AF_INET,
port: in_port_t,
addr: u32,
zero: [8]u8 = [8]u8{ 0, 0, 0, 0, 0, 0, 0, 0 },
};
/// IPv6 socket address
pub const sockaddr_in6 = extern struct {
family: sa_family_t = AF_INET6,
port: in_port_t,
flowinfo: u32,
addr: [16]u8,
scope_id: u32,
};
/// UNIX domain socket address
pub const sockaddr_un = extern struct {
family: sa_family_t = AF_UNIX,
path: [108]u8,
};
pub const mmsghdr = extern struct {
msg_hdr: msghdr,
msg_len: u32,
};
pub const mmsghdr_const = extern struct {
msg_hdr: msghdr_const,
msg_len: u32,
};
pub const epoll_data = extern union {
ptr: usize,
fd: i32,
@"u32": u32,
@"u64": u64,
};
// On x86_64 the structure is packed so that it matches the definition of its
// 32bit counterpart
pub const epoll_event = switch (builtin.arch) {
.x86_64 => packed struct {
events: u32,
data: epoll_data,
},
else => extern struct {
events: u32,
data: epoll_data,
},
};
pub const _LINUX_CAPABILITY_VERSION_1 = 0x19980330;
pub const _LINUX_CAPABILITY_U32S_1 = 1;
pub const _LINUX_CAPABILITY_VERSION_2 = 0x20071026;
pub const _LINUX_CAPABILITY_U32S_2 = 2;
pub const _LINUX_CAPABILITY_VERSION_3 = 0x20080522;
pub const _LINUX_CAPABILITY_U32S_3 = 2;
pub const VFS_CAP_REVISION_MASK = 0xFF000000;
pub const VFS_CAP_REVISION_SHIFT = 24;
pub const VFS_CAP_FLAGS_MASK = ~VFS_CAP_REVISION_MASK;
pub const VFS_CAP_FLAGS_EFFECTIVE = 0x000001;
pub const VFS_CAP_REVISION_1 = 0x01000000;
pub const VFS_CAP_U32_1 = 1;
pub const XATTR_CAPS_SZ_1 = @sizeOf(u32) * (1 + 2 * VFS_CAP_U32_1);
pub const VFS_CAP_REVISION_2 = 0x02000000;
pub const VFS_CAP_U32_2 = 2;
pub const XATTR_CAPS_SZ_2 = @sizeOf(u32) * (1 + 2 * VFS_CAP_U32_2);
pub const XATTR_CAPS_SZ = XATTR_CAPS_SZ_2;
pub const VFS_CAP_U32 = VFS_CAP_U32_2;
pub const VFS_CAP_REVISION = VFS_CAP_REVISION_2;
pub const vfs_cap_data = extern struct {
//all of these are mandated as little endian
//when on disk.
const Data = struct {
permitted: u32,
inheritable: u32,
};
magic_etc: u32,
data: [VFS_CAP_U32]Data,
};
pub const CAP_CHOWN = 0;
pub const CAP_DAC_OVERRIDE = 1;
pub const CAP_DAC_READ_SEARCH = 2;
pub const CAP_FOWNER = 3;
pub const CAP_FSETID = 4;
pub const CAP_KILL = 5;
pub const CAP_SETGID = 6;
pub const CAP_SETUID = 7;
pub const CAP_SETPCAP = 8;
pub const CAP_LINUX_IMMUTABLE = 9;
pub const CAP_NET_BIND_SERVICE = 10;
pub const CAP_NET_BROADCAST = 11;
pub const CAP_NET_ADMIN = 12;
pub const CAP_NET_RAW = 13;
pub const CAP_IPC_LOCK = 14;
pub const CAP_IPC_OWNER = 15;
pub const CAP_SYS_MODULE = 16;
pub const CAP_SYS_RAWIO = 17;
pub const CAP_SYS_CHROOT = 18;
pub const CAP_SYS_PTRACE = 19;
pub const CAP_SYS_PACCT = 20;
pub const CAP_SYS_ADMIN = 21;
pub const CAP_SYS_BOOT = 22;
pub const CAP_SYS_NICE = 23;
pub const CAP_SYS_RESOURCE = 24;
pub const CAP_SYS_TIME = 25;
pub const CAP_SYS_TTY_CONFIG = 26;
pub const CAP_MKNOD = 27;
pub const CAP_LEASE = 28;
pub const CAP_AUDIT_WRITE = 29;
pub const CAP_AUDIT_CONTROL = 30;
pub const CAP_SETFCAP = 31;
pub const CAP_MAC_OVERRIDE = 32;
pub const CAP_MAC_ADMIN = 33;
pub const CAP_SYSLOG = 34;
pub const CAP_WAKE_ALARM = 35;
pub const CAP_BLOCK_SUSPEND = 36;
pub const CAP_AUDIT_READ = 37;
pub const CAP_LAST_CAP = CAP_AUDIT_READ;
/// True if `x` is a defined capability number (0 ... CAP_LAST_CAP).
/// Fixed: the parameter was declared backwards (`u8: x`), which does not
/// compile. The `x >= 0` half of the original check is dropped — it is
/// vacuously true for an unsigned `u8`.
pub fn cap_valid(x: u8) bool {
    return x <= CAP_LAST_CAP;
}
/// Bit mask for capability `cap` within its 32-bit capability word.
pub fn CAP_TO_MASK(cap: u8) u32 {
    const bit = @intCast(u5, cap & 31);
    return @as(u32, 1) << bit;
}
/// Index of the 32-bit capability word that holds capability `cap`.
pub fn CAP_TO_INDEX(cap: u8) u8 {
    return cap / 32;
}
pub const cap_t = extern struct {
hdrp: *cap_user_header_t,
datap: *cap_user_data_t,
};
pub const cap_user_header_t = extern struct {
version: u32,
pid: usize,
};
pub const cap_user_data_t = extern struct {
effective: u32,
permitted: u32,
inheritable: u32,
};
pub const inotify_event = extern struct {
wd: i32,
mask: u32,
cookie: u32,
len: u32,
//name: [?]u8,
};
pub const dirent64 = extern struct {
d_ino: u64,
d_off: u64,
d_reclen: u16,
d_type: u8,
d_name: u8, // field address is the address of first byte of name https://github.com/ziglang/zig/issues/173
pub fn reclen(self: dirent64) u16 {
return self.d_reclen;
}
};
pub const dl_phdr_info = extern struct {
dlpi_addr: usize,
dlpi_name: ?[*:0]const u8,
dlpi_phdr: [*]std.elf.Phdr,
dlpi_phnum: u16,
};
pub const CPU_SETSIZE = 128;
pub const cpu_set_t = [CPU_SETSIZE / @sizeOf(usize)]usize;
pub const cpu_count_t = std.meta.Int(.unsigned, std.math.log2(CPU_SETSIZE * 8));
/// Number of CPUs present in `set`: the total population count across
/// all words of the affinity mask.
pub fn CPU_COUNT(set: cpu_set_t) cpu_count_t {
    var total: cpu_count_t = 0;
    for (set) |word| total += @popCount(usize, word);
    return total;
}
// TODO port these over
//#define CPU_SET(i, set) CPU_SET_S(i,sizeof(cpu_set_t),set)
//#define CPU_CLR(i, set) CPU_CLR_S(i,sizeof(cpu_set_t),set)
//#define CPU_ISSET(i, set) CPU_ISSET_S(i,sizeof(cpu_set_t),set)
//#define CPU_AND(d,s1,s2) CPU_AND_S(sizeof(cpu_set_t),d,s1,s2)
//#define CPU_OR(d,s1,s2) CPU_OR_S(sizeof(cpu_set_t),d,s1,s2)
//#define CPU_XOR(d,s1,s2) CPU_XOR_S(sizeof(cpu_set_t),d,s1,s2)
//#define CPU_COUNT(set) CPU_COUNT_S(sizeof(cpu_set_t),set)
//#define CPU_ZERO(set) CPU_ZERO_S(sizeof(cpu_set_t),set)
//#define CPU_EQUAL(s1,s2) CPU_EQUAL_S(sizeof(cpu_set_t),s1,s2)
pub const MINSIGSTKSZ = switch (builtin.arch) {
.i386, .x86_64, .arm, .mipsel => 2048,
.aarch64 => 5120,
else => @compileError("MINSIGSTKSZ not defined for this architecture"),
};
pub const SIGSTKSZ = switch (builtin.arch) {
.i386, .x86_64, .arm, .mipsel => 8192,
.aarch64 => 16384,
else => @compileError("SIGSTKSZ not defined for this architecture"),
};
pub const SS_ONSTACK = 1;
pub const SS_DISABLE = 2;
pub const SS_AUTODISARM = 1 << 31;
pub const stack_t = if (is_mips)
// IRIX compatible stack_t
extern struct {
ss_sp: [*]u8,
ss_size: usize,
ss_flags: i32,
}
else
extern struct {
ss_sp: [*]u8,
ss_flags: i32,
ss_size: usize,
};
pub const sigval = extern union {
int: i32,
ptr: *c_void,
};
const siginfo_fields_union = extern union {
pad: [128 - 2 * @sizeOf(c_int) - @sizeOf(c_long)]u8,
common: extern struct {
first: extern union {
piduid: extern struct {
pid: pid_t,
uid: uid_t,
},
timer: extern struct {
timerid: i32,
overrun: i32,
},
},
second: extern union {
value: sigval,
sigchld: extern struct {
status: i32,
utime: clock_t,
stime: clock_t,
},
},
},
sigfault: extern struct {
addr: *c_void,
addr_lsb: i16,
first: extern union {
addr_bnd: extern struct {
lower: *c_void,
upper: *c_void,
},
pkey: u32,
},
},
sigpoll: extern struct {
band: isize,
fd: i32,
},
sigsys: extern struct {
call_addr: *c_void,
syscall: i32,
arch: u32,
},
};
pub const siginfo_t = if (is_mips)
extern struct {
signo: i32,
code: i32,
errno: i32,
fields: siginfo_fields_union,
}
else
extern struct {
signo: i32,
errno: i32,
code: i32,
fields: siginfo_fields_union,
};
pub const io_uring_params = extern struct {
sq_entries: u32,
cq_entries: u32,
flags: u32,
sq_thread_cpu: u32,
sq_thread_idle: u32,
features: u32,
wq_fd: u32,
resv: [3]u32,
sq_off: io_sqring_offsets,
cq_off: io_cqring_offsets,
};
// io_uring_params.features flags
pub const IORING_FEAT_SINGLE_MMAP = 1 << 0;
pub const IORING_FEAT_NODROP = 1 << 1;
pub const IORING_FEAT_SUBMIT_STABLE = 1 << 2;
pub const IORING_FEAT_RW_CUR_POS = 1 << 3;
pub const IORING_FEAT_CUR_PERSONALITY = 1 << 4;
pub const IORING_FEAT_FAST_POLL = 1 << 5;
pub const IORING_FEAT_POLL_32BITS = 1 << 6;
// io_uring_params.flags
/// io_context is polled
pub const IORING_SETUP_IOPOLL = 1 << 0;
/// SQ poll thread
pub const IORING_SETUP_SQPOLL = 1 << 1;
/// sq_thread_cpu is valid
pub const IORING_SETUP_SQ_AFF = 1 << 2;
/// app defines CQ size
pub const IORING_SETUP_CQSIZE = 1 << 3;
/// clamp SQ/CQ ring sizes
pub const IORING_SETUP_CLAMP = 1 << 4;
/// attach to existing wq
pub const IORING_SETUP_ATTACH_WQ = 1 << 5;
pub const io_sqring_offsets = extern struct {
/// offset of ring head
head: u32,
/// offset of ring tail
tail: u32,
/// ring mask value
ring_mask: u32,
/// entries in ring
ring_entries: u32,
/// ring flags
flags: u32,
/// number of sqes not submitted
dropped: u32,
/// sqe index array
array: u32,
resv1: u32,
resv2: u64,
};
// io_sqring_offsets.flags
/// needs io_uring_enter wakeup
pub const IORING_SQ_NEED_WAKEUP = 1 << 0;
/// kernel has cqes waiting beyond the cq ring
pub const IORING_SQ_CQ_OVERFLOW = 1 << 1;
pub const io_cqring_offsets = extern struct {
head: u32,
tail: u32,
ring_mask: u32,
ring_entries: u32,
overflow: u32,
cqes: u32,
resv: [2]u64,
};
pub const io_uring_sqe = extern struct {
opcode: IORING_OP,
flags: u8,
ioprio: u16,
fd: i32,
off: u64,
addr: u64,
len: u32,
rw_flags: u32,
user_data: u64,
buf_index: u16,
personality: u16,
splice_fd_in: i32,
__pad2: [2]u64,
};
pub const IOSQE_BIT = extern enum(u8) {
FIXED_FILE,
IO_DRAIN,
IO_LINK,
IO_HARDLINK,
ASYNC,
BUFFER_SELECT,
_,
};
// io_uring_sqe.flags
/// use fixed fileset
pub const IOSQE_FIXED_FILE = 1 << @enumToInt(IOSQE_BIT.FIXED_FILE);
/// issue after inflight IO
pub const IOSQE_IO_DRAIN = 1 << @enumToInt(IOSQE_BIT.IO_DRAIN);
/// links next sqe
pub const IOSQE_IO_LINK = 1 << @enumToInt(IOSQE_BIT.IO_LINK);
/// like LINK, but stronger
pub const IOSQE_IO_HARDLINK = 1 << @enumToInt(IOSQE_BIT.IO_HARDLINK);
/// always go async
pub const IOSQE_ASYNC = 1 << @enumToInt(IOSQE_BIT.ASYNC);
/// select buffer from buf_group
pub const IOSQE_BUFFER_SELECT = 1 << @enumToInt(IOSQE_BIT.BUFFER_SELECT);
pub const IORING_OP = extern enum(u8) {
NOP,
READV,
WRITEV,
FSYNC,
READ_FIXED,
WRITE_FIXED,
POLL_ADD,
POLL_REMOVE,
SYNC_FILE_RANGE,
SENDMSG,
RECVMSG,
TIMEOUT,
TIMEOUT_REMOVE,
ACCEPT,
ASYNC_CANCEL,
LINK_TIMEOUT,
CONNECT,
FALLOCATE,
OPENAT,
CLOSE,
FILES_UPDATE,
STATX,
READ,
WRITE,
FADVISE,
MADVISE,
SEND,
RECV,
OPENAT2,
EPOLL_CTL,
SPLICE,
PROVIDE_BUFFERS,
REMOVE_BUFFERS,
TEE,
_,
};
// io_uring_sqe.fsync_flags
pub const IORING_FSYNC_DATASYNC = 1 << 0;
// io_uring_sqe.timeout_flags
pub const IORING_TIMEOUT_ABS = 1 << 0;
// IO completion data structure (Completion Queue Entry)
pub const io_uring_cqe = extern struct {
/// io_uring_sqe.data submission passed back
user_data: u64,
/// result code for this event
res: i32,
flags: u32,
};
pub const IORING_OFF_SQ_RING = 0;
pub const IORING_OFF_CQ_RING = 0x8000000;
pub const IORING_OFF_SQES = 0x10000000;
// io_uring_enter flags
pub const IORING_ENTER_GETEVENTS = 1 << 0;
pub const IORING_ENTER_SQ_WAKEUP = 1 << 1;
// io_uring_register opcodes and arguments
pub const IORING_REGISTER = extern enum(u32) {
REGISTER_BUFFERS,
UNREGISTER_BUFFERS,
REGISTER_FILES,
UNREGISTER_FILES,
REGISTER_EVENTFD,
UNREGISTER_EVENTFD,
REGISTER_FILES_UPDATE,
REGISTER_EVENTFD_ASYNC,
REGISTER_PROBE,
REGISTER_PERSONALITY,
UNREGISTER_PERSONALITY,
_,
};
pub const io_uring_files_update = struct {
offset: u32,
resv: u32,
fds: u64,
};
pub const IO_URING_OP_SUPPORTED = 1 << 0;
pub const io_uring_probe_op = struct {
op: IORING_OP,
resv: u8,
/// IO_URING_OP_* flags
flags: u16,
resv2: u32,
};
/// Result buffer for io_uring_register(IORING_REGISTER.REGISTER_PROBE).
/// Fixed: `resv2` was declared `u32[3]`, which is invalid Zig array
/// syntax (array length precedes the element type: `[3]u32`).
pub const io_uring_probe = struct {
    /// last opcode supported
    last_op: IORING_OP,
    /// Number of io_uring_probe_op following
    ops_len: u8,
    resv: u16,
    resv2: [3]u32,
    // Followed by up to `ops_len` io_uring_probe_op structures
};
pub const utsname = extern struct {
sysname: [64:0]u8,
nodename: [64:0]u8,
release: [64:0]u8,
version: [64:0]u8,
machine: [64:0]u8,
domainname: [64:0]u8,
};
pub const HOST_NAME_MAX = 64;
pub const STATX_TYPE = 0x0001;
pub const STATX_MODE = 0x0002;
pub const STATX_NLINK = 0x0004;
pub const STATX_UID = 0x0008;
pub const STATX_GID = 0x0010;
pub const STATX_ATIME = 0x0020;
pub const STATX_MTIME = 0x0040;
pub const STATX_CTIME = 0x0080;
pub const STATX_INO = 0x0100;
pub const STATX_SIZE = 0x0200;
pub const STATX_BLOCKS = 0x0400;
pub const STATX_BASIC_STATS = 0x07ff;
pub const STATX_BTIME = 0x0800;
pub const STATX_ATTR_COMPRESSED = 0x0004;
pub const STATX_ATTR_IMMUTABLE = 0x0010;
pub const STATX_ATTR_APPEND = 0x0020;
pub const STATX_ATTR_NODUMP = 0x0040;
pub const STATX_ATTR_ENCRYPTED = 0x0800;
pub const STATX_ATTR_AUTOMOUNT = 0x1000;
pub const statx_timestamp = extern struct {
tv_sec: i64,
tv_nsec: u32,
__pad1: u32,
};
/// Renamed to `Statx` to not conflict with the `statx` function.
pub const Statx = extern struct {
/// Mask of bits indicating filled fields
mask: u32,
/// Block size for filesystem I/O
blksize: u32,
/// Extra file attribute indicators
attributes: u64,
/// Number of hard links
nlink: u32,
/// User ID of owner
uid: uid_t,
/// Group ID of owner
gid: gid_t,
/// File type and mode
mode: u16,
__pad1: u16,
/// Inode number
ino: u64,
/// Total size in bytes
size: u64,
/// Number of 512B blocks allocated
blocks: u64,
/// Mask to show what's supported in `attributes`.
attributes_mask: u64,
/// Last access file timestamp
atime: statx_timestamp,
/// Creation file timestamp
btime: statx_timestamp,
/// Last status change file timestamp
ctime: statx_timestamp,
/// Last modification file timestamp
mtime: statx_timestamp,
/// Major ID, if this file represents a device.
rdev_major: u32,
/// Minor ID, if this file represents a device.
rdev_minor: u32,
/// Major ID of the device containing the filesystem where this file resides.
dev_major: u32,
/// Minor ID of the device containing the filesystem where this file resides.
dev_minor: u32,
__pad2: [14]u64,
};
pub const addrinfo = extern struct {
flags: i32,
family: i32,
socktype: i32,
protocol: i32,
addrlen: socklen_t,
addr: ?*sockaddr,
canonname: ?[*:0]u8,
next: ?*addrinfo,
};
pub const IPPORT_RESERVED = 1024;
pub const IPPROTO_IP = 0;
pub const IPPROTO_HOPOPTS = 0;
pub const IPPROTO_ICMP = 1;
pub const IPPROTO_IGMP = 2;
pub const IPPROTO_IPIP = 4;
pub const IPPROTO_TCP = 6;
pub const IPPROTO_EGP = 8;
pub const IPPROTO_PUP = 12;
pub const IPPROTO_UDP = 17;
pub const IPPROTO_IDP = 22;
pub const IPPROTO_TP = 29;
pub const IPPROTO_DCCP = 33;
pub const IPPROTO_IPV6 = 41;
pub const IPPROTO_ROUTING = 43;
pub const IPPROTO_FRAGMENT = 44;
pub const IPPROTO_RSVP = 46;
pub const IPPROTO_GRE = 47;
pub const IPPROTO_ESP = 50;
pub const IPPROTO_AH = 51;
pub const IPPROTO_ICMPV6 = 58;
pub const IPPROTO_NONE = 59;
pub const IPPROTO_DSTOPTS = 60;
pub const IPPROTO_MTP = 92;
pub const IPPROTO_BEETPH = 94;
pub const IPPROTO_ENCAP = 98;
pub const IPPROTO_PIM = 103;
pub const IPPROTO_COMP = 108;
pub const IPPROTO_SCTP = 132;
pub const IPPROTO_MH = 135;
pub const IPPROTO_UDPLITE = 136;
pub const IPPROTO_MPLS = 137;
pub const IPPROTO_RAW = 255;
pub const IPPROTO_MAX = 256;
pub const RR_A = 1;
pub const RR_CNAME = 5;
pub const RR_AAAA = 28;
/// Turn off Nagle's algorithm
pub const TCP_NODELAY = 1;
/// Limit MSS
pub const TCP_MAXSEG = 2;
/// Never send partially complete segments.
pub const TCP_CORK = 3;
/// Start keeplives after this period, in seconds
pub const TCP_KEEPIDLE = 4;
/// Interval between keepalives
pub const TCP_KEEPINTVL = 5;
/// Number of keepalives before death
pub const TCP_KEEPCNT = 6;
/// Number of SYN retransmits
pub const TCP_SYNCNT = 7;
/// Life time of orphaned FIN-WAIT-2 state
pub const TCP_LINGER2 = 8;
/// Wake up listener only when data arrive
pub const TCP_DEFER_ACCEPT = 9;
/// Bound advertised window
pub const TCP_WINDOW_CLAMP = 10;
/// Information about this connection.
pub const TCP_INFO = 11;
/// Block/reenable quick acks
pub const TCP_QUICKACK = 12;
/// Congestion control algorithm
pub const TCP_CONGESTION = 13;
/// TCP MD5 Signature (RFC2385)
pub const TCP_MD5SIG = 14;
/// Use linear timeouts for thin streams
pub const TCP_THIN_LINEAR_TIMEOUTS = 16;
/// Fast retrans. after 1 dupack
pub const TCP_THIN_DUPACK = 17;
/// How long for loss retry before timeout
pub const TCP_USER_TIMEOUT = 18;
/// TCP sock is under repair right now
pub const TCP_REPAIR = 19;
pub const TCP_REPAIR_QUEUE = 20;
pub const TCP_QUEUE_SEQ = 21;
pub const TCP_REPAIR_OPTIONS = 22;
/// Enable FastOpen on listeners
pub const TCP_FASTOPEN = 23;
pub const TCP_TIMESTAMP = 24;
/// limit number of unsent bytes in write queue
pub const TCP_NOTSENT_LOWAT = 25;
/// Get Congestion Control (optional) info
pub const TCP_CC_INFO = 26;
/// Record SYN headers for new connections
pub const TCP_SAVE_SYN = 27;
/// Get SYN headers recorded for connection
pub const TCP_SAVED_SYN = 28;
/// Get/set window parameters
pub const TCP_REPAIR_WINDOW = 29;
/// Attempt FastOpen with connect
pub const TCP_FASTOPEN_CONNECT = 30;
/// Attach a ULP to a TCP connection
pub const TCP_ULP = 31;
/// TCP MD5 Signature with extensions
pub const TCP_MD5SIG_EXT = 32;
/// Set the key for Fast Open (cookie)
pub const TCP_FASTOPEN_KEY = 33;
/// Enable TFO without a TFO cookie
pub const TCP_FASTOPEN_NO_COOKIE = 34;
pub const TCP_ZEROCOPY_RECEIVE = 35;
/// Notify bytes available to read as a cmsg on read
pub const TCP_INQ = 36;
pub const TCP_CM_INQ = TCP_INQ;
/// delay outgoing packets by XX usec
pub const TCP_TX_DELAY = 37;
pub const TCP_REPAIR_ON = 1;
pub const TCP_REPAIR_OFF = 0;
/// Turn off without window probes
pub const TCP_REPAIR_OFF_NO_WP = -1;
pub const tcp_repair_opt = extern struct {
opt_code: u32,
opt_val: u32,
};
pub const tcp_repair_window = extern struct {
snd_wl1: u32,
snd_wnd: u32,
max_window: u32,
rcv_wnd: u32,
rcv_wup: u32,
};
pub const TcpRepairOption = extern enum {
TCP_NO_QUEUE,
TCP_RECV_QUEUE,
TCP_SEND_QUEUE,
TCP_QUEUES_NR,
};
/// why fastopen failed from client perspective
pub const tcp_fastopen_client_fail = extern enum {
/// catch-all
TFO_STATUS_UNSPEC,
/// if not in TFO_CLIENT_NO_COOKIE mode
TFO_COOKIE_UNAVAILABLE,
/// SYN-ACK did not ack SYN data
TFO_DATA_NOT_ACKED,
/// SYN-ACK did not ack SYN data after timeout
TFO_SYN_RETRANSMITTED,
};
/// for TCP_INFO socket option
pub const TCPI_OPT_TIMESTAMPS = 1;
pub const TCPI_OPT_SACK = 2;
pub const TCPI_OPT_WSCALE = 4;
/// ECN was negociated at TCP session init
pub const TCPI_OPT_ECN = 8;
/// we received at least one packet with ECT
pub const TCPI_OPT_ECN_SEEN = 16;
/// SYN-ACK acked data in SYN sent or rcvd
pub const TCPI_OPT_SYN_DATA = 32;
pub const nfds_t = usize;
pub const pollfd = extern struct {
fd: fd_t,
events: i16,
revents: i16,
};
pub const POLLIN = 0x001;
pub const POLLPRI = 0x002;
pub const POLLOUT = 0x004;
pub const POLLERR = 0x008;
pub const POLLHUP = 0x010;
pub const POLLNVAL = 0x020;
pub const POLLRDNORM = 0x040;
pub const POLLRDBAND = 0x080;
pub const MFD_CLOEXEC = 0x0001;
pub const MFD_ALLOW_SEALING = 0x0002;
pub const MFD_HUGETLB = 0x0004;
pub const MFD_ALL_FLAGS = MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_HUGETLB;
pub const HUGETLB_FLAG_ENCODE_SHIFT = 26;
pub const HUGETLB_FLAG_ENCODE_MASK = 0x3f;
pub const HUGETLB_FLAG_ENCODE_64KB = 16 << HUGETLB_FLAG_ENCODE_SHIFT;
pub const HUGETLB_FLAG_ENCODE_512KB = 19 << HUGETLB_FLAG_ENCODE_SHIFT;
pub const HUGETLB_FLAG_ENCODE_1MB = 20 << HUGETLB_FLAG_ENCODE_SHIFT;
pub const HUGETLB_FLAG_ENCODE_2MB = 21 << HUGETLB_FLAG_ENCODE_SHIFT;
pub const HUGETLB_FLAG_ENCODE_8MB = 23 << HUGETLB_FLAG_ENCODE_SHIFT;
pub const HUGETLB_FLAG_ENCODE_16MB = 24 << HUGETLB_FLAG_ENCODE_SHIFT;
pub const HUGETLB_FLAG_ENCODE_32MB = 25 << HUGETLB_FLAG_ENCODE_SHIFT;
pub const HUGETLB_FLAG_ENCODE_256MB = 28 << HUGETLB_FLAG_ENCODE_SHIFT;
pub const HUGETLB_FLAG_ENCODE_512MB = 29 << HUGETLB_FLAG_ENCODE_SHIFT;
pub const HUGETLB_FLAG_ENCODE_1GB = 30 << HUGETLB_FLAG_ENCODE_SHIFT;
pub const HUGETLB_FLAG_ENCODE_2GB = 31 << HUGETLB_FLAG_ENCODE_SHIFT;
pub const HUGETLB_FLAG_ENCODE_16GB = 34 << HUGETLB_FLAG_ENCODE_SHIFT;
pub const MFD_HUGE_SHIFT = HUGETLB_FLAG_ENCODE_SHIFT;
pub const MFD_HUGE_MASK = HUGETLB_FLAG_ENCODE_MASK;
pub const MFD_HUGE_64KB = HUGETLB_FLAG_ENCODE_64KB;
pub const MFD_HUGE_512KB = HUGETLB_FLAG_ENCODE_512KB;
pub const MFD_HUGE_1MB = HUGETLB_FLAG_ENCODE_1MB;
pub const MFD_HUGE_2MB = HUGETLB_FLAG_ENCODE_2MB;
pub const MFD_HUGE_8MB = HUGETLB_FLAG_ENCODE_8MB;
pub const MFD_HUGE_16MB = HUGETLB_FLAG_ENCODE_16MB;
pub const MFD_HUGE_32MB = HUGETLB_FLAG_ENCODE_32MB;
pub const MFD_HUGE_256MB = HUGETLB_FLAG_ENCODE_256MB;
pub const MFD_HUGE_512MB = HUGETLB_FLAG_ENCODE_512MB;
pub const MFD_HUGE_1GB = HUGETLB_FLAG_ENCODE_1GB;
pub const MFD_HUGE_2GB = HUGETLB_FLAG_ENCODE_2GB;
pub const MFD_HUGE_16GB = HUGETLB_FLAG_ENCODE_16GB;
pub const RUSAGE_SELF = 0;
pub const RUSAGE_CHILDREN = -1;
pub const RUSAGE_THREAD = 1;
pub const rusage = extern struct {
utime: timeval,
stime: timeval,
maxrss: isize,
ixrss: isize,
idrss: isize,
isrss: isize,
minflt: isize,
majflt: isize,
nswap: isize,
inblock: isize,
oublock: isize,
msgsnd: isize,
msgrcv: isize,
nsignals: isize,
nvcsw: isize,
nivcsw: isize,
__reserved: [16]isize = [1]isize{0} ** 16,
};
pub const cc_t = u8;
pub const speed_t = u32;
pub const tcflag_t = u32;
pub const NCCS = 32;
pub const IGNBRK = 1;
pub const BRKINT = 2;
pub const IGNPAR = 4;
pub const PARMRK = 8;
pub const INPCK = 16;
pub const ISTRIP = 32;
pub const INLCR = 64;
pub const IGNCR = 128;
pub const ICRNL = 256;
pub const IUCLC = 512;
pub const IXON = 1024;
pub const IXANY = 2048;
pub const IXOFF = 4096;
pub const IMAXBEL = 8192;
pub const IUTF8 = 16384;
pub const OPOST = 1;
pub const OLCUC = 2;
pub const ONLCR = 4;
pub const OCRNL = 8;
pub const ONOCR = 16;
pub const ONLRET = 32;
pub const OFILL = 64;
pub const OFDEL = 128;
pub const VTDLY = 16384;
pub const VT0 = 0;
pub const VT1 = 16384;
pub const CSIZE = 48;
pub const CS5 = 0;
pub const CS6 = 16;
pub const CS7 = 32;
pub const CS8 = 48;
pub const CSTOPB = 64;
pub const CREAD = 128;
pub const PARENB = 256;
pub const PARODD = 512;
pub const HUPCL = 1024;
pub const CLOCAL = 2048;
pub const ISIG = 1;
pub const ICANON = 2;
pub const ECHO = 8;
pub const ECHOE = 16;
pub const ECHOK = 32;
pub const ECHONL = 64;
pub const NOFLSH = 128;
pub const TOSTOP = 256;
pub const IEXTEN = 32768;
pub const TCSA = extern enum(c_uint) {
NOW,
DRAIN,
FLUSH,
_,
};
pub const termios = extern struct {
iflag: tcflag_t,
oflag: tcflag_t,
cflag: tcflag_t,
lflag: tcflag_t,
line: cc_t,
cc: [NCCS]cc_t,
ispeed: speed_t,
ospeed: speed_t,
};
pub const SIOCGIFINDEX = 0x8933;
pub const IFNAMESIZE = 16;
pub const ifmap = extern struct {
mem_start: u32,
mem_end: u32,
base_addr: u16,
irq: u8,
dma: u8,
port: u8,
};
pub const ifreq = extern struct {
ifrn: extern union {
name: [IFNAMESIZE]u8,
},
ifru: extern union {
addr: sockaddr,
dstaddr: sockaddr,
broadaddr: sockaddr,
netmask: sockaddr,
hwaddr: sockaddr,
flags: i16,
ivalue: i32,
mtu: i32,
map: ifmap,
slave: [IFNAMESIZE - 1:0]u8,
newname: [IFNAMESIZE - 1:0]u8,
data: ?[*]u8,
},
};
// doc comments copied from musl
pub const rlimit_resource = extern enum(c_int) {
/// Per-process CPU limit, in seconds.
CPU,
/// Largest file that can be created, in bytes.
FSIZE,
/// Maximum size of data segment, in bytes.
DATA,
/// Maximum size of stack segment, in bytes.
STACK,
/// Largest core file that can be created, in bytes.
CORE,
/// Largest resident set size, in bytes.
/// This affects swapping; processes that are exceeding their
/// resident set size will be more likely to have physical memory
/// taken from them.
RSS,
/// Number of processes.
NPROC,
/// Number of open files.
NOFILE,
/// Locked-in-memory address space.
MEMLOCK,
/// Address space limit.
AS,
/// Maximum number of file locks.
LOCKS,
/// Maximum number of pending signals.
SIGPENDING,
/// Maximum bytes in POSIX message queues.
MSGQUEUE,
/// Maximum nice priority allowed to raise to.
/// Nice levels 19 .. -20 correspond to 0 .. 39
/// values of this resource limit.
NICE,
/// Maximum realtime priority allowed for non-priviledged
/// processes.
RTPRIO,
/// Maximum CPU time in µs that a process scheduled under a real-time
/// scheduling policy may consume without making a blocking system
/// call before being forcibly descheduled.
RTTIME,
_,
};
pub const rlim_t = u64;
/// No limit
pub const RLIM_INFINITY = ~@as(rlim_t, 0);
pub const RLIM_SAVED_MAX = RLIM_INFINITY;
pub const RLIM_SAVED_CUR = RLIM_INFINITY;
pub const rlimit = extern struct {
/// Soft limit
cur: rlim_t,
/// Hard limit
max: rlim_t,
};
|
lib/std/os/bits/linux.zig
|
const std = @import("std");
const fs = std.fs;
const io = std.io;
const info = std.log.info;
const print = std.debug.print;
const fmt = std.fmt;
const utils = @import("utils.zig");
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
/// One bag kind in the containment graph. Owns its `name` and its
/// `children` array (the child Nodes themselves are heap-allocated and
/// owned transitively via deinit).
const Node = struct {
    allo: *std.mem.Allocator = undefined,
    is_root: bool = false,
    /// Heap-owned copy of the bag's "<shade>_<color>" identifier.
    name: []u8 = undefined,
    /// How many of this bag its parent contains.
    parent_contains: usize = undefined,
    children: []*Node = undefined,

    /// Duplicates `name`; caller keeps ownership of the passed slice.
    pub fn init(allo: *std.mem.Allocator, name: []u8, is_root: bool, parent_contains: usize) !Node {
        const owned_name = try allo.dupe(u8, name);
        // Don't leak the duped name if the children allocation fails.
        errdefer allo.free(owned_name);
        return Node{
            .allo = allo,
            .is_root = is_root,
            .name = owned_name,
            .parent_contains = parent_contains,
            .children = try allo.alloc(*Node, 0),
        };
    }

    /// Appends a new child node holding `count` bags of kind `name`.
    /// The child is fully constructed before the children array grows, so
    /// a failure at any step leaves `self` unchanged (the original grew
    /// the array first, leaking the child and leaving an undefined
    /// pointer in the last slot on error).
    pub fn append_child(self: *Node, name: []u8, count: usize) !void {
        const child = try self.allo.create(Node);
        errdefer self.allo.destroy(child);
        child.* = try Node.init(self.allo, name, false, count);
        errdefer child.deinit();
        self.children = try self.allo.realloc(self.children, self.children.len + 1);
        self.children[self.children.len - 1] = child;
    }

    /// Frees the name, all children (recursively), and the children array.
    pub fn deinit(self: *Node) void {
        self.allo.free(self.name);
        for (self.children) |child| {
            child.deinit();
            self.allo.destroy(child);
        }
        self.allo.free(self.children);
    }
};
/// Parses one puzzle line ("<shade> <color> bags contain N <shade> <color>
/// bag(s), ...") into a root Node with one child per contained bag kind.
/// Caller owns the returned Node and must call deinit() on it.
pub fn parseNode(allo: *std.mem.Allocator, line: []const u8) !Node {
    var tokens = std.mem.tokenize(line, " ");
    // Bag identity is "<shade>_<color>" built from the first two tokens.
    // NOTE(review): tokens.next() yields an optional ([]const u8 payload);
    // this relies on this era's "{}" formatting of optionals producing the
    // bare payload — confirm it matches the keys built for children below.
    var name = try fmt.allocPrint(allo, "{}_{}", .{
        tokens.next(),
        tokens.next(),
    });
    defer allo.free(name);
    var node = try Node.init(allo, name, true, 0);
    // Fixed: free everything parsed so far if a later allocation or
    // append fails; previously the partially built node leaked.
    errdefer node.deinit();
    // drop "bags contain"
    _ = tokens.next();
    _ = tokens.next();
    // read contained bags info
    var i: u32 = 0;
    while (true) : (i += 1) {
        const maybe_count = tokens.next() orelse {
            break;
        };
        // "no other bags." carries no leading count: stop on parse failure.
        const count = fmt.parseUnsigned(usize, maybe_count, 10) catch {
            break;
        };
        // parse child property and color
        var child_name = try fmt.allocPrint(allo, "{}_{}", .{
            tokens.next(),
            tokens.next(),
        });
        defer allo.free(child_name);
        try node.append_child(child_name, count);
        // drop trailing "bag,"/"bags." token; end of line ends the loop
        _ = tokens.next() orelse {
            break;
        };
    }
    return node;
}
/// Depth-first search: true when the bag named `name`, or any bag
/// transitively contained in it, matches `needle`. A name equal to
/// `needle` matches directly. Unknown names contain nothing.
/// Assumes the containment graph is acyclic (true for valid input).
pub fn containsChild(map: std.StringHashMap(Node), name: []const u8, needle: []const u8) bool {
    if (std.mem.eql(u8, name, needle)) return true;
    const node = map.get(name) orelse return false;
    for (node.children) |child| {
        if (containsChild(map, child.name, needle)) return true;
    }
    return false;
}
/// Part 1: number of distinct top-level bag kinds (other than `needle`
/// itself) that can eventually contain a `needle` bag.
pub fn countP1(map: std.StringHashMap(Node), needle: []const u8) !u32 {
    var total: u32 = 0;
    var it = map.iterator();
    while (it.next()) |entry| {
        // Only root entries count, and the needle never counts itself.
        const candidate = entry.value.is_root and !std.mem.eql(u8, needle, entry.key);
        if (candidate and containsChild(map, entry.key, needle)) {
            total += 1;
        }
    }
    return total;
}
/// Part 2: total number of bags contained inside the bag named `needle`,
/// counted recursively. Errors with `error.WHAT` if the name is unknown.
pub fn countP2(map: std.StringHashMap(Node), needle: []const u8) anyerror!usize {
    const node = map.get(needle) orelse {
        return error.WHAT;
    };
    var total: usize = 0;
    for (node.children) |child| {
        // Each child contributes its own count plus its nested contents.
        const nested = try countP2(map, child.name);
        total += nested * child.parent_contains + child.parent_contains;
    }
    return total;
}
pub fn main() !void {
    // Wall-clock start in microseconds (nanoTimestamp / 1000).
    const begin = @divTrunc(std.time.nanoTimestamp(), 1000);
    // allocator
    // NOTE(review): `gpa`, `utils`, `print` and `info` are declared elsewhere
    // in this file (outside this view) — presumably a GeneralPurposeAllocator
    // and logging helpers; confirm against the full file.
    defer _ = gpa.deinit();
    var allo = &gpa.allocator;
    const lines: [][]const u8 = try utils.readInputLines(allo, "./input1");
    defer allo.free(lines);
    print("== got {} input lines ==\n", .{lines.len});
    var nodes = std.StringHashMap(Node).init(allo);
    defer nodes.deinit();
    for (lines) |line| {
        // Each line buffer is freed at end of iteration; the parsed Node
        // owns duped copies of every string it keeps.
        defer allo.free(line);
        info("parsing line: [{}]", .{line});
        const root_node = try parseNode(allo, line);
        // Map keys alias root_node.name; the Node owns that memory until
        // the cleanup loop below frees it.
        try nodes.put(root_node.name, root_node);
    }
    // solutions
    //
    const needle = "shiny_gold";
    const p1 = try countP1(nodes, needle[0..]);
    print("p1: {}\n", .{p1});
    const p2 = try countP2(nodes, needle[0..]);
    print("p2: {}\n", .{p2});
    // Free every Node before the deferred nodes.deinit() runs.
    // NOTE(review): `defer` inside a loop body fires at the end of each
    // iteration, so this does deinit every entry, just indirectly.
    var iter = nodes.iterator();
    while (iter.next()) |kv| {
        defer kv.value.deinit();
    }
    const delta = @divTrunc(std.time.nanoTimestamp(), 1000) - begin;
    print("all done in {} microseconds\n", .{delta});
}
|
day_07/src/main.zig
|
const std = @import("../index.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
const Loop = std.event.Loop;
/// many producer, many consumer, thread-safe, runtime configurable buffer size
/// when buffer is empty, consumers suspend and are resumed by producers
/// when buffer is full, producers suspend and are resumed by consumers
/// NOTE(review): this uses pre-0.5 async syntax (`cancel`, `@handle()`,
/// `struct.{`) that no longer compiles on modern Zig.
pub fn Channel(comptime T: type) type {
    return struct.{
        loop: *Loop,

        // Parked consumers / producers waiting for the other side.
        getters: std.atomic.Queue(GetNode),
        or_null_queue: std.atomic.Queue(*std.atomic.Queue(GetNode).Node),
        putters: std.atomic.Queue(PutNode),
        // Approximate counts of parked getters/putters, kept with atomic RMWs;
        // dispatch() temporarily over-subtracts and corrects afterwards.
        get_count: usize,
        put_count: usize,
        dispatch_lock: u8, // TODO make this a bool
        need_dispatch: u8, // TODO make this a bool

        // simple fixed size ring buffer
        buffer_nodes: []T,
        buffer_index: usize,
        buffer_len: usize,

        const SelfChannel = @This();

        // A suspended getter: where to write the value, plus the coroutine
        // to resume once it is written.
        const GetNode = struct.{
            tick_node: *Loop.NextTickNode,
            data: Data,

            const Data = union(enum).{
                Normal: Normal,
                OrNull: OrNull,
            };

            const Normal = struct.{
                ptr: *T,
            };

            const OrNull = struct.{
                ptr: *?T,
                or_null: *std.atomic.Queue(*std.atomic.Queue(GetNode).Node).Node,
            };
        };

        // A suspended putter carrying the value to deliver.
        const PutNode = struct.{
            data: T,
            tick_node: *Loop.NextTickNode,
        };

        /// call destroy when done
        pub fn create(loop: *Loop, capacity: usize) !*SelfChannel {
            const buffer_nodes = try loop.allocator.alloc(T, capacity);
            errdefer loop.allocator.free(buffer_nodes);

            const self = try loop.allocator.create(SelfChannel.{
                .loop = loop,
                .buffer_len = 0,
                .buffer_nodes = buffer_nodes,
                .buffer_index = 0,
                .dispatch_lock = 0,
                .need_dispatch = 0,
                .getters = std.atomic.Queue(GetNode).init(),
                .putters = std.atomic.Queue(PutNode).init(),
                .or_null_queue = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).init(),
                .get_count = 0,
                .put_count = 0,
            });
            errdefer loop.allocator.destroy(self);

            return self;
        }

        /// must be called when all calls to put and get have suspended and no more calls occur
        pub fn destroy(self: *SelfChannel) void {
            // Cancel any still-parked coroutines before freeing the channel.
            while (self.getters.get()) |get_node| {
                cancel get_node.data.tick_node.data;
            }
            while (self.putters.get()) |put_node| {
                cancel put_node.data.tick_node.data;
            }
            self.loop.allocator.free(self.buffer_nodes);
            self.loop.allocator.destroy(self);
        }

        /// puts a data item in the channel. The promise completes when the value has been added to the
        /// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter.
        pub async fn put(self: *SelfChannel, data: T) void {
            // TODO fix this workaround
            suspend {
                resume @handle();
            }

            var my_tick_node = Loop.NextTickNode.init(@handle());
            var queue_node = std.atomic.Queue(PutNode).Node.init(PutNode.{
                .tick_node = &my_tick_node,
                .data = data,
            });

            // TODO test canceling a put()
            errdefer {
                _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
                const need_dispatch = !self.putters.remove(&queue_node);
                self.loop.cancelOnNextTick(&my_tick_node);
                if (need_dispatch) {
                    // oops we made the put_count incorrect for a period of time. fix by dispatching.
                    _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
                    self.dispatch();
                }
            }

            // Park ourselves on the putters queue; dispatch() resumes us once
            // the value has been buffered or handed to a getter.
            suspend {
                self.putters.put(&queue_node);
                _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
                self.dispatch();
            }
        }

        /// await this function to get an item from the channel. If the buffer is empty, the promise will
        /// complete when the next item is put in the channel.
        pub async fn get(self: *SelfChannel) T {
            // TODO fix this workaround
            suspend {
                resume @handle();
            }

            // TODO integrate this function with named return values
            // so we can get rid of this extra result copy
            var result: T = undefined;
            var my_tick_node = Loop.NextTickNode.init(@handle());
            var queue_node = std.atomic.Queue(GetNode).Node.init(GetNode.{
                .tick_node = &my_tick_node,
                .data = GetNode.Data.{
                    .Normal = GetNode.Normal.{ .ptr = &result },
                },
            });

            // TODO test canceling a get()
            errdefer {
                _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
                const need_dispatch = !self.getters.remove(&queue_node);
                self.loop.cancelOnNextTick(&my_tick_node);
                if (need_dispatch) {
                    // oops we made the get_count incorrect for a period of time. fix by dispatching.
                    _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
                    self.dispatch();
                }
            }

            // Park ourselves on the getters queue; dispatch() fills `result`
            // and resumes us.
            suspend {
                self.getters.put(&queue_node);
                _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
                self.dispatch();
            }
            return result;
        }

        //pub async fn select(comptime EnumUnion: type, channels: ...) EnumUnion {
        //    assert(@memberCount(EnumUnion) == channels.len); // enum union and channels mismatch
        //    assert(channels.len != 0); // enum unions cannot have 0 fields
        //    if (channels.len == 1) {
        //        const result = await (async channels[0].get() catch unreachable);
        //        return @unionInit(EnumUnion, @memberName(EnumUnion, 0), result);
        //    }
        //}

        /// Await this function to get an item from the channel. If the buffer is empty and there are no
        /// puts waiting, this returns null.
        /// Await is necessary for locking purposes. The function will be resumed after checking the channel
        /// for data and will not wait for data to be available.
        pub async fn getOrNull(self: *SelfChannel) ?T {
            // TODO fix this workaround
            suspend {
                resume @handle();
            }

            // TODO integrate this function with named return values
            // so we can get rid of this extra result copy
            var result: ?T = null;
            var my_tick_node = Loop.NextTickNode.init(@handle());
            var or_null_node = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).Node.init(undefined);
            var queue_node = std.atomic.Queue(GetNode).Node.init(GetNode.{
                .tick_node = &my_tick_node,
                .data = GetNode.Data.{
                    .OrNull = GetNode.OrNull.{
                        .ptr = &result,
                        .or_null = &or_null_node,
                    },
                },
            });
            or_null_node.data = &queue_node;

            // TODO test canceling getOrNull
            errdefer {
                _ = self.or_null_queue.remove(&or_null_node);
                _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
                const need_dispatch = !self.getters.remove(&queue_node);
                self.loop.cancelOnNextTick(&my_tick_node);
                if (need_dispatch) {
                    // oops we made the get_count incorrect for a period of time. fix by dispatching.
                    _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
                    self.dispatch();
                }
            }

            // Park like get(), but also register on or_null_queue so dispatch()
            // resumes us with null if no data is immediately available.
            suspend {
                self.getters.put(&queue_node);
                _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
                self.or_null_queue.put(&or_null_node);
                self.dispatch();
            }
            return result;
        }

        // Match parked getters with buffered values and parked putters.
        // Only one thread dispatches at a time (dispatch_lock); others just
        // set need_dispatch and leave.
        fn dispatch(self: *SelfChannel) void {
            // set the "need dispatch" flag
            _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);

            lock: while (true) {
                // set the lock flag
                const prev_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
                if (prev_lock != 0) return;

                // clear the need_dispatch flag since we're about to do it
                _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);

                while (true) {
                    one_dispatch: {
                        // later we correct these extra subtractions
                        var get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
                        var put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);

                        // transfer self.buffer to self.getters
                        while (self.buffer_len != 0) {
                            if (get_count == 0) break :one_dispatch;
                            const get_node = &self.getters.get().?.data;
                            switch (get_node.data) {
                                GetNode.Data.Normal => |info| {
                                    info.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len];
                                },
                                GetNode.Data.OrNull => |info| {
                                    _ = self.or_null_queue.remove(info.or_null);
                                    info.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len];
                                },
                            }
                            self.loop.onNextTick(get_node.tick_node);
                            self.buffer_len -= 1;
                            get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
                        }

                        // direct transfer self.putters to self.getters
                        while (get_count != 0 and put_count != 0) {
                            const get_node = &self.getters.get().?.data;
                            const put_node = &self.putters.get().?.data;
                            switch (get_node.data) {
                                GetNode.Data.Normal => |info| {
                                    info.ptr.* = put_node.data;
                                },
                                GetNode.Data.OrNull => |info| {
                                    _ = self.or_null_queue.remove(info.or_null);
                                    info.ptr.* = put_node.data;
                                },
                            }
                            self.loop.onNextTick(get_node.tick_node);
                            self.loop.onNextTick(put_node.tick_node);
                            get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
                            put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
                        }

                        // transfer self.putters to self.buffer
                        while (self.buffer_len != self.buffer_nodes.len and put_count != 0) {
                            const put_node = &self.putters.get().?.data;
                            self.buffer_nodes[self.buffer_index] = put_node.data;
                            self.loop.onNextTick(put_node.tick_node);
                            self.buffer_index +%= 1;
                            self.buffer_len += 1;
                            put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
                        }
                    }

                    // undo the extra subtractions
                    _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
                    _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);

                    // All the "get or null" functions should resume now.
                    var remove_count: usize = 0;
                    while (self.or_null_queue.get()) |or_null_node| {
                        remove_count += @boolToInt(self.getters.remove(or_null_node.data));
                        self.loop.onNextTick(or_null_node.data.data.tick_node);
                    }
                    if (remove_count != 0) {
                        _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, remove_count, AtomicOrder.SeqCst);
                    }

                    // clear need-dispatch flag
                    const need_dispatch = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
                    if (need_dispatch != 0) continue;

                    const my_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
                    assert(my_lock != 0);

                    // we have to check again now that we unlocked
                    if (@atomicLoad(u8, &self.need_dispatch, AtomicOrder.SeqCst) != 0) continue :lock;
                    return;
                }
            }
        }
    };
}
// Smoke test: one getter and one putter coroutine over a zero-capacity
// channel, driven by a single-threaded event loop.
test "std.event.Channel" {
    var da = std.heap.DirectAllocator.init();
    defer da.deinit();

    const allocator = &da.allocator;

    var loop: Loop = undefined;
    // TODO make a multi threaded test
    try loop.initSingleThreaded(allocator);
    defer loop.deinit();

    // capacity 0: every put rendezvouses directly with a get
    const channel = try Channel(i32).create(&loop, 0);
    defer channel.destroy();

    const handle = try async<allocator> testChannelGetter(&loop, channel);
    defer cancel handle;

    const putter = try async<allocator> testChannelPutter(channel);
    defer cancel putter;

    loop.run();
}
// Consumes the two values produced by testChannelPutter, checks that
// getOrNull() yields null on an empty channel, then exercises a concurrent
// put/getOrNull pair. NOTE(review): the `loop` parameter is unused here.
async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void {
    errdefer @panic("test failed");

    const value1_promise = try async channel.get();
    const value1 = await value1_promise;
    assert(value1 == 1234);

    const value2_promise = try async channel.get();
    const value2 = await value2_promise;
    assert(value2 == 4567);

    // channel is empty and no putter is parked yet -> null
    const value3_promise = try async channel.getOrNull();
    const value3 = await value3_promise;
    assert(value3 == null);

    // start a put first so getOrNull() finds a parked putter
    const last_put = try async testPut(channel, 4444);
    const value4 = await try async channel.getOrNull();
    assert(value4.? == 4444);
    await last_put;
}
// Produces the two values testChannelGetter expects, in order.
async fn testChannelPutter(channel: *Channel(i32)) void {
    await (async channel.put(1234) catch @panic("out of memory"));
    await (async channel.put(4567) catch @panic("out of memory"));
}
// Helper: put a single value; used to pre-park a putter for getOrNull().
async fn testPut(channel: *Channel(i32), value: i32) void {
    await (async channel.put(value) catch @panic("out of memory"));
}
|
std/event/channel.zig
|
const std = @import("std");
const assert = std.debug.assert;
const Atomic = std.atomic.Atomic;
/// Packed 32-bit synchronization word for the thread pool; converted to/from
/// u32 with @bitCast for atomic compare-and-swap updates.
const Sync = packed struct {
    /// Tracks the number of threads not searching for Tasks
    idle: u14 = 0,
    /// Tracks the number of threads spawned
    spawned: u14 = 0,
    /// Padding bit (14 + 14 + 1 + 1 + 2 = 32 bits total)
    unused: bool = false,
    /// Used to not miss notifications while state = waking
    notified: bool = false,
    /// The current state of the thread pool
    state: enum(u2) {
        /// A notification can be issued to wake up a sleeping thread as the "waking thread".
        pending = 0,
        /// The state was notified with a signal. A thread is woken up.
        /// The first thread to transition to `waking` becomes the "waking thread".
        signaled,
        /// There is a "waking thread" among us.
        /// No other thread should be woken up until the waking thread transitions the state.
        waking,
        /// The thread pool was terminated. Start decremented `spawned` so that it can be joined.
        shutdown,
    } = .pending,
};
/// Configuration options for the thread pool.
/// TODO: add CPU core affinity?
pub const Config = struct {
    /// Stack size for each worker thread; defaults to std.Thread's default.
    stack_size: u32 = (std.Thread.SpawnConfig{}).stack_size,
    /// Upper bound on spawned worker threads (clamped to at least 1 in init).
    max_threads: u32,
};
/// A Task represents the unit of Work / Job / Execution that the ThreadPool schedules.
/// The user provides a `callback` which is invoked when the *Task can run on a thread.
pub fn Task(comptime Args: type, comptime Return: type) type {
    const Callback = fn (Args) Return;

    return struct {
        const Self = @This();

        /// Intrusive link used by the pool's run queues.
        node: Node = .{},
        callback: Callback,
        args: Args,

        /// Bundle a callback together with the arguments it will be called with.
        pub fn create(cb: Callback, args: Args) Self {
            return Self{ .callback = cb, .args = args };
        }

        /// Invoke the stored callback with the stored arguments.
        fn execute(self: *const Self) Return {
            return self.callback(self.args);
        }
    };
}
/// An event which stores 1 semaphore token and is multi-threaded safe.
/// The event can be shutdown(), waking up all wait()ing threads and
/// making subsequent wait()'s return immediately.
const Event = struct {
    state: Atomic(u32) = Atomic(u32).init(EMPTY),

    // Event state machine values stored in `state`.
    const EMPTY = 0;
    const WAITING = 1;
    const NOTIFIED = 2;
    const SHUTDOWN = 3;

    /// Wait for and consume a notification
    /// or wait for the event to be shutdown entirely
    noinline fn wait(self: *Event) void {
        var acquire_with: u32 = EMPTY;
        var state = self.state.load(.Monotonic);

        while (true) {
            // If we're shutdown then exit early.
            // Acquire barrier to ensure operations before the shutdown() are seen after the wait().
            // Shutdown is rare so it's better to have an Acquire barrier here instead of on CAS failure + load which are common.
            if (state == SHUTDOWN) {
                std.atomic.fence(.Acquire);
                return;
            }

            // Consume a notification when it pops up.
            // Acquire barrier to ensure operations before the notify() appear after the wait().
            if (state == NOTIFIED) {
                state = self.state.tryCompareAndSwap(
                    state,
                    acquire_with,
                    .Acquire,
                    .Monotonic,
                ) orelse return;
                continue;
            }

            // There is no notification to consume, we should wait on the event by ensuring its WAITING.
            if (state != WAITING) blk: {
                state = self.state.tryCompareAndSwap(
                    state,
                    WAITING,
                    .Monotonic,
                    .Monotonic,
                ) orelse break :blk;
                continue;
            }

            // Wait on the event until a notify() or shutdown().
            // If we wake up to a notification, we must acquire it with WAITING instead of EMPTY
            // since there may be other threads sleeping on the Futex who haven't been woken up yet.
            //
            // Acquiring to WAITING will make the next notify() or shutdown() wake a sleeping futex thread
            // who will either exit on SHUTDOWN or acquire with WAITING again, ensuring all threads are awoken.
            // This unfortunately results in the last notify() or shutdown() doing an extra futex wake but that's fine.
            std.Thread.Futex.wait(&self.state, WAITING, null) catch unreachable;
            state = self.state.load(.Monotonic);
            acquire_with = WAITING;
        }
    }

    /// Post a notification to the event if it doesn't have one already
    /// then wake up a waiting thread if there is one as well.
    fn notify(self: *Event) void {
        return self.wake(NOTIFIED, 1);
    }

    /// Marks the event as shutdown, making all future wait()'s return immediately.
    /// Then wakes up any threads currently waiting on the Event.
    fn shutdown(self: *Event) void {
        return self.wake(SHUTDOWN, std.math.maxInt(u32));
    }

    // Shared implementation of notify()/shutdown().
    fn wake(self: *Event, release_with: u32, wake_threads: u32) void {
        // Update the Event to notify it with the new `release_with` state (either NOTIFIED or SHUTDOWN).
        // Release barrier to ensure any operations before this happen before the wait() in the other threads.
        const state = self.state.swap(release_with, .Release);

        // Only wake threads sleeping in futex if the state is WAITING.
        // Avoids unnecessary wake ups.
        if (state == WAITING) {
            std.Thread.Futex.wake(&self.state, wake_threads);
        }
    }
};
/// Linked list intrusive memory node and lock-free data structures to operate with it
const Node = struct {
    next: ?*Node = null,

    /// A linked list of Nodes
    const List = struct {
        head: *Node,
        tail: *Node,
    };

    /// An unbounded multi-producer-(non blocking)-multi-consumer queue of Node pointers.
    const Queue = struct {
        // Tagged pointer: top bits are the stack head, low bits are
        // HAS_CACHE / IS_CONSUMING flags.
        stack: Atomic(usize) = Atomic(usize).init(0),
        // Only accessed by the (single) active consumer.
        cache: ?*Node = null,

        const HAS_CACHE: usize = 0b01;
        const IS_CONSUMING: usize = 0b10;
        const PTR_MASK: usize = ~(HAS_CACHE | IS_CONSUMING);

        comptime {
            // Node alignment must leave the low two bits free for the flags.
            assert(@alignOf(Node) >= ((IS_CONSUMING | HAS_CACHE) + 1));
        }

        fn push(noalias self: *Queue, list: List) void {
            var stack = self.stack.load(.Monotonic);
            while (true) {
                // Attach the list to the stack (pt. 1)
                list.tail.next = @intToPtr(?*Node, stack & PTR_MASK);

                // Update the stack with the list (pt. 2).
                // Don't change the HAS_CACHE and IS_CONSUMING bits of the consumer.
                var new_stack = @ptrToInt(list.head);
                assert(new_stack & ~PTR_MASK == 0);
                new_stack |= (stack & ~PTR_MASK);

                // Push to the stack with a release barrier for the consumer to see the proper list links.
                stack = self.stack.tryCompareAndSwap(
                    stack,
                    new_stack,
                    .Release,
                    .Monotonic,
                ) orelse break;
            }
        }

        // Claim exclusive consumer access; the returned value is the initial
        // `consumer_ref` to pass to pop()/releaseConsumer().
        fn tryAcquireConsumer(self: *Queue) error{Empty, Contended}!?*Node {
            var stack = self.stack.load(.Monotonic);
            while (true) {
                if (stack & IS_CONSUMING != 0)
                    return error.Contended; // The queue already has a consumer.
                if (stack & (HAS_CACHE | PTR_MASK) == 0)
                    return error.Empty; // The queue is empty when there's nothing cached and nothing in the stack.

                // When we acquire the consumer, also consume the pushed stack if the cache is empty.
                var new_stack = stack | HAS_CACHE | IS_CONSUMING;
                if (stack & HAS_CACHE == 0) {
                    assert(stack & PTR_MASK != 0);
                    new_stack &= ~PTR_MASK;
                }

                // Acquire barrier on getting the consumer to see cache/Node updates done by previous consumers
                // and to ensure our cache/Node updates in pop() happen after that of previous consumers.
                stack = self.stack.tryCompareAndSwap(
                    stack,
                    new_stack,
                    .Acquire,
                    .Monotonic,
                ) orelse return self.cache orelse @intToPtr(*Node, stack & PTR_MASK);
            }
        }

        fn releaseConsumer(noalias self: *Queue, noalias consumer: ?*Node) void {
            // Stop consuming and remove the HAS_CACHE bit as well if the consumer's cache is empty.
            // When HAS_CACHE bit is zeroed, the next consumer will acquire the pushed stack nodes.
            var remove = IS_CONSUMING;
            if (consumer == null)
                remove |= HAS_CACHE;

            // Release the consumer with a release barrier to ensure cache/node accesses
            // happen before the consumer was released and before the next consumer starts using the cache.
            self.cache = consumer;
            const stack = self.stack.fetchSub(remove, .Release);
            assert(stack & remove != 0);
        }

        // Pop one node; only valid while holding the consumer acquired above.
        fn pop(noalias self: *Queue, noalias consumer_ref: *?*Node) ?*Node {
            // Check the consumer cache (fast path)
            if (consumer_ref.*) |node| {
                consumer_ref.* = node.next;
                return node;
            }

            // Load the stack to see if there was anything pushed that we could grab.
            var stack = self.stack.load(.Monotonic);
            assert(stack & IS_CONSUMING != 0);
            if (stack & PTR_MASK == 0) {
                return null;
            }

            // Nodes have been pushed to the stack, grab them with an Acquire barrier to see the Node links.
            stack = self.stack.swap(HAS_CACHE | IS_CONSUMING, .Acquire);
            assert(stack & IS_CONSUMING != 0);
            assert(stack & PTR_MASK != 0);

            const node = @intToPtr(*Node, stack & PTR_MASK);
            consumer_ref.* = node.next;
            return node;
        }
    };

    /// A bounded single-producer, multi-consumer ring buffer for node pointers.
    const Buffer = struct {
        head: Atomic(Index) = Atomic(Index).init(0),
        tail: Atomic(Index) = Atomic(Index).init(0),
        array: [capacity]Atomic(*Node) = undefined,

        const Index = u32;
        const capacity = 256; // Appears to be a pretty good trade-off in space vs contended throughput

        comptime {
            assert(std.math.maxInt(Index) >= capacity);
            assert(std.math.isPowerOfTwo(capacity));
        }

        // Producer-only: append `list` to the buffer; on a full buffer,
        // migrates half the entries back into `list` and reports Overflow.
        fn push(noalias self: *Buffer, noalias list: *List) error{Overflow}!void {
            var head = self.head.load(.Monotonic);
            var tail = self.tail.loadUnchecked(); // we're the only thread that can change this

            while (true) {
                var size = tail -% head;
                assert(size <= capacity);

                // Push nodes from the list to the buffer if it's not empty..
                if (size < capacity) {
                    var nodes: ?*Node = list.head;
                    while (size < capacity) : (size += 1) {
                        const node = nodes orelse break;
                        nodes = node.next;

                        // Array written atomically with weakest ordering since it could be getting atomically read by steal().
                        self.array[tail % capacity].store(node, .Unordered);
                        tail +%= 1;
                    }

                    // Release barrier synchronizes with Acquire loads for steal()ers to see the array writes.
                    self.tail.store(tail, .Release);

                    // Update the list with the nodes we pushed to the buffer and try again if there's more.
                    list.head = nodes orelse return;
                    std.atomic.spinLoopHint();
                    head = self.head.load(.Monotonic);
                    continue;
                }

                // Try to steal/overflow half of the tasks in the buffer to make room for future push()es.
                // Migrating half amortizes the cost of stealing while requiring future pops to still use the buffer.
                // Acquire barrier to ensure the linked list creation after the steal only happens after we successfully steal.
                var migrate = size / 2;
                head = self.head.tryCompareAndSwap(
                    head,
                    head +% migrate,
                    .Acquire,
                    .Monotonic,
                ) orelse {
                    // Link the migrated Nodes together
                    const first = self.array[head % capacity].loadUnchecked();
                    while (migrate > 0) : (migrate -= 1) {
                        const prev = self.array[head % capacity].loadUnchecked();
                        head +%= 1;
                        prev.next = self.array[head % capacity].loadUnchecked();
                    }

                    // Append the list that was supposed to be pushed to the end of the migrated Nodes
                    const last = self.array[(head -% 1) % capacity].loadUnchecked();
                    last.next = list.head;
                    list.tail.next = null;

                    // Return the migrated nodes + the original list as overflowed
                    list.head = first;
                    return error.Overflow;
                };
            }
        }

        fn pop(self: *Buffer) ?*Node {
            var head = self.head.load(.Monotonic);
            var tail = self.tail.loadUnchecked(); // we're the only thread that can change this

            while (true) {
                // Quick sanity check and return null when not empty
                var size = tail -% head;
                assert(size <= capacity);
                if (size == 0) {
                    return null;
                }

                // Dequeue with an acquire barrier to ensure any writes done to the Node
                // only happen after we successfully claim it from the array.
                head = self.head.tryCompareAndSwap(
                    head,
                    head +% 1,
                    .Acquire,
                    .Monotonic,
                ) orelse return self.array[head % capacity].loadUnchecked();
            }
        }

        // Result of taking work from elsewhere: the node to run now, and
        // whether more nodes were pushed into our buffer.
        const Stole = struct {
            node: *Node,
            pushed: bool,
        };

        // Drain up to `capacity` nodes from an overflow Queue into our
        // (empty) buffer, returning one node to run immediately.
        fn consume(noalias self: *Buffer, noalias queue: *Queue) ?Stole {
            var consumer = queue.tryAcquireConsumer() catch return null;
            defer queue.releaseConsumer(consumer);

            const head = self.head.load(.Monotonic);
            const tail = self.tail.loadUnchecked(); // we're the only thread that can change this

            const size = tail -% head;
            assert(size <= capacity);
            assert(size == 0); // we should only be consuming if our array is empty

            // Pop nodes from the queue and push them to our array.
            // Atomic stores to the array as steal() threads may be atomically reading from it.
            var pushed: Index = 0;
            while (pushed < capacity) : (pushed += 1) {
                const node = queue.pop(&consumer) orelse break;
                self.array[(tail +% pushed) % capacity].store(node, .Unordered);
            }

            // We will be returning one node that we stole from the queue.
            // Get an extra, and if that's not possible, take one from our array.
            const node = queue.pop(&consumer) orelse blk: {
                if (pushed == 0) return null;
                pushed -= 1;
                break :blk self.array[(tail +% pushed) % capacity].loadUnchecked();
            };

            // Update the array tail with the nodes we pushed to it.
            // Release barrier to synchronize with Acquire barrier in steal()'s to see the written array Nodes.
            if (pushed > 0) self.tail.store(tail +% pushed, .Release);
            return Stole{
                .node = node,
                .pushed = pushed > 0,
            };
        }

        // Steal roughly half of another thread's buffer into our (empty) one.
        fn steal(noalias self: *Buffer, noalias buffer: *Buffer) ?Stole {
            const head = self.head.load(.Monotonic);
            const tail = self.tail.loadUnchecked(); // we're the only thread that can change this

            const size = tail -% head;
            assert(size <= capacity);
            assert(size == 0); // we should only be stealing if our array is empty

            while (true) : (std.atomic.spinLoopHint()) {
                const buffer_head = buffer.head.load(.Acquire);
                const buffer_tail = buffer.tail.load(.Acquire);

                // Overly large size indicates that the tail was updated a lot after the head was loaded.
                // Reload both and try again.
                const buffer_size = buffer_tail -% buffer_head;
                if (buffer_size > capacity) {
                    continue;
                }

                // Try to steal half (divCeil) to amortize the cost of stealing from other threads.
                const steal_size = buffer_size - (buffer_size / 2);
                if (steal_size == 0) {
                    return null;
                }

                // Copy the nodes we will steal from the target's array to our own.
                // Atomically load from the target buffer array as it may be pushing and atomically storing to it.
                // Atomic store to our array as other steal() threads may be atomically loading from it as above.
                var i: Index = 0;
                while (i < steal_size) : (i += 1) {
                    const node = buffer.array[(buffer_head +% i) % capacity].load(.Unordered);
                    self.array[(tail +% i) % capacity].store(node, .Unordered);
                }

                // Try to commit the steal from the target buffer using:
                // - an Acquire barrier to ensure that we only interact with the stolen Nodes after the steal was committed.
                // - a Release barrier to ensure that the Nodes are copied above prior to the committing of the steal
                //   because if they're copied after the steal, they could be getting rewritten by the target's push().
                _ = buffer.head.compareAndSwap(
                    buffer_head,
                    buffer_head +% steal_size,
                    .AcqRel,
                    .Monotonic,
                ) orelse {
                    // Pop one from the nodes we stole as we'll be returning it
                    const pushed = steal_size - 1;
                    const node = self.array[(tail +% pushed) % capacity].loadUnchecked();

                    // Update the array tail with the nodes we pushed to it.
                    // Release barrier to synchronize with Acquire barrier in steal()'s to see the written array Nodes.
                    if (pushed > 0) self.tail.store(tail +% pushed, .Release);
                    return Stole{
                        .node = node,
                        .pushed = pushed > 0,
                    };
                };
            }
        }
    };
};
pub fn ThreadPool(
comptime Args: type,
comptime Return: type
) type {
return struct {
const Self = @This();
stack_size: u32,
max_threads: u32,
sync: Atomic(u32) = Atomic(u32).init(@bitCast(u32, Sync{})),
idle_event: Event = .{},
join_event: Event = .{},
run_queue: Node.Queue = .{},
threads: Atomic(?*Thread) = Atomic(?*Thread).init(null),
/// Statically initialize the thread pool using the configuration.
pub fn init(config: Config) Self {
return .{
.stack_size = std.math.max(1, config.stack_size),
.max_threads = std.math.max(1, config.max_threads),
};
}
/// Wait for a thread to call shutdown() on the thread pool and kill the worker threads.
pub fn deinit(self: *Self) void {
self.join();
self.* = undefined;
}
/// An unordered collection of Tasks which can be submitted for scheduling as a group.
pub const Batch = struct {
len: usize = 0,
head: ?*Task(Args, Return) = null,
tail: ?*Task(Args, Return) = null,
/// Create a batch from a single task.
pub fn from(task: anytype) Batch {
return Batch{
.len = 1,
.head = task,
.tail = task,
};
}
/// Another batch into this one, taking ownership of its tasks.
pub fn push(self: *Batch, batch: Batch) void {
if (batch.len == 0) return;
if (self.len == 0) {
self.* = batch;
} else {
self.tail.?.node.next = if (batch.head) |h| &h.node else null;
self.tail = batch.tail;
self.len += batch.len;
}
}
};
/// Schedule a batch of tasks to be executed by some thread on the thread pool.
pub fn schedule(self: *Self, batch: Batch) void {
// Sanity check
if (batch.len == 0) {
return;
}
// Extract out the Node's from the Tasks
var list = Node.List{
.head = &batch.head.?.node,
.tail = &batch.tail.?.node,
};
// Push the task Nodes to the most approriate queue
if (Thread.current) |thread| {
thread.run_buffer.push(&list) catch thread.run_queue.push(list);
} else {
self.run_queue.push(list);
}
// Try to notify a thread
const is_waking = false;
return self.notify(is_waking);
}
inline fn notify(self: *Self, is_waking: bool) void {
// Fast path to check the Sync state to avoid calling into notifySlow().
// If we're waking, then we need to update the state regardless
if (!is_waking) {
const sync = @bitCast(Sync, self.sync.load(.Monotonic));
if (sync.notified) {
return;
}
}
return self.notifySlow(is_waking);
}
noinline fn notifySlow(self: *Self, is_waking: bool) void {
var sync = @bitCast(Sync, self.sync.load(.Monotonic));
while (sync.state != .shutdown) {
const can_wake = is_waking or (sync.state == .pending);
if (is_waking) {
assert(sync.state == .waking);
}
var new_sync = sync;
new_sync.notified = true;
if (can_wake and sync.idle > 0) { // wake up an idle thread
new_sync.state = .signaled;
} else if (can_wake and sync.spawned < self.max_threads) { // spawn a new thread
new_sync.state = .signaled;
new_sync.spawned += 1;
} else if (is_waking) { // no other thread to pass on "waking" status
new_sync.state = .pending;
} else if (sync.notified) { // nothing to update
return;
}
// Release barrier synchronizes with Acquire in wait()
// to ensure pushes to run queues happen before observing a posted notification.
sync = @bitCast(Sync, self.sync.tryCompareAndSwap(
@bitCast(u32, sync),
@bitCast(u32, new_sync),
.Release,
.Monotonic,
) orelse {
// We signaled to notify an idle thread
if (can_wake and sync.idle > 0) {
return self.idle_event.notify();
}
// We signaled to spawn a new thread
if (can_wake and sync.spawned < self.max_threads) {
const spawn_config = std.Thread.SpawnConfig{ .stack_size = self.stack_size };
const thread = std.Thread.spawn(spawn_config, Thread.run, .{self}) catch return self.unregister(null);
return thread.detach();
}
return;
});
}
}
/// Parks the calling worker until there is work to consume or the pool
/// shuts down.
///
/// `_is_waking` indicates the caller currently holds the "waking" token
/// (only one thread may be in the .waking state at a time). Returns whether
/// the caller holds the waking token after consuming a notification, or
/// error.Shutdown once the pool state is .shutdown.
noinline fn wait(self: *Self, _is_waking: bool) error{Shutdown}!bool {
var is_idle = false;
var is_waking = _is_waking;
// Snapshot the packed Sync word; every transition below retries via CAS.
var sync = @bitCast(Sync, self.sync.load(.Monotonic));
while (true) {
if (sync.state == .shutdown) return error.Shutdown;
// Holding the waking token implies the shared state must be .waking.
if (is_waking) assert(sync.state == .waking);
// Consume a notification made by notify().
if (sync.notified) {
var new_sync = sync;
new_sync.notified = false;
// We were counted as idle; un-count ourselves while consuming.
if (is_idle)
new_sync.idle -= 1;
// A .signaled state transfers the waking token to the consumer.
if (sync.state == .signaled)
new_sync.state = .waking;
// Acquire barrier synchronizes with notify()
// to ensure that pushes to run queue are observed after wait() returns.
sync = @bitCast(Sync, self.sync.tryCompareAndSwap(
@bitCast(u32, sync),
@bitCast(u32, new_sync),
.Acquire,
.Monotonic,
) orelse {
return is_waking or (sync.state == .signaled);
});
// No notification to consume.
// Mark this thread as idle before sleeping on the idle_event.
} else if (!is_idle) {
var new_sync = sync;
new_sync.idle += 1;
// Going idle releases the waking token back to .pending.
if (is_waking)
new_sync.state = .pending;
sync = @bitCast(Sync, self.sync.tryCompareAndSwap(
@bitCast(u32, sync),
@bitCast(u32, new_sync),
.Monotonic,
.Monotonic,
) orelse {
is_waking = false;
is_idle = true;
continue;
});
// Wait for a signal by either notify() or shutdown() without wasting cpu cycles.
// TODO: Add I/O polling here.
} else {
self.idle_event.wait();
sync = @bitCast(Sync, self.sync.load(.Monotonic));
}
}
}
/// Marks the thread pool as shutdown and wakes all idle worker threads.
/// May be called from any thread; once .shutdown has been observed the
/// loop exits without further effect, so repeated calls are harmless.
pub noinline fn shutdown(self: *Self) void {
var sync = @bitCast(Sync, self.sync.load(.Monotonic));
while (sync.state != .shutdown) {
var new_sync = sync;
// Set notified so concurrent wait()ers re-check state instead of parking.
new_sync.notified = true;
new_sync.state = .shutdown;
// Zero the idle count: every idle thread is about to be woken below.
new_sync.idle = 0;
// Full barrier to synchronize with both wait() and notify()
sync = @bitCast(Sync, self.sync.tryCompareAndSwap(
@bitCast(u32, sync),
@bitCast(u32, new_sync),
.AcqRel,
.Monotonic,
) orelse {
// Wake up any threads sleeping on the idle_event.
// TODO: I/O polling notification here.
if (sync.idle > 0) self.idle_event.shutdown();
return;
});
}
}
/// Lock-free (Treiber-stack) push of `thread` onto the pool's singly
/// linked stack of registered worker threads.
fn register(noalias self: *Self, noalias thread: *Thread) void {
    var head = self.threads.load(.Monotonic);
    while (true) {
        // Link ourselves in front of the current head, then try to install.
        thread.next = head;
        // Release ordering publishes the initialized Thread to readers
        // that load the stack head with Acquire.
        head = self.threads.tryCompareAndSwap(
            head,
            thread,
            .Release,
            .Monotonic,
        ) orelse return;
    }
}
/// Removes one spawned thread from the pool's accounting. For a pool
/// worker thread (`maybe_thread` non-null) this then blocks until the
/// join() chain delivers its shutdown signal and forwards that signal to
/// the next registered thread. `maybe_thread` is null when undoing a
/// failed OS thread spawn (see notify()).
fn unregister(noalias self: *Self, noalias maybe_thread: ?*Thread) void {
// Un-spawn one thread, either due to a failed OS thread spawning or the thread is exitting.
const one_spawned = @bitCast(u32, Sync{ .spawned = 1 });
const sync = @bitCast(Sync, self.sync.fetchSub(one_spawned, .Release));
assert(sync.spawned > 0);
// The last thread to exit must wake up the thread pool join()er
// who will start the chain to shutdown all the threads.
if (sync.state == .shutdown and sync.spawned == 1) {
self.join_event.notify();
}
// If this is a thread pool thread, wait for a shutdown signal by the thread pool join()er.
const thread = maybe_thread orelse return;
thread.join_event.wait();
// After receiving the shutdown signal, shutdown the next thread in the pool.
// We have to do that without touching the thread pool itself since it's memory is invalidated by now.
// So just follow our .next link.
const next_thread = thread.next orelse return;
next_thread.join_event.notify();
}
/// Blocks until the pool has been shutdown() and every spawned thread has
/// unregistered, then kicks off the chained join_event hand-off so each
/// worker thread can exit in turn.
fn join(self: *Self) void {
// Wait for the thread pool to be shutdown() then for all threads to enter a joinable state
var sync = @bitCast(Sync, self.sync.load(.Monotonic));
if (!(sync.state == .shutdown and sync.spawned == 0)) {
self.join_event.wait();
sync = @bitCast(Sync, self.sync.load(.Monotonic));
}
assert(sync.state == .shutdown);
assert(sync.spawned == 0);
// If there are threads, start off the chain sending it the shutdown signal.
// The thread receives the shutdown signal and sends it to the next thread, and the next..
// Acquire pairs with the Release CAS in register() that published the Thread.
const thread = self.threads.load(.Acquire) orelse return;
thread.join_event.notify();
}
/// Per-worker state. Lives on the worker's own stack for the duration of
/// run(); other threads reach it through the pool's lock-free threads stack.
const Thread = struct {
// Intrusive link in the pool's stack of registered threads (see register()).
next: ?*Thread = null,
// Cursor for round-robin work stealing in pop(); resumes where the last
// steal attempt left off.
target: ?*Thread = null,
// Signaled by unregister()/join() to let this thread exit (shutdown chain).
join_event: Event = .{},
// Unbounded overflow queue other threads may consume from.
run_queue: Node.Queue = .{},
// Fixed-size local buffer, checked first in pop().
run_buffer: Node.Buffer = .{},
// The Thread of the currently executing worker, if any.
threadlocal var current: ?*Thread = null;
/// Thread entry point which runs a worker for the ThreadPool
fn run(thread_pool: *Self) void {
var self = Thread{};
current = &self;
thread_pool.register(&self);
defer thread_pool.unregister(&self);
var is_waking = false;
while (true) {
// Park until there is (potentially) work; exit on Shutdown.
is_waking = thread_pool.wait(is_waking) catch return;
while (self.pop(thread_pool)) |result| {
// Hand off the waking token / announce newly visible work
// before executing the task.
if (result.pushed or is_waking)
thread_pool.notify(is_waking);
is_waking = false;
// Recover the Task from its intrusive Node and execute it.
const task = @fieldParentPtr(Task(Args, Return), "node", result.node);
_ = (task.callback)(task.args);
}
}
}
/// Try to dequeue a Node/Task from the ThreadPool.
/// Spurious reports of dequeue() returning empty are allowed.
fn pop(noalias self: *Thread, noalias thread_pool: *Self) ?Node.Buffer.Stole {
// Check our local buffer first
if (self.run_buffer.pop()) |node| {
return Node.Buffer.Stole{
.node = node,
.pushed = false,
};
}
// Then check our local queue
if (self.run_buffer.consume(&self.run_queue)) |stole| {
return stole;
}
// Then the global queue
if (self.run_buffer.consume(&thread_pool.run_queue)) |stole| {
return stole;
}
// TODO: add optimistic I/O polling here
// Then try work stealing from other threads
var num_threads: u32 = @bitCast(Sync, thread_pool.sync.load(.Monotonic)).spawned;
while (num_threads > 0) : (num_threads -= 1) {
// Traverse the stack of registered threads on the thread pool
const target = self.target orelse thread_pool.threads.load(.Acquire) orelse unreachable;
self.target = target.next;
// Try to steal from their queue first to avoid contention (the target steal's from queue last).
if (self.run_buffer.consume(&target.run_queue)) |stole| {
return stole;
}
// Skip stealing from the buffer if we're the target.
// We still steal from our own queue above given it may have just been locked the first time we tried.
if (target == self) {
continue;
}
// Steal from the buffer of a remote thread as a last resort
if (self.run_buffer.steal(&target.run_buffer)) |stole| {
return stole;
}
}
return null;
}
};
};
}
|
src/thread_pool.zig
|
pub fn isNeutral(cp: u21) bool {
if (cp < 0x0 or cp > 0xe007f) return false;
return switch (cp) {
0x0...0x1f => true,
0x7f...0x9f => true,
0xa0 => true,
0xa9 => true,
0xab => true,
0xb5 => true,
0xbb => true,
0xc0...0xc5 => true,
0xc7...0xcf => true,
0xd1...0xd6 => true,
0xd9...0xdd => true,
0xe2...0xe5 => true,
0xe7 => true,
0xeb => true,
0xee...0xef => true,
0xf1 => true,
0xf4...0xf6 => true,
0xfb => true,
0xfd => true,
0xff...0x100 => true,
0x102...0x110 => true,
0x112 => true,
0x114...0x11a => true,
0x11c...0x125 => true,
0x128...0x12a => true,
0x12c...0x130 => true,
0x134...0x137 => true,
0x139...0x13e => true,
0x143 => true,
0x145...0x147 => true,
0x14c => true,
0x14e...0x151 => true,
0x154...0x165 => true,
0x168...0x16a => true,
0x16c...0x1ba => true,
0x1bb => true,
0x1bc...0x1bf => true,
0x1c0...0x1c3 => true,
0x1c4...0x1cd => true,
0x1cf => true,
0x1d1 => true,
0x1d3 => true,
0x1d5 => true,
0x1d7 => true,
0x1d9 => true,
0x1db => true,
0x1dd...0x250 => true,
0x252...0x260 => true,
0x262...0x293 => true,
0x294 => true,
0x295...0x2af => true,
0x2b0...0x2c1 => true,
0x2c2...0x2c3 => true,
0x2c5 => true,
0x2c6 => true,
0x2c8 => true,
0x2cc => true,
0x2ce...0x2cf => true,
0x2d1 => true,
0x2d2...0x2d7 => true,
0x2dc => true,
0x2de => true,
0x2e0...0x2e4 => true,
0x2e5...0x2eb => true,
0x2ec => true,
0x2ed => true,
0x2ee => true,
0x2ef...0x2ff => true,
0x370...0x373 => true,
0x374 => true,
0x375 => true,
0x376...0x377 => true,
0x37a => true,
0x37b...0x37d => true,
0x37e => true,
0x37f => true,
0x384...0x385 => true,
0x386 => true,
0x387 => true,
0x388...0x38a => true,
0x38c => true,
0x38e...0x390 => true,
0x3aa...0x3b0 => true,
0x3c2 => true,
0x3ca...0x3f5 => true,
0x3f6 => true,
0x3f7...0x400 => true,
0x402...0x40f => true,
0x450 => true,
0x452...0x481 => true,
0x482 => true,
0x483...0x487 => true,
0x488...0x489 => true,
0x48a...0x52f => true,
0x531...0x556 => true,
0x559 => true,
0x55a...0x55f => true,
0x560...0x588 => true,
0x589 => true,
0x58a => true,
0x58d...0x58e => true,
0x58f => true,
0x591...0x5bd => true,
0x5be => true,
0x5bf => true,
0x5c0 => true,
0x5c1...0x5c2 => true,
0x5c3 => true,
0x5c4...0x5c5 => true,
0x5c6 => true,
0x5c7 => true,
0x5d0...0x5ea => true,
0x5ef...0x5f2 => true,
0x5f3...0x5f4 => true,
0x600...0x605 => true,
0x606...0x608 => true,
0x609...0x60a => true,
0x60b => true,
0x60c...0x60d => true,
0x60e...0x60f => true,
0x610...0x61a => true,
0x61b => true,
0x61c => true,
0x61d...0x61f => true,
0x620...0x63f => true,
0x640 => true,
0x641...0x64a => true,
0x64b...0x65f => true,
0x660...0x669 => true,
0x66a...0x66d => true,
0x66e...0x66f => true,
0x670 => true,
0x671...0x6d3 => true,
0x6d4 => true,
0x6d5 => true,
0x6d6...0x6dc => true,
0x6dd => true,
0x6de => true,
0x6df...0x6e4 => true,
0x6e5...0x6e6 => true,
0x6e7...0x6e8 => true,
0x6e9 => true,
0x6ea...0x6ed => true,
0x6ee...0x6ef => true,
0x6f0...0x6f9 => true,
0x6fa...0x6fc => true,
0x6fd...0x6fe => true,
0x6ff => true,
0x700...0x70d => true,
0x70f => true,
0x710 => true,
0x711 => true,
0x712...0x72f => true,
0x730...0x74a => true,
0x74d...0x7a5 => true,
0x7a6...0x7b0 => true,
0x7b1 => true,
0x7c0...0x7c9 => true,
0x7ca...0x7ea => true,
0x7eb...0x7f3 => true,
0x7f4...0x7f5 => true,
0x7f6 => true,
0x7f7...0x7f9 => true,
0x7fa => true,
0x7fd => true,
0x7fe...0x7ff => true,
0x800...0x815 => true,
0x816...0x819 => true,
0x81a => true,
0x81b...0x823 => true,
0x824 => true,
0x825...0x827 => true,
0x828 => true,
0x829...0x82d => true,
0x830...0x83e => true,
0x840...0x858 => true,
0x859...0x85b => true,
0x85e => true,
0x860...0x86a => true,
0x870...0x887 => true,
0x888 => true,
0x889...0x88e => true,
0x890...0x891 => true,
0x898...0x89f => true,
0x8a0...0x8c8 => true,
0x8c9 => true,
0x8ca...0x8e1 => true,
0x8e2 => true,
0x8e3...0x902 => true,
0x903 => true,
0x904...0x939 => true,
0x93a => true,
0x93b => true,
0x93c => true,
0x93d => true,
0x93e...0x940 => true,
0x941...0x948 => true,
0x949...0x94c => true,
0x94d => true,
0x94e...0x94f => true,
0x950 => true,
0x951...0x957 => true,
0x958...0x961 => true,
0x962...0x963 => true,
0x964...0x965 => true,
0x966...0x96f => true,
0x970 => true,
0x971 => true,
0x972...0x980 => true,
0x981 => true,
0x982...0x983 => true,
0x985...0x98c => true,
0x98f...0x990 => true,
0x993...0x9a8 => true,
0x9aa...0x9b0 => true,
0x9b2 => true,
0x9b6...0x9b9 => true,
0x9bc => true,
0x9bd => true,
0x9be...0x9c0 => true,
0x9c1...0x9c4 => true,
0x9c7...0x9c8 => true,
0x9cb...0x9cc => true,
0x9cd => true,
0x9ce => true,
0x9d7 => true,
0x9dc...0x9dd => true,
0x9df...0x9e1 => true,
0x9e2...0x9e3 => true,
0x9e6...0x9ef => true,
0x9f0...0x9f1 => true,
0x9f2...0x9f3 => true,
0x9f4...0x9f9 => true,
0x9fa => true,
0x9fb => true,
0x9fc => true,
0x9fd => true,
0x9fe => true,
0xa01...0xa02 => true,
0xa03 => true,
0xa05...0xa0a => true,
0xa0f...0xa10 => true,
0xa13...0xa28 => true,
0xa2a...0xa30 => true,
0xa32...0xa33 => true,
0xa35...0xa36 => true,
0xa38...0xa39 => true,
0xa3c => true,
0xa3e...0xa40 => true,
0xa41...0xa42 => true,
0xa47...0xa48 => true,
0xa4b...0xa4d => true,
0xa51 => true,
0xa59...0xa5c => true,
0xa5e => true,
0xa66...0xa6f => true,
0xa70...0xa71 => true,
0xa72...0xa74 => true,
0xa75 => true,
0xa76 => true,
0xa81...0xa82 => true,
0xa83 => true,
0xa85...0xa8d => true,
0xa8f...0xa91 => true,
0xa93...0xaa8 => true,
0xaaa...0xab0 => true,
0xab2...0xab3 => true,
0xab5...0xab9 => true,
0xabc => true,
0xabd => true,
0xabe...0xac0 => true,
0xac1...0xac5 => true,
0xac7...0xac8 => true,
0xac9 => true,
0xacb...0xacc => true,
0xacd => true,
0xad0 => true,
0xae0...0xae1 => true,
0xae2...0xae3 => true,
0xae6...0xaef => true,
0xaf0 => true,
0xaf1 => true,
0xaf9 => true,
0xafa...0xaff => true,
0xb01 => true,
0xb02...0xb03 => true,
0xb05...0xb0c => true,
0xb0f...0xb10 => true,
0xb13...0xb28 => true,
0xb2a...0xb30 => true,
0xb32...0xb33 => true,
0xb35...0xb39 => true,
0xb3c => true,
0xb3d => true,
0xb3e => true,
0xb3f => true,
0xb40 => true,
0xb41...0xb44 => true,
0xb47...0xb48 => true,
0xb4b...0xb4c => true,
0xb4d => true,
0xb55...0xb56 => true,
0xb57 => true,
0xb5c...0xb5d => true,
0xb5f...0xb61 => true,
0xb62...0xb63 => true,
0xb66...0xb6f => true,
0xb70 => true,
0xb71 => true,
0xb72...0xb77 => true,
0xb82 => true,
0xb83 => true,
0xb85...0xb8a => true,
0xb8e...0xb90 => true,
0xb92...0xb95 => true,
0xb99...0xb9a => true,
0xb9c => true,
0xb9e...0xb9f => true,
0xba3...0xba4 => true,
0xba8...0xbaa => true,
0xbae...0xbb9 => true,
0xbbe...0xbbf => true,
0xbc0 => true,
0xbc1...0xbc2 => true,
0xbc6...0xbc8 => true,
0xbca...0xbcc => true,
0xbcd => true,
0xbd0 => true,
0xbd7 => true,
0xbe6...0xbef => true,
0xbf0...0xbf2 => true,
0xbf3...0xbf8 => true,
0xbf9 => true,
0xbfa => true,
0xc00 => true,
0xc01...0xc03 => true,
0xc04 => true,
0xc05...0xc0c => true,
0xc0e...0xc10 => true,
0xc12...0xc28 => true,
0xc2a...0xc39 => true,
0xc3c => true,
0xc3d => true,
0xc3e...0xc40 => true,
0xc41...0xc44 => true,
0xc46...0xc48 => true,
0xc4a...0xc4d => true,
0xc55...0xc56 => true,
0xc58...0xc5a => true,
0xc5d => true,
0xc60...0xc61 => true,
0xc62...0xc63 => true,
0xc66...0xc6f => true,
0xc77 => true,
0xc78...0xc7e => true,
0xc7f => true,
0xc80 => true,
0xc81 => true,
0xc82...0xc83 => true,
0xc84 => true,
0xc85...0xc8c => true,
0xc8e...0xc90 => true,
0xc92...0xca8 => true,
0xcaa...0xcb3 => true,
0xcb5...0xcb9 => true,
0xcbc => true,
0xcbd => true,
0xcbe => true,
0xcbf => true,
0xcc0...0xcc4 => true,
0xcc6 => true,
0xcc7...0xcc8 => true,
0xcca...0xccb => true,
0xccc...0xccd => true,
0xcd5...0xcd6 => true,
0xcdd...0xcde => true,
0xce0...0xce1 => true,
0xce2...0xce3 => true,
0xce6...0xcef => true,
0xcf1...0xcf2 => true,
0xd00...0xd01 => true,
0xd02...0xd03 => true,
0xd04...0xd0c => true,
0xd0e...0xd10 => true,
0xd12...0xd3a => true,
0xd3b...0xd3c => true,
0xd3d => true,
0xd3e...0xd40 => true,
0xd41...0xd44 => true,
0xd46...0xd48 => true,
0xd4a...0xd4c => true,
0xd4d => true,
0xd4e => true,
0xd4f => true,
0xd54...0xd56 => true,
0xd57 => true,
0xd58...0xd5e => true,
0xd5f...0xd61 => true,
0xd62...0xd63 => true,
0xd66...0xd6f => true,
0xd70...0xd78 => true,
0xd79 => true,
0xd7a...0xd7f => true,
0xd81 => true,
0xd82...0xd83 => true,
0xd85...0xd96 => true,
0xd9a...0xdb1 => true,
0xdb3...0xdbb => true,
0xdbd => true,
0xdc0...0xdc6 => true,
0xdca => true,
0xdcf...0xdd1 => true,
0xdd2...0xdd4 => true,
0xdd6 => true,
0xdd8...0xddf => true,
0xde6...0xdef => true,
0xdf2...0xdf3 => true,
0xdf4 => true,
0xe01...0xe30 => true,
0xe31 => true,
0xe32...0xe33 => true,
0xe34...0xe3a => true,
0xe3f => true,
0xe40...0xe45 => true,
0xe46 => true,
0xe47...0xe4e => true,
0xe4f => true,
0xe50...0xe59 => true,
0xe5a...0xe5b => true,
0xe81...0xe82 => true,
0xe84 => true,
0xe86...0xe8a => true,
0xe8c...0xea3 => true,
0xea5 => true,
0xea7...0xeb0 => true,
0xeb1 => true,
0xeb2...0xeb3 => true,
0xeb4...0xebc => true,
0xebd => true,
0xec0...0xec4 => true,
0xec6 => true,
0xec8...0xecd => true,
0xed0...0xed9 => true,
0xedc...0xedf => true,
0xf00 => true,
0xf01...0xf03 => true,
0xf04...0xf12 => true,
0xf13 => true,
0xf14 => true,
0xf15...0xf17 => true,
0xf18...0xf19 => true,
0xf1a...0xf1f => true,
0xf20...0xf29 => true,
0xf2a...0xf33 => true,
0xf34 => true,
0xf35 => true,
0xf36 => true,
0xf37 => true,
0xf38 => true,
0xf39 => true,
0xf3a => true,
0xf3b => true,
0xf3c => true,
0xf3d => true,
0xf3e...0xf3f => true,
0xf40...0xf47 => true,
0xf49...0xf6c => true,
0xf71...0xf7e => true,
0xf7f => true,
0xf80...0xf84 => true,
0xf85 => true,
0xf86...0xf87 => true,
0xf88...0xf8c => true,
0xf8d...0xf97 => true,
0xf99...0xfbc => true,
0xfbe...0xfc5 => true,
0xfc6 => true,
0xfc7...0xfcc => true,
0xfce...0xfcf => true,
0xfd0...0xfd4 => true,
0xfd5...0xfd8 => true,
0xfd9...0xfda => true,
0x1000...0x102a => true,
0x102b...0x102c => true,
0x102d...0x1030 => true,
0x1031 => true,
0x1032...0x1037 => true,
0x1038 => true,
0x1039...0x103a => true,
0x103b...0x103c => true,
0x103d...0x103e => true,
0x103f => true,
0x1040...0x1049 => true,
0x104a...0x104f => true,
0x1050...0x1055 => true,
0x1056...0x1057 => true,
0x1058...0x1059 => true,
0x105a...0x105d => true,
0x105e...0x1060 => true,
0x1061 => true,
0x1062...0x1064 => true,
0x1065...0x1066 => true,
0x1067...0x106d => true,
0x106e...0x1070 => true,
0x1071...0x1074 => true,
0x1075...0x1081 => true,
0x1082 => true,
0x1083...0x1084 => true,
0x1085...0x1086 => true,
0x1087...0x108c => true,
0x108d => true,
0x108e => true,
0x108f => true,
0x1090...0x1099 => true,
0x109a...0x109c => true,
0x109d => true,
0x109e...0x109f => true,
0x10a0...0x10c5 => true,
0x10c7 => true,
0x10cd => true,
0x10d0...0x10fa => true,
0x10fb => true,
0x10fc => true,
0x10fd...0x10ff => true,
0x1160...0x1248 => true,
0x124a...0x124d => true,
0x1250...0x1256 => true,
0x1258 => true,
0x125a...0x125d => true,
0x1260...0x1288 => true,
0x128a...0x128d => true,
0x1290...0x12b0 => true,
0x12b2...0x12b5 => true,
0x12b8...0x12be => true,
0x12c0 => true,
0x12c2...0x12c5 => true,
0x12c8...0x12d6 => true,
0x12d8...0x1310 => true,
0x1312...0x1315 => true,
0x1318...0x135a => true,
0x135d...0x135f => true,
0x1360...0x1368 => true,
0x1369...0x137c => true,
0x1380...0x138f => true,
0x1390...0x1399 => true,
0x13a0...0x13f5 => true,
0x13f8...0x13fd => true,
0x1400 => true,
0x1401...0x166c => true,
0x166d => true,
0x166e => true,
0x166f...0x167f => true,
0x1680 => true,
0x1681...0x169a => true,
0x169b => true,
0x169c => true,
0x16a0...0x16ea => true,
0x16eb...0x16ed => true,
0x16ee...0x16f0 => true,
0x16f1...0x16f8 => true,
0x1700...0x1711 => true,
0x1712...0x1714 => true,
0x1715 => true,
0x171f...0x1731 => true,
0x1732...0x1733 => true,
0x1734 => true,
0x1735...0x1736 => true,
0x1740...0x1751 => true,
0x1752...0x1753 => true,
0x1760...0x176c => true,
0x176e...0x1770 => true,
0x1772...0x1773 => true,
0x1780...0x17b3 => true,
0x17b4...0x17b5 => true,
0x17b6 => true,
0x17b7...0x17bd => true,
0x17be...0x17c5 => true,
0x17c6 => true,
0x17c7...0x17c8 => true,
0x17c9...0x17d3 => true,
0x17d4...0x17d6 => true,
0x17d7 => true,
0x17d8...0x17da => true,
0x17db => true,
0x17dc => true,
0x17dd => true,
0x17e0...0x17e9 => true,
0x17f0...0x17f9 => true,
0x1800...0x1805 => true,
0x1806 => true,
0x1807...0x180a => true,
0x180b...0x180d => true,
0x180e => true,
0x180f => true,
0x1810...0x1819 => true,
0x1820...0x1842 => true,
0x1843 => true,
0x1844...0x1878 => true,
0x1880...0x1884 => true,
0x1885...0x1886 => true,
0x1887...0x18a8 => true,
0x18a9 => true,
0x18aa => true,
0x18b0...0x18f5 => true,
0x1900...0x191e => true,
0x1920...0x1922 => true,
0x1923...0x1926 => true,
0x1927...0x1928 => true,
0x1929...0x192b => true,
0x1930...0x1931 => true,
0x1932 => true,
0x1933...0x1938 => true,
0x1939...0x193b => true,
0x1940 => true,
0x1944...0x1945 => true,
0x1946...0x194f => true,
0x1950...0x196d => true,
0x1970...0x1974 => true,
0x1980...0x19ab => true,
0x19b0...0x19c9 => true,
0x19d0...0x19d9 => true,
0x19da => true,
0x19de...0x19ff => true,
0x1a00...0x1a16 => true,
0x1a17...0x1a18 => true,
0x1a19...0x1a1a => true,
0x1a1b => true,
0x1a1e...0x1a1f => true,
0x1a20...0x1a54 => true,
0x1a55 => true,
0x1a56 => true,
0x1a57 => true,
0x1a58...0x1a5e => true,
0x1a60 => true,
0x1a61 => true,
0x1a62 => true,
0x1a63...0x1a64 => true,
0x1a65...0x1a6c => true,
0x1a6d...0x1a72 => true,
0x1a73...0x1a7c => true,
0x1a7f => true,
0x1a80...0x1a89 => true,
0x1a90...0x1a99 => true,
0x1aa0...0x1aa6 => true,
0x1aa7 => true,
0x1aa8...0x1aad => true,
0x1ab0...0x1abd => true,
0x1abe => true,
0x1abf...0x1ace => true,
0x1b00...0x1b03 => true,
0x1b04 => true,
0x1b05...0x1b33 => true,
0x1b34 => true,
0x1b35 => true,
0x1b36...0x1b3a => true,
0x1b3b => true,
0x1b3c => true,
0x1b3d...0x1b41 => true,
0x1b42 => true,
0x1b43...0x1b44 => true,
0x1b45...0x1b4c => true,
0x1b50...0x1b59 => true,
0x1b5a...0x1b60 => true,
0x1b61...0x1b6a => true,
0x1b6b...0x1b73 => true,
0x1b74...0x1b7c => true,
0x1b7d...0x1b7e => true,
0x1b80...0x1b81 => true,
0x1b82 => true,
0x1b83...0x1ba0 => true,
0x1ba1 => true,
0x1ba2...0x1ba5 => true,
0x1ba6...0x1ba7 => true,
0x1ba8...0x1ba9 => true,
0x1baa => true,
0x1bab...0x1bad => true,
0x1bae...0x1baf => true,
0x1bb0...0x1bb9 => true,
0x1bba...0x1be5 => true,
0x1be6 => true,
0x1be7 => true,
0x1be8...0x1be9 => true,
0x1bea...0x1bec => true,
0x1bed => true,
0x1bee => true,
0x1bef...0x1bf1 => true,
0x1bf2...0x1bf3 => true,
0x1bfc...0x1bff => true,
0x1c00...0x1c23 => true,
0x1c24...0x1c2b => true,
0x1c2c...0x1c33 => true,
0x1c34...0x1c35 => true,
0x1c36...0x1c37 => true,
0x1c3b...0x1c3f => true,
0x1c40...0x1c49 => true,
0x1c4d...0x1c4f => true,
0x1c50...0x1c59 => true,
0x1c5a...0x1c77 => true,
0x1c78...0x1c7d => true,
0x1c7e...0x1c7f => true,
0x1c80...0x1c88 => true,
0x1c90...0x1cba => true,
0x1cbd...0x1cbf => true,
0x1cc0...0x1cc7 => true,
0x1cd0...0x1cd2 => true,
0x1cd3 => true,
0x1cd4...0x1ce0 => true,
0x1ce1 => true,
0x1ce2...0x1ce8 => true,
0x1ce9...0x1cec => true,
0x1ced => true,
0x1cee...0x1cf3 => true,
0x1cf4 => true,
0x1cf5...0x1cf6 => true,
0x1cf7 => true,
0x1cf8...0x1cf9 => true,
0x1cfa => true,
0x1d00...0x1d2b => true,
0x1d2c...0x1d6a => true,
0x1d6b...0x1d77 => true,
0x1d78 => true,
0x1d79...0x1d9a => true,
0x1d9b...0x1dbf => true,
0x1dc0...0x1dff => true,
0x1e00...0x1f15 => true,
0x1f18...0x1f1d => true,
0x1f20...0x1f45 => true,
0x1f48...0x1f4d => true,
0x1f50...0x1f57 => true,
0x1f59 => true,
0x1f5b => true,
0x1f5d => true,
0x1f5f...0x1f7d => true,
0x1f80...0x1fb4 => true,
0x1fb6...0x1fbc => true,
0x1fbd => true,
0x1fbe => true,
0x1fbf...0x1fc1 => true,
0x1fc2...0x1fc4 => true,
0x1fc6...0x1fcc => true,
0x1fcd...0x1fcf => true,
0x1fd0...0x1fd3 => true,
0x1fd6...0x1fdb => true,
0x1fdd...0x1fdf => true,
0x1fe0...0x1fec => true,
0x1fed...0x1fef => true,
0x1ff2...0x1ff4 => true,
0x1ff6...0x1ffc => true,
0x1ffd...0x1ffe => true,
0x2000...0x200a => true,
0x200b...0x200f => true,
0x2011...0x2012 => true,
0x2017 => true,
0x201a => true,
0x201b => true,
0x201e => true,
0x201f => true,
0x2023 => true,
0x2028 => true,
0x2029 => true,
0x202a...0x202e => true,
0x202f => true,
0x2031 => true,
0x2034 => true,
0x2036...0x2038 => true,
0x2039 => true,
0x203a => true,
0x203c...0x203d => true,
0x203f...0x2040 => true,
0x2041...0x2043 => true,
0x2044 => true,
0x2045 => true,
0x2046 => true,
0x2047...0x2051 => true,
0x2052 => true,
0x2053 => true,
0x2054 => true,
0x2055...0x205e => true,
0x205f => true,
0x2060...0x2064 => true,
0x2066...0x206f => true,
0x2070 => true,
0x2071 => true,
0x2075...0x2079 => true,
0x207a...0x207c => true,
0x207d => true,
0x207e => true,
0x2080 => true,
0x2085...0x2089 => true,
0x208a...0x208c => true,
0x208d => true,
0x208e => true,
0x2090...0x209c => true,
0x20a0...0x20a8 => true,
0x20aa...0x20ab => true,
0x20ad...0x20c0 => true,
0x20d0...0x20dc => true,
0x20dd...0x20e0 => true,
0x20e1 => true,
0x20e2...0x20e4 => true,
0x20e5...0x20f0 => true,
0x2100...0x2101 => true,
0x2102 => true,
0x2104 => true,
0x2106 => true,
0x2107 => true,
0x2108 => true,
0x210a...0x2112 => true,
0x2114 => true,
0x2115 => true,
0x2117 => true,
0x2118 => true,
0x2119...0x211d => true,
0x211e...0x2120 => true,
0x2123 => true,
0x2124 => true,
0x2125 => true,
0x2127 => true,
0x2128 => true,
0x2129 => true,
0x212a => true,
0x212c...0x212d => true,
0x212e => true,
0x212f...0x2134 => true,
0x2135...0x2138 => true,
0x2139 => true,
0x213a...0x213b => true,
0x213c...0x213f => true,
0x2140...0x2144 => true,
0x2145...0x2149 => true,
0x214a => true,
0x214b => true,
0x214c...0x214d => true,
0x214e => true,
0x214f => true,
0x2150...0x2152 => true,
0x2155...0x215a => true,
0x215f => true,
0x216c...0x216f => true,
0x217a...0x2182 => true,
0x2183...0x2184 => true,
0x2185...0x2188 => true,
0x218a...0x218b => true,
0x219a...0x219b => true,
0x219c...0x219f => true,
0x21a0 => true,
0x21a1...0x21a2 => true,
0x21a3 => true,
0x21a4...0x21a5 => true,
0x21a6 => true,
0x21a7...0x21ad => true,
0x21ae => true,
0x21af...0x21b7 => true,
0x21ba...0x21cd => true,
0x21ce...0x21cf => true,
0x21d0...0x21d1 => true,
0x21d3 => true,
0x21d5...0x21e6 => true,
0x21e8...0x21f3 => true,
0x21f4...0x21ff => true,
0x2201 => true,
0x2204...0x2206 => true,
0x2209...0x220a => true,
0x220c...0x220e => true,
0x2210 => true,
0x2212...0x2214 => true,
0x2216...0x2219 => true,
0x221b...0x221c => true,
0x2221...0x2222 => true,
0x2224 => true,
0x2226 => true,
0x222d => true,
0x222f...0x2233 => true,
0x2238...0x223b => true,
0x223e...0x2247 => true,
0x2249...0x224b => true,
0x224d...0x2251 => true,
0x2253...0x225f => true,
0x2262...0x2263 => true,
0x2268...0x2269 => true,
0x226c...0x226d => true,
0x2270...0x2281 => true,
0x2284...0x2285 => true,
0x2288...0x2294 => true,
0x2296...0x2298 => true,
0x229a...0x22a4 => true,
0x22a6...0x22be => true,
0x22c0...0x22ff => true,
0x2300...0x2307 => true,
0x2308 => true,
0x2309 => true,
0x230a => true,
0x230b => true,
0x230c...0x2311 => true,
0x2313...0x2319 => true,
0x231c...0x231f => true,
0x2320...0x2321 => true,
0x2322...0x2328 => true,
0x232b...0x237b => true,
0x237c => true,
0x237d...0x239a => true,
0x239b...0x23b3 => true,
0x23b4...0x23db => true,
0x23dc...0x23e1 => true,
0x23e2...0x23e8 => true,
0x23ed...0x23ef => true,
0x23f1...0x23f2 => true,
0x23f4...0x2426 => true,
0x2440...0x244a => true,
0x24ea => true,
0x254c...0x254f => true,
0x2574...0x257f => true,
0x2590...0x2591 => true,
0x2596...0x259f => true,
0x25a2 => true,
0x25aa...0x25b1 => true,
0x25b4...0x25b5 => true,
0x25b8...0x25bb => true,
0x25be...0x25bf => true,
0x25c2...0x25c5 => true,
0x25c9...0x25ca => true,
0x25cc...0x25cd => true,
0x25d2...0x25e1 => true,
0x25e6...0x25ee => true,
0x25f0...0x25f7 => true,
0x25f8...0x25fc => true,
0x25ff => true,
0x2600...0x2604 => true,
0x2607...0x2608 => true,
0x260a...0x260d => true,
0x2610...0x2613 => true,
0x2616...0x261b => true,
0x261d => true,
0x261f...0x263f => true,
0x2641 => true,
0x2643...0x2647 => true,
0x2654...0x265f => true,
0x2662 => true,
0x2666 => true,
0x266b => true,
0x266e => true,
0x2670...0x267e => true,
0x2680...0x2692 => true,
0x2694...0x269d => true,
0x26a0 => true,
0x26a2...0x26a9 => true,
0x26ac...0x26bc => true,
0x26c0...0x26c3 => true,
0x26e2 => true,
0x26e4...0x26e7 => true,
0x2700...0x2704 => true,
0x2706...0x2709 => true,
0x270c...0x2727 => true,
0x2729...0x273c => true,
0x273e...0x274b => true,
0x274d => true,
0x274f...0x2752 => true,
0x2756 => true,
0x2758...0x2767 => true,
0x2768 => true,
0x2769 => true,
0x276a => true,
0x276b => true,
0x276c => true,
0x276d => true,
0x276e => true,
0x276f => true,
0x2770 => true,
0x2771 => true,
0x2772 => true,
0x2773 => true,
0x2774 => true,
0x2775 => true,
0x2780...0x2793 => true,
0x2794 => true,
0x2798...0x27af => true,
0x27b1...0x27be => true,
0x27c0...0x27c4 => true,
0x27c5 => true,
0x27c6 => true,
0x27c7...0x27e5 => true,
0x27ee => true,
0x27ef => true,
0x27f0...0x27ff => true,
0x2800...0x28ff => true,
0x2900...0x2982 => true,
0x2983 => true,
0x2984 => true,
0x2987 => true,
0x2988 => true,
0x2989 => true,
0x298a => true,
0x298b => true,
0x298c => true,
0x298d => true,
0x298e => true,
0x298f => true,
0x2990 => true,
0x2991 => true,
0x2992 => true,
0x2993 => true,
0x2994 => true,
0x2995 => true,
0x2996 => true,
0x2997 => true,
0x2998 => true,
0x2999...0x29d7 => true,
0x29d8 => true,
0x29d9 => true,
0x29da => true,
0x29db => true,
0x29dc...0x29fb => true,
0x29fc => true,
0x29fd => true,
0x29fe...0x2aff => true,
0x2b00...0x2b1a => true,
0x2b1d...0x2b2f => true,
0x2b30...0x2b44 => true,
0x2b45...0x2b46 => true,
0x2b47...0x2b4c => true,
0x2b4d...0x2b4f => true,
0x2b51...0x2b54 => true,
0x2b5a...0x2b73 => true,
0x2b76...0x2b95 => true,
0x2b97...0x2bff => true,
0x2c00...0x2c7b => true,
0x2c7c...0x2c7d => true,
0x2c7e...0x2ce4 => true,
0x2ce5...0x2cea => true,
0x2ceb...0x2cee => true,
0x2cef...0x2cf1 => true,
0x2cf2...0x2cf3 => true,
0x2cf9...0x2cfc => true,
0x2cfd => true,
0x2cfe...0x2cff => true,
0x2d00...0x2d25 => true,
0x2d27 => true,
0x2d2d => true,
0x2d30...0x2d67 => true,
0x2d6f => true,
0x2d70 => true,
0x2d7f => true,
0x2d80...0x2d96 => true,
0x2da0...0x2da6 => true,
0x2da8...0x2dae => true,
0x2db0...0x2db6 => true,
0x2db8...0x2dbe => true,
0x2dc0...0x2dc6 => true,
0x2dc8...0x2dce => true,
0x2dd0...0x2dd6 => true,
0x2dd8...0x2dde => true,
0x2de0...0x2dff => true,
0x2e00...0x2e01 => true,
0x2e02 => true,
0x2e03 => true,
0x2e04 => true,
0x2e05 => true,
0x2e06...0x2e08 => true,
0x2e09 => true,
0x2e0a => true,
0x2e0b => true,
0x2e0c => true,
0x2e0d => true,
0x2e0e...0x2e16 => true,
0x2e17 => true,
0x2e18...0x2e19 => true,
0x2e1a => true,
0x2e1b => true,
0x2e1c => true,
0x2e1d => true,
0x2e1e...0x2e1f => true,
0x2e20 => true,
0x2e21 => true,
0x2e22 => true,
0x2e23 => true,
0x2e24 => true,
0x2e25 => true,
0x2e26 => true,
0x2e27 => true,
0x2e28 => true,
0x2e29 => true,
0x2e2a...0x2e2e => true,
0x2e2f => true,
0x2e30...0x2e39 => true,
0x2e3a...0x2e3b => true,
0x2e3c...0x2e3f => true,
0x2e40 => true,
0x2e41 => true,
0x2e42 => true,
0x2e43...0x2e4f => true,
0x2e50...0x2e51 => true,
0x2e52...0x2e54 => true,
0x2e55 => true,
0x2e56 => true,
0x2e57 => true,
0x2e58 => true,
0x2e59 => true,
0x2e5a => true,
0x2e5b => true,
0x2e5c => true,
0x2e5d => true,
0x303f => true,
0x4dc0...0x4dff => true,
0xa4d0...0xa4f7 => true,
0xa4f8...0xa4fd => true,
0xa4fe...0xa4ff => true,
0xa500...0xa60b => true,
0xa60c => true,
0xa60d...0xa60f => true,
0xa610...0xa61f => true,
0xa620...0xa629 => true,
0xa62a...0xa62b => true,
0xa640...0xa66d => true,
0xa66e => true,
0xa66f => true,
0xa670...0xa672 => true,
0xa673 => true,
0xa674...0xa67d => true,
0xa67e => true,
0xa67f => true,
0xa680...0xa69b => true,
0xa69c...0xa69d => true,
0xa69e...0xa69f => true,
0xa6a0...0xa6e5 => true,
0xa6e6...0xa6ef => true,
0xa6f0...0xa6f1 => true,
0xa6f2...0xa6f7 => true,
0xa700...0xa716 => true,
0xa717...0xa71f => true,
0xa720...0xa721 => true,
0xa722...0xa76f => true,
0xa770 => true,
0xa771...0xa787 => true,
0xa788 => true,
0xa789...0xa78a => true,
0xa78b...0xa78e => true,
0xa78f => true,
0xa790...0xa7ca => true,
0xa7d0...0xa7d1 => true,
0xa7d3 => true,
0xa7d5...0xa7d9 => true,
0xa7f2...0xa7f4 => true,
0xa7f5...0xa7f6 => true,
0xa7f7 => true,
0xa7f8...0xa7f9 => true,
0xa7fa => true,
0xa7fb...0xa801 => true,
0xa802 => true,
0xa803...0xa805 => true,
0xa806 => true,
0xa807...0xa80a => true,
0xa80b => true,
0xa80c...0xa822 => true,
0xa823...0xa824 => true,
0xa825...0xa826 => true,
0xa827 => true,
0xa828...0xa82b => true,
0xa82c => true,
0xa830...0xa835 => true,
0xa836...0xa837 => true,
0xa838 => true,
0xa839 => true,
0xa840...0xa873 => true,
0xa874...0xa877 => true,
0xa880...0xa881 => true,
0xa882...0xa8b3 => true,
0xa8b4...0xa8c3 => true,
0xa8c4...0xa8c5 => true,
0xa8ce...0xa8cf => true,
0xa8d0...0xa8d9 => true,
0xa8e0...0xa8f1 => true,
0xa8f2...0xa8f7 => true,
0xa8f8...0xa8fa => true,
0xa8fb => true,
0xa8fc => true,
0xa8fd...0xa8fe => true,
0xa8ff => true,
0xa900...0xa909 => true,
0xa90a...0xa925 => true,
0xa926...0xa92d => true,
0xa92e...0xa92f => true,
0xa930...0xa946 => true,
0xa947...0xa951 => true,
0xa952...0xa953 => true,
0xa95f => true,
0xa980...0xa982 => true,
0xa983 => true,
0xa984...0xa9b2 => true,
0xa9b3 => true,
0xa9b4...0xa9b5 => true,
0xa9b6...0xa9b9 => true,
0xa9ba...0xa9bb => true,
0xa9bc...0xa9bd => true,
0xa9be...0xa9c0 => true,
0xa9c1...0xa9cd => true,
0xa9cf => true,
0xa9d0...0xa9d9 => true,
0xa9de...0xa9df => true,
0xa9e0...0xa9e4 => true,
0xa9e5 => true,
0xa9e6 => true,
0xa9e7...0xa9ef => true,
0xa9f0...0xa9f9 => true,
0xa9fa...0xa9fe => true,
0xaa00...0xaa28 => true,
0xaa29...0xaa2e => true,
0xaa2f...0xaa30 => true,
0xaa31...0xaa32 => true,
0xaa33...0xaa34 => true,
0xaa35...0xaa36 => true,
0xaa40...0xaa42 => true,
0xaa43 => true,
0xaa44...0xaa4b => true,
0xaa4c => true,
0xaa4d => true,
0xaa50...0xaa59 => true,
0xaa5c...0xaa5f => true,
0xaa60...0xaa6f => true,
0xaa70 => true,
0xaa71...0xaa76 => true,
0xaa77...0xaa79 => true,
0xaa7a => true,
0xaa7b => true,
0xaa7c => true,
0xaa7d => true,
0xaa7e...0xaaaf => true,
0xaab0 => true,
0xaab1 => true,
0xaab2...0xaab4 => true,
0xaab5...0xaab6 => true,
0xaab7...0xaab8 => true,
0xaab9...0xaabd => true,
0xaabe...0xaabf => true,
0xaac0 => true,
0xaac1 => true,
0xaac2 => true,
0xaadb...0xaadc => true,
0xaadd => true,
0xaade...0xaadf => true,
0xaae0...0xaaea => true,
0xaaeb => true,
0xaaec...0xaaed => true,
0xaaee...0xaaef => true,
0xaaf0...0xaaf1 => true,
0xaaf2 => true,
0xaaf3...0xaaf4 => true,
0xaaf5 => true,
0xaaf6 => true,
0xab01...0xab06 => true,
0xab09...0xab0e => true,
0xab11...0xab16 => true,
0xab20...0xab26 => true,
0xab28...0xab2e => true,
0xab30...0xab5a => true,
0xab5b => true,
0xab5c...0xab5f => true,
0xab60...0xab68 => true,
0xab69 => true,
0xab6a...0xab6b => true,
0xab70...0xabbf => true,
0xabc0...0xabe2 => true,
0xabe3...0xabe4 => true,
0xabe5 => true,
0xabe6...0xabe7 => true,
0xabe8 => true,
0xabe9...0xabea => true,
0xabeb => true,
0xabec => true,
0xabed => true,
0xabf0...0xabf9 => true,
0xd7b0...0xd7c6 => true,
0xd7cb...0xd7fb => true,
0xfb00...0xfb06 => true,
0xfb13...0xfb17 => true,
0xfb1d => true,
0xfb1e => true,
0xfb1f...0xfb28 => true,
0xfb29 => true,
0xfb2a...0xfb36 => true,
0xfb38...0xfb3c => true,
0xfb3e => true,
0xfb40...0xfb41 => true,
0xfb43...0xfb44 => true,
0xfb46...0xfbb1 => true,
0xfbb2...0xfbc2 => true,
0xfbd3...0xfd3d => true,
0xfd3e => true,
0xfd3f => true,
0xfd40...0xfd4f => true,
0xfd50...0xfd8f => true,
0xfd92...0xfdc7 => true,
0xfdcf => true,
0xfdf0...0xfdfb => true,
0xfdfc => true,
0xfdfd...0xfdff => true,
0xfe20...0xfe2f => true,
0xfe70...0xfe74 => true,
0xfe76...0xfefc => true,
0xfeff => true,
0xfff9...0xfffb => true,
0xfffc => true,
0x10000...0x1000b => true,
0x1000d...0x10026 => true,
0x10028...0x1003a => true,
0x1003c...0x1003d => true,
0x1003f...0x1004d => true,
0x10050...0x1005d => true,
0x10080...0x100fa => true,
0x10100...0x10102 => true,
0x10107...0x10133 => true,
0x10137...0x1013f => true,
0x10140...0x10174 => true,
0x10175...0x10178 => true,
0x10179...0x10189 => true,
0x1018a...0x1018b => true,
0x1018c...0x1018e => true,
0x10190...0x1019c => true,
0x101a0 => true,
0x101d0...0x101fc => true,
0x101fd => true,
0x10280...0x1029c => true,
0x102a0...0x102d0 => true,
0x102e0 => true,
0x102e1...0x102fb => true,
0x10300...0x1031f => true,
0x10320...0x10323 => true,
0x1032d...0x10340 => true,
0x10341 => true,
0x10342...0x10349 => true,
0x1034a => true,
0x10350...0x10375 => true,
0x10376...0x1037a => true,
0x10380...0x1039d => true,
0x1039f => true,
0x103a0...0x103c3 => true,
0x103c8...0x103cf => true,
0x103d0 => true,
0x103d1...0x103d5 => true,
0x10400...0x1044f => true,
0x10450...0x1049d => true,
0x104a0...0x104a9 => true,
0x104b0...0x104d3 => true,
0x104d8...0x104fb => true,
0x10500...0x10527 => true,
0x10530...0x10563 => true,
0x1056f => true,
0x10570...0x1057a => true,
0x1057c...0x1058a => true,
0x1058c...0x10592 => true,
0x10594...0x10595 => true,
0x10597...0x105a1 => true,
0x105a3...0x105b1 => true,
0x105b3...0x105b9 => true,
0x105bb...0x105bc => true,
0x10600...0x10736 => true,
0x10740...0x10755 => true,
0x10760...0x10767 => true,
0x10780...0x10785 => true,
0x10787...0x107b0 => true,
0x107b2...0x107ba => true,
0x10800...0x10805 => true,
0x10808 => true,
0x1080a...0x10835 => true,
0x10837...0x10838 => true,
0x1083c => true,
0x1083f...0x10855 => true,
0x10857 => true,
0x10858...0x1085f => true,
0x10860...0x10876 => true,
0x10877...0x10878 => true,
0x10879...0x1087f => true,
0x10880...0x1089e => true,
0x108a7...0x108af => true,
0x108e0...0x108f2 => true,
0x108f4...0x108f5 => true,
0x108fb...0x108ff => true,
0x10900...0x10915 => true,
0x10916...0x1091b => true,
0x1091f => true,
0x10920...0x10939 => true,
0x1093f => true,
0x10980...0x109b7 => true,
0x109bc...0x109bd => true,
0x109be...0x109bf => true,
0x109c0...0x109cf => true,
0x109d2...0x109ff => true,
0x10a00 => true,
0x10a01...0x10a03 => true,
0x10a05...0x10a06 => true,
0x10a0c...0x10a0f => true,
0x10a10...0x10a13 => true,
0x10a15...0x10a17 => true,
0x10a19...0x10a35 => true,
0x10a38...0x10a3a => true,
0x10a3f => true,
0x10a40...0x10a48 => true,
0x10a50...0x10a58 => true,
0x10a60...0x10a7c => true,
0x10a7d...0x10a7e => true,
0x10a7f => true,
0x10a80...0x10a9c => true,
0x10a9d...0x10a9f => true,
0x10ac0...0x10ac7 => true,
0x10ac8 => true,
0x10ac9...0x10ae4 => true,
0x10ae5...0x10ae6 => true,
0x10aeb...0x10aef => true,
0x10af0...0x10af6 => true,
0x10b00...0x10b35 => true,
0x10b39...0x10b3f => true,
0x10b40...0x10b55 => true,
0x10b58...0x10b5f => true,
0x10b60...0x10b72 => true,
0x10b78...0x10b7f => true,
0x10b80...0x10b91 => true,
0x10b99...0x10b9c => true,
0x10ba9...0x10baf => true,
0x10c00...0x10c48 => true,
0x10c80...0x10cb2 => true,
0x10cc0...0x10cf2 => true,
0x10cfa...0x10cff => true,
0x10d00...0x10d23 => true,
0x10d24...0x10d27 => true,
0x10d30...0x10d39 => true,
0x10e60...0x10e7e => true,
0x10e80...0x10ea9 => true,
0x10eab...0x10eac => true,
0x10ead => true,
0x10eb0...0x10eb1 => true,
0x10f00...0x10f1c => true,
0x10f1d...0x10f26 => true,
0x10f27 => true,
0x10f30...0x10f45 => true,
0x10f46...0x10f50 => true,
0x10f51...0x10f54 => true,
0x10f55...0x10f59 => true,
0x10f70...0x10f81 => true,
0x10f82...0x10f85 => true,
0x10f86...0x10f89 => true,
0x10fb0...0x10fc4 => true,
0x10fc5...0x10fcb => true,
0x10fe0...0x10ff6 => true,
0x11000 => true,
0x11001 => true,
0x11002 => true,
0x11003...0x11037 => true,
0x11038...0x11046 => true,
0x11047...0x1104d => true,
0x11052...0x11065 => true,
0x11066...0x1106f => true,
0x11070 => true,
0x11071...0x11072 => true,
0x11073...0x11074 => true,
0x11075 => true,
0x1107f...0x11081 => true,
0x11082 => true,
0x11083...0x110af => true,
0x110b0...0x110b2 => true,
0x110b3...0x110b6 => true,
0x110b7...0x110b8 => true,
0x110b9...0x110ba => true,
0x110bb...0x110bc => true,
0x110bd => true,
0x110be...0x110c1 => true,
0x110c2 => true,
0x110cd => true,
0x110d0...0x110e8 => true,
0x110f0...0x110f9 => true,
0x11100...0x11102 => true,
0x11103...0x11126 => true,
0x11127...0x1112b => true,
0x1112c => true,
0x1112d...0x11134 => true,
0x11136...0x1113f => true,
0x11140...0x11143 => true,
0x11144 => true,
0x11145...0x11146 => true,
0x11147 => true,
0x11150...0x11172 => true,
0x11173 => true,
0x11174...0x11175 => true,
0x11176 => true,
0x11180...0x11181 => true,
0x11182 => true,
0x11183...0x111b2 => true,
0x111b3...0x111b5 => true,
0x111b6...0x111be => true,
0x111bf...0x111c0 => true,
0x111c1...0x111c4 => true,
0x111c5...0x111c8 => true,
0x111c9...0x111cc => true,
0x111cd => true,
0x111ce => true,
0x111cf => true,
0x111d0...0x111d9 => true,
0x111da => true,
0x111db => true,
0x111dc => true,
0x111dd...0x111df => true,
0x111e1...0x111f4 => true,
0x11200...0x11211 => true,
0x11213...0x1122b => true,
0x1122c...0x1122e => true,
0x1122f...0x11231 => true,
0x11232...0x11233 => true,
0x11234 => true,
0x11235 => true,
0x11236...0x11237 => true,
0x11238...0x1123d => true,
0x1123e => true,
0x11280...0x11286 => true,
0x11288 => true,
0x1128a...0x1128d => true,
0x1128f...0x1129d => true,
0x1129f...0x112a8 => true,
0x112a9 => true,
0x112b0...0x112de => true,
0x112df => true,
0x112e0...0x112e2 => true,
0x112e3...0x112ea => true,
0x112f0...0x112f9 => true,
0x11300...0x11301 => true,
0x11302...0x11303 => true,
0x11305...0x1130c => true,
0x1130f...0x11310 => true,
0x11313...0x11328 => true,
0x1132a...0x11330 => true,
0x11332...0x11333 => true,
0x11335...0x11339 => true,
0x1133b...0x1133c => true,
0x1133d => true,
0x1133e...0x1133f => true,
0x11340 => true,
0x11341...0x11344 => true,
0x11347...0x11348 => true,
0x1134b...0x1134d => true,
0x11350 => true,
0x11357 => true,
0x1135d...0x11361 => true,
0x11362...0x11363 => true,
0x11366...0x1136c => true,
0x11370...0x11374 => true,
0x11400...0x11434 => true,
0x11435...0x11437 => true,
0x11438...0x1143f => true,
0x11440...0x11441 => true,
0x11442...0x11444 => true,
0x11445 => true,
0x11446 => true,
0x11447...0x1144a => true,
0x1144b...0x1144f => true,
0x11450...0x11459 => true,
0x1145a...0x1145b => true,
0x1145d => true,
0x1145e => true,
0x1145f...0x11461 => true,
0x11480...0x114af => true,
0x114b0...0x114b2 => true,
0x114b3...0x114b8 => true,
0x114b9 => true,
0x114ba => true,
0x114bb...0x114be => true,
0x114bf...0x114c0 => true,
0x114c1 => true,
0x114c2...0x114c3 => true,
0x114c4...0x114c5 => true,
0x114c6 => true,
0x114c7 => true,
0x114d0...0x114d9 => true,
0x11580...0x115ae => true,
0x115af...0x115b1 => true,
0x115b2...0x115b5 => true,
0x115b8...0x115bb => true,
0x115bc...0x115bd => true,
0x115be => true,
0x115bf...0x115c0 => true,
0x115c1...0x115d7 => true,
0x115d8...0x115db => true,
0x115dc...0x115dd => true,
0x11600...0x1162f => true,
0x11630...0x11632 => true,
0x11633...0x1163a => true,
0x1163b...0x1163c => true,
0x1163d => true,
0x1163e => true,
0x1163f...0x11640 => true,
0x11641...0x11643 => true,
0x11644 => true,
0x11650...0x11659 => true,
0x11660...0x1166c => true,
0x11680...0x116aa => true,
0x116ab => true,
0x116ac => true,
0x116ad => true,
0x116ae...0x116af => true,
0x116b0...0x116b5 => true,
0x116b6 => true,
0x116b7 => true,
0x116b8 => true,
0x116b9 => true,
0x116c0...0x116c9 => true,
0x11700...0x1171a => true,
0x1171d...0x1171f => true,
0x11720...0x11721 => true,
0x11722...0x11725 => true,
0x11726 => true,
0x11727...0x1172b => true,
0x11730...0x11739 => true,
0x1173a...0x1173b => true,
0x1173c...0x1173e => true,
0x1173f => true,
0x11740...0x11746 => true,
0x11800...0x1182b => true,
0x1182c...0x1182e => true,
0x1182f...0x11837 => true,
0x11838 => true,
0x11839...0x1183a => true,
0x1183b => true,
0x118a0...0x118df => true,
0x118e0...0x118e9 => true,
0x118ea...0x118f2 => true,
0x118ff...0x11906 => true,
0x11909 => true,
0x1190c...0x11913 => true,
0x11915...0x11916 => true,
0x11918...0x1192f => true,
0x11930...0x11935 => true,
0x11937...0x11938 => true,
0x1193b...0x1193c => true,
0x1193d => true,
0x1193e => true,
0x1193f => true,
0x11940 => true,
0x11941 => true,
0x11942 => true,
0x11943 => true,
0x11944...0x11946 => true,
0x11950...0x11959 => true,
0x119a0...0x119a7 => true,
0x119aa...0x119d0 => true,
0x119d1...0x119d3 => true,
0x119d4...0x119d7 => true,
0x119da...0x119db => true,
0x119dc...0x119df => true,
0x119e0 => true,
0x119e1 => true,
0x119e2 => true,
0x119e3 => true,
0x119e4 => true,
0x11a00 => true,
0x11a01...0x11a0a => true,
0x11a0b...0x11a32 => true,
0x11a33...0x11a38 => true,
0x11a39 => true,
0x11a3a => true,
0x11a3b...0x11a3e => true,
0x11a3f...0x11a46 => true,
0x11a47 => true,
0x11a50 => true,
0x11a51...0x11a56 => true,
0x11a57...0x11a58 => true,
0x11a59...0x11a5b => true,
0x11a5c...0x11a89 => true,
0x11a8a...0x11a96 => true,
0x11a97 => true,
0x11a98...0x11a99 => true,
0x11a9a...0x11a9c => true,
0x11a9d => true,
0x11a9e...0x11aa2 => true,
0x11ab0...0x11af8 => true,
0x11c00...0x11c08 => true,
0x11c0a...0x11c2e => true,
0x11c2f => true,
0x11c30...0x11c36 => true,
0x11c38...0x11c3d => true,
0x11c3e => true,
0x11c3f => true,
0x11c40 => true,
0x11c41...0x11c45 => true,
0x11c50...0x11c59 => true,
0x11c5a...0x11c6c => true,
0x11c70...0x11c71 => true,
0x11c72...0x11c8f => true,
0x11c92...0x11ca7 => true,
0x11ca9 => true,
0x11caa...0x11cb0 => true,
0x11cb1 => true,
0x11cb2...0x11cb3 => true,
0x11cb4 => true,
0x11cb5...0x11cb6 => true,
0x11d00...0x11d06 => true,
0x11d08...0x11d09 => true,
0x11d0b...0x11d30 => true,
0x11d31...0x11d36 => true,
0x11d3a => true,
0x11d3c...0x11d3d => true,
0x11d3f...0x11d45 => true,
0x11d46 => true,
0x11d47 => true,
0x11d50...0x11d59 => true,
0x11d60...0x11d65 => true,
0x11d67...0x11d68 => true,
0x11d6a...0x11d89 => true,
0x11d8a...0x11d8e => true,
0x11d90...0x11d91 => true,
0x11d93...0x11d94 => true,
0x11d95 => true,
0x11d96 => true,
0x11d97 => true,
0x11d98 => true,
0x11da0...0x11da9 => true,
0x11ee0...0x11ef2 => true,
0x11ef3...0x11ef4 => true,
0x11ef5...0x11ef6 => true,
0x11ef7...0x11ef8 => true,
0x11fb0 => true,
0x11fc0...0x11fd4 => true,
0x11fd5...0x11fdc => true,
0x11fdd...0x11fe0 => true,
0x11fe1...0x11ff1 => true,
0x11fff => true,
0x12000...0x12399 => true,
0x12400...0x1246e => true,
0x12470...0x12474 => true,
0x12480...0x12543 => true,
0x12f90...0x12ff0 => true,
0x12ff1...0x12ff2 => true,
0x13000...0x1342e => true,
0x13430...0x13438 => true,
0x14400...0x14646 => true,
0x16800...0x16a38 => true,
0x16a40...0x16a5e => true,
0x16a60...0x16a69 => true,
0x16a6e...0x16a6f => true,
0x16a70...0x16abe => true,
0x16ac0...0x16ac9 => true,
0x16ad0...0x16aed => true,
0x16af0...0x16af4 => true,
0x16af5 => true,
0x16b00...0x16b2f => true,
0x16b30...0x16b36 => true,
0x16b37...0x16b3b => true,
0x16b3c...0x16b3f => true,
0x16b40...0x16b43 => true,
0x16b44 => true,
0x16b45 => true,
0x16b50...0x16b59 => true,
0x16b5b...0x16b61 => true,
0x16b63...0x16b77 => true,
0x16b7d...0x16b8f => true,
0x16e40...0x16e7f => true,
0x16e80...0x16e96 => true,
0x16e97...0x16e9a => true,
0x16f00...0x16f4a => true,
0x16f4f => true,
0x16f50 => true,
0x16f51...0x16f87 => true,
0x16f8f...0x16f92 => true,
0x16f93...0x16f9f => true,
0x1bc00...0x1bc6a => true,
0x1bc70...0x1bc7c => true,
0x1bc80...0x1bc88 => true,
0x1bc90...0x1bc99 => true,
0x1bc9c => true,
0x1bc9d...0x1bc9e => true,
0x1bc9f => true,
0x1bca0...0x1bca3 => true,
0x1cf00...0x1cf2d => true,
0x1cf30...0x1cf46 => true,
0x1cf50...0x1cfc3 => true,
0x1d000...0x1d0f5 => true,
0x1d100...0x1d126 => true,
0x1d129...0x1d164 => true,
0x1d165...0x1d166 => true,
0x1d167...0x1d169 => true,
0x1d16a...0x1d16c => true,
0x1d16d...0x1d172 => true,
0x1d173...0x1d17a => true,
0x1d17b...0x1d182 => true,
0x1d183...0x1d184 => true,
0x1d185...0x1d18b => true,
0x1d18c...0x1d1a9 => true,
0x1d1aa...0x1d1ad => true,
0x1d1ae...0x1d1ea => true,
0x1d200...0x1d241 => true,
0x1d242...0x1d244 => true,
0x1d245 => true,
0x1d2e0...0x1d2f3 => true,
0x1d300...0x1d356 => true,
0x1d360...0x1d378 => true,
0x1d400...0x1d454 => true,
0x1d456...0x1d49c => true,
0x1d49e...0x1d49f => true,
0x1d4a2 => true,
0x1d4a5...0x1d4a6 => true,
0x1d4a9...0x1d4ac => true,
0x1d4ae...0x1d4b9 => true,
0x1d4bb => true,
0x1d4bd...0x1d4c3 => true,
0x1d4c5...0x1d505 => true,
0x1d507...0x1d50a => true,
0x1d50d...0x1d514 => true,
0x1d516...0x1d51c => true,
0x1d51e...0x1d539 => true,
0x1d53b...0x1d53e => true,
0x1d540...0x1d544 => true,
0x1d546 => true,
0x1d54a...0x1d550 => true,
0x1d552...0x1d6a5 => true,
0x1d6a8...0x1d6c0 => true,
0x1d6c1 => true,
0x1d6c2...0x1d6da => true,
0x1d6db => true,
0x1d6dc...0x1d6fa => true,
0x1d6fb => true,
0x1d6fc...0x1d714 => true,
0x1d715 => true,
0x1d716...0x1d734 => true,
0x1d735 => true,
0x1d736...0x1d74e => true,
0x1d74f => true,
0x1d750...0x1d76e => true,
0x1d76f => true,
0x1d770...0x1d788 => true,
0x1d789 => true,
0x1d78a...0x1d7a8 => true,
0x1d7a9 => true,
0x1d7aa...0x1d7c2 => true,
0x1d7c3 => true,
0x1d7c4...0x1d7cb => true,
0x1d7ce...0x1d7ff => true,
0x1d800...0x1d9ff => true,
0x1da00...0x1da36 => true,
0x1da37...0x1da3a => true,
0x1da3b...0x1da6c => true,
0x1da6d...0x1da74 => true,
0x1da75 => true,
0x1da76...0x1da83 => true,
0x1da84 => true,
0x1da85...0x1da86 => true,
0x1da87...0x1da8b => true,
0x1da9b...0x1da9f => true,
0x1daa1...0x1daaf => true,
0x1df00...0x1df09 => true,
0x1df0a => true,
0x1df0b...0x1df1e => true,
0x1e000...0x1e006 => true,
0x1e008...0x1e018 => true,
0x1e01b...0x1e021 => true,
0x1e023...0x1e024 => true,
0x1e026...0x1e02a => true,
0x1e100...0x1e12c => true,
0x1e130...0x1e136 => true,
0x1e137...0x1e13d => true,
0x1e140...0x1e149 => true,
0x1e14e => true,
0x1e14f => true,
0x1e290...0x1e2ad => true,
0x1e2ae => true,
0x1e2c0...0x1e2eb => true,
0x1e2ec...0x1e2ef => true,
0x1e2f0...0x1e2f9 => true,
0x1e2ff => true,
0x1e7e0...0x1e7e6 => true,
0x1e7e8...0x1e7eb => true,
0x1e7ed...0x1e7ee => true,
0x1e7f0...0x1e7fe => true,
0x1e800...0x1e8c4 => true,
0x1e8c7...0x1e8cf => true,
0x1e8d0...0x1e8d6 => true,
0x1e900...0x1e943 => true,
0x1e944...0x1e94a => true,
0x1e94b => true,
0x1e950...0x1e959 => true,
0x1e95e...0x1e95f => true,
0x1ec71...0x1ecab => true,
0x1ecac => true,
0x1ecad...0x1ecaf => true,
0x1ecb0 => true,
0x1ecb1...0x1ecb4 => true,
0x1ed01...0x1ed2d => true,
0x1ed2e => true,
0x1ed2f...0x1ed3d => true,
0x1ee00...0x1ee03 => true,
0x1ee05...0x1ee1f => true,
0x1ee21...0x1ee22 => true,
0x1ee24 => true,
0x1ee27 => true,
0x1ee29...0x1ee32 => true,
0x1ee34...0x1ee37 => true,
0x1ee39 => true,
0x1ee3b => true,
0x1ee42 => true,
0x1ee47 => true,
0x1ee49 => true,
0x1ee4b => true,
0x1ee4d...0x1ee4f => true,
0x1ee51...0x1ee52 => true,
0x1ee54 => true,
0x1ee57 => true,
0x1ee59 => true,
0x1ee5b => true,
0x1ee5d => true,
0x1ee5f => true,
0x1ee61...0x1ee62 => true,
0x1ee64 => true,
0x1ee67...0x1ee6a => true,
0x1ee6c...0x1ee72 => true,
0x1ee74...0x1ee77 => true,
0x1ee79...0x1ee7c => true,
0x1ee7e => true,
0x1ee80...0x1ee89 => true,
0x1ee8b...0x1ee9b => true,
0x1eea1...0x1eea3 => true,
0x1eea5...0x1eea9 => true,
0x1eeab...0x1eebb => true,
0x1eef0...0x1eef1 => true,
0x1f000...0x1f003 => true,
0x1f005...0x1f02b => true,
0x1f030...0x1f093 => true,
0x1f0a0...0x1f0ae => true,
0x1f0b1...0x1f0bf => true,
0x1f0c1...0x1f0ce => true,
0x1f0d1...0x1f0f5 => true,
0x1f10b...0x1f10c => true,
0x1f10d...0x1f10f => true,
0x1f12e...0x1f12f => true,
0x1f16a...0x1f16f => true,
0x1f1ad => true,
0x1f1e6...0x1f1ff => true,
0x1f321...0x1f32c => true,
0x1f336 => true,
0x1f37d => true,
0x1f394...0x1f39f => true,
0x1f3cb...0x1f3ce => true,
0x1f3d4...0x1f3df => true,
0x1f3f1...0x1f3f3 => true,
0x1f3f5...0x1f3f7 => true,
0x1f43f => true,
0x1f441 => true,
0x1f4fd...0x1f4fe => true,
0x1f53e...0x1f54a => true,
0x1f54f => true,
0x1f568...0x1f579 => true,
0x1f57b...0x1f594 => true,
0x1f597...0x1f5a3 => true,
0x1f5a5...0x1f5fa => true,
0x1f650...0x1f67f => true,
0x1f6c6...0x1f6cb => true,
0x1f6cd...0x1f6cf => true,
0x1f6d3...0x1f6d4 => true,
0x1f6e0...0x1f6ea => true,
0x1f6f0...0x1f6f3 => true,
0x1f700...0x1f773 => true,
0x1f780...0x1f7d8 => true,
0x1f800...0x1f80b => true,
0x1f810...0x1f847 => true,
0x1f850...0x1f859 => true,
0x1f860...0x1f887 => true,
0x1f890...0x1f8ad => true,
0x1f8b0...0x1f8b1 => true,
0x1f900...0x1f90b => true,
0x1f93b => true,
0x1f946 => true,
0x1fa00...0x1fa53 => true,
0x1fa60...0x1fa6d => true,
0x1fb00...0x1fb92 => true,
0x1fb94...0x1fbca => true,
0x1fbf0...0x1fbf9 => true,
0xe0001 => true,
0xe0020...0xe007f => true,
else => false,
};
}
/// Reports whether `cp` has the Unicode East_Asian_Width property value
/// "Ambiguous" (A): characters rendered narrow or wide depending on context
/// (typically wide in East Asian legacy encodings, narrow elsewhere).
/// NOTE(review): table appears autogenerated from Unicode EastAsianWidth
/// data — regenerate rather than hand-edit.
pub fn isAmbiguous(cp: u21) bool {
// Fast range reject: every Ambiguous code point listed below lies in [0xa1, 0x10fffd].
if (cp < 0xa1 or cp > 0x10fffd) return false;
return switch (cp) {
0xa1 => true,
0xa4 => true,
0xa7 => true,
0xa8 => true,
0xaa => true,
0xad => true,
0xae => true,
0xb0 => true,
0xb1 => true,
0xb2...0xb3 => true,
0xb4 => true,
0xb6...0xb7 => true,
0xb8 => true,
0xb9 => true,
0xba => true,
0xbc...0xbe => true,
0xbf => true,
0xc6 => true,
0xd0 => true,
0xd7 => true,
0xd8 => true,
0xde...0xe1 => true,
0xe6 => true,
0xe8...0xea => true,
0xec...0xed => true,
0xf0 => true,
0xf2...0xf3 => true,
0xf7 => true,
0xf8...0xfa => true,
0xfc => true,
0xfe => true,
0x101 => true,
0x111 => true,
0x113 => true,
0x11b => true,
0x126...0x127 => true,
0x12b => true,
0x131...0x133 => true,
0x138 => true,
0x13f...0x142 => true,
0x144 => true,
0x148...0x14b => true,
0x14d => true,
0x152...0x153 => true,
0x166...0x167 => true,
0x16b => true,
0x1ce => true,
0x1d0 => true,
0x1d2 => true,
0x1d4 => true,
0x1d6 => true,
0x1d8 => true,
0x1da => true,
0x1dc => true,
0x251 => true,
0x261 => true,
0x2c4 => true,
0x2c7 => true,
0x2c9...0x2cb => true,
0x2cd => true,
0x2d0 => true,
0x2d8...0x2db => true,
0x2dd => true,
0x2df => true,
0x300...0x36f => true,
0x391...0x3a1 => true,
0x3a3...0x3a9 => true,
0x3b1...0x3c1 => true,
0x3c3...0x3c9 => true,
0x401 => true,
0x410...0x44f => true,
0x451 => true,
0x2010 => true,
0x2013...0x2015 => true,
0x2016 => true,
0x2018 => true,
0x2019 => true,
0x201c => true,
0x201d => true,
0x2020...0x2022 => true,
0x2024...0x2027 => true,
0x2030 => true,
0x2032...0x2033 => true,
0x2035 => true,
0x203b => true,
0x203e => true,
0x2074 => true,
0x207f => true,
0x2081...0x2084 => true,
0x20ac => true,
0x2103 => true,
0x2105 => true,
0x2109 => true,
0x2113 => true,
0x2116 => true,
0x2121...0x2122 => true,
0x2126 => true,
0x212b => true,
0x2153...0x2154 => true,
0x215b...0x215e => true,
0x2160...0x216b => true,
0x2170...0x2179 => true,
0x2189 => true,
0x2190...0x2194 => true,
0x2195...0x2199 => true,
0x21b8...0x21b9 => true,
0x21d2 => true,
0x21d4 => true,
0x21e7 => true,
0x2200 => true,
0x2202...0x2203 => true,
0x2207...0x2208 => true,
0x220b => true,
0x220f => true,
0x2211 => true,
0x2215 => true,
0x221a => true,
0x221d...0x2220 => true,
0x2223 => true,
0x2225 => true,
0x2227...0x222c => true,
0x222e => true,
0x2234...0x2237 => true,
0x223c...0x223d => true,
0x2248 => true,
0x224c => true,
0x2252 => true,
0x2260...0x2261 => true,
0x2264...0x2267 => true,
0x226a...0x226b => true,
0x226e...0x226f => true,
0x2282...0x2283 => true,
0x2286...0x2287 => true,
0x2295 => true,
0x2299 => true,
0x22a5 => true,
0x22bf => true,
0x2312 => true,
0x2460...0x249b => true,
0x249c...0x24e9 => true,
0x24eb...0x24ff => true,
0x2500...0x254b => true,
0x2550...0x2573 => true,
0x2580...0x258f => true,
0x2592...0x2595 => true,
0x25a0...0x25a1 => true,
0x25a3...0x25a9 => true,
0x25b2...0x25b3 => true,
0x25b6 => true,
0x25b7 => true,
0x25bc...0x25bd => true,
0x25c0 => true,
0x25c1 => true,
0x25c6...0x25c8 => true,
0x25cb => true,
0x25ce...0x25d1 => true,
0x25e2...0x25e5 => true,
0x25ef => true,
0x2605...0x2606 => true,
0x2609 => true,
0x260e...0x260f => true,
0x261c => true,
0x261e => true,
0x2640 => true,
0x2642 => true,
0x2660...0x2661 => true,
0x2663...0x2665 => true,
0x2667...0x266a => true,
0x266c...0x266d => true,
0x266f => true,
0x269e...0x269f => true,
0x26bf => true,
0x26c6...0x26cd => true,
0x26cf...0x26d3 => true,
0x26d5...0x26e1 => true,
0x26e3 => true,
0x26e8...0x26e9 => true,
0x26eb...0x26f1 => true,
0x26f4 => true,
0x26f6...0x26f9 => true,
0x26fb...0x26fc => true,
0x26fe...0x26ff => true,
0x273d => true,
0x2776...0x277f => true,
0x2b56...0x2b59 => true,
0x3248...0x324f => true,
0xe000...0xf8ff => true,
0xfe00...0xfe0f => true,
0xfffd => true,
0x1f100...0x1f10a => true,
0x1f110...0x1f12d => true,
0x1f130...0x1f169 => true,
0x1f170...0x1f18d => true,
0x1f18f...0x1f190 => true,
0x1f19b...0x1f1ac => true,
0xe0100...0xe01ef => true,
0xf0000...0xffffd => true,
0x100000...0x10fffd => true,
else => false,
};
}
/// Reports whether `cp` has the Unicode East_Asian_Width property value
/// "Halfwidth" (H). Adjacent single-code-point arms of the generated table
/// are collapsed into contiguous ranges; the accepted set is unchanged.
pub fn isHalfwidth(cp: u21) bool {
    // Fast range reject: all Halfwidth code points lie in [0x20a9, 0xffee].
    if (cp < 0x20a9 or cp > 0xffee) return false;
    return switch (cp) {
        0x20a9 => true, // WON SIGN
        0xff61...0xffbe => true, // halfwidth punctuation, katakana, hangul jamo
        0xffc2...0xffc7 => true,
        0xffca...0xffcf => true,
        0xffd2...0xffd7 => true,
        0xffda...0xffdc => true,
        0xffe8...0xffee => true, // halfwidth forms and arrows
        else => false,
    };
}
/// Reports whether `cp` has the Unicode East_Asian_Width property value
/// "Wide" (W): characters that always occupy two cells in East Asian
/// fixed-width rendering (CJK ideographs, kana, hangul syllables, most emoji).
/// NOTE(review): table appears autogenerated from Unicode EastAsianWidth
/// data — regenerate rather than hand-edit.
pub fn isWide(cp: u21) bool {
// Fast range reject: every Wide code point listed below lies in [0x1100, 0x3fffd].
if (cp < 0x1100 or cp > 0x3fffd) return false;
return switch (cp) {
0x1100...0x115f => true,
0x231a...0x231b => true,
0x2329 => true,
0x232a => true,
0x23e9...0x23ec => true,
0x23f0 => true,
0x23f3 => true,
0x25fd...0x25fe => true,
0x2614...0x2615 => true,
0x2648...0x2653 => true,
0x267f => true,
0x2693 => true,
0x26a1 => true,
0x26aa...0x26ab => true,
0x26bd...0x26be => true,
0x26c4...0x26c5 => true,
0x26ce => true,
0x26d4 => true,
0x26ea => true,
0x26f2...0x26f3 => true,
0x26f5 => true,
0x26fa => true,
0x26fd => true,
0x2705 => true,
0x270a...0x270b => true,
0x2728 => true,
0x274c => true,
0x274e => true,
0x2753...0x2755 => true,
0x2757 => true,
0x2795...0x2797 => true,
0x27b0 => true,
0x27bf => true,
0x2b1b...0x2b1c => true,
0x2b50 => true,
0x2b55 => true,
0x2e80...0x2e99 => true,
0x2e9b...0x2ef3 => true,
0x2f00...0x2fd5 => true,
0x2ff0...0x2ffb => true,
0x3001...0x3003 => true,
0x3004 => true,
0x3005 => true,
0x3006 => true,
0x3007 => true,
0x3008 => true,
0x3009 => true,
0x300a => true,
0x300b => true,
0x300c => true,
0x300d => true,
0x300e => true,
0x300f => true,
0x3010 => true,
0x3011 => true,
0x3012...0x3013 => true,
0x3014 => true,
0x3015 => true,
0x3016 => true,
0x3017 => true,
0x3018 => true,
0x3019 => true,
0x301a => true,
0x301b => true,
0x301c => true,
0x301d => true,
0x301e...0x301f => true,
0x3020 => true,
0x3021...0x3029 => true,
0x302a...0x302d => true,
0x302e...0x302f => true,
0x3030 => true,
0x3031...0x3035 => true,
0x3036...0x3037 => true,
0x3038...0x303a => true,
0x303b => true,
0x303c => true,
0x303d => true,
0x303e => true,
0x3041...0x3096 => true,
0x3099...0x309a => true,
0x309b...0x309c => true,
0x309d...0x309e => true,
0x309f => true,
0x30a0 => true,
0x30a1...0x30fa => true,
0x30fb => true,
0x30fc...0x30fe => true,
0x30ff => true,
0x3105...0x312f => true,
0x3131...0x318e => true,
0x3190...0x3191 => true,
0x3192...0x3195 => true,
0x3196...0x319f => true,
0x31a0...0x31bf => true,
0x31c0...0x31e3 => true,
0x31f0...0x31ff => true,
0x3200...0x321e => true,
0x3220...0x3229 => true,
0x322a...0x3247 => true,
0x3250 => true,
0x3251...0x325f => true,
0x3260...0x327f => true,
0x3280...0x3289 => true,
0x328a...0x32b0 => true,
0x32b1...0x32bf => true,
0x32c0...0x33ff => true,
0x3400...0x4dbf => true,
0x4e00...0xa014 => true,
0xa015 => true,
0xa016...0xa48c => true,
0xa490...0xa4c6 => true,
0xa960...0xa97c => true,
0xac00...0xd7a3 => true,
0xf900...0xfa6d => true,
0xfa6e...0xfa6f => true,
0xfa70...0xfad9 => true,
0xfada...0xfaff => true,
0xfe10...0xfe16 => true,
0xfe17 => true,
0xfe18 => true,
0xfe19 => true,
0xfe30 => true,
0xfe31...0xfe32 => true,
0xfe33...0xfe34 => true,
0xfe35 => true,
0xfe36 => true,
0xfe37 => true,
0xfe38 => true,
0xfe39 => true,
0xfe3a => true,
0xfe3b => true,
0xfe3c => true,
0xfe3d => true,
0xfe3e => true,
0xfe3f => true,
0xfe40 => true,
0xfe41 => true,
0xfe42 => true,
0xfe43 => true,
0xfe44 => true,
0xfe45...0xfe46 => true,
0xfe47 => true,
0xfe48 => true,
0xfe49...0xfe4c => true,
0xfe4d...0xfe4f => true,
0xfe50...0xfe52 => true,
0xfe54...0xfe57 => true,
0xfe58 => true,
0xfe59 => true,
0xfe5a => true,
0xfe5b => true,
0xfe5c => true,
0xfe5d => true,
0xfe5e => true,
0xfe5f...0xfe61 => true,
0xfe62 => true,
0xfe63 => true,
0xfe64...0xfe66 => true,
0xfe68 => true,
0xfe69 => true,
0xfe6a...0xfe6b => true,
0x16fe0...0x16fe1 => true,
0x16fe2 => true,
0x16fe3 => true,
0x16fe4 => true,
0x16ff0...0x16ff1 => true,
0x17000...0x187f7 => true,
0x18800...0x18cd5 => true,
0x18d00...0x18d08 => true,
0x1aff0...0x1aff3 => true,
0x1aff5...0x1affb => true,
0x1affd...0x1affe => true,
0x1b000...0x1b122 => true,
0x1b150...0x1b152 => true,
0x1b164...0x1b167 => true,
0x1b170...0x1b2fb => true,
0x1f004 => true,
0x1f0cf => true,
0x1f18e => true,
0x1f191...0x1f19a => true,
0x1f200...0x1f202 => true,
0x1f210...0x1f23b => true,
0x1f240...0x1f248 => true,
0x1f250...0x1f251 => true,
0x1f260...0x1f265 => true,
0x1f300...0x1f320 => true,
0x1f32d...0x1f335 => true,
0x1f337...0x1f37c => true,
0x1f37e...0x1f393 => true,
0x1f3a0...0x1f3ca => true,
0x1f3cf...0x1f3d3 => true,
0x1f3e0...0x1f3f0 => true,
0x1f3f4 => true,
0x1f3f8...0x1f3fa => true,
0x1f3fb...0x1f3ff => true,
0x1f400...0x1f43e => true,
0x1f440 => true,
0x1f442...0x1f4fc => true,
0x1f4ff...0x1f53d => true,
0x1f54b...0x1f54e => true,
0x1f550...0x1f567 => true,
0x1f57a => true,
0x1f595...0x1f596 => true,
0x1f5a4 => true,
0x1f5fb...0x1f64f => true,
0x1f680...0x1f6c5 => true,
0x1f6cc => true,
0x1f6d0...0x1f6d2 => true,
0x1f6d5...0x1f6d7 => true,
0x1f6dd...0x1f6df => true,
0x1f6eb...0x1f6ec => true,
0x1f6f4...0x1f6fc => true,
0x1f7e0...0x1f7eb => true,
0x1f7f0 => true,
0x1f90c...0x1f93a => true,
0x1f93c...0x1f945 => true,
0x1f947...0x1f9ff => true,
0x1fa70...0x1fa74 => true,
0x1fa78...0x1fa7c => true,
0x1fa80...0x1fa86 => true,
0x1fa90...0x1faac => true,
0x1fab0...0x1faba => true,
0x1fac0...0x1fac5 => true,
0x1fad0...0x1fad9 => true,
0x1fae0...0x1fae7 => true,
0x1faf0...0x1faf6 => true,
0x20000...0x2a6df => true,
0x2a6e0...0x2a6ff => true,
0x2a700...0x2b738 => true,
0x2b739...0x2b73f => true,
0x2b740...0x2b81d => true,
0x2b81e...0x2b81f => true,
0x2b820...0x2cea1 => true,
0x2cea2...0x2ceaf => true,
0x2ceb0...0x2ebe0 => true,
0x2ebe1...0x2f7ff => true,
0x2f800...0x2fa1d => true,
0x2fa1e...0x2fffd => true,
0x30000...0x3134a => true,
0x3134b...0x3fffd => true,
else => false,
};
}
/// Reports whether `cp` has the Unicode East_Asian_Width property value
/// "Fullwidth" (F). The generated table's adjacent single-code-point arms
/// are collapsed into contiguous ranges; the accepted set is unchanged.
pub fn isFullwidth(cp: u21) bool {
    // Fast range reject: all Fullwidth code points lie in [0x3000, 0xffe6].
    if (cp < 0x3000 or cp > 0xffe6) return false;
    return switch (cp) {
        0x3000 => true, // IDEOGRAPHIC SPACE
        0xff01...0xff60 => true, // fullwidth ASCII variants
        0xffe0...0xffe6 => true, // fullwidth signs
        else => false,
    };
}
/// Reports whether `cp` has the Unicode East_Asian_Width property value
/// "Narrow" (Na). The generated table's adjacent single-code-point arms
/// are collapsed into contiguous ranges; the accepted set is unchanged.
pub fn isNarrow(cp: u21) bool {
    // Fast range reject: all Narrow code points lie in [0x20, 0x2986].
    if (cp < 0x20 or cp > 0x2986) return false;
    return switch (cp) {
        0x20...0x7e => true, // printable ASCII
        0xa2...0xa3 => true, // cent sign, pound sign
        0xa5...0xa6 => true, // yen sign, broken bar
        0xac => true, // not sign
        0xaf => true, // macron
        0x27e6...0x27ed => true, // mathematical angle/square brackets
        0x2985...0x2986 => true, // white parentheses
        else => false,
    };
}
|
src/components/autogen/DerivedEastAsianWidth.zig
|
/// Namespace for constants used in the ELF identification bytes
/// (the `e_ident` array at the start of an ELF header).
pub const e_ident = struct {
/// OS/ABI identification values for the `e_ident[EI_OSABI]` byte.
/// Non-exhaustive (`_`): unrecognized ABI values survive `@enumFromInt`
/// instead of being illegal.
pub const EI_OSABI = enum(u8) {
SystemV = 0x00,
HP_UX = 0x01,
NetBSD = 0x02,
Linux = 0x03,
GNU_Hurd = 0x04,
// NOTE(review): 0x05 is unassigned here — matches the gap in the published
// OSABI value list; confirm against the targeted ELF spec revision.
Solaris = 0x06,
AIX = 0x07,
IRIX = 0x08,
FreeBSD = 0x09,
Tru64 = 0x0A,
Novell_Modesto = 0x0B,
OpenBSD = 0x0C,
OpenVMS = 0x0D,
NonStop_Kernel = 0x0E,
AROS = 0x0F,
Fenix_OS = 0x10,
CloudABI = 0x11,
Stratus_VOS = 0x12,
_,
};
};
/// ELF object-file type (the `e_type` field of the ELF header).
/// The LOOS..HIOS and LOPROC..HIPROC pairs delimit the OS-specific and
/// processor-specific reserved ranges; the enum is non-exhaustive (`_`)
/// so values inside those ranges remain representable.
pub const e_type = enum(u16) {
ET_NONE = 0x00, // no file type
ET_REL = 0x01, // relocatable file
ET_EXEC = 0x02, // executable file
ET_DYN = 0x03, // shared object file
ET_CORE = 0x04, // core file
ET_LOOS = 0xFE00, // start of OS-specific range
ET_HIOS = 0xFEFF, // end of OS-specific range
ET_LOPROC = 0xFF00, // start of processor-specific range
ET_HIPROC = 0xFFFF, // end of processor-specific range
_,
};
// code taken from https://github.com/ziglang/zig/blob/77836e08a2384450b5e7933094511b61e3c22140/lib/std/elf.zig#L948
pub const e_machine = enum(u16) {
/// No machine
_NONE = 0,
/// AT&T WE 32100
_M32 = 1,
/// SPARC
_SPARC = 2,
/// Intel 386
_386 = 3,
/// Motorola 68000
_68K = 4,
/// Motorola 88000
_88K = 5,
/// Intel MCU
_IAMCU = 6,
/// Intel 80860
_860 = 7,
/// MIPS R3000
_MIPS = 8,
/// IBM System/370
_S370 = 9,
/// MIPS RS3000 Little-endian
_MIPS_RS3_LE = 10,
/// SPU Mark II
_SPU_2 = 13,
/// Hewlett-Packard PA-RISC
_PARISC = 15,
/// Fujitsu VPP500
_VPP500 = 17,
/// Enhanced instruction set SPARC
_SPARC32PLUS = 18,
/// Intel 80960
_960 = 19,
/// PowerPC
_PPC = 20,
/// PowerPC64
_PPC64 = 21,
/// IBM System/390
_S390 = 22,
/// IBM SPU/SPC
_SPU = 23,
/// NEC V800
_V800 = 36,
/// Fujitsu FR20
_FR20 = 37,
/// TRW RH-32
_RH32 = 38,
/// Motorola RCE
_RCE = 39,
/// ARM
_ARM = 40,
/// DEC Alpha
_ALPHA = 41,
/// Hitachi SH
_SH = 42,
/// SPARC V9
_SPARCV9 = 43,
/// Siemens TriCore
_TRICORE = 44,
/// Argonaut RISC Core
_ARC = 45,
/// Hitachi H8/300
_H8_300 = 46,
/// Hitachi H8/300H
_H8_300H = 47,
/// Hitachi H8S
_H8S = 48,
/// Hitachi H8/500
_H8_500 = 49,
/// Intel IA-64 processor architecture
_IA_64 = 50,
/// Stanford MIPS-X
_MIPS_X = 51,
/// Motorola ColdFire
_COLDFIRE = 52,
/// Motorola M68HC12
_68HC12 = 53,
/// Fujitsu MMA Multimedia Accelerator
_MMA = 54,
/// Siemens PCP
_PCP = 55,
/// Sony nCPU embedded RISC processor
_NCPU = 56,
/// Denso NDR1 microprocessor
_NDR1 = 57,
/// Motorola Star*Core processor
_STARCORE = 58,
/// Toyota ME16 processor
_ME16 = 59,
/// STMicroelectronics ST100 processor
_ST100 = 60,
/// Advanced Logic Corp. TinyJ embedded processor family
_TINYJ = 61,
/// AMD x86-64 architecture
_X86_64 = 62,
/// Sony DSP Processor
_PDSP = 63,
/// Digital Equipment Corp. PDP-10
_PDP10 = 64,
/// Digital Equipment Corp. PDP-11
_PDP11 = 65,
/// Siemens FX66 microcontroller
_FX66 = 66,
/// STMicroelectronics ST9+ 8/16 bit microcontroller
_ST9PLUS = 67,
/// STMicroelectronics ST7 8-bit microcontroller
_ST7 = 68,
/// Motorola MC68HC16 Microcontroller
_68HC16 = 69,
/// Motorola MC68HC11 Microcontroller
_68HC11 = 70,
/// Motorola MC68HC08 Microcontroller
_68HC08 = 71,
/// Motorola MC68HC05 Microcontroller
_68HC05 = 72,
/// Silicon Graphics SVx
_SVX = 73,
/// STMicroelectronics ST19 8-bit microcontroller
_ST19 = 74,
/// Digital VAX
_VAX = 75,
/// Axis Communications 32-bit embedded processor
_CRIS = 76,
/// Infineon Technologies 32-bit embedded processor
_JAVELIN = 77,
/// Element 14 64-bit DSP Processor
_FIREPATH = 78,
/// LSI Logic 16-bit DSP Processor
_ZSP = 79,
/// Donald Knuth's educational 64-bit processor
_MMIX = 80,
/// Harvard University machine-independent object files
_HUANY = 81,
/// SiTera Prism
_PRISM = 82,
/// Atmel AVR 8-bit microcontroller
_AVR = 83,
/// Fujitsu FR30
_FR30 = 84,
/// Mitsubishi D10V
_D10V = 85,
/// Mitsubishi D30V
_D30V = 86,
/// NEC v850
_V850 = 87,
/// Mitsubishi M32R
_M32R = 88,
/// Matsushita MN10300
_MN10300 = 89,
/// Matsushita MN10200
_MN10200 = 90,
/// picoJava
_PJ = 91,
/// OpenRISC 32-bit embedded processor
_OPENRISC = 92,
/// ARC International ARCompact processor (old spelling/synonym: EM_ARC_A5)
_ARC_COMPACT = 93,
/// Tensilica Xtensa Architecture
_XTENSA = 94,
/// Alphamosaic VideoCore processor
_VIDEOCORE = 95,
/// Thompson Multimedia General Purpose Processor
_TMM_GPP = 96,
/// National Semiconductor 32000 series
_NS32K = 97,
/// Tenor Network TPC processor
_TPC = 98,
/// Trebia SNP 1000 processor
_SNP1K = 99,
/// STMicroelectronics (www.st.com) ST200
_ST200 = 100,
/// Ubicom IP2xxx microcontroller family
_IP2K = 101,
/// MAX Processor
_MAX = 102,
/// National Semiconductor CompactRISC microprocessor
_CR = 103,
/// Fujitsu F2MC16
_F2MC16 = 104,
/// Texas Instruments embedded microcontroller msp430
_MSP430 = 105,
/// Analog Devices Blackfin (DSP) processor
_BLACKFIN = 106,
/// S1C33 Family of Seiko Epson processors
_SE_C33 = 107,
/// Sharp embedded microprocessor
_SEP = 108,
/// Arca RISC Microprocessor
_ARCA = 109,
/// Microprocessor series from PKU-Unity Ltd. and MPRC of Peking University
_UNICORE = 110,
/// eXcess: 16/32/64-bit configurable embedded CPU
_EXCESS = 111,
/// Icera Semiconductor Inc. Deep Execution Processor
_DXP = 112,
/// Altera Nios II soft-core processor
_ALTERA_NIOS2 = 113,
/// National Semiconductor CompactRISC CRX
_CRX = 114,
/// Motorola XGATE embedded processor
_XGATE = 115,
/// Infineon C16x/XC16x processor
_C166 = 116,
/// Renesas M16C series microprocessors
_M16C = 117,
/// Microchip Technology dsPIC30F Digital Signal Controller
_DSPIC30F = 118,
/// Freescale Communication Engine RISC core
_CE = 119,
/// Renesas M32C series microprocessors
_M32C = 120,
/// Altium TSK3000 core
_TSK3000 = 131,
/// Freescale RS08 embedded processor
_RS08 = 132,
/// Analog Devices SHARC family of 32-bit DSP processors
_SHARC = 133,
/// Cyan Technology eCOG2 microprocessor
_ECOG2 = 134,
/// Sunplus S+core7 RISC processor
_SCORE7 = 135,
/// New Japan Radio (NJR) 24-bit DSP Processor
_DSP24 = 136,
/// Broadcom VideoCore III processor
_VIDEOCORE3 = 137,
/// RISC processor for Lattice FPGA architecture
_LATTICEMICO32 = 138,
/// Seiko Epson C17 family
_SE_C17 = 139,
/// The Texas Instruments TMS320C6000 DSP family
_TI_C6000 = 140,
/// The Texas Instruments TMS320C2000 DSP family
_TI_C2000 = 141,
/// The Texas Instruments TMS320C55x DSP family
_TI_C5500 = 142,
/// STMicroelectronics 64bit VLIW Data Signal Processor
_MMDSP_PLUS = 160,
/// Cypress M8C microprocessor
_CYPRESS_M8C = 161,
/// Renesas R32C series microprocessors
_R32C = 162,
/// NXP Semiconductors TriMedia architecture family
_TRIMEDIA = 163,
/// Qualcomm Hexagon processor
_HEXAGON = 164,
/// Intel 8051 and variants
_8051 = 165,
/// STMicroelectronics STxP7x family of configurable and extensible RISC processors
_STXP7X = 166,
/// Andes Technology compact code size embedded RISC processor family
_NDS32 = 167,
/// Cyan Technology eCOG1X family
_ECOG1X = 168,
/// Dallas Semiconductor MAXQ30 Core Micro-controllers
_MAXQ30 = 169,
/// New Japan Radio (NJR) 16-bit DSP Processor
_XIMO16 = 170,
/// M2000 Reconfigurable RISC Microprocessor
_MANIK = 171,
/// Cray Inc. NV2 vector architecture
_CRAYNV2 = 172,
/// Renesas RX family
_RX = 173,
/// Imagination Technologies META processor architecture
_METAG = 174,
/// MCST Elbrus general purpose hardware architecture
_MCST_ELBRUS = 175,
/// Cyan Technology eCOG16 family
_ECOG16 = 176,
/// National Semiconductor CompactRISC CR16 16-bit microprocessor
_CR16 = 177,
/// Freescale Extended Time Processing Unit
_ETPU = 178,
/// Infineon Technologies SLE9X core
_SLE9X = 179,
/// Intel L10M
_L10M = 180,
/// Intel K10M
_K10M = 181,
/// ARM AArch64
_AARCH64 = 183,
/// Atmel Corporation 32-bit microprocessor family
_AVR32 = 185,
/// STMicroeletronics STM8 8-bit microcontroller
_STM8 = 186,
/// Tilera TILE64 multicore architecture family
_TILE64 = 187,
/// Tilera TILEPro multicore architecture family
_TILEPRO = 188,
/// NVIDIA CUDA architecture
_CUDA = 190,
/// Tilera TILE-Gx multicore architecture family
_TILEGX = 191,
/// CloudShield architecture family
_CLOUDSHIELD = 192,
/// KIPO-KAIST Core-A 1st generation processor family
_COREA_1ST = 193,
/// KIPO-KAIST Core-A 2nd generation processor family
_COREA_2ND = 194,
/// Synopsys ARCompact V2
_ARC_COMPACT2 = 195,
/// Open8 8-bit RISC soft processor core
_OPEN8 = 196,
/// Renesas RL78 family
_RL78 = 197,
/// Broadcom VideoCore V processor
_VIDEOCORE5 = 198,
/// Renesas 78KOR family
_78KOR = 199,
/// Freescale 56800EX Digital Signal Controller (DSC)
_56800EX = 200,
/// Beyond BA1 CPU architecture
_BA1 = 201,
/// Beyond BA2 CPU architecture
_BA2 = 202,
/// XMOS xCORE processor family
_XCORE = 203,
/// Microchip 8-bit PIC(r) family
_MCHP_PIC = 204,
/// Reserved by Intel
_INTEL205 = 205,
/// Reserved by Intel
_INTEL206 = 206,
/// Reserved by Intel
_INTEL207 = 207,
/// Reserved by Intel
_INTEL208 = 208,
/// Reserved by Intel
_INTEL209 = 209,
/// KM211 KM32 32-bit processor
_KM32 = 210,
/// KM211 KMX32 32-bit processor
_KMX32 = 211,
/// KM211 KMX16 16-bit processor
_KMX16 = 212,
/// KM211 KMX8 8-bit processor
_KMX8 = 213,
/// KM211 KVARC processor
_KVARC = 214,
/// Paneve CDP architecture family
_CDP = 215,
/// Cognitive Smart Memory Processor
_COGE = 216,
/// iCelero CoolEngine
_COOL = 217,
/// Nanoradio Optimized RISC
_NORC = 218,
/// CSR Kalimba architecture family
_CSR_KALIMBA = 219,
/// AMD GPU architecture
_AMDGPU = 224,
/// RISC-V
_RISCV = 243,
/// Lanai 32-bit processor
_LANAI = 244,
/// Linux kernel bpf virtual machine
_BPF = 247,
_,
};
//--------------------------------------------------------------------------------
// src/FileHeader.zig
//--------------------------------------------------------------------------------
// ASCII horizontal tab (9) — presumably the separator used when several
// strings are packed into one array-valued setting; TODO confirm against wpcapi.h.
pub const ARRAY_SEP_CHAR = @as(u32, 9);
// WPC event-channel id and its operation codes. The opcode values (20..24)
// reappear as the low bits of the MSG_Opcode_* message ids declared below.
pub const WPCCHANNEL = @as(u32, 16);
pub const WPC_SETTINGS_LOCATE = @as(u32, 20);
pub const WPC_SETTINGS_MODIFY = @as(u32, 21);
pub const WPC_APP_LAUNCH = @as(u32, 22);
pub const WPC_SYSTEM = @as(u32, 23);
pub const WPC_WEB = @as(u32, 24);
// Task identifiers for the Windows Parental Controls event provider, one per
// monitored activity. Numbering skips 12; the WPCEVENT_*_value constants below
// carry the same values one-for-one.
pub const WPCPROV_TASK_SettingChange = @as(u32, 1);
pub const WPCPROV_TASK_GameStart = @as(u32, 2);
pub const WPCPROV_TASK_UrlVisit = @as(u32, 3);
pub const WPCPROV_TASK_EmailReceived = @as(u32, 4);
pub const WPCPROV_TASK_EmailSent = @as(u32, 5);
pub const WPCPROV_TASK_MediaPlayback = @as(u32, 6);
pub const WPCPROV_TASK_IMInvitation = @as(u32, 7);
pub const WPCPROV_TASK_IMJoin = @as(u32, 8);
pub const WPCPROV_TASK_IMLeave = @as(u32, 9);
pub const WPCPROV_TASK_FileDownload = @as(u32, 10);
pub const WPCPROV_TASK_IMFeature = @as(u32, 11);
pub const WPCPROV_TASK_Custom = @as(u32, 13);
pub const WPCPROV_TASK_EmailContact = @as(u32, 14);
pub const WPCPROV_TASK_IMContact = @as(u32, 15);
pub const WPCPROV_TASK_AppBlocked = @as(u32, 16);
pub const WPCPROV_TASK_AppOverride = @as(u32, 17);
pub const WPCPROV_TASK_WebOverride = @as(u32, 18);
pub const WPCPROV_TASK_WebsiteVisit = @as(u32, 19);
pub const WPCPROV_TASK_Application = @as(u32, 20);
pub const WPCPROV_TASK_ComputerUsage = @as(u32, 21);
pub const WPCPROV_TASK_ContentUsage = @as(u32, 22);
// Keyword bits (powers of two) distinguishing first-party WPC events from
// events logged by third-party providers — presumably ETW keyword masks;
// TODO confirm against the provider's instrumentation manifest.
pub const WPCPROV_KEYWORD_WPC = @as(u32, 16);
pub const WPCPROV_KEYWORD_ThirdParty = @as(u32, 32);
// Event value ids. These mirror the WPCPROV_TASK_* constants above
// (1..22, with 12 skipped), one event per task.
pub const WPCEVENT_SYS_SETTINGCHANGE_value = @as(u32, 1);
pub const WPCEVENT_GAME_START_value = @as(u32, 2);
pub const WPCEVENT_WEB_URLVISIT_value = @as(u32, 3);
pub const WPCEVENT_EMAIL_RECEIVED_value = @as(u32, 4);
pub const WPCEVENT_EMAIL_SENT_value = @as(u32, 5);
pub const WPCEVENT_MEDIA_PLAYBACK_value = @as(u32, 6);
pub const WPCEVENT_IM_INVITATION_value = @as(u32, 7);
pub const WPCEVENT_IM_JOIN_value = @as(u32, 8);
pub const WPCEVENT_IM_LEAVE_value = @as(u32, 9);
pub const WPCEVENT_WEB_FILEDOWNLOAD_value = @as(u32, 10);
pub const WPCEVENT_IM_FEATURE_value = @as(u32, 11);
pub const WPCEVENT_CUSTOM_value = @as(u32, 13);
pub const WPCEVENT_EMAIL_CONTACT_value = @as(u32, 14);
pub const WPCEVENT_IM_CONTACT_value = @as(u32, 15);
pub const WPCEVENT_SYSTEM_APPBLOCKED_value = @as(u32, 16);
pub const WPCEVENT_APPOVERRIDE_value = @as(u32, 17);
pub const WPCEVENT_WEBOVERRIDE_value = @as(u32, 18);
pub const WPCEVENT_WEB_WEBSITEVISIT_value = @as(u32, 19);
pub const WPCEVENT_APPLICATION_value = @as(u32, 20);
pub const WPCEVENT_COMPUTERUSAGE_value = @as(u32, 21);
pub const WPCEVENT_CONTENTUSAGE_value = @as(u32, 22);
// Message ids for the provider's localized message table. The high byte
// encodes the category (0x10.. keyword, 0x30.. opcode, 0x70.. task,
// 0x90.. publisher, 0xB0.. event); for the opcode, task, and event groups
// the low bits repeat the matching WPC_*/WPCPROV_TASK_* value (e.g.
// MSG_Opcode_Locate = 0x30000014, low bits 20 = WPC_SETTINGS_LOCATE).
// Declared as i32, so the 0xB0000000-range event ids appear negative here.
pub const MSG_Keyword_WPC = @as(i32, 268435461);
pub const MSG_Keyword_ThirdParty = @as(i32, 268435462);
pub const MSG_Opcode_Locate = @as(i32, 805306388);
pub const MSG_Opcode_Modify = @as(i32, 805306389);
pub const MSG_Opcode_Launch = @as(i32, 805306390);
pub const MSG_Opcode_System = @as(i32, 805306391);
pub const MSG_Opcode_Web = @as(i32, 805306392);
pub const MSG_Task_SettingChange = @as(i32, 1879048193);
pub const MSG_Task_GameStart = @as(i32, 1879048194);
pub const MSG_Task_UrlVisit = @as(i32, 1879048195);
pub const MSG_Task_EmailReceived = @as(i32, 1879048196);
pub const MSG_Task_EmailSent = @as(i32, 1879048197);
pub const MSG_Task_MediaPlayback = @as(i32, 1879048198);
pub const MSG_Task_IMInvitation = @as(i32, 1879048199);
pub const MSG_Task_IMJoin = @as(i32, 1879048200);
pub const MSG_Task_IMLeave = @as(i32, 1879048201);
pub const MSG_Task_FileDownload = @as(i32, 1879048202);
pub const MSG_Task_IMFeature = @as(i32, 1879048203);
pub const MSG_Task_Custom = @as(i32, 1879048205);
pub const MSG_Task_EmailContact = @as(i32, 1879048206);
pub const MSG_Task_IMContact = @as(i32, 1879048207);
pub const MSG_Task_AppBlocked = @as(i32, 1879048208);
pub const MSG_Task_AppOverride = @as(i32, 1879048209);
pub const MSG_Task_WebOverride = @as(i32, 1879048210);
pub const MSG_Task_WebsiteVisit = @as(i32, 1879048211);
pub const MSG_Task_Application = @as(i32, 1879048212);
pub const MSG_Task_ComputerUsage = @as(i32, 1879048213);
pub const MSG_Task_ContentUsage = @as(i32, 1879048214);
pub const MSG_Publisher_Name = @as(i32, -1879048191);
pub const MSG_Event_SettingChange = @as(i32, -1342177279);
pub const MSG_Event_GameStart = @as(i32, -1342177278);
pub const MSG_Event_UrlVisit = @as(i32, -1342177277);
pub const MSG_Event_EmailReceived = @as(i32, -1342177276);
pub const MSG_Event_EmailSent = @as(i32, -1342177275);
pub const MSG_Event_MediaPlayback = @as(i32, -1342177274);
pub const MSG_Event_IMInvitation = @as(i32, -1342177273);
pub const MSG_Event_IMJoin = @as(i32, -1342177272);
pub const MSG_Event_IMLeave = @as(i32, -1342177271);
pub const MSG_Event_FileDownload = @as(i32, -1342177270);
pub const MSG_Event_IMFeature = @as(i32, -1342177269);
pub const MSG_Event_Custom = @as(i32, -1342177267);
pub const MSG_Event_EmailContact = @as(i32, -1342177266);
pub const MSG_Event_IMContact = @as(i32, -1342177265);
pub const MSG_Event_AppBlocked = @as(i32, -1342177264);
pub const MSG_Event_AppOverride = @as(i32, -1342177263);
pub const MSG_Event_WebOverride = @as(i32, -1342177262);
pub const MSG_Event_WebsiteVisit = @as(i32, -1342177261);
pub const MSG_Event_Application = @as(i32, -1342177260);
pub const MSG_Event_ComputerUsage = @as(i32, -1342177259);
pub const MSG_Event_ContentUsage = @as(i32, -1342177258);
// HRESULT facility code for WPC errors (2457 = 0x999).
pub const FACILITY_WPC = @as(u32, 2457);
// Provider GUID — presumably the ETW event provider for Windows Parental
// Controls; TODO confirm against the registered provider manifest.
pub const WPCPROV = Guid.initString("01090065-b467-4503-9b28-533766761087");
//--------------------------------------------------------------------------------
// Section: Types (44)
//--------------------------------------------------------------------------------
// COM class ids (CLSIDs) for the coclasses backing the interfaces declared
// below. The `_Value` constants hold the GUID storage; the public names are
// pointers to them, matching the C headers' `REFCLSID` usage.
const CLSID_WpcSettingsProvider_Value = Guid.initString("355dffaa-3b9f-435c-b428-5d44290bc5f2");
pub const CLSID_WpcSettingsProvider = &CLSID_WpcSettingsProvider_Value;
const CLSID_WpcProviderSupport_Value = Guid.initString("bb18c7a0-2186-4be0-97d8-04847b628e02");
pub const CLSID_WpcProviderSupport = &CLSID_WpcProviderSupport_Value;
const CLSID_WindowsParentalControls_Value = Guid.initString("e77cc89b-7401-4c04-8ced-149db35add04");
pub const CLSID_WindowsParentalControls = &CLSID_WindowsParentalControls_Value;
// TODO: this type is limited to platform 'windows6.1'
const IID_IWPCProviderState_Value = Guid.initString("50b6a267-c4bd-450b-adb5-759073837c9e");
pub const IID_IWPCProviderState = &IID_IWPCProviderState_Value;
/// COM interface with Enable/Disable entry points — per the names, it lets
/// the system switch a parental-controls provider on and off. Generated
/// binding: the struct carries only the COM vtable pointer; callable
/// wrappers come from MethodMixin below.
pub const IWPCProviderState = extern struct {
    // Field order is the COM vtable ABI — do not reorder.
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Enable: fn(
            self: *const IWPCProviderState,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Disable: fn(
            self: *const IWPCProviderState,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Mixin re-exported by this type (and any deriving interface type) so
    /// the vtable slots can be called as ordinary methods; each wrapper just
    /// casts `self` to the interface type and dispatches through the vtable.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWPCProviderState_Enable(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWPCProviderState.VTable, self.vtable).Enable(@ptrCast(*const IWPCProviderState, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWPCProviderState_Disable(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWPCProviderState.VTable, self.vtable).Disable(@ptrCast(*const IWPCProviderState, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
/// Override-target flag. The generator collapsed the C enum to a single
/// member `N` (= 1); WPCFLAG_APPLICATION below restores the C constant name
/// — presumably the C enum had an APPLICATION member with value 1.
pub const WPCFLAG_OVERRIDE = enum(i32) {
    N = 1,
};
pub const WPCFLAG_APPLICATION = WPCFLAG_OVERRIDE.N;
// TODO: this type is limited to platform 'windows6.1'
const IID_IWPCProviderConfig_Value = Guid.initString("bef54196-2d02-4a26-b6e5-d65af295d0f1");
pub const IID_IWPCProviderConfig = &IID_IWPCProviderConfig_Value;
/// Per-user configuration interface for a parental-controls provider:
/// a user summary string, a configuration entry point, and an override
/// request. bstrSID is presumably the user's SID as a string — TODO confirm
/// against wpcapi.h.
pub const IWPCProviderConfig = extern struct {
    // Field order is the COM vtable ABI — do not reorder.
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        // Writes a caller-freed summary string to *pbstrUserSummary.
        GetUserSummary: fn(
            self: *const IWPCProviderConfig,
            bstrSID: ?BSTR,
            pbstrUserSummary: ?*?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // hWnd: presumably the parent window for any UI shown.
        Configure: fn(
            self: *const IWPCProviderConfig,
            hWnd: ?HWND,
            bstrSID: ?BSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // dwFlags carries WPCFLAG_RESTRICTION bits describing the override.
        RequestOverride: fn(
            self: *const IWPCProviderConfig,
            hWnd: ?HWND,
            bstrPath: ?BSTR,
            dwFlags: WPCFLAG_RESTRICTION,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Call-through wrappers for the vtable slots above.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWPCProviderConfig_GetUserSummary(self: *const T, bstrSID: ?BSTR, pbstrUserSummary: ?*?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWPCProviderConfig.VTable, self.vtable).GetUserSummary(@ptrCast(*const IWPCProviderConfig, self), bstrSID, pbstrUserSummary);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWPCProviderConfig_Configure(self: *const T, hWnd: ?HWND, bstrSID: ?BSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWPCProviderConfig.VTable, self.vtable).Configure(@ptrCast(*const IWPCProviderConfig, self), hWnd, bstrSID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWPCProviderConfig_RequestOverride(self: *const T, hWnd: ?HWND, bstrPath: ?BSTR, dwFlags: WPCFLAG_RESTRICTION) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWPCProviderConfig.VTable, self.vtable).RequestOverride(@ptrCast(*const IWPCProviderConfig, self), hWnd, bstrPath, dwFlags);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
/// Restriction flags describing which parental-control features apply to a
/// user. Members are distinct bits (1, 2, 4, ... 64) plus 0 for "none".
/// NOTE(review): declared as an exhaustive enum(i32), so OR-ed combinations
/// (e.g. WEB_FILTERED | HOURS_RESTRICTED = 6) have no representable tag.
/// Avoid exhaustive `switch` on values read back from
/// IWPCSettings.GetRestrictions; test individual bits instead.
pub const WPCFLAG_RESTRICTION = enum(i32) {
    NO_RESTRICTION = 0,
    LOGGING_REQUIRED = 1,
    WEB_FILTERED = 2,
    HOURS_RESTRICTED = 4,
    GAMES_BLOCKED = 8,
    APPS_RESTRICTED = 16,
    TIME_ALLOWANCE_RESTRICTED = 32,
    GAMES_RESTRICTED = 64,
};
// C-style aliases matching the original header's constant names.
pub const WPCFLAG_NO_RESTRICTION = WPCFLAG_RESTRICTION.NO_RESTRICTION;
pub const WPCFLAG_LOGGING_REQUIRED = WPCFLAG_RESTRICTION.LOGGING_REQUIRED;
pub const WPCFLAG_WEB_FILTERED = WPCFLAG_RESTRICTION.WEB_FILTERED;
pub const WPCFLAG_HOURS_RESTRICTED = WPCFLAG_RESTRICTION.HOURS_RESTRICTED;
pub const WPCFLAG_GAMES_BLOCKED = WPCFLAG_RESTRICTION.GAMES_BLOCKED;
pub const WPCFLAG_APPS_RESTRICTED = WPCFLAG_RESTRICTION.APPS_RESTRICTED;
pub const WPCFLAG_TIME_ALLOWANCE_RESTRICTED = WPCFLAG_RESTRICTION.TIME_ALLOWANCE_RESTRICTED;
pub const WPCFLAG_GAMES_RESTRICTED = WPCFLAG_RESTRICTION.GAMES_RESTRICTED;
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IWPCSettings_Value = Guid.initString("8fdf6ca1-0189-47e4-b670-1a8a4636e340");
pub const IID_IWPCSettings = &IID_IWPCSettings_Value;
/// Base per-user settings interface: whether activity logging is required,
/// when settings last changed, and which restrictions are active.
/// IWPCGamesSettings and IWPCWebSettings below extend it.
pub const IWPCSettings = extern struct {
    // Field order is the COM vtable ABI — do not reorder.
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        IsLoggingRequired: fn(
            self: *const IWPCSettings,
            pfRequired: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLastSettingsChangeTime: fn(
            self: *const IWPCSettings,
            pTime: ?*SYSTEMTIME,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // Out-param receives WPCFLAG_RESTRICTION bits; may be an OR of
        // several flags — see the NOTE on the enum declaration.
        GetRestrictions: fn(
            self: *const IWPCSettings,
            pdwRestrictions: ?*WPCFLAG_RESTRICTION,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Call-through wrappers for the vtable slots above.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWPCSettings_IsLoggingRequired(self: *const T, pfRequired: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWPCSettings.VTable, self.vtable).IsLoggingRequired(@ptrCast(*const IWPCSettings, self), pfRequired);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWPCSettings_GetLastSettingsChangeTime(self: *const T, pTime: ?*SYSTEMTIME) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWPCSettings.VTable, self.vtable).GetLastSettingsChangeTime(@ptrCast(*const IWPCSettings, self), pTime);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWPCSettings_GetRestrictions(self: *const T, pdwRestrictions: ?*WPCFLAG_RESTRICTION) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWPCSettings.VTable, self.vtable).GetRestrictions(@ptrCast(*const IWPCSettings, self), pdwRestrictions);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IWPCGamesSettings_Value = Guid.initString("95e87780-e158-489e-b452-bbb850790715")
;
pub const IID_IWPCGamesSettings = &IID_IWPCGamesSettings_Value;
/// Games-specific settings; derives from IWPCSettings (vtable embeds
/// IWPCSettings.VTable as `base`, and the mixin re-exports its methods).
pub const IWPCGamesSettings = extern struct {
    pub const VTable = extern struct {
        base: IWPCSettings.VTable,
        // guidAppID identifies the game; *pdwReasons receives why it is
        // blocked — presumably WPCFLAG_ISBLOCKED bits, TODO confirm.
        IsBlocked: fn(
            self: *const IWPCGamesSettings,
            guidAppID: Guid,
            pdwReasons: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Call-through wrappers; inherits the IWPCSettings wrappers as well.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IWPCSettings.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWPCGamesSettings_IsBlocked(self: *const T, guidAppID: Guid, pdwReasons: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWPCGamesSettings.VTable, self.vtable).IsBlocked(@ptrCast(*const IWPCGamesSettings, self), guidAppID, pdwReasons);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
/// Web filter setting returned by IWPCWebSettings.GetSettings.
pub const WPCFLAG_WEB_SETTING = enum(i32) {
    NOTBLOCKED = 0,
    DOWNLOADSBLOCKED = 1,
};
// C-style aliases matching the original header's constant names.
pub const WPCFLAG_WEB_SETTING_NOTBLOCKED = WPCFLAG_WEB_SETTING.NOTBLOCKED;
pub const WPCFLAG_WEB_SETTING_DOWNLOADSBLOCKED = WPCFLAG_WEB_SETTING.DOWNLOADSBLOCKED;
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IWPCWebSettings_Value = Guid.initString("ffccbdb8-0992-4c30-b0f1-1cbb09c240aa");
pub const IID_IWPCWebSettings = &IID_IWPCWebSettings_Value;
/// Web-filtering settings; derives from IWPCSettings (vtable embeds
/// IWPCSettings.VTable as `base`, and the mixin re-exports its methods).
pub const IWPCWebSettings = extern struct {
    pub const VTable = extern struct {
        base: IWPCSettings.VTable,
        GetSettings: fn(
            self: *const IWPCWebSettings,
            pdwSettings: ?*WPCFLAG_WEB_SETTING,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // Requests unblocking of pcszURL plus cURLs sub-URLs in
        // ppcszSubURLs; *pfChanged reports whether anything changed.
        RequestURLOverride: fn(
            self: *const IWPCWebSettings,
            hWnd: ?HWND,
            pcszURL: ?[*:0]const u16,
            cURLs: u32,
            ppcszSubURLs: ?[*]?PWSTR,
            pfChanged: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Call-through wrappers; inherits the IWPCSettings wrappers as well.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IWPCSettings.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWPCWebSettings_GetSettings(self: *const T, pdwSettings: ?*WPCFLAG_WEB_SETTING) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWPCWebSettings.VTable, self.vtable).GetSettings(@ptrCast(*const IWPCWebSettings, self), pdwSettings);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWPCWebSettings_RequestURLOverride(self: *const T, hWnd: ?HWND, pcszURL: ?[*:0]const u16, cURLs: u32, ppcszSubURLs: ?[*]?PWSTR, pfChanged: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWPCWebSettings.VTable, self.vtable).RequestURLOverride(@ptrCast(*const IWPCWebSettings, self), hWnd, pcszURL, cURLs, ppcszSubURLs, pfChanged);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
/// Whether the WPC feature is visible on this system; returned by
/// IWindowsParentalControlsCore.GetVisibility.
pub const WPCFLAG_VISIBILITY = enum(i32) {
    VISIBLE = 0,
    HIDDEN = 1,
};
// C-style aliases matching the original header's constant names.
pub const WPCFLAG_WPC_VISIBLE = WPCFLAG_VISIBILITY.VISIBLE;
pub const WPCFLAG_WPC_HIDDEN = WPCFLAG_VISIBILITY.HIDDEN;
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IWindowsParentalControlsCore_Value = Guid.initString("4ff40a0f-3f3b-4d7c-a41b-4f39d7b44d05");
pub const IID_IWindowsParentalControlsCore = &IID_IWindowsParentalControlsCore_Value;
/// Core entry-point interface for querying Windows Parental Controls:
/// feature visibility, a user's general and web settings, and the active
/// web filter. Users are addressed by SID string (pcszSID).
pub const IWindowsParentalControlsCore = extern struct {
    // Field order is the COM vtable ABI — do not reorder.
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetVisibility: fn(
            self: *const IWindowsParentalControlsCore,
            peVisibility: ?*WPCFLAG_VISIBILITY,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // *ppSettings receives an IWPCSettings — presumably a new COM
        // reference the caller must Release; TODO confirm.
        GetUserSettings: fn(
            self: *const IWindowsParentalControlsCore,
            pcszSID: ?[*:0]const u16,
            ppSettings: ?*?*IWPCSettings,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetWebSettings: fn(
            self: *const IWindowsParentalControlsCore,
            pcszSID: ?[*:0]const u16,
            ppSettings: ?*?*IWPCWebSettings,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // Writes the active web filter's GUID and display name.
        GetWebFilterInfo: fn(
            self: *const IWindowsParentalControlsCore,
            pguidID: ?*Guid,
            ppszName: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Call-through wrappers for the vtable slots above.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWindowsParentalControlsCore_GetVisibility(self: *const T, peVisibility: ?*WPCFLAG_VISIBILITY) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWindowsParentalControlsCore.VTable, self.vtable).GetVisibility(@ptrCast(*const IWindowsParentalControlsCore, self), peVisibility);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWindowsParentalControlsCore_GetUserSettings(self: *const T, pcszSID: ?[*:0]const u16, ppSettings: ?*?*IWPCSettings) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWindowsParentalControlsCore.VTable, self.vtable).GetUserSettings(@ptrCast(*const IWindowsParentalControlsCore, self), pcszSID, ppSettings);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWindowsParentalControlsCore_GetWebSettings(self: *const T, pcszSID: ?[*:0]const u16, ppSettings: ?*?*IWPCWebSettings) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWindowsParentalControlsCore.VTable, self.vtable).GetWebSettings(@ptrCast(*const IWindowsParentalControlsCore, self), pcszSID, ppSettings);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWindowsParentalControlsCore_GetWebFilterInfo(self: *const T, pguidID: ?*Guid, ppszName: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWindowsParentalControlsCore.VTable, self.vtable).GetWebFilterInfo(@ptrCast(*const IWindowsParentalControlsCore, self), pguidID, ppszName);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IWindowsParentalControls_Value = Guid.initString("28b4d88b-e072-49e6-804d-26edbe21a7b9");
pub const IID_IWindowsParentalControls = &IID_IWindowsParentalControls_Value;
/// Full WPC interface: extends IWindowsParentalControlsCore (vtable embeds
/// its VTable as `base`) with access to per-user games settings.
pub const IWindowsParentalControls = extern struct {
    pub const VTable = extern struct {
        base: IWindowsParentalControlsCore.VTable,
        GetGamesSettings: fn(
            self: *const IWindowsParentalControls,
            pcszSID: ?[*:0]const u16,
            ppSettings: ?*?*IWPCGamesSettings,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Call-through wrappers; inherits the core interface's wrappers too.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IWindowsParentalControlsCore.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWindowsParentalControls_GetGamesSettings(self: *const T, pcszSID: ?[*:0]const u16, ppSettings: ?*?*IWPCGamesSettings) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWindowsParentalControls.VTable, self.vtable).GetGamesSettings(@ptrCast(*const IWindowsParentalControls, self), pcszSID, ppSettings);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows6.1'
const IID_IWPCProviderSupport_Value = Guid.initString("41eba572-23ed-4779-bec1-8df96206c44c");
pub const IID_IWPCProviderSupport = &IID_IWPCProviderSupport_Value;
/// Support interface for parental-controls providers; GetCurrent writes the
/// GUID of the current provider to *pguidProvider.
pub const IWPCProviderSupport = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrent: fn(
            self: *const IWPCProviderSupport,
            pguidProvider: ?*Guid,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Call-through wrapper for the vtable slot above.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWPCProviderSupport_GetCurrent(self: *const T, pguidProvider: ?*Guid) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWPCProviderSupport.VTable, self.vtable).GetCurrent(@ptrCast(*const IWPCProviderSupport, self), pguidProvider);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// Block-reason codes. All values except INTERNALERROR (-1) are distinct
// powers of two, which suggests bitmask combination — TODO confirm against
// the WPC SDK headers (wpcevent.h).
pub const WPCFLAG_ISBLOCKED = enum(i32) {
NOTBLOCKED = 0,
IMBLOCKED = 1,
EMAILBLOCKED = 2,
MEDIAPLAYBACKBLOCKED = 4,
WEBBLOCKED = 8,
GAMESBLOCKED = 16,
CONTACTBLOCKED = 32,
FEATUREBLOCKED = 64,
DOWNLOADBLOCKED = 128,
RATINGBLOCKED = 256,
DESCRIPTORBLOCKED = 512,
EXPLICITBLOCK = 1024,
BADPASS = 2048,
MAXHOURS = 4096,
SPECHOURS = 8192,
SETTINGSCHANGEBLOCKED = 16384,
ATTACHMENTBLOCKED = 32768,
SENDERBLOCKED = 65536,
RECEIVERBLOCKED = 131072,
NOTEXPLICITLYALLOWED = 262144,
NOTINLIST = 524288,
CATEGORYBLOCKED = 1048576,
CATEGORYNOTINLIST = 2097152,
NOTKIDS = 4194304,
UNRATED = 8388608,
NOACCESS = 16777216,
INTERNALERROR = -1,
};
// Flat aliases mirroring the original C-style constant names.
pub const WPCFLAG_ISBLOCKED_NOTBLOCKED = WPCFLAG_ISBLOCKED.NOTBLOCKED;
pub const WPCFLAG_ISBLOCKED_IMBLOCKED = WPCFLAG_ISBLOCKED.IMBLOCKED;
pub const WPCFLAG_ISBLOCKED_EMAILBLOCKED = WPCFLAG_ISBLOCKED.EMAILBLOCKED;
pub const WPCFLAG_ISBLOCKED_MEDIAPLAYBACKBLOCKED = WPCFLAG_ISBLOCKED.MEDIAPLAYBACKBLOCKED;
pub const WPCFLAG_ISBLOCKED_WEBBLOCKED = WPCFLAG_ISBLOCKED.WEBBLOCKED;
pub const WPCFLAG_ISBLOCKED_GAMESBLOCKED = WPCFLAG_ISBLOCKED.GAMESBLOCKED;
pub const WPCFLAG_ISBLOCKED_CONTACTBLOCKED = WPCFLAG_ISBLOCKED.CONTACTBLOCKED;
pub const WPCFLAG_ISBLOCKED_FEATUREBLOCKED = WPCFLAG_ISBLOCKED.FEATUREBLOCKED;
pub const WPCFLAG_ISBLOCKED_DOWNLOADBLOCKED = WPCFLAG_ISBLOCKED.DOWNLOADBLOCKED;
pub const WPCFLAG_ISBLOCKED_RATINGBLOCKED = WPCFLAG_ISBLOCKED.RATINGBLOCKED;
pub const WPCFLAG_ISBLOCKED_DESCRIPTORBLOCKED = WPCFLAG_ISBLOCKED.DESCRIPTORBLOCKED;
pub const WPCFLAG_ISBLOCKED_EXPLICITBLOCK = WPCFLAG_ISBLOCKED.EXPLICITBLOCK;
pub const WPCFLAG_ISBLOCKED_BADPASS = WPCFLAG_ISBLOCKED.BADPASS;
pub const WPCFLAG_ISBLOCKED_MAXHOURS = WPCFLAG_ISBLOCKED.MAXHOURS;
pub const WPCFLAG_ISBLOCKED_SPECHOURS = WPCFLAG_ISBLOCKED.SPECHOURS;
pub const WPCFLAG_ISBLOCKED_SETTINGSCHANGEBLOCKED = WPCFLAG_ISBLOCKED.SETTINGSCHANGEBLOCKED;
pub const WPCFLAG_ISBLOCKED_ATTACHMENTBLOCKED = WPCFLAG_ISBLOCKED.ATTACHMENTBLOCKED;
pub const WPCFLAG_ISBLOCKED_SENDERBLOCKED = WPCFLAG_ISBLOCKED.SENDERBLOCKED;
pub const WPCFLAG_ISBLOCKED_RECEIVERBLOCKED = WPCFLAG_ISBLOCKED.RECEIVERBLOCKED;
pub const WPCFLAG_ISBLOCKED_NOTEXPLICITLYALLOWED = WPCFLAG_ISBLOCKED.NOTEXPLICITLYALLOWED;
pub const WPCFLAG_ISBLOCKED_NOTINLIST = WPCFLAG_ISBLOCKED.NOTINLIST;
pub const WPCFLAG_ISBLOCKED_CATEGORYBLOCKED = WPCFLAG_ISBLOCKED.CATEGORYBLOCKED;
pub const WPCFLAG_ISBLOCKED_CATEGORYNOTINLIST = WPCFLAG_ISBLOCKED.CATEGORYNOTINLIST;
pub const WPCFLAG_ISBLOCKED_NOTKIDS = WPCFLAG_ISBLOCKED.NOTKIDS;
pub const WPCFLAG_ISBLOCKED_UNRATED = WPCFLAG_ISBLOCKED.UNRATED;
pub const WPCFLAG_ISBLOCKED_NOACCESS = WPCFLAG_ISBLOCKED.NOACCESS;
pub const WPCFLAG_ISBLOCKED_INTERNALERROR = WPCFLAG_ISBLOCKED.INTERNALERROR;
// Logoff kinds (FUS = Fast User Switching, per the Windows term — confirm).
pub const WPCFLAG_LOGOFF_TYPE = enum(i32) {
LOGOUT = 0,
RESTART = 1,
SHUTDOWN = 2,
FUS = 4,
FORCEDFUS = 8,
};
pub const WPCFLAG_LOGOFF_TYPE_LOGOUT = WPCFLAG_LOGOFF_TYPE.LOGOUT;
pub const WPCFLAG_LOGOFF_TYPE_RESTART = WPCFLAG_LOGOFF_TYPE.RESTART;
pub const WPCFLAG_LOGOFF_TYPE_SHUTDOWN = WPCFLAG_LOGOFF_TYPE.SHUTDOWN;
pub const WPCFLAG_LOGOFF_TYPE_FUS = WPCFLAG_LOGOFF_TYPE.FUS;
pub const WPCFLAG_LOGOFF_TYPE_FORCEDFUS = WPCFLAG_LOGOFF_TYPE.FORCEDFUS;
// Ways an instant-messaging conversation can be left.
pub const WPCFLAG_IM_LEAVE = enum(i32) {
NORMAL = 0,
FORCED = 1,
CONVERSATION_END = 2,
};
pub const WPCFLAG_IM_LEAVE_NORMAL = WPCFLAG_IM_LEAVE.NORMAL;
pub const WPCFLAG_IM_LEAVE_FORCED = WPCFLAG_IM_LEAVE.FORCED;
pub const WPCFLAG_IM_LEAVE_CONVERSATION_END = WPCFLAG_IM_LEAVE.CONVERSATION_END;
// The WPC_ARGS_* enums below enumerate argument positions for event payloads;
// each ends with a CARGS member equal to the argument count.
// Argument indices for a settings-change event.
pub const WPC_ARGS_SETTINGSCHANGEEVENT = enum(i32) {
CLASS = 0,
SETTING = 1,
OWNER = 2,
OLDVAL = 3,
NEWVAL = 4,
REASON = 5,
OPTIONAL = 6,
CARGS = 7,
};
pub const WPC_ARGS_SETTINGSCHANGEEVENT_CLASS = WPC_ARGS_SETTINGSCHANGEEVENT.CLASS;
pub const WPC_ARGS_SETTINGSCHANGEEVENT_SETTING = WPC_ARGS_SETTINGSCHANGEEVENT.SETTING;
pub const WPC_ARGS_SETTINGSCHANGEEVENT_OWNER = WPC_ARGS_SETTINGSCHANGEEVENT.OWNER;
pub const WPC_ARGS_SETTINGSCHANGEEVENT_OLDVAL = WPC_ARGS_SETTINGSCHANGEEVENT.OLDVAL;
pub const WPC_ARGS_SETTINGSCHANGEEVENT_NEWVAL = WPC_ARGS_SETTINGSCHANGEEVENT.NEWVAL;
pub const WPC_ARGS_SETTINGSCHANGEEVENT_REASON = WPC_ARGS_SETTINGSCHANGEEVENT.REASON;
pub const WPC_ARGS_SETTINGSCHANGEEVENT_OPTIONAL = WPC_ARGS_SETTINGSCHANGEEVENT.OPTIONAL;
pub const WPC_ARGS_SETTINGSCHANGEEVENT_CARGS = WPC_ARGS_SETTINGSCHANGEEVENT.CARGS;
// Argument indices for a safer-application-blocked event.
pub const WPC_ARGS_SAFERAPPBLOCKED = enum(i32) {
TIMESTAMP = 0,
USERID = 1,
PATH = 2,
RULEID = 3,
CARGS = 4,
};
pub const WPC_ARGS_SAFERAPPBLOCKED_TIMESTAMP = WPC_ARGS_SAFERAPPBLOCKED.TIMESTAMP;
pub const WPC_ARGS_SAFERAPPBLOCKED_USERID = WPC_ARGS_SAFERAPPBLOCKED.USERID;
pub const WPC_ARGS_SAFERAPPBLOCKED_PATH = WPC_ARGS_SAFERAPPBLOCKED.PATH;
pub const WPC_ARGS_SAFERAPPBLOCKED_RULEID = WPC_ARGS_SAFERAPPBLOCKED.RULEID;
pub const WPC_ARGS_SAFERAPPBLOCKED_CARGS = WPC_ARGS_SAFERAPPBLOCKED.CARGS;
// Argument indices for an email-received event.
// (The "RECEIEVED" misspelling matches the upstream Windows SDK name.)
pub const WPC_ARGS_EMAILRECEIEVEDEVENT = enum(i32) {
SENDER = 0,
APPNAME = 1,
APPVERSION = 2,
SUBJECT = 3,
REASON = 4,
RECIPCOUNT = 5,
RECIPIENT = 6,
ATTACHCOUNT = 7,
ATTACHMENTNAME = 8,
RECEIVEDTIME = 9,
EMAILACCOUNT = 10,
CARGS = 11,
};
pub const WPC_ARGS_EMAILRECEIEVEDEVENT_SENDER = WPC_ARGS_EMAILRECEIEVEDEVENT.SENDER;
pub const WPC_ARGS_EMAILRECEIEVEDEVENT_APPNAME = WPC_ARGS_EMAILRECEIEVEDEVENT.APPNAME;
pub const WPC_ARGS_EMAILRECEIEVEDEVENT_APPVERSION = WPC_ARGS_EMAILRECEIEVEDEVENT.APPVERSION;
pub const WPC_ARGS_EMAILRECEIEVEDEVENT_SUBJECT = WPC_ARGS_EMAILRECEIEVEDEVENT.SUBJECT;
pub const WPC_ARGS_EMAILRECEIEVEDEVENT_REASON = WPC_ARGS_EMAILRECEIEVEDEVENT.REASON;
pub const WPC_ARGS_EMAILRECEIEVEDEVENT_RECIPCOUNT = WPC_ARGS_EMAILRECEIEVEDEVENT.RECIPCOUNT;
pub const WPC_ARGS_EMAILRECEIEVEDEVENT_RECIPIENT = WPC_ARGS_EMAILRECEIEVEDEVENT.RECIPIENT;
pub const WPC_ARGS_EMAILRECEIEVEDEVENT_ATTACHCOUNT = WPC_ARGS_EMAILRECEIEVEDEVENT.ATTACHCOUNT;
pub const WPC_ARGS_EMAILRECEIEVEDEVENT_ATTACHMENTNAME = WPC_ARGS_EMAILRECEIEVEDEVENT.ATTACHMENTNAME;
pub const WPC_ARGS_EMAILRECEIEVEDEVENT_RECEIVEDTIME = WPC_ARGS_EMAILRECEIEVEDEVENT.RECEIVEDTIME;
pub const WPC_ARGS_EMAILRECEIEVEDEVENT_EMAILACCOUNT = WPC_ARGS_EMAILRECEIEVEDEVENT.EMAILACCOUNT;
pub const WPC_ARGS_EMAILRECEIEVEDEVENT_CARGS = WPC_ARGS_EMAILRECEIEVEDEVENT.CARGS;
// Argument indices for an email-sent event.
pub const WPC_ARGS_EMAILSENTEVENT = enum(i32) {
SENDER = 0,
APPNAME = 1,
APPVERSION = 2,
SUBJECT = 3,
REASON = 4,
RECIPCOUNT = 5,
RECIPIENT = 6,
ATTACHCOUNT = 7,
ATTACHMENTNAME = 8,
EMAILACCOUNT = 9,
CARGS = 10,
};
pub const WPC_ARGS_EMAILSENTEVENT_SENDER = WPC_ARGS_EMAILSENTEVENT.SENDER;
pub const WPC_ARGS_EMAILSENTEVENT_APPNAME = WPC_ARGS_EMAILSENTEVENT.APPNAME;
pub const WPC_ARGS_EMAILSENTEVENT_APPVERSION = WPC_ARGS_EMAILSENTEVENT.APPVERSION;
pub const WPC_ARGS_EMAILSENTEVENT_SUBJECT = WPC_ARGS_EMAILSENTEVENT.SUBJECT;
pub const WPC_ARGS_EMAILSENTEVENT_REASON = WPC_ARGS_EMAILSENTEVENT.REASON;
pub const WPC_ARGS_EMAILSENTEVENT_RECIPCOUNT = WPC_ARGS_EMAILSENTEVENT.RECIPCOUNT;
pub const WPC_ARGS_EMAILSENTEVENT_RECIPIENT = WPC_ARGS_EMAILSENTEVENT.RECIPIENT;
pub const WPC_ARGS_EMAILSENTEVENT_ATTACHCOUNT = WPC_ARGS_EMAILSENTEVENT.ATTACHCOUNT;
pub const WPC_ARGS_EMAILSENTEVENT_ATTACHMENTNAME = WPC_ARGS_EMAILSENTEVENT.ATTACHMENTNAME;
pub const WPC_ARGS_EMAILSENTEVENT_EMAILACCOUNT = WPC_ARGS_EMAILSENTEVENT.EMAILACCOUNT;
pub const WPC_ARGS_EMAILSENTEVENT_CARGS = WPC_ARGS_EMAILSENTEVENT.CARGS;
// Argument indices for an email contact change event.
pub const WPC_ARGS_EMAILCONTACTEVENT = enum(i32) {
APPNAME = 0,
APPVERSION = 1,
OLDNAME = 2,
OLDID = 3,
NEWNAME = 4,
NEWID = 5,
REASON = 6,
EMAILACCOUNT = 7,
CARGS = 8,
};
pub const WPC_ARGS_EMAILCONTACTEVENT_APPNAME = WPC_ARGS_EMAILCONTACTEVENT.APPNAME;
pub const WPC_ARGS_EMAILCONTACTEVENT_APPVERSION = WPC_ARGS_EMAILCONTACTEVENT.APPVERSION;
pub const WPC_ARGS_EMAILCONTACTEVENT_OLDNAME = WPC_ARGS_EMAILCONTACTEVENT.OLDNAME;
pub const WPC_ARGS_EMAILCONTACTEVENT_OLDID = WPC_ARGS_EMAILCONTACTEVENT.OLDID;
pub const WPC_ARGS_EMAILCONTACTEVENT_NEWNAME = WPC_ARGS_EMAILCONTACTEVENT.NEWNAME;
pub const WPC_ARGS_EMAILCONTACTEVENT_NEWID = WPC_ARGS_EMAILCONTACTEVENT.NEWID;
pub const WPC_ARGS_EMAILCONTACTEVENT_REASON = WPC_ARGS_EMAILCONTACTEVENT.REASON;
pub const WPC_ARGS_EMAILCONTACTEVENT_EMAILACCOUNT = WPC_ARGS_EMAILCONTACTEVENT.EMAILACCOUNT;
pub const WPC_ARGS_EMAILCONTACTEVENT_CARGS = WPC_ARGS_EMAILCONTACTEVENT.CARGS;
// Media categories; MAX is a count sentinel, not a real category.
pub const WPC_MEDIA_TYPE = enum(i32) {
OTHER = 0,
DVD = 1,
RECORDED_TV = 2,
AUDIO_FILE = 3,
CD_AUDIO = 4,
VIDEO_FILE = 5,
PICTURE_FILE = 6,
MAX = 7,
};
pub const WPC_MEDIA_TYPE_OTHER = WPC_MEDIA_TYPE.OTHER;
pub const WPC_MEDIA_TYPE_DVD = WPC_MEDIA_TYPE.DVD;
pub const WPC_MEDIA_TYPE_RECORDED_TV = WPC_MEDIA_TYPE.RECORDED_TV;
pub const WPC_MEDIA_TYPE_AUDIO_FILE = WPC_MEDIA_TYPE.AUDIO_FILE;
pub const WPC_MEDIA_TYPE_CD_AUDIO = WPC_MEDIA_TYPE.CD_AUDIO;
pub const WPC_MEDIA_TYPE_VIDEO_FILE = WPC_MEDIA_TYPE.VIDEO_FILE;
pub const WPC_MEDIA_TYPE_PICTURE_FILE = WPC_MEDIA_TYPE.PICTURE_FILE;
pub const WPC_MEDIA_TYPE_MAX = WPC_MEDIA_TYPE.MAX;
// Tri-state explicit-content flag (false / true / unknown).
pub const WPC_MEDIA_EXPLICIT = enum(i32) {
FALSE = 0,
TRUE = 1,
UNKNOWN = 2,
};
pub const WPC_MEDIA_EXPLICIT_FALSE = WPC_MEDIA_EXPLICIT.FALSE;
pub const WPC_MEDIA_EXPLICIT_TRUE = WPC_MEDIA_EXPLICIT.TRUE;
pub const WPC_MEDIA_EXPLICIT_UNKNOWN = WPC_MEDIA_EXPLICIT.UNKNOWN;
// Argument indices for a media-playback event.
pub const WPC_ARGS_MEDIAPLAYBACKEVENT = enum(i32) {
APPNAME = 0,
APPVERSION = 1,
MEDIATYPE = 2,
PATH = 3,
TITLE = 4,
PML = 5,
ALBUM = 6,
EXPLICIT = 7,
REASON = 8,
CARGS = 9,
};
pub const WPC_ARGS_MEDIAPLAYBACKEVENT_APPNAME = WPC_ARGS_MEDIAPLAYBACKEVENT.APPNAME;
pub const WPC_ARGS_MEDIAPLAYBACKEVENT_APPVERSION = WPC_ARGS_MEDIAPLAYBACKEVENT.APPVERSION;
pub const WPC_ARGS_MEDIAPLAYBACKEVENT_MEDIATYPE = WPC_ARGS_MEDIAPLAYBACKEVENT.MEDIATYPE;
pub const WPC_ARGS_MEDIAPLAYBACKEVENT_PATH = WPC_ARGS_MEDIAPLAYBACKEVENT.PATH;
pub const WPC_ARGS_MEDIAPLAYBACKEVENT_TITLE = WPC_ARGS_MEDIAPLAYBACKEVENT.TITLE;
pub const WPC_ARGS_MEDIAPLAYBACKEVENT_PML = WPC_ARGS_MEDIAPLAYBACKEVENT.PML;
pub const WPC_ARGS_MEDIAPLAYBACKEVENT_ALBUM = WPC_ARGS_MEDIAPLAYBACKEVENT.ALBUM;
pub const WPC_ARGS_MEDIAPLAYBACKEVENT_EXPLICIT = WPC_ARGS_MEDIAPLAYBACKEVENT.EXPLICIT;
pub const WPC_ARGS_MEDIAPLAYBACKEVENT_REASON = WPC_ARGS_MEDIAPLAYBACKEVENT.REASON;
pub const WPC_ARGS_MEDIAPLAYBACKEVENT_CARGS = WPC_ARGS_MEDIAPLAYBACKEVENT.CARGS;
// Argument indices for a media-download event (same layout as playback).
pub const WPC_ARGS_MEDIADOWNLOADEVENT = enum(i32) {
APPNAME = 0,
APPVERSION = 1,
MEDIATYPE = 2,
PATH = 3,
TITLE = 4,
PML = 5,
ALBUM = 6,
EXPLICIT = 7,
REASON = 8,
CARGS = 9,
};
pub const WPC_ARGS_MEDIADOWNLOADEVENT_APPNAME = WPC_ARGS_MEDIADOWNLOADEVENT.APPNAME;
pub const WPC_ARGS_MEDIADOWNLOADEVENT_APPVERSION = WPC_ARGS_MEDIADOWNLOADEVENT.APPVERSION;
pub const WPC_ARGS_MEDIADOWNLOADEVENT_MEDIATYPE = WPC_ARGS_MEDIADOWNLOADEVENT.MEDIATYPE;
pub const WPC_ARGS_MEDIADOWNLOADEVENT_PATH = WPC_ARGS_MEDIADOWNLOADEVENT.PATH;
pub const WPC_ARGS_MEDIADOWNLOADEVENT_TITLE = WPC_ARGS_MEDIADOWNLOADEVENT.TITLE;
pub const WPC_ARGS_MEDIADOWNLOADEVENT_PML = WPC_ARGS_MEDIADOWNLOADEVENT.PML;
pub const WPC_ARGS_MEDIADOWNLOADEVENT_ALBUM = WPC_ARGS_MEDIADOWNLOADEVENT.ALBUM;
pub const WPC_ARGS_MEDIADOWNLOADEVENT_EXPLICIT = WPC_ARGS_MEDIADOWNLOADEVENT.EXPLICIT;
pub const WPC_ARGS_MEDIADOWNLOADEVENT_REASON = WPC_ARGS_MEDIADOWNLOADEVENT.REASON;
pub const WPC_ARGS_MEDIADOWNLOADEVENT_CARGS = WPC_ARGS_MEDIADOWNLOADEVENT.CARGS;
// Argument indices for a conversation-initiation event.
pub const WPC_ARGS_CONVERSATIONINITEVENT = enum(i32) {
APPNAME = 0,
APPVERSION = 1,
ACCOUNTNAME = 2,
CONVID = 3,
REQUESTINGIP = 4,
SENDER = 5,
REASON = 6,
RECIPCOUNT = 7,
RECIPIENT = 8,
CARGS = 9,
};
pub const WPC_ARGS_CONVERSATIONINITEVENT_APPNAME = WPC_ARGS_CONVERSATIONINITEVENT.APPNAME;
pub const WPC_ARGS_CONVERSATIONINITEVENT_APPVERSION = WPC_ARGS_CONVERSATIONINITEVENT.APPVERSION;
pub const WPC_ARGS_CONVERSATIONINITEVENT_ACCOUNTNAME = WPC_ARGS_CONVERSATIONINITEVENT.ACCOUNTNAME;
pub const WPC_ARGS_CONVERSATIONINITEVENT_CONVID = WPC_ARGS_CONVERSATIONINITEVENT.CONVID;
pub const WPC_ARGS_CONVERSATIONINITEVENT_REQUESTINGIP = WPC_ARGS_CONVERSATIONINITEVENT.REQUESTINGIP;
pub const WPC_ARGS_CONVERSATIONINITEVENT_SENDER = WPC_ARGS_CONVERSATIONINITEVENT.SENDER;
pub const WPC_ARGS_CONVERSATIONINITEVENT_REASON = WPC_ARGS_CONVERSATIONINITEVENT.REASON;
pub const WPC_ARGS_CONVERSATIONINITEVENT_RECIPCOUNT = WPC_ARGS_CONVERSATIONINITEVENT.RECIPCOUNT;
pub const WPC_ARGS_CONVERSATIONINITEVENT_RECIPIENT = WPC_ARGS_CONVERSATIONINITEVENT.RECIPIENT;
pub const WPC_ARGS_CONVERSATIONINITEVENT_CARGS = WPC_ARGS_CONVERSATIONINITEVENT.CARGS;
// Argument indices for a conversation-join event.
pub const WPC_ARGS_CONVERSATIONJOINEVENT = enum(i32) {
APPNAME = 0,
APPVERSION = 1,
ACCOUNTNAME = 2,
CONVID = 3,
JOININGIP = 4,
JOININGUSER = 5,
REASON = 6,
MEMBERCOUNT = 7,
MEMBER = 8,
SENDER = 9,
CARGS = 10,
};
pub const WPC_ARGS_CONVERSATIONJOINEVENT_APPNAME = WPC_ARGS_CONVERSATIONJOINEVENT.APPNAME;
pub const WPC_ARGS_CONVERSATIONJOINEVENT_APPVERSION = WPC_ARGS_CONVERSATIONJOINEVENT.APPVERSION;
pub const WPC_ARGS_CONVERSATIONJOINEVENT_ACCOUNTNAME = WPC_ARGS_CONVERSATIONJOINEVENT.ACCOUNTNAME;
pub const WPC_ARGS_CONVERSATIONJOINEVENT_CONVID = WPC_ARGS_CONVERSATIONJOINEVENT.CONVID;
pub const WPC_ARGS_CONVERSATIONJOINEVENT_JOININGIP = WPC_ARGS_CONVERSATIONJOINEVENT.JOININGIP;
pub const WPC_ARGS_CONVERSATIONJOINEVENT_JOININGUSER = WPC_ARGS_CONVERSATIONJOINEVENT.JOININGUSER;
pub const WPC_ARGS_CONVERSATIONJOINEVENT_REASON = WPC_ARGS_CONVERSATIONJOINEVENT.REASON;
pub const WPC_ARGS_CONVERSATIONJOINEVENT_MEMBERCOUNT = WPC_ARGS_CONVERSATIONJOINEVENT.MEMBERCOUNT;
pub const WPC_ARGS_CONVERSATIONJOINEVENT_MEMBER = WPC_ARGS_CONVERSATIONJOINEVENT.MEMBER;
pub const WPC_ARGS_CONVERSATIONJOINEVENT_SENDER = WPC_ARGS_CONVERSATIONJOINEVENT.SENDER;
pub const WPC_ARGS_CONVERSATIONJOINEVENT_CARGS = WPC_ARGS_CONVERSATIONJOINEVENT.CARGS;
// Argument indices for a conversation-leave event; FLAGS presumably carries a
// WPCFLAG_IM_LEAVE value — confirm against wpcevent.h.
pub const WPC_ARGS_CONVERSATIONLEAVEEVENT = enum(i32) {
APPNAME = 0,
APPVERSION = 1,
ACCOUNTNAME = 2,
CONVID = 3,
LEAVINGIP = 4,
LEAVINGUSER = 5,
REASON = 6,
MEMBERCOUNT = 7,
MEMBER = 8,
FLAGS = 9,
CARGS = 10,
};
pub const WPC_ARGS_CONVERSATIONLEAVEEVENT_APPNAME = WPC_ARGS_CONVERSATIONLEAVEEVENT.APPNAME;
pub const WPC_ARGS_CONVERSATIONLEAVEEVENT_APPVERSION = WPC_ARGS_CONVERSATIONLEAVEEVENT.APPVERSION;
pub const WPC_ARGS_CONVERSATIONLEAVEEVENT_ACCOUNTNAME = WPC_ARGS_CONVERSATIONLEAVEEVENT.ACCOUNTNAME;
pub const WPC_ARGS_CONVERSATIONLEAVEEVENT_CONVID = WPC_ARGS_CONVERSATIONLEAVEEVENT.CONVID;
pub const WPC_ARGS_CONVERSATIONLEAVEEVENT_LEAVINGIP = WPC_ARGS_CONVERSATIONLEAVEEVENT.LEAVINGIP;
pub const WPC_ARGS_CONVERSATIONLEAVEEVENT_LEAVINGUSER = WPC_ARGS_CONVERSATIONLEAVEEVENT.LEAVINGUSER;
pub const WPC_ARGS_CONVERSATIONLEAVEEVENT_REASON = WPC_ARGS_CONVERSATIONLEAVEEVENT.REASON;
pub const WPC_ARGS_CONVERSATIONLEAVEEVENT_MEMBERCOUNT = WPC_ARGS_CONVERSATIONLEAVEEVENT.MEMBERCOUNT;
pub const WPC_ARGS_CONVERSATIONLEAVEEVENT_MEMBER = WPC_ARGS_CONVERSATIONLEAVEEVENT.MEMBER;
pub const WPC_ARGS_CONVERSATIONLEAVEEVENT_FLAGS = WPC_ARGS_CONVERSATIONLEAVEEVENT.FLAGS;
pub const WPC_ARGS_CONVERSATIONLEAVEEVENT_CARGS = WPC_ARGS_CONVERSATIONLEAVEEVENT.CARGS;
// IM feature bits. SENDING (-2147483648) is the sign bit (0x80000000 as i32)
// and ALL (-1) is all bits set, which indicates bitmask usage.
pub const WPCFLAG_IM_FEATURE = enum(i32) {
NONE = 0,
VIDEO = 1,
AUDIO = 2,
GAME = 4,
SMS = 8,
FILESWAP = 16,
URLSWAP = 32,
SENDING = -2147483648,
ALL = -1,
};
pub const WPCFLAG_IM_FEATURE_NONE = WPCFLAG_IM_FEATURE.NONE;
pub const WPCFLAG_IM_FEATURE_VIDEO = WPCFLAG_IM_FEATURE.VIDEO;
pub const WPCFLAG_IM_FEATURE_AUDIO = WPCFLAG_IM_FEATURE.AUDIO;
pub const WPCFLAG_IM_FEATURE_GAME = WPCFLAG_IM_FEATURE.GAME;
pub const WPCFLAG_IM_FEATURE_SMS = WPCFLAG_IM_FEATURE.SMS;
pub const WPCFLAG_IM_FEATURE_FILESWAP = WPCFLAG_IM_FEATURE.FILESWAP;
pub const WPCFLAG_IM_FEATURE_URLSWAP = WPCFLAG_IM_FEATURE.URLSWAP;
pub const WPCFLAG_IM_FEATURE_SENDING = WPCFLAG_IM_FEATURE.SENDING;
pub const WPCFLAG_IM_FEATURE_ALL = WPCFLAG_IM_FEATURE.ALL;
// Argument indices for an IM-feature-use event.
pub const WPC_ARGS_IMFEATUREEVENT = enum(i32) {
APPNAME = 0,
APPVERSION = 1,
ACCOUNTNAME = 2,
CONVID = 3,
MEDIATYPE = 4,
REASON = 5,
RECIPCOUNT = 6,
RECIPIENT = 7,
SENDER = 8,
SENDERIP = 9,
DATA = 10,
CARGS = 11,
};
pub const WPC_ARGS_IMFEATUREEVENT_APPNAME = WPC_ARGS_IMFEATUREEVENT.APPNAME;
pub const WPC_ARGS_IMFEATUREEVENT_APPVERSION = WPC_ARGS_IMFEATUREEVENT.APPVERSION;
pub const WPC_ARGS_IMFEATUREEVENT_ACCOUNTNAME = WPC_ARGS_IMFEATUREEVENT.ACCOUNTNAME;
pub const WPC_ARGS_IMFEATUREEVENT_CONVID = WPC_ARGS_IMFEATUREEVENT.CONVID;
pub const WPC_ARGS_IMFEATUREEVENT_MEDIATYPE = WPC_ARGS_IMFEATUREEVENT.MEDIATYPE;
pub const WPC_ARGS_IMFEATUREEVENT_REASON = WPC_ARGS_IMFEATUREEVENT.REASON;
pub const WPC_ARGS_IMFEATUREEVENT_RECIPCOUNT = WPC_ARGS_IMFEATUREEVENT.RECIPCOUNT;
pub const WPC_ARGS_IMFEATUREEVENT_RECIPIENT = WPC_ARGS_IMFEATUREEVENT.RECIPIENT;
pub const WPC_ARGS_IMFEATUREEVENT_SENDER = WPC_ARGS_IMFEATUREEVENT.SENDER;
pub const WPC_ARGS_IMFEATUREEVENT_SENDERIP = WPC_ARGS_IMFEATUREEVENT.SENDERIP;
pub const WPC_ARGS_IMFEATUREEVENT_DATA = WPC_ARGS_IMFEATUREEVENT.DATA;
pub const WPC_ARGS_IMFEATUREEVENT_CARGS = WPC_ARGS_IMFEATUREEVENT.CARGS;
// Argument indices for an IM contact change event.
pub const WPC_ARGS_IMCONTACTEVENT = enum(i32) {
APPNAME = 0,
APPVERSION = 1,
ACCOUNTNAME = 2,
OLDNAME = 3,
OLDID = 4,
NEWNAME = 5,
NEWID = 6,
REASON = 7,
CARGS = 8,
};
pub const WPC_ARGS_IMCONTACTEVENT_APPNAME = WPC_ARGS_IMCONTACTEVENT.APPNAME;
pub const WPC_ARGS_IMCONTACTEVENT_APPVERSION = WPC_ARGS_IMCONTACTEVENT.APPVERSION;
pub const WPC_ARGS_IMCONTACTEVENT_ACCOUNTNAME = WPC_ARGS_IMCONTACTEVENT.ACCOUNTNAME;
pub const WPC_ARGS_IMCONTACTEVENT_OLDNAME = WPC_ARGS_IMCONTACTEVENT.OLDNAME;
pub const WPC_ARGS_IMCONTACTEVENT_OLDID = WPC_ARGS_IMCONTACTEVENT.OLDID;
pub const WPC_ARGS_IMCONTACTEVENT_NEWNAME = WPC_ARGS_IMCONTACTEVENT.NEWNAME;
pub const WPC_ARGS_IMCONTACTEVENT_NEWID = WPC_ARGS_IMCONTACTEVENT.NEWID;
pub const WPC_ARGS_IMCONTACTEVENT_REASON = WPC_ARGS_IMCONTACTEVENT.REASON;
pub const WPC_ARGS_IMCONTACTEVENT_CARGS = WPC_ARGS_IMCONTACTEVENT.CARGS;
// Argument indices for a game-start event.
pub const WPC_ARGS_GAMESTARTEVENT = enum(i32) {
APPID = 0,
INSTANCEID = 1,
APPVERSION = 2,
PATH = 3,
RATING = 4,
RATINGSYSTEM = 5,
REASON = 6,
DESCCOUNT = 7,
DESCRIPTOR = 8,
PID = 9,
CARGS = 10,
};
pub const WPC_ARGS_GAMESTARTEVENT_APPID = WPC_ARGS_GAMESTARTEVENT.APPID;
pub const WPC_ARGS_GAMESTARTEVENT_INSTANCEID = WPC_ARGS_GAMESTARTEVENT.INSTANCEID;
pub const WPC_ARGS_GAMESTARTEVENT_APPVERSION = WPC_ARGS_GAMESTARTEVENT.APPVERSION;
pub const WPC_ARGS_GAMESTARTEVENT_PATH = WPC_ARGS_GAMESTARTEVENT.PATH;
pub const WPC_ARGS_GAMESTARTEVENT_RATING = WPC_ARGS_GAMESTARTEVENT.RATING;
pub const WPC_ARGS_GAMESTARTEVENT_RATINGSYSTEM = WPC_ARGS_GAMESTARTEVENT.RATINGSYSTEM;
pub const WPC_ARGS_GAMESTARTEVENT_REASON = WPC_ARGS_GAMESTARTEVENT.REASON;
pub const WPC_ARGS_GAMESTARTEVENT_DESCCOUNT = WPC_ARGS_GAMESTARTEVENT.DESCCOUNT;
pub const WPC_ARGS_GAMESTARTEVENT_DESCRIPTOR = WPC_ARGS_GAMESTARTEVENT.DESCRIPTOR;
pub const WPC_ARGS_GAMESTARTEVENT_PID = WPC_ARGS_GAMESTARTEVENT.PID;
pub const WPC_ARGS_GAMESTARTEVENT_CARGS = WPC_ARGS_GAMESTARTEVENT.CARGS;
// Argument indices for a file-download event.
pub const WPC_ARGS_FILEDOWNLOADEVENT = enum(i32) {
URL = 0,
APPNAME = 1,
VERSION = 2,
BLOCKED = 3,
PATH = 4,
CARGS = 5,
};
pub const WPC_ARGS_FILEDOWNLOADEVENT_URL = WPC_ARGS_FILEDOWNLOADEVENT.URL;
pub const WPC_ARGS_FILEDOWNLOADEVENT_APPNAME = WPC_ARGS_FILEDOWNLOADEVENT.APPNAME;
pub const WPC_ARGS_FILEDOWNLOADEVENT_VERSION = WPC_ARGS_FILEDOWNLOADEVENT.VERSION;
pub const WPC_ARGS_FILEDOWNLOADEVENT_BLOCKED = WPC_ARGS_FILEDOWNLOADEVENT.BLOCKED;
pub const WPC_ARGS_FILEDOWNLOADEVENT_PATH = WPC_ARGS_FILEDOWNLOADEVENT.PATH;
pub const WPC_ARGS_FILEDOWNLOADEVENT_CARGS = WPC_ARGS_FILEDOWNLOADEVENT.CARGS;
// Argument indices for a URL-visit event.
pub const WPC_ARGS_URLVISITEVENT = enum(i32) {
URL = 0,
APPNAME = 1,
VERSION = 2,
REASON = 3,
RATINGSYSTEMID = 4,
CATCOUNT = 5,
CATEGORY = 6,
CARGS = 7,
};
pub const WPC_ARGS_URLVISITEVENT_URL = WPC_ARGS_URLVISITEVENT.URL;
pub const WPC_ARGS_URLVISITEVENT_APPNAME = WPC_ARGS_URLVISITEVENT.APPNAME;
pub const WPC_ARGS_URLVISITEVENT_VERSION = WPC_ARGS_URLVISITEVENT.VERSION;
pub const WPC_ARGS_URLVISITEVENT_REASON = WPC_ARGS_URLVISITEVENT.REASON;
pub const WPC_ARGS_URLVISITEVENT_RATINGSYSTEMID = WPC_ARGS_URLVISITEVENT.RATINGSYSTEMID;
pub const WPC_ARGS_URLVISITEVENT_CATCOUNT = WPC_ARGS_URLVISITEVENT.CATCOUNT;
pub const WPC_ARGS_URLVISITEVENT_CATEGORY = WPC_ARGS_URLVISITEVENT.CATEGORY;
pub const WPC_ARGS_URLVISITEVENT_CARGS = WPC_ARGS_URLVISITEVENT.CARGS;
// Argument indices for a website-visit event.
pub const WPC_ARGS_WEBSITEVISITEVENT = enum(i32) {
URL = 0,
DECISION = 1,
CATEGORIES = 2,
BLOCKEDCATEGORIES = 3,
SERIALIZEDAPPLICATION = 4,
TITLE = 5,
CONTENTTYPE = 6,
REFERRER = 7,
TELEMETRY = 8,
CARGS = 9,
};
pub const WPC_ARGS_WEBSITEVISITEVENT_URL = WPC_ARGS_WEBSITEVISITEVENT.URL;
pub const WPC_ARGS_WEBSITEVISITEVENT_DECISION = WPC_ARGS_WEBSITEVISITEVENT.DECISION;
pub const WPC_ARGS_WEBSITEVISITEVENT_CATEGORIES = WPC_ARGS_WEBSITEVISITEVENT.CATEGORIES;
pub const WPC_ARGS_WEBSITEVISITEVENT_BLOCKEDCATEGORIES = WPC_ARGS_WEBSITEVISITEVENT.BLOCKEDCATEGORIES;
pub const WPC_ARGS_WEBSITEVISITEVENT_SERIALIZEDAPPLICATION = WPC_ARGS_WEBSITEVISITEVENT.SERIALIZEDAPPLICATION;
pub const WPC_ARGS_WEBSITEVISITEVENT_TITLE = WPC_ARGS_WEBSITEVISITEVENT.TITLE;
pub const WPC_ARGS_WEBSITEVISITEVENT_CONTENTTYPE = WPC_ARGS_WEBSITEVISITEVENT.CONTENTTYPE;
pub const WPC_ARGS_WEBSITEVISITEVENT_REFERRER = WPC_ARGS_WEBSITEVISITEVENT.REFERRER;
pub const WPC_ARGS_WEBSITEVISITEVENT_TELEMETRY = WPC_ARGS_WEBSITEVISITEVENT.TELEMETRY;
pub const WPC_ARGS_WEBSITEVISITEVENT_CARGS = WPC_ARGS_WEBSITEVISITEVENT.CARGS;
// Argument indices for an application (launch/usage) event.
pub const WPC_ARGS_APPLICATIONEVENT = enum(i32) {
SERIALIZEDAPPLICATION = 0,
DECISION = 1,
PROCESSID = 2,
CREATIONTIME = 3,
TIMEUSED = 4,
CARGS = 5,
};
pub const WPC_ARGS_APPLICATIONEVENT_SERIALIZEDAPPLICATION = WPC_ARGS_APPLICATIONEVENT.SERIALIZEDAPPLICATION;
pub const WPC_ARGS_APPLICATIONEVENT_DECISION = WPC_ARGS_APPLICATIONEVENT.DECISION;
pub const WPC_ARGS_APPLICATIONEVENT_PROCESSID = WPC_ARGS_APPLICATIONEVENT.PROCESSID;
pub const WPC_ARGS_APPLICATIONEVENT_CREATIONTIME = WPC_ARGS_APPLICATIONEVENT.CREATIONTIME;
pub const WPC_ARGS_APPLICATIONEVENT_TIMEUSED = WPC_ARGS_APPLICATIONEVENT.TIMEUSED;
pub const WPC_ARGS_APPLICATIONEVENT_CARGS = WPC_ARGS_APPLICATIONEVENT.CARGS;
// Argument indices for a computer-usage event.
pub const WPC_ARGS_COMPUTERUSAGEEVENT = enum(i32) {
ID = 0,
TIMEUSED = 1,
CARGS = 2,
};
pub const WPC_ARGS_COMPUTERUSAGEEVENT_ID = WPC_ARGS_COMPUTERUSAGEEVENT.ID;
pub const WPC_ARGS_COMPUTERUSAGEEVENT_TIMEUSED = WPC_ARGS_COMPUTERUSAGEEVENT.TIMEUSED;
pub const WPC_ARGS_COMPUTERUSAGEEVENT_CARGS = WPC_ARGS_COMPUTERUSAGEEVENT.CARGS;
// Argument indices for a content-usage event.
pub const WPC_ARGS_CONTENTUSAGEEVENT = enum(i32) {
CONTENTPROVIDERID = 0,
CONTENTPROVIDERTITLE = 1,
ID = 2,
TITLE = 3,
CATEGORY = 4,
RATINGS = 5,
DECISION = 6,
CARGS = 7,
};
pub const WPC_ARGS_CONTENTUSAGEEVENT_CONTENTPROVIDERID = WPC_ARGS_CONTENTUSAGEEVENT.CONTENTPROVIDERID;
pub const WPC_ARGS_CONTENTUSAGEEVENT_CONTENTPROVIDERTITLE = WPC_ARGS_CONTENTUSAGEEVENT.CONTENTPROVIDERTITLE;
pub const WPC_ARGS_CONTENTUSAGEEVENT_ID = WPC_ARGS_CONTENTUSAGEEVENT.ID;
pub const WPC_ARGS_CONTENTUSAGEEVENT_TITLE = WPC_ARGS_CONTENTUSAGEEVENT.TITLE;
pub const WPC_ARGS_CONTENTUSAGEEVENT_CATEGORY = WPC_ARGS_CONTENTUSAGEEVENT.CATEGORY;
pub const WPC_ARGS_CONTENTUSAGEEVENT_RATINGS = WPC_ARGS_CONTENTUSAGEEVENT.RATINGS;
pub const WPC_ARGS_CONTENTUSAGEEVENT_DECISION = WPC_ARGS_CONTENTUSAGEEVENT.DECISION;
pub const WPC_ARGS_CONTENTUSAGEEVENT_CARGS = WPC_ARGS_CONTENTUSAGEEVENT.CARGS;
// Argument indices for a provider-defined custom event.
pub const WPC_ARGS_CUSTOMEVENT = enum(i32) {
PUBLISHER = 0,
APPNAME = 1,
APPVERSION = 2,
EVENT = 3,
VALUE1 = 4,
VALUE2 = 5,
VALUE3 = 6,
BLOCKED = 7,
REASON = 8,
CARGS = 9,
};
pub const WPC_ARGS_CUSTOMEVENT_PUBLISHER = WPC_ARGS_CUSTOMEVENT.PUBLISHER;
pub const WPC_ARGS_CUSTOMEVENT_APPNAME = WPC_ARGS_CUSTOMEVENT.APPNAME;
pub const WPC_ARGS_CUSTOMEVENT_APPVERSION = WPC_ARGS_CUSTOMEVENT.APPVERSION;
pub const WPC_ARGS_CUSTOMEVENT_EVENT = WPC_ARGS_CUSTOMEVENT.EVENT;
pub const WPC_ARGS_CUSTOMEVENT_VALUE1 = WPC_ARGS_CUSTOMEVENT.VALUE1;
pub const WPC_ARGS_CUSTOMEVENT_VALUE2 = WPC_ARGS_CUSTOMEVENT.VALUE2;
pub const WPC_ARGS_CUSTOMEVENT_VALUE3 = WPC_ARGS_CUSTOMEVENT.VALUE3;
pub const WPC_ARGS_CUSTOMEVENT_BLOCKED = WPC_ARGS_CUSTOMEVENT.BLOCKED;
pub const WPC_ARGS_CUSTOMEVENT_REASON = WPC_ARGS_CUSTOMEVENT.REASON;
pub const WPC_ARGS_CUSTOMEVENT_CARGS = WPC_ARGS_CUSTOMEVENT.CARGS;
// Argument indices for a web-override event.
pub const WPC_ARGS_WEBOVERRIDEEVENT = enum(i32) {
USERID = 0,
URL = 1,
REASON = 2,
CARGS = 3,
};
pub const WPC_ARGS_WEBOVERRIDEEVENT_USERID = WPC_ARGS_WEBOVERRIDEEVENT.USERID;
pub const WPC_ARGS_WEBOVERRIDEEVENT_URL = WPC_ARGS_WEBOVERRIDEEVENT.URL;
pub const WPC_ARGS_WEBOVERRIDEEVENT_REASON = WPC_ARGS_WEBOVERRIDEEVENT.REASON;
pub const WPC_ARGS_WEBOVERRIDEEVENT_CARGS = WPC_ARGS_WEBOVERRIDEEVENT.CARGS;
// Argument indices for an application-override event.
pub const WPC_ARGS_APPOVERRIDEEVENT = enum(i32) {
USERID = 0,
PATH = 1,
REASON = 2,
CARGS = 3,
};
pub const WPC_ARGS_APPOVERRIDEEVENT_USERID = WPC_ARGS_APPOVERRIDEEVENT.USERID;
pub const WPC_ARGS_APPOVERRIDEEVENT_PATH = WPC_ARGS_APPOVERRIDEEVENT.PATH;
pub const WPC_ARGS_APPOVERRIDEEVENT_REASON = WPC_ARGS_APPOVERRIDEEVENT.REASON;
pub const WPC_ARGS_APPOVERRIDEEVENT_CARGS = WPC_ARGS_APPOVERRIDEEVENT.CARGS;
// Setting identifiers; _COUNT is a sentinel equal to the number of settings.
// Note the aliases below drop the leading "S_" from each member name.
pub const WPC_SETTINGS = enum(i32) {
S_WPC_EXTENSION_PATH = 0,
S_WPC_EXTENSION_SILO = 1,
S_WPC_EXTENSION_IMAGE_PATH = 2,
S_WPC_EXTENSION_DISABLEDIMAGE_PATH = 3,
S_WPC_EXTENSION_NAME = 4,
S_WPC_EXTENSION_SUB_TITLE = 5,
S_SYSTEM_CURRENT_RATING_SYSTEM = 6,
S_SYSTEM_LAST_LOG_VIEW = 7,
S_SYSTEM_LOG_VIEW_REMINDER_INTERVAL = 8,
S_SYSTEM_HTTP_EXEMPTION_LIST = 9,
S_SYSTEM_URL_EXEMPTION_LIST = 10,
S_SYSTEM_FILTER_ID = 11,
S_SYSTEM_FILTER_NAME = 12,
S_SYSTEM_LOCALE = 13,
S_ALLOW_BLOCK = 14,
S_GAME_BLOCKED = 15,
S_GAME_ALLOW_UNRATED = 16,
S_GAME_MAX_ALLOWED = 17,
S_GAME_DENIED_DESCRIPTORS = 18,
S_USER_WPC_ENABLED = 19,
S_USER_LOGGING_REQUIRED = 20,
S_USER_HOURLY_RESTRICTIONS = 21,
S_USER_OVERRRIDE_REQUESTS = 22,
S_USER_LOGON_HOURS = 23,
S_USER_APP_RESTRICTIONS = 24,
S_WEB_FILTER_ON = 25,
S_WEB_DOWNLOAD_BLOCKED = 26,
S_WEB_FILTER_LEVEL = 27,
S_WEB_BLOCKED_CATEGORY_LIST = 28,
S_WEB_BLOCK_UNRATED = 29,
S_WPC_ENABLED = 30,
S_WPC_LOGGING_REQUIRED = 31,
S_RATING_SYSTEM_PATH = 32,
S_WPC_PROVIDER_CURRENT = 33,
S_USER_TIME_ALLOWANCE = 34,
S_USER_TIME_ALLOWANCE_RESTRICTIONS = 35,
S_GAME_RESTRICTED = 36,
_COUNT = 37,
};
pub const WPC_SETTINGS_WPC_EXTENSION_PATH = WPC_SETTINGS.S_WPC_EXTENSION_PATH;
pub const WPC_SETTINGS_WPC_EXTENSION_SILO = WPC_SETTINGS.S_WPC_EXTENSION_SILO;
pub const WPC_SETTINGS_WPC_EXTENSION_IMAGE_PATH = WPC_SETTINGS.S_WPC_EXTENSION_IMAGE_PATH;
pub const WPC_SETTINGS_WPC_EXTENSION_DISABLEDIMAGE_PATH = WPC_SETTINGS.S_WPC_EXTENSION_DISABLEDIMAGE_PATH;
pub const WPC_SETTINGS_WPC_EXTENSION_NAME = WPC_SETTINGS.S_WPC_EXTENSION_NAME;
pub const WPC_SETTINGS_WPC_EXTENSION_SUB_TITLE = WPC_SETTINGS.S_WPC_EXTENSION_SUB_TITLE;
pub const WPC_SETTINGS_SYSTEM_CURRENT_RATING_SYSTEM = WPC_SETTINGS.S_SYSTEM_CURRENT_RATING_SYSTEM;
pub const WPC_SETTINGS_SYSTEM_LAST_LOG_VIEW = WPC_SETTINGS.S_SYSTEM_LAST_LOG_VIEW;
pub const WPC_SETTINGS_SYSTEM_LOG_VIEW_REMINDER_INTERVAL = WPC_SETTINGS.S_SYSTEM_LOG_VIEW_REMINDER_INTERVAL;
pub const WPC_SETTINGS_SYSTEM_HTTP_EXEMPTION_LIST = WPC_SETTINGS.S_SYSTEM_HTTP_EXEMPTION_LIST;
pub const WPC_SETTINGS_SYSTEM_URL_EXEMPTION_LIST = WPC_SETTINGS.S_SYSTEM_URL_EXEMPTION_LIST;
pub const WPC_SETTINGS_SYSTEM_FILTER_ID = WPC_SETTINGS.S_SYSTEM_FILTER_ID;
pub const WPC_SETTINGS_SYSTEM_FILTER_NAME = WPC_SETTINGS.S_SYSTEM_FILTER_NAME;
pub const WPC_SETTINGS_SYSTEM_LOCALE = WPC_SETTINGS.S_SYSTEM_LOCALE;
pub const WPC_SETTINGS_ALLOW_BLOCK = WPC_SETTINGS.S_ALLOW_BLOCK;
pub const WPC_SETTINGS_GAME_BLOCKED = WPC_SETTINGS.S_GAME_BLOCKED;
pub const WPC_SETTINGS_GAME_ALLOW_UNRATED = WPC_SETTINGS.S_GAME_ALLOW_UNRATED;
pub const WPC_SETTINGS_GAME_MAX_ALLOWED = WPC_SETTINGS.S_GAME_MAX_ALLOWED;
pub const WPC_SETTINGS_GAME_DENIED_DESCRIPTORS = WPC_SETTINGS.S_GAME_DENIED_DESCRIPTORS;
pub const WPC_SETTINGS_USER_WPC_ENABLED = WPC_SETTINGS.S_USER_WPC_ENABLED;
pub const WPC_SETTINGS_USER_LOGGING_REQUIRED = WPC_SETTINGS.S_USER_LOGGING_REQUIRED;
pub const WPC_SETTINGS_USER_HOURLY_RESTRICTIONS = WPC_SETTINGS.S_USER_HOURLY_RESTRICTIONS;
pub const WPC_SETTINGS_USER_OVERRRIDE_REQUESTS = WPC_SETTINGS.S_USER_OVERRRIDE_REQUESTS;
pub const WPC_SETTINGS_USER_LOGON_HOURS = WPC_SETTINGS.S_USER_LOGON_HOURS;
pub const WPC_SETTINGS_USER_APP_RESTRICTIONS = WPC_SETTINGS.S_USER_APP_RESTRICTIONS;
pub const WPC_SETTINGS_WEB_FILTER_ON = WPC_SETTINGS.S_WEB_FILTER_ON;
pub const WPC_SETTINGS_WEB_DOWNLOAD_BLOCKED = WPC_SETTINGS.S_WEB_DOWNLOAD_BLOCKED;
pub const WPC_SETTINGS_WEB_FILTER_LEVEL = WPC_SETTINGS.S_WEB_FILTER_LEVEL;
pub const WPC_SETTINGS_WEB_BLOCKED_CATEGORY_LIST = WPC_SETTINGS.S_WEB_BLOCKED_CATEGORY_LIST;
pub const WPC_SETTINGS_WEB_BLOCK_UNRATED = WPC_SETTINGS.S_WEB_BLOCK_UNRATED;
pub const WPC_SETTINGS_WPC_ENABLED = WPC_SETTINGS.S_WPC_ENABLED;
pub const WPC_SETTINGS_WPC_LOGGING_REQUIRED = WPC_SETTINGS.S_WPC_LOGGING_REQUIRED;
pub const WPC_SETTINGS_RATING_SYSTEM_PATH = WPC_SETTINGS.S_RATING_SYSTEM_PATH;
pub const WPC_SETTINGS_WPC_PROVIDER_CURRENT = WPC_SETTINGS.S_WPC_PROVIDER_CURRENT;
pub const WPC_SETTINGS_USER_TIME_ALLOWANCE = WPC_SETTINGS.S_USER_TIME_ALLOWANCE;
pub const WPC_SETTINGS_USER_TIME_ALLOWANCE_RESTRICTIONS = WPC_SETTINGS.S_USER_TIME_ALLOWANCE_RESTRICTIONS;
pub const WPC_SETTINGS_GAME_RESTRICTED = WPC_SETTINGS.S_GAME_RESTRICTED;
pub const WPC_SETTING_COUNT = WPC_SETTINGS._COUNT;
//--------------------------------------------------------------------------------
// Section: Functions (0)
//--------------------------------------------------------------------------------
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
// This module declares no A/W function pairs, so every branch of the unicode
// switch below is an empty struct; the switch is kept for structural
// consistency with other generated win32 modules.
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
.ansi => struct {
},
.wide => struct {
},
.unspecified => if (@import("builtin").is_test) struct {
} else struct {
},
};
//--------------------------------------------------------------------------------
// Section: Imports (8)
//--------------------------------------------------------------------------------
const Guid = @import("../zig.zig").Guid;
const BOOL = @import("../foundation.zig").BOOL;
const BSTR = @import("../foundation.zig").BSTR;
const HRESULT = @import("../foundation.zig").HRESULT;
const HWND = @import("../foundation.zig").HWND;
const IUnknown = @import("../system/com.zig").IUnknown;
const PWSTR = @import("../foundation.zig").PWSTR;
const SYSTEMTIME = @import("../foundation.zig").SYSTEMTIME;
test {
// Scale the comptime eval-branch quota with the number of declarations so
// the inline loop below does not exceed the default branch limit.
@setEvalBranchQuota(
@import("std").meta.declarations(@This()).len * 3
);
// reference all the pub declarations
// (forces semantic analysis of every public decl in this module)
if (!@import("builtin").is_test) return;
inline for (@import("std").meta.declarations(@This())) |decl| {
if (decl.is_pub) {
_ = decl;
}
}
}
// ---- end of win32/system/parental_controls.zig ----
// The declarations below belong to the following module (DHCP bindings);
// a file-concatenation separator was replaced by this comment.
// BOOTP/DHCP option tag values; the numeric codes match the standard
// option codes defined in RFC 2132 (0 = pad … 255 = end).
pub const OPTION_PAD = @as(u32, 0);
pub const OPTION_SUBNET_MASK = @as(u32, 1);
pub const OPTION_TIME_OFFSET = @as(u32, 2);
pub const OPTION_ROUTER_ADDRESS = @as(u32, 3);
pub const OPTION_TIME_SERVERS = @as(u32, 4);
pub const OPTION_IEN116_NAME_SERVERS = @as(u32, 5);
pub const OPTION_DOMAIN_NAME_SERVERS = @as(u32, 6);
pub const OPTION_LOG_SERVERS = @as(u32, 7);
pub const OPTION_COOKIE_SERVERS = @as(u32, 8);
pub const OPTION_LPR_SERVERS = @as(u32, 9);
pub const OPTION_IMPRESS_SERVERS = @as(u32, 10);
pub const OPTION_RLP_SERVERS = @as(u32, 11);
pub const OPTION_HOST_NAME = @as(u32, 12);
pub const OPTION_BOOT_FILE_SIZE = @as(u32, 13);
pub const OPTION_MERIT_DUMP_FILE = @as(u32, 14);
pub const OPTION_DOMAIN_NAME = @as(u32, 15);
pub const OPTION_SWAP_SERVER = @as(u32, 16);
pub const OPTION_ROOT_DISK = @as(u32, 17);
pub const OPTION_EXTENSIONS_PATH = @as(u32, 18);
pub const OPTION_BE_A_ROUTER = @as(u32, 19);
pub const OPTION_NON_LOCAL_SOURCE_ROUTING = @as(u32, 20);
pub const OPTION_POLICY_FILTER_FOR_NLSR = @as(u32, 21);
pub const OPTION_MAX_REASSEMBLY_SIZE = @as(u32, 22);
pub const OPTION_DEFAULT_TTL = @as(u32, 23);
pub const OPTION_PMTU_AGING_TIMEOUT = @as(u32, 24);
pub const OPTION_PMTU_PLATEAU_TABLE = @as(u32, 25);
pub const OPTION_MTU = @as(u32, 26);
pub const OPTION_ALL_SUBNETS_MTU = @as(u32, 27);
pub const OPTION_BROADCAST_ADDRESS = @as(u32, 28);
pub const OPTION_PERFORM_MASK_DISCOVERY = @as(u32, 29);
pub const OPTION_BE_A_MASK_SUPPLIER = @as(u32, 30);
pub const OPTION_PERFORM_ROUTER_DISCOVERY = @as(u32, 31);
pub const OPTION_ROUTER_SOLICITATION_ADDR = @as(u32, 32);
pub const OPTION_STATIC_ROUTES = @as(u32, 33);
pub const OPTION_TRAILERS = @as(u32, 34);
pub const OPTION_ARP_CACHE_TIMEOUT = @as(u32, 35);
pub const OPTION_ETHERNET_ENCAPSULATION = @as(u32, 36);
pub const OPTION_TTL = @as(u32, 37);
pub const OPTION_KEEP_ALIVE_INTERVAL = @as(u32, 38);
pub const OPTION_KEEP_ALIVE_DATA_SIZE = @as(u32, 39);
pub const OPTION_NETWORK_INFO_SERVICE_DOM = @as(u32, 40);
pub const OPTION_NETWORK_INFO_SERVERS = @as(u32, 41);
pub const OPTION_NETWORK_TIME_SERVERS = @as(u32, 42);
pub const OPTION_VENDOR_SPEC_INFO = @as(u32, 43);
pub const OPTION_NETBIOS_NAME_SERVER = @as(u32, 44);
pub const OPTION_NETBIOS_DATAGRAM_SERVER = @as(u32, 45);
pub const OPTION_NETBIOS_NODE_TYPE = @as(u32, 46);
pub const OPTION_NETBIOS_SCOPE_OPTION = @as(u32, 47);
pub const OPTION_XWINDOW_FONT_SERVER = @as(u32, 48);
pub const OPTION_XWINDOW_DISPLAY_MANAGER = @as(u32, 49);
pub const OPTION_REQUESTED_ADDRESS = @as(u32, 50);
pub const OPTION_LEASE_TIME = @as(u32, 51);
pub const OPTION_OK_TO_OVERLAY = @as(u32, 52);
pub const OPTION_MESSAGE_TYPE = @as(u32, 53);
pub const OPTION_SERVER_IDENTIFIER = @as(u32, 54);
pub const OPTION_PARAMETER_REQUEST_LIST = @as(u32, 55);
pub const OPTION_MESSAGE = @as(u32, 56);
pub const OPTION_MESSAGE_LENGTH = @as(u32, 57);
pub const OPTION_RENEWAL_TIME = @as(u32, 58);
pub const OPTION_REBIND_TIME = @as(u32, 59);
pub const OPTION_CLIENT_CLASS_INFO = @as(u32, 60);
pub const OPTION_CLIENT_ID = @as(u32, 61);
pub const OPTION_TFTP_SERVER_NAME = @as(u32, 66);
pub const OPTION_BOOTFILE_NAME = @as(u32, 67);
pub const OPTION_MSFT_IE_PROXY = @as(u32, 252);
pub const OPTION_END = @as(u32, 255);
// Flag bits for the DHCP client API request/registration calls.
// DHCPCAPI_REQUEST_MASK (15) is the union of the four request-flag bits.
pub const DHCPCAPI_REQUEST_PERSISTENT = @as(u32, 1);
pub const DHCPCAPI_REQUEST_SYNCHRONOUS = @as(u32, 2);
pub const DHCPCAPI_REQUEST_ASYNCHRONOUS = @as(u32, 4);
pub const DHCPCAPI_REQUEST_CANCEL = @as(u32, 8);
pub const DHCPCAPI_REQUEST_MASK = @as(u32, 15);
pub const DHCPCAPI_REGISTER_HANDLE_EVENT = @as(u32, 1);
pub const DHCPCAPI_DEREGISTER_HANDLE_EVENT = @as(u32, 1);
// DHCP server management API error codes (range 20000–20139). Note a few
// names keep the SDK's original spelling (e.g. "EXITS" for "EXISTS").
pub const ERROR_DHCP_REGISTRY_INIT_FAILED = @as(u32, 20000);
pub const ERROR_DHCP_DATABASE_INIT_FAILED = @as(u32, 20001);
pub const ERROR_DHCP_RPC_INIT_FAILED = @as(u32, 20002);
pub const ERROR_DHCP_NETWORK_INIT_FAILED = @as(u32, 20003);
pub const ERROR_DHCP_SUBNET_EXITS = @as(u32, 20004);
pub const ERROR_DHCP_SUBNET_NOT_PRESENT = @as(u32, 20005);
pub const ERROR_DHCP_PRIMARY_NOT_FOUND = @as(u32, 20006);
pub const ERROR_DHCP_ELEMENT_CANT_REMOVE = @as(u32, 20007);
pub const ERROR_DHCP_OPTION_EXITS = @as(u32, 20009);
pub const ERROR_DHCP_OPTION_NOT_PRESENT = @as(u32, 20010);
pub const ERROR_DHCP_ADDRESS_NOT_AVAILABLE = @as(u32, 20011);
pub const ERROR_DHCP_RANGE_FULL = @as(u32, 20012);
pub const ERROR_DHCP_JET_ERROR = @as(u32, 20013);
pub const ERROR_DHCP_CLIENT_EXISTS = @as(u32, 20014);
pub const ERROR_DHCP_INVALID_DHCP_MESSAGE = @as(u32, 20015);
pub const ERROR_DHCP_INVALID_DHCP_CLIENT = @as(u32, 20016);
pub const ERROR_DHCP_SERVICE_PAUSED = @as(u32, 20017);
pub const ERROR_DHCP_NOT_RESERVED_CLIENT = @as(u32, 20018);
pub const ERROR_DHCP_RESERVED_CLIENT = @as(u32, 20019);
pub const ERROR_DHCP_RANGE_TOO_SMALL = @as(u32, 20020);
pub const ERROR_DHCP_IPRANGE_EXITS = @as(u32, 20021);
pub const ERROR_DHCP_RESERVEDIP_EXITS = @as(u32, 20022);
pub const ERROR_DHCP_INVALID_RANGE = @as(u32, 20023);
pub const ERROR_DHCP_RANGE_EXTENDED = @as(u32, 20024);
pub const ERROR_EXTEND_TOO_SMALL = @as(u32, 20025);
// NOTE(review): declared as i32 unlike the surrounding u32 codes; this
// mirrors the generator's output — confirm before normalizing.
pub const WARNING_EXTENDED_LESS = @as(i32, 20026);
pub const ERROR_DHCP_JET_CONV_REQUIRED = @as(u32, 20027);
pub const ERROR_SERVER_INVALID_BOOT_FILE_TABLE = @as(u32, 20028);
pub const ERROR_SERVER_UNKNOWN_BOOT_FILE_NAME = @as(u32, 20029);
pub const ERROR_DHCP_SUPER_SCOPE_NAME_TOO_LONG = @as(u32, 20030);
pub const ERROR_DHCP_IP_ADDRESS_IN_USE = @as(u32, 20032);
pub const ERROR_DHCP_LOG_FILE_PATH_TOO_LONG = @as(u32, 20033);
pub const ERROR_DHCP_UNSUPPORTED_CLIENT = @as(u32, 20034);
pub const ERROR_DHCP_JET97_CONV_REQUIRED = @as(u32, 20036);
pub const ERROR_DHCP_ROGUE_INIT_FAILED = @as(u32, 20037);
pub const ERROR_DHCP_ROGUE_SAMSHUTDOWN = @as(u32, 20038);
pub const ERROR_DHCP_ROGUE_NOT_AUTHORIZED = @as(u32, 20039);
pub const ERROR_DHCP_ROGUE_DS_UNREACHABLE = @as(u32, 20040);
pub const ERROR_DHCP_ROGUE_DS_CONFLICT = @as(u32, 20041);
pub const ERROR_DHCP_ROGUE_NOT_OUR_ENTERPRISE = @as(u32, 20042);
pub const ERROR_DHCP_ROGUE_STANDALONE_IN_DS = @as(u32, 20043);
pub const ERROR_DHCP_CLASS_NOT_FOUND = @as(u32, 20044);
pub const ERROR_DHCP_CLASS_ALREADY_EXISTS = @as(u32, 20045);
pub const ERROR_DHCP_SCOPE_NAME_TOO_LONG = @as(u32, 20046);
pub const ERROR_DHCP_DEFAULT_SCOPE_EXITS = @as(u32, 20047);
pub const ERROR_DHCP_CANT_CHANGE_ATTRIBUTE = @as(u32, 20048);
pub const ERROR_DHCP_IPRANGE_CONV_ILLEGAL = @as(u32, 20049);
pub const ERROR_DHCP_NETWORK_CHANGED = @as(u32, 20050);
pub const ERROR_DHCP_CANNOT_MODIFY_BINDINGS = @as(u32, 20051);
pub const ERROR_DHCP_SUBNET_EXISTS = @as(u32, 20052);
pub const ERROR_DHCP_MSCOPE_EXISTS = @as(u32, 20053);
pub const ERROR_MSCOPE_RANGE_TOO_SMALL = @as(u32, 20054);
pub const ERROR_DHCP_EXEMPTION_EXISTS = @as(u32, 20055);
pub const ERROR_DHCP_EXEMPTION_NOT_PRESENT = @as(u32, 20056);
pub const ERROR_DHCP_INVALID_PARAMETER_OPTION32 = @as(u32, 20057);
pub const ERROR_DDS_NO_DS_AVAILABLE = @as(u32, 20070);
pub const ERROR_DDS_NO_DHCP_ROOT = @as(u32, 20071);
pub const ERROR_DDS_UNEXPECTED_ERROR = @as(u32, 20072);
pub const ERROR_DDS_TOO_MANY_ERRORS = @as(u32, 20073);
pub const ERROR_DDS_DHCP_SERVER_NOT_FOUND = @as(u32, 20074);
pub const ERROR_DDS_OPTION_ALREADY_EXISTS = @as(u32, 20075);
pub const ERROR_DDS_OPTION_DOES_NOT_EXIST = @as(u32, 20076);
pub const ERROR_DDS_CLASS_EXISTS = @as(u32, 20077);
pub const ERROR_DDS_CLASS_DOES_NOT_EXIST = @as(u32, 20078);
pub const ERROR_DDS_SERVER_ALREADY_EXISTS = @as(u32, 20079);
pub const ERROR_DDS_SERVER_DOES_NOT_EXIST = @as(u32, 20080);
pub const ERROR_DDS_SERVER_ADDRESS_MISMATCH = @as(u32, 20081);
pub const ERROR_DDS_SUBNET_EXISTS = @as(u32, 20082);
pub const ERROR_DDS_SUBNET_HAS_DIFF_SSCOPE = @as(u32, 20083);
pub const ERROR_DDS_SUBNET_NOT_PRESENT = @as(u32, 20084);
pub const ERROR_DDS_RESERVATION_NOT_PRESENT = @as(u32, 20085);
pub const ERROR_DDS_RESERVATION_CONFLICT = @as(u32, 20086);
pub const ERROR_DDS_POSSIBLE_RANGE_CONFLICT = @as(u32, 20087);
pub const ERROR_DDS_RANGE_DOES_NOT_EXIST = @as(u32, 20088);
pub const ERROR_DHCP_DELETE_BUILTIN_CLASS = @as(u32, 20089);
pub const ERROR_DHCP_INVALID_SUBNET_PREFIX = @as(u32, 20091);
pub const ERROR_DHCP_INVALID_DELAY = @as(u32, 20092);
pub const ERROR_DHCP_LINKLAYER_ADDRESS_EXISTS = @as(u32, 20093);
pub const ERROR_DHCP_LINKLAYER_ADDRESS_RESERVATION_EXISTS = @as(u32, 20094);
pub const ERROR_DHCP_LINKLAYER_ADDRESS_DOES_NOT_EXIST = @as(u32, 20095);
pub const ERROR_DHCP_HARDWARE_ADDRESS_TYPE_ALREADY_EXEMPT = @as(u32, 20101);
pub const ERROR_DHCP_UNDEFINED_HARDWARE_ADDRESS_TYPE = @as(u32, 20102);
pub const ERROR_DHCP_OPTION_TYPE_MISMATCH = @as(u32, 20103);
pub const ERROR_DHCP_POLICY_BAD_PARENT_EXPR = @as(u32, 20104);
pub const ERROR_DHCP_POLICY_EXISTS = @as(u32, 20105);
pub const ERROR_DHCP_POLICY_RANGE_EXISTS = @as(u32, 20106);
pub const ERROR_DHCP_POLICY_RANGE_BAD = @as(u32, 20107);
pub const ERROR_DHCP_RANGE_INVALID_IN_SERVER_POLICY = @as(u32, 20108);
pub const ERROR_DHCP_INVALID_POLICY_EXPRESSION = @as(u32, 20109);
pub const ERROR_DHCP_INVALID_PROCESSING_ORDER = @as(u32, 20110);
pub const ERROR_DHCP_POLICY_NOT_FOUND = @as(u32, 20111);
pub const ERROR_SCOPE_RANGE_POLICY_RANGE_CONFLICT = @as(u32, 20112);
pub const ERROR_DHCP_FO_SCOPE_ALREADY_IN_RELATIONSHIP = @as(u32, 20113);
pub const ERROR_DHCP_FO_RELATIONSHIP_EXISTS = @as(u32, 20114);
pub const ERROR_DHCP_FO_RELATIONSHIP_DOES_NOT_EXIST = @as(u32, 20115);
pub const ERROR_DHCP_FO_SCOPE_NOT_IN_RELATIONSHIP = @as(u32, 20116);
pub const ERROR_DHCP_FO_RELATION_IS_SECONDARY = @as(u32, 20117);
pub const ERROR_DHCP_FO_NOT_SUPPORTED = @as(u32, 20118);
pub const ERROR_DHCP_FO_TIME_OUT_OF_SYNC = @as(u32, 20119);
pub const ERROR_DHCP_FO_STATE_NOT_NORMAL = @as(u32, 20120);
pub const ERROR_DHCP_NO_ADMIN_PERMISSION = @as(u32, 20121);
pub const ERROR_DHCP_SERVER_NOT_REACHABLE = @as(u32, 20122);
pub const ERROR_DHCP_SERVER_NOT_RUNNING = @as(u32, 20123);
pub const ERROR_DHCP_SERVER_NAME_NOT_RESOLVED = @as(u32, 20124);
pub const ERROR_DHCP_FO_RELATIONSHIP_NAME_TOO_LONG = @as(u32, 20125);
pub const ERROR_DHCP_REACHED_END_OF_SELECTION = @as(u32, 20126);
pub const ERROR_DHCP_FO_ADDSCOPE_LEASES_NOT_SYNCED = @as(u32, 20127);
pub const ERROR_DHCP_FO_MAX_RELATIONSHIPS = @as(u32, 20128);
pub const ERROR_DHCP_FO_IPRANGE_TYPE_CONV_ILLEGAL = @as(u32, 20129);
pub const ERROR_DHCP_FO_MAX_ADD_SCOPES = @as(u32, 20130);
pub const ERROR_DHCP_FO_BOOT_NOT_SUPPORTED = @as(u32, 20131);
pub const ERROR_DHCP_FO_RANGE_PART_OF_REL = @as(u32, 20132);
pub const ERROR_DHCP_FO_SCOPE_SYNC_IN_PROGRESS = @as(u32, 20133);
pub const ERROR_DHCP_FO_FEATURE_NOT_SUPPORTED = @as(u32, 20134);
pub const ERROR_DHCP_POLICY_FQDN_RANGE_UNSUPPORTED = @as(u32, 20135);
pub const ERROR_DHCP_POLICY_FQDN_OPTION_UNSUPPORTED = @as(u32, 20136);
pub const ERROR_DHCP_POLICY_EDIT_FQDN_UNSUPPORTED = @as(u32, 20137);
pub const ERROR_DHCP_NAP_NOT_SUPPORTED = @as(u32, 20138);
pub const ERROR_LAST_DHCP_SERVER_ERROR = @as(u32, 20139);
// Miscellaneous DHCP server/client constants: filter matching, server
// Set_* configuration bitflags, client types, v6 lifetime flags, address
// states, DNS update flags, failover limits, callout control/drop codes,
// and DHCPv6 option codes.
pub const DHCP_SUBNET_INFO_VQ_FLAG_QUARANTINE = @as(u32, 1);
pub const MAX_PATTERN_LENGTH = @as(u32, 255);
pub const MAC_ADDRESS_LENGTH = @as(u32, 6);
pub const HWTYPE_ETHERNET_10MB = @as(u32, 1);
// Link-layer filter match results (bitflags).
pub const FILTER_STATUS_NONE = @as(u32, 1);
pub const FILTER_STATUS_FULL_MATCH_IN_ALLOW_LIST = @as(u32, 2);
pub const FILTER_STATUS_FULL_MATCH_IN_DENY_LIST = @as(u32, 4);
pub const FILTER_STATUS_WILDCARD_MATCH_IN_ALLOW_LIST = @as(u32, 8);
pub const FILTER_STATUS_WILDCARD_MATCH_IN_DENY_LIST = @as(u32, 16);
// "Which field is being set" bitflags for server configuration calls.
pub const Set_APIProtocolSupport = @as(u32, 1);
pub const Set_DatabaseName = @as(u32, 2);
pub const Set_DatabasePath = @as(u32, 4);
pub const Set_BackupPath = @as(u32, 8);
pub const Set_BackupInterval = @as(u32, 16);
pub const Set_DatabaseLoggingFlag = @as(u32, 32);
pub const Set_RestoreFlag = @as(u32, 64);
pub const Set_DatabaseCleanupInterval = @as(u32, 128);
pub const Set_DebugFlag = @as(u32, 256);
pub const Set_PingRetries = @as(u32, 512);
pub const Set_BootFileTable = @as(u32, 1024);
pub const Set_AuditLogState = @as(u32, 2048);
pub const Set_QuarantineON = @as(u32, 4096);
pub const Set_QuarantineDefFail = @as(u32, 8192);
// Lease client types.
pub const CLIENT_TYPE_UNSPECIFIED = @as(u32, 0);
pub const CLIENT_TYPE_DHCP = @as(u32, 1);
pub const CLIENT_TYPE_BOOTP = @as(u32, 2);
pub const CLIENT_TYPE_RESERVATION_FLAG = @as(u32, 4);
pub const CLIENT_TYPE_NONE = @as(u32, 100);
// DHCPv6 scope configuration Set_* bitflags.
pub const Set_UnicastFlag = @as(u32, 1);
pub const Set_RapidCommitFlag = @as(u32, 2);
pub const Set_PreferredLifetime = @as(u32, 4);
pub const Set_ValidLifetime = @as(u32, 8);
pub const Set_T1 = @as(u32, 16);
pub const Set_T2 = @as(u32, 32);
pub const Set_PreferredLifetimeIATA = @as(u32, 64);
pub const Set_ValidLifetimeIATA = @as(u32, 128);
// Lease address states (low bits) and modifier bits (high bits).
pub const V5_ADDRESS_STATE_OFFERED = @as(u32, 0);
pub const V5_ADDRESS_STATE_ACTIVE = @as(u32, 1);
pub const V5_ADDRESS_STATE_DECLINED = @as(u32, 2);
pub const V5_ADDRESS_STATE_DOOM = @as(u32, 3);
pub const V5_ADDRESS_BIT_DELETED = @as(u32, 128);
pub const V5_ADDRESS_BIT_UNREGISTERED = @as(u32, 64);
pub const V5_ADDRESS_BIT_BOTH_REC = @as(u32, 32);
pub const V5_ADDRESS_EX_BIT_DISABLE_PTR_RR = @as(u32, 1);
// Dynamic-DNS update behavior bitflags.
pub const DNS_FLAG_ENABLED = @as(u32, 1);
pub const DNS_FLAG_UPDATE_DOWNLEVEL = @as(u32, 2);
pub const DNS_FLAG_CLEANUP_EXPIRED = @as(u32, 4);
pub const DNS_FLAG_UPDATE_BOTH_ALWAYS = @as(u32, 16);
pub const DNS_FLAG_UPDATE_DHCID = @as(u32, 32);
pub const DNS_FLAG_DISABLE_PTR_UPDATE = @as(u32, 64);
pub const DNS_FLAG_HAS_DNS_SUFFIX = @as(u32, 128);
pub const DHCP_OPT_ENUM_IGNORE_VENDOR = @as(u32, 1);
pub const DHCP_OPT_ENUM_USE_CLASSNAME = @as(u32, 2);
pub const DHCP_FLAGS_DONT_ACCESS_DS = @as(u32, 1);
pub const DHCP_FLAGS_DONT_DO_RPC = @as(u32, 2);
pub const DHCP_FLAGS_OPTION_IS_VENDOR = @as(u32, 3);
// Server attribute IDs and their value types.
pub const DHCP_ATTRIB_BOOL_IS_ROGUE = @as(u32, 1);
pub const DHCP_ATTRIB_BOOL_IS_DYNBOOTP = @as(u32, 2);
pub const DHCP_ATTRIB_BOOL_IS_PART_OF_DSDC = @as(u32, 3);
pub const DHCP_ATTRIB_BOOL_IS_BINDING_AWARE = @as(u32, 4);
pub const DHCP_ATTRIB_BOOL_IS_ADMIN = @as(u32, 5);
pub const DHCP_ATTRIB_ULONG_RESTORE_STATUS = @as(u32, 6);
pub const DHCP_ATTRIB_TYPE_BOOL = @as(u32, 1);
pub const DHCP_ATTRIB_TYPE_ULONG = @as(u32, 2);
pub const DHCP_ENDPOINT_FLAG_CANT_MODIFY = @as(u32, 1);
// NAP quarantine vendor-option codes.
pub const QUARANTIN_OPTION_BASE = @as(u32, 43220);
pub const QUARANTINE_SCOPE_QUARPROFILE_OPTION = @as(u32, 43221);
pub const QUARANTINE_CONFIG_OPTION = @as(u32, 43222);
pub const ADDRESS_TYPE_IANA = @as(u32, 0);
pub const ADDRESS_TYPE_IATA = @as(u32, 1);
pub const DHCP_MIN_DELAY = @as(u32, 0);
pub const DHCP_MAX_DELAY = @as(u32, 1000);
// Failover relationship limits and update-field bitflags.
pub const DHCP_FAILOVER_DELETE_SCOPES = @as(u32, 1);
pub const DHCP_FAILOVER_MAX_NUM_ADD_SCOPES = @as(u32, 400);
pub const DHCP_FAILOVER_MAX_NUM_REL = @as(u32, 31);
pub const MCLT = @as(u32, 1);
pub const SAFEPERIOD = @as(u32, 2);
pub const CHANGESTATE = @as(u32, 4);
pub const PERCENTAGE = @as(u32, 8);
pub const MODE = @as(u32, 16);
pub const PREVSTATE = @as(u32, 32);
pub const SHAREDSECRET = @as(u32, 64);
// Control codes passed to the server callout DLL hooks (see
// LPDHCP_CONTROL / LPDHCP_DROP_SEND / LPDHCP_PROB below).
pub const DHCP_CONTROL_START = @as(u32, 1);
pub const DHCP_CONTROL_STOP = @as(u32, 2);
pub const DHCP_CONTROL_PAUSE = @as(u32, 3);
pub const DHCP_CONTROL_CONTINUE = @as(u32, 4);
pub const DHCP_DROP_DUPLICATE = @as(u32, 1);
pub const DHCP_DROP_NOMEM = @as(u32, 2);
pub const DHCP_DROP_INTERNAL_ERROR = @as(u32, 3);
pub const DHCP_DROP_TIMEOUT = @as(u32, 4);
pub const DHCP_DROP_UNAUTH = @as(u32, 5);
pub const DHCP_DROP_PAUSED = @as(u32, 6);
pub const DHCP_DROP_NO_SUBNETS = @as(u32, 7);
pub const DHCP_DROP_INVALID = @as(u32, 8);
pub const DHCP_DROP_WRONG_SERVER = @as(u32, 9);
pub const DHCP_DROP_NOADDRESS = @as(u32, 10);
pub const DHCP_DROP_PROCESSED = @as(u32, 11);
pub const DHCP_DROP_GEN_FAILURE = @as(u32, 256);
pub const DHCP_SEND_PACKET = @as(u32, 268435456);
pub const DHCP_PROB_CONFLICT = @as(u32, 536870913);
pub const DHCP_PROB_DECLINE = @as(u32, 536870914);
pub const DHCP_PROB_RELEASE = @as(u32, 536870915);
pub const DHCP_PROB_NACKED = @as(u32, 536870916);
pub const DHCP_GIVE_ADDRESS_NEW = @as(u32, 805306369);
pub const DHCP_GIVE_ADDRESS_OLD = @as(u32, 805306370);
pub const DHCP_CLIENT_BOOTP = @as(u32, 805306371);
pub const DHCP_CLIENT_DHCP = @as(u32, 805306372);
// DHCPv6 option codes; the values match RFC 3315/3646 option numbers.
pub const DHCPV6_OPTION_CLIENTID = @as(u32, 1);
pub const DHCPV6_OPTION_SERVERID = @as(u32, 2);
pub const DHCPV6_OPTION_IA_NA = @as(u32, 3);
pub const DHCPV6_OPTION_IA_TA = @as(u32, 4);
pub const DHCPV6_OPTION_ORO = @as(u32, 6);
pub const DHCPV6_OPTION_PREFERENCE = @as(u32, 7);
pub const DHCPV6_OPTION_UNICAST = @as(u32, 12);
pub const DHCPV6_OPTION_RAPID_COMMIT = @as(u32, 14);
pub const DHCPV6_OPTION_USER_CLASS = @as(u32, 15);
pub const DHCPV6_OPTION_VENDOR_CLASS = @as(u32, 16);
pub const DHCPV6_OPTION_VENDOR_OPTS = @as(u32, 17);
pub const DHCPV6_OPTION_RECONF_MSG = @as(u32, 19);
pub const DHCPV6_OPTION_SIP_SERVERS_NAMES = @as(u32, 21);
pub const DHCPV6_OPTION_SIP_SERVERS_ADDRS = @as(u32, 22);
pub const DHCPV6_OPTION_DNS_SERVERS = @as(u32, 23);
pub const DHCPV6_OPTION_DOMAIN_LIST = @as(u32, 24);
pub const DHCPV6_OPTION_IA_PD = @as(u32, 25);
pub const DHCPV6_OPTION_NIS_SERVERS = @as(u32, 27);
pub const DHCPV6_OPTION_NISP_SERVERS = @as(u32, 28);
pub const DHCPV6_OPTION_NIS_DOMAIN_NAME = @as(u32, 29);
pub const DHCPV6_OPTION_NISP_DOMAIN_NAME = @as(u32, 30);
//--------------------------------------------------------------------------------
// Section: Types (164)
//--------------------------------------------------------------------------------
// DHCPv6/DHCPv4 client API parameter structures. These are `extern`
// structs: field order and layout mirror the C ABI of the Windows
// DHCP client headers.
pub const DHCPV6CAPI_PARAMS = extern struct {
    Flags: u32,
    OptionId: u32,
    IsVendor: BOOL,
    Data: ?*u8,
    nBytesData: u32,
};
pub const DHCPV6CAPI_PARAMS_ARRAY = extern struct {
    nParams: u32,
    Params: ?*DHCPV6CAPI_PARAMS,
};
pub const DHCPV6CAPI_CLASSID = extern struct {
    Flags: u32,
    Data: ?*u8,
    nBytesData: u32,
};
// DHCPv6 prefix-delegation status codes.
pub const StatusCode = enum(i32) {
    NO_ERROR = 0,
    UNSPECIFIED_FAILURE = 1,
    NO_BINDING = 3,
    NOPREFIX_AVAIL = 6,
};
pub const STATUS_NO_ERROR = StatusCode.NO_ERROR;
pub const STATUS_UNSPECIFIED_FAILURE = StatusCode.UNSPECIFIED_FAILURE;
pub const STATUS_NO_BINDING = StatusCode.NO_BINDING;
pub const STATUS_NOPREFIX_AVAIL = StatusCode.NOPREFIX_AVAIL;
pub const DHCPV6Prefix = extern struct {
    prefix: [16]u8,
    prefixLength: u32,
    preferredLifeTime: u32,
    validLifeTime: u32,
    status: StatusCode,
};
pub const DHCPV6PrefixLeaseInformation = extern struct {
    nPrefixes: u32,
    prefixArray: ?*DHCPV6Prefix,
    iaid: u32,
    T1: i64,
    T2: i64,
    MaxLeaseExpirationTime: i64,
    LastRenewalTime: i64,
    status: StatusCode,
    ServerId: ?*u8,
    ServerIdLen: u32,
};
// DHCPv4 client API equivalents of the DHCPV6CAPI_* structures above.
pub const DHCPAPI_PARAMS = extern struct {
    Flags: u32,
    OptionId: u32,
    IsVendor: BOOL,
    Data: ?*u8,
    nBytesData: u32,
};
pub const DHCPCAPI_PARAMS_ARRAY = extern struct {
    nParams: u32,
    Params: ?*DHCPAPI_PARAMS,
};
pub const DHCPCAPI_CLASSID = extern struct {
    Flags: u32,
    Data: ?*u8,
    nBytesData: u32,
};
// Callback types for DHCP server callout DLLs. Each is a WINAPI function
// type whose u32 return value is a Win32 error code.
pub const LPDHCP_CONTROL = fn(
    dwControlCode: u32,
    lpReserved: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
pub const LPDHCP_NEWPKT = fn(
    Packet: ?*?*u8,
    PacketSize: ?*u32,
    IpAddress: u32,
    Reserved: ?*anyopaque,
    PktContext: ?*?*anyopaque,
    ProcessIt: ?*i32,
) callconv(@import("std").os.windows.WINAPI) u32;
pub const LPDHCP_DROP_SEND = fn(
    Packet: ?*?*u8,
    PacketSize: ?*u32,
    ControlCode: u32,
    IpAddress: u32,
    Reserved: ?*anyopaque,
    PktContext: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
pub const LPDHCP_PROB = fn(
    Packet: ?*u8,
    PacketSize: u32,
    ControlCode: u32,
    IpAddress: u32,
    AltAddress: u32,
    Reserved: ?*anyopaque,
    PktContext: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
pub const LPDHCP_GIVE_ADDRESS = fn(
    Packet: ?*u8,
    PacketSize: u32,
    ControlCode: u32,
    IpAddress: u32,
    AltAddress: u32,
    AddrType: u32,
    LeaseTime: u32,
    Reserved: ?*anyopaque,
    PktContext: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
pub const LPDHCP_HANDLE_OPTIONS = fn(
    Packet: ?*u8,
    PacketSize: u32,
    Reserved: ?*anyopaque,
    PktContext: ?*anyopaque,
    ServerOptions: ?*DHCP_SERVER_OPTIONS,
) callconv(@import("std").os.windows.WINAPI) u32;
pub const LPDHCP_DELETE_CLIENT = fn(
    IpAddress: u32,
    HwAddress: ?*u8,
    HwAddressLength: u32,
    Reserved: u32,
    ClientType: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// Hook table a callout DLL fills in; null entries mean "no hook".
pub const DHCP_CALLOUT_TABLE = extern struct {
    DhcpControlHook: ?LPDHCP_CONTROL,
    DhcpNewPktHook: ?LPDHCP_NEWPKT,
    DhcpPktDropHook: ?LPDHCP_DROP_SEND,
    DhcpPktSendHook: ?LPDHCP_DROP_SEND,
    DhcpAddressDelHook: ?LPDHCP_PROB,
    DhcpAddressOfferHook: ?LPDHCP_GIVE_ADDRESS,
    DhcpHandleOptionsHook: ?LPDHCP_HANDLE_OPTIONS,
    DhcpDeleteClientHook: ?LPDHCP_DELETE_CLIENT,
    DhcpExtensionHook: ?*anyopaque,
    DhcpReservedHook: ?*anyopaque,
};
// Entry point exported by a callout DLL to register its hook table.
pub const LPDHCP_ENTRY_POINT_FUNC = fn(
    ChainDlls: ?PWSTR,
    CalloutVersion: u32,
    CalloutTbl: ?*DHCP_CALLOUT_TABLE,
) callconv(@import("std").os.windows.WINAPI) u32;
// Core DHCP server management types. `u32` fields named *Address /
// *Mask carry IPv4 addresses in the management API's representation.
pub const DATE_TIME = extern struct {
    dwLowDateTime: u32,
    dwHighDateTime: u32,
};
pub const DHCP_IP_RANGE = extern struct {
    StartAddress: u32,
    EndAddress: u32,
};
// Length-prefixed byte buffer used throughout the API (e.g. hardware
// addresses); ownership follows the specific call's contract.
pub const DHCP_BINARY_DATA = extern struct {
    DataLength: u32,
    Data: ?*u8,
};
pub const DHCP_HOST_INFO = extern struct {
    IpAddress: u32,
    NetBiosName: ?PWSTR,
    HostName: ?PWSTR,
};
pub const DHCP_FORCE_FLAG = enum(i32) {
    FullForce = 0,
    NoForce = 1,
    FailoverForce = 2,
};
pub const DhcpFullForce = DHCP_FORCE_FLAG.FullForce;
pub const DhcpNoForce = DHCP_FORCE_FLAG.NoForce;
pub const DhcpFailoverForce = DHCP_FORCE_FLAG.FailoverForce;
pub const DWORD_DWORD = extern struct {
    DWord1: u32,
    DWord2: u32,
};
pub const DHCP_SUBNET_STATE = enum(i32) {
    Enabled = 0,
    Disabled = 1,
    EnabledSwitched = 2,
    DisabledSwitched = 3,
    InvalidState = 4,
};
pub const DhcpSubnetEnabled = DHCP_SUBNET_STATE.Enabled;
pub const DhcpSubnetDisabled = DHCP_SUBNET_STATE.Disabled;
pub const DhcpSubnetEnabledSwitched = DHCP_SUBNET_STATE.EnabledSwitched;
pub const DhcpSubnetDisabledSwitched = DHCP_SUBNET_STATE.DisabledSwitched;
pub const DhcpSubnetInvalidState = DHCP_SUBNET_STATE.InvalidState;
pub const DHCP_SUBNET_INFO = extern struct {
    SubnetAddress: u32,
    SubnetMask: u32,
    SubnetName: ?PWSTR,
    SubnetComment: ?PWSTR,
    PrimaryHost: DHCP_HOST_INFO,
    SubnetState: DHCP_SUBNET_STATE,
};
// Extended (VQ = "with quarantine") variant of DHCP_SUBNET_INFO.
pub const DHCP_SUBNET_INFO_VQ = extern struct {
    SubnetAddress: u32,
    SubnetMask: u32,
    SubnetName: ?PWSTR,
    SubnetComment: ?PWSTR,
    PrimaryHost: DHCP_HOST_INFO,
    SubnetState: DHCP_SUBNET_STATE,
    QuarantineOn: u32,
    Reserved1: u32,
    Reserved2: u32,
    Reserved3: i64,
    Reserved4: i64,
};
pub const DHCP_IP_ARRAY = extern struct {
    NumElements: u32,
    Elements: [*]u32,
};
pub const DHCP_IP_CLUSTER = extern struct {
    ClusterAddress: u32,
    ClusterMask: u32,
};
pub const DHCP_IP_RESERVATION = extern struct {
    ReservedIpAddress: u32,
    ReservedForClient: ?*DHCP_BINARY_DATA,
};
// Discriminator for DHCP_SUBNET_ELEMENT_DATA.Element below.
pub const DHCP_SUBNET_ELEMENT_TYPE = enum(i32) {
    IpRanges = 0,
    SecondaryHosts = 1,
    ReservedIps = 2,
    ExcludedIpRanges = 3,
    IpUsedClusters = 4,
    IpRangesDhcpOnly = 5,
    IpRangesDhcpBootp = 6,
    IpRangesBootpOnly = 7,
};
pub const DhcpIpRanges = DHCP_SUBNET_ELEMENT_TYPE.IpRanges;
pub const DhcpSecondaryHosts = DHCP_SUBNET_ELEMENT_TYPE.SecondaryHosts;
pub const DhcpReservedIps = DHCP_SUBNET_ELEMENT_TYPE.ReservedIps;
pub const DhcpExcludedIpRanges = DHCP_SUBNET_ELEMENT_TYPE.ExcludedIpRanges;
pub const DhcpIpUsedClusters = DHCP_SUBNET_ELEMENT_TYPE.IpUsedClusters;
pub const DhcpIpRangesDhcpOnly = DHCP_SUBNET_ELEMENT_TYPE.IpRangesDhcpOnly;
pub const DhcpIpRangesDhcpBootp = DHCP_SUBNET_ELEMENT_TYPE.IpRangesDhcpBootp;
pub const DhcpIpRangesBootpOnly = DHCP_SUBNET_ELEMENT_TYPE.IpRangesBootpOnly;
// C-style discriminated union: `ElementType` selects which `Element`
// member is valid (matching DHCP_SUBNET_ELEMENT_TYPE above); the union
// is untagged at the ABI level, so the discriminant is not enforced.
pub const DHCP_SUBNET_ELEMENT_DATA = extern struct {
    pub const DHCP_SUBNET_ELEMENT_UNION = extern union {
        IpRange: ?*DHCP_IP_RANGE,
        SecondaryHost: ?*DHCP_HOST_INFO,
        ReservedIp: ?*DHCP_IP_RESERVATION,
        ExcludeIpRange: ?*DHCP_IP_RANGE,
        IpUsedCluster: ?*DHCP_IP_CLUSTER,
    };
    ElementType: DHCP_SUBNET_ELEMENT_TYPE,
    Element: DHCP_SUBNET_ELEMENT_UNION,
};
// WARNING: this type symbol conflicts with something!
// (Generator placeholder: the nested union's flat C name collides with
// another declaration, so only this stub is emitted at module scope.)
pub const DHCP_SUBNET_ELEMENT_UNION_CONFLICT_ = usize;
pub const DHCP_SUBNET_ELEMENT_INFO_ARRAY = extern struct {
    NumElements: u32,
    Elements: ?*DHCP_SUBNET_ELEMENT_DATA,
};
pub const DHCP_IPV6_ADDRESS = extern struct {
    HighOrderBits: u64,
    LowOrderBits: u64,
};
pub const DHCP_FILTER_LIST_TYPE = enum(i32) {
    Deny = 0,
    Allow = 1,
};
pub const Deny = DHCP_FILTER_LIST_TYPE.Deny;
pub const Allow = DHCP_FILTER_LIST_TYPE.Allow;
// Link-layer (MAC) address pattern; `Pattern` capacity matches
// MAX_PATTERN_LENGTH (255) declared earlier.
pub const DHCP_ADDR_PATTERN = extern struct {
    MatchHWType: BOOL,
    HWType: u8,
    IsWildcard: BOOL,
    Length: u8,
    Pattern: [255]u8,
};
pub const DHCP_FILTER_ADD_INFO = extern struct {
    AddrPatt: DHCP_ADDR_PATTERN,
    Comment: ?PWSTR,
    ListType: DHCP_FILTER_LIST_TYPE,
};
pub const DHCP_FILTER_GLOBAL_INFO = extern struct {
    EnforceAllowList: BOOL,
    EnforceDenyList: BOOL,
};
pub const DHCP_FILTER_RECORD = extern struct {
    AddrPatt: DHCP_ADDR_PATTERN,
    Comment: ?PWSTR,
};
pub const DHCP_FILTER_ENUM_INFO = extern struct {
    NumElements: u32,
    pEnumRecords: ?*DHCP_FILTER_RECORD,
};
// Discriminator for DHCP_OPTION_DATA_ELEMENT.Element below.
pub const DHCP_OPTION_DATA_TYPE = enum(i32) {
    ByteOption = 0,
    WordOption = 1,
    DWordOption = 2,
    DWordDWordOption = 3,
    IpAddressOption = 4,
    StringDataOption = 5,
    BinaryDataOption = 6,
    EncapsulatedDataOption = 7,
    Ipv6AddressOption = 8,
};
pub const DhcpByteOption = DHCP_OPTION_DATA_TYPE.ByteOption;
pub const DhcpWordOption = DHCP_OPTION_DATA_TYPE.WordOption;
pub const DhcpDWordOption = DHCP_OPTION_DATA_TYPE.DWordOption;
pub const DhcpDWordDWordOption = DHCP_OPTION_DATA_TYPE.DWordDWordOption;
pub const DhcpIpAddressOption = DHCP_OPTION_DATA_TYPE.IpAddressOption;
pub const DhcpStringDataOption = DHCP_OPTION_DATA_TYPE.StringDataOption;
pub const DhcpBinaryDataOption = DHCP_OPTION_DATA_TYPE.BinaryDataOption;
pub const DhcpEncapsulatedDataOption = DHCP_OPTION_DATA_TYPE.EncapsulatedDataOption;
pub const DhcpIpv6AddressOption = DHCP_OPTION_DATA_TYPE.Ipv6AddressOption;
// One option value; `OptionType` selects the active union member.
pub const DHCP_OPTION_DATA_ELEMENT = extern struct {
    pub const DHCP_OPTION_ELEMENT_UNION = extern union {
        ByteOption: u8,
        WordOption: u16,
        DWordOption: u32,
        DWordDWordOption: DWORD_DWORD,
        IpAddressOption: u32,
        StringDataOption: ?PWSTR,
        BinaryDataOption: DHCP_BINARY_DATA,
        EncapsulatedDataOption: DHCP_BINARY_DATA,
        Ipv6AddressDataOption: ?PWSTR,
    };
    OptionType: DHCP_OPTION_DATA_TYPE,
    Element: DHCP_OPTION_ELEMENT_UNION,
};
// WARNING: this type symbol conflicts with something!
// (Generator placeholder for a colliding flat C union name.)
pub const DHCP_OPTION_ELEMENT_UNION_CONFLICT_ = usize;
pub const DHCP_OPTION_DATA = extern struct {
    NumElements: u32,
    Elements: ?*DHCP_OPTION_DATA_ELEMENT,
};
pub const DHCP_OPTION_TYPE = enum(i32) {
    UnaryElementTypeOption = 0,
    ArrayTypeOption = 1,
};
pub const DhcpUnaryElementTypeOption = DHCP_OPTION_TYPE.UnaryElementTypeOption;
pub const DhcpArrayTypeOption = DHCP_OPTION_TYPE.ArrayTypeOption;
pub const DHCP_OPTION = extern struct {
    OptionID: u32,
    OptionName: ?PWSTR,
    OptionComment: ?PWSTR,
    DefaultValue: DHCP_OPTION_DATA,
    OptionType: DHCP_OPTION_TYPE,
};
pub const DHCP_OPTION_ARRAY = extern struct {
    NumElements: u32,
    Options: ?*DHCP_OPTION,
};
pub const DHCP_OPTION_VALUE = extern struct {
    OptionID: u32,
    Value: DHCP_OPTION_DATA,
};
pub const DHCP_OPTION_VALUE_ARRAY = extern struct {
    NumElements: u32,
    Values: ?*DHCP_OPTION_VALUE,
};
// Discriminator for DHCP_OPTION_SCOPE_INFO.ScopeInfo below.
pub const DHCP_OPTION_SCOPE_TYPE = enum(i32) {
    DefaultOptions = 0,
    GlobalOptions = 1,
    SubnetOptions = 2,
    ReservedOptions = 3,
    MScopeOptions = 4,
};
pub const DhcpDefaultOptions = DHCP_OPTION_SCOPE_TYPE.DefaultOptions;
pub const DhcpGlobalOptions = DHCP_OPTION_SCOPE_TYPE.GlobalOptions;
pub const DhcpSubnetOptions = DHCP_OPTION_SCOPE_TYPE.SubnetOptions;
pub const DhcpReservedOptions = DHCP_OPTION_SCOPE_TYPE.ReservedOptions;
pub const DhcpMScopeOptions = DHCP_OPTION_SCOPE_TYPE.MScopeOptions;
pub const DHCP_RESERVED_SCOPE = extern struct {
    ReservedIpAddress: u32,
    ReservedIpSubnetAddress: u32,
};
// Identifies which scope (default/global/subnet/reservation/mscope)
// an option operation applies to; `ScopeType` selects the member.
pub const DHCP_OPTION_SCOPE_INFO = extern struct {
    pub const _DHCP_OPTION_SCOPE_UNION = extern union {
        DefaultScopeInfo: ?*anyopaque,
        GlobalScopeInfo: ?*anyopaque,
        SubnetScopeInfo: u32,
        ReservedScopeInfo: DHCP_RESERVED_SCOPE,
        MScopeInfo: ?PWSTR,
    };
    ScopeType: DHCP_OPTION_SCOPE_TYPE,
    ScopeInfo: _DHCP_OPTION_SCOPE_UNION,
};
// DHCPv6 counterparts of the option-scope types above.
pub const DHCP_OPTION_SCOPE_TYPE6 = enum(i32) {
    DefaultOptions6 = 0,
    ScopeOptions6 = 1,
    ReservedOptions6 = 2,
    GlobalOptions6 = 3,
};
pub const DhcpDefaultOptions6 = DHCP_OPTION_SCOPE_TYPE6.DefaultOptions6;
pub const DhcpScopeOptions6 = DHCP_OPTION_SCOPE_TYPE6.ScopeOptions6;
pub const DhcpReservedOptions6 = DHCP_OPTION_SCOPE_TYPE6.ReservedOptions6;
pub const DhcpGlobalOptions6 = DHCP_OPTION_SCOPE_TYPE6.GlobalOptions6;
pub const DHCP_RESERVED_SCOPE6 = extern struct {
    ReservedIpAddress: DHCP_IPV6_ADDRESS,
    ReservedIpSubnetAddress: DHCP_IPV6_ADDRESS,
};
pub const DHCP_OPTION_SCOPE_INFO6 = extern struct {
    pub const DHCP_OPTION_SCOPE_UNION6 = extern union {
        DefaultScopeInfo: ?*anyopaque,
        SubnetScopeInfo: DHCP_IPV6_ADDRESS,
        ReservedScopeInfo: DHCP_RESERVED_SCOPE6,
    };
    ScopeType: DHCP_OPTION_SCOPE_TYPE6,
    ScopeInfo: DHCP_OPTION_SCOPE_UNION6,
};
// WARNING: this type symbol conflicts with something!
// (Generator placeholder for a colliding flat C union name.)
pub const DHCP_OPTION_SCOPE_UNION6_CONFLICT_ = usize;
pub const DHCP_OPTION_LIST = extern struct {
    NumOptions: u32,
    Options: ?*DHCP_OPTION_VALUE,
};
// Basic per-lease client record; extended variants below add fields
// while keeping this common prefix layout.
pub const DHCP_CLIENT_INFO = extern struct {
    ClientIpAddress: u32,
    SubnetMask: u32,
    ClientHardwareAddress: DHCP_BINARY_DATA,
    ClientName: ?PWSTR,
    ClientComment: ?PWSTR,
    ClientLeaseExpires: DATE_TIME,
    OwnerHost: DHCP_HOST_INFO,
};
pub const DHCP_CLIENT_INFO_ARRAY = extern struct {
    NumElements: u32,
    Clients: ?*?*DHCP_CLIENT_INFO,
};
// NAP quarantine status of a client.
pub const QuarantineStatus = enum(i32) {
    NOQUARANTINE = 0,
    RESTRICTEDACCESS = 1,
    DROPPACKET = 2,
    PROBATION = 3,
    EXEMPT = 4,
    DEFAULTQUARSETTING = 5,
    NOQUARINFO = 6,
};
pub const NOQUARANTINE = QuarantineStatus.NOQUARANTINE;
pub const RESTRICTEDACCESS = QuarantineStatus.RESTRICTEDACCESS;
pub const DROPPACKET = QuarantineStatus.DROPPACKET;
pub const PROBATION = QuarantineStatus.PROBATION;
pub const EXEMPT = QuarantineStatus.EXEMPT;
pub const DEFAULTQUARSETTING = QuarantineStatus.DEFAULTQUARSETTING;
pub const NOQUARINFO = QuarantineStatus.NOQUARINFO;
// DHCP_CLIENT_INFO plus client type, address state, and quarantine data.
pub const DHCP_CLIENT_INFO_VQ = extern struct {
    ClientIpAddress: u32,
    SubnetMask: u32,
    ClientHardwareAddress: DHCP_BINARY_DATA,
    ClientName: ?PWSTR,
    ClientComment: ?PWSTR,
    ClientLeaseExpires: DATE_TIME,
    OwnerHost: DHCP_HOST_INFO,
    bClientType: u8,
    AddressState: u8,
    Status: QuarantineStatus,
    ProbationEnds: DATE_TIME,
    QuarantineCapable: BOOL,
};
pub const DHCP_CLIENT_INFO_ARRAY_VQ = extern struct {
    NumElements: u32,
    Clients: ?*?*DHCP_CLIENT_INFO_VQ,
};
// DHCP_CLIENT_INFO_VQ plus the FILTER_STATUS_* match result.
pub const DHCP_CLIENT_FILTER_STATUS_INFO = extern struct {
    ClientIpAddress: u32,
    SubnetMask: u32,
    ClientHardwareAddress: DHCP_BINARY_DATA,
    ClientName: ?PWSTR,
    ClientComment: ?PWSTR,
    ClientLeaseExpires: DATE_TIME,
    OwnerHost: DHCP_HOST_INFO,
    bClientType: u8,
    AddressState: u8,
    Status: QuarantineStatus,
    ProbationEnds: DATE_TIME,
    QuarantineCapable: BOOL,
    FilterStatus: u32,
};
pub const DHCP_CLIENT_FILTER_STATUS_INFO_ARRAY = extern struct {
    NumElements: u32,
    Clients: ?*?*DHCP_CLIENT_FILTER_STATUS_INFO,
};
// DHCP_CLIENT_FILTER_STATUS_INFO plus the matching policy name (PB =
// policy-based).
pub const DHCP_CLIENT_INFO_PB = extern struct {
    ClientIpAddress: u32,
    SubnetMask: u32,
    ClientHardwareAddress: DHCP_BINARY_DATA,
    ClientName: ?PWSTR,
    ClientComment: ?PWSTR,
    ClientLeaseExpires: DATE_TIME,
    OwnerHost: DHCP_HOST_INFO,
    bClientType: u8,
    AddressState: u8,
    Status: QuarantineStatus,
    ProbationEnds: DATE_TIME,
    QuarantineCapable: BOOL,
    FilterStatus: u32,
    PolicyName: ?PWSTR,
};
pub const DHCP_CLIENT_INFO_PB_ARRAY = extern struct {
    NumElements: u32,
    Clients: ?*?*DHCP_CLIENT_INFO_PB,
};
// How a client lookup identifies its target; discriminates the union in
// DHCP_SEARCH_INFO below.
pub const DHCP_SEARCH_INFO_TYPE = enum(i32) {
    IpAddress = 0,
    HardwareAddress = 1,
    Name = 2,
};
pub const DhcpClientIpAddress = DHCP_SEARCH_INFO_TYPE.IpAddress;
pub const DhcpClientHardwareAddress = DHCP_SEARCH_INFO_TYPE.HardwareAddress;
pub const DhcpClientName = DHCP_SEARCH_INFO_TYPE.Name;
pub const DHCP_SEARCH_INFO = extern struct {
    pub const DHCP_CLIENT_SEARCH_UNION = extern union {
        ClientIpAddress: u32,
        ClientHardwareAddress: DHCP_BINARY_DATA,
        ClientName: ?PWSTR,
    };
    SearchType: DHCP_SEARCH_INFO_TYPE,
    SearchInfo: DHCP_CLIENT_SEARCH_UNION,
};
// WARNING: this type symbol conflicts with something!
// (Generator placeholder for a colliding flat C union name.)
pub const DHCP_CLIENT_SEARCH_UNION_CONFLICT_ = usize;
/// Value kind stored in a DHCP_PROPERTY (selects the union member).
pub const DHCP_PROPERTY_TYPE = enum(i32) {
    Byte = 0,
    Word = 1,
    Dword = 2,
    String = 3,
    Binary = 4,
};
pub const DhcpPropTypeByte = DHCP_PROPERTY_TYPE.Byte;
pub const DhcpPropTypeWord = DHCP_PROPERTY_TYPE.Word;
pub const DhcpPropTypeDword = DHCP_PROPERTY_TYPE.Dword;
pub const DhcpPropTypeString = DHCP_PROPERTY_TYPE.String;
pub const DhcpPropTypeBinary = DHCP_PROPERTY_TYPE.Binary;
/// Identifier naming which server-side property a DHCP_PROPERTY carries.
pub const DHCP_PROPERTY_ID = enum(i32) {
    PolicyDnsSuffix = 0,
    ClientAddressStateEx = 1,
};
pub const DhcpPropIdPolicyDnsSuffix = DHCP_PROPERTY_ID.PolicyDnsSuffix;
pub const DhcpPropIdClientAddressStateEx = DHCP_PROPERTY_ID.ClientAddressStateEx;
/// Generic (ID, Type, Value) property triple; Type tells which member of
/// the Value union is active.
pub const DHCP_PROPERTY = extern struct {
    pub const _DHCP_PROPERTY_VALUE_UNION = extern union {
        ByteValue: u8,
        WordValue: u16,
        DWordValue: u32,
        StringValue: ?PWSTR,
        BinaryValue: DHCP_BINARY_DATA,
    };
    ID: DHCP_PROPERTY_ID,
    Type: DHCP_PROPERTY_TYPE,
    Value: _DHCP_PROPERTY_VALUE_UNION,
};
/// Counted array of DHCP_PROPERTY elements (contiguous, not ptr-to-ptr).
pub const DHCP_PROPERTY_ARRAY = extern struct {
    NumElements: u32,
    Elements: ?*DHCP_PROPERTY,
};
/// "EX" client record: policy-based layout plus an attached property array.
pub const DHCP_CLIENT_INFO_EX = extern struct {
    ClientIpAddress: u32,
    SubnetMask: u32,
    ClientHardwareAddress: DHCP_BINARY_DATA,
    ClientName: ?PWSTR,
    ClientComment: ?PWSTR,
    ClientLeaseExpires: DATE_TIME,
    OwnerHost: DHCP_HOST_INFO,
    bClientType: u8,
    AddressState: u8,
    Status: QuarantineStatus,
    ProbationEnds: DATE_TIME,
    QuarantineCapable: BOOL,
    FilterStatus: u32,
    PolicyName: ?PWSTR,
    Properties: ?*DHCP_PROPERTY_ARRAY,
};
/// Counted array of pointers to DHCP_CLIENT_INFO_EX records.
pub const DHCP_CLIENT_INFO_EX_ARRAY = extern struct {
    NumElements: u32,
    Clients: ?*?*DHCP_CLIENT_INFO_EX,
};
/// Per-scope address-usage counters (base variant).
pub const SCOPE_MIB_INFO = extern struct {
    Subnet: u32,
    NumAddressesInuse: u32,
    NumAddressesFree: u32,
    NumPendingOffers: u32,
};
/// Server-wide DHCPv4 protocol counters plus a per-scope counter table
/// (Scopes entries behind ScopeInfo).
pub const DHCP_MIB_INFO = extern struct {
    Discovers: u32,
    Offers: u32,
    Requests: u32,
    Acks: u32,
    Naks: u32,
    Declines: u32,
    Releases: u32,
    ServerStartTime: DATE_TIME,
    Scopes: u32,
    ScopeInfo: ?*SCOPE_MIB_INFO,
};
/// Per-scope counters extended with quarantine ("Qtn") lease statistics.
pub const SCOPE_MIB_INFO_VQ = extern struct {
    Subnet: u32,
    NumAddressesInuse: u32,
    NumAddressesFree: u32,
    NumPendingOffers: u32,
    QtnNumLeases: u32,
    QtnPctQtnLeases: u32,
    QtnProbationLeases: u32,
    QtnNonQtnLeases: u32,
    QtnExemptLeases: u32,
    QtnCapableClients: u32,
};
/// Server-wide counters, VQ variant: adds quarantine statistics and points
/// at SCOPE_MIB_INFO_VQ entries.
pub const DHCP_MIB_INFO_VQ = extern struct {
    Discovers: u32,
    Offers: u32,
    Requests: u32,
    Acks: u32,
    Naks: u32,
    Declines: u32,
    Releases: u32,
    ServerStartTime: DATE_TIME,
    QtnNumLeases: u32,
    QtnPctQtnLeases: u32,
    QtnProbationLeases: u32,
    QtnNonQtnLeases: u32,
    QtnExemptLeases: u32,
    QtnCapableClients: u32,
    QtnIASErrors: u32,
    Scopes: u32,
    ScopeInfo: ?*SCOPE_MIB_INFO_VQ,
};
/// Per-scope counters, V5 variant (same fields as the base SCOPE_MIB_INFO;
/// kept as a distinct type to match the C headers).
pub const SCOPE_MIB_INFO_V5 = extern struct {
    Subnet: u32,
    NumAddressesInuse: u32,
    NumAddressesFree: u32,
    NumPendingOffers: u32,
};
/// Server-wide counters, V5 variant: VQ fields plus delayed-offer counters.
pub const DHCP_MIB_INFO_V5 = extern struct {
    Discovers: u32,
    Offers: u32,
    Requests: u32,
    Acks: u32,
    Naks: u32,
    Declines: u32,
    Releases: u32,
    ServerStartTime: DATE_TIME,
    QtnNumLeases: u32,
    QtnPctQtnLeases: u32,
    QtnProbationLeases: u32,
    QtnNonQtnLeases: u32,
    QtnExemptLeases: u32,
    QtnCapableClients: u32,
    QtnIASErrors: u32,
    DelayedOffers: u32,
    ScopesWithDelayedOffers: u32,
    Scopes: u32,
    ScopeInfo: ?*SCOPE_MIB_INFO_V5,
};
/// Base DHCP server configuration (database paths, backup/cleanup
/// intervals, logging flags). Intervals are presumably in minutes per the
/// dhcpsapi docs — TODO confirm; not visible from this declaration.
pub const DHCP_SERVER_CONFIG_INFO = extern struct {
    APIProtocolSupport: u32,
    DatabaseName: ?PWSTR,
    DatabasePath: ?PWSTR,
    BackupPath: ?PWSTR,
    BackupInterval: u32,
    DatabaseLoggingFlag: u32,
    RestoreFlag: u32,
    DatabaseCleanupInterval: u32,
    DebugFlag: u32,
};
/// Which side a database-consistency scan fixes: the registry or the DB.
pub const DHCP_SCAN_FLAG = enum(i32) {
    RegistryFix = 0,
    DatabaseFix = 1,
};
pub const DhcpRegistryFix = DHCP_SCAN_FLAG.RegistryFix;
pub const DhcpDatabaseFix = DHCP_SCAN_FLAG.DatabaseFix;
/// One inconsistent address found by a scan, tagged with how to fix it.
pub const DHCP_SCAN_ITEM = extern struct {
    IpAddress: u32,
    ScanFlag: DHCP_SCAN_FLAG,
};
/// Counted list of scan items.
pub const DHCP_SCAN_LIST = extern struct {
    NumScanItems: u32,
    ScanItems: ?*DHCP_SCAN_ITEM,
};
/// User/vendor option-class descriptor; ClassData is ClassDataLength bytes.
pub const DHCP_CLASS_INFO = extern struct {
    ClassName: ?PWSTR,
    ClassComment: ?PWSTR,
    ClassDataLength: u32,
    IsVendor: BOOL,
    Flags: u32,
    ClassData: ?*u8,
};
/// Counted array of DHCP_CLASS_INFO elements.
pub const DHCP_CLASS_INFO_ARRAY = extern struct {
    NumElements: u32,
    Classes: ?*DHCP_CLASS_INFO,
};
/// DHCPv6 class descriptor: adds the IANA EnterpriseNumber.
pub const DHCP_CLASS_INFO_V6 = extern struct {
    ClassName: ?PWSTR,
    ClassComment: ?PWSTR,
    ClassDataLength: u32,
    IsVendor: BOOL,
    EnterpriseNumber: u32,
    Flags: u32,
    ClassData: ?*u8,
};
/// Counted array of DHCP_CLASS_INFO_V6 elements.
pub const DHCP_CLASS_INFO_ARRAY_V6 = extern struct {
    NumElements: u32,
    Classes: ?*DHCP_CLASS_INFO_V6,
};
/// The server's built-in default vendor/user class names.
pub const DHCP_SERVER_SPECIFIC_STRINGS = extern struct {
    DefaultVendorClassName: ?PWSTR,
    DefaultUserClassName: ?PWSTR,
};
/// V4 reservation: a fixed address bound to a client id (hardware address
/// in DHCP_BINARY_DATA form) with allowed client types (DHCP/BOOTP bits).
pub const DHCP_IP_RESERVATION_V4 = extern struct {
    ReservedIpAddress: u32,
    ReservedForClient: ?*DHCP_BINARY_DATA,
    bAllowedClientTypes: u8,
};
/// Reservation plus its display name/description and an options-present flag.
pub const DHCP_IP_RESERVATION_INFO = extern struct {
    ReservedIpAddress: u32,
    ReservedForClient: DHCP_BINARY_DATA,
    ReservedClientName: ?PWSTR,
    ReservedClientDesc: ?PWSTR,
    bAllowedClientTypes: u8,
    fOptionsPresent: u8,
};
/// Counted array of pointers to DHCP_IP_RESERVATION_INFO records.
pub const DHCP_RESERVATION_INFO_ARRAY = extern struct {
    NumElements: u32,
    Elements: ?*?*DHCP_IP_RESERVATION_INFO,
};
/// Tagged V4 subnet element; ElementType (declared elsewhere in this file)
/// selects which Element union member is valid.
pub const DHCP_SUBNET_ELEMENT_DATA_V4 = extern struct {
    pub const DHCP_SUBNET_ELEMENT_UNION_V4 = extern union {
        IpRange: ?*DHCP_IP_RANGE,
        SecondaryHost: ?*DHCP_HOST_INFO,
        ReservedIp: ?*DHCP_IP_RESERVATION_V4,
        ExcludeIpRange: ?*DHCP_IP_RANGE,
        IpUsedCluster: ?*DHCP_IP_CLUSTER,
    };
    ElementType: DHCP_SUBNET_ELEMENT_TYPE,
    Element: DHCP_SUBNET_ELEMENT_UNION_V4,
};
// WARNING: this type symbol conflicts with something!
pub const DHCP_SUBNET_ELEMENT_UNION_V4_CONFLICT_ = usize;
/// Counted array of DHCP_SUBNET_ELEMENT_DATA_V4 elements.
pub const DHCP_SUBNET_ELEMENT_INFO_ARRAY_V4 = extern struct {
    NumElements: u32,
    Elements: ?*DHCP_SUBNET_ELEMENT_DATA_V4,
};
/// V4 client lease record (no address-state or quarantine fields).
pub const DHCP_CLIENT_INFO_V4 = extern struct {
    ClientIpAddress: u32,
    SubnetMask: u32,
    ClientHardwareAddress: DHCP_BINARY_DATA,
    ClientName: ?PWSTR,
    ClientComment: ?PWSTR,
    ClientLeaseExpires: DATE_TIME,
    OwnerHost: DHCP_HOST_INFO,
    bClientType: u8,
};
/// Counted array of pointers to DHCP_CLIENT_INFO_V4 records.
pub const DHCP_CLIENT_INFO_ARRAY_V4 = extern struct {
    NumElements: u32,
    Clients: ?*?*DHCP_CLIENT_INFO_V4,
};
/// V4 server configuration: base config plus ping-retry/boot-table/audit
/// settings.
pub const DHCP_SERVER_CONFIG_INFO_V4 = extern struct {
    APIProtocolSupport: u32,
    DatabaseName: ?PWSTR,
    DatabasePath: ?PWSTR,
    BackupPath: ?PWSTR,
    BackupInterval: u32,
    DatabaseLoggingFlag: u32,
    RestoreFlag: u32,
    DatabaseCleanupInterval: u32,
    DebugFlag: u32,
    dwPingRetries: u32,
    cbBootTableString: u32,
    wszBootTableString: ?PWSTR,
    fAuditLog: BOOL,
};
/// VQ server configuration: V4 config plus NAP quarantine settings.
pub const DHCP_SERVER_CONFIG_INFO_VQ = extern struct {
    APIProtocolSupport: u32,
    DatabaseName: ?PWSTR,
    DatabasePath: ?PWSTR,
    BackupPath: ?PWSTR,
    BackupInterval: u32,
    DatabaseLoggingFlag: u32,
    RestoreFlag: u32,
    DatabaseCleanupInterval: u32,
    DebugFlag: u32,
    dwPingRetries: u32,
    cbBootTableString: u32,
    wszBootTableString: ?PWSTR,
    fAuditLog: BOOL,
    QuarantineOn: BOOL,
    QuarDefFail: u32,
    QuarRuntimeStatus: BOOL,
};
/// DHCPv6 server configuration (lifetimes and T1/T2 renewal timers).
pub const DHCP_SERVER_CONFIG_INFO_V6 = extern struct {
    UnicastFlag: BOOL,
    RapidCommitFlag: BOOL,
    PreferredLifetime: u32,
    ValidLifetime: u32,
    T1: u32,
    T2: u32,
    PreferredLifetimeIATA: u32,
    ValidLifetimeIATA: u32,
    fAuditLog: BOOL,
};
/// One superscope membership entry; NextInSuperScope links subnets within
/// the same superscope.
pub const DHCP_SUPER_SCOPE_TABLE_ENTRY = extern struct {
    SubnetAddress: u32,
    SuperScopeNumber: u32,
    NextInSuperScope: u32,
    SuperScopeName: ?PWSTR,
};
/// Counted table of superscope entries.
pub const DHCP_SUPER_SCOPE_TABLE = extern struct {
    cEntries: u32,
    pEntries: ?*DHCP_SUPER_SCOPE_TABLE_ENTRY,
};
/// V5 client lease record: V4 layout plus AddressState.
pub const DHCP_CLIENT_INFO_V5 = extern struct {
    ClientIpAddress: u32,
    SubnetMask: u32,
    ClientHardwareAddress: DHCP_BINARY_DATA,
    ClientName: ?PWSTR,
    ClientComment: ?PWSTR,
    ClientLeaseExpires: DATE_TIME,
    OwnerHost: DHCP_HOST_INFO,
    bClientType: u8,
    AddressState: u8,
};
/// Counted array of pointers to DHCP_CLIENT_INFO_V5 records.
pub const DHCP_CLIENT_INFO_ARRAY_V5 = extern struct {
    NumElements: u32,
    Clients: ?*?*DHCP_CLIENT_INFO_V5,
};
/// All configured options: non-vendor options plus an inline anonymous
/// struct array pairing each vendor option with its vendor/class names.
pub const DHCP_ALL_OPTIONS = extern struct {
    Flags: u32,
    NonVendorOptions: ?*DHCP_OPTION_ARRAY,
    NumVendorOptions: u32,
    VendorOptions: ?*extern struct {
        Option: DHCP_OPTION,
        VendorName: ?PWSTR,
        ClassName: ?PWSTR,
    },
};
/// All option values grouped per (class, vendor) pair.
pub const DHCP_ALL_OPTION_VALUES = extern struct {
    Flags: u32,
    NumElements: u32,
    Options: ?*extern struct {
        ClassName: ?PWSTR,
        VendorName: ?PWSTR,
        IsVendor: BOOL,
        OptionsArray: ?*DHCP_OPTION_VALUE_ARRAY,
    },
};
/// All option values grouped per (policy, vendor) pair ("PB" = policy-based).
pub const DHCP_ALL_OPTION_VALUES_PB = extern struct {
    Flags: u32,
    NumElements: u32,
    Options: ?*extern struct {
        PolicyName: ?PWSTR,
        VendorName: ?PWSTR,
        IsVendor: BOOL,
        OptionsArray: ?*DHCP_OPTION_VALUE_ARRAY,
    },
};
/// Directory-service view of one DHCP server (name, address, DS location).
pub const DHCPDS_SERVER = extern struct {
    Version: u32,
    ServerName: ?PWSTR,
    ServerAddress: u32,
    Flags: u32,
    State: u32,
    DsLocation: ?PWSTR,
    DsLocType: u32,
};
/// Counted array of DHCPDS_SERVER records.
pub const DHCPDS_SERVERS = extern struct {
    Flags: u32,
    NumElements: u32,
    Servers: ?*DHCPDS_SERVER,
};
/// Generic server attribute: id/type plus a bool-or-ulong payload union.
pub const DHCP_ATTRIB = extern struct {
    DhcpAttribId: u32,
    DhcpAttribType: u32,
    Anonymous: extern union {
        DhcpAttribBool: BOOL,
        DhcpAttribUlong: u32,
    },
};
/// Counted array of DHCP_ATTRIB elements.
pub const DHCP_ATTRIB_ARRAY = extern struct {
    NumElements: u32,
    DhcpAttribs: ?*DHCP_ATTRIB,
};
/// IP range with BOOTP allocation accounting (current vs. maximum).
pub const DHCP_BOOTP_IP_RANGE = extern struct {
    StartAddress: u32,
    EndAddress: u32,
    BootpAllocated: u32,
    MaxBootpAllowed: u32,
};
/// Tagged V5 subnet element; like V4 but IpRange is a BOOTP-capable range.
pub const DHCP_SUBNET_ELEMENT_DATA_V5 = extern struct {
    pub const _DHCP_SUBNET_ELEMENT_UNION_V5 = extern union {
        IpRange: ?*DHCP_BOOTP_IP_RANGE,
        SecondaryHost: ?*DHCP_HOST_INFO,
        ReservedIp: ?*DHCP_IP_RESERVATION_V4,
        ExcludeIpRange: ?*DHCP_IP_RANGE,
        IpUsedCluster: ?*DHCP_IP_CLUSTER,
    };
    ElementType: DHCP_SUBNET_ELEMENT_TYPE,
    Element: _DHCP_SUBNET_ELEMENT_UNION_V5,
};
/// Counted array of DHCP_SUBNET_ELEMENT_DATA_V5 elements.
pub const DHCP_SUBNET_ELEMENT_INFO_ARRAY_V5 = extern struct {
    NumElements: u32,
    Elements: ?*DHCP_SUBNET_ELEMENT_DATA_V5,
};
/// Packet-processing performance counters for the server.
pub const DHCP_PERF_STATS = extern struct {
    dwNumPacketsReceived: u32,
    dwNumPacketsDuplicate: u32,
    dwNumPacketsExpired: u32,
    dwNumMilliSecondsProcessed: u32,
    dwNumPacketsInActiveQueue: u32,
    dwNumPacketsInPingQueue: u32,
    dwNumDiscoversReceived: u32,
    dwNumOffersSent: u32,
    dwNumRequestsReceived: u32,
    dwNumInformsReceived: u32,
    dwNumAcksSent: u32,
    dwNumNacksSent: u32,
    dwNumDeclinesReceived: u32,
    dwNumReleasesReceived: u32,
    dwNumDelayedOfferInQueue: u32,
    dwNumPacketsProcessed: u32,
    dwNumPacketsInQuarWaitingQueue: u32,
    dwNumPacketsInQuarReadyQueue: u32,
    dwNumPacketsInQuarDecisionQueue: u32,
};
/// One IPv4 interface binding of the server; IfId is IfIdSize bytes.
pub const DHCP_BIND_ELEMENT = extern struct {
    Flags: u32,
    fBoundToDHCPServer: BOOL,
    AdapterPrimaryAddress: u32,
    AdapterSubnetAddress: u32,
    IfDescription: ?PWSTR,
    IfIdSize: u32,
    IfId: ?*u8,
};
/// Counted array of DHCP_BIND_ELEMENT records.
pub const DHCP_BIND_ELEMENT_ARRAY = extern struct {
    NumElements: u32,
    Elements: ?*DHCP_BIND_ELEMENT,
};
/// One IPv6 interface binding; adds the IPv6 interface index.
pub const DHCPV6_BIND_ELEMENT = extern struct {
    Flags: u32,
    fBoundToDHCPServer: BOOL,
    AdapterPrimaryAddress: DHCP_IPV6_ADDRESS,
    AdapterSubnetAddress: DHCP_IPV6_ADDRESS,
    IfDescription: ?PWSTR,
    IpV6IfIndex: u32,
    IfIdSize: u32,
    IfId: ?*u8,
};
/// Counted array of DHCPV6_BIND_ELEMENT records.
pub const DHCPV6_BIND_ELEMENT_ARRAY = extern struct {
    NumElements: u32,
    Elements: ?*DHCPV6_BIND_ELEMENT,
};
/// Inclusive IPv6 address range.
pub const DHCP_IP_RANGE_V6 = extern struct {
    StartAddress: DHCP_IPV6_ADDRESS,
    EndAddress: DHCP_IPV6_ADDRESS,
};
/// IPv6 host identification (address plus NetBIOS/host names).
pub const DHCP_HOST_INFO_V6 = extern struct {
    IpAddress: DHCP_IPV6_ADDRESS,
    NetBiosName: ?PWSTR,
    HostName: ?PWSTR,
};
/// DHCPv6 scope description: prefix, server preference, name, state.
pub const DHCP_SUBNET_INFO_V6 = extern struct {
    SubnetAddress: DHCP_IPV6_ADDRESS,
    Prefix: u32,
    Preference: u16,
    SubnetName: ?PWSTR,
    SubnetComment: ?PWSTR,
    State: u32,
    ScopeId: u32,
};
/// Per-scope IPv6 usage counters (64-bit: v6 scopes can exceed 2^32 addrs).
pub const SCOPE_MIB_INFO_V6 = extern struct {
    Subnet: DHCP_IPV6_ADDRESS,
    NumAddressesInuse: u64,
    NumAddressesFree: u64,
    NumPendingAdvertises: u64,
};
/// Server-wide DHCPv6 protocol counters plus per-scope table.
pub const DHCP_MIB_INFO_V6 = extern struct {
    Solicits: u32,
    Advertises: u32,
    Requests: u32,
    Renews: u32,
    Rebinds: u32,
    Replies: u32,
    Confirms: u32,
    Declines: u32,
    Releases: u32,
    Informs: u32,
    ServerStartTime: DATE_TIME,
    Scopes: u32,
    ScopeInfo: ?*SCOPE_MIB_INFO_V6,
};
/// IPv6 reservation bound to a client id (DUID in DHCP_BINARY_DATA form).
pub const DHCP_IP_RESERVATION_V6 = extern struct {
    ReservedIpAddress: DHCP_IPV6_ADDRESS,
    ReservedForClient: ?*DHCP_BINARY_DATA,
    InterfaceId: u32,
};
/// Discriminator for DHCP_SUBNET_ELEMENT_DATA_V6's union.
pub const DHCP_SUBNET_ELEMENT_TYPE_V6 = enum(i32) {
    IpRanges = 0,
    ReservedIps = 1,
    ExcludedIpRanges = 2,
};
pub const Dhcpv6IpRanges = DHCP_SUBNET_ELEMENT_TYPE_V6.IpRanges;
pub const Dhcpv6ReservedIps = DHCP_SUBNET_ELEMENT_TYPE_V6.ReservedIps;
pub const Dhcpv6ExcludedIpRanges = DHCP_SUBNET_ELEMENT_TYPE_V6.ExcludedIpRanges;
/// Tagged IPv6 subnet element; ElementType selects the union member.
pub const DHCP_SUBNET_ELEMENT_DATA_V6 = extern struct {
    pub const DHCP_SUBNET_ELEMENT_UNION_V6 = extern union {
        IpRange: ?*DHCP_IP_RANGE_V6,
        ReservedIp: ?*DHCP_IP_RESERVATION_V6,
        ExcludeIpRange: ?*DHCP_IP_RANGE_V6,
    };
    ElementType: DHCP_SUBNET_ELEMENT_TYPE_V6,
    Element: DHCP_SUBNET_ELEMENT_UNION_V6,
};
// WARNING: this type symbol conflicts with something!
pub const DHCP_SUBNET_ELEMENT_UNION_V6_CONFLICT_ = usize;
/// Counted array of DHCP_SUBNET_ELEMENT_DATA_V6 elements.
pub const DHCP_SUBNET_ELEMENT_INFO_ARRAY_V6 = extern struct {
    NumElements: u32,
    Elements: ?*DHCP_SUBNET_ELEMENT_DATA_V6,
};
/// DHCPv6 client lease record: identified by DUID + IAID, with separate
/// valid and preferred lease expiry times.
pub const DHCP_CLIENT_INFO_V6 = extern struct {
    ClientIpAddress: DHCP_IPV6_ADDRESS,
    ClientDUID: DHCP_BINARY_DATA,
    AddressType: u32,
    IAID: u32,
    ClientName: ?PWSTR,
    ClientComment: ?PWSTR,
    ClientValidLeaseExpires: DATE_TIME,
    ClientPrefLeaseExpires: DATE_TIME,
    OwnerHost: DHCP_HOST_INFO_V6,
};
/// Counted array of IPv6 addresses.
pub const DHCPV6_IP_ARRAY = extern struct {
    NumElements: u32,
    Elements: ?*DHCP_IPV6_ADDRESS,
};
/// Counted array of pointers to DHCP_CLIENT_INFO_V6 records.
pub const DHCP_CLIENT_INFO_ARRAY_V6 = extern struct {
    NumElements: u32,
    Clients: ?*?*DHCP_CLIENT_INFO_V6,
};
/// Discriminator for DHCP_SEARCH_INFO_V6 (lookup by address, DUID, or name).
pub const DHCP_SEARCH_INFO_TYPE_V6 = enum(i32) {
    IpAddress = 0,
    DUID = 1,
    Name = 2,
};
pub const Dhcpv6ClientIpAddress = DHCP_SEARCH_INFO_TYPE_V6.IpAddress;
pub const Dhcpv6ClientDUID = DHCP_SEARCH_INFO_TYPE_V6.DUID;
pub const Dhcpv6ClientName = DHCP_SEARCH_INFO_TYPE_V6.Name;
/// Tagged lookup key for DHCPv6 client queries.
pub const DHCP_SEARCH_INFO_V6 = extern struct {
    pub const _DHCP_CLIENT_SEARCH_UNION_V6 = extern union {
        ClientIpAddress: DHCP_IPV6_ADDRESS,
        ClientDUID: DHCP_BINARY_DATA,
        ClientName: ?PWSTR,
    };
    SearchType: DHCP_SEARCH_INFO_TYPE_V6,
    SearchInfo: _DHCP_CLIENT_SEARCH_UNION_V6,
};
/// Which client attribute a policy condition tests.
pub const DHCP_POL_ATTR_TYPE = enum(i32) {
    HWAddr = 0,
    Option = 1,
    SubOption = 2,
    Fqdn = 3,
    FqdnSingleLabel = 4,
};
pub const DhcpAttrHWAddr = DHCP_POL_ATTR_TYPE.HWAddr;
pub const DhcpAttrOption = DHCP_POL_ATTR_TYPE.Option;
pub const DhcpAttrSubOption = DHCP_POL_ATTR_TYPE.SubOption;
pub const DhcpAttrFqdn = DHCP_POL_ATTR_TYPE.Fqdn;
pub const DhcpAttrFqdnSingleLabel = DHCP_POL_ATTR_TYPE.FqdnSingleLabel;
/// Comparison operator used by a policy condition.
pub const DHCP_POL_COMPARATOR = enum(i32) {
    Equal = 0,
    NotEqual = 1,
    BeginsWith = 2,
    NotBeginWith = 3,
    EndsWith = 4,
    NotEndWith = 5,
};
pub const DhcpCompEqual = DHCP_POL_COMPARATOR.Equal;
pub const DhcpCompNotEqual = DHCP_POL_COMPARATOR.NotEqual;
pub const DhcpCompBeginsWith = DHCP_POL_COMPARATOR.BeginsWith;
pub const DhcpCompNotBeginWith = DHCP_POL_COMPARATOR.NotBeginWith;
pub const DhcpCompEndsWith = DHCP_POL_COMPARATOR.EndsWith;
pub const DhcpCompNotEndWith = DHCP_POL_COMPARATOR.NotEndWith;
/// Logical operator joining child conditions of a policy expression.
pub const DHCP_POL_LOGIC_OPER = enum(i32) {
    Or = 0,
    And = 1,
};
pub const DhcpLogicalOr = DHCP_POL_LOGIC_OPER.Or;
pub const DhcpLogicalAnd = DHCP_POL_LOGIC_OPER.And;
/// Bit flags naming which DHCP_POLICY fields a modify call should update
/// (powers of two; combinable).
pub const DHCP_POLICY_FIELDS_TO_UPDATE = enum(i32) {
    Name = 1,
    Order = 2,
    Expr = 4,
    Ranges = 8,
    Descr = 16,
    Status = 32,
    DnsSuffix = 64,
};
pub const DhcpUpdatePolicyName = DHCP_POLICY_FIELDS_TO_UPDATE.Name;
pub const DhcpUpdatePolicyOrder = DHCP_POLICY_FIELDS_TO_UPDATE.Order;
pub const DhcpUpdatePolicyExpr = DHCP_POLICY_FIELDS_TO_UPDATE.Expr;
pub const DhcpUpdatePolicyRanges = DHCP_POLICY_FIELDS_TO_UPDATE.Ranges;
pub const DhcpUpdatePolicyDescr = DHCP_POLICY_FIELDS_TO_UPDATE.Descr;
pub const DhcpUpdatePolicyStatus = DHCP_POLICY_FIELDS_TO_UPDATE.Status;
pub const DhcpUpdatePolicyDnsSuffix = DHCP_POLICY_FIELDS_TO_UPDATE.DnsSuffix;
/// One policy condition: compare an attribute (optionally a specific
/// option/sub-option) against a ValueLength-byte value. ParentExpr indexes
/// the owning expression.
pub const DHCP_POL_COND = extern struct {
    ParentExpr: u32,
    Type: DHCP_POL_ATTR_TYPE,
    OptionID: u32,
    SubOptionID: u32,
    VendorName: ?PWSTR,
    Operator: DHCP_POL_COMPARATOR,
    Value: ?*u8,
    ValueLength: u32,
};
/// Counted array of DHCP_POL_COND elements.
pub const DHCP_POL_COND_ARRAY = extern struct {
    NumElements: u32,
    Elements: ?*DHCP_POL_COND,
};
/// One node of a policy expression tree (AND/OR of children).
pub const DHCP_POL_EXPR = extern struct {
    ParentExpr: u32,
    Operator: DHCP_POL_LOGIC_OPER,
};
/// Counted array of DHCP_POL_EXPR elements.
pub const DHCP_POL_EXPR_ARRAY = extern struct {
    NumElements: u32,
    Elements: ?*DHCP_POL_EXPR,
};
/// Counted array of DHCP_IP_RANGE elements.
pub const DHCP_IP_RANGE_ARRAY = extern struct {
    NumElements: u32,
    Elements: ?*DHCP_IP_RANGE,
};
/// A server or scope policy: match conditions/expressions plus the address
/// ranges it allocates from. Subnet == 0 presumably means server-level
/// (IsGlobalPolicy) — confirm against dhcpsapi docs.
pub const DHCP_POLICY = extern struct {
    PolicyName: ?PWSTR,
    IsGlobalPolicy: BOOL,
    Subnet: u32,
    ProcessingOrder: u32,
    Conditions: ?*DHCP_POL_COND_ARRAY,
    Expressions: ?*DHCP_POL_EXPR_ARRAY,
    Ranges: ?*DHCP_IP_RANGE_ARRAY,
    Description: ?PWSTR,
    Enabled: BOOL,
};
/// Counted array of DHCP_POLICY elements.
pub const DHCP_POLICY_ARRAY = extern struct {
    NumElements: u32,
    Elements: ?*DHCP_POLICY,
};
/// Policy plus an attached property array (e.g. DNS suffix).
pub const DHCP_POLICY_EX = extern struct {
    PolicyName: ?PWSTR,
    IsGlobalPolicy: BOOL,
    Subnet: u32,
    ProcessingOrder: u32,
    Conditions: ?*DHCP_POL_COND_ARRAY,
    Expressions: ?*DHCP_POL_EXPR_ARRAY,
    Ranges: ?*DHCP_IP_RANGE_ARRAY,
    Description: ?PWSTR,
    Enabled: BOOL,
    Properties: ?*DHCP_PROPERTY_ARRAY,
};
/// Counted array of DHCP_POLICY_EX elements.
pub const DHCP_POLICY_EX_ARRAY = extern struct {
    NumElements: u32,
    Elements: ?*DHCP_POLICY_EX,
};
/// Which DHCPv6 stateless parameter a set/get call addresses.
pub const DHCPV6_STATELESS_PARAM_TYPE = enum(i32) {
    PurgeInterval = 1,
    Status = 2,
};
pub const DhcpStatelessPurgeInterval = DHCPV6_STATELESS_PARAM_TYPE.PurgeInterval;
pub const DhcpStatelessStatus = DHCPV6_STATELESS_PARAM_TYPE.Status;
/// DHCPv6 stateless-inventory settings (enabled flag + purge interval).
pub const DHCPV6_STATELESS_PARAMS = extern struct {
    Status: BOOL,
    PurgeInterval: u32,
};
/// Per-scope stateless client add/remove counters.
pub const DHCPV6_STATELESS_SCOPE_STATS = extern struct {
    SubnetAddress: DHCP_IPV6_ADDRESS,
    NumStatelessClientsAdded: u64,
    NumStatelessClientsRemoved: u64,
};
/// Counted table of per-scope stateless statistics.
pub const DHCPV6_STATELESS_STATS = extern struct {
    NumScopes: u32,
    ScopeStats: ?*DHCPV6_STATELESS_SCOPE_STATS,
};
/// Failover operating mode: load balance vs. hot standby.
pub const DHCP_FAILOVER_MODE = enum(i32) {
    LoadBalance = 0,
    HotStandby = 1,
};
pub const LoadBalance = DHCP_FAILOVER_MODE.LoadBalance;
pub const HotStandby = DHCP_FAILOVER_MODE.HotStandby;
/// Role of this server within a failover relationship.
pub const DHCP_FAILOVER_SERVER = enum(i32) {
    PrimaryServer = 0,
    SecondaryServer = 1,
};
pub const PrimaryServer = DHCP_FAILOVER_SERVER.PrimaryServer;
pub const SecondaryServer = DHCP_FAILOVER_SERVER.SecondaryServer;
/// Failover finite-state-machine states (per the DHCP failover protocol).
pub const FSM_STATE = enum(i32) {
    NO_STATE = 0,
    INIT = 1,
    STARTUP = 2,
    NORMAL = 3,
    COMMUNICATION_INT = 4,
    PARTNER_DOWN = 5,
    POTENTIAL_CONFLICT = 6,
    CONFLICT_DONE = 7,
    RESOLUTION_INT = 8,
    RECOVER = 9,
    RECOVER_WAIT = 10,
    RECOVER_DONE = 11,
    PAUSED = 12,
    SHUTDOWN = 13,
};
pub const NO_STATE = FSM_STATE.NO_STATE;
pub const INIT = FSM_STATE.INIT;
pub const STARTUP = FSM_STATE.STARTUP;
pub const NORMAL = FSM_STATE.NORMAL;
pub const COMMUNICATION_INT = FSM_STATE.COMMUNICATION_INT;
pub const PARTNER_DOWN = FSM_STATE.PARTNER_DOWN;
pub const POTENTIAL_CONFLICT = FSM_STATE.POTENTIAL_CONFLICT;
pub const CONFLICT_DONE = FSM_STATE.CONFLICT_DONE;
pub const RESOLUTION_INT = FSM_STATE.RESOLUTION_INT;
pub const RECOVER = FSM_STATE.RECOVER;
pub const RECOVER_WAIT = FSM_STATE.RECOVER_WAIT;
pub const RECOVER_DONE = FSM_STATE.RECOVER_DONE;
pub const PAUSED = FSM_STATE.PAUSED;
pub const SHUTDOWN = FSM_STATE.SHUTDOWN;
/// One failover relationship between two servers: mode, FSM state, MCLT
/// (max client lead time), covered scopes, and the shared secret.
pub const DHCP_FAILOVER_RELATIONSHIP = extern struct {
    PrimaryServer: u32,
    SecondaryServer: u32,
    Mode: DHCP_FAILOVER_MODE,
    ServerType: DHCP_FAILOVER_SERVER,
    State: FSM_STATE,
    PrevState: FSM_STATE,
    Mclt: u32,
    SafePeriod: u32,
    RelationshipName: ?PWSTR,
    PrimaryServerName: ?PWSTR,
    SecondaryServerName: ?PWSTR,
    pScopes: ?*DHCP_IP_ARRAY,
    Percentage: u8,
    SharedSecret: ?PWSTR,
};
/// Counted array of DHCP_FAILOVER_RELATIONSHIP records.
pub const DHCP_FAILOVER_RELATIONSHIP_ARRAY = extern struct {
    NumElements: u32,
    pRelationships: ?*DHCP_FAILOVER_RELATIONSHIP,
};
/// VQ client record extended with failover binding/expiry bookkeeping
/// (potential-expiration times, binding-update status, policy name).
pub const DHCPV4_FAILOVER_CLIENT_INFO = extern struct {
    ClientIpAddress: u32,
    SubnetMask: u32,
    ClientHardwareAddress: DHCP_BINARY_DATA,
    ClientName: ?PWSTR,
    ClientComment: ?PWSTR,
    ClientLeaseExpires: DATE_TIME,
    OwnerHost: DHCP_HOST_INFO,
    bClientType: u8,
    AddressState: u8,
    Status: QuarantineStatus,
    ProbationEnds: DATE_TIME,
    QuarantineCapable: BOOL,
    SentPotExpTime: u32,
    AckPotExpTime: u32,
    RecvPotExpTime: u32,
    StartTime: u32,
    CltLastTransTime: u32,
    LastBndUpdTime: u32,
    BndMsgStatus: u32,
    PolicyName: ?PWSTR,
    Flags: u8,
};
/// Counted array of pointers to DHCPV4_FAILOVER_CLIENT_INFO records.
pub const DHCPV4_FAILOVER_CLIENT_INFO_ARRAY = extern struct {
    NumElements: u32,
    Clients: ?*?*DHCPV4_FAILOVER_CLIENT_INFO,
};
/// Failover client record plus the extended address-state field.
pub const DHCPV4_FAILOVER_CLIENT_INFO_EX = extern struct {
    ClientIpAddress: u32,
    SubnetMask: u32,
    ClientHardwareAddress: DHCP_BINARY_DATA,
    ClientName: ?PWSTR,
    ClientComment: ?PWSTR,
    ClientLeaseExpires: DATE_TIME,
    OwnerHost: DHCP_HOST_INFO,
    bClientType: u8,
    AddressState: u8,
    Status: QuarantineStatus,
    ProbationEnds: DATE_TIME,
    QuarantineCapable: BOOL,
    SentPotExpTime: u32,
    AckPotExpTime: u32,
    RecvPotExpTime: u32,
    StartTime: u32,
    CltLastTransTime: u32,
    LastBndUpdTime: u32,
    BndMsgStatus: u32,
    PolicyName: ?PWSTR,
    Flags: u8,
    AddressStateEx: u32,
};
/// Address-usage split between this server and its failover partner.
pub const DHCP_FAILOVER_STATISTICS = extern struct {
    NumAddr: u32,
    AddrFree: u32,
    AddrInUse: u32,
    PartnerAddrFree: u32,
    ThisAddrFree: u32,
    PartnerAddrInUse: u32,
    ThisAddrInUse: u32,
};
/// Parsed view of the options a DHCP client sent, handed to server callout
/// DLL hooks. Selected per target architecture at comptime; NOTE(review):
/// the X64/Arm64 and X86 arms are field-for-field identical here — the
/// split is generator output (pointer sizes differ per arch), so both arms
/// are kept verbatim rather than collapsed.
pub const DHCP_SERVER_OPTIONS = switch(@import("../zig.zig").arch) {
    .X64, .Arm64 => extern struct {
        MessageType: ?*u8,
        SubnetMask: ?*u32,
        RequestedAddress: ?*u32,
        RequestLeaseTime: ?*u32,
        OverlayFields: ?*u8,
        RouterAddress: ?*u32,
        Server: ?*u32,
        ParameterRequestList: ?*u8,
        ParameterRequestListLength: u32,
        MachineName: ?PSTR,
        MachineNameLength: u32,
        ClientHardwareAddressType: u8,
        ClientHardwareAddressLength: u8,
        ClientHardwareAddress: ?*u8,
        ClassIdentifier: ?PSTR,
        ClassIdentifierLength: u32,
        VendorClass: ?*u8,
        VendorClassLength: u32,
        DNSFlags: u32,
        DNSNameLength: u32,
        DNSName: ?*u8,
        DSDomainNameRequested: BOOLEAN,
        DSDomainName: ?PSTR,
        DSDomainNameLen: u32,
        ScopeId: ?*u32,
    },
    .X86 => extern struct {
        MessageType: ?*u8,
        SubnetMask: ?*u32,
        RequestedAddress: ?*u32,
        RequestLeaseTime: ?*u32,
        OverlayFields: ?*u8,
        RouterAddress: ?*u32,
        Server: ?*u32,
        ParameterRequestList: ?*u8,
        ParameterRequestListLength: u32,
        MachineName: ?PSTR,
        MachineNameLength: u32,
        ClientHardwareAddressType: u8,
        ClientHardwareAddressLength: u8,
        ClientHardwareAddress: ?*u8,
        ClassIdentifier: ?PSTR,
        ClassIdentifierLength: u32,
        VendorClass: ?*u8,
        VendorClassLength: u32,
        DNSFlags: u32,
        DNSNameLength: u32,
        DNSName: ?*u8,
        DSDomainNameRequested: BOOLEAN,
        DSDomainName: ?PSTR,
        DSDomainNameLen: u32,
        ScopeId: ?*u32,
    },
};
//--------------------------------------------------------------------------------
// Section: Functions (210)
//--------------------------------------------------------------------------------
// Client-side DHCP APIs (dhcpcsvc6.dll / dhcpcsvc.dll). Functions returning
// u32 presumably return a Win32 error code (0 == success) — confirm per
// function against the Microsoft docs; not visible from these declarations.
// TODO: this type is limited to platform 'windows6.0.6000'
/// Initializes the DHCPv6 client API; writes the API version via Version.
pub extern "dhcpcsvc6" fn Dhcpv6CApiInitialize(
    Version: ?*u32,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows6.0.6000'
/// Tears down DHCPv6 client API state set up by Dhcpv6CApiInitialize.
pub extern "dhcpcsvc6" fn Dhcpv6CApiCleanup(
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows6.0.6000'
/// Requests DHCPv6 options on an adapter; results are packed into buffer
/// (pSize is in/out: capacity in, bytes required/written out).
pub extern "dhcpcsvc6" fn Dhcpv6RequestParams(
    forceNewInform: BOOL,
    reserved: ?*anyopaque,
    adapterName: ?PWSTR,
    classId: ?*DHCPV6CAPI_CLASSID,
    recdParams: DHCPV6CAPI_PARAMS_ARRAY,
    buffer: ?*u8,
    pSize: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
/// Requests a DHCPv6 prefix delegation for an adapter.
pub extern "dhcpcsvc6" fn Dhcpv6RequestPrefix(
    adapterName: ?PWSTR,
    pclassId: ?*DHCPV6CAPI_CLASSID,
    prefixleaseInfo: ?*DHCPV6PrefixLeaseInformation,
    pdwTimeToWait: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
/// Renews a previously delegated DHCPv6 prefix.
pub extern "dhcpcsvc6" fn Dhcpv6RenewPrefix(
    adapterName: ?PWSTR,
    pclassId: ?*DHCPV6CAPI_CLASSID,
    prefixleaseInfo: ?*DHCPV6PrefixLeaseInformation,
    pdwTimeToWait: ?*u32,
    bValidatePrefix: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows6.0.6000'
/// Releases a delegated DHCPv6 prefix.
pub extern "dhcpcsvc6" fn Dhcpv6ReleasePrefix(
    adapterName: ?PWSTR,
    classId: ?*DHCPV6CAPI_CLASSID,
    leaseInfo: ?*DHCPV6PrefixLeaseInformation,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
/// Initializes the DHCPv4 client API; writes the API version via Version.
pub extern "dhcpcsvc" fn DhcpCApiInitialize(
    Version: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
/// Tears down DHCPv4 client API state set up by DhcpCApiInitialize.
pub extern "dhcpcsvc" fn DhcpCApiCleanup(
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windows5.0'
/// Requests/sends DHCP options on an adapter; Buffer receives the results
/// (pSize is in/out — see BytesParamIndex note below).
pub extern "dhcpcsvc" fn DhcpRequestParams(
    Flags: u32,
    Reserved: ?*anyopaque,
    AdapterName: ?PWSTR,
    ClassId: ?*DHCPCAPI_CLASSID,
    SendParams: DHCPCAPI_PARAMS_ARRAY,
    RecdParams: DHCPCAPI_PARAMS_ARRAY,
    // TODO: what to do with BytesParamIndex 7?
    Buffer: ?*u8,
    pSize: ?*u32,
    RequestIdStr: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
/// Undoes a persistent request previously made via DhcpRequestParams.
pub extern "dhcpcsvc" fn DhcpUndoRequestParams(
    Flags: u32,
    Reserved: ?*anyopaque,
    AdapterName: ?PWSTR,
    RequestIdStr: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
/// Registers for notification when the given DHCP parameters change;
/// Handle receives the wait handle/event.
pub extern "dhcpcsvc" fn DhcpRegisterParamChange(
    Flags: u32,
    Reserved: ?*anyopaque,
    AdapterName: ?PWSTR,
    ClassId: ?*DHCPCAPI_CLASSID,
    Params: DHCPCAPI_PARAMS_ARRAY,
    Handle: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
/// Unregisters a change notification created by DhcpRegisterParamChange.
pub extern "dhcpcsvc" fn DhcpDeRegisterParamChange(
    Flags: u32,
    Reserved: ?*anyopaque,
    Event: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windows5.0'
/// Removes this host's DHCP-initiated DNS registrations.
pub extern "dhcpcsvc" fn DhcpRemoveDNSRegistrations(
) callconv(@import("std").os.windows.WINAPI) u32;
/// Retrieves the subnet mask originally obtained via DHCP for an adapter.
pub extern "dhcpcsvc" fn DhcpGetOriginalSubnetMask(
    sAdapterName: ?[*:0]const u16,
    dwSubnetMask: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpAddFilterV4(
ServerIpAddress: ?[*:0]const u16,
AddFilterInfo: ?*DHCP_FILTER_ADD_INFO,
ForceFlag: BOOL,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpDeleteFilterV4(
ServerIpAddress: ?[*:0]const u16,
DeleteFilterInfo: ?*DHCP_ADDR_PATTERN,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetFilterV4(
ServerIpAddress: ?[*:0]const u16,
GlobalFilterInfo: ?*DHCP_FILTER_GLOBAL_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetFilterV4(
ServerIpAddress: ?[*:0]const u16,
GlobalFilterInfo: ?*DHCP_FILTER_GLOBAL_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumFilterV4(
ServerIpAddress: ?[*:0]const u16,
ResumeHandle: ?*DHCP_ADDR_PATTERN,
PreferredMaximum: u32,
ListType: DHCP_FILTER_LIST_TYPE,
EnumFilterInfo: ?*?*DHCP_FILTER_ENUM_INFO,
ElementsRead: ?*u32,
ElementsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpCreateSubnet(
ServerIpAddress: ?[*:0]const u16,
SubnetAddress: u32,
SubnetInfo: ?*const DHCP_SUBNET_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpSetSubnetInfo(
ServerIpAddress: ?[*:0]const u16,
SubnetAddress: u32,
SubnetInfo: ?*const DHCP_SUBNET_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpGetSubnetInfo(
ServerIpAddress: ?[*:0]const u16,
SubnetAddress: u32,
SubnetInfo: ?*?*DHCP_SUBNET_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpEnumSubnets(
ServerIpAddress: ?[*:0]const u16,
ResumeHandle: ?*u32,
PreferredMaximum: u32,
EnumInfo: ?*?*DHCP_IP_ARRAY,
ElementsRead: ?*u32,
ElementsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpAddSubnetElement(
ServerIpAddress: ?[*:0]const u16,
SubnetAddress: u32,
AddElementInfo: ?*const DHCP_SUBNET_ELEMENT_DATA,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpEnumSubnetElements(
ServerIpAddress: ?[*:0]const u16,
SubnetAddress: u32,
EnumElementType: DHCP_SUBNET_ELEMENT_TYPE,
ResumeHandle: ?*u32,
PreferredMaximum: u32,
EnumElementInfo: ?*?*DHCP_SUBNET_ELEMENT_INFO_ARRAY,
ElementsRead: ?*u32,
ElementsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpRemoveSubnetElement(
ServerIpAddress: ?[*:0]const u16,
SubnetAddress: u32,
RemoveElementInfo: ?*const DHCP_SUBNET_ELEMENT_DATA,
ForceFlag: DHCP_FORCE_FLAG,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpDeleteSubnet(
ServerIpAddress: ?[*:0]const u16,
SubnetAddress: u32,
ForceFlag: DHCP_FORCE_FLAG,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpCreateOption(
ServerIpAddress: ?[*:0]const u16,
OptionID: u32,
OptionInfo: ?*const DHCP_OPTION,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetOptionInfo(
ServerIpAddress: ?[*:0]const u16,
OptionID: u32,
OptionInfo: ?*const DHCP_OPTION,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetOptionInfo(
ServerIpAddress: ?[*:0]const u16,
OptionID: u32,
OptionInfo: ?*?*DHCP_OPTION,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumOptions(
ServerIpAddress: ?[*:0]const u16,
ResumeHandle: ?*u32,
PreferredMaximum: u32,
Options: ?*?*DHCP_OPTION_ARRAY,
OptionsRead: ?*u32,
OptionsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpRemoveOption(
ServerIpAddress: ?[*:0]const u16,
OptionID: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetOptionValue(
ServerIpAddress: ?[*:0]const u16,
OptionID: u32,
ScopeInfo: ?*const DHCP_OPTION_SCOPE_INFO,
OptionValue: ?*const DHCP_OPTION_DATA,
) callconv(@import("std").os.windows.WINAPI) u32;
// ---------------------------------------------------------------------------
// DHCP Server Management API bindings (dhcpsapi.dll).
//
// Conventions visible in the declarations below:
//   * Every function uses the Windows stdcall convention
//     (`callconv(@import("std").os.windows.WINAPI)`) and — except for the few
//     `void` cases — returns a `u32`. NOTE(review): per the usual Win32
//     convention this is presumably an ERROR_* status code with 0 meaning
//     success; confirm against the Windows SDK `dhcpsapi.h` documentation.
//   * `ServerIpAddress` is a nullable UTF-16 string (`?[*:0]const u16` or
//     `?PWSTR`) identifying the DHCP server to administer. NOTE(review):
//     passing null presumably targets the local server — TODO confirm.
//   * Double-pointer out-parameters (`?*?*T`) receive an API-allocated
//     buffer. NOTE(review): such buffers are presumably released with
//     `DhcpRpcFreeMemory` (declared below) — verify with the SDK docs
//     before relying on it.
//   * The recurring (`ResumeHandle`, `PreferredMaximum`, `...Read`,
//     `...Total`) parameter groups are the standard Win32 paged-enumeration
//     pattern: callers loop, carrying `ResumeHandle` between calls.
//   * The `// TODO: this type is limited to platform '...'` comments are
//     emitted by the binding generator and record the minimum Windows
//     Server version for each entry point.
// ---------------------------------------------------------------------------

// --- Option values (classic, non-class-aware API) ---
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetOptionValues(
    ServerIpAddress: ?[*:0]const u16,
    ScopeInfo: ?*const DHCP_OPTION_SCOPE_INFO,
    OptionValues: ?*const DHCP_OPTION_VALUE_ARRAY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpGetOptionValue(
    ServerIpAddress: ?[*:0]const u16,
    OptionID: u32,
    ScopeInfo: ?*const DHCP_OPTION_SCOPE_INFO,
    OptionValue: ?*?*DHCP_OPTION_VALUE,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumOptionValues(
    ServerIpAddress: ?[*:0]const u16,
    ScopeInfo: ?*const DHCP_OPTION_SCOPE_INFO,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    OptionValues: ?*?*DHCP_OPTION_VALUE_ARRAY,
    OptionsRead: ?*u32,
    OptionsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpRemoveOptionValue(
    ServerIpAddress: ?[*:0]const u16,
    OptionID: u32,
    ScopeInfo: ?*const DHCP_OPTION_SCOPE_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- Client lease records, VQ ("with quarantine") variants ---
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpCreateClientInfoVQ(
    ServerIpAddress: ?[*:0]const u16,
    ClientInfo: ?*const DHCP_CLIENT_INFO_VQ,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetClientInfoVQ(
    ServerIpAddress: ?[*:0]const u16,
    ClientInfo: ?*const DHCP_CLIENT_INFO_VQ,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetClientInfoVQ(
    ServerIpAddress: ?[*:0]const u16,
    SearchInfo: ?*const DHCP_SEARCH_INFO,
    ClientInfo: ?*?*DHCP_CLIENT_INFO_VQ,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumSubnetClientsVQ(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    ClientInfo: ?*?*DHCP_CLIENT_INFO_ARRAY_VQ,
    ClientsRead: ?*u32,
    ClientsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumSubnetClientsFilterStatusInfo(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    ClientInfo: ?*?*DHCP_CLIENT_FILTER_STATUS_INFO_ARRAY,
    ClientsRead: ?*u32,
    ClientsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- Client lease records, original (V1) variants ---
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpCreateClientInfo(
    ServerIpAddress: ?[*:0]const u16,
    ClientInfo: ?*const DHCP_CLIENT_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpSetClientInfo(
    ServerIpAddress: ?[*:0]const u16,
    ClientInfo: ?*const DHCP_CLIENT_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpGetClientInfo(
    ServerIpAddress: ?[*:0]const u16,
    SearchInfo: ?*const DHCP_SEARCH_INFO,
    ClientInfo: ?*?*DHCP_CLIENT_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpDeleteClientInfo(
    ServerIpAddress: ?[*:0]const u16,
    ClientInfo: ?*const DHCP_SEARCH_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpEnumSubnetClients(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    ClientInfo: ?*?*DHCP_CLIENT_INFO_ARRAY,
    ClientsRead: ?*u32,
    ClientsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetClientOptions(
    ServerIpAddress: ?[*:0]const u16,
    ClientIpAddress: u32,
    ClientSubnetMask: u32,
    ClientOptions: ?*?*DHCP_OPTION_LIST,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- Server-wide statistics, configuration, and database maintenance ---
pub extern "DHCPSAPI" fn DhcpGetMibInfo(
    ServerIpAddress: ?[*:0]const u16,
    MibInfo: ?*?*DHCP_MIB_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerSetConfig(
    ServerIpAddress: ?[*:0]const u16,
    FieldsToSet: u32,
    ConfigInfo: ?*DHCP_SERVER_CONFIG_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerGetConfig(
    ServerIpAddress: ?[*:0]const u16,
    ConfigInfo: ?*?*DHCP_SERVER_CONFIG_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpScanDatabase(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    FixFlag: u32,
    ScanList: ?*?*DHCP_SCAN_LIST,
) callconv(@import("std").os.windows.WINAPI) u32;
// Frees API-allocated buffers (note: returns void — deallocation cannot fail).
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpRpcFreeMemory(
    BufferPointer: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpGetVersion(
    ServerIpAddress: ?PWSTR,
    MajorVersion: ?*u32,
    MinorVersion: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- IPv4 subnet elements and clients, V4 structures ---
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpAddSubnetElementV4(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    AddElementInfo: ?*const DHCP_SUBNET_ELEMENT_DATA_V4,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpEnumSubnetElementsV4(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    EnumElementType: DHCP_SUBNET_ELEMENT_TYPE,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    EnumElementInfo: ?*?*DHCP_SUBNET_ELEMENT_INFO_ARRAY_V4,
    ElementsRead: ?*u32,
    ElementsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpRemoveSubnetElementV4(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    RemoveElementInfo: ?*const DHCP_SUBNET_ELEMENT_DATA_V4,
    ForceFlag: DHCP_FORCE_FLAG,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpCreateClientInfoV4(
    ServerIpAddress: ?[*:0]const u16,
    ClientInfo: ?*const DHCP_CLIENT_INFO_V4,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetClientInfoV4(
    ServerIpAddress: ?[*:0]const u16,
    ClientInfo: ?*const DHCP_CLIENT_INFO_V4,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetClientInfoV4(
    ServerIpAddress: ?[*:0]const u16,
    SearchInfo: ?*const DHCP_SEARCH_INFO,
    ClientInfo: ?*?*DHCP_CLIENT_INFO_V4,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumSubnetClientsV4(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    ClientInfo: ?*?*DHCP_CLIENT_INFO_ARRAY_V4,
    ClientsRead: ?*u32,
    ClientsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerSetConfigV4(
    ServerIpAddress: ?[*:0]const u16,
    FieldsToSet: u32,
    ConfigInfo: ?*DHCP_SERVER_CONFIG_INFO_V4,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerGetConfigV4(
    ServerIpAddress: ?[*:0]const u16,
    ConfigInfo: ?*?*DHCP_SERVER_CONFIG_INFO_V4,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- Superscopes (IPv4 scope grouping) ---
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetSuperScopeV4(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    SuperScopeName: ?[*:0]const u16,
    ChangeExisting: BOOL,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpDeleteSuperScopeV4(
    ServerIpAddress: ?[*:0]const u16,
    SuperScopeName: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetSuperScopeInfoV4(
    ServerIpAddress: ?[*:0]const u16,
    SuperScopeTable: ?*?*DHCP_SUPER_SCOPE_TABLE,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumSubnetClientsV5(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    ClientInfo: ?*?*DHCP_CLIENT_INFO_ARRAY_V5,
    ClientsRead: ?*u32,
    ClientsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- Options, V5 class/vendor-aware variants (ClassName/VendorName select
// --- the user class and vendor class an option belongs to) ---
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpCreateOptionV5(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionId: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    OptionInfo: ?*DHCP_OPTION,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetOptionInfoV5(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionID: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    OptionInfo: ?*DHCP_OPTION,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetOptionInfoV5(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionID: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    OptionInfo: ?*?*DHCP_OPTION,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumOptionsV5(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    Options: ?*?*DHCP_OPTION_ARRAY,
    OptionsRead: ?*u32,
    OptionsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpRemoveOptionV5(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionID: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpSetOptionValueV5(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionId: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO,
    OptionValue: ?*DHCP_OPTION_DATA,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetOptionValuesV5(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO,
    OptionValues: ?*DHCP_OPTION_VALUE_ARRAY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetOptionValueV5(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionID: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO,
    OptionValue: ?*?*DHCP_OPTION_VALUE,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetOptionValueV6(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionID: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO6,
    OptionValue: ?*?*DHCP_OPTION_VALUE,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumOptionValuesV5(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    OptionValues: ?*?*DHCP_OPTION_VALUE_ARRAY,
    OptionsRead: ?*u32,
    OptionsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpRemoveOptionValueV5(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionID: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- User/vendor class management ---
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpCreateClass(
    ServerIpAddress: ?PWSTR,
    ReservedMustBeZero: u32,
    ClassInfo: ?*DHCP_CLASS_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpModifyClass(
    ServerIpAddress: ?PWSTR,
    ReservedMustBeZero: u32,
    ClassInfo: ?*DHCP_CLASS_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpDeleteClass(
    ServerIpAddress: ?PWSTR,
    ReservedMustBeZero: u32,
    ClassName: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetClassInfo(
    ServerIpAddress: ?PWSTR,
    ReservedMustBeZero: u32,
    PartialClassInfo: ?*DHCP_CLASS_INFO,
    FilledClassInfo: ?*?*DHCP_CLASS_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumClasses(
    ServerIpAddress: ?PWSTR,
    ReservedMustBeZero: u32,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    ClassInfoArray: ?*?*DHCP_CLASS_INFO_ARRAY,
    nRead: ?*u32,
    nTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetAllOptions(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionStruct: ?*?*DHCP_ALL_OPTIONS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetAllOptionsV6(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionStruct: ?*?*DHCP_ALL_OPTIONS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetAllOptionValues(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO,
    Values: ?*?*DHCP_ALL_OPTION_VALUES,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetAllOptionValuesV6(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO6,
    Values: ?*?*DHCP_ALL_OPTION_VALUES,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- Directory-service server registration (note: these three take no
// --- ServerIpAddress; callback parameters are untyped `?*anyopaque`) ---
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpEnumServers(
    Flags: u32,
    IdInfo: ?*anyopaque,
    Servers: ?*?*DHCPDS_SERVERS,
    CallbackFn: ?*anyopaque,
    CallbackData: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpAddServer(
    Flags: u32,
    IdInfo: ?*anyopaque,
    NewServer: ?*DHCPDS_SERVER,
    CallbackFn: ?*anyopaque,
    CallbackData: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpDeleteServer(
    Flags: u32,
    IdInfo: ?*anyopaque,
    NewServer: ?*DHCPDS_SERVER,
    CallbackFn: ?*anyopaque,
    CallbackData: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- Interface binding info (IPv4) ---
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpGetServerBindingInfo(
    ServerIpAddress: ?[*:0]const u16,
    Flags: u32,
    BindElementsInfo: ?*?*DHCP_BIND_ELEMENT_ARRAY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpSetServerBindingInfo(
    ServerIpAddress: ?[*:0]const u16,
    Flags: u32,
    BindElementInfo: ?*DHCP_BIND_ELEMENT_ARRAY,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- IPv4 subnet elements, V5 structures ---
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpAddSubnetElementV5(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    AddElementInfo: ?*const DHCP_SUBNET_ELEMENT_DATA_V5,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpEnumSubnetElementsV5(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    EnumElementType: DHCP_SUBNET_ELEMENT_TYPE,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    EnumElementInfo: ?*?*DHCP_SUBNET_ELEMENT_INFO_ARRAY_V5,
    ElementsRead: ?*u32,
    ElementsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpRemoveSubnetElementV5(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    RemoveElementInfo: ?*const DHCP_SUBNET_ELEMENT_DATA_V5,
    ForceFlag: DHCP_FORCE_FLAG,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4EnumSubnetReservations(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    EnumElementInfo: ?*?*DHCP_RESERVATION_INFO_ARRAY,
    ElementsRead: ?*u32,
    ElementsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- Options, V6 (DHCPv6) variants ---
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpCreateOptionV6(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionId: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    OptionInfo: ?*DHCP_OPTION,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpRemoveOptionV6(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionID: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumOptionsV6(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    Options: ?*?*DHCP_OPTION_ARRAY,
    OptionsRead: ?*u32,
    OptionsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpRemoveOptionValueV6(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionID: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO6,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetOptionInfoV6(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionID: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    OptionInfo: ?*?*DHCP_OPTION,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetOptionInfoV6(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionID: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    OptionInfo: ?*DHCP_OPTION,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetOptionValueV6(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionId: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO6,
    OptionValue: ?*DHCP_OPTION_DATA,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- IPv4 subnets, VQ variants ---
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetSubnetInfoVQ(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    SubnetInfo: ?*?*DHCP_SUBNET_INFO_VQ,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpCreateSubnetVQ(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    SubnetInfo: ?*const DHCP_SUBNET_INFO_VQ,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetSubnetInfoVQ(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: u32,
    SubnetInfo: ?*const DHCP_SUBNET_INFO_VQ,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumOptionValuesV6(
    ServerIpAddress: ?[*:0]const u16,
    Flags: u32,
    ClassName: ?PWSTR,
    VendorName: ?PWSTR,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO6,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    OptionValues: ?*?*DHCP_OPTION_VALUE_ARRAY,
    OptionsRead: ?*u32,
    OptionsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- Process-wide helpers (no server argument) ---
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpDsInit(
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpDsCleanup(
) callconv(@import("std").os.windows.WINAPI) void;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetThreadOptions(
    Flags: u32,
    Reserved: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetThreadOptions(
    pFlags: ?*u32,
    Reserved: ?*anyopaque,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- Server attributes, authorization, audit log, credentials, backup ---
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerQueryAttribute(
    ServerIpAddr: ?PWSTR,
    dwReserved: u32,
    DhcpAttribId: u32,
    pDhcpAttrib: ?*?*DHCP_ATTRIB,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerQueryAttributes(
    ServerIpAddr: ?PWSTR,
    dwReserved: u32,
    dwAttribCount: u32,
    pDhcpAttribs: ?*u32,
    pDhcpAttribArr: ?*?*DHCP_ATTRIB_ARRAY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2000'
pub extern "DHCPSAPI" fn DhcpServerRedoAuthorization(
    ServerIpAddr: ?PWSTR,
    dwReserved: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpAuditLogSetParams(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    AuditLogDir: ?PWSTR,
    DiskCheckInterval: u32,
    MaxLogFilesSize: u32,
    MinSpaceOnDisk: u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpAuditLogGetParams(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    AuditLogDir: ?*?PWSTR,
    DiskCheckInterval: ?*u32,
    MaxLogFilesSize: ?*u32,
    MinSpaceOnDisk: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// NOTE(review): Uname/Domain are caller-provided non-null buffers with their
// sizes passed separately — buffer semantics per the SDK; TODO confirm units
// (characters vs bytes).
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerQueryDnsRegCredentials(
    ServerIpAddress: ?PWSTR,
    UnameSize: u32,
    Uname: [*:0]u16,
    DomainSize: u32,
    Domain: [*:0]u16,
) callconv(@import("std").os.windows.WINAPI) u32;
pub extern "DHCPSAPI" fn DhcpServerSetDnsRegCredentials(
    ServerIpAddress: ?PWSTR,
    Uname: ?PWSTR,
    Domain: ?PWSTR,
    Passwd: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerSetDnsRegCredentialsV5(
    ServerIpAddress: ?PWSTR,
    Uname: ?PWSTR,
    Domain: ?PWSTR,
    Passwd: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerBackupDatabase(
    ServerIpAddress: ?PWSTR,
    Path: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerRestoreDatabase(
    ServerIpAddress: ?PWSTR,
    Path: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerSetConfigVQ(
    ServerIpAddress: ?[*:0]const u16,
    FieldsToSet: u32,
    ConfigInfo: ?*DHCP_SERVER_CONFIG_INFO_VQ,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerGetConfigVQ(
    ServerIpAddress: ?[*:0]const u16,
    ConfigInfo: ?*?*DHCP_SERVER_CONFIG_INFO_VQ,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetServerSpecificStrings(
    ServerIpAddress: ?[*:0]const u16,
    ServerSpecificStrings: ?*?*DHCP_SERVER_SPECIFIC_STRINGS,
) callconv(@import("std").os.windows.WINAPI) u32;
// Frees audit-log fields of a VQ config struct (returns void).
pub extern "DHCPSAPI" fn DhcpServerAuditlogParamsFree(
    ConfigInfo: ?*DHCP_SERVER_CONFIG_INFO_VQ,
) callconv(@import("std").os.windows.WINAPI) void;

// --- DHCPv6 subnets, elements, clients, server config ---
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpCreateSubnetV6(
    ServerIpAddress: ?PWSTR,
    SubnetAddress: DHCP_IPV6_ADDRESS,
    SubnetInfo: ?*DHCP_SUBNET_INFO_V6,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpDeleteSubnetV6(
    ServerIpAddress: ?PWSTR,
    SubnetAddress: DHCP_IPV6_ADDRESS,
    ForceFlag: DHCP_FORCE_FLAG,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumSubnetsV6(
    ServerIpAddress: ?[*:0]const u16,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    EnumInfo: ?*?*DHCPV6_IP_ARRAY,
    ElementsRead: ?*u32,
    ElementsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpAddSubnetElementV6(
    ServerIpAddress: ?PWSTR,
    SubnetAddress: DHCP_IPV6_ADDRESS,
    AddElementInfo: ?*DHCP_SUBNET_ELEMENT_DATA_V6,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpRemoveSubnetElementV6(
    ServerIpAddress: ?PWSTR,
    SubnetAddress: DHCP_IPV6_ADDRESS,
    RemoveElementInfo: ?*DHCP_SUBNET_ELEMENT_DATA_V6,
    ForceFlag: DHCP_FORCE_FLAG,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumSubnetElementsV6(
    ServerIpAddress: ?PWSTR,
    SubnetAddress: DHCP_IPV6_ADDRESS,
    EnumElementType: DHCP_SUBNET_ELEMENT_TYPE_V6,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    EnumElementInfo: ?*?*DHCP_SUBNET_ELEMENT_INFO_ARRAY_V6,
    ElementsRead: ?*u32,
    ElementsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetSubnetInfoV6(
    ServerIpAddress: ?PWSTR,
    SubnetAddress: DHCP_IPV6_ADDRESS,
    SubnetInfo: ?*?*DHCP_SUBNET_INFO_V6,
) callconv(@import("std").os.windows.WINAPI) u32;
// Note: unlike the IPv4 enumerators, the resume cursor here is a full IPv6
// address, not a u32 handle.
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumSubnetClientsV6(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: DHCP_IPV6_ADDRESS,
    ResumeHandle: ?*DHCP_IPV6_ADDRESS,
    PreferredMaximum: u32,
    ClientInfo: ?*?*DHCP_CLIENT_INFO_ARRAY_V6,
    ClientsRead: ?*u32,
    ClientsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerGetConfigV6(
    ServerIpAddress: ?[*:0]const u16,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO6,
    ConfigInfo: ?*?*DHCP_SERVER_CONFIG_INFO_V6,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpServerSetConfigV6(
    ServerIpAddress: ?[*:0]const u16,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO6,
    FieldsToSet: u32,
    ConfigInfo: ?*DHCP_SERVER_CONFIG_INFO_V6,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetSubnetInfoV6(
    ServerIpAddress: ?[*:0]const u16,
    SubnetAddress: DHCP_IPV6_ADDRESS,
    SubnetInfo: ?*DHCP_SUBNET_INFO_V6,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetMibInfoV6(
    ServerIpAddress: ?[*:0]const u16,
    MibInfo: ?*?*DHCP_MIB_INFO_V6,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetServerBindingInfoV6(
    ServerIpAddress: ?[*:0]const u16,
    Flags: u32,
    BindElementsInfo: ?*?*DHCPV6_BIND_ELEMENT_ARRAY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetServerBindingInfoV6(
    ServerIpAddress: ?[*:0]const u16,
    Flags: u32,
    BindElementInfo: ?*DHCPV6_BIND_ELEMENT_ARRAY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetClientInfoV6(
    ServerIpAddress: ?[*:0]const u16,
    ClientInfo: ?*const DHCP_CLIENT_INFO_V6,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetClientInfoV6(
    ServerIpAddress: ?[*:0]const u16,
    SearchInfo: ?*const DHCP_SEARCH_INFO_V6,
    ClientInfo: ?*?*DHCP_CLIENT_INFO_V6,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpDeleteClientInfoV6(
    ServerIpAddress: ?[*:0]const u16,
    ClientInfo: ?*const DHCP_SEARCH_INFO_V6,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpCreateClassV6(
    ServerIpAddress: ?PWSTR,
    ReservedMustBeZero: u32,
    ClassInfo: ?*DHCP_CLASS_INFO_V6,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpModifyClassV6(
    ServerIpAddress: ?PWSTR,
    ReservedMustBeZero: u32,
    ClassInfo: ?*DHCP_CLASS_INFO_V6,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpDeleteClassV6(
    ServerIpAddress: ?PWSTR,
    ReservedMustBeZero: u32,
    ClassName: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpEnumClassesV6(
    ServerIpAddress: ?PWSTR,
    ReservedMustBeZero: u32,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    ClassInfoArray: ?*?*DHCP_CLASS_INFO_ARRAY_V6,
    nRead: ?*u32,
    nTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- Misc IPv4: OFFER delay, V5 MIB info, security group ---
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpSetSubnetDelayOffer(
    ServerIpAddress: ?PWSTR,
    SubnetAddress: u32,
    TimeDelayInMilliseconds: u16,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetSubnetDelayOffer(
    ServerIpAddress: ?PWSTR,
    SubnetAddress: u32,
    TimeDelayInMilliseconds: ?*u16,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2008'
pub extern "DHCPSAPI" fn DhcpGetMibInfoV5(
    ServerIpAddress: ?[*:0]const u16,
    MibInfo: ?*?*DHCP_MIB_INFO_V5,
) callconv(@import("std").os.windows.WINAPI) u32;
pub extern "DHCPSAPI" fn DhcpAddSecurityGroup(
    pServer: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- Policy-based option values ("V4" API, Server 2012+; PolicyName
// --- replaces the ClassName selector of the V5 API) ---
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4GetOptionValue(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionID: u32,
    PolicyName: ?PWSTR,
    VendorName: ?PWSTR,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO,
    OptionValue: ?*?*DHCP_OPTION_VALUE,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4SetOptionValue(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionId: u32,
    PolicyName: ?PWSTR,
    VendorName: ?PWSTR,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO,
    OptionValue: ?*DHCP_OPTION_DATA,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4SetOptionValues(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    PolicyName: ?PWSTR,
    VendorName: ?PWSTR,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO,
    OptionValues: ?*DHCP_OPTION_VALUE_ARRAY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4RemoveOptionValue(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    OptionID: u32,
    PolicyName: ?PWSTR,
    VendorName: ?PWSTR,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4GetAllOptionValues(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    ScopeInfo: ?*DHCP_OPTION_SCOPE_INFO,
    Values: ?*?*DHCP_ALL_OPTION_VALUES_PB,
) callconv(@import("std").os.windows.WINAPI) u32;

// --- DHCP failover relationships (Server 2012+) ---
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4FailoverCreateRelationship(
    ServerIpAddress: ?PWSTR,
    pRelationship: ?*DHCP_FAILOVER_RELATIONSHIP,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4FailoverSetRelationship(
    ServerIpAddress: ?PWSTR,
    Flags: u32,
    pRelationship: ?*DHCP_FAILOVER_RELATIONSHIP,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4FailoverDeleteRelationship(
    ServerIpAddress: ?PWSTR,
    pRelationshipName: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4FailoverGetRelationship(
    ServerIpAddress: ?PWSTR,
    pRelationshipName: ?PWSTR,
    pRelationship: ?*?*DHCP_FAILOVER_RELATIONSHIP,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4FailoverEnumRelationship(
    ServerIpAddress: ?PWSTR,
    ResumeHandle: ?*u32,
    PreferredMaximum: u32,
    pRelationship: ?*?*DHCP_FAILOVER_RELATIONSHIP_ARRAY,
    RelationshipRead: ?*u32,
    RelationshipTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4FailoverAddScopeToRelationship(
    ServerIpAddress: ?PWSTR,
    pRelationship: ?*DHCP_FAILOVER_RELATIONSHIP,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4FailoverDeleteScopeFromRelationship(
    ServerIpAddress: ?PWSTR,
    pRelationship: ?*DHCP_FAILOVER_RELATIONSHIP,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4FailoverGetScopeRelationship(
    ServerIpAddress: ?PWSTR,
    ScopeId: u32,
    pRelationship: ?*?*DHCP_FAILOVER_RELATIONSHIP,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4FailoverGetScopeStatistics(
    ServerIpAddress: ?PWSTR,
    ScopeId: u32,
    pStats: ?*?*DHCP_FAILOVER_STATISTICS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4FailoverGetClientInfo(
    ServerIpAddress: ?PWSTR,
    SearchInfo: ?*const DHCP_SEARCH_INFO,
    ClientInfo: ?*?*DHCPV4_FAILOVER_CLIENT_INFO,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4FailoverGetSystemTime(
    ServerIpAddress: ?PWSTR,
    pTime: ?*u32,
    pMaxAllowedDeltaTime: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4FailoverGetAddressStatus(
    ServerIpAddress: ?PWSTR,
    SubnetAddress: u32,
    pStatus: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4FailoverTriggerAddrAllocation(
    ServerIpAddress: ?PWSTR,
    pFailRelName: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpHlprCreateV4Policy(
PolicyName: ?PWSTR,
fGlobalPolicy: BOOL,
Subnet: u32,
ProcessingOrder: u32,
RootOperator: DHCP_POL_LOGIC_OPER,
Description: ?PWSTR,
Enabled: BOOL,
Policy: ?*?*DHCP_POLICY,
) callconv(@import("std").os.windows.WINAPI) u32;
pub extern "DHCPSAPI" fn DhcpHlprCreateV4PolicyEx(
PolicyName: ?PWSTR,
fGlobalPolicy: BOOL,
Subnet: u32,
ProcessingOrder: u32,
RootOperator: DHCP_POL_LOGIC_OPER,
Description: ?PWSTR,
Enabled: BOOL,
Policy: ?*?*DHCP_POLICY_EX,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpHlprAddV4PolicyExpr(
Policy: ?*DHCP_POLICY,
ParentExpr: u32,
Operator: DHCP_POL_LOGIC_OPER,
ExprIndex: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpHlprAddV4PolicyCondition(
Policy: ?*DHCP_POLICY,
ParentExpr: u32,
Type: DHCP_POL_ATTR_TYPE,
OptionID: u32,
SubOptionID: u32,
VendorName: ?PWSTR,
Operator: DHCP_POL_COMPARATOR,
// TODO: what to do with BytesParamIndex 8?
Value: ?*u8,
ValueLength: u32,
ConditionIndex: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpHlprAddV4PolicyRange(
Policy: ?*DHCP_POLICY,
Range: ?*DHCP_IP_RANGE,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpHlprResetV4PolicyExpr(
Policy: ?*DHCP_POLICY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpHlprModifyV4PolicyExpr(
Policy: ?*DHCP_POLICY,
Operator: DHCP_POL_LOGIC_OPER,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpHlprFreeV4Policy(
Policy: ?*DHCP_POLICY,
) callconv(@import("std").os.windows.WINAPI) void;
pub extern "DHCPSAPI" fn DhcpHlprFreeV4PolicyArray(
PolicyArray: ?*DHCP_POLICY_ARRAY,
) callconv(@import("std").os.windows.WINAPI) void;
pub extern "DHCPSAPI" fn DhcpHlprFreeV4PolicyEx(
PolicyEx: ?*DHCP_POLICY_EX,
) callconv(@import("std").os.windows.WINAPI) void;
pub extern "DHCPSAPI" fn DhcpHlprFreeV4PolicyExArray(
PolicyExArray: ?*DHCP_POLICY_EX_ARRAY,
) callconv(@import("std").os.windows.WINAPI) void;
pub extern "DHCPSAPI" fn DhcpHlprFreeV4DhcpProperty(
Property: ?*DHCP_PROPERTY,
) callconv(@import("std").os.windows.WINAPI) void;
pub extern "DHCPSAPI" fn DhcpHlprFreeV4DhcpPropertyArray(
PropertyArray: ?*DHCP_PROPERTY_ARRAY,
) callconv(@import("std").os.windows.WINAPI) void;
pub extern "DHCPSAPI" fn DhcpHlprFindV4DhcpProperty(
PropertyArray: ?*DHCP_PROPERTY_ARRAY,
ID: DHCP_PROPERTY_ID,
Type: DHCP_PROPERTY_TYPE,
) callconv(@import("std").os.windows.WINAPI) ?*DHCP_PROPERTY;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpHlprIsV4PolicySingleUC(
Policy: ?*DHCP_POLICY,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4QueryPolicyEnforcement(
ServerIpAddress: ?PWSTR,
fGlobalPolicy: BOOL,
SubnetAddress: u32,
Enabled: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4SetPolicyEnforcement(
ServerIpAddress: ?PWSTR,
fGlobalPolicy: BOOL,
SubnetAddress: u32,
Enable: BOOL,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpHlprIsV4PolicyWellFormed(
pPolicy: ?*DHCP_POLICY,
) callconv(@import("std").os.windows.WINAPI) BOOL;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpHlprIsV4PolicyValid(
pPolicy: ?*DHCP_POLICY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4CreatePolicy(
ServerIpAddress: ?PWSTR,
pPolicy: ?*DHCP_POLICY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4GetPolicy(
ServerIpAddress: ?PWSTR,
fGlobalPolicy: BOOL,
SubnetAddress: u32,
PolicyName: ?PWSTR,
Policy: ?*?*DHCP_POLICY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4SetPolicy(
ServerIpAddress: ?PWSTR,
FieldsModified: u32,
fGlobalPolicy: BOOL,
SubnetAddress: u32,
PolicyName: ?PWSTR,
Policy: ?*DHCP_POLICY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4DeletePolicy(
ServerIpAddress: ?PWSTR,
fGlobalPolicy: BOOL,
SubnetAddress: u32,
PolicyName: ?PWSTR,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4EnumPolicies(
ServerIpAddress: ?PWSTR,
ResumeHandle: ?*u32,
PreferredMaximum: u32,
fGlobalPolicy: BOOL,
SubnetAddress: u32,
EnumInfo: ?*?*DHCP_POLICY_ARRAY,
ElementsRead: ?*u32,
ElementsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4AddPolicyRange(
ServerIpAddress: ?PWSTR,
SubnetAddress: u32,
PolicyName: ?PWSTR,
Range: ?*DHCP_IP_RANGE,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4RemovePolicyRange(
ServerIpAddress: ?PWSTR,
SubnetAddress: u32,
PolicyName: ?PWSTR,
Range: ?*DHCP_IP_RANGE,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV6SetStatelessStoreParams(
ServerIpAddress: ?PWSTR,
fServerLevel: BOOL,
SubnetAddress: DHCP_IPV6_ADDRESS,
FieldModified: u32,
Params: ?*DHCPV6_STATELESS_PARAMS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV6GetStatelessStoreParams(
ServerIpAddress: ?PWSTR,
fServerLevel: BOOL,
SubnetAddress: DHCP_IPV6_ADDRESS,
Params: ?*?*DHCPV6_STATELESS_PARAMS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV6GetStatelessStatistics(
ServerIpAddress: ?PWSTR,
StatelessStats: ?*?*DHCPV6_STATELESS_STATS,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4CreateClientInfo(
ServerIpAddress: ?[*:0]const u16,
ClientInfo: ?*const DHCP_CLIENT_INFO_PB,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4EnumSubnetClients(
ServerIpAddress: ?[*:0]const u16,
SubnetAddress: u32,
ResumeHandle: ?*u32,
PreferredMaximum: u32,
ClientInfo: ?*?*DHCP_CLIENT_INFO_PB_ARRAY,
ClientsRead: ?*u32,
ClientsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4GetClientInfo(
ServerIpAddress: ?[*:0]const u16,
SearchInfo: ?*const DHCP_SEARCH_INFO,
ClientInfo: ?*?*DHCP_CLIENT_INFO_PB,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV6CreateClientInfo(
ServerIpAddress: ?[*:0]const u16,
ClientInfo: ?*const DHCP_CLIENT_INFO_V6,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV4GetFreeIPAddress(
ServerIpAddress: ?PWSTR,
ScopeId: u32,
StartIP: u32,
EndIP: u32,
NumFreeAddrReq: u32,
IPAddrList: ?*?*DHCP_IP_ARRAY,
) callconv(@import("std").os.windows.WINAPI) u32;
// TODO: this type is limited to platform 'windowsServer2012'
pub extern "DHCPSAPI" fn DhcpV6GetFreeIPAddress(
ServerIpAddress: ?PWSTR,
ScopeId: DHCP_IPV6_ADDRESS,
StartIP: DHCP_IPV6_ADDRESS,
EndIP: DHCP_IPV6_ADDRESS,
NumFreeAddrReq: u32,
IPAddrList: ?*?*DHCPV6_IP_ARRAY,
) callconv(@import("std").os.windows.WINAPI) u32;
pub extern "DHCPSAPI" fn DhcpV4CreateClientInfoEx(
ServerIpAddress: ?[*:0]const u16,
ClientInfo: ?*const DHCP_CLIENT_INFO_EX,
) callconv(@import("std").os.windows.WINAPI) u32;
pub extern "DHCPSAPI" fn DhcpV4EnumSubnetClientsEx(
ServerIpAddress: ?[*:0]const u16,
SubnetAddress: u32,
ResumeHandle: ?*u32,
PreferredMaximum: u32,
ClientInfo: ?*?*DHCP_CLIENT_INFO_EX_ARRAY,
ClientsRead: ?*u32,
ClientsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
pub extern "DHCPSAPI" fn DhcpV4GetClientInfoEx(
ServerIpAddress: ?[*:0]const u16,
SearchInfo: ?*const DHCP_SEARCH_INFO,
ClientInfo: ?*?*DHCP_CLIENT_INFO_EX,
) callconv(@import("std").os.windows.WINAPI) u32;
pub extern "DHCPSAPI" fn DhcpV4CreatePolicyEx(
ServerIpAddress: ?PWSTR,
PolicyEx: ?*DHCP_POLICY_EX,
) callconv(@import("std").os.windows.WINAPI) u32;
pub extern "DHCPSAPI" fn DhcpV4GetPolicyEx(
ServerIpAddress: ?PWSTR,
GlobalPolicy: BOOL,
SubnetAddress: u32,
PolicyName: ?PWSTR,
Policy: ?*?*DHCP_POLICY_EX,
) callconv(@import("std").os.windows.WINAPI) u32;
pub extern "DHCPSAPI" fn DhcpV4SetPolicyEx(
ServerIpAddress: ?PWSTR,
FieldsModified: u32,
GlobalPolicy: BOOL,
SubnetAddress: u32,
PolicyName: ?PWSTR,
Policy: ?*DHCP_POLICY_EX,
) callconv(@import("std").os.windows.WINAPI) u32;
pub extern "DHCPSAPI" fn DhcpV4EnumPoliciesEx(
ServerIpAddress: ?PWSTR,
ResumeHandle: ?*u32,
PreferredMaximum: u32,
GlobalPolicy: BOOL,
SubnetAddress: u32,
EnumInfo: ?*?*DHCP_POLICY_EX_ARRAY,
ElementsRead: ?*u32,
ElementsTotal: ?*u32,
) callconv(@import("std").os.windows.WINAPI) u32;
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
// Selects ANSI vs. wide (UTF-16) name aliases based on the project-wide
// `unicode_mode` setting.  This module has zero A/W alias pairs (the "(0)" in
// the header), so every branch contributes an empty struct; the switch is kept
// so the file has the same shape as other generated win32 modules.
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
    .ansi => struct {
    },
    .wide => struct {
    },
    .unspecified => if (@import("builtin").is_test) struct {
    } else struct {
    },
};
//--------------------------------------------------------------------------------
// Section: Imports (4)
//--------------------------------------------------------------------------------
const BOOL = @import("../foundation.zig").BOOL;
const BOOLEAN = @import("../foundation.zig").BOOLEAN;
const PSTR = @import("../foundation.zig").PSTR;
const PWSTR = @import("../foundation.zig").PWSTR;
// Smoke test: force semantic analysis of the declarations in this module so
// that type errors in the generated bindings surface under `zig build test`.
test {
    // The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476
    if (@hasDecl(@This(), "LPDHCP_CONTROL")) { _ = LPDHCP_CONTROL; }
    if (@hasDecl(@This(), "LPDHCP_NEWPKT")) { _ = LPDHCP_NEWPKT; }
    if (@hasDecl(@This(), "LPDHCP_DROP_SEND")) { _ = LPDHCP_DROP_SEND; }
    if (@hasDecl(@This(), "LPDHCP_PROB")) { _ = LPDHCP_PROB; }
    if (@hasDecl(@This(), "LPDHCP_GIVE_ADDRESS")) { _ = LPDHCP_GIVE_ADDRESS; }
    if (@hasDecl(@This(), "LPDHCP_HANDLE_OPTIONS")) { _ = LPDHCP_HANDLE_OPTIONS; }
    if (@hasDecl(@This(), "LPDHCP_DELETE_CLIENT")) { _ = LPDHCP_DELETE_CLIENT; }
    if (@hasDecl(@This(), "LPDHCP_ENTRY_POINT_FUNC")) { _ = LPDHCP_ENTRY_POINT_FUNC; }
    // The declaration count scales the comptime budget for the loop below.
    @setEvalBranchQuota(
        @import("std").meta.declarations(@This()).len * 3
    );
    // reference all the pub declarations
    if (!@import("builtin").is_test) return;
    // NOTE(review): `_ = decl` discards the `std.meta.Declaration` metadata
    // struct, not the declaration's value itself -- confirm this actually
    // forces analysis of each pub decl (cf. `std.testing.refAllDecls`).
    inline for (@import("std").meta.declarations(@This())) |decl| {
        if (decl.is_pub) {
            _ = decl;
        }
    }
}
// ---------------------------------------------------------------------------
// End of: win32/network_management/dhcp.zig
// ---------------------------------------------------------------------------
const std = @import("../std.zig");
const maxInt = std.math.maxInt;
const builtin = @import("builtin");
const iovec = std.os.iovec;
const iovec_const = std.os.iovec_const;
// OpenBSD libc entry points.  `__errno` is the per-thread errno accessor;
// it is re-exported under the portable name `_errno` used by std.
extern "c" fn __errno() *c_int;
pub const _errno = __errno;
pub const dl_iterate_phdr_callback = fn (info: *dl_phdr_info, size: usize, data: ?*anyopaque) callconv(.C) c_int;
pub extern "c" fn dl_iterate_phdr(callback: dl_iterate_phdr_callback, data: ?*anyopaque) c_int;
pub extern "c" fn arc4random_buf(buf: [*]u8, len: usize) void;
pub extern "c" fn getthrid() pid_t;
pub extern "c" fn pipe2(fds: *[2]fd_t, flags: u32) c_int;
// NOTE(review): on OpenBSD getdents(2) returns an int in C; the usize return
// here means callers must detect errors via wrapped -1 -- confirm call sites.
pub extern "c" fn getdents(fd: c_int, buf_ptr: [*]u8, nbytes: usize) usize;
pub extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
// pthread objects: on OpenBSD these are all a single opaque pointer,
// lazily initialized by libc (null means "not yet initialized").
pub const pthread_mutex_t = extern struct {
    inner: ?*anyopaque = null,
};
pub const pthread_cond_t = extern struct {
    inner: ?*anyopaque = null,
};
pub const pthread_rwlock_t = extern struct {
    ptr: ?*anyopaque = null,
};
pub const pthread_spinlock_t = extern struct {
    inner: ?*anyopaque = null,
};
pub const pthread_attr_t = extern struct {
    inner: ?*anyopaque = null,
};
pub const pthread_key_t = c_int;
pub const sem_t = ?*opaque {};
pub extern "c" fn posix_memalign(memptr: *?*anyopaque, alignment: usize, size: usize) c_int;
// OpenBSD-specific sandboxing primitives.
pub extern "c" fn pledge(promises: ?[*:0]const u8, execpromises: ?[*:0]const u8) c_int;
pub extern "c" fn unveil(path: ?[*:0]const u8, permissions: ?[*:0]const u8) c_int;
pub extern "c" fn pthread_set_name_np(thread: std.c.pthread_t, name: [*:0]const u8) void;
pub extern "c" fn pthread_get_name_np(thread: std.c.pthread_t, name: [*:0]u8, len: usize) void;
// Primitive libc typedefs (must match OpenBSD's <sys/types.h> ABI exactly).
pub const blkcnt_t = i64;
pub const blksize_t = i32;
pub const clock_t = i64;
pub const dev_t = i32;
pub const fd_t = c_int;
pub const gid_t = u32;
pub const ino_t = u64;
pub const mode_t = u32;
pub const nlink_t = u32;
pub const off_t = i64;
pub const pid_t = i32;
pub const socklen_t = u32;
pub const time_t = i64;
pub const uid_t = u32;
/// Renamed from `kevent` to `Kevent` to avoid conflict with function name.
/// ABI mirror of OpenBSD's `struct kevent` (<sys/event.h>); field order and
/// sizes must not change.
pub const Kevent = extern struct {
    ident: usize,
    filter: c_short,
    flags: u16,
    fflags: c_uint,
    data: i64,
    udata: usize,
};
// Modes and flags for dlopen()
// include/dlfcn.h
pub const RTLD = struct {
    /// Bind function calls lazily.
    pub const LAZY = 1;
    /// Bind function calls immediately.
    pub const NOW = 2;
    /// Make symbols globally available.
    pub const GLOBAL = 0x100;
    /// Opposite of GLOBAL, and the default.
    pub const LOCAL = 0x000;
    /// Trace loaded objects and exit.
    pub const TRACE = 0x200;
};
// Passed to each dl_iterate_phdr() callback; mirrors C `struct dl_phdr_info`.
pub const dl_phdr_info = extern struct {
    dlpi_addr: std.elf.Addr,
    dlpi_name: ?[*:0]const u8,
    dlpi_phdr: [*]std.elf.Phdr,
    dlpi_phnum: std.elf.Half,
};
// Advisory record lock description for fcntl(F.GETLK/F.SETLK/F.SETLKW).
pub const Flock = extern struct {
    l_start: off_t,
    l_len: off_t,
    l_pid: pid_t,
    l_type: c_short,
    l_whence: c_short,
};
// Result node of getaddrinfo(3); `next` forms a singly linked list.
pub const addrinfo = extern struct {
    flags: c_int,
    family: c_int,
    socktype: c_int,
    protocol: c_int,
    addrlen: socklen_t,
    addr: ?*sockaddr,
    canonname: ?[*:0]u8,
    next: ?*addrinfo,
};
// getaddrinfo(3) error codes (negative, per OpenBSD <netdb.h>).
pub const EAI = enum(c_int) {
    /// address family for hostname not supported
    ADDRFAMILY = -9,
    /// name could not be resolved at this time
    AGAIN = -3,
    /// flags parameter had an invalid value
    BADFLAGS = -1,
    /// non-recoverable failure in name resolution
    FAIL = -4,
    /// address family not recognized
    FAMILY = -6,
    /// memory allocation failure
    MEMORY = -10,
    /// no address associated with hostname
    NODATA = -5,
    /// name does not resolve
    NONAME = -2,
    /// service not recognized for socket type
    SERVICE = -8,
    /// intended socket type was not recognized
    SOCKTYPE = -7,
    /// system error returned in errno
    SYSTEM = -11,
    /// invalid value for hints
    BADHINTS = -12,
    /// resolved protocol is unknown
    PROTOCOL = -13,
    /// argument buffer overflow
    OVERFLOW = -14,
    _,
};
pub const EAI_MAX = 15;
// Message header for sendmsg(2)/recvmsg(2).
pub const msghdr = extern struct {
    /// optional address
    msg_name: ?*sockaddr,
    /// size of address
    msg_namelen: socklen_t,
    /// scatter/gather array
    msg_iov: [*]iovec,
    /// # elements in msg_iov
    msg_iovlen: c_uint,
    /// ancillary data
    msg_control: ?*anyopaque,
    /// ancillary data buffer len
    msg_controllen: socklen_t,
    /// flags on received message
    msg_flags: c_int,
};
// Const variant of `msghdr` used for sending (buffers are read-only).
pub const msghdr_const = extern struct {
    /// optional address
    msg_name: ?*const sockaddr,
    /// size of address
    msg_namelen: socklen_t,
    /// scatter/gather array
    msg_iov: [*]iovec_const,
    /// # elements in msg_iov
    msg_iovlen: c_uint,
    /// ancillary data
    msg_control: ?*anyopaque,
    /// ancillary data buffer len
    msg_controllen: socklen_t,
    /// flags on received message
    msg_flags: c_int,
};
// ABI mirror of OpenBSD's `struct stat`; the accessor methods expose the
// timespec fields under std's portable names.
pub const Stat = extern struct {
    mode: mode_t,
    dev: dev_t,
    ino: ino_t,
    nlink: nlink_t,
    uid: uid_t,
    gid: gid_t,
    rdev: dev_t,
    atim: timespec,
    mtim: timespec,
    ctim: timespec,
    size: off_t,
    blocks: blkcnt_t,
    blksize: blksize_t,
    flags: u32,
    gen: u32,
    birthtim: timespec,
    /// Last access time.
    pub fn atime(self: @This()) timespec {
        return self.atim;
    }
    /// Last data modification time.
    pub fn mtime(self: @This()) timespec {
        return self.mtim;
    }
    /// Last status (inode) change time.
    pub fn ctime(self: @This()) timespec {
        return self.ctim;
    }
    /// File creation time.
    pub fn birthtime(self: @This()) timespec {
        return self.birthtim;
    }
};
pub const timespec = extern struct {
    tv_sec: time_t,
    tv_nsec: c_long,
};
pub const timeval = extern struct {
    tv_sec: time_t,
    tv_usec: c_long,
};
pub const timezone = extern struct {
    tz_minuteswest: c_int,
    tz_dsttime: c_int,
};
pub const MAXNAMLEN = 255;
// Directory entry as returned by getdents(2); mirrors `struct dirent`.
pub const dirent = extern struct {
    d_fileno: ino_t,
    d_off: off_t,
    d_reclen: u16,
    d_type: u8,
    d_namlen: u8,
    __d_padding: [4]u8,
    d_name: [MAXNAMLEN + 1]u8,
    pub fn reclen(self: dirent) u16 {
        return self.d_reclen;
    }
};
pub const in_port_t = u16;
pub const sa_family_t = u8;
// Generic socket address; BSD sockaddr carries an explicit `len` byte.
pub const sockaddr = extern struct {
    /// total length
    len: u8,
    /// address family
    family: sa_family_t,
    /// actually longer; address value
    data: [14]u8,
    pub const SS_MAXSIZE = 256;
    // NOTE(review): `std.x` was an experimental namespace that has been
    // removed from newer Zig releases -- verify against the targeted version.
    pub const storage = std.x.os.Socket.Address.Native.Storage;
    // IPv4 address (struct sockaddr_in).
    pub const in = extern struct {
        len: u8 = @sizeOf(in),
        family: sa_family_t = AF.INET,
        port: in_port_t,
        addr: u32,
        zero: [8]u8 = [8]u8{ 0, 0, 0, 0, 0, 0, 0, 0 },
    };
    // IPv6 address (struct sockaddr_in6).
    pub const in6 = extern struct {
        len: u8 = @sizeOf(in6),
        family: sa_family_t = AF.INET6,
        port: in_port_t,
        flowinfo: u32,
        addr: [16]u8,
        scope_id: u32,
    };
    /// Definitions for UNIX IPC domain.
    pub const un = extern struct {
        /// total sockaddr length
        len: u8 = @sizeOf(un),
        family: sa_family_t = AF.LOCAL,
        /// path name
        path: [104]u8,
    };
};
// getaddrinfo(3) hint flags.
pub const AI = struct {
    /// get address to use bind()
    pub const PASSIVE = 1;
    /// fill ai_canonname
    pub const CANONNAME = 2;
    /// prevent host name resolution
    pub const NUMERICHOST = 4;
    /// prevent service name resolution
    pub const NUMERICSERV = 16;
    /// only if any address is assigned
    pub const ADDRCONFIG = 64;
};
pub const PATH_MAX = 1024;
pub const IOV_MAX = 1024;
pub const STDIN_FILENO = 0;
pub const STDOUT_FILENO = 1;
pub const STDERR_FILENO = 2;
// mmap/mprotect protection bits.
pub const PROT = struct {
    pub const NONE = 0;
    pub const READ = 1;
    pub const WRITE = 2;
    pub const EXEC = 4;
};
// clock_gettime(2) clock ids.
pub const CLOCK = struct {
    pub const REALTIME = 0;
    pub const PROCESS_CPUTIME_ID = 2;
    pub const MONOTONIC = 3;
    pub const THREAD_CPUTIME_ID = 4;
};
// mmap(2) flags; several historical flags are 0 (no-ops) on OpenBSD.
pub const MAP = struct {
    pub const FAILED = @intToPtr(*anyopaque, maxInt(usize));
    pub const SHARED = 0x0001;
    pub const PRIVATE = 0x0002;
    pub const FIXED = 0x0010;
    pub const RENAME = 0;
    pub const NORESERVE = 0;
    pub const INHERIT = 0;
    pub const HASSEMAPHORE = 0;
    pub const TRYFIXED = 0;
    pub const FILE = 0;
    pub const ANON = 0x1000;
    pub const ANONYMOUS = ANON;
    pub const STACK = 0x4000;
    pub const CONCEAL = 0x8000;
};
// msync(2) flags.
pub const MSF = struct {
    pub const ASYNC = 1;
    pub const INVALIDATE = 2;
    pub const SYNC = 4;
};
// wait(2) option flags and status-decoding helpers (ports of the C
// W* macros from OpenBSD's <sys/wait.h>).
pub const W = struct {
    pub const NOHANG = 1;
    pub const UNTRACED = 2;
    pub const CONTINUED = 8;
    /// Exit code (bits 8..15 of the status word); meaningful when IFEXITED.
    pub fn EXITSTATUS(s: u32) u8 {
        return @intCast(u8, (s >> 8) & 0xff);
    }
    /// Terminating signal number (low 7 bits); meaningful when IFSIGNALED.
    pub fn TERMSIG(s: u32) u32 {
        return (s & 0x7f);
    }
    /// Stop signal; same bit position as the exit status.
    pub fn STOPSIG(s: u32) u32 {
        return EXITSTATUS(s);
    }
    /// True if the child exited normally (signal field is zero).
    pub fn IFEXITED(s: u32) bool {
        return TERMSIG(s) == 0;
    }
    /// True if the child was continued: OpenBSD encodes this as 0o177777.
    pub fn IFCONTINUED(s: u32) bool {
        return ((s & 0o177777) == 0o177777);
    }
    /// True if the child is stopped (low byte == 0o177).
    pub fn IFSTOPPED(s: u32) bool {
        return (s & 0xff == 0o177);
    }
    /// True if the child was killed by a signal (field neither 0 nor 0o177).
    pub fn IFSIGNALED(s: u32) bool {
        return (((s) & 0o177) != 0o177) and (((s) & 0o177) != 0);
    }
};
// sigaction(2) flags.
pub const SA = struct {
    pub const ONSTACK = 0x0001;
    pub const RESTART = 0x0002;
    pub const RESETHAND = 0x0004;
    pub const NOCLDSTOP = 0x0008;
    pub const NODEFER = 0x0010;
    pub const NOCLDWAIT = 0x0020;
    pub const SIGINFO = 0x0040;
};
// access function
pub const F_OK = 0; // test for existence of file
pub const X_OK = 1; // test for execute or search permission
pub const W_OK = 2; // test for write permission
pub const R_OK = 4; // test for read permission
// open(2) flags.
pub const O = struct {
    /// open for reading only
    pub const RDONLY = 0x00000000;
    /// open for writing only
    pub const WRONLY = 0x00000001;
    /// open for reading and writing
    pub const RDWR = 0x00000002;
    /// mask for above modes
    pub const ACCMODE = 0x00000003;
    /// no delay
    pub const NONBLOCK = 0x00000004;
    /// set append mode
    pub const APPEND = 0x00000008;
    /// open with shared file lock
    pub const SHLOCK = 0x00000010;
    /// open with exclusive file lock
    pub const EXLOCK = 0x00000020;
    /// signal pgrp when data ready
    pub const ASYNC = 0x00000040;
    /// synchronous writes
    pub const SYNC = 0x00000080;
    /// don't follow symlinks on the last
    pub const NOFOLLOW = 0x00000100;
    /// create if nonexistent
    pub const CREAT = 0x00000200;
    /// truncate to zero length
    pub const TRUNC = 0x00000400;
    /// error if already exists
    pub const EXCL = 0x00000800;
    /// don't assign controlling terminal
    pub const NOCTTY = 0x00008000;
    /// write: I/O data completion
    pub const DSYNC = SYNC;
    /// read: I/O completion as for write
    pub const RSYNC = SYNC;
    /// fail if not a directory
    pub const DIRECTORY = 0x20000;
    /// set close on exec
    pub const CLOEXEC = 0x10000;
};
// fcntl(2) commands and record-lock types.
pub const F = struct {
    pub const DUPFD = 0;
    pub const GETFD = 1;
    pub const SETFD = 2;
    pub const GETFL = 3;
    pub const SETFL = 4;
    pub const GETOWN = 5;
    pub const SETOWN = 6;
    pub const GETLK = 7;
    pub const SETLK = 8;
    pub const SETLKW = 9;
    pub const RDLCK = 1;
    pub const UNLCK = 2;
    pub const WRLCK = 3;
};
// flock(2) operations.
pub const LOCK = struct {
    pub const SH = 0x01;
    pub const EX = 0x02;
    pub const NB = 0x04;
    pub const UN = 0x08;
};
pub const FD_CLOEXEC = 1;
// lseek(2) whence values.
pub const SEEK = struct {
    pub const SET = 0;
    pub const CUR = 1;
    pub const END = 2;
};
// socket(2) types; CLOEXEC/NONBLOCK may be OR'd into the type argument.
pub const SOCK = struct {
    pub const STREAM = 1;
    pub const DGRAM = 2;
    pub const RAW = 3;
    pub const RDM = 4;
    pub const SEQPACKET = 5;
    pub const CLOEXEC = 0x8000;
    pub const NONBLOCK = 0x4000;
};
// setsockopt(2)/getsockopt(2) option names at the socket level.
pub const SO = struct {
    pub const DEBUG = 0x0001;
    pub const ACCEPTCONN = 0x0002;
    pub const REUSEADDR = 0x0004;
    pub const KEEPALIVE = 0x0008;
    pub const DONTROUTE = 0x0010;
    pub const BROADCAST = 0x0020;
    pub const USELOOPBACK = 0x0040;
    pub const LINGER = 0x0080;
    pub const OOBINLINE = 0x0100;
    pub const REUSEPORT = 0x0200;
    pub const TIMESTAMP = 0x0800;
    pub const BINDANY = 0x1000;
    pub const ZEROIZE = 0x2000;
    pub const SNDBUF = 0x1001;
    pub const RCVBUF = 0x1002;
    pub const SNDLOWAT = 0x1003;
    pub const RCVLOWAT = 0x1004;
    pub const SNDTIMEO = 0x1005;
    pub const RCVTIMEO = 0x1006;
    pub const ERROR = 0x1007;
    pub const TYPE = 0x1008;
    pub const NETPROC = 0x1020;
    pub const RTABLE = 0x1021;
    pub const PEERCRED = 0x1022;
    pub const SPLICE = 0x1023;
    pub const DOMAIN = 0x1024;
    pub const PROTOCOL = 0x1025;
};
// Option level for socket-level options.
pub const SOL = struct {
    pub const SOCKET = 0xffff;
};
// Protocol families: on BSD these alias the address-family constants.
// NOTE(review): this references `AF.DECnet`, which the `AF` struct in this
// file does not declare -- lazily analyzed, so it only errors if referenced;
// confirm `AF` defines DECnet (OpenBSD's AF_DECnet is 12).
pub const PF = struct {
    pub const UNSPEC = AF.UNSPEC;
    pub const LOCAL = AF.LOCAL;
    pub const UNIX = AF.UNIX;
    pub const INET = AF.INET;
    pub const APPLETALK = AF.APPLETALK;
    pub const INET6 = AF.INET6;
    pub const DECnet = AF.DECnet;
    pub const KEY = AF.KEY;
    pub const ROUTE = AF.ROUTE;
    pub const SNA = AF.SNA;
    pub const MPLS = AF.MPLS;
    pub const BLUETOOTH = AF.BLUETOOTH;
    pub const ISDN = AF.ISDN;
    pub const MAX = AF.MAX;
};
// Address families (OpenBSD <sys/socket.h> AF_* values).
// DECnet was missing even though `PF.DECnet = AF.DECnet` above references it,
// which would fail to compile as soon as PF.DECnet is analyzed; OpenBSD
// defines AF_DECnet as 12.
pub const AF = struct {
    pub const UNSPEC = 0;
    pub const UNIX = 1;
    pub const LOCAL = UNIX;
    pub const INET = 2;
    /// AF_DECnet per OpenBSD <sys/socket.h>; referenced by PF.DECnet.
    pub const DECnet = 12;
    pub const APPLETALK = 16;
    pub const INET6 = 24;
    pub const KEY = 30;
    pub const ROUTE = 17;
    pub const SNA = 11;
    pub const MPLS = 33;
    pub const BLUETOOTH = 32;
    pub const ISDN = 26;
    pub const MAX = 36;
};
// dirent file types (d_type field values, DT_* in <sys/dirent.h>).
pub const DT = struct {
    pub const UNKNOWN = 0;
    pub const FIFO = 1;
    pub const CHR = 2;
    pub const DIR = 4;
    pub const BLK = 6;
    pub const REG = 8;
    pub const LNK = 10;
    pub const SOCK = 12;
    pub const WHT = 14; // XXX
};
// kqueue(2) / kevent(2) constants (OpenBSD <sys/event.h>).
// Action/state flags for struct kevent.flags:
pub const EV_ADD = 0x0001;
pub const EV_DELETE = 0x0002;
pub const EV_ENABLE = 0x0004;
pub const EV_DISABLE = 0x0008;
pub const EV_ONESHOT = 0x0010;
pub const EV_CLEAR = 0x0020;
pub const EV_RECEIPT = 0x0040;
pub const EV_DISPATCH = 0x0080;
pub const EV_FLAG1 = 0x2000;
pub const EV_ERROR = 0x4000;
pub const EV_EOF = 0x8000;
// Filter identifiers (negative, contiguous on OpenBSD).
pub const EVFILT_READ = -1;
pub const EVFILT_WRITE = -2;
pub const EVFILT_AIO = -3;
pub const EVFILT_VNODE = -4;
pub const EVFILT_PROC = -5;
pub const EVFILT_SIGNAL = -6;
pub const EVFILT_TIMER = -7;
/// Was missing: OpenBSD defines EVFILT_DEVICE (-8), and the NOTE_CHANGE
/// flag below is documented for it.
pub const EVFILT_DEVICE = -8;
pub const EVFILT_EXCEPT = -9;
// data/hint flags for EVFILT_{READ|WRITE}
pub const NOTE_LOWAT = 0x0001;
pub const NOTE_EOF = 0x0002;
// data/hint flags for EVFILT_EXCEPT and EVFILT_{READ|WRITE}
pub const NOTE_OOB = 0x0004;
// data/hint flags for EVFILT_VNODE
pub const NOTE_DELETE = 0x0001;
pub const NOTE_WRITE = 0x0002;
pub const NOTE_EXTEND = 0x0004;
pub const NOTE_ATTRIB = 0x0008;
pub const NOTE_LINK = 0x0010;
pub const NOTE_RENAME = 0x0020;
pub const NOTE_REVOKE = 0x0040;
pub const NOTE_TRUNCATE = 0x0080;
// data/hint flags for EVFILT_PROC
pub const NOTE_EXIT = 0x80000000;
pub const NOTE_FORK = 0x40000000;
pub const NOTE_EXEC = 0x20000000;
pub const NOTE_PDATAMASK = 0x000fffff;
pub const NOTE_PCTRLMASK = 0xf0000000;
pub const NOTE_TRACK = 0x00000001;
pub const NOTE_TRACKERR = 0x00000002;
pub const NOTE_CHILD = 0x00000004;
// data/hint flags for EVFILT_DEVICE
pub const NOTE_CHANGE = 0x00000001;
// Terminal ioctl(2) request codes and modem/packet-mode bits
// (TIOC* constants from <sys/ttycom.h>, with the leading "T" hoisted
// into the namespace name, e.g. T.IOCGWINSZ == TIOCGWINSZ).
pub const T = struct {
    pub const IOCCBRK = 0x2000747a;
    pub const IOCCDTR = 0x20007478;
    pub const IOCCONS = 0x80047462;
    pub const IOCDCDTIMESTAMP = 0x40107458;
    pub const IOCDRAIN = 0x2000745e;
    pub const IOCEXCL = 0x2000740d;
    pub const IOCEXT = 0x80047460;
    pub const IOCFLAG_CDTRCTS = 0x10;
    pub const IOCFLAG_CLOCAL = 0x2;
    pub const IOCFLAG_CRTSCTS = 0x4;
    pub const IOCFLAG_MDMBUF = 0x8;
    pub const IOCFLAG_SOFTCAR = 0x1;
    pub const IOCFLUSH = 0x80047410;
    pub const IOCGETA = 0x402c7413;
    pub const IOCGETD = 0x4004741a;
    pub const IOCGFLAGS = 0x4004745d;
    pub const IOCGLINED = 0x40207442;
    pub const IOCGPGRP = 0x40047477;
    pub const IOCGQSIZE = 0x40047481;
    pub const IOCGRANTPT = 0x20007447;
    pub const IOCGSID = 0x40047463;
    pub const IOCGSIZE = 0x40087468;
    pub const IOCGWINSZ = 0x40087468;
    pub const IOCMBIC = 0x8004746b;
    pub const IOCMBIS = 0x8004746c;
    pub const IOCMGET = 0x4004746a;
    pub const IOCMSET = 0x8004746d;
    pub const IOCM_CAR = 0x40;
    pub const IOCM_CD = 0x40;
    pub const IOCM_CTS = 0x20;
    pub const IOCM_DSR = 0x100;
    pub const IOCM_DTR = 0x2;
    pub const IOCM_LE = 0x1;
    pub const IOCM_RI = 0x80;
    pub const IOCM_RNG = 0x80;
    pub const IOCM_RTS = 0x4;
    pub const IOCM_SR = 0x10;
    pub const IOCM_ST = 0x8;
    pub const IOCNOTTY = 0x20007471;
    pub const IOCNXCL = 0x2000740e;
    pub const IOCOUTQ = 0x40047473;
    pub const IOCPKT = 0x80047470;
    pub const IOCPKT_DATA = 0x0;
    pub const IOCPKT_DOSTOP = 0x20;
    pub const IOCPKT_FLUSHREAD = 0x1;
    pub const IOCPKT_FLUSHWRITE = 0x2;
    pub const IOCPKT_IOCTL = 0x40;
    pub const IOCPKT_NOSTOP = 0x10;
    pub const IOCPKT_START = 0x8;
    pub const IOCPKT_STOP = 0x4;
    pub const IOCPTMGET = 0x40287446;
    pub const IOCPTSNAME = 0x40287448;
    pub const IOCRCVFRAME = 0x80087445;
    pub const IOCREMOTE = 0x80047469;
    pub const IOCSBRK = 0x2000747b;
    pub const IOCSCTTY = 0x20007461;
    pub const IOCSDTR = 0x20007479;
    pub const IOCSETA = 0x802c7414;
    pub const IOCSETAF = 0x802c7416;
    pub const IOCSETAW = 0x802c7415;
    pub const IOCSETD = 0x8004741b;
    pub const IOCSFLAGS = 0x8004745c;
    pub const IOCSIG = 0x2000745f;
    pub const IOCSLINED = 0x80207443;
    pub const IOCSPGRP = 0x80047476;
    pub const IOCSQSIZE = 0x80047480;
    pub const IOCSSIZE = 0x80087467;
    pub const IOCSTART = 0x2000746e;
    pub const IOCSTAT = 0x80047465;
    pub const IOCSTI = 0x80017472;
    pub const IOCSTOP = 0x2000746f;
    pub const IOCSWINSZ = 0x80087467;
    pub const IOCUCNTL = 0x80047466;
    pub const IOCXMTFRAME = 0x80087444;
};
// BSD Authentication
// Item selectors for auth_getitem(3)/auth_setitem(3).
pub const auth_item_t = c_int;
pub const AUTHV = struct {
    pub const ALL: auth_item_t = 0;
    pub const CHALLENGE: auth_item_t = 1;
    pub const CLASS: auth_item_t = 2;
    pub const NAME: auth_item_t = 3;
    pub const SERVICE: auth_item_t = 4;
    pub const STYLE: auth_item_t = 5;
    pub const INTERACTIVE: auth_item_t = 6;
};
// Keyword strings of the BSD Auth back-channel protocol (login.conf(5)).
pub const BI = struct {
    pub const AUTH = "authorize"; // Accepted authentication
    pub const REJECT = "reject"; // Rejected authentication
    pub const CHALLENGE = "reject challenge"; // Reject with a challenge
    pub const SILENT = "reject silent"; // Reject silently
    pub const REMOVE = "remove"; // remove file on error
    pub const ROOTOKAY = "authorize root"; // root authenticated
    pub const SECURE = "authorize secure"; // okay on non-secure line
    pub const SETENV = "setenv"; // set environment variable
    pub const UNSETENV = "unsetenv"; // unset environment variable
    pub const VALUE = "value"; // set local variable
    pub const EXPIRED = "reject expired"; // account expired
    pub const PWEXPIRED = "reject pwexpired"; // password expired
    pub const FDPASS = "fd"; // child is passing an fd
};
// Bit flags describing the outcome of an authentication session.
pub const AUTH = struct {
    pub const OKAY: c_int = 0x01; // user authenticated
    pub const ROOTOKAY: c_int = 0x02; // authenticated as root
    pub const SECURE: c_int = 0x04; // secure login
    pub const SILENT: c_int = 0x08; // silent rejection
    pub const CHALLENGE: c_int = 0x10; // a challenge was given
    pub const EXPIRED: c_int = 0x20; // account expired
    pub const PWEXPIRED: c_int = 0x40; // password expired
    pub const ALLOW: c_int = (OKAY | ROOTOKAY | SECURE);
};
// Term
// Indices into termios.c_cc[]; the trailing comment names the mode flag
// under which each control character is interpreted.
pub const V = struct {
    pub const EOF = 0; // ICANON
    pub const EOL = 1; // ICANON
    pub const EOL2 = 2; // ICANON
    pub const ERASE = 3; // ICANON
    pub const WERASE = 4; // ICANON
    pub const KILL = 5; // ICANON
    pub const REPRINT = 6; // ICANON
    // 7 spare 1
    pub const INTR = 8; // ISIG
    pub const QUIT = 9; // ISIG
    pub const SUSP = 10; // ISIG
    pub const DSUSP = 11; // ISIG
    pub const START = 12; // IXON, IXOFF
    pub const STOP = 13; // IXON, IXOFF
    pub const LNEXT = 14; // IEXTEN
    pub const DISCARD = 15; // IEXTEN
    pub const MIN = 16; // !ICANON
    pub const TIME = 17; // !ICANON
    pub const STATUS = 18; // ICANON
    // 19 spare 2
};
pub const tcflag_t = c_uint;
pub const speed_t = c_uint;
pub const cc_t = u8;
pub const NCCS = 20;
// Bit masks for the termios iflag/oflag/cflag fields (tcflag_t).
// Input flags - software input processing
pub const IGNBRK: tcflag_t = 0x00000001; // ignore BREAK condition
pub const BRKINT: tcflag_t = 0x00000002; // map BREAK to SIGINT
pub const IGNPAR: tcflag_t = 0x00000004; // ignore (discard) parity errors
pub const PARMRK: tcflag_t = 0x00000008; // mark parity and framing errors
pub const INPCK: tcflag_t = 0x00000010; // enable checking of parity errors
pub const ISTRIP: tcflag_t = 0x00000020; // strip 8th bit off chars
pub const INLCR: tcflag_t = 0x00000040; // map NL into CR
pub const IGNCR: tcflag_t = 0x00000080; // ignore CR
pub const ICRNL: tcflag_t = 0x00000100; // map CR to NL (ala CRMOD)
pub const IXON: tcflag_t = 0x00000200; // enable output flow control
pub const IXOFF: tcflag_t = 0x00000400; // enable input flow control
pub const IXANY: tcflag_t = 0x00000800; // any char will restart after stop
pub const IUCLC: tcflag_t = 0x00001000; // translate upper to lower case
pub const IMAXBEL: tcflag_t = 0x00002000; // ring bell on input queue full
// Output flags - software output processing
pub const OPOST: tcflag_t = 0x00000001; // enable following output processing
pub const ONLCR: tcflag_t = 0x00000002; // map NL to CR-NL (ala CRMOD)
pub const OXTABS: tcflag_t = 0x00000004; // expand tabs to spaces
pub const ONOEOT: tcflag_t = 0x00000008; // discard EOT's (^D) on output
pub const OCRNL: tcflag_t = 0x00000010; // map CR to NL
pub const OLCUC: tcflag_t = 0x00000020; // translate lower case to upper case
pub const ONOCR: tcflag_t = 0x00000040; // No CR output at column 0
pub const ONLRET: tcflag_t = 0x00000080; // NL performs the CR function
// Control flags - hardware control of terminal
pub const CIGNORE: tcflag_t = 0x00000001; // ignore control flags
pub const CSIZE: tcflag_t = 0x00000300; // character size mask
pub const CS5: tcflag_t = 0x00000000; // 5 bits (pseudo)
pub const CS6: tcflag_t = 0x00000100; // 6 bits
pub const CS7: tcflag_t = 0x00000200; // 7 bits
pub const CS8: tcflag_t = 0x00000300; // 8 bits
pub const CSTOPB: tcflag_t = 0x00000400; // send 2 stop bits
pub const CREAD: tcflag_t = 0x00000800; // enable receiver
pub const PARENB: tcflag_t = 0x00001000; // parity enable
pub const PARODD: tcflag_t = 0x00002000; // odd parity, else even
pub const HUPCL: tcflag_t = 0x00004000; // hang up on last close
pub const CLOCAL: tcflag_t = 0x00008000; // ignore modem status lines
pub const CRTSCTS: tcflag_t = 0x00010000; // RTS/CTS full-duplex flow control
pub const CRTS_IFLOW: tcflag_t = CRTSCTS; // XXX compat
pub const CCTS_OFLOW: tcflag_t = CRTSCTS; // XXX compat
pub const MDMBUF: tcflag_t = 0x00100000; // DTR/DCD hardware flow control
pub const CHWFLOW: tcflag_t = (MDMBUF | CRTSCTS); // all types of hw flow control
// Terminal attribute structure, ABI-compatible with the C `struct termios`.
pub const termios = extern struct {
    iflag: tcflag_t, // input flags
    oflag: tcflag_t, // output flags
    cflag: tcflag_t, // control flags
    lflag: tcflag_t, // local flags
    cc: [NCCS]cc_t, // control chars
    ispeed: c_int, // input speed
    ospeed: c_int, // output speed
};
// Commands passed to tcsetattr() for setting the termios structure.
pub const TCSA = struct {
    pub const NOW = 0; // make change immediate
    pub const DRAIN = 1; // drain output, then change
    pub const FLUSH = 2; // drain output, flush input
    pub const SOFT = 0x10; // flag - don't alter h.w. state
};
// Standard speeds
// Baud-rate constants encode their literal value (OpenBSD convention).
pub const B0 = 0;
pub const B50 = 50;
pub const B75 = 75;
pub const B110 = 110;
pub const B134 = 134;
pub const B150 = 150;
pub const B200 = 200;
pub const B300 = 300;
pub const B600 = 600;
pub const B1200 = 1200;
pub const B1800 = 1800;
pub const B2400 = 2400;
pub const B4800 = 4800;
pub const B9600 = 9600;
pub const B19200 = 19200;
pub const B38400 = 38400;
pub const B7200 = 7200;
pub const B14400 = 14400;
pub const B28800 = 28800;
pub const B57600 = 57600;
pub const B76800 = 76800;
pub const B115200 = 115200;
pub const B230400 = 230400;
pub const EXTA = 19200;
pub const EXTB = 38400;
// Arguments for tcflush() (…FLUSH) and tcflow() (…OFF/…ON); cf. tcflush(3).
pub const TCIFLUSH = 1;
pub const TCOFLUSH = 2;
pub const TCIOFLUSH = 3;
pub const TCOOFF = 1;
pub const TCOON = 2;
pub const TCIOFF = 3;
pub const TCION = 4;
// Terminal window size, as reported/set by the TIOCGWINSZ/TIOCSWINSZ ioctls.
pub const winsize = extern struct {
    ws_row: c_ushort,
    ws_col: c_ushort,
    ws_xpixel: c_ushort,
    ws_ypixel: c_ushort,
};
// Number of defined signals plus one (signal numbers are 1-based).
const NSIG = 33;
pub const SIG = struct {
    // Special handler sentinel values (not signal numbers).
    pub const DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
    pub const IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
    pub const ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
    pub const CATCH = @intToPtr(?Sigaction.sigaction_fn, 2);
    pub const HOLD = @intToPtr(?Sigaction.sigaction_fn, 3);
    // Signal numbers.
    pub const HUP = 1;
    pub const INT = 2;
    pub const QUIT = 3;
    pub const ILL = 4;
    pub const TRAP = 5;
    pub const ABRT = 6;
    pub const IOT = ABRT;
    pub const EMT = 7;
    pub const FPE = 8;
    pub const KILL = 9;
    pub const BUS = 10;
    pub const SEGV = 11;
    pub const SYS = 12;
    pub const PIPE = 13;
    pub const ALRM = 14;
    pub const TERM = 15;
    pub const URG = 16;
    pub const STOP = 17;
    pub const TSTP = 18;
    pub const CONT = 19;
    pub const CHLD = 20;
    pub const TTIN = 21;
    pub const TTOU = 22;
    pub const IO = 23;
    pub const XCPU = 24;
    pub const XFSZ = 25;
    pub const VTALRM = 26;
    pub const PROF = 27;
    pub const WINCH = 28;
    pub const INFO = 29;
    pub const USR1 = 30;
    pub const USR2 = 31;
    pub const PWR = 32;
    // `how` argument values for sigprocmask(2).
    pub const BLOCK = 1;
    pub const UNBLOCK = 2;
    pub const SETMASK = 3;
};
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
    // Old-style single-argument handler signature.
    pub const handler_fn = fn (c_int) callconv(.C) void;
    // SA_SIGINFO-style three-argument handler signature.
    pub const sigaction_fn = fn (c_int, *const siginfo_t, ?*const anyopaque) callconv(.C) void;
    /// signal handler
    handler: extern union {
        handler: ?handler_fn,
        sigaction: ?sigaction_fn,
    },
    /// signal mask to apply
    mask: sigset_t,
    /// signal options
    flags: c_uint,
};
// Value passed to a signal handler or sigqueue(2): either an int or a pointer.
pub const sigval = extern union {
    int: c_int,
    ptr: ?*anyopaque,
};
// Signal information delivered to SA_SIGINFO handlers; layout mirrors the
// kernel's siginfo_t, with `__pad` sizing the union to 128 bytes total
// (see the comptime size assertion below).
pub const siginfo_t = extern struct {
    signo: c_int,
    code: c_int,
    errno: c_int,
    data: extern union {
        proc: extern struct {
            pid: pid_t,
            pdata: extern union {
                kill: extern struct {
                    uid: uid_t,
                    value: sigval,
                },
                cld: extern struct {
                    utime: clock_t,
                    stime: clock_t,
                    status: c_int,
                },
            },
        },
        fault: extern struct {
            addr: ?*anyopaque,
            trapno: c_int,
        },
        __pad: [128 - 3 * @sizeOf(c_int)]u8,
    },
};
comptime {
    if (@sizeOf(usize) == 4)
        std.debug.assert(@sizeOf(siginfo_t) == 128)
    else
        // Take into account the padding between errno and data fields.
        std.debug.assert(@sizeOf(siginfo_t) == 136);
}
// Per-architecture machine context; only x86_64 is defined here. Field order
// mirrors the kernel's struct sigcontext for that target.
pub usingnamespace switch (builtin.cpu.arch) {
    .x86_64 => struct {
        pub const ucontext_t = extern struct {
            sc_rdi: c_long,
            sc_rsi: c_long,
            sc_rdx: c_long,
            sc_rcx: c_long,
            sc_r8: c_long,
            sc_r9: c_long,
            sc_r10: c_long,
            sc_r11: c_long,
            sc_r12: c_long,
            sc_r13: c_long,
            sc_r14: c_long,
            sc_r15: c_long,
            sc_rbp: c_long,
            sc_rbx: c_long,
            sc_rax: c_long,
            sc_gs: c_long,
            sc_fs: c_long,
            sc_es: c_long,
            sc_ds: c_long,
            sc_trapno: c_long,
            sc_err: c_long,
            sc_rip: c_long,
            sc_cs: c_long,
            sc_rflags: c_long,
            sc_rsp: c_long,
            sc_ss: c_long,
            sc_fpstate: fxsave64,
            __sc_unused: c_int,
            sc_mask: c_int,
            sc_cookie: c_long,
        };
        // Saved FPU/SSE state; packed so the fields match the FXSAVE64
        // memory image byte-for-byte.
        pub const fxsave64 = packed struct {
            fx_fcw: u16,
            fx_fsw: u16,
            fx_ftw: u8,
            fx_unused1: u8,
            fx_fop: u16,
            fx_rip: u64,
            fx_rdp: u64,
            fx_mxcsr: u32,
            fx_mxcsr_mask: u32,
            fx_st: [8][2]u64,
            fx_xmm: [16][2]u64,
            fx_unused3: [96]u8,
        };
    },
    else => struct {},
};
// Signal mask: one bit per signal (NSIG <= 33 fits in a c_uint on OpenBSD).
pub const sigset_t = c_uint;
pub const empty_sigset: sigset_t = 0;
/// OpenBSD errno values. Non-exhaustive (`_`) so unknown kernel values can
/// still be represented.
pub const E = enum(u16) {
    /// No error occurred.
    SUCCESS = 0,
    PERM = 1, // Operation not permitted
    NOENT = 2, // No such file or directory
    SRCH = 3, // No such process
    INTR = 4, // Interrupted system call
    IO = 5, // Input/output error
    NXIO = 6, // Device not configured
    @"2BIG" = 7, // Argument list too long
    NOEXEC = 8, // Exec format error
    BADF = 9, // Bad file descriptor
    CHILD = 10, // No child processes
    DEADLK = 11, // Resource deadlock avoided
    // 11 was AGAIN
    NOMEM = 12, // Cannot allocate memory
    ACCES = 13, // Permission denied
    FAULT = 14, // Bad address
    NOTBLK = 15, // Block device required
    BUSY = 16, // Device busy
    EXIST = 17, // File exists
    XDEV = 18, // Cross-device link
    NODEV = 19, // Operation not supported by device
    NOTDIR = 20, // Not a directory
    ISDIR = 21, // Is a directory
    INVAL = 22, // Invalid argument
    NFILE = 23, // Too many open files in system
    MFILE = 24, // Too many open files
    NOTTY = 25, // Inappropriate ioctl for device
    TXTBSY = 26, // Text file busy
    FBIG = 27, // File too large
    NOSPC = 28, // No space left on device
    SPIPE = 29, // Illegal seek
    ROFS = 30, // Read-only file system
    MLINK = 31, // Too many links
    PIPE = 32, // Broken pipe
    // math software
    DOM = 33, // Numerical argument out of domain
    RANGE = 34, // Result too large or too small
    // non-blocking and interrupt i/o
    // also: WOULDBLOCK: operation would block
    AGAIN = 35, // Resource temporarily unavailable
    INPROGRESS = 36, // Operation now in progress
    ALREADY = 37, // Operation already in progress
    // ipc/network software -- argument errors
    NOTSOCK = 38, // Socket operation on non-socket
    DESTADDRREQ = 39, // Destination address required
    MSGSIZE = 40, // Message too long
    PROTOTYPE = 41, // Protocol wrong type for socket
    NOPROTOOPT = 42, // Protocol option not available
    PROTONOSUPPORT = 43, // Protocol not supported
    SOCKTNOSUPPORT = 44, // Socket type not supported
    OPNOTSUPP = 45, // Operation not supported
    PFNOSUPPORT = 46, // Protocol family not supported
    AFNOSUPPORT = 47, // Address family not supported by protocol family
    ADDRINUSE = 48, // Address already in use
    ADDRNOTAVAIL = 49, // Can't assign requested address
    // ipc/network software -- operational errors
    NETDOWN = 50, // Network is down
    NETUNREACH = 51, // Network is unreachable
    NETRESET = 52, // Network dropped connection on reset
    CONNABORTED = 53, // Software caused connection abort
    CONNRESET = 54, // Connection reset by peer
    NOBUFS = 55, // No buffer space available
    ISCONN = 56, // Socket is already connected
    NOTCONN = 57, // Socket is not connected
    SHUTDOWN = 58, // Can't send after socket shutdown
    TOOMANYREFS = 59, // Too many references: can't splice
    TIMEDOUT = 60, // Operation timed out
    CONNREFUSED = 61, // Connection refused
    LOOP = 62, // Too many levels of symbolic links
    NAMETOOLONG = 63, // File name too long
    // should be rearranged
    HOSTDOWN = 64, // Host is down
    HOSTUNREACH = 65, // No route to host
    NOTEMPTY = 66, // Directory not empty
    // quotas & mush
    PROCLIM = 67, // Too many processes
    USERS = 68, // Too many users
    DQUOT = 69, // Disc quota exceeded
    // Network File System
    STALE = 70, // Stale NFS file handle
    REMOTE = 71, // Too many levels of remote in path
    BADRPC = 72, // RPC struct is bad
    RPCMISMATCH = 73, // RPC version wrong
    PROGUNAVAIL = 74, // RPC prog. not avail
    PROGMISMATCH = 75, // Program version wrong
    PROCUNAVAIL = 76, // Bad procedure for program
    NOLCK = 77, // No locks available
    NOSYS = 78, // Function not implemented
    FTYPE = 79, // Inappropriate file type or format
    AUTH = 80, // Authentication error
    NEEDAUTH = 81, // Need authenticator
    IPSEC = 82, // IPsec processing failure
    NOATTR = 83, // Attribute not found
    // Wide/multibyte-character handling, ISO/IEC 9899/AMD1:1995
    ILSEQ = 84, // Illegal byte sequence
    NOMEDIUM = 85, // No medium found
    MEDIUMTYPE = 86, // Wrong medium type
    OVERFLOW = 87, // Value too large to be stored in data type
    CANCELED = 88, // Operation canceled
    IDRM = 89, // Identifier removed
    NOMSG = 90, // No message of desired type
    NOTSUP = 91, // Not supported
    BADMSG = 92, // Bad or Corrupt message
    NOTRECOVERABLE = 93, // State not recoverable
    OWNERDEAD = 94, // Previous owner died
    PROTO = 95, // Protocol error
    _,
};
// Page shift for the current target. OpenBSD uses 4 KiB pages on i386,
// amd64, and arm64, and 8 KiB pages on sparc64.
// FIX: the switch previously listed only .i386 and .sparcv9, so this file
// could not compile on x86_64 even though the ucontext_t block above
// explicitly supports that architecture.
const _MAX_PAGE_SHIFT = switch (builtin.cpu.arch) {
    .i386, .x86_64, .aarch64 => 12,
    .sparcv9 => 13,
};
// Minimum usable size for an alternate signal stack (one page).
pub const MINSIGSTKSZ = 1 << _MAX_PAGE_SHIFT;
// Recommended alternate signal stack size (five pages total).
pub const SIGSTKSZ = MINSIGSTKSZ + (1 << _MAX_PAGE_SHIFT) * 4;
// Flags for sigaltstack(2) and the alternate-signal-stack descriptor.
pub const SS_ONSTACK = 0x0001;
pub const SS_DISABLE = 0x0004;
pub const stack_t = extern struct {
    sp: [*]u8, // stack base pointer
    size: usize, // stack size in bytes
    flags: c_int, // SS_* flags
};
// File mode bits (st_mode): file-type mask/values, set-id/sticky bits,
// permission bits, plus the standard IS* file-type predicates.
pub const S = struct {
    pub const IFMT = 0o170000;
    pub const IFIFO = 0o010000;
    pub const IFCHR = 0o020000;
    pub const IFDIR = 0o040000;
    pub const IFBLK = 0o060000;
    pub const IFREG = 0o100000;
    pub const IFLNK = 0o120000;
    pub const IFSOCK = 0o140000;
    pub const ISUID = 0o4000;
    pub const ISGID = 0o2000;
    pub const ISVTX = 0o1000;
    pub const IRWXU = 0o700;
    pub const IRUSR = 0o400;
    pub const IWUSR = 0o200;
    pub const IXUSR = 0o100;
    pub const IRWXG = 0o070;
    pub const IRGRP = 0o040;
    pub const IWGRP = 0o020;
    pub const IXGRP = 0o010;
    pub const IRWXO = 0o007;
    pub const IROTH = 0o004;
    pub const IWOTH = 0o002;
    pub const IXOTH = 0o001;
    // Each predicate masks out the file-type bits and compares to one type.
    pub fn ISFIFO(m: u32) bool {
        return m & IFMT == IFIFO;
    }
    pub fn ISCHR(m: u32) bool {
        return m & IFMT == IFCHR;
    }
    pub fn ISDIR(m: u32) bool {
        return m & IFMT == IFDIR;
    }
    pub fn ISBLK(m: u32) bool {
        return m & IFMT == IFBLK;
    }
    pub fn ISREG(m: u32) bool {
        return m & IFMT == IFREG;
    }
    pub fn ISLNK(m: u32) bool {
        return m & IFMT == IFLNK;
    }
    pub fn ISSOCK(m: u32) bool {
        return m & IFMT == IFSOCK;
    }
};
// Flags and special fd values for the *at() family of syscalls.
pub const AT = struct {
    /// Magic value that specify the use of the current working directory
    /// to determine the target of relative file paths in the openat() and
    /// similar syscalls.
    pub const FDCWD = -100;
    /// Check access using effective user and group ID
    pub const EACCESS = 0x01;
    /// Do not follow symbolic links
    pub const SYMLINK_NOFOLLOW = 0x02;
    /// Follow symbolic link
    pub const SYMLINK_FOLLOW = 0x04;
    /// Remove directory instead of file
    pub const REMOVEDIR = 0x08;
};
// Maximum host name length excluding the terminating NUL.
pub const HOST_NAME_MAX = 255;
// IP protocol numbers as used in the protocol field of IP headers and the
// `protocol` argument of socket(2).
pub const IPPROTO = struct {
    /// dummy for IP
    pub const IP = 0;
    /// IP6 hop-by-hop options
    pub const HOPOPTS = IP;
    /// control message protocol
    pub const ICMP = 1;
    /// group mgmt protocol
    pub const IGMP = 2;
    /// gateway^2 (deprecated)
    pub const GGP = 3;
    /// IP header
    pub const IPV4 = IPIP;
    /// IP inside IP
    pub const IPIP = 4;
    /// tcp
    pub const TCP = 6;
    /// exterior gateway protocol
    pub const EGP = 8;
    /// pup
    pub const PUP = 12;
    /// user datagram protocol
    pub const UDP = 17;
    /// xns idp
    pub const IDP = 22;
    /// tp-4 w/ class negotiation
    pub const TP = 29;
    /// IP6 header
    pub const IPV6 = 41;
    /// IP6 routing header
    pub const ROUTING = 43;
    /// IP6 fragmentation header
    pub const FRAGMENT = 44;
    /// resource reservation
    pub const RSVP = 46;
    /// GRE encaps RFC 1701
    pub const GRE = 47;
    /// encap. security payload
    pub const ESP = 50;
    /// authentication header
    pub const AH = 51;
    /// IP Mobility RFC 2004
    pub const MOBILE = 55;
    /// IPv6 ICMP
    pub const IPV6_ICMP = 58;
    /// ICMP6
    pub const ICMPV6 = 58;
    /// IP6 no next header
    pub const NONE = 59;
    /// IP6 destination option
    pub const DSTOPTS = 60;
    /// ISO cnlp
    pub const EON = 80;
    /// Ethernet-in-IP
    pub const ETHERIP = 97;
    /// encapsulation header
    pub const ENCAP = 98;
    /// Protocol indep. multicast
    pub const PIM = 103;
    /// IP Payload Comp. Protocol
    pub const IPCOMP = 108;
    /// VRRP RFC 2338
    pub const VRRP = 112;
    /// Common Address Resolution Protocol
    pub const CARP = 112;
    /// PFSYNC
    pub const PFSYNC = 240;
    /// raw IP packet
    pub const RAW = 255;
};
// Resource identifiers for getrlimit(2)/setrlimit(2); values start at 0 in
// declaration order. Non-exhaustive to tolerate newer kernels.
pub const rlimit_resource = enum(c_int) {
    CPU,
    FSIZE,
    DATA,
    STACK,
    CORE,
    RSS,
    MEMLOCK,
    NPROC,
    NOFILE,
    _,
};
pub const rlim_t = u64;
pub const RLIM = struct {
    /// No limit
    pub const INFINITY: rlim_t = (1 << 63) - 1;
    pub const SAVED_MAX = INFINITY;
    pub const SAVED_CUR = INFINITY;
};
// Resource limit pair returned by getrlimit(2).
pub const rlimit = extern struct {
    /// Soft limit
    cur: rlim_t,
    /// Hard limit
    max: rlim_t,
};
// `how` argument values for shutdown(2).
pub const SHUT = struct {
    pub const RD = 0;
    pub const WR = 1;
    pub const RDWR = 2;
};
// poll(2) descriptor and event flags.
pub const nfds_t = c_uint;
pub const pollfd = extern struct {
    fd: fd_t,
    events: c_short, // requested events
    revents: c_short, // returned events
};
pub const POLL = struct {
    pub const IN = 0x0001;
    pub const PRI = 0x0002;
    pub const OUT = 0x0004;
    pub const ERR = 0x0008;
    pub const HUP = 0x0010;
    pub const NVAL = 0x0020;
    pub const RDNORM = 0x0040;
    pub const NORM = RDNORM;
    pub const WRNORM = OUT;
    pub const RDBAND = 0x0080;
    pub const WRBAND = 0x0100;
};
// Top-level identifiers for the sysctl(2) MIB name space.
pub const CTL = struct {
    pub const UNSPEC = 0;
    pub const KERN = 1;
    pub const VM = 2;
    pub const FS = 3;
    pub const NET = 4;
    pub const DEBUG = 5;
    pub const HW = 6;
    pub const MACHDEP = 7;
    pub const DDB = 9;
    pub const VFS = 10;
};
// Second-level sysctl identifiers under CTL.KERN. The PROC_* values at the
// bottom are third-level selectors/flags used with KERN.PROC and
// KERN.PROC_ARGS.
pub const KERN = struct {
    pub const OSTYPE = 1;
    pub const OSRELEASE = 2;
    pub const OSREV = 3;
    pub const VERSION = 4;
    pub const MAXVNODES = 5;
    pub const MAXPROC = 6;
    pub const MAXFILES = 7;
    pub const ARGMAX = 8;
    pub const SECURELVL = 9;
    pub const HOSTNAME = 10;
    pub const HOSTID = 11;
    pub const CLOCKRATE = 12;
    pub const PROF = 16;
    pub const POSIX1 = 17;
    pub const NGROUPS = 18;
    pub const JOB_CONTROL = 19;
    pub const SAVED_IDS = 20;
    pub const BOOTTIME = 21;
    pub const DOMAINNAME = 22;
    pub const MAXPARTITIONS = 23;
    pub const RAWPARTITION = 24;
    pub const MAXTHREAD = 25;
    pub const NTHREADS = 26;
    pub const OSVERSION = 27;
    pub const SOMAXCONN = 28;
    pub const SOMINCONN = 29;
    pub const NOSUIDCOREDUMP = 32;
    pub const FSYNC = 33;
    pub const SYSVMSG = 34;
    pub const SYSVSEM = 35;
    pub const SYSVSHM = 36;
    pub const MSGBUFSIZE = 38;
    pub const MALLOCSTATS = 39;
    pub const CPTIME = 40;
    pub const NCHSTATS = 41;
    pub const FORKSTAT = 42;
    pub const NSELCOLL = 43;
    pub const TTY = 44;
    pub const CCPU = 45;
    pub const FSCALE = 46;
    pub const NPROCS = 47;
    pub const MSGBUF = 48;
    pub const POOL = 49;
    pub const STACKGAPRANDOM = 50;
    pub const SYSVIPC_INFO = 51;
    pub const ALLOWKMEM = 52;
    pub const WITNESSWATCH = 53;
    pub const SPLASSERT = 54;
    pub const PROC_ARGS = 55;
    pub const NFILES = 56;
    pub const TTYCOUNT = 57;
    pub const NUMVNODES = 58;
    pub const MBSTAT = 59;
    pub const WITNESS = 60;
    pub const SEMINFO = 61;
    pub const SHMINFO = 62;
    pub const INTRCNT = 63;
    pub const WATCHDOG = 64;
    pub const ALLOWDT = 65;
    pub const PROC = 66;
    pub const MAXCLUSTERS = 67;
    pub const EVCOUNT = 68;
    pub const TIMECOUNTER = 69;
    pub const MAXLOCKSPERUID = 70;
    pub const CPTIME2 = 71;
    pub const CACHEPCT = 72;
    pub const FILE = 73;
    pub const WXABORT = 74;
    pub const CONSDEV = 75;
    pub const NETLIVELOCKS = 76;
    pub const POOL_DEBUG = 77;
    pub const PROC_CWD = 78;
    pub const PROC_NOBROADCASTKILL = 79;
    pub const PROC_VMMAP = 80;
    pub const GLOBAL_PTRACE = 81;
    pub const CONSBUFSIZE = 82;
    pub const CONSBUF = 83;
    pub const AUDIO = 84;
    pub const CPUSTATS = 85;
    pub const PFSTATUS = 86;
    pub const TIMEOUT_STATS = 87;
    pub const UTC_OFFSET = 88;
    pub const VIDEO = 89;
    // KERN.PROC filter selectors.
    pub const PROC_ALL = 0;
    pub const PROC_PID = 1;
    pub const PROC_PGRP = 2;
    pub const PROC_SESSION = 3;
    pub const PROC_TTY = 4;
    pub const PROC_UID = 5;
    pub const PROC_RUID = 6;
    pub const PROC_KTHREAD = 7;
    pub const PROC_SHOW_THREADS = 0x40000000;
    // KERN.PROC_ARGS selectors.
    pub const PROC_ARGV = 1;
    pub const PROC_NARGV = 2;
    pub const PROC_ENV = 3;
    pub const PROC_NENV = 4;
};
// Second-level sysctl identifiers under CTL.HW.
pub const HW_MACHINE = 1;
pub const HW_MODEL = 2;
pub const HW_NCPU = 3;
pub const HW_BYTEORDER = 4;
pub const HW_PHYSMEM = 5;
pub const HW_USERMEM = 6;
pub const HW_PAGESIZE = 7;
pub const HW_DISKNAMES = 8;
pub const HW_DISKSTATS = 9;
pub const HW_DISKCOUNT = 10;
pub const HW_SENSORS = 11;
pub const HW_CPUSPEED = 12;
pub const HW_SETPERF = 13;
pub const HW_VENDOR = 14;
pub const HW_PRODUCT = 15;
pub const HW_VERSION = 16;
pub const HW_SERIALNO = 17;
pub const HW_UUID = 18;
pub const HW_PHYSMEM64 = 19;
pub const HW_USERMEM64 = 20;
pub const HW_NCPUFOUND = 21;
pub const HW_ALLOWPOWERDOWN = 22;
pub const HW_PERFPOLICY = 23;
pub const HW_SMT = 24;
pub const HW_NCPUONLINE = 25;
|
lib/std/c/openbsd.zig
|
const std = @import("std");
const common = @import("../_common_utils.zig");
const FV = common.FV;
pub const HSET = struct {
    key: []const u8,
    fvs: []const FV,

    /// Instantiates a new HSET command for `key` with the given
    /// field-value pairs.
    pub fn init(key: []const u8, fvs: []const FV) HSET {
        return .{ .key = key, .fvs = fvs };
    }

    /// Validates if the command is syntactically correct.
    pub fn validate(self: HSET) !void {
        if (self.key.len == 0) return error.EmptyKeyName;
        if (self.fvs.len == 0) return error.FVsArrayIsEmpty;

        // Check the individual FV pairs.
        // TODO: how the hell do I check for dups without an allocator?
        for (self.fvs) |pair| {
            if (pair.field.len == 0) return error.EmptyFieldName;
        }
    }

    // This reassignment is necessary to avoid having two definitions of
    // RedisCommand in the same scope (it causes a shadowing error).
    pub const forStruct = _forStruct;

    pub const RedisCommand = struct {
        pub fn serialize(self: HSET, comptime rootSerializer: type, msg: anytype) !void {
            return rootSerializer.serializeCommand(msg, .{ "HSET", self.key, self });
        }
    };

    pub const RedisArguments = struct {
        // Each pair contributes a field argument and a value argument.
        pub fn count(self: HSET) usize {
            return self.fvs.len * 2;
        }

        pub fn serialize(self: HSET, comptime rootSerializer: type, msg: anytype) !void {
            for (self.fvs) |pair| {
                try rootSerializer.serializeArgument(msg, []const u8, pair.field);
                try rootSerializer.serializeArgument(msg, []const u8, pair.value);
            }
        }
    };
};
// Returns an HSET-like command type whose field-value pairs come from the
// fields of the struct type `T` instead of a runtime FV slice.
fn _forStruct(comptime T: type) type {
    // TODO: support pointers to struct, check that the struct is serializable (strings and numbers).
    // TODO: there is some duplicated code with xread. Values should be a dedicated generic type.
    if (@typeInfo(T) != .Struct) @compileError("Only Struct types allowed.");
    return struct {
        key: []const u8,
        values: T,
        const Self = @This();
        pub fn init(key: []const u8, values: T) Self {
            return .{ .key = key, .values = values };
        }
        /// Validates if the command is syntactically correct.
        pub fn validate(self: Self) !void {
            if (self.key.len == 0) return error.EmptyKeyName;
        }
        pub const RedisCommand = struct {
            pub fn serialize(self: Self, comptime rootSerializer: type, msg: anytype) !void {
                return rootSerializer.serializeCommand(msg, .{
                    "HSET",
                    self.key,
                    // Dirty trick to control struct serialization :3
                    self,
                });
            }
        };
        // We are marking ourselves also as an argument to manage struct serialization.
        pub const RedisArguments = struct {
            // One field-name argument plus one value argument per struct field.
            pub fn count(self: Self) usize {
                return comptime std.meta.fields(T).len * 2;
            }
            pub fn serialize(self: Self, comptime rootSerializer: type, msg: anytype) !void {
                inline for (std.meta.fields(T)) |field| {
                    const arg = @field(self.values, field.name);
                    const ArgT = @TypeOf(arg);
                    try rootSerializer.serializeArgument(msg, []const u8, field.name);
                    try rootSerializer.serializeArgument(msg, ArgT, arg);
                }
            }
        };
    };
}
test "basic usage" {
    // Slice-based HSET with explicit field-value pairs.
    const cmd = HSET.init("mykey", &[_]FV{
        .{ .field = "field1", .value = "val1" },
        .{ .field = "field2", .value = "val2" },
    });
    try cmd.validate();
    // Struct-based HSET generated via forStruct.
    const ExampleStruct = struct {
        banana: usize,
        id: []const u8,
    };
    const example = ExampleStruct{
        .banana = 10,
        .id = "ok",
    };
    const cmd1 = HSET.forStruct(ExampleStruct).init("mykey", example);
    try cmd1.validate();
}
test "serializer" {
    const serializer = @import("../../serializer.zig").CommandSerializer;

    // Serialize the command under test and a hand-written equivalent into
    // two buffers, then compare the bytes.
    var correctBuf: [1000]u8 = undefined;
    var correctMsg = std.io.fixedBufferStream(correctBuf[0..]);
    var testBuf: [1000]u8 = undefined;
    var testMsg = std.io.fixedBufferStream(testBuf[0..]);
    {
        {
            correctMsg.reset();
            testMsg.reset();
            try serializer.serializeCommand(
                testMsg.outStream(),
                HSET.init("k1", &[_]FV{.{ .field = "f1", .value = "v1" }}),
            );
            try serializer.serializeCommand(
                correctMsg.outStream(),
                .{ "HSET", "k1", "f1", "v1" },
            );
            // std.debug.warn("{}\n\n\n{}\n", .{ correctMsg.getWritten(), testMsg.getWritten() });
            // NOTE(review): newer std returns an error here that should be
            // `try`-ed; kept as-is for the std version this file targets.
            std.testing.expectEqualSlices(u8, correctMsg.getWritten(), testMsg.getWritten());
        }
        {
            correctMsg.reset();
            testMsg.reset();
            const MyStruct = struct {
                field1: []const u8,
                field2: u8,
                field3: usize,
            };
            const MyHSET = HSET.forStruct(MyStruct);
            try serializer.serializeCommand(
                testMsg.outStream(),
                MyHSET.init(
                    "k1",
                    .{ .field1 = "nice!", .field2 = 'a', .field3 = 42 },
                ),
            );
            try serializer.serializeCommand(
                correctMsg.outStream(),
                .{ "HSET", "k1", "field1", "nice!", "field2", 'a', "field3", 42 },
            );
            std.testing.expectEqualSlices(u8, correctMsg.getWritten(), testMsg.getWritten());
        }
    }
}
|
src/commands/hashes/hset.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
const List = std.ArrayList;
const Map = std.AutoHashMap;
const StrMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const Str = []const u8;
const util = @import("util.zig");
const gpa = util.gpa;
const data = @embedFile("../data/day02.txt");
// Advent of Code 2021 day 2 submarine state. The part-1 submarine only uses
// horizontal/depth; the part-2 submarine additionally tracks aim.
const Submarine = struct{
    horizontal: u64 = 0, // distance traveled forward
    depth:u64 = 0, // current depth
    aim:u64 =0, // part-2 aim (unused by part 1)
    // Returns the puzzle answer: horizontal position times depth.
    pub fn SubMultiply(self:*Submarine) u64 {
        return self.horizontal * self.depth;
    }
};
/// Solves AoC 2021 day 2: parses "<command> <amount>" lines from the embedded
/// input and prints the part-1 and part-2 answers.
pub fn main() !void {
    var part1Sub = Submarine{};
    var part2Sub = Submarine{};

    var lines = tokenize(u8, data, "\r\n");
    while (lines.next()) |line| {
        var tokenized = tokenize(u8, line, " ");
        const order = tokenized.next().?;
        const amount = try parseInt(u64, tokenized.next().?, 10);
        //std.debug.print("order: {s}, amount: {} \n", .{order, amount});
        if (std.mem.eql(u8, order, "forward")) {
            part1Sub.horizontal += amount;
            part2Sub.horizontal += amount;
            part2Sub.depth += (amount * part2Sub.aim);
        } else if (std.mem.eql(u8, order, "down")) {
            part1Sub.depth += amount;
            part2Sub.aim += amount;
        } else if (std.mem.eql(u8, order, "up")) {
            // NOTE(review): depth/aim are u64, so an "up" past zero would
            // trap on underflow; puzzle inputs presumably never do that.
            part1Sub.depth -= amount;
            part2Sub.aim -= amount;
        } else {
            std.debug.print("something went horribly wrong", .{});
        }
    }
    std.debug.print("Day 2 Part 1: {} \n", .{part1Sub.SubMultiply()});
    // BUG FIX: this line previously printed the label "Part 1" for the
    // part-2 answer.
    std.debug.print("Day 2 Part 2: {} \n", .{part2Sub.SubMultiply()});
}
// Useful stdlib functions
// (Advent-of-Code template boilerplate; most of these aliases are unused in
// this particular day's solution.)
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;
const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;
const min = std.math.min;
const min3 = std.math.min3;
const max = std.math.max;
const max3 = std.math.max3;
const print = std.debug.print;
const assert = std.debug.assert;
const sort = std.sort.sort;
const asc = std.sort.asc;
const desc = std.sort.desc;
|
src/day02.zig
|
const std = @import("std");
const utf8 = @import("../unicode/utf8/index.zig");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const mem = std.mem;
const warn = std.debug.warn;
/// Returns the byte index of the last UTF-8 rune in `input` for which `f`
/// returns true, or null if there is none.
pub fn lastIndexFunc(input: []const u8, f: fn (rune: i32) bool) anyerror!?usize {
    return lastIndexFuncInternal(input, f, true);
}
// Walks `input` backwards one rune at a time and returns the byte index of
// the last rune whose predicate result equals `truthy`, or null.
fn lastIndexFuncInternal(input: []const u8, f: fn (rune: i32) bool, truthy: bool) anyerror!?usize {
    var idx = @intCast(isize, input.len);
    while (idx > 0) {
        // Decode the rune that ends at byte offset `idx`.
        const r = try utf8.decodeLastRune(input[0..@intCast(usize, idx)]);
        idx -= @intCast(isize, r.size);
        if (f(r.value) == truthy) {
            // `idx` now points at the first byte of the matching rune.
            return @intCast(usize, idx);
        }
    }
    return null;
}
// stringFinder efficiently finds strings in a source text. It's implemented
// using the Boyer-Moore string search algorithm:
// https://en.wikipedia.org/wiki/Boyer-Moore_string_search_algorithm
// https://www.cs.utexas.edu/~moore/publications/fstrpos.pdf (note: this aged
// document uses 1-based indexing)
pub const StringFinder = struct {
    /// Pattern being searched for (not owned by the finder).
    pattern: []const u8,
    /// For each byte value, how far to slide the window when that byte is
    /// the rightmost mismatching character.
    bad_char_skip: [256]usize,
    /// For each position, how far to slide when the suffix starting there
    /// has matched but the preceding byte has not.
    good_suffix_skip: []usize,
    a: *Allocator,
    const Self = @This();

    /// Builds the Boyer-Moore skip tables for `pattern`. `good_suffix_skip`
    /// is allocated from `a`; call `deinit` to release it.
    pub fn init(a: *Allocator, pattern: []const u8) !Self {
        var s: Self = undefined;
        s.a = a;
        s.pattern = pattern;
        if (pattern.len == 0) {
            return s;
        }
        s.good_suffix_skip = try a.alloc(usize, pattern.len);
        // last is the index of the last character in the pattern.
        const last = pattern.len - 1;
        // Build bad character table.
        // Bytes not in the pattern can skip one pattern's length.
        for (s.bad_char_skip) |*value| {
            value.* = pattern.len;
        }
        // The loop condition is < instead of <= so that the last byte does not
        // have a zero distance to itself. Finding this byte out of place implies
        // that it is not in the last position.
        var i: usize = 0;
        while (i < last) : (i += 1) {
            s.bad_char_skip[@intCast(usize, pattern[i])] = last - i;
        }
        // Build good suffix table.
        // First pass: set each value to the next index which starts a prefix of
        // pattern.
        var last_prefix = last;
        i = last;
        while (i >= 0) {
            if (hasPrefix(pattern, pattern[i + 1 ..])) {
                last_prefix = i + 1;
            }
            // last_prefix is the shift, and (last-i) is len(suffix).
            // BUG FIX: this previously computed `last_prefix + last - 1`,
            // which yields a wrong shift for every position except i == 1
            // (cf. Go's strings.stringFinder, which this code ports).
            s.good_suffix_skip[i] = last_prefix + last - i;
            if (i == 0) {
                break;
            }
            i -= 1;
        }
        // Second pass: find repeats of pattern's suffix starting from the front.
        i = 0;
        while (i < last) : (i += 1) {
            const len_suffix = longestCommonSuffix(pattern, pattern[1 .. i + 1]);
            if (pattern[i - len_suffix] != pattern[last - len_suffix]) {
                s.good_suffix_skip[last - len_suffix] = len_suffix + last - i;
            }
        }
        return s;
    }

    /// Frees the good-suffix table (no-op for an empty pattern).
    pub fn deinit(self: *Self) void {
        if (self.pattern.len > 0) {
            self.a.free(self.good_suffix_skip);
        }
    }

    /// Returns the index in `text` of the first occurrence of the pattern,
    /// or null if the pattern is not present. An empty pattern matches at 0.
    pub fn next(self: *Self, text: []const u8) ?usize {
        const size = self.pattern.len;
        var i = @intCast(isize, size) - 1;
        while (i < @intCast(isize, text.len)) {
            // Compare backwards from the end of the window.
            var j: isize = @intCast(isize, size) - 1;
            while (j >= 0 and (text[@intCast(usize, i)] == self.pattern[@intCast(usize, j)])) {
                i -= 1;
                j -= 1;
            }
            if (j < 0) {
                return @intCast(usize, i + 1);
            }
            // Slide by the larger of the two precomputed skips.
            i += @intCast(isize, max(
                self.bad_char_skip[@intCast(usize, text[@intCast(usize, i)])],
                self.good_suffix_skip[@intCast(usize, j)],
            ));
        }
        return null;
    }
};
/// Convenience wrapper: builds a temporary StringFinder for `pattern` and
/// returns the index of its first occurrence in `text`, or null.
pub fn stringFind(a: *Allocator, pattern: []const u8, text: []const u8) !?usize {
    // Takes the address of the init result so the *Self methods can be called.
    var f = &try StringFinder.init(a, pattern);
    defer f.deinit();
    return f.next(text);
}
/// Returns the larger of two usize values.
fn max(a: usize, b: usize) usize {
    return if (a > b) a else b;
}
/// Reports whether `s` starts with `prefix`.
pub fn hasPrefix(s: []const u8, prefix: []const u8) bool {
    return s.len >= prefix.len and std.mem.eql(u8, s[0..prefix.len], prefix);
}
/// Returns the length of the longest suffix shared by `a` and `b`.
fn longestCommonSuffix(a: []const u8, b: []const u8) usize {
    const limit = if (a.len < b.len) a.len else b.len;
    var n: usize = 0;
    while (n < limit and a[a.len - 1 - n] == b[b.len - 1 - n]) : (n += 1) {}
    return n;
}
// Replaces all occurrences of a single `old` string with `new`, using a
// Boyer-Moore StringFinder to locate matches.
pub const SingleReplacer = struct {
    find: StringFinder,
    old: []const u8,
    new: []const u8,
    // Embedded interface; recovered via @fieldParentPtr in replaceFn.
    replacer: Replacer,
    pub fn init(a: *Allocator, old: []const u8, new: []const u8) !SingleReplacer {
        return SingleReplacer{
            .old = old,
            .new = new,
            .find = try StringFinder.init(a, old),
            .replacer = Replacer{ .replaceFn = replaceFn },
        };
    }
    pub fn deinit(self: *SingleReplacer) void {
        self.find.deinit();
    }
    // Appends `s` to `buf` with every occurrence of `old` replaced by `new`.
    pub fn replaceFn(replace_ctx: *Replacer, s: []const u8, buf: *std.Buffer) anyerror!void {
        const self = @fieldParentPtr(SingleReplacer, "replacer", replace_ctx);
        var i: usize = 0;
        var matched = false;
        top: while (true) {
            // `match` is relative to s[i..].
            if ((&self.find).next(s[i..])) |match| {
                matched = true;
                try buf.append(s[i .. i + match]);
                try buf.append(self.new);
                i += match + self.old.len;
                continue :top;
            }
            break;
        }
        if (!matched) {
            // Fast path: nothing replaced, copy the input unchanged.
            try buf.append(s);
        } else {
            // Copy the tail after the last match.
            try buf.append(s[i..]);
        }
    }
};
// Minimal interface type: concrete replacers embed a Replacer field and set
// `replaceFn`; `replace` dispatches through it.
pub const Replacer = struct {
    replaceFn: fn (*Replacer, []const u8, *std.Buffer) anyerror!void,
    pub fn replace(self: *Replacer, s: []const u8, out: *std.Buffer) anyerror!void {
        return self.replaceFn(self, s, out);
    }
};
/// Replaces a set of (old, new) string pairs, choosing the most efficient
/// strategy for the shape of the pairs (ported from Go's strings.Replacer):
/// a single multi-byte pair, generic trie-based matching, byte->byte, or
/// byte->string tables.
pub const StringReplacer = struct {
    impl: ReplaceImpl,
    const ReplaceImpl = union(enum) {
        Single: SingleReplacer,
        Generic: GenericReplacer,
        Byte: ByteReplacer,
        ByteString: ByteStringReplacer,
    };
    /// `old_new` is a flat list of (old, new) pairs; earlier pairs take
    /// precedence over later ones.
    pub fn init(a: *Allocator, old_new: []const []const u8) !StringReplacer {
        // One pair whose `old` is longer than a byte: Boyer-Moore search.
        if (old_new.len == 2 and old_new[0].len > 1) {
            return StringReplacer{
                .impl = ReplaceImpl{
                    .Single = try SingleReplacer.init(a, old_new[0], old_new[1]),
                },
            };
        }
        var all_new_bytes = true;
        var i: usize = 0;
        while (i < old_new.len) : (i += 2) {
            // Any multi-byte `old` forces the generic trie implementation.
            if (old_new[i].len != 1) {
                return StringReplacer{
                    .impl = ReplaceImpl{
                        .Generic = try GenericReplacer.init(a, old_new),
                    },
                };
            }
            if (old_new[i + 1].len != 1) {
                all_new_bytes = false;
            }
        }
        if (all_new_bytes) {
            return StringReplacer{
                .impl = ReplaceImpl{
                    .Byte = ByteReplacer.init(old_new),
                },
            };
        }
        return StringReplacer{
            .impl = ReplaceImpl{
                .ByteString = ByteStringReplacer.init(old_new),
            },
        };
    }
    // Appends `s` to `out` with all replacements applied.
    // FIX: removed the dead `else => unreachable` prong — every union tag
    // is already covered, so an `else` prong is unreachable by definition.
    fn replace(self: *StringReplacer, s: []const u8, out: *std.Buffer) anyerror!void {
        switch (self.impl) {
            ReplaceImpl.Single => |*value| {
                const r = &value.replacer;
                return try r.replace(s, out);
            },
            ReplaceImpl.Generic => |*value| {
                const r = &value.replacer;
                return try r.replace(s, out);
            },
            ReplaceImpl.Byte => |*value| {
                const r = &value.replacer;
                return try r.replace(s, out);
            },
            ReplaceImpl.ByteString => |*value| {
                const r = &value.replacer;
                return try r.replace(s, out);
            },
        }
    }
    // FIX: removed the dead `else => unreachable` prong here as well.
    pub fn deinit(self: *StringReplacer) void {
        switch (self.impl) {
            ReplaceImpl.Single => |*value| {
                value.deinit();
            },
            ReplaceImpl.Generic => |*value| {
                value.deinit();
            },
            // Byte tables own no heap memory.
            ReplaceImpl.Byte => {},
            ReplaceImpl.ByteString => {},
        }
    }
};
// Replaces single bytes with single bytes via a 256-entry lookup table.
pub const ByteReplacer = struct {
    matrix: [256]u8,
    replacer: Replacer,

    /// Builds the byte->byte table from flat (old, new) pairs. Pairs are
    /// applied back-to-front so earlier pairs take precedence when the same
    /// byte occurs more than once. Assumes at least one pair.
    pub fn init(old_new: []const []const u8) ByteReplacer {
        var table: [256]u8 = undefined;
        // Start from the identity mapping.
        for (table) |*entry, b| {
            entry.* = @intCast(u8, b);
        }
        var idx = old_new.len - 2;
        while (true) {
            table[old_new[idx][0]] = old_new[idx + 1][0];
            if (idx < 2) break;
            idx -= 2;
        }
        return ByteReplacer{
            .matrix = table,
            .replacer = Replacer{ .replaceFn = replaceFn },
        };
    }

    /// Appends `s` to `buf` with every byte mapped through the table.
    pub fn replaceFn(replace_ctx: *Replacer, s: []const u8, buf: *std.Buffer) anyerror!void {
        const self = @fieldParentPtr(ByteReplacer, "replacer", replace_ctx);
        for (s) |b| {
            // The table is the identity for untouched bytes, so a single
            // unconditional append matches the branchy original exactly.
            try buf.appendByte(self.matrix[b]);
        }
    }
};
pub const ByteStringReplacer = struct {
    /// Replacement string per byte, or null when the byte has no rule.
    replacements: [256]?[]const u8,
    replacer: Replacer,

    pub fn init(old_new: []const []const u8) ByteStringReplacer {
        // Initialize every slot to null: replaceFn checks
        // `self.replacements[b]` for *all* bytes, so leaving unmapped
        // entries `undefined` (as before) read uninitialized memory.
        var r = [_]?[]const u8{null} ** 256;
        // Apply pairs back to front so earlier pairs win; counting down
        // from len also avoids the `len - 2` usize underflow on empty input.
        var i = old_new.len;
        while (i >= 2) {
            i -= 2;
            const o = old_new[i][0];
            r[o] = old_new[i + 1];
        }
        return ByteStringReplacer{
            .replacements = r,
            .replacer = Replacer{ .replaceFn = replaceFn },
        };
    }

    /// Appends `s` to `buf`, replacing each mapped byte with its string.
    pub fn replaceFn(replace_ctx: *Replacer, s: []const u8, buf: *std.Buffer) anyerror!void {
        const self = @fieldParentPtr(ByteStringReplacer, "replacer", replace_ctx);
        // First pass: compute the output size and detect any change.
        var new_size: usize = s.len;
        var any_changes = false;
        for (s) |b| {
            if (self.replacements[b]) |value| {
                // `new_size += value.len - 1` underflowed a usize when the
                // replacement is empty; add then subtract instead
                // (new_size >= 1 here because the loop has consumed a byte).
                new_size += value.len;
                new_size -= 1;
                any_changes = true;
            }
        }
        if (!any_changes) {
            return buf.append(s);
        }
        try buf.resize(new_size);
        var out = buf.toSlice();
        // Second pass: emit replacements / passthrough bytes.
        var j: usize = 0;
        for (s) |b| {
            if (self.replacements[b]) |value| {
                mem.copy(u8, out[j..], value);
                j += value.len;
            } else {
                out[j] = b;
                j += 1;
            }
        }
    }
};
pub const GenericReplacer = struct {
    root: *TrieNode,
    /// Number of distinct bytes that appear in any key.
    table_size: usize,
    /// Maps a byte to its dense column in a node table, or `table_size`
    /// (an out-of-range sentinel) when the byte occurs in no key.
    mapping: [256]usize,
    a: *Allocator,
    replacer: Replacer,
    arena: std.heap.ArenaAllocator,

    const TrieNode = struct {
        // Replacement text when a key terminates at this node ("" otherwise).
        value: []const u8,
        // Non-zero when a key terminates here; higher wins (earlier pairs
        // receive higher priority in `init`).
        priority: usize,
        // Compressed single-child edge label; "" when the node uses `table`
        // instead (or is a leaf).
        prefix: []const u8,
        next: ?*TrieNode,
        table: ?*Table,
        const Table = ArrayList(?*TrieNode);

        // Debug helper: prints `size` spaces when tracing the trie.
        fn padd(size: usize) void {
            var i: usize = 0;
            while (i < size) : (i += 1) {
                warn(" ");
            }
        }

        /// Inserts `key` -> `value` with the given priority, splitting
        /// prefix edges as needed (port of Go's strings.Replacer trie).
        fn add(self: *TrieNode, key: []const u8, value: []const u8, priority: usize, r: *GenericReplacer) anyerror!void {
            if (key.len == 0) {
                // Key ends here; keep the first (highest-priority) binding.
                if (self.priority == 0) {
                    self.value = value;
                    self.priority = priority;
                }
                return;
            }
            if (!mem.eql(u8, self.prefix, "")) {
                // Longest shared prefix of the edge label and the key.
                const p = self.prefix;
                var n: usize = 0;
                while (n < p.len and n < key.len) : (n += 1) {
                    if (p[n] != key[n]) {
                        break;
                    }
                }
                if (n == p.len) {
                    try self.next.?.add(key[n..], value, priority, r);
                } else if (n == 0) {
                    // No shared prefix: convert this node to a table node.
                    var prefix_node: ?*TrieNode = null;
                    if (p.len == 1) {
                        prefix_node = self.next;
                    } else {
                        prefix_node = try r.createNode();
                        prefix_node.?.prefix = p[1..];
                        prefix_node.?.next = self.next;
                    }
                    var key_node = try r.createNode();
                    self.table = try r.createTable(r.table_size);
                    var ta = self.table.?;
                    try ta.resize(r.table_size);
                    ta.set(r.mapping[p[0]], prefix_node.?);
                    ta.set(r.mapping[key[0]], key_node);
                    self.prefix = "";
                    self.next = null;
                    try key_node.add(key[1..], value, priority, r);
                } else {
                    // Partial match: keep the shared part on this edge and
                    // push the remainder of the old label one level down.
                    const nxt = try r.createNode();
                    nxt.prefix = p[n..];
                    nxt.next = self.next;
                    // BUG FIX: the retained label is the shared part
                    // p[0..n], not p[n..] (the tail now lives on `nxt`);
                    // cf. Go's `t.prefix = t.prefix[:n]`.
                    self.prefix = p[0..n];
                    self.next = nxt;
                    try nxt.add(key[n..], value, priority, r);
                }
            } else if (self.table != null) {
                var ta = self.table.?;
                const m = r.mapping[key[0]];
                var n: *TrieNode = undefined;
                if (ta.toSlice()[m] == null) {
                    n = try r.createNode();
                    ta.set(m, n);
                } else {
                    n = ta.toSlice()[m].?;
                }
                try n.add(key[1..], value, priority, r);
            } else {
                // Empty leaf: store the whole key as one compressed edge.
                self.prefix = key;
                self.next = try r.createNode();
                try self.next.?.add("", value, priority, r);
            }
        }
    };

    const LookupRes = struct {
        value: []const u8,
        key_len: usize,
        found: bool,
    };

    /// Builds the trie from flat (old, new) pairs; earlier pairs win.
    /// NOTE(review): `g.a = &g.arena.allocator` stores a pointer into the
    /// local `g`, which dangles once `init` returns by value. Only `init`
    /// itself allocates through `a` today, so this works, but confirm
    /// before using `self.a` after init.
    pub fn init(a: *Allocator, old_new: []const []const u8) !GenericReplacer {
        var g: GenericReplacer = undefined;
        g.arena = std.heap.ArenaAllocator.init(a);
        g.mapping = [_]usize{0} ** 256;
        g.table_size = 0;
        g.replacer = Replacer{ .replaceFn = replaceFn };
        // Mark every byte that appears in any key.
        var i: usize = 0;
        while (i < old_new.len) : (i += 2) {
            var j: usize = 0;
            const key = old_new[i];
            while (j < key.len) : (j += 1) {
                g.mapping[key[j]] = 1;
            }
        }
        for (g.mapping) |value| {
            g.table_size += value;
        }
        // Replace the 0/1 marks with dense indices; unused bytes map to
        // the `table_size` sentinel.
        var index: usize = 0;
        for (g.mapping) |*value| {
            if (value.* == 0) {
                value.* = g.table_size;
            } else {
                value.* = index;
                index += 1;
            }
        }
        g.a = &g.arena.allocator;
        g.root = try g.createNode();
        g.root.table = try g.createTable(g.table_size);
        i = 0;
        while (i < old_new.len) : (i += 2) {
            try g.root.add(old_new[i], old_new[i + 1], old_new.len - i, &g);
        }
        return g;
    }

    fn createNode(self: *GenericReplacer) !*TrieNode {
        var n = try self.a.create(TrieNode);
        n.* = TrieNode{
            .prefix = "",
            // BUG FIX: start with no child; `undefined` here made later
            // null-checks on `next` read uninitialized memory.
            .next = null,
            .priority = 0,
            .value = "",
            .table = null,
        };
        return n;
    }

    fn createTable(self: *GenericReplacer, size: usize) !*TrieNode.Table {
        var n = try self.a.create(TrieNode.Table);
        n.* = TrieNode.Table.init(self.a);
        try n.resize(size);
        // We need to be able to check whether an index has a value yet, so
        // set every slot of the freshly sized array to null.
        for (n.toSlice()) |*value| {
            value.* = null;
        }
        return n;
    }

    fn deinit(self: *GenericReplacer) void {
        self.arena.deinit();
    }

    /// Finds the highest-priority key that is a prefix of `src`.
    fn lookup(self: *GenericReplacer, src: []const u8, ignore_root: bool) anyerror!LookupRes {
        var best_priority: usize = 0;
        var current_node: ?*TrieNode = self.root;
        var n: usize = 0;
        var result = LookupRes{
            .value = "",
            .key_len = 0,
            .found = false,
        };
        var s = src;
        while (current_node) |node| {
            if (node.priority > best_priority and !(ignore_root and node == self.root)) {
                best_priority = node.priority;
                result.value = node.value;
                result.key_len = n;
                // BUG FIX: record that a match was found; the original
                // never set this, so no replacement was ever applied.
                result.found = true;
            }
            if (mem.eql(u8, s, "")) {
                break;
            }
            if (node.table) |table| {
                const index = self.mapping[s[0]];
                if (index == self.table_size) {
                    break;
                }
                current_node = table.toSlice()[index];
                s = s[1..];
                n += 1;
            } else if (!mem.eql(u8, node.prefix, "") and hasPrefix(s, node.prefix)) {
                n += node.prefix.len;
                s = s[node.prefix.len..];
                current_node = node.next;
            } else {
                break;
            }
        }
        return result;
    }

    /// Scans `s`, appending unchanged spans and replacements to `buf`.
    pub fn replaceFn(replace_ctx: *Replacer, s: []const u8, buf: *std.Buffer) anyerror!void {
        const self = @fieldParentPtr(GenericReplacer, "replacer", replace_ctx);
        var last: usize = 0;
        var prev_match_empty = false;
        var i: usize = 0;
        // i runs to s.len inclusive so an empty key can match at the end.
        while (i <= s.len) {
            // Fast path: skip bytes that cannot start any match, valid only
            // when the root carries no empty-key match (priority == 0).
            if (i != s.len and self.root.priority == 0) {
                const index = self.mapping[s[i]];
                if (index == self.table_size or self.root.table.?.at(index) == null) {
                    i += 1;
                    continue;
                }
            }
            // BUG FIX: the lookup used to be nested inside the fast-path
            // condition, so `i` never advanced (infinite loop) whenever
            // root.priority != 0, and matches were skipped otherwise.
            const lk = try self.lookup(s[i..], prev_match_empty);
            prev_match_empty = lk.found and lk.key_len == 0;
            if (lk.found) {
                try buf.append(s[last..i]);
                try buf.append(lk.value);
                i += lk.key_len;
                last = i;
                continue;
            }
            i += 1;
        }
        if (last != s.len) {
            try buf.append(s[last..]);
        }
    }
};
|
src/strings/strings.zig
|
const std = @import("std");
const builtin = std.builtin;
const common = @import("../common.zig");
const crypto = std.crypto;
const debug = std.debug;
const math = std.math;
const mem = std.mem;
const Field = common.Field;
const NonCanonicalError = std.crypto.errors.NonCanonicalError;
const NotSquareError = std.crypto.errors.NotSquareError;
/// Number of bytes required to encode a scalar.
pub const encoded_length = 32;
/// A compressed scalar, in canonical form.
pub const CompressedScalar = [encoded_length]u8;
/// Field arithmetic modulo the P-256 group order (note: the *group order*,
/// not the curve's prime modulus), backed by fiat-crypto generated code.
const Fe = Field(.{
    .fiat = @import("p256_scalar_64.zig"),
    .field_order = 115792089210356248762697446949407573529996955224135760342422259061068512044369,
    .field_bits = 256,
    .saturated_bits = 255,
    .encoded_length = encoded_length,
});
/// Reject a scalar whose encoding is not canonical.
/// Thin wrapper over the field element's canonical-form check.
pub fn rejectNonCanonical(s: CompressedScalar, endian: builtin.Endian) NonCanonicalError!void {
    return Fe.rejectNonCanonical(s, endian);
}
/// Reduce a 48-bytes scalar to the field size.
pub fn reduce48(s: [48]u8, endian: builtin.Endian) CompressedScalar {
    return Scalar.fromBytes48(s, endian).toBytes(endian);
}
/// Reduce a 64-bytes scalar to the field size.
pub fn reduce64(s: [64]u8, endian: builtin.Endian) CompressedScalar {
    // ScalarDouble has no `fromBytes64`/`toBytes`; go through Scalar,
    // which wraps ScalarDouble internally — mirrors reduce48 above.
    return Scalar.fromBytes64(s, endian).toBytes(endian);
}
/// Return a*b (mod L)
/// Errors if either input encoding is non-canonical.
pub fn mul(a: CompressedScalar, b: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
    return (try Scalar.fromBytes(a, endian)).mul(try Scalar.fromBytes(b, endian)).toBytes(endian);
}
/// Return a*b+c (mod L)
/// Errors if any input encoding is non-canonical.
pub fn mulAdd(a: CompressedScalar, b: CompressedScalar, c: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
    return (try Scalar.fromBytes(a, endian)).mul(try Scalar.fromBytes(b, endian)).add(try Scalar.fromBytes(c, endian)).toBytes(endian);
}
/// Return a+b (mod L)
/// Errors if either input encoding is non-canonical.
pub fn add(a: CompressedScalar, b: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
    return (try Scalar.fromBytes(a, endian)).add(try Scalar.fromBytes(b, endian)).toBytes(endian);
}
/// Return -s (mod L)
/// Errors if the input encoding is non-canonical.
pub fn neg(s: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
    // The parameter is `s`; the original referenced an undefined `a`.
    return (try Scalar.fromBytes(s, endian)).neg().toBytes(endian);
}
/// Return (a-b) (mod L)
/// Errors if either input encoding is non-canonical.
pub fn sub(a: CompressedScalar, b: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
    // `fromBytes(b.endian)` was a typo for `fromBytes(b, endian)`;
    // mirrors `add` above.
    return (try Scalar.fromBytes(a, endian)).sub(try Scalar.fromBytes(b, endian)).toBytes(endian);
}
/// Return a random scalar
/// (uniform, non-zero; see Scalar.random for the rejection loop).
pub fn random(endian: builtin.Endian) CompressedScalar {
    return Scalar.random().toBytes(endian);
}
/// A scalar in unpacked representation.
pub const Scalar = struct {
    fe: Fe,
    /// Zero.
    pub const zero = Scalar{ .fe = Fe.zero };
    /// One.
    pub const one = Scalar{ .fe = Fe.one };
    /// Unpack a serialized representation of a scalar.
    pub fn fromBytes(s: CompressedScalar, endian: builtin.Endian) NonCanonicalError!Scalar {
        return Scalar{ .fe = try Fe.fromBytes(s, endian) };
    }
    /// Reduce a 384 bit input to the field size.
    pub fn fromBytes48(s: [48]u8, endian: builtin.Endian) Scalar {
        const t = ScalarDouble.fromBytes(384, s, endian);
        return t.reduce(384);
    }
    /// Reduce a 512 bit input to the field size.
    pub fn fromBytes64(s: [64]u8, endian: builtin.Endian) Scalar {
        const t = ScalarDouble.fromBytes(512, s, endian);
        return t.reduce(512);
    }
    /// Pack a scalar into bytes.
    pub fn toBytes(n: Scalar, endian: builtin.Endian) CompressedScalar {
        return n.fe.toBytes(endian);
    }
    /// Return true if the scalar is zero.
    pub fn isZero(n: Scalar) bool {
        return n.fe.isZero();
    }
    /// Return true if a and b are equivalent.
    pub fn equivalent(a: Scalar, b: Scalar) bool {
        return a.fe.equivalent(b.fe);
    }
    /// Compute x+y (mod L)
    pub fn add(x: Scalar, y: Scalar) Scalar {
        return Scalar{ .fe = x.fe.add(y.fe) };
    }
    /// Compute x-y (mod L)
    pub fn sub(x: Scalar, y: Scalar) Scalar {
        return Scalar{ .fe = x.fe.sub(y.fe) };
    }
    /// Compute 2n (mod L)
    pub fn dbl(n: Scalar) Scalar {
        return Scalar{ .fe = n.fe.dbl() };
    }
    /// Compute x*y (mod L)
    pub fn mul(x: Scalar, y: Scalar) Scalar {
        return Scalar{ .fe = x.fe.mul(y.fe) };
    }
    /// Compute x^2 (mod L)
    pub fn sq(n: Scalar) Scalar {
        return Scalar{ .fe = n.fe.sq() };
    }
    /// Compute x^n (mod L)
    pub fn pow(a: Scalar, comptime T: type, comptime n: T) Scalar {
        return Scalar{ .fe = a.fe.pow(n) };
    }
    /// Compute -x (mod L)
    pub fn neg(n: Scalar) Scalar {
        return Scalar{ .fe = n.fe.neg() };
    }
    /// Compute x^-1 (mod L)
    pub fn invert(n: Scalar) Scalar {
        return Scalar{ .fe = n.fe.invert() };
    }
    /// Return true if n is a quadratic residue mod L.
    pub fn isSquare(n: Scalar) bool {
        // The return type was declared `Scalar`, but `fe.isSquare()` is the
        // boolean answer itself (per the doc comment), not a field element.
        return n.fe.isSquare();
    }
    /// Return the square root of n, or NotSquare if there isn't any solution.
    pub fn sqrt(n: Scalar) NotSquareError!Scalar {
        return Scalar{ .fe = try n.fe.sqrt() };
    }
    /// Return a random scalar < L.
    pub fn random() Scalar {
        // Rejection-sample 48-byte strings until the reduced value is
        // non-zero; 384 bits keeps the reduction bias negligible.
        var s: [48]u8 = undefined;
        while (true) {
            crypto.random.bytes(&s);
            const n = Scalar.fromBytes48(s, .Little);
            if (!n.isZero()) {
                return n;
            }
        }
    }
};
/// A scalar split into three 192-bit limbs (value = x1 + x2*2^192 + x3*2^384),
/// used to reduce oversized (up to 512-bit) inputs into the field.
const ScalarDouble = struct {
    x1: Fe,
    x2: Fe,
    x3: Fe,
    // Splits the (little-endian, after optional byte swap) input into
    // 24-byte chunks, each lifted into a field element.
    fn fromBytes(comptime bits: usize, s_: [bits / 8]u8, endian: builtin.Endian) ScalarDouble {
        debug.assert(bits > 0 and bits <= 512 and bits >= Fe.saturated_bits and bits <= Fe.saturated_bits * 3);
        var s = s_;
        if (endian == .Big) {
            // Reverse into little-endian order.
            for (s_) |x, i| s[s.len - 1 - i] = x;
        }
        var t = ScalarDouble{ .x1 = undefined, .x2 = Fe.zero, .x3 = Fe.zero };
        {
            // Low limb: bytes [0, 24).
            var b = [_]u8{0} ** encoded_length;
            const len = math.min(s.len, 24);
            mem.copy(u8, b[0..len], s[0..len]);
            t.x1 = Fe.fromBytes(b, .Little) catch unreachable;
        }
        if (s_.len >= 24) {
            // Middle limb: bytes [24, 48).
            var b = [_]u8{0} ** encoded_length;
            const len = math.min(s.len - 24, 24);
            mem.copy(u8, b[0..len], s[24..][0..len]);
            t.x2 = Fe.fromBytes(b, .Little) catch unreachable;
        }
        if (s_.len >= 48) {
            // High limb: bytes [48, end).
            var b = [_]u8{0} ** encoded_length;
            const len = s.len - 48;
            mem.copy(u8, b[0..len], s[48..][0..len]);
            t.x3 = Fe.fromBytes(b, .Little) catch unreachable;
        }
        return t;
    }
    // Recombines the limbs in the field: x1 + x2*2^192 (+ x3*2^384).
    fn reduce(expanded: ScalarDouble, comptime bits: usize) Scalar {
        debug.assert(bits > 0 and bits <= Fe.saturated_bits * 3 and bits <= 512);
        var fe = expanded.x1;
        if (bits >= 192) {
            const st1 = Fe.fromInt(1 << 192) catch unreachable;
            fe = fe.add(expanded.x2.mul(st1));
            if (bits >= 384) {
                const st2 = st1.sq();
                fe = fe.add(expanded.x3.mul(st2));
            }
        }
        return Scalar{ .fe = fe };
    }
};
|
lib/std/crypto/pcurves/p256/scalar.zig
|
const Plan9 = @This();
const link = @import("../link.zig");
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
const aout = @import("Plan9/aout.zig");
const codegen = @import("../codegen.zig");
const trace = @import("../tracy.zig").trace;
const File = link.File;
const build_options = @import("build_options");
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
const std = @import("std");
const builtin = @import("builtin");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.link);
const assert = std.debug.assert;
// This file is a struct (`Plan9 = @This()`); the following are its fields.
base: link.File,
// True when the target pointer width is 64 bits (see createEmpty).
sixtyfour_bit: bool,
error_flags: File.ErrorFlags = File.ErrorFlags{},
// Section base addresses for the target (see defaultBaseAddrs).
bases: Bases,
/// A symbol's value is just casted down when compiling
/// for a 32 bit target.
syms: std.ArrayListUnmanaged(aout.Sym) = .{},
// Generated machine code per function decl; the slices are owned by this
// linker (freed in deinit / on overwrite).
fn_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{},
// Generated bytes per data decl; owned by this linker.
data_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{},
hdr: aout.ExecHdr = undefined,
// Set by writeSyms when a "_start" symbol is seen.
entry_val: ?u64 = null,
got_len: usize = 0,
const Bases = struct {
    text: u64,
    /// the Global Offset Table starts at the beginning of the data section
    data: u64,
};
/// Translates a section-relative offset into a load address by adding the
/// base of the section implied by the symbol type.
fn getAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 {
    const base = switch (t) {
        .T, .t, .l, .L => self.bases.text,
        .D, .d, .B, .b => self.bases.data,
        else => unreachable,
    };
    return base + addr;
}
/// Returns the load address of symbol `s` (its section-relative value plus
/// the matching section base; see getAddr).
fn getSymAddr(self: Plan9, s: aout.Sym) u64 {
    return self.getAddr(s.value, s.type);
}
/// Per-decl linker state, stored on each Module.Decl as `link.plan9`.
pub const DeclBlock = struct {
    type: aout.Sym.Type,
    /// offset in the text or data sects
    offset: ?u64,
    /// offset into syms
    sym_index: ?usize,
    /// offset into got
    got_index: ?usize,
    // Initial state before any index has been allocated.
    pub const empty = DeclBlock{
        .type = .t,
        .offset = null,
        .sym_index = null,
        .got_index = null,
    };
};
/// Default text/data base addresses per architecture. The text base leaves
/// room for the a.out header (40 bytes => 0x28 on 64-bit targets, 32 bytes
/// => 0x20 on 32-bit).
pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
    const text: u64 = switch (arch) {
        // header size => 40 => 0x28
        .x86_64 => 0x200028,
        // header size => 32 => 0x20
        .i386 => 0x200020,
        // header size => 40 => 0x28
        .aarch64 => 0x10028,
        else => std.debug.panic("find default base address for {}", .{arch}),
    };
    const data: u64 = switch (arch) {
        .x86_64, .i386 => 0x400000,
        .aarch64 => 0x20000,
        // Unknown arches already panicked above.
        else => unreachable,
    };
    return Bases{ .text = text, .data = data };
}
/// Pointer width of the target, used to pick 32- vs 64-bit layouts.
pub const PtrWidth = enum { p32, p64 };
/// Allocates a Plan9 linker with no open file and undefined base addresses
/// (callers such as openPath fill those in). Caller owns the result.
pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Plan9 {
    // The LLVM backend has no Plan 9 support.
    if (options.use_llvm)
        return error.LLVMBackendDoesNotSupportPlan9;
    const ptr_bits = options.target.cpu.arch.ptrBitWidth();
    const is_64: bool = switch (ptr_bits) {
        0...32 => false,
        33...64 => true,
        else => return error.UnsupportedP9Architecture,
    };
    const plan9 = try gpa.create(Plan9);
    plan9.* = .{
        .base = .{
            .tag = .plan9,
            .options = options,
            .allocator = gpa,
            .file = null,
        },
        .sixtyfour_bit = is_64,
        .bases = undefined,
    };
    return plan9;
}
/// Generates machine code for `func` and stores it in fn_decl_table (the
/// table owns the slice). Symbol bookkeeping happens in updateFinish;
/// final addresses are assigned later, in flushModule.
pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
    if (build_options.skip_non_native and builtin.object_format != .plan9) {
        @panic("Attempted to compile for object format that was disabled by build configuration");
    }
    const decl = func.owner_decl;
    log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
    defer code_buffer.deinit();
    const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ .none = .{} });
    const code = switch (res) {
        // toOwnedSlice transfers ownership, so the deferred deinit frees nothing.
        .appended => code_buffer.toOwnedSlice(),
        .fail => |em| {
            decl.analysis = .codegen_failure;
            try module.failed_decls.put(module.gpa, decl, em);
            return;
        },
    };
    // NOTE(review): `put` overwrites any previous entry for `decl` without
    // freeing the old code slice — confirm whether that can leak here.
    try self.fn_decl_table.put(self.base.allocator, decl, code);
    return self.updateFinish(decl);
}
/// Generates the bytes for a non-function decl and stores a duped copy in
/// data_decl_table (the table owns the dupe). Extern decls are skipped.
pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void {
    if (decl.val.tag() == .extern_fn) {
        return; // TODO Should we do more when front-end analyzed extern decl?
    }
    if (decl.val.castTag(.variable)) |payload| {
        const variable = payload.data;
        if (variable.is_extern) {
            return; // TODO Should we do more when front-end analyzed extern decl?
        }
    }
    log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
    defer code_buffer.deinit();
    // For variables, emit the initializer value rather than the variable itself.
    const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
    const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
        .ty = decl.ty,
        .val = decl_val,
    }, &code_buffer, .{ .none = .{} });
    const code = switch (res) {
        .externally_managed => |x| x,
        .appended => code_buffer.items,
        .fail => |em| {
            decl.analysis = .codegen_failure;
            try module.failed_decls.put(module.gpa, decl, em);
            return;
        },
    };
    // Dupe so the table owns stable bytes in both result cases above.
    var duped_code = try std.mem.dupe(self.base.allocator, u8, code);
    errdefer self.base.allocator.free(duped_code);
    // NOTE(review): `put` overwrites any previous entry for `decl` without
    // freeing the old slice — confirm whether that can leak here.
    try self.data_decl_table.put(self.base.allocator, decl, duped_code);
    return self.updateFinish(decl);
}
/// Called at the end of updateDecl/updateFunc: records linker metadata and
/// the symbol-table entry for `decl`. The symbol's value is filled in
/// later, in flushModule.
fn updateFinish(self: *Plan9, decl: *Module.Decl) !void {
    const symbol_type: aout.Sym.Type = if (decl.ty.zigTypeTag() == .Fn) .t else .d;
    log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name });
    // write the internal linker metadata
    decl.link.plan9.type = symbol_type;
    // The got index was already allocated in allocateDeclIndexes.
    const sym = aout.Sym{
        .value = undefined, // the value of stuff gets filled in in flushModule
        .type = decl.link.plan9.type,
        .name = mem.span(decl.name),
    };
    if (decl.link.plan9.sym_index) |existing_index| {
        self.syms.items[existing_index] = sym;
    } else {
        try self.syms.append(self.base.allocator, sym);
        decl.link.plan9.sym_index = self.syms.items.len - 1;
    }
}
/// Top-level flush entry point. This backend only links executables
/// directly; lld is never used for Plan 9.
pub fn flush(self: *Plan9, comp: *Compilation) !void {
    assert(!self.base.options.use_lld);
    switch (self.base.options.effectiveOutputMode()) {
        .Exe => {},
        // plan9 object files are totally different
        .Obj => return error.TODOImplementPlan9Objs,
        .Lib => return error.TODOImplementWritingLibFiles,
    }
    return self.flushModule(comp);
}
/// Lays out text, GOT, data, and the symbol table, then writes the whole
/// a.out image with one pwritev call. Also patches decl offsets, GOT
/// entries, and the edata/end/etext symbols.
pub fn flushModule(self: *Plan9, comp: *Compilation) !void {
    if (build_options.skip_non_native and builtin.object_format != .plan9) {
        @panic("Attempted to compile for object format that was disabled by build configuration");
    }
    _ = comp;
    const tracy = trace(@src());
    defer tracy.end();
    log.debug("flushModule", .{});
    defer assert(self.hdr.entry != 0x0);
    const mod = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
    // TODO I changed this assert from == to >= but this code all needs to be audited; see
    // the comment in `freeDecl`.
    assert(self.got_len >= self.fn_decl_table.count() + self.data_decl_table.count());
    const got_size = self.got_len * if (!self.sixtyfour_bit) @as(u32, 4) else 8;
    var got_table = try self.base.allocator.alloc(u8, got_size);
    defer self.base.allocator.free(got_table);
    // + 3 for the header, the got, and the symbol table
    var iovecs = try self.base.allocator.alloc(std.os.iovec_const, self.fn_decl_table.count() + self.data_decl_table.count() + 3);
    defer self.base.allocator.free(iovecs);
    const file = self.base.file.?;
    var hdr_buf: [40]u8 = undefined;
    // account for the fat header
    const hdr_size = if (self.sixtyfour_bit) @as(usize, 40) else 32;
    const hdr_slice: []u8 = hdr_buf[0..hdr_size];
    var foff = hdr_size;
    iovecs[0] = .{ .iov_base = hdr_slice.ptr, .iov_len = hdr_slice.len };
    var iovecs_i: usize = 1;
    var text_i: u64 = 0;
    // text
    {
        var it = self.fn_decl_table.iterator();
        while (it.next()) |entry| {
            const decl = entry.key_ptr.*;
            const code = entry.value_ptr.*;
            log.debug("write text decl {*} ({s})", .{ decl, decl.name });
            foff += code.len;
            iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len };
            iovecs_i += 1;
            const off = self.getAddr(text_i, .t);
            text_i += code.len;
            decl.link.plan9.offset = off;
            // Patch the GOT entry in the target's endianness. (A redundant
            // writeIntNative to the same bytes was removed here; the data
            // loop below also writes each entry exactly once.)
            if (!self.sixtyfour_bit) {
                mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
            } else {
                mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
            }
            self.syms.items[decl.link.plan9.sym_index.?].value = off;
            if (mod.decl_exports.get(decl)) |exports| {
                try self.addDeclExports(mod, decl, exports);
            }
        }
        // etext symbol
        self.syms.items[2].value = self.getAddr(text_i, .t);
    }
    // global offset table is in data
    iovecs[iovecs_i] = .{ .iov_base = got_table.ptr, .iov_len = got_table.len };
    iovecs_i += 1;
    // data
    var data_i: u64 = got_size;
    {
        var it = self.data_decl_table.iterator();
        while (it.next()) |entry| {
            const decl = entry.key_ptr.*;
            const code = entry.value_ptr.*;
            log.debug("write data decl {*} ({s})", .{ decl, decl.name });
            foff += code.len;
            iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len };
            iovecs_i += 1;
            const off = self.getAddr(data_i, .d);
            data_i += code.len;
            decl.link.plan9.offset = off;
            if (!self.sixtyfour_bit) {
                mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
            } else {
                mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
            }
            self.syms.items[decl.link.plan9.sym_index.?].value = off;
            if (mod.decl_exports.get(decl)) |exports| {
                try self.addDeclExports(mod, decl, exports);
            }
        }
        // edata symbol (index 0; see openPath for the fixed symbol order)
        self.syms.items[0].value = self.getAddr(data_i, .b);
    }
    // end symbol (index 1)
    self.syms.items[1].value = self.getAddr(0x0, .b);
    var sym_buf = std.ArrayList(u8).init(self.base.allocator);
    defer sym_buf.deinit();
    try self.writeSyms(&sym_buf);
    assert(2 + self.fn_decl_table.count() + self.data_decl_table.count() == iovecs_i); // we didn't write all the decls
    iovecs[iovecs_i] = .{ .iov_base = sym_buf.items.ptr, .iov_len = sym_buf.items.len };
    iovecs_i += 1;
    // generate the header
    self.hdr = .{
        .magic = try aout.magicFromArch(self.base.options.target.cpu.arch),
        .text = @intCast(u32, text_i),
        .data = @intCast(u32, data_i),
        .syms = @intCast(u32, sym_buf.items.len),
        .bss = 0,
        .pcsz = 0,
        .spsz = 0,
        .entry = @intCast(u32, self.entry_val.?),
    };
    std.mem.copy(u8, hdr_slice, self.hdr.toU8s()[0..hdr_size]);
    // write the fat header for 64 bit entry points
    if (self.sixtyfour_bit) {
        mem.writeIntSliceBig(u64, hdr_buf[32..40], self.entry_val.?);
    }
    // write it all!
    try file.pwritevAll(iovecs, 0);
}
/// Records (or updates) a symbol-table entry for each export of `decl`.
/// Exports into sections other than .text/.data are reported as errors.
fn addDeclExports(
    self: *Plan9,
    module: *Module,
    decl: *Module.Decl,
    exports: []const *Module.Export,
) !void {
    for (exports) |exp| {
        // plan9 does not support custom sections
        if (exp.options.section) |section_name| {
            // BUG FIX: with `or` this condition was true for *every* named
            // section (a name can't equal both), rejecting even ".text" and
            // ".data". `and` rejects only foreign sections.
            if (!mem.eql(u8, section_name, ".text") and !mem.eql(u8, section_name, ".data")) {
                try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{}));
                break;
            }
        }
        const sym = .{
            .value = decl.link.plan9.offset.?,
            .type = decl.link.plan9.type.toGlobal(),
            .name = exp.options.name,
        };
        if (exp.link.plan9) |i| {
            self.syms.items[i] = sym;
        } else {
            try self.syms.append(self.base.allocator, sym);
            exp.link.plan9 = self.syms.items.len - 1;
        }
    }
}
/// Drops the generated-code table entry for `decl`, if any.
pub fn freeDecl(self: *Plan9, decl: *Module.Decl) void {
    // TODO this is not the correct check for being function body,
    // it could just be a function pointer.
    // TODO audit the lifetimes of decls table entries. It's possible to get
    // allocateDeclIndexes and then freeDecl without any updateDecl in between.
    // However that is planned to change, see the TODO comment in Module.zig
    // in the deleteUnusedDecl function.
    if (decl.ty.zigTypeTag() == .Fn) {
        _ = self.fn_decl_table.swapRemove(decl);
    } else {
        _ = self.data_decl_table.swapRemove(decl);
    }
}
/// Intentionally a no-op: exports are processed during flushModule
/// (see addDeclExports).
pub fn updateDeclExports(
    self: *Plan9,
    module: *Module,
    decl: *Module.Decl,
    exports: []const *Module.Export,
) !void {
    // we do all the things in flush
    _ = self;
    _ = module;
    _ = decl;
    _ = exports;
}
/// Frees all code slices owned by the decl tables, then the tables and the
/// symbol list themselves.
pub fn deinit(self: *Plan9) void {
    const gpa = self.base.allocator;
    var fn_it = self.fn_decl_table.iterator();
    while (fn_it.next()) |kv| {
        gpa.free(kv.value_ptr.*);
    }
    self.fn_decl_table.deinit(gpa);
    var data_it = self.data_decl_table.iterator();
    while (data_it.next()) |kv| {
        gpa.free(kv.value_ptr.*);
    }
    self.data_decl_table.deinit(gpa);
    self.syms.deinit(gpa);
}
/// Linker state stored on a Module.Export: index into `syms`, or null
/// before the export has been assigned a symbol.
pub const Export = ?usize;
/// Tag identifying this backend to link.File.
pub const base_tag = .plan9;
/// Creates (or reopens, without truncating) the output file at `sub_path`
/// and returns a Plan9 linker initialized with default base addresses and
/// the three standard symbols. Caller owns the result.
pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
    if (options.use_llvm)
        return error.LLVMBackendDoesNotSupportPlan9;
    assert(options.object_format == .plan9);
    const file = try options.emit.?.directory.handle.createFile(sub_path, .{
        .truncate = false,
        .read = true,
        .mode = link.determineMode(options),
    });
    errdefer file.close();
    const self = try createEmpty(allocator, options);
    errdefer self.base.destroy();
    self.bases = defaultBaseAddrs(options.target.cpu.arch);
    // first 3 symbols in our table are edata, end, etext
    // (flushModule patches their values by these fixed indices: 0, 1, 2;
    // 0xcafebabe is a placeholder until then)
    try self.syms.appendSlice(self.base.allocator, &.{
        .{
            .value = 0xcafebabe,
            .type = .B,
            .name = "edata",
        },
        .{
            .value = 0xcafebabe,
            .type = .B,
            .name = "end",
        },
        .{
            .value = 0xcafebabe,
            .type = .T,
            .name = "etext",
        },
    });
    self.base.file = file;
    return self;
}
/// Serializes the symbol table in a.out format (big-endian value, type
/// byte, NUL-terminated name) into `buf`. As a side effect, records the
/// value of "_start" as the image entry point.
pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
    const writer = buf.writer();
    for (self.syms.items) |sym| {
        log.debug("sym.name: {s}", .{sym.name});
        log.debug("sym.value: {x}", .{sym.value});
        if (mem.eql(u8, sym.name, "_start"))
            self.entry_val = sym.value;
        if (self.sixtyfour_bit) {
            try writer.writeIntBig(u64, sym.value);
        } else {
            try writer.writeIntBig(u32, @intCast(u32, sym.value));
        }
        try writer.writeByte(@enumToInt(sym.type));
        try writer.writeAll(sym.name);
        try writer.writeByte(0);
    }
}
/// Reserves a GOT slot for `decl` the first time it is seen; subsequent
/// calls are no-ops.
pub fn allocateDeclIndexes(self: *Plan9, decl: *Module.Decl) !void {
    if (decl.link.plan9.got_index != null) return;
    decl.link.plan9.got_index = self.got_len;
    self.got_len += 1;
}
|
src/link/Plan9.zig
|
const std = @import("std");
const stdin = std.io.getStdIn().reader();
const stdout = std.io.getStdOut().writer();
const eql = std.mem.eql;
const interpreter = @import("./interpreter.zig");
const Forth = interpreter.ForthInterpreter;
const ForthError = interpreter.InterpreterError;
const Word = @import("./word.zig").Word;
/// Entry point: with no argument, runs a stdin REPL; with a path argument,
/// interprets that file instead.
pub fn main() anyerror!void {
    var args = std.process.args();
    defer args.deinit();
    // Skip argv[0] (the executable itself).
    _ = args.nextPosix();
    if (args.nextPosix()) |path| {
        // seems like a file!
        var file = try std.fs.cwd().openFile(path, .{});
        defer file.close();
        try runStream(file.reader());
    } else {
        try runStream(stdin);
        try stdout.print("\n", .{});
    }
}
// Reads Forth source line by line from `reader` and feeds it to a fresh
// interpreter. Returns on EOF; the Bye word stops the current line.
fn runStream(reader: std.io.Reader(std.fs.File, std.os.ReadError, std.fs.File.read)) !void {
    var g = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = g.deinit();
    var vm = Forth(2000).init(g.allocator());
    defer vm.deinit();
    var buf: [1024]u8 = undefined;
    var in_comment: bool = false;
    var in_define: bool = false;
    // Words collected between ':' and ';' for the definition in progress.
    var seq = std.ArrayList(Word).init(g.allocator());
    // NOTE(review): newWord is a slice into `buf`, which the next
    // readUntilDelimiterOrEof overwrites; a ':' ... ';' definition spanning
    // multiple lines would record a stale name — confirm definitions are
    // single-line, or dupe the name.
    var newWord: []const u8 = undefined;
    while (true) {
        const code = (try reader.readUntilDelimiterOrEof(&buf, '\n')) orelse {
            // No input, probably CTRL-d/EOF.
            return;
        };
        var words = std.mem.tokenize(u8, code, " ");
        var wordIt = words.next();
        while (wordIt != null) : (wordIt = words.next()) {
            if (eql(u8, wordIt.?, "(")) {
                // Comment opener; the in_comment check below skips it too.
                in_comment = true;
            } else if (eql(u8, wordIt.?, ")")) {
                in_comment = false;
                continue;
            } else if (eql(u8, wordIt.?, ":")) {
                // Start of a definition: the next token is the word's name.
                // NOTE(review): this leaks the previous `seq` if an earlier
                // define was never closed, and `.?` panics when ':' ends
                // the line — confirm that's acceptable for this REPL.
                in_define = true;
                seq = std.ArrayList(Word).init(g.allocator());
                newWord = words.next().?;
                continue;
            } else if (eql(u8, wordIt.?, ";")) {
                // End of definition: hand the word list's memory to the vm.
                in_define = false;
                try vm.record(newWord, seq.toOwnedSlice());
                continue;
            }
            if (in_comment) {
                continue;
            }
            if (in_define) {
                try seq.append(try Word.fromString(wordIt.?));
                continue;
            }
            const word = try Word.fromString(wordIt.?);
            if (vm.run(word)) {} else |err| switch (err) {
                ForthError.Bye => break,
                else => return err,
            }
        }
        try stdout.print("ok\n", .{});
    }
}
|
src/main.zig
|
const std = @import("std");
const mem = std.mem;
const ascii = std.ascii;
const assert = std.debug.assert;
const zunicode = @import("zunicode");
const nodes = @import("nodes.zig");
const strings = @import("strings.zig");
const Options = @import("options.zig").Options;
const scanners = @import("scanners.zig");
const Reference = @import("parser.zig").Reference;
const MAX_BACKTICKS = 80;
const MAX_LINK_LABEL_LENGTH = 1000;
pub const ParseError = error{ OutOfMemory, InvalidUtf8 };
pub const Subject = struct {
allocator: mem.Allocator,
refmap: *std.StringHashMap(Reference),
options: *const Options,
input: []const u8,
pos: usize = 0,
last_delimiter: ?*Delimiter = null,
brackets: std.ArrayList(Bracket),
backticks: [MAX_BACKTICKS + 1]usize = [_]usize{0} ** (MAX_BACKTICKS + 1),
scanned_for_backticks: bool = false,
special_chars: *const [256]bool,
skip_chars: *const [256]bool,
pub fn init(allocator: mem.Allocator, refmap: *std.StringHashMap(Reference), options: *const Options, special_chars: *const [256]bool, skip_chars: *const [256]bool, input: []const u8) Subject {
var s = Subject{
.allocator = allocator,
.refmap = refmap,
.options = options,
.input = input,
.brackets = std.ArrayList(Bracket).init(allocator),
.special_chars = special_chars,
.skip_chars = skip_chars,
};
return s;
}
pub fn setCharsForOptions(options: *const Options, special_chars: *[256]bool, skip_chars: *[256]bool) void {
for ([_]u8{ '\n', '\r', '_', '*', '"', '`', '\'', '\\', '&', '<', '[', ']', '!' }) |c| {
special_chars.*[c] = true;
}
if (options.extensions.strikethrough) {
special_chars.*['~'] = true;
skip_chars.*['~'] = true;
}
if (options.parse.smart) {
for ([_]u8{ '"', '\'', '.', '-' }) |c| {
special_chars.*[c] = true;
}
}
}
pub fn deinit(self: *Subject) void {
self.brackets.deinit();
}
/// Parses one inline element starting at the current position and appends
/// it to `node`. Returns false when the input is exhausted.
pub fn parseInline(self: *Subject, node: *nodes.AstNode) ParseError!bool {
    const c = self.peekChar() orelse return false;
    var new_inl: ?*nodes.AstNode = null;
    // Dispatch on the first byte; anything not special becomes a Text run.
    switch (c) {
        0 => return false,
        '\n', '\r' => new_inl = try self.handleNewLine(),
        '`' => new_inl = try self.handleBackticks(),
        '\\' => new_inl = try self.handleBackslash(),
        '&' => new_inl = try self.handleEntity(),
        '<' => new_inl = try self.handlePointyBrace(),
        '*', '_', '\'', '"' => new_inl = try self.handleDelim(c),
        '-' => new_inl = try self.handleHyphen(),
        '.' => new_inl = try self.handlePeriod(),
        '[' => {
            // Open a potential link; resolved when a matching ']' is seen.
            self.pos += 1;
            var inl = try self.makeInline(.{ .Text = try self.allocator.dupe(u8, "[") });
            try self.pushBracket(.Link, inl);
            new_inl = inl;
        },
        ']' => new_inl = try self.handleCloseBracket(),
        '!' => {
            // "![" opens an image bracket; a lone '!' is plain text.
            self.pos += 1;
            if (self.peekChar() orelse 0 == '[') {
                self.pos += 1;
                var inl = try self.makeInline(.{ .Text = try self.allocator.dupe(u8, "![") });
                try self.pushBracket(.Image, inl);
                new_inl = inl;
            } else {
                new_inl = try self.makeInline(.{ .Text = try self.allocator.dupe(u8, "!") });
            }
        },
        else => {
            if (self.options.extensions.strikethrough and c == '~') {
                new_inl = try self.handleDelim(c);
            } else {
                // Plain text up to the next special character; spaces just
                // before a line ending are trimmed off the run.
                const endpos = self.findSpecialChar();
                var contents = self.input[self.pos..endpos];
                self.pos = endpos;
                if (self.peekChar()) |n| {
                    if (strings.isLineEndChar(n)) {
                        contents = strings.rtrim(contents);
                    }
                }
                new_inl = try self.makeInline(.{ .Text = try self.allocator.dupe(u8, contents) });
            }
        },
    }
    if (new_inl) |inl| {
        node.append(inl);
    }
    return true;
}
/// Allocates a new AST node holding `value`. Inline nodes carry an empty
/// content buffer; it exists only to satisfy the shared node layout.
fn makeInline(self: *Subject, value: nodes.NodeValue) !*nodes.AstNode {
    const empty_content = std.ArrayList(u8).init(self.allocator);
    return nodes.AstNode.create(self.allocator, .{
        .value = value,
        .content = empty_content,
    });
}
/// Builds a Link node for an autolink (`<uri>` / `<email>`): the destination
/// is the cleaned URL and the single child is the HTML-unescaped URL text.
fn makeAutolink(self: *Subject, url: []const u8, kind: nodes.AutolinkType) !*nodes.AstNode {
    const cleaned = try strings.cleanAutolink(self.allocator, url, kind);
    const link = try self.makeInline(.{
        .Link = .{
            .url = cleaned,
            .title = "",
        },
    });
    const label = try self.makeInline(.{ .Text = try strings.unescapeHtml(self.allocator, url) });
    link.append(label);
    return link;
}
/// Runs the CommonMark delimiter-matching algorithm over the delimiter
/// stack, turning matched runs into Emph/Strong (and Strikethrough) nodes
/// and smart quotes into their curly forms. Only delimiters above
/// `stack_bottom` are considered; everything at or below it is left alone.
pub fn processEmphasis(self: *Subject, stack_bottom: ?*Delimiter) !void {
    var closer = self.last_delimiter;
    // openers_bottom[run_length % 3][delim_char] is the lowest delimiter
    // that may still act as an opener for that char/length class; seeding
    // with stack_bottom prevents matching below the barrier.
    var openers_bottom: [3][128]?*Delimiter = [_][128]?*Delimiter{[_]?*Delimiter{null} ** 128} ** 3;
    for (openers_bottom) |*i| {
        i['*'] = stack_bottom;
        i['_'] = stack_bottom;
        i['\''] = stack_bottom;
        i['"'] = stack_bottom;
        if (self.options.extensions.strikethrough)
            i['~'] = stack_bottom;
    }
    // Rewind to the first delimiter above stack_bottom.
    while (closer != null and closer.?.prev != stack_bottom) {
        closer = closer.?.prev;
    }
    while (closer != null) {
        if (closer.?.can_close) {
            // Walk backwards looking for a compatible opener.
            var opener = closer.?.prev;
            var opener_found = false;
            while (opener != null and opener != openers_bottom[closer.?.length % 3][closer.?.delim_char]) {
                if (opener.?.can_open and opener.?.delim_char == closer.?.delim_char) {
                    // CommonMark "rule of three": when either side could both
                    // open and close, combined lengths divisible by 3 only
                    // match if both lengths are themselves multiples of 3.
                    const odd_match = (closer.?.can_open or opener.?.can_close) and ((opener.?.length + closer.?.length) % 3 == 0) and !(opener.?.length % 3 == 0 and closer.?.length % 3 == 0);
                    if (!odd_match) {
                        opener_found = true;
                        break;
                    }
                }
                opener = opener.?.prev;
            }
            var old_closer = closer;
            if (closer.?.delim_char == '*' or closer.?.delim_char == '_' or
                (self.options.extensions.strikethrough and closer.?.delim_char == '~'))
            {
                if (opener_found) {
                    closer = try self.insertEmph(opener.?, closer.?);
                } else {
                    closer = closer.?.next;
                }
            } else if (closer.?.delim_char == '\'') {
                // Smart single quotes: rewrite the text in place.
                var al = closer.?.inl.data.value.text_mut().?;
                self.allocator.free(al.*);
                al.* = try self.allocator.dupe(u8, "’");
                if (opener_found) {
                    al = opener.?.inl.data.value.text_mut().?;
                    self.allocator.free(al.*);
                    al.* = try self.allocator.dupe(u8, "‘");
                }
                closer = closer.?.next;
            } else if (closer.?.delim_char == '"') {
                // Smart double quotes.
                var al = closer.?.inl.data.value.text_mut().?;
                self.allocator.free(al.*);
                al.* = try self.allocator.dupe(u8, "”");
                if (opener_found) {
                    al = opener.?.inl.data.value.text_mut().?;
                    self.allocator.free(al.*);
                    al.* = try self.allocator.dupe(u8, "“");
                }
                closer = closer.?.next;
            }
            if (!opener_found) {
                // No opener exists below this closer: remember that so later
                // closers of the same class stop searching here.
                const ix = old_closer.?.length % 3;
                openers_bottom[ix][old_closer.?.delim_char] =
                    old_closer.?.prev;
                if (!old_closer.?.can_open) {
                    self.removeDelimiter(old_closer.?);
                }
            }
        } else {
            closer = closer.?.next;
        }
    }
    // Discard every delimiter above the barrier; matching is done.
    while (self.last_delimiter != null and self.last_delimiter != stack_bottom) {
        self.removeDelimiter(self.last_delimiter.?);
    }
}
/// Unlinks `delimiter` from the doubly-linked delimiter stack and frees it.
/// The inline node it pointed at is not touched.
fn removeDelimiter(self: *Subject, delimiter: *Delimiter) void {
    if (delimiter.next == null) {
        // Removing the top of the stack.
        assert(delimiter == self.last_delimiter.?);
        self.last_delimiter = delimiter.prev;
    } else {
        delimiter.next.?.prev = delimiter.prev;
    }
    if (delimiter.prev != null) {
        delimiter.prev.?.next = delimiter.next;
    }
    self.allocator.destroy(delimiter);
}
/// Discards the innermost open bracket, returning true if one existed.
pub fn popBracket(self: *Subject) bool {
    const popped = self.brackets.popOrNull();
    return popped != null;
}
/// True once the whole input has been consumed.
fn eof(self: *Subject) bool {
    return !(self.pos < self.input.len);
}
/// Returns the byte at the current position without consuming it, or null
/// at end of input.
pub fn peekChar(self: *Subject) ?u8 {
    return self.peekCharN(0);
}
/// Looks ahead `n` bytes without consuming; null past the end of input.
fn peekCharN(self: *Subject, n: usize) ?u8 {
    const idx = self.pos + n;
    if (idx >= self.input.len) {
        return null;
    }
    const c = self.input[idx];
    // NUL bytes are assumed to have been stripped before inline parsing.
    assert(c > 0);
    return c;
}
/// Skips optional spaces/tabs, at most one line ending, then more
/// spaces/tabs ("space-newline" in the CommonMark grammar).
pub fn spnl(self: *Subject) void {
    self.skipSpaces();
    if (self.skipLineEnd())
        self.skipSpaces();
}
/// Returns the index of the next byte that needs special handling, or
/// `input.len` if none remains. Does not move the parse position.
fn findSpecialChar(self: *Subject) usize {
    var idx = self.pos;
    while (idx < self.input.len and !self.special_chars[self.input[idx]]) {
        idx += 1;
    }
    return idx;
}
/// Consumes a line ending (CR, LF or CRLF) plus any leading spaces on the
/// next line. Two or more spaces before the newline make a hard LineBreak;
/// otherwise a SoftBreak is produced.
fn handleNewLine(self: *Subject) !*nodes.AstNode {
    const nlpos = self.pos;
    if (self.input[self.pos] == '\r') self.pos += 1;
    if (self.input[self.pos] == '\n') self.pos += 1;
    self.skipSpaces();
    // nlpos > 1 guards the two-byte lookback below.
    const line_break = nlpos > 1 and self.input[nlpos - 1] == ' ' and self.input[nlpos - 2] == ' ';
    return self.makeInline(if (line_break) .LineBreak else .SoftBreak);
}
/// Consumes a run of the byte `c`, returning how many were consumed.
fn takeWhile(self: *Subject, c: u8) usize {
    var count: usize = 0;
    while (self.peekChar()) |p| {
        if (p != c) break;
        self.pos += 1;
        count += 1;
    }
    return count;
}
/// Searches forward for a backtick run of exactly `openticklength` ticks.
/// Returns the position just past the closing run, or null if none exists.
/// `backticks[n]` memoizes where a run of length n was last seen, and
/// `scanned_for_backticks` marks that the rest of the input has been
/// scanned, so repeated failures do not rescan the same tail.
fn scanToClosingBacktick(self: *Subject, openticklength: usize) ?usize {
    if (openticklength > MAX_BACKTICKS) {
        return null;
    }
    if (self.scanned_for_backticks and self.backticks[openticklength] <= self.pos) {
        return null;
    }
    while (true) {
        // Skip ahead to the next backtick.
        var peeked = self.peekChar();
        while (peeked != null and peeked.? != '`') {
            self.pos += 1;
            peeked = self.peekChar();
        }
        if (self.pos >= self.input.len) {
            self.scanned_for_backticks = true;
            return null;
        }
        const numticks = self.takeWhile('`');
        if (numticks <= MAX_BACKTICKS) {
            // Record the start position of this run for the memo table.
            self.backticks[numticks] = self.pos - numticks;
        }
        if (numticks == openticklength) {
            return self.pos;
        }
    }
}
/// Parses an inline code span. If no matching closing run of backticks is
/// found, the opening ticks are emitted as literal text instead.
fn handleBackticks(self: *Subject) !*nodes.AstNode {
    const openticks = self.takeWhile('`');
    const startpos = self.pos;
    const endpos = self.scanToClosingBacktick(openticks);
    if (endpos) |end| {
        // `end` is just past the closing run; slice between the tick runs.
        const buf = self.input[startpos .. end - openticks];
        var code = try strings.normalizeCode(self.allocator, buf);
        return try self.makeInline(.{ .Code = code });
    } else {
        self.pos = startpos;
        var contents = try self.allocator.alloc(u8, openticks);
        std.mem.set(u8, contents, '`');
        return try self.makeInline(.{ .Text = contents });
    }
}
/// Advances past spaces and tabs only (never line endings).
pub fn skipSpaces(self: *Subject) void {
    while (self.peekChar()) |c| {
        switch (c) {
            ' ', '\t' => self.pos += 1,
            else => return,
        }
    }
}
/// Handles a backslash escape: escaped punctuation becomes literal text, a
/// backslash at a line ending becomes a hard LineBreak, and anything else
/// yields a literal "\".
fn handleBackslash(self: *Subject) !*nodes.AstNode {
    self.pos += 1;
    if (ascii.isPunct(self.peekChar() orelse 0)) {
        self.pos += 1;
        // The escaped character is the byte just consumed.
        var contents = try self.allocator.dupe(u8, self.input[self.pos - 1 .. self.pos]);
        return try self.makeInline(.{ .Text = contents });
    } else if (!self.eof() and self.skipLineEnd()) {
        return try self.makeInline(.LineBreak);
    } else {
        return try self.makeInline(.{ .Text = try self.allocator.dupe(u8, "\\") });
    }
}
/// Consumes one CR, LF or CRLF. Returns true if a line ending was consumed
/// or the end of input was reached.
pub fn skipLineEnd(self: *Subject) bool {
    const old_pos = self.pos;
    if (self.peekChar() orelse 0 == '\r') self.pos += 1;
    if (self.peekChar() orelse 0 == '\n') self.pos += 1;
    return self.pos > old_pos or self.eof();
}
/// Handles '&': decodes an HTML entity if one follows, otherwise emits a
/// literal ampersand as text.
fn handleEntity(self: *Subject) !*nodes.AstNode {
    self.pos += 1;
    var out = std.ArrayList(u8).init(self.allocator);
    if (try strings.unescapeInto(self.input[self.pos..], &out)) |len| {
        self.pos += len;
        return try self.makeInline(.{ .Text = out.toOwnedSlice() });
    }
    // Not a recognized entity: keep the '&' literally.
    try out.append('&');
    return try self.makeInline(.{ .Text = out.toOwnedSlice() });
}
/// Handles '<': tries, in order, a URI autolink, an email autolink, a raw
/// inline HTML tag, and finally falls back to a literal '<'.
fn handlePointyBrace(self: *Subject) !*nodes.AstNode {
    self.pos += 1;
    if (try scanners.autolinkUri(self.input[self.pos..])) |match_len| {
        // match_len includes the closing '>'; exclude it from the URL.
        var inl = try self.makeAutolink(self.input[self.pos .. self.pos + match_len - 1], .URI);
        self.pos += match_len;
        return inl;
    }
    if (try scanners.autolinkEmail(self.input[self.pos..])) |match_len| {
        var inl = try self.makeAutolink(self.input[self.pos .. self.pos + match_len - 1], .Email);
        self.pos += match_len;
        return inl;
    }
    if (try scanners.htmlTag(self.input[self.pos..])) |match_len| {
        // Keep the leading '<' (at pos - 1) in the raw HTML contents.
        var contents = self.input[self.pos - 1 .. self.pos + match_len];
        var inl = try self.makeInline(.{ .HtmlInline = try self.allocator.dupe(u8, contents) });
        self.pos += match_len;
        return inl;
    }
    return try self.makeInline(.{ .Text = try self.allocator.dupe(u8, "<") });
}
/// Handles an emphasis/quote delimiter run: emits its text (with smart
/// quotes substituted when enabled) and pushes a Delimiter entry when the
/// run is able to open or close emphasis.
fn handleDelim(self: *Subject, c: u8) !*nodes.AstNode {
    const scan = try self.scanDelims(c);
    const contents = if (c == '\'' and self.options.parse.smart)
        "’"
    else if (c == '"' and self.options.parse.smart and scan.can_close)
        "”"
    else if (c == '"' and self.options.parse.smart and !scan.can_close)
        "“"
    else
        self.input[self.pos - scan.num_delims .. self.pos];
    const inl = try self.makeInline(.{ .Text = try self.allocator.dupe(u8, contents) });
    // Quote delimiters only participate when smart punctuation is on.
    if ((scan.can_open or scan.can_close) and (!(c == '\'' or c == '"') or self.options.parse.smart)) {
        try self.pushDelimiter(c, scan.can_open, scan.can_close, inl);
    }
    return inl;
}
/// Handles '-': with smart punctuation enabled, runs of hyphens become
/// em dashes (—) and en dashes (–); otherwise a literal hyphen is emitted.
fn handleHyphen(self: *Subject) !*nodes.AstNode {
    self.pos += 1;
    var num_hyphens: usize = 1;
    if (!self.options.parse.smart or (self.peekChar() orelse 0) != '-') {
        return try self.makeInline(.{ .Text = try self.allocator.dupe(u8, "-") });
    }
    while (self.options.parse.smart and (self.peekChar() orelse 0) == '-') {
        self.pos += 1;
        num_hyphens += 1;
    }
    // Decompose the run as {en-dash count, em-dash count}: multiples of 3
    // become all em dashes, multiples of 2 all en dashes, and the remaining
    // residues mix the two so every hyphen is accounted for.
    var ens_ems = if (num_hyphens % 3 == 0)
        [2]usize{ 0, num_hyphens / 3 }
    else if (num_hyphens % 2 == 0)
        [2]usize{ num_hyphens / 2, 0 }
    else if (num_hyphens % 3 == 2)
        [2]usize{ 1, (num_hyphens - 2) / 3 }
    else
        [2]usize{ 2, (num_hyphens - 4) / 3 };
    var text = std.ArrayList(u8).init(self.allocator);
    // Em dashes are emitted before en dashes.
    while (ens_ems[1] > 0) : (ens_ems[1] -= 1)
        try text.appendSlice("—");
    while (ens_ems[0] > 0) : (ens_ems[0] -= 1)
        try text.appendSlice("–");
    return try self.makeInline(.{ .Text = text.toOwnedSlice() });
}
/// Handles '.': with smart punctuation, "..." becomes an ellipsis (…);
/// one or two dots are emitted literally.
fn handlePeriod(self: *Subject) !*nodes.AstNode {
    self.pos += 1;
    if (!self.options.parse.smart or (self.peekChar() orelse 0) != @as(u8, '.')) {
        return try self.makeInline(.{ .Text = try self.allocator.dupe(u8, ".") });
    }
    self.pos += 1;
    if ((self.peekChar() orelse 0) == @as(u8, '.')) {
        self.pos += 1;
        return try self.makeInline(.{ .Text = try self.allocator.dupe(u8, "…") });
    }
    return try self.makeInline(.{ .Text = try self.allocator.dupe(u8, "..") });
}
/// Result of classifying a delimiter run at the current position.
const ScanResult = struct {
    num_delims: usize, // number of delimiter characters consumed
    can_open: bool, // run may open emphasis (left-flanking)
    can_close: bool, // run may close emphasis (right-flanking)
};
/// Consumes a delimiter run of `c` at the current position and classifies
/// it per the CommonMark flanking rules. Quote characters consume exactly
/// one byte; other delimiters consume the whole run.
fn scanDelims(self: *Subject, c: u8) !ScanResult {
    // Find the codepoint immediately before the run, stepping back over
    // UTF-8 continuation bytes (0b10xxxxxx) and configured skip characters.
    var before_char: u21 = '\n';
    if (self.pos > 0) {
        var before_char_pos = self.pos - 1;
        while (before_char_pos > 0 and (self.input[before_char_pos] >> 6 == 2 or self.skip_chars[self.input[before_char_pos]])) {
            before_char_pos -= 1;
        }
        var utf8 = std.unicode.Utf8View.initUnchecked(self.input[before_char_pos..self.pos]).iterator();
        if (utf8.nextCodepoint()) |codepoint| {
            if (codepoint >= 256 or !self.skip_chars[codepoint]) {
                before_char = codepoint;
            }
        }
    }
    var num_delims: usize = 0;
    if (c == '\'' or c == '"') {
        // Quotes never form runs.
        num_delims += 1;
        self.pos += 1;
    } else while (self.peekChar() == c) {
        num_delims += 1;
        self.pos += 1;
    }
    // Find the codepoint immediately after the run, skipping skip chars.
    var after_char: u21 = '\n';
    if (!self.eof()) {
        var after_char_pos = self.pos;
        while (after_char_pos < self.input.len - 1 and self.skip_chars[self.input[after_char_pos]]) {
            after_char_pos += 1;
        }
        var utf8 = std.unicode.Utf8View.initUnchecked(self.input[after_char_pos..]).iterator();
        if (utf8.nextCodepoint()) |codepoint| {
            if (codepoint >= 256 or !self.skip_chars[codepoint]) {
                after_char = codepoint;
            }
        }
    }
    // CommonMark left-/right-flanking delimiter run definitions.
    const left_flanking = num_delims > 0 and !zunicode.isSpace(after_char) and !(zunicode.isPunct(after_char) and !zunicode.isSpace(before_char) and !zunicode.isPunct(before_char));
    const right_flanking = num_delims > 0 and !zunicode.isSpace(before_char) and !(zunicode.isPunct(before_char) and !zunicode.isSpace(after_char) and !zunicode.isPunct(after_char));
    if (c == '_') {
        // '_' additionally requires punctuation at intraword boundaries.
        return ScanResult{
            .num_delims = num_delims,
            .can_open = left_flanking and (!right_flanking or zunicode.isPunct(before_char)),
            .can_close = right_flanking and (!left_flanking or zunicode.isPunct(after_char)),
        };
    } else if (c == '\'' or c == '"') {
        return ScanResult{
            .num_delims = num_delims,
            .can_open = left_flanking and !right_flanking and before_char != ']' and before_char != ')',
            .can_close = right_flanking,
        };
    } else {
        return ScanResult{
            .num_delims = num_delims,
            .can_open = left_flanking,
            .can_close = right_flanking,
        };
    }
}
/// Pushes a new delimiter entry for the text node `inl` onto the delimiter
/// stack. `inl` must hold a Text value (its length seeds `length`).
fn pushDelimiter(self: *Subject, c: u8, can_open: bool, can_close: bool, inl: *nodes.AstNode) !void {
    var delimiter = try self.allocator.create(Delimiter);
    delimiter.* = .{
        .inl = inl,
        .length = inl.data.value.text().?.len,
        .delim_char = c,
        .can_open = can_open,
        .can_close = can_close,
        .prev = self.last_delimiter,
        .next = null,
    };
    if (delimiter.prev) |prev| {
        prev.next = delimiter;
    }
    self.last_delimiter = delimiter;
}
/// Converts a matched opener/closer pair into an Emph, Strong or
/// Strikethrough node, moving the inlines between them into the new node
/// and shrinking (or removing) the delimiter text nodes. Returns the next
/// closer to continue processing from, or null for a failed strikethrough.
fn insertEmph(self: *Subject, opener: *Delimiter, closer: *Delimiter) !?*Delimiter {
    const opener_char = opener.inl.data.value.text().?[0];
    var opener_num_chars = opener.inl.data.value.text().?.len;
    var closer_num_chars = closer.inl.data.value.text().?.len;
    // Use two delimiter chars (Strong) when both runs have at least two.
    const use_delims: u8 = if (closer_num_chars >= 2 and opener_num_chars >= 2) 2 else 1;
    opener_num_chars -= use_delims;
    closer_num_chars -= use_delims;
    // Strikethrough must consume both runs entirely and equally.
    if (self.options.extensions.strikethrough and opener_char == '~' and (opener_num_chars != closer_num_chars or opener_num_chars > 0))
        return null;
    // Trim the consumed delimiter characters off both text nodes.
    var opener_text = opener.inl.data.value.text_mut().?;
    opener_text.* = self.allocator.shrink(opener_text.*, opener_num_chars);
    var closer_text = closer.inl.data.value.text_mut().?;
    closer_text.* = self.allocator.shrink(closer_text.*, closer_num_chars);
    // Drop any delimiters strictly between opener and closer.
    var delim = closer.prev;
    while (delim != null and delim != opener) {
        var prev = delim.?.prev;
        self.removeDelimiter(delim.?);
        delim = prev;
    }
    var value: nodes.NodeValue = undefined;
    if (self.options.extensions.strikethrough and opener_char == '~') {
        value = .Strikethrough;
    } else if (use_delims == 1) {
        value = .Emph;
    } else {
        value = .Strong;
    }
    var emph = try self.makeInline(value);
    // Reparent everything between the two delimiter text nodes.
    var tmp = opener.inl.next.?;
    while (tmp != closer.inl) {
        var next = tmp.next;
        emph.append(tmp);
        if (next) |n| {
            tmp = n;
        } else {
            break;
        }
    }
    opener.inl.insertAfter(emph);
    // Remove delimiter text nodes that were fully consumed.
    if (opener_num_chars == 0) {
        opener.inl.detachDeinit();
        self.removeDelimiter(opener);
    }
    if (closer_num_chars == 0) {
        closer.inl.detachDeinit();
        var next = closer.next;
        self.removeDelimiter(closer);
        return next;
    } else {
        return closer;
    }
}
/// Records an open '[' or '![' so a later ']' can attempt to close it.
/// Marks the previous bracket as having a bracket after it (used by the
/// shortcut-reference rule in handleCloseBracket).
fn pushBracket(self: *Subject, kind: BracketKind, inl_text: *nodes.AstNode) !void {
    const len = self.brackets.items.len;
    if (len > 0)
        self.brackets.items[len - 1].bracket_after = true;
    try self.brackets.append(.{
        .previous_delimiter = self.last_delimiter,
        .inl_text = inl_text,
        .position = self.pos,
        .kind = kind,
        .active = true,
        .bracket_after = false,
    });
}
/// Handles ']': tries to close the innermost bracket as an inline link
/// `[..](url "title")`, then as a reference link via the refmap; on failure
/// the bracket is discarded and a literal "]" is emitted. Returns null when
/// a link/image was formed (the nodes are spliced in by closeBracketMatch).
fn handleCloseBracket(self: *Subject) !?*nodes.AstNode {
    self.pos += 1;
    const initial_pos = self.pos;
    const brackets_len = self.brackets.items.len;
    if (brackets_len == 0) {
        return try self.makeInline(.{ .Text = try self.allocator.dupe(u8, "]") });
    }
    if (!self.brackets.items[brackets_len - 1].active) {
        // Bracket was deactivated by an inner link; it can never match.
        _ = self.brackets.pop();
        return try self.makeInline(.{ .Text = try self.allocator.dupe(u8, "]") });
    }
    const kind = self.brackets.items[brackets_len - 1].kind;
    const after_link_text_pos = self.pos;
    // Attempt an inline destination: '(' spaces url [spaces title] spaces ')'.
    var sps: usize = 0;
    var url: []const u8 = "";
    var n: usize = 0;
    if (self.peekChar() orelse 0 == '(' and blk: {
        sps = (try scanners.spacechars(self.input[self.pos + 1 ..])) orelse 0;
        break :blk manualScanLinkUrl(self.input[self.pos + 1 + sps ..], &url, &n);
    }) {
        const starturl = self.pos + 1 + sps;
        const endurl = starturl + n;
        const starttitle = endurl + ((try scanners.spacechars(self.input[endurl..])) orelse 0);
        const endtitle = if (starttitle == endurl) starttitle else starttitle + ((try scanners.linkTitle(self.input[starttitle..])) orelse 0);
        const endall = endtitle + ((try scanners.spacechars(self.input[endtitle..])) orelse 0);
        if (endall < self.input.len and self.input[endall] == ')') {
            self.pos = endall + 1;
            var cleanUrl = try strings.cleanUrl(self.allocator, url);
            var cleanTitle = try strings.cleanTitle(self.allocator, self.input[starttitle..endtitle]);
            try self.closeBracketMatch(kind, cleanUrl, cleanTitle);
            return null;
        } else {
            // Malformed inline destination: rewind and fall through to
            // reference-link handling.
            self.pos = after_link_text_pos;
        }
    }
    // Attempt a reference label: explicit `[label]`, or the bracket's own
    // text as a shortcut reference when no inner bracket intervened.
    var label: ?[]const u8 = null;
    if (self.linkLabel()) |lab| {
        label = lab;
    }
    if (label == null) {
        self.pos = initial_pos;
    }
    if ((label == null or label.?.len == 0) and !self.brackets.items[brackets_len - 1].bracket_after) {
        label = self.input[self.brackets.items[brackets_len - 1].position .. initial_pos - 1];
    }
    var normalized = try strings.normalizeLabel(self.allocator, label orelse "");
    defer self.allocator.free(normalized);
    var maybe_ref = if (label != null) self.refmap.get(normalized) else null;
    if (maybe_ref) |ref| {
        try self.closeBracketMatch(kind, try self.allocator.dupe(u8, ref.url), try self.allocator.dupe(u8, ref.title));
        return null;
    }
    // No match at all: drop the bracket and emit a literal "]".
    _ = self.brackets.pop();
    self.pos = initial_pos;
    return try self.makeInline(.{ .Text = try self.allocator.dupe(u8, "]") });
}
/// Tries to parse a link label `[...]` at the current position. Returns the
/// trimmed label contents (a slice into the input) and consumes it, or
/// returns null and restores the position on failure.
pub fn linkLabel(self: *Subject) ?[]const u8 {
    const startpos = self.pos;
    if (self.peekChar() orelse 0 != '[') {
        return null;
    }
    self.pos += 1;
    var length: usize = 0;
    var c: u8 = 0;
    while (true) {
        c = self.peekChar() orelse 0;
        if (c == '[' or c == ']') {
            break;
        }
        if (c == '\\') {
            // Backslash escapes count the escaped punctuation too.
            self.pos += 1;
            length += 1;
            if (ascii.isPunct(self.peekChar() orelse 0)) {
                self.pos += 1;
                length += 1;
            }
        } else {
            self.pos += 1;
            length += 1;
        }
        if (length > MAX_LINK_LABEL_LENGTH) {
            self.pos = startpos;
            return null;
        }
    }
    if (c == ']') {
        const raw_label = strings.trim(self.input[startpos + 1 .. self.pos]);
        self.pos += 1;
        return raw_label;
    } else {
        // Hit '[' or end of input (c == 0): not a valid label.
        self.pos = startpos;
        return null;
    }
}
/// Takes ownership of `url' and `title'. Builds the Link/Image node for a
/// successfully matched bracket: moves the inlines after the bracket's
/// opening text node into the new node, runs emphasis processing over the
/// link text, and (for links) deactivates enclosing Link brackets so links
/// cannot nest inside links.
fn closeBracketMatch(self: *Subject, kind: BracketKind, url: []u8, title: []u8) !void {
    const nl = nodes.NodeLink{ .url = url, .title = title };
    var inl = try self.makeInline(switch (kind) {
        .Link => .{ .Link = nl },
        .Image => .{ .Image = nl },
    });
    var brackets_len = self.brackets.items.len;
    self.brackets.items[brackets_len - 1].inl_text.insertBefore(inl);
    // Reparent everything after the opening "["/"![" text node.
    var tmpch = self.brackets.items[brackets_len - 1].inl_text.next;
    while (tmpch) |tmp| {
        tmpch = tmp.next;
        inl.append(tmp);
    }
    self.brackets.items[brackets_len - 1].inl_text.detachDeinit();
    const previous_delimiter = self.brackets.items[brackets_len - 1].previous_delimiter;
    try self.processEmphasis(previous_delimiter);
    _ = self.brackets.pop();
    brackets_len -= 1;
    if (kind == .Link) {
        // Deactivate enclosing Link brackets (stop at the first inactive).
        var i = @intCast(i32, brackets_len) - 1;
        while (i >= 0) : (i -= 1) {
            if (self.brackets.items[@intCast(usize, i)].kind == .Link) {
                if (!self.brackets.items[@intCast(usize, i)].active) {
                    break;
                } else {
                    self.brackets.items[@intCast(usize, i)].active = false;
                }
            }
        }
    }
}
/// Scans a link destination at the start of `input`. Handles the
/// angle-bracket form `<...>` here (the URL excludes the brackets);
/// bare destinations are delegated to manualScanLinkUrl2. On success,
/// `url` receives the destination slice and `n` the number of bytes
/// consumed from `input`.
pub fn manualScanLinkUrl(input: []const u8, url: *[]const u8, n: *usize) bool {
    const len = input.len;
    var i: usize = 0;
    if (i < len and input[i] == '<') {
        i += 1;
        while (i < len) {
            switch (input[i]) {
                '>' => {
                    i += 1;
                    break;
                },
                '\\' => {
                    // Skip the backslash and the escaped byte.
                    i += 2;
                },
                '\n', '<' => {
                    // Newlines and nested '<' are forbidden in <...> form.
                    return false;
                },
                else => {
                    i += 1;
                },
            }
        }
    } else {
        return manualScanLinkUrl2(input, url, n);
    }
    if (i >= len) {
        return false;
    } else {
        // Strip the surrounding angle brackets.
        url.* = input[1 .. i - 1];
        n.* = i;
        return true;
    }
}
/// Scans a bare (unbracketed) link destination: stops at whitespace or at
/// an unbalanced ')', allows backslash escapes, and permits at most 32
/// levels of nested parentheses.
fn manualScanLinkUrl2(input: []const u8, url: *[]const u8, n: *usize) bool {
    const len = input.len;
    var i: usize = 0;
    var nb_p: usize = 0;
    while (i < len) {
        if (input[i] == '\\' and i + 1 < len and ascii.isPunct(input[i + 1])) {
            i += 2;
        } else if (input[i] == '(') {
            nb_p += 1;
            i += 1;
            if (nb_p > 32)
                return false;
        } else if (input[i] == ')') {
            if (nb_p == 0)
                break;
            nb_p -= 1;
            i += 1;
        } else if (ascii.isSpace(input[i])) {
            // An empty destination followed by space is invalid.
            if (i == 0)
                return false;
            break;
        } else {
            i += 1;
        }
    }
    if (i >= len) {
        return false;
    } else {
        url.* = input[0..i];
        n.* = i;
        return true;
    }
}
};
/// Entry in the doubly-linked delimiter stack used by processEmphasis.
const Delimiter = struct {
    inl: *nodes.AstNode, // the Text node holding the delimiter characters
    length: usize, // original run length (for the "rule of three")
    delim_char: u8, // '*', '_', '\'', '"' or '~'
    can_open: bool,
    can_close: bool,
    prev: ?*Delimiter,
    next: ?*Delimiter,
};
/// Entry in the bracket stack recording an unresolved '[' or '!['.
const Bracket = struct {
    previous_delimiter: ?*Delimiter, // delimiter stack top when opened
    inl_text: *nodes.AstNode, // the "["/"![" Text node
    position: usize, // input position just after the bracket
    kind: BracketKind,
    active: bool, // false once an inner link deactivated it
    bracket_after: bool, // another bracket opened after this one
};
const BracketKind = enum { Link, Image };
|
src/inlines.zig
|
const std = @import("std");
const mem = std.mem;
const Allocator = mem.Allocator;
const assert = std.debug.assert;
const bog = @import("bog.zig");
const Node = bog.Node;
const Tree = bog.Tree;
const TokenIndex = bog.Token.Index;
const RegRef = bog.RegRef;
const Errors = bog.Errors;
const util = @import("util.zig");
pub const max_params = 32;
/// Compiles `source` into a new bog Module. Parses into a tree, forward-
/// declares top-level `const f = fn ...` declarations, then generates code
/// for each top-level node. Caller owns the returned Module (allocated
/// with `gpa`); diagnostics are reported through `errors`.
pub fn compile(gpa: *Allocator, source: []const u8, errors: *Errors) (Compiler.Error || bog.Parser.Error || bog.Tokenizer.Error)!*bog.Module {
    var tree = try bog.parse(gpa, source, errors);
    defer tree.deinit();
    var arena_state = std.heap.ArenaAllocator.init(gpa);
    defer arena_state.deinit();
    const arena = &arena_state.allocator;
    // The module body is compiled as an implicit function.
    var module = Compiler.Fn{
        .captures = undefined,
        .code = Compiler.Code.init(gpa),
    };
    defer module.code.deinit();
    var compiler = Compiler{
        .gpa = gpa,
        .errors = errors,
        .tokens = tree.tokens,
        .source = source,
        .arena = arena,
        .func = &module,
        .module_code = Compiler.Code.init(gpa),
        .scopes = std.ArrayList(Compiler.Scope).init(gpa),
        .strings = std.ArrayList(u8).init(gpa),
        .string_interner = std.StringHashMap(u32).init(gpa),
    };
    defer compiler.deinit();
    try compiler.scopes.append(.{ .module = &module });
    // Pass 1: forward-declare `const IDENT = fn ...` so later code can
    // reference functions defined below their use.
    for (tree.nodes) |node| {
        try compiler.autoForwardDecl(node);
    }
    // Pass 2: generate code for each top-level statement.
    for (tree.nodes) |node| {
        try compiler.addLineInfo(node);
        const val = try compiler.genNode(node, .discard);
        if (val.isRt()) {
            const reg = val.getRt();
            defer val.free(&compiler, reg);
            // discard unused runtime value
            try compiler.emitSingle(.discard_single, reg);
        }
    }
    // The module body's code is appended after any nested function bodies
    // already in module_code; entry marks where execution starts.
    const entry = compiler.module_code.items.len;
    try compiler.module_code.appendSlice(module.code.items);
    const mod = try gpa.create(bog.Module);
    mod.* = .{
        .name = "",
        .code = compiler.module_code.toOwnedSlice(),
        .strings = compiler.strings.toOwnedSlice(),
        .entry = @intCast(u32, entry),
    };
    return mod;
}
/// Compiles a single REPL input node, appending its code to the REPL's
/// persistent compiler state. The returned Module borrows the compiler's
/// buffers (it is not owned by the caller); entry points at the newly
/// appended code.
pub fn compileRepl(repl: *@import("repl.zig").Repl, node: *Node) Compiler.Error!bog.Module {
    // Refresh slices that may have been reallocated since the last line.
    repl.compiler.tokens = repl.tokenizer.tokens.items;
    repl.compiler.source = repl.buffer.items;
    const start_len = repl.compiler.module_code.items.len;
    try repl.compiler.autoForwardDecl(node);
    try repl.compiler.addLineInfo(node);
    const val = try repl.compiler.genNode(node, .discard);
    if (val != .empty) {
        const reg = try val.toRt(&repl.compiler);
        defer val.free(&repl.compiler, reg);
        try repl.compiler.emitSingle(.discard_single, reg);
    }
    // Move this line's code into the persistent module buffer and reset
    // the scratch function body for the next line.
    try repl.compiler.module_code.appendSlice(repl.compiler.func.code.items);
    repl.compiler.func.code.items.len = 0;
    return bog.Module{
        .name = "<stdin>",
        .code = repl.compiler.module_code.items,
        .strings = repl.compiler.strings.items,
        .entry = @intCast(u32, start_len),
    };
}
pub const Compiler = struct {
tokens: []const bog.Token, // token stream of the tree being compiled
source: []const u8, // original source, used to slice identifier text
errors: *Errors, // sink for compile diagnostics
gpa: *Allocator, // general-purpose allocator for long-lived buffers
arena: *Allocator, // arena scoped to this compilation
scopes: std.ArrayList(Scope), // lexical scope stack (symbols, loops, fns)
func: *Fn, // function currently being emitted into
module_code: Code, // finished (module-level) instruction buffer
strings: std.ArrayList(u8), // interned string data (u32-length-prefixed)
string_interner: std.StringHashMap(u32), // string -> offset into `strings`
/// Frees compiler-owned containers. Does not free the current `func` or
/// any produced Module (those have separate owners).
pub fn deinit(self: *Compiler) void {
    self.scopes.deinit();
    self.strings.deinit();
    self.module_code.deinit();
    self.string_interner.deinit();
}
/// Instruction buffer type shared by functions and the module body.
pub const Code = std.ArrayList(bog.Instruction);
/// Per-function code generation state: the instruction buffer, captured
/// symbols, and a 256-register allocation bitmap.
pub const Fn = struct {
    code: Code,
    captures: std.ArrayList(Symbol),
    // One bit per register: 1 = in use.
    regs: std.PackedIntArray(u1, 256) = .{
        .bytes = [_]u8{0} ** 32,
    },
    // index of first free register
    free_index: u8 = 0,
    /// Allocates the lowest free register. Panics if all 256 are in use.
    pub fn regAlloc(self: *Fn) !RegRef {
        var i: u32 = self.free_index;
        while (i < 256) : (i += 1) {
            if (self.regs.get(i) == 0) {
                self.regs.set(i, 1);
                // Note: truncation wraps 256 -> 0 when register 255 is taken.
                self.free_index = @truncate(u8, i + 1);
                return @truncate(u8, i);
            }
        }
        // return error.RanOutOfRegisters;
        @panic("TODO RanOutOfRegisters");
    }
    /// Releases `reg` and lowers the first-free hint if needed.
    pub fn regFree(self: *Fn, reg: RegRef) void {
        self.regs.set(reg, 0);
        self.free_index = std.math.min(self.free_index, reg);
    }
    /// Allocates `n` consecutive registers, returning the first. Does not
    /// advance `free_index` (callers free via regFreeN).
    fn regAllocN(self: *Fn, n: usize) !RegRef {
        var i = self.free_index;
        outer: while (i + n < 256) : (i += 1) {
            var j: usize = 0;
            while (j < n) : (j += 1) {
                if (self.regs.get(i + j) == 1) continue :outer;
            }
            j = 0;
            while (j < n) : (j += 1) {
                self.regs.set(i + j, 1);
            }
            return i;
        }
        @panic("TODO RanOutOfRegisters");
    }
    /// Releases `n` consecutive registers starting at `start`.
    fn regFreeN(self: *Fn, start: usize, n: usize) void {
        var i: usize = 0;
        while (i < n) : (i += 1) {
            self.regs.set(start + i, 0);
        }
    }
};
/// A named binding resolved to a register.
const Symbol = struct {
    name: []const u8,
    // val: Value,
    reg: RegRef,
    mut: bool, // declared with `let` (mutable) vs `const`
};
/// Loop bookkeeping: forward jumps from `break` to patch, and the
/// position `continue` jumps back to.
const Loop = struct {
    breaks: BreakList,
    cond_begin: u32,
    const BreakList = std.ArrayList(u32);
};
/// One entry of the lexical scope stack.
pub const Scope = union(enum) {
    module: *Fn,
    func: *Fn,
    loop: *Loop,
    symbol: Symbol,
    forward_decl: Symbol, // pre-declared `const f = fn ...` (see autoForwardDecl)
    try_catch: *Try,
};
/// Try/catch bookkeeping: jumps into the catch block and the register
/// receiving the caught error.
const Try = struct {
    jumps: JumpList,
    err_reg: RegRef,
};
const JumpList = std.ArrayList(u32);
/// Compile-time result of generating a node: either a runtime register
/// (`rt`/`ref`) or a constant known at compile time.
const Value = union(enum) {
    /// result of continue, break, return and assignment; cannot exist at runtime
    empty,
    rt: RegRef,
    /// reference to a variable
    ref: RegRef,
    none,
    int: i64,
    num: f64,
    Bool: bool,
    str: []const u8,
    func: struct {
        params: u8,
        offset: u32, // offset of the function body in module_code
        captures: []Symbol,
    },
    /// True when the value lives in a register at runtime.
    fn isRt(val: Value) bool {
        return switch (val) {
            .rt, .ref => true,
            else => false,
        };
    }
    /// Materializes into the result register when one was requested;
    /// otherwise passes the value through unchanged.
    fn maybeRt(val: Value, self: *Compiler, res: Result) !Value {
        if (res == .rt) {
            try self.makeRuntime(res.rt, val);
            return res.toVal();
        }
        return val;
    }
    /// Frees the backing register — unless this is a `ref`, which aliases
    /// a variable's register that must stay live.
    fn free(val: Value, self: *Compiler, reg: RegRef) void {
        if (val != .ref) {
            self.func.regFree(reg);
        }
    }
    /// Returns a register holding the value, emitting materialization
    /// code for constants.
    fn toRt(val: Value, self: *Compiler) !RegRef {
        switch (val) {
            .rt, .ref => |r| return r,
            .empty => unreachable,
            else => {
                const reg = try self.func.regAlloc();
                try self.makeRuntime(reg, val);
                return reg;
            },
        }
    }
    /// Unwraps the register of an already-runtime value.
    fn getRt(val: Value) RegRef {
        switch (val) {
            .rt, .ref => |r| return r,
            else => unreachable,
        }
    }
    /// Compile-time bool, or a compile error at `tok`.
    fn getBool(val: Value, self: *Compiler, tok: TokenIndex) !bool {
        if (val != .Bool) {
            return self.reportErr("expected a boolean", tok);
        }
        return val.Bool;
    }
    /// Compile-time integer, or a compile error at `tok`.
    fn getInt(val: Value, self: *Compiler, tok: TokenIndex) !i64 {
        if (val != .int) {
            return self.reportErr("expected an integer", tok);
        }
        return val.int;
    }
    /// Numeric value as f64; caller must have checked it is int or num.
    fn getNum(val: Value) f64 {
        return switch (val) {
            .int => |v| @intToFloat(f64, v),
            .num => |v| v,
            else => unreachable,
        };
    }
    /// Compile-time string, or a compile error at `tok`.
    fn getStr(val: Value, self: *Compiler, tok: TokenIndex) ![]const u8 {
        if (val != .str) {
            return self.reportErr("expected a string", tok);
        }
        return val.str;
    }
    /// Errors at `tok` unless the value is an int or num constant.
    fn checkNum(val: Value, self: *Compiler, tok: TokenIndex) !void {
        if (val != .int and val != .num) {
            return self.reportErr("expected a number", tok);
        }
    }
};
pub const Error = error{CompileError} || Allocator.Error;
/// Returns the innermost enclosing try/catch scope, or null if none exists
/// within the current function (the search stops at func/module scopes).
fn getTry(self: *Compiler) ?*Try {
    var i = self.scopes.items.len;
    while (true) {
        i -= 1;
        const scope = self.scopes.items[i];
        switch (scope) {
            .try_catch => |t| return t,
            .func, .module => return null,
            else => {},
        }
    }
}
/// Pops scopes down to `scope_count`, freeing the registers of any symbols
/// declared in the popped range. Only symbol/try_catch/loop scopes are
/// expected here; hitting a func/module boundary is a compiler bug.
fn clearScopes(self: *Compiler, scope_count: usize) void {
    var i = self.scopes.items.len;
    while (i > scope_count) {
        i -= 1;
        const scope = self.scopes.items[i];
        switch (scope) {
            .symbol => |sym| self.func.regFree(sym.reg),
            .try_catch, .loop => {},
            else => unreachable,
        }
    }
    self.scopes.items.len = scope_count;
}
/// Emits a one-operand instruction into the current function.
fn emitSingle(self: *Compiler, op: bog.Op, arg: RegRef) !void {
    try self.func.code.append(.{
        .single = .{
            .op = op,
            .arg = arg,
        },
    });
}
/// Emits a result+operand instruction into the current function.
fn emitDouble(self: *Compiler, op: bog.Op, res: RegRef, arg: RegRef) !void {
    try self.func.code.append(.{
        .double = .{
            .op = op,
            .res = res,
            .arg = arg,
        },
    });
}
/// Emits a result+lhs+rhs instruction into the current function.
fn emitTriple(self: *Compiler, op: bog.Op, res: RegRef, lhs: RegRef, rhs: RegRef) !void {
    try self.func.code.append(.{
        .triple = .{
            .op = op,
            .res = res,
            .lhs = lhs,
            .rhs = rhs,
        },
    });
}
/// Emits an instruction with a u16 offset operand. Offsets >= 0xFFFF use
/// the sentinel 0xFFFF followed by a bare u32 extension word.
fn emitOff(self: *Compiler, op: bog.Op, res: RegRef, off: u32) !void {
    const long = off >= 0xFFFF;
    try self.func.code.append(.{
        .off = .{
            .op = op,
            .res = res,
            .off = if (long) 0xFFFF else @truncate(u16, off),
        },
    });
    if (long) try self.func.code.append(.{ .bare = off });
}
/// Emits a jump instruction with an undefined target word and returns the
/// index of that word so the caller can patch it later.
fn emitJump(self: *Compiler, op: bog.Op, arg: ?RegRef) !u32 {
    try self.func.code.append(.{ .jump = .{ .op = op, .arg = arg orelse 0 } });
    try self.func.code.append(undefined);
    return @intCast(u32, self.func.code.items.len - 1);
}
/// Emits the code that materializes the compile-time value `val` into
/// register `res`. For values already at runtime, asserts they are in the
/// requested register.
fn makeRuntime(self: *Compiler, res: RegRef, val: Value) Error!void {
    return switch (val) {
        .empty => unreachable,
        .ref, .rt => |v| assert(v == res),
        .none => try self.func.code.append(.{
            .primitive = .{ .res = res, .kind = .none },
        }),
        // Small integers fit inline (i15); larger ones take two extra
        // bare words holding the i64 bits.
        .int => |v| if (v >= std.math.minInt(i15) and v <= std.math.maxInt(i15)) {
            try self.func.code.append(.{ .int = .{ .res = res, .long = false, .arg = @truncate(i15, v) } });
        } else {
            try self.func.code.append(.{ .int = .{ .res = res, .long = true, .arg = 0 } });
            const arr = @bitCast([2]u32, v);
            try self.func.code.append(.{ .bare = arr[0] });
            try self.func.code.append(.{ .bare = arr[1] });
        },
        // Floats always take two bare words for the f64 bits.
        .num => |v| {
            try self.func.code.append(.{ .single = .{ .op = .const_num, .arg = res } });
            const arr = @bitCast([2]u32, v);
            try self.func.code.append(.{ .bare = arr[0] });
            try self.func.code.append(.{ .bare = arr[1] });
        },
        .Bool => |v| try self.func.code.append(.{ .primitive = .{ .res = res, .kind = if (v) .True else .False } }),
        // Strings load by interned offset.
        .str => |v| try self.emitOff(.const_string_off, res, try self.putString(v)),
        // Functions: header, body offset, then one store per capture.
        .func => |v| {
            try self.func.code.append(.{
                .func = .{
                    .res = res,
                    .arg_count = v.params,
                    .capture_count = @truncate(u8, v.captures.len),
                },
            });
            try self.func.code.append(.{ .bare = v.offset });
            for (v.captures) |capture, i| {
                try self.emitTriple(
                    .store_capture_triple,
                    res,
                    capture.reg,
                    @truncate(RegRef, i),
                );
            }
        },
    };
}
/// Interns `str` into the module string table, returning the offset of its
/// u32 length prefix. Duplicate strings return the cached offset.
fn putString(self: *Compiler, str: []const u8) !u32 {
    if (self.string_interner.get(str)) |some| return some;
    const offset = @intCast(u32, self.strings.items.len);
    try self.strings.appendSlice(mem.asBytes(&@intCast(u32, str.len)));
    try self.strings.appendSlice(str);
    _ = try self.string_interner.put(str, offset);
    return offset;
}
/// Pre-declares a top-level `const IDENTIFIER = fn ...` so the function can
/// be referenced before its definition (e.g. mutual recursion). Any other
/// declaration shape is ignored.
fn autoForwardDecl(self: *Compiler, node: *Node) !void {
    if (node.id != .Decl) return;
    const decl = @fieldParentPtr(Node.Decl, "base", node);
    // only forward declarations like
    // `const IDENTIFIER = fn ...`
    //
    // All three conditions must hold. The previous guard
    // (`== .Keyword_const and capture != .Identifier or value != .Fn`)
    // parsed as `(A and B) or C` because `and` binds tighter than `or`,
    // which let `let f = fn ...` (and `let` destructurings of a fn value)
    // fall through and be forward-declared — and then treated a
    // non-identifier capture as a SingleToken below.
    if (self.tokens[decl.let_const].id != .Keyword_const or
        decl.capture.id != .Identifier or decl.value.id != .Fn)
        return;
    const ident = @fieldParentPtr(Node.SingleToken, "base", decl.capture);
    // Reserve the register now; genDecl will find it via getForwardDecl.
    const reg = try self.func.regAlloc();
    try self.scopes.append(.{
        .forward_decl = .{
            // .val = undefined,
            .name = self.tokenSlice(ident.tok),
            .reg = reg,
            .mut = false,
        },
    });
}
/// Resolution result: the register a name lives in and its mutability.
const RegAndMut = struct {
    reg: RegRef,
    mut: bool,
};
/// Resolves `tok` starting from the innermost scope.
fn findSymbol(self: *Compiler, tok: TokenIndex) !RegAndMut {
    return self.findSymbolExtra(tok, self.scopes.items.len, null);
}
/// Resolves the identifier at `tok`, walking the scope stack downward from
/// `start_index`. Crossing a function scope turns the resolution into a
/// capture: the symbol is loaded into a fresh register via
/// load_capture_double and, when `parent_fn` is set, recorded in its
/// capture list so outer functions forward it inward.
fn findSymbolExtra(self: *Compiler, tok: TokenIndex, start_index: usize, parent_fn: ?*Fn) Error!RegAndMut {
    const name = self.tokenSlice(tok);
    var i = start_index;
    while (true) {
        i -= 1;
        const item = self.scopes.items[i];
        switch (item) {
            .module => break,
            .func => |f| {
                // Already captured by this function? Reuse that capture.
                for (f.captures.items) |capture| {
                    if (mem.eql(u8, capture.name, name)) {
                        const capture_reg = try f.regAlloc();
                        try f.code.append(.{
                            .double = .{
                                .op = .load_capture_double,
                                .res = capture_reg,
                                // NOTE(review): loads the *last* capture's
                                // index rather than the matched capture's —
                                // presumably relies on the match being the
                                // most recent; confirm against upstream.
                                .arg = @truncate(u8, f.captures.items.len - 1),
                            },
                        });
                        return RegAndMut{
                            .reg = capture_reg,
                            .mut = capture.mut,
                        };
                    }
                }
                // Not captured yet: resolve outside this function (which
                // appends the new capture to f.captures), then load it.
                const sym = try self.findSymbolExtra(tok, i, f);
                const capture_reg = try f.regAlloc();
                try f.code.append(.{
                    .double = .{
                        .op = .load_capture_double,
                        .res = capture_reg,
                        .arg = @truncate(u8, f.captures.items.len - 1),
                    },
                });
                // Propagate the capture outward through enclosing functions.
                if (parent_fn) |some| {
                    try some.captures.append(.{
                        .name = name,
                        .reg = capture_reg,
                        .mut = sym.mut,
                    });
                }
                return RegAndMut{
                    .reg = capture_reg,
                    .mut = sym.mut,
                };
            },
            .loop, .try_catch => {},
            .symbol, .forward_decl => |sym| if (mem.eql(u8, sym.name, name)) {
                // Found in an enclosing (or current) scope; if we crossed a
                // function boundary, register it as that function's capture.
                if (parent_fn) |some| {
                    try some.captures.append(.{
                        .name = name,
                        .reg = sym.reg,
                        .mut = sym.mut,
                    });
                }
                return RegAndMut{
                    .reg = sym.reg,
                    .mut = sym.mut,
                };
            },
        }
    }
    return self.reportErr("use of undeclared identifier", tok);
}
/// Errors if `tok` names a symbol that already exists anywhere on the
/// scope stack (the search spans all scopes, including enclosing functions).
fn checkRedeclaration(self: *Compiler, tok: TokenIndex) !void {
    const name = self.tokenSlice(tok);
    var i = self.scopes.items.len;
    while (i > 0) {
        i -= 1;
        const scope = self.scopes.items[i];
        switch (scope) {
            .symbol => |sym| if (std.mem.eql(u8, sym.name, name)) {
                const msg = try bog.Value.String.init(self.gpa, "redeclaration of '{s}'", .{name});
                try self.errors.add(msg, self.tokens[tok].start, .err);
                return error.CompileError;
            },
            else => {},
        }
    }
}
/// Looks up a forward declaration for `tok` in the current function,
/// returning its pre-allocated register. Stops at the module boundary.
fn getForwardDecl(self: *Compiler, tok: TokenIndex) ?RegRef {
    const name = self.tokenSlice(tok);
    var i = self.scopes.items.len;
    var in_fn_scope = false;
    while (true) {
        i -= 1;
        const item = self.scopes.items[i];
        switch (item) {
            .module => break,
            .func => in_fn_scope = true,
            .loop, .try_catch, .symbol => {},
            .forward_decl => |sym| if (mem.eql(u8, sym.name, name)) {
                // Forward decls are only created at the top level, so a
                // match across a function boundary should be impossible —
                // presumably guaranteed by autoForwardDecl; confirm.
                assert(!in_fn_scope);
                return sym.reg;
            },
        }
    }
    return null;
}
/// What the caller of genNode expects the generated value to be.
const Result = union(enum) {
    /// A runtime value is expected
    rt: RegRef,
    /// A value, runtime or constant, is expected
    value,
    /// No value is expected if some is given it will be discarded
    discard,
    /// Ensures a destination register exists, allocating one if needed.
    fn toRt(res: Result, compiler: *Compiler) !Result {
        return if (res == .rt) res else .{ .rt = try compiler.func.regAlloc() };
    }
    /// returns .empty if res != .rt
    fn toVal(res: Result) Value {
        return if (res != .rt)
            .empty
        else
            .{ .rt = res.rt };
    }
};
/// Central dispatch: generates code for any AST node by forwarding to
/// the matching gen* method. Nodes that are only meaningful inside a
/// parent construct (map items, match arms, catch) are unreachable here
/// because their parents consume them directly.
fn genNode(self: *Compiler, node: *Node, res: Result) Error!Value {
switch (node.id) {
.Grouped => return self.genNode(@fieldParentPtr(Node.Grouped, "base", node).expr, res),
.Literal => return self.genLiteral(@fieldParentPtr(Node.Literal, "base", node), res),
.Block => return self.genBlock(@fieldParentPtr(Node.Block, "base", node), res),
.Prefix => return self.genPrefix(@fieldParentPtr(Node.Prefix, "base", node), res),
.Decl => return self.genDecl(@fieldParentPtr(Node.Decl, "base", node), res),
.Identifier => return self.genIdentifier(@fieldParentPtr(Node.SingleToken, "base", node), res),
.Infix => return self.genInfix(@fieldParentPtr(Node.Infix, "base", node), res),
.If => return self.genIf(@fieldParentPtr(Node.If, "base", node), res),
.Tuple => return self.genTupleList(@fieldParentPtr(Node.ListTupleMap, "base", node), res),
.Discard => return self.reportErr("'_' can only be used to discard unwanted tuple/list items in destructuring assignment", node.firstToken()),
.TypeInfix => return self.genTypeInfix(@fieldParentPtr(Node.TypeInfix, "base", node), res),
.Fn => return self.genFn(@fieldParentPtr(Node.Fn, "base", node), res),
.Suffix => return self.genSuffix(@fieldParentPtr(Node.Suffix, "base", node), res),
.Error => return self.genError(@fieldParentPtr(Node.Error, "base", node), res),
.While => return self.genWhile(@fieldParentPtr(Node.While, "base", node), res),
.Jump => return self.genJump(@fieldParentPtr(Node.Jump, "base", node), res),
.List => return self.genTupleList(@fieldParentPtr(Node.ListTupleMap, "base", node), res),
.Import => return self.genImport(@fieldParentPtr(Node.Import, "base", node), res),
.For => return self.genFor(@fieldParentPtr(Node.For, "base", node), res),
.Map => return self.genMap(@fieldParentPtr(Node.ListTupleMap, "base", node), res),
.MapItem => unreachable,
.This => return self.genThis(@fieldParentPtr(Node.SingleToken, "base", node), res),
.Tagged => return self.genTagged(@fieldParentPtr(Node.Tagged, "base", node), res),
.Range => return self.genRange(@fieldParentPtr(Node.Range, "base", node), res),
.FormatString => return self.genFormatString(@fieldParentPtr(Node.FormatString, "base", node), res),
.Try => return self.genTry(@fieldParentPtr(Node.Try, "base", node), res),
.Catch => unreachable, // handled in genTry
.Match => return self.genMatch(@fieldParentPtr(Node.Match, "base", node), res),
.MatchCatchAll => unreachable, // handled in genMatch
.MatchLet => unreachable, // handled in genMatch
.MatchCase => unreachable, // handled in genMatch
}
}
/// Like `genNode`, but reports a compile error when the node produced
/// no value (e.g. a declaration used where an expression is required).
fn genNodeNonEmpty(self: *Compiler, node: *Node, res: Result) Error!Value {
    const value = try self.genNode(node, res);
    if (value != .empty) return value;
    return self.reportErr("expected a value", node.firstToken());
}
/// Generates a map literal: builds an empty map of known size, then
/// emits one set instruction per entry, reusing a single key register
/// and a single value register for all entries.
fn genMap(self: *Compiler, node: *Node.ListTupleMap, res: Result) Error!Value {
if (node.values.len > std.math.maxInt(u32)) {
return self.reportErr("too many items", node.base.firstToken());
}
const sub_res = try res.toRt(self);
try self.emitOff(.build_map_off, sub_res.rt, @intCast(u32, node.values.len));
// prepare registers
const container_reg = sub_res.rt;
const index_reg = try self.func.regAlloc();
defer self.func.regFree(index_reg);
const result_reg = try self.func.regAlloc();
defer self.func.regFree(result_reg);
for (node.values) |val| {
const item = @fieldParentPtr(Node.MapItem, "base", val);
if (item.key) |some| {
const last_node = self.getLastNode(some);
if (last_node.id == .Identifier) {
// `ident: value` is equal to `"ident": value`
const ident = @fieldParentPtr(Node.SingleToken, "base", last_node);
const str_loc = try self.putString(self.tokenSlice(ident.tok));
try self.emitOff(.const_string_off, index_reg, str_loc);
} else {
// arbitrary key expression, evaluated into the key register
_ = try self.genNode(some, .{ .rt = index_reg });
}
} else {
// no explicit key: only a bare identifier may be shorthand
const last_node = self.getLastNode(item.value);
if (last_node.id != .Identifier) {
return self.reportErr("expected a key", item.value.firstToken());
}
// `ident` is equal to `"ident": ident`
const ident = @fieldParentPtr(Node.SingleToken, "base", last_node);
const str_loc = try self.putString(self.tokenSlice(ident.tok));
try self.emitOff(.const_string_off, index_reg, str_loc);
}
_ = try self.genNode(item.value, .{ .rt = result_reg });
try self.emitTriple(.set_triple, container_reg, index_reg, result_reg);
}
return sub_res.toVal();
}
/// Generates a tuple or list literal (the opcode is chosen from the
/// node id): builds the empty container, then sets each element at an
/// incrementing integer index, reusing one index and one value register.
fn genTupleList(self: *Compiler, node: *Node.ListTupleMap, res: Result) Error!Value {
if (node.values.len > std.math.maxInt(u32)) {
return self.reportErr("too many items", node.base.firstToken());
}
const sub_res = try res.toRt(self);
try self.emitOff(switch (node.base.id) {
.Tuple => .build_tuple_off,
.List => .build_list_off,
else => unreachable,
}, sub_res.rt, @intCast(u32, node.values.len));
// prepare registers
const container_reg = sub_res.rt;
const index_reg = try self.func.regAlloc();
defer self.func.regFree(index_reg);
const result_reg = try self.func.regAlloc();
defer self.func.regFree(result_reg);
// compile-time counter materialized into index_reg each iteration
var index = Value{ .int = 0 };
for (node.values) |val| {
_ = try self.genNode(val, .{ .rt = result_reg });
try self.makeRuntime(index_reg, index);
try self.emitTriple(.set_triple, container_reg, index_reg, result_reg);
index.int += 1;
}
return sub_res.toVal();
}
/// Generates a function literal. The body is compiled into a fresh
/// `Fn` context, then its code is appended to `module_code`; the
/// returned value records parameter count, code offset and captures.
fn genFn(self: *Compiler, node: *Node.Fn, res: Result) Error!Value {
    if (node.params.len > max_params) {
        return self.reportErr("too many parameters", node.fn_tok);
    }
    const param_count = @truncate(u8, node.params.len);

    // compile into a fresh function context; the defer restores the
    // enclosing one on every exit path
    const prev_func = self.func;
    defer self.func = prev_func;

    const scopes = self.scopes.items.len;
    defer self.scopes.items.len = scopes;

    var func = Fn{
        .captures = std.ArrayList(Symbol).init(self.gpa),
        .code = Code.init(self.gpa),
    };
    defer func.captures.deinit();
    defer func.code.deinit();
    try self.scopes.append(.{ .func = &func });
    self.func = &func;

    // destructure parameters
    for (node.params) |param| {
        // we checked that there are less than max_params params
        const reg = try func.regAlloc();
        try self.genLval(param, .{
            .mut = &Value{ .rt = reg },
        });
    }

    // gen body and return result
    try self.addLineInfo(node.body);

    // for one liner functions return the value of the expression,
    // otherwise require an explicit return statement
    const last = self.getLastNode(node.body);
    const should_discard = switch (last.id) {
        .Block => true,
        // assignments produce no value, so never implicitly return them
        .Infix => switch (@fieldParentPtr(Node.Infix, "base", last).op) {
            .assign, .add_assign, .sub_assign, .mul_assign, .pow_assign, // -
            .div_assign, .div_floor_assign, .mod_assign, .l_shift_assign, // -
            .r_shift_assign, .bit_and_assign, .bit_or_assign, .bit_x_or_assign => true,
            else => false,
        },
        else => false,
    };

    const body_val = try self.genNode(node.body, if (should_discard) .discard else .value);
    if (body_val == .empty or body_val == .none) {
        try self.func.code.append(.{ .op = .{ .op = .return_none } });
    } else {
        const reg = try body_val.toRt(self);
        defer body_val.free(self, reg);
        try self.emitSingle(.return_single, reg);
    }

    // switch back to the enclosing function before copying the body
    // into the module. (A second saved copy of `prev_func` named
    // `old_func` was redundant and has been removed.)
    self.func = prev_func;

    const offset = @intCast(u32, self.module_code.items.len);
    try self.module_code.appendSlice(func.code.items);

    const ret_val = Value{
        .func = .{
            .params = param_count,
            .offset = offset,
            // captures must outlive this call, so copy them to the arena
            .captures = try self.arena.dupe(Symbol, func.captures.items),
        },
    };
    return ret_val.maybeRt(self, res);
}
/// Generates every statement of a block in order. The last statement
/// provides the block's value unless the caller discards the result;
/// intermediate runtime values are explicitly discarded.
fn genBlock(self: *Compiler, node: *Node.Block, res: Result) Error!Value {
    const scope_count = self.scopes.items.len;
    defer self.clearScopes(scope_count);

    for (node.stmts) |stmt, i| {
        try self.addLineInfo(stmt);

        // return value of last instruction if it is not discarded
        const is_last = i + 1 == node.stmts.len;
        if (is_last and res != .discard) {
            return self.genNode(stmt, res);
        }

        const stmt_val = try self.genNode(stmt, .discard);
        if (stmt_val.isRt()) {
            const stmt_reg = stmt_val.getRt();
            defer stmt_val.free(self, stmt_reg);
            // discard unused runtime value
            try self.emitSingle(.discard_single, stmt_reg);
        }
    }
    return Value{ .empty = {} };
}
/// Generates an if expression. `if let` captures jump past the body on
/// `none`; constant conditions are folded at compile time so only the
/// taken branch is generated; otherwise a runtime jump_false is used.
fn genIf(self: *Compiler, node: *Node.If, res: Result) Error!Value {
    const scope_count = self.scopes.items.len;
    var if_skip: usize = undefined;

    const cond_val = try self.genNodeNonEmpty(node.cond, .value);
    if (node.capture != null) {
        // TODO handle cond_val.isRt()
        const cond_reg = try cond_val.toRt(self);
        // jump past if_body if cond == .none
        if_skip = try self.emitJump(.jump_none, cond_reg);

        // bind the capture; genLval's result is not needed here, same
        // as in genWhile/genFor
        try self.genLval(node.capture.?, if (self.tokens[node.let_const.?].id == .Keyword_let)
            .{ .mut = &Value{ .rt = cond_reg } }
        else
            .{ .constant = &Value{ .rt = cond_reg } });
    } else if (!cond_val.isRt()) {
        // condition known at compile time: generate only the taken branch
        const bool_val = try cond_val.getBool(self, node.cond.firstToken());
        if (bool_val) {
            return self.genNode(node.if_body, res);
        } else if (node.else_body) |some| {
            return self.genNode(some, res);
        }
        const res_val = Value{ .none = {} };
        return res_val.maybeRt(self, res);
    } else {
        // jump past if_body if cond == false
        if_skip = try self.emitJump(.jump_false, cond_val.getRt());
    }
    const sub_res = switch (res) {
        .rt, .discard => res,
        .value => Result{
            // value is only known at runtime
            .rt = try self.func.regAlloc(),
        },
    };

    const if_val = try self.genNode(node.if_body, sub_res);
    if (sub_res != .rt and if_val.isRt()) {
        try self.emitSingle(.discard_single, if_val.getRt());
    }

    // jump past else_body since if_body was executed
    const else_skip = if (node.else_body != null or sub_res == .rt)
        try self.emitJump(.jump, null)
    else
        null;

    self.finishJump(if_skip);
    // end capture scope
    self.clearScopes(scope_count);

    if (node.else_body) |some| {
        const else_val = try self.genNode(some, sub_res);
        if (sub_res != .rt and else_val.isRt()) {
            try self.emitSingle(.discard_single, else_val.getRt());
        }
    } else if (sub_res == .rt) {
        // no else branch: a result register still must hold `none`
        try self.func.code.append(.{
            .primitive = .{ .res = sub_res.rt, .kind = .none },
        });
    }

    if (else_skip) |some| {
        self.finishJump(some);
    }
    return sub_res.toVal();
}
/// Generates `return`, `break` and `continue`. Returns emit their value
/// (or return_none); break/continue locate the innermost loop scope —
/// continue patches a backward jump immediately, break is recorded in
/// the loop's break list and patched at loop end.
fn genJump(self: *Compiler, node: *Node.Jump, res: Result) Error!Value {
if (res != .discard) {
return self.reportErr("jump expression produces no value", node.tok);
}
if (node.op == .Return) {
if (node.op.Return) |some| {
const val = try self.genNode(some, .value);
const reg = try val.toRt(self);
defer val.free(self, reg);
try self.emitSingle(.return_single, reg);
} else {
try self.func.code.append(.{ .op = .{ .op = .return_none } });
}
return Value{ .empty = {} };
}
// find inner most loop
const loop_scope = blk: {
var i = self.scopes.items.len;
while (true) {
i -= 1;
const scope = self.scopes.items[i];
switch (scope) {
// crossing a function or module boundary means there is no loop
.module, .func => return self.reportErr(if (node.op == .Continue)
"continue outside of loop"
else
"break outside of loop", node.tok),
.loop => |loop| break :blk loop,
else => {},
}
}
};
if (node.op == .Continue) {
// backward jump to the loop condition; the offset is negative and
// relative to the instruction after the jump
self.func.code.items[try self.emitJump(.jump, null)] = .{
.bare_signed = @intCast(i32, -@intCast(isize, self.func.code.items.len - loop_scope.cond_begin - 1)),
};
} else {
// patched in genWhile/genFor once the loop end is known
try loop_scope.breaks.append(try self.emitJump(.jump, null));
}
return Value{ .empty = {} };
}
/// Emits an empty list into `reg` (or a freshly allocated register when
/// none is given). Loop expressions append their results to this list.
fn createListComprehension(self: *Compiler, reg: ?RegRef) !Result {
    const list_reg = if (reg) |r| r else try self.func.regAlloc();
    try self.emitOff(.build_list_off, list_reg, 0);
    return Result{ .rt = list_reg };
}
/// Generates a while loop. When a value is expected the loop acts as a
/// list comprehension, appending each body result to a list. Handles
/// `while let` captures and compile-time-false conditions.
fn genWhile(self: *Compiler, node: *Node.While, res: Result) Error!Value {
const sub_res = switch (res) {
.discard => res,
.value => try self.createListComprehension(null),
.rt => |reg| try self.createListComprehension(reg),
};
const scope_count = self.scopes.items.len;
defer self.clearScopes(scope_count);
var loop = Loop{
.breaks = Loop.BreakList.init(self.strings.allocator),
.cond_begin = @intCast(u32, self.func.code.items.len),
};
defer loop.breaks.deinit();
try self.scopes.append(.{ .loop = &loop });
// beginning of condition
var cond_jump: ?usize = null;
const cond_val = try self.genNode(node.cond, .value);
if (node.capture != null) {
// TODO handle cond_val.isRt()
const cond_reg = try cond_val.toRt(self);
// jump past exit loop if cond == .none
cond_jump = try self.emitJump(.jump_none, cond_reg);
try self.genLval(node.capture.?, if (self.tokens[node.let_const.?].id == .Keyword_let)
.{ .mut = &Value{ .rt = cond_reg } }
else
.{ .constant = &Value{ .rt = cond_reg } });
} else if (cond_val.isRt()) {
cond_jump = try self.emitJump(.jump_false, cond_val.getRt());
} else {
const bool_val = try cond_val.getBool(self, node.cond.firstToken());
if (bool_val == false) {
// never executed
const res_val = Value{ .none = {} };
return res_val.maybeRt(self, res);
}
// constant-true condition: no exit jump, only breaks leave the loop
}
switch (sub_res) {
.discard => {
const body_val = try self.genNode(node.body, res);
if (body_val.isRt()) {
try self.emitSingle(.discard_single, body_val.getRt());
}
},
.rt => |list| {
// comprehension mode: append the body value to the result list
const body_val = try self.genNodeNonEmpty(node.body, .value);
const reg = try body_val.toRt(self);
defer self.func.regFree(reg);
try self.emitDouble(.append_double, list, reg);
},
else => unreachable,
}
// jump back to condition
const end = try self.emitJump(.jump, null);
self.func.code.items[end] = .{
.bare_signed = @truncate(i32, -@intCast(isize, end - loop.cond_begin)),
};
// exit loop if cond == false
if (cond_jump) |some| {
self.finishJump(some);
}
// patch all break jumps recorded by genJump
for (loop.breaks.items) |some| {
self.finishJump(some);
}
return sub_res.toVal();
}
/// Generates a for loop over an iterable. Initializes an iterator from
/// the condition value; iter_next fuses with a jump-on-none that exits
/// the loop. In value position the loop acts as a list comprehension.
fn genFor(self: *Compiler, node: *Node.For, res: Result) Error!Value {
const sub_res = switch (res) {
.discard => res,
.value => try self.createListComprehension(null),
.rt => |reg| try self.createListComprehension(reg),
};
const scope_count = self.scopes.items.len;
defer self.clearScopes(scope_count);
const cond_val = try self.genNode(node.cond, .value);
// strings are the only compile-time-known iterable value
if (!cond_val.isRt() and cond_val != .str)
return self.reportErr("expected iterable value", node.cond.firstToken());
const cond_reg = try cond_val.toRt(self);
defer cond_val.free(self, cond_reg);
const iter_reg = try self.func.regAlloc();
defer self.func.regFree(iter_reg);
// initialize the iterator
try self.emitDouble(.iter_init_double, iter_reg, cond_reg);
// iter_init can fail; propagate to an enclosing try if present
if (self.getTry()) |try_scope| {
try self.emitDouble(.move_double, try_scope.err_reg, iter_reg);
try try_scope.jumps.append(try self.emitJump(.jump_error, iter_reg));
}
var loop = Loop{
.breaks = Loop.BreakList.init(self.strings.allocator),
.cond_begin = @intCast(u32, self.func.code.items.len),
};
defer loop.breaks.deinit();
try self.scopes.append(.{ .loop = &loop });
const iter_val_reg = try self.func.regAlloc();
defer self.func.regFree(iter_val_reg);
// iter_next is fused with a jump_none
try self.emitDouble(.iter_next_double, iter_val_reg, iter_reg);
// reserve the jump offset slot following iter_next
const exit_jump = self.func.code.items.len;
try self.func.code.append(.{ .bare = 0 });
if (node.capture != null) {
try self.genLval(node.capture.?, if (self.tokens[node.let_const.?].id == .Keyword_let)
.{ .mut = &Value{ .rt = iter_val_reg } }
else
.{ .constant = &Value{ .rt = iter_val_reg } });
}
switch (sub_res) {
.discard => {
const body_val = try self.genNode(node.body, res);
if (body_val.isRt()) {
try self.emitSingle(.discard_single, body_val.getRt());
}
},
.rt => |list| {
// comprehension mode: append the body value to the result list
const body_val = try self.genNodeNonEmpty(node.body, .value);
const reg = try body_val.toRt(self);
defer self.func.regFree(reg);
try self.emitDouble(.append_double, list, reg);
},
else => unreachable,
}
// jump to the start of the loop
const end = try self.emitJump(.jump, null);
self.func.code.items[end] = .{
.bare_signed = @truncate(i32, -@intCast(isize, end - loop.cond_begin)),
};
// exit loop when IterNext results in None
self.finishJump(exit_jump);
// patch all break jumps recorded by genJump
for (loop.breaks.items) |some| {
self.finishJump(some);
}
return sub_res.toVal();
}
/// Generates a prefix operator (`not`, `~`, unary `-`/`+`). Runtime
/// operands emit the corresponding opcode; compile-time operands are
/// folded here. Unary `+` is a type-checked no-op.
fn genPrefix(self: *Compiler, node: *Node.Prefix, res: Result) Error!Value {
const rhs_val = try self.genNodeNonEmpty(node.rhs, .value);
if (rhs_val.isRt()) {
const op_id: bog.Op = switch (node.op) {
.bool_not => .bool_not_double,
.bit_not => .bit_not_double,
.minus => .negate_double,
// TODO should unary + be a no-op
.plus => return rhs_val,
};
const reg = rhs_val.getRt();
defer rhs_val.free(self, reg);
const sub_res = try res.toRt(self);
try self.emitDouble(op_id, sub_res.rt, reg);
return sub_res.toVal();
}
// compile-time constant folding
const ret_val: Value = switch (node.op) {
.bool_not => .{ .Bool = !try rhs_val.getBool(self, node.rhs.firstToken()) },
.bit_not => .{ .int = ~try rhs_val.getInt(self, node.rhs.firstToken()) },
.minus => blk: {
try rhs_val.checkNum(self, node.rhs.firstToken());
if (rhs_val == .int) {
// TODO check for overflow
break :blk Value{ .int = -rhs_val.int };
} else {
break :blk Value{ .num = -rhs_val.num };
}
},
.plus => blk: {
// still validates the operand is numeric
try rhs_val.checkNum(self, node.rhs.firstToken());
break :blk rhs_val;
},
};
return ret_val.maybeRt(self, res);
}
/// Generates `as` (cast) and `is` (type test) expressions. The type
/// name token is mapped to a type id; runtime operands emit a type_id
/// instruction, compile-time operands are converted/tested here.
fn genTypeInfix(self: *Compiler, node: *Node.TypeInfix, res: Result) Error!Value {
const lhs = try self.genNodeNonEmpty(node.lhs, .value);
const type_str = self.tokenSlice(node.type_tok);
// map the type name to a bog.Type tag
const type_id = if (mem.eql(u8, type_str, "none"))
.none
else if (mem.eql(u8, type_str, "int"))
.int
else if (mem.eql(u8, type_str, "num"))
.num
else if (mem.eql(u8, type_str, "bool"))
.bool
else if (mem.eql(u8, type_str, "str"))
.str
else if (mem.eql(u8, type_str, "tuple"))
.tuple
else if (mem.eql(u8, type_str, "map"))
.map
else if (mem.eql(u8, type_str, "list"))
.list
else if (mem.eql(u8, type_str, "error"))
.err
else if (mem.eql(u8, type_str, "range"))
.range
else if (mem.eql(u8, type_str, "fn"))
.func
else if (mem.eql(u8, type_str, "tagged"))
bog.Type.tagged
else
return self.reportErr("expected a type name", node.type_tok);
if (lhs.isRt()) {
const sub_res = try res.toRt(self);
const reg = lhs.getRt();
defer lhs.free(self, reg);
try self.func.code.append(.{
.type_id = .{
.op = if (node.op == .as) .as_type_id else .is_type_id,
.res = sub_res.rt,
.arg = reg,
.type_id = type_id,
},
});
// `as` can result in a type error
if (node.op == .as) {
if (self.getTry()) |try_scope| {
try self.emitDouble(.move_double, try_scope.err_reg, sub_res.rt);
try try_scope.jumps.append(try self.emitJump(.jump_error, sub_res.rt));
}
}
return sub_res.toVal();
}
// operand known at compile time: fold the cast/test
const ret_val = switch (node.op) {
.as => switch (type_id) {
.none => Value{ .none = {} },
.int => Value{
.int = switch (lhs) {
.int => |val| val,
.num => |val| @floatToInt(i64, val),
.Bool => |val| @boolToInt(val),
.str => |str| util.parseInt(str) catch
return self.reportErr("invalid cast to int", node.lhs.firstToken()),
else => return self.reportErr("invalid cast to int", node.lhs.firstToken()),
},
},
.num => Value{
.num = switch (lhs) {
.num => |val| val,
.int => |val| @intToFloat(f64, val),
.Bool => |val| @intToFloat(f64, @boolToInt(val)),
.str => |str| util.parseNum(str) catch
return self.reportErr("invalid cast to num", node.lhs.firstToken()),
else => return self.reportErr("invalid cast to num", node.lhs.firstToken()),
},
},
.bool => Value{
.Bool = switch (lhs) {
.int => |val| val != 0,
.num => |val| val != 0,
.Bool => |val| val,
.str => |val| if (mem.eql(u8, val, "true"))
true
else if (mem.eql(u8, val, "false"))
false
else
return self.reportErr("cannot cast string to bool", node.lhs.firstToken()),
else => return self.reportErr("invalid cast to bool", node.lhs.firstToken()),
},
},
.str => Value{
.str = switch (lhs) {
.int => |val| try std.fmt.allocPrint(self.arena, "{}", .{val}),
.num => |val| try std.fmt.allocPrint(self.arena, "{d}", .{val}),
.Bool => |val| if (val) "true" else "false",
.str => |val| val,
else => return self.reportErr("invalid cast to string", node.lhs.firstToken()),
},
},
.func => return self.reportErr("cannot cast to function", node.type_tok),
.err => return self.reportErr("cannot cast to error", node.type_tok),
.range => return self.reportErr("cannot cast to range", node.type_tok),
.tuple, .map, .list, .tagged => return self.reportErr("invalid cast", node.type_tok),
else => unreachable,
},
.is => Value{
.Bool = switch (type_id) {
.none => lhs == .none,
.int => lhs == .int,
.num => lhs == .num,
.bool => lhs == .Bool,
.str => lhs == .str,
// container types never occur as compile-time values here
else => false,
},
},
};
return ret_val.maybeRt(self, res);
}
/// Generates suffix expressions: calls, member access (`a.b`) and
/// subscript (`a[b]`). Call arguments are placed in a contiguous block
/// of registers; member/subscript emit get_triple.
fn genSuffix(self: *Compiler, node: *Node.Suffix, res: Result) Error!Value {
const lhs_val = try self.genNode(node.lhs, .value);
if (lhs_val != .str and lhs_val != .func and !lhs_val.isRt()) {
return self.reportErr("invalid left hand side to suffix op", node.lhs.firstToken());
}
const l_reg = try lhs_val.toRt(self);
defer if (node.op == .call) {
// member access and subscript will set `this` so we can't free l_reg
lhs_val.free(self, l_reg);
};
const index_val = switch (node.op) {
.call => |args| {
if (args.len > max_params) {
return self.reportErr("too many arguments", node.l_tok);
}
// arguments must live in consecutive registers
var start = try self.func.regAllocN(args.len);
var i = start;
for (args) |arg| {
_ = try self.genNode(arg, .{ .rt = i });
i += 1;
}
self.func.regFreeN(start, args.len);
const sub_res = try res.toRt(self);
try self.func.code.append(.{
.call = .{
.res = sub_res.rt,
.func = l_reg,
.first = start,
},
});
// argument count follows the call instruction
try self.func.code.append(.{ .bare = @truncate(u8, args.len) });
// a call can return an error; propagate to an enclosing try
if (self.getTry()) |try_scope| {
try self.emitDouble(.move_double, try_scope.err_reg, sub_res.rt);
try try_scope.jumps.append(try self.emitJump(.jump_error, sub_res.rt));
}
return sub_res.toVal();
},
.member => Value{ .str = self.tokenSlice(node.r_tok) },
.subscript => |val| try self.genNodeNonEmpty(val, .value),
};
const index_reg = try index_val.toRt(self);
defer index_val.free(self, index_reg);
const sub_res = try res.toRt(self);
try self.emitTriple(.get_triple, sub_res.rt, l_reg, index_reg);
return sub_res.toVal();
}
/// Dispatches infix operators to the specialized generators by operator
/// class; `++` (append/concat) is handled inline, concatenating strings
/// at compile time when both sides are constant.
fn genInfix(self: *Compiler, node: *Node.Infix, res: Result) Error!Value {
switch (node.op) {
.bool_or,
.bool_and,
=> return self.genBoolInfix(node, res),
.less_than,
.less_than_equal,
.greater_than,
.greater_than_equal,
.equal,
.not_equal,
.in,
=> return self.genComparisonInfix(node, res),
.bit_and,
.bit_or,
.bit_xor,
.l_shift,
.r_shift,
=> return self.genIntInfix(node, res),
.add,
.sub,
.mul,
.div,
.div_floor,
.mod,
.pow,
=> return self.genNumericInfix(node, res),
.assign,
.add_assign,
.sub_assign,
.mul_assign,
.pow_assign,
.div_assign,
.div_floor_assign,
.mod_assign,
.l_shift_assign,
.r_shift_assign,
.bit_and_assign,
.bit_or_assign,
.bit_x_or_assign,
=> return self.genAssignInfix(node, res),
.append => {
var lhs_val = try self.genNodeNonEmpty(node.lhs, .value);
var rhs_val = try self.genNodeNonEmpty(node.rhs, .value);
if (lhs_val.isRt() or rhs_val.isRt()) {
const l_reg = try lhs_val.toRt(self);
const r_reg = try rhs_val.toRt(self);
defer rhs_val.free(self, r_reg);
// append mutates the lhs in place
try self.emitDouble(.append_double, l_reg, r_reg);
if (res == .discard) {
return Value{ .none = {} };
} else if (res == .rt) {
try self.emitDouble(.move_double, res.rt, l_reg);
return Value{ .rt = res.rt };
} else {
return Value{ .rt = l_reg };
}
}
if (res == .discard) {
return Value{ .none = {} };
}
// both sides constant: concatenate strings at compile time
const l_str = try lhs_val.getStr(self, node.lhs.firstToken());
const r_str = try rhs_val.getStr(self, node.rhs.firstToken());
const concatted = Value{
.str = try mem.concat(self.arena, u8, &[_][]const u8{ l_str, r_str }),
};
return concatted.maybeRt(self, res);
},
}
}
/// Generates `=` and the compound assignment operators. Plain `=`
/// delegates entirely to genLval; compound forms read the target as an
/// aug_assign lvalue and emit the matching triple op back into it.
fn genAssignInfix(self: *Compiler, node: *Node.Infix, res: Result) Error!Value {
if (res != .discard) {
return self.reportErr("assignment produces no value", node.tok);
}
const rhs_val = try self.genNodeNonEmpty(node.rhs, .value);
if (node.op == .assign) {
try self.genLval(node.lhs, .{ .assign = &rhs_val });
return .empty;
}
// lhs_val is filled in by genLval with the target's runtime value
var lhs_val: Value = .none;
try self.genLval(node.lhs, .{ .aug_assign = &lhs_val });
// type-check constant rhs before emitting anything
if (!rhs_val.isRt()) switch (node.op) {
.add_assign,
.sub_assign,
.mul_assign,
.pow_assign,
.div_assign,
.div_floor_assign,
.mod_assign,
=> try rhs_val.checkNum(self, node.rhs.firstToken()),
.l_shift_assign,
.r_shift_assign,
.bit_and_assign,
.bit_or_assign,
.bit_x_or_assign,
=> _ = try rhs_val.getInt(self, node.rhs.firstToken()),
else => unreachable,
};
const reg = try rhs_val.toRt(self);
defer rhs_val.free(self, reg);
// result register == lhs register: the op updates the target in place
try self.emitTriple(switch (node.op) {
.add_assign => .add_triple,
.sub_assign => .sub_triple,
.mul_assign => .mul_triple,
.pow_assign => .pow_triple,
.div_assign => .div_triple,
.div_floor_assign => .div_floor_triple,
.mod_assign => .mod_triple,
.l_shift_assign => .l_shift_triple,
.r_shift_assign => .r_shift_triple,
.bit_and_assign => .bit_and_triple,
.bit_or_assign => .bit_or_triple,
.bit_x_or_assign => .bit_xor_triple,
else => unreachable,
}, lhs_val.getRt(), lhs_val.getRt(), reg);
return Value.empty;
}
/// True when either operand is a float (`num`), i.e. constant folding
/// must use floating point arithmetic rather than integer arithmetic.
fn needNum(a: Value, b: Value) bool {
    if (a == .num) return true;
    return b == .num;
}
/// Generates arithmetic operators (+ - * / // % **). Runtime operands
/// emit the triple op; constant operands are folded, promoting to float
/// arithmetic when either side is a num.
fn genNumericInfix(self: *Compiler, node: *Node.Infix, res: Result) Error!Value {
var lhs_val = try self.genNodeNonEmpty(node.lhs, .value);
var rhs_val = try self.genNodeNonEmpty(node.rhs, .value);
if (rhs_val.isRt() or lhs_val.isRt()) {
const sub_res = try res.toRt(self);
const l_reg = try lhs_val.toRt(self);
const r_reg = try rhs_val.toRt(self);
defer {
rhs_val.free(self, r_reg);
lhs_val.free(self, l_reg);
}
try self.emitTriple(switch (node.op) {
.add => .add_triple,
.sub => .sub_triple,
.mul => .mul_triple,
.div => .div_triple,
.div_floor => .div_floor_triple,
.mod => .mod_triple,
.pow => .pow_triple,
else => unreachable,
}, sub_res.rt, l_reg, r_reg);
return sub_res.toVal();
}
try lhs_val.checkNum(self, node.lhs.firstToken());
try rhs_val.checkNum(self, node.rhs.firstToken());
// TODO makeRuntime if overflow
const ret_val = switch (node.op) {
.add => blk: {
if (needNum(lhs_val, rhs_val)) {
break :blk Value{ .num = lhs_val.getNum() + rhs_val.getNum() };
}
break :blk Value{ .int = lhs_val.int + rhs_val.int };
},
.sub => blk: {
if (needNum(lhs_val, rhs_val)) {
break :blk Value{ .num = lhs_val.getNum() - rhs_val.getNum() };
}
break :blk Value{ .int = lhs_val.int - rhs_val.int };
},
.mul => blk: {
if (needNum(lhs_val, rhs_val)) {
break :blk Value{ .num = lhs_val.getNum() * rhs_val.getNum() };
}
break :blk Value{ .int = lhs_val.int * rhs_val.int };
},
// `/` always produces a num
.div => Value{ .num = lhs_val.getNum() / rhs_val.getNum() },
.div_floor => blk: {
if (needNum(lhs_val, rhs_val)) {
break :blk Value{ .int = @floatToInt(i64, @divFloor(lhs_val.getNum(), rhs_val.getNum())) };
}
break :blk Value{ .int = @divFloor(lhs_val.int, rhs_val.int) };
},
.mod => blk: {
if (needNum(lhs_val, rhs_val)) {
break :blk Value{ .num = @rem(lhs_val.getNum(), rhs_val.getNum()) };
}
break :blk Value{ .int = std.math.rem(i64, lhs_val.int, rhs_val.int) catch @panic("TODO") };
},
.pow => blk: {
if (needNum(lhs_val, rhs_val)) {
break :blk Value{ .num = std.math.pow(f64, lhs_val.getNum(), rhs_val.getNum()) };
}
break :blk Value{
.int = std.math.powi(i64, lhs_val.int, rhs_val.int) catch
return self.reportErr("TODO integer overflow", node.tok),
};
},
else => unreachable,
};
return ret_val.maybeRt(self, res);
}
/// Generates comparison operators (< <= > >= == != in). Runtime
/// operands emit the triple op; constant operands are folded. Order
/// comparisons require numeric operands; `in` on constants only
/// supports string containment.
fn genComparisonInfix(self: *Compiler, node: *Node.Infix, res: Result) Error!Value {
    var lhs_val = try self.genNodeNonEmpty(node.lhs, .value);
    var rhs_val = try self.genNodeNonEmpty(node.rhs, .value);
    if (rhs_val.isRt() or lhs_val.isRt()) {
        const sub_res = try res.toRt(self);
        const l_reg = try lhs_val.toRt(self);
        const r_reg = try rhs_val.toRt(self);
        defer {
            rhs_val.free(self, r_reg);
            lhs_val.free(self, l_reg);
        }
        try self.emitTriple(switch (node.op) {
            .less_than => .less_than_triple,
            .less_than_equal => .less_than_equal_triple,
            .greater_than => .greater_than_triple,
            .greater_than_equal => .greater_than_equal_triple,
            .equal => .equal_triple,
            .not_equal => .not_equal_triple,
            .in => .in_triple,
            else => unreachable,
        }, sub_res.rt, l_reg, r_reg);
        return sub_res.toVal();
    }
    // order comparisons are only allowed on numbers
    switch (node.op) {
        .in, .equal, .not_equal => {},
        else => {
            try lhs_val.checkNum(self, node.lhs.firstToken());
            try rhs_val.checkNum(self, node.rhs.firstToken());
        },
    }
    const ret_val: Value = switch (node.op) {
        .less_than => .{
            .Bool = if (needNum(lhs_val, rhs_val))
                lhs_val.getNum() < rhs_val.getNum()
            else
                lhs_val.int < rhs_val.int,
        },
        .less_than_equal => .{
            .Bool = if (needNum(lhs_val, rhs_val))
                lhs_val.getNum() <= rhs_val.getNum()
            else
                lhs_val.int <= rhs_val.int,
        },
        .greater_than => .{
            .Bool = if (needNum(lhs_val, rhs_val))
                lhs_val.getNum() > rhs_val.getNum()
            else
                lhs_val.int > rhs_val.int,
        },
        .greater_than_equal => .{
            .Bool = if (needNum(lhs_val, rhs_val))
                lhs_val.getNum() >= rhs_val.getNum()
            else
                lhs_val.int >= rhs_val.int,
        },
        .equal, .not_equal => blk: {
            const eql = switch (lhs_val) {
                // void payload; a tag comparison replaces the old
                // unused `|a_val|` capture
                .none => rhs_val == .none,
                .int => |a_val| switch (rhs_val) {
                    .int => |b_val| a_val == b_val,
                    .num => |b_val| @intToFloat(f64, a_val) == b_val,
                    else => false,
                },
                .num => |a_val| switch (rhs_val) {
                    .int => |b_val| a_val == @intToFloat(f64, b_val),
                    .num => |b_val| a_val == b_val,
                    else => false,
                },
                .Bool => |a_val| switch (rhs_val) {
                    .Bool => |b_val| a_val == b_val,
                    else => false,
                },
                .str => |a_val| switch (rhs_val) {
                    .str => |b_val| mem.eql(u8, a_val, b_val),
                    else => false,
                },
                .empty, .rt, .ref, .func => unreachable,
            };
            // broken LLVM module found: Terminator found in the middle of a basic block!
            // break :blk Value{ .Bool = if (node.op == .Equal) eql else !eql };
            const copy = if (node.op == .equal) eql else !eql;
            break :blk Value{ .Bool = copy };
        },
        .in => .{
            .Bool = switch (lhs_val) {
                .str => mem.indexOf(
                    u8,
                    try lhs_val.getStr(self, node.lhs.firstToken()),
                    try rhs_val.getStr(self, node.rhs.firstToken()),
                ) != null,
                else => return self.reportErr("TODO: range without strings", node.lhs.firstToken()),
            },
        },
        else => unreachable,
    };
    return ret_val.maybeRt(self, res);
}
/// Generates short-circuiting `and`/`or`. A constant lhs decides the
/// result at compile time; otherwise the rhs is guarded by a
/// jump_false/jump_true over its code.
fn genBoolInfix(self: *Compiler, node: *Node.Infix, res: Result) Error!Value {
var lhs_val = try self.genNodeNonEmpty(node.lhs, .value);
if (!lhs_val.isRt()) {
const l_bool = try lhs_val.getBool(self, node.lhs.firstToken());
if (node.op == .bool_and) {
// `false and x` => false, without evaluating x
if (!l_bool) return lhs_val;
} else {
// `true or x` => true, without evaluating x
if (l_bool) return lhs_val;
}
// lhs does not decide the result; rhs does
return self.genNodeNonEmpty(node.rhs, res);
}
const sub_res = try res.toRt(self);
const l_reg = lhs_val.getRt();
// result defaults to lhs; rhs overwrites it unless skipped
try self.emitDouble(.move_double, sub_res.rt, l_reg);
const rhs_skip = if (node.op == .bool_and)
try self.emitJump(.jump_false, l_reg)
else
try self.emitJump(.jump_true, l_reg);
_ = try self.genNodeNonEmpty(node.rhs, sub_res);
self.finishJump(rhs_skip);
return sub_res.toVal();
}
/// Generates the bitwise and shift operators (& | ^ << >>). Runtime
/// operands emit the triple op; constant operands are folded, with
/// negative shift amounts rejected and oversized shifts yielding 0.
fn genIntInfix(self: *Compiler, node: *Node.Infix, res: Result) Error!Value {
var lhs_val = try self.genNodeNonEmpty(node.lhs, .value);
var rhs_val = try self.genNodeNonEmpty(node.rhs, .value);
if (lhs_val.isRt() or rhs_val.isRt()) {
const sub_res = try res.toRt(self);
const l_reg = try lhs_val.toRt(self);
const r_reg = try rhs_val.toRt(self);
defer {
rhs_val.free(self, r_reg);
lhs_val.free(self, l_reg);
}
try self.emitTriple(switch (node.op) {
.bit_and => .bit_and_triple,
.bit_or => .bit_or_triple,
.bit_xor => .bit_xor_triple,
.l_shift => .l_shift_triple,
.r_shift => .r_shift_triple,
else => unreachable,
}, sub_res.rt, l_reg, r_reg);
return sub_res.toVal();
}
const l_int = try lhs_val.getInt(self, node.lhs.firstToken());
const r_int = try rhs_val.getInt(self, node.rhs.firstToken());
const ret_val: Value = switch (node.op) {
.bit_and => .{ .int = l_int & r_int },
.bit_or => .{ .int = l_int | r_int },
.bit_xor => .{ .int = l_int ^ r_int },
.l_shift => blk: {
if (r_int < 0)
return self.reportErr("shift by negative amount", node.rhs.firstToken());
// shifting past the width produces 0 rather than UB
const val = if (r_int > std.math.maxInt(u6)) 0 else l_int << @truncate(u6, @bitCast(u64, r_int));
break :blk Value{ .int = val };
},
.r_shift => blk: {
if (r_int < 0)
return self.reportErr("shift by negative amount", node.rhs.firstToken());
// shifting past the width produces 0 rather than UB
const val = if (r_int > std.math.maxInt(u6)) 0 else l_int >> @truncate(u6, @bitCast(u64, r_int));
break :blk Value{ .int = val };
},
else => unreachable,
};
return ret_val.maybeRt(self, res);
}
/// Generates a `let`/`const` declaration: evaluates the right hand side
/// and destructures it into the declared pattern. Declarations produce
/// no value.
fn genDecl(self: *Compiler, node: *Node.Decl, res: Result) Error!Value {
    const init_val = try self.genNodeNonEmpty(node.value, .value);
    const is_mut = self.tokens[node.let_const].id == .Keyword_let;
    if (is_mut) {
        try self.genLval(node.capture, .{ .mut = &init_val });
    } else {
        try self.genLval(node.capture, .{ .constant = &init_val });
    }
    return Value.empty;
}
/// Loads the value of an identifier. When a result register is
/// requested the symbol's register is moved (mutable) or copied
/// (constant) into it; otherwise a reference to it is returned.
fn genIdentifier(self: *Compiler, node: *Node.SingleToken, res: Result) Error!Value {
    const sym = try self.findSymbol(node.tok);
    switch (res) {
        .rt => |dest| {
            const op: bog.Op = if (sym.mut) .move_double else .copy_double;
            try self.emitDouble(op, dest, sym.reg);
            return res.toVal();
        },
        else => return Value{ .ref = sym.reg },
    }
}
/// Generates `this` by loading the current receiver into the result register.
fn genThis(self: *Compiler, node: *Node.SingleToken, res: Result) Error!Value {
    const dest = try res.toRt(self);
    try self.emitSingle(.load_this_single, dest.rt);
    return dest.toVal();
}
/// Generates a literal node by building its compile-time value and then
/// materializing it into the requested result.
fn genLiteral(self: *Compiler, node: *Node.Literal, res: Result) Error!Value {
    const value: Value = switch (node.kind) {
        .none => .none,
        .True => .{ .Bool = true },
        .False => .{ .Bool = false },
        .int => .{
            .int = util.parseInt(self.tokenSlice(node.tok)) catch
                return self.reportErr("TODO big int", node.tok),
        },
        // The tokenizer guarantees the slice is a valid number literal.
        .num => .{ .num = util.parseNum(self.tokenSlice(node.tok)) catch unreachable },
        .str => .{ .str = try self.parseStr(node.tok) },
    };
    return value.maybeRt(self, res);
}
/// Generates an `import` expression: the module-name string literal is
/// unescaped, interned, and loaded via an import instruction.
fn genImport(self: *Compiler, node: *Node.Import, res: Result) Error!Value {
    const dest = try res.toRt(self);
    const name = try self.parseStr(node.str_tok);
    const name_loc = try self.putString(name);
    try self.emitOff(.import_off, dest.rt, name_loc);
    return dest.toVal();
}
/// Generates `error expr`, wrapping the (optional) payload in an error value.
/// With no capture the payload defaults to `none`.
fn genError(self: *Compiler, node: *Node.Error, res: Result) Error!Value {
    var payload = Value{ .none = {} };
    if (node.capture) |capture| {
        payload = try self.genNodeNonEmpty(capture, .value);
    }
    const dest = try res.toRt(self);
    const payload_reg = try payload.toRt(self);
    defer payload.free(self, payload_reg);
    try self.emitDouble(.build_error_double, dest.rt, payload_reg);
    return dest.toVal();
}
/// Generates a tagged value (`@name` or `@name(expr)`).
/// Encoded as two consecutive code slots: the tagged instruction itself
/// followed by the interned name's string offset.
fn genTagged(self: *Compiler, node: *Node.Tagged, res: Result) Error!Value {
    const sub_res = try res.toRt(self);
    const str_loc = try self.putString(self.tokenSlice(node.name));
    try self.func.code.append(.{
        .tagged = .{
            .res = sub_res.rt,
            .kind = if (node.capture == null) .none else .some,
            .arg = if (node.capture) |some| blk: {
                const val = try self.genNodeNonEmpty(some, .value);
                const reg = try val.toRt(self);
                // NOTE(review): this defer runs when the block exits, i.e.
                // before the instruction is appended — relies on nothing
                // reusing the register in between; confirm intended.
                defer val.free(self, reg);
                break :blk reg;
            } else 0,
        },
    });
    try self.func.code.append(.{
        .bare = str_loc,
    });
    return sub_res.toVal();
}
/// Generates a range literal `start:end:step`.
/// Encoded as two consecutive code slots: the first carries the result
/// register plus start/end operands, the second carries the step operand
/// and the kind flags describing how each operand is to be interpreted.
fn genRange(self: *Compiler, node: *Node.Range, res: Result) Error!Value {
    const sub_res = try res.toRt(self);
    const start = try self.genRangePart(node.start);
    const end = try self.genRangePart(node.end);
    const step = try self.genRangePart(node.step);
    try self.func.code.append(.{
        .range = .{
            .res = sub_res.rt,
            .start = start.val,
            .end = end.val,
        },
    });
    try self.func.code.append(.{
        .range_cont = .{
            .step = step.val,
            .start_kind = start.kind,
            .end_kind = end.kind,
            .step_kind = step.kind,
        },
    });
    return sub_res.toVal();
}
/// One lowered component (start, end or step) of a range instruction.
const RangePart = struct {
    /// Tells the VM how to interpret `val` (default / register / immediate).
    kind: bog.Instruction.RangeKind = .default,
    /// Register index or immediate value, depending on `kind`.
    val: RegRef = 0,
};
/// Lowers one optional range component.
/// A missing component keeps the `.default` kind; runtime values are
/// referenced by register; small non-negative constants are embedded
/// directly in the instruction as immediates.
fn genRangePart(self: *Compiler, node: ?*Node) !RangePart {
    var res: RangePart = .{};
    const some = node orelse return res;
    const value = try self.genNodeNonEmpty(some, .value);
    if (value.isRt()) {
        res.kind = .reg;
        res.val = value.getRt();
        return res;
    }
    const int = try value.getInt(self, some.firstToken());
    switch (int) {
        // Fits the instruction's immediate field.
        0...std.math.maxInt(RegRef) => {
            res.kind = .value;
            res.val = @truncate(RegRef, @bitCast(u64, int));
        },
        // Negative or too large: materialize into a register instead.
        else => {
            res.kind = .reg;
            res.val = try value.toRt(self);
        },
    }
    return res;
}
/// Generates a format string literal.
/// Lowered as a call: the literal's pieces are concatenated into a plain
/// format string, then `"...".format((args...))` is invoked at runtime.
fn genFormatString(self: *Compiler, node: *Node.FormatString, res: Result) Error!Value {
    // transform f"foo{255:X}bar" into "foo{X}bar".format((255,))
    var buf = std.ArrayList(u8).init(self.gpa);
    defer buf.deinit();
    var i: usize = 0;
    for (node.format) |str| {
        var slice = self.tokenSlice(str);
        // Strip the leading `f"` of the first piece, a leading `:` of
        // middle pieces, and the trailing quote of the final piece.
        if (slice[0] == 'f') slice = slice[2..];
        if (slice[0] == ':') slice = slice[1..];
        if (self.tokens[str].id == .FormatEnd) slice = slice[0 .. slice.len - 1];
        try buf.ensureCapacity(buf.capacity + slice.len);
        buf.expandToCapacity();
        i += try self.parseStrExtra(str, slice, buf.items[i..]);
    }
    const format_string = try self.arena.dupe(u8, buf.items[0..i]);
    // format_reg = "formatstring".format
    const sub_res = try res.toRt(self);
    const format_reg = try (Value{ .str = format_string }).toRt(self);
    const format_member_str = try (Value{ .str = "format" }).toRt(self);
    try self.emitTriple(.get_triple, format_reg, format_reg, format_member_str);
    defer self.func.regFree(format_member_str);
    // arg_reg = (args...)
    // The member-name register is dead after get_triple, so it is reused
    // to hold the argument tuple.
    const arg_reg = format_member_str;
    try self.emitOff(.build_tuple_off, arg_reg, @intCast(u32, node.args.len));
    // prepare registers
    const index_reg = try self.func.regAlloc();
    defer self.func.regFree(index_reg);
    const result_reg = try self.func.regAlloc();
    defer self.func.regFree(result_reg);
    var index = Value{ .int = 0 };
    // Evaluate each argument and store it into the tuple by index.
    for (node.args) |arg| {
        _ = try self.genNode(arg, .{ .rt = result_reg });
        try self.makeRuntime(index_reg, index);
        try self.emitTriple(.set_triple, arg_reg, index_reg, result_reg);
        index.int += 1;
    }
    // sub_res.rt = format_reg(arg_reg)
    try self.func.code.append(.{
        .call = .{
            .res = sub_res.rt,
            .func = format_reg,
            .first = arg_reg,
        },
    });
    // Argument count for the call instruction.
    try self.func.code.append(.{ .bare = 1 });
    return sub_res.toVal();
}
/// Generates a try-catch expression.
/// The body runs inside a `try` scope that records the jumps raised on
/// error; handlers run in order, with `let`/`const` captures and bare
/// `catch` acting as catch-alls. Uncaught errors are returned.
fn genTry(self: *Compiler, node: *Node.Try, res: Result) Error!Value {
    const sub_res = switch (res) {
        .rt, .discard => res,
        .value => Result{
            // value is only known at runtime
            .rt = try self.func.regAlloc(),
        },
    };
    var try_scope = Try{
        .jumps = JumpList.init(self.gpa),
        .err_reg = try self.func.regAlloc(),
    };
    defer {
        try_scope.jumps.deinit();
        self.func.regFree(try_scope.err_reg);
    }
    try self.scopes.append(.{ .try_catch = &try_scope });
    const expr_val = try self.genNode(node.expr, sub_res);
    if (sub_res != .rt and expr_val.isRt()) {
        try self.emitSingle(.discard_single, expr_val.getRt());
    }
    // no longer in try scope
    assert(self.scopes.pop() == .try_catch);
    if (try_scope.jumps.items.len == 0) {
        // no possible error to be handled
        return sub_res.toVal();
    }
    // if no error jump over all catchers
    const first = try self.emitJump(.jump_not_error, try_scope.err_reg);
    for (try_scope.jumps.items) |jump| {
        self.finishJump(jump);
    }
    // Reuse the list to collect jumps that skip past the handlers.
    try_scope.jumps.items.len = 0;
    try try_scope.jumps.append(first);
    // otherwise unwrap the error
    try self.emitDouble(.unwrap_error_double, try_scope.err_reg, try_scope.err_reg);
    const scope_count = self.scopes.items.len;
    defer self.clearScopes(scope_count);
    const capture_reg = try self.func.regAlloc();
    defer self.func.regFree(capture_reg);
    var seen_catch_all = false;
    for (node.catches) |catcher, catcher_i| {
        if (seen_catch_all) {
            return self.reportErr("additional handlers after catch-all handler", catcher.firstToken());
        }
        const catch_node = @fieldParentPtr(Node.Catch, "base", catcher);
        var catch_skip: ?usize = null;
        if (catch_node.capture) |some| {
            if (catch_node.let_const) |tok| {
                // A binding capture matches any error, so it is a catch-all.
                seen_catch_all = true;
                try self.genLval(some, if (self.tokens[tok].id == .Keyword_let)
                    .{ .mut = &Value{ .rt = try_scope.err_reg } }
                else
                    .{ .constant = &Value{ .rt = try_scope.err_reg } });
            } else {
                _ = try self.genNodeNonEmpty(some, .{ .rt = capture_reg });
                // if not equal to the error value jump over this handler
                try self.emitTriple(.equal_triple, capture_reg, capture_reg, try_scope.err_reg);
                catch_skip = try self.emitJump(.jump_false, capture_reg);
            }
        } else {
            seen_catch_all = true;
        }
        const catch_res = try self.genNode(catch_node.expr, sub_res);
        if (sub_res != .rt and catch_res.isRt()) {
            try self.emitSingle(.discard_single, catch_res.getRt());
        }
        // exit this handler (unless it's the last one)
        if (catcher_i + 1 != node.catches.len) {
            try try_scope.jumps.append(try self.emitJump(.jump, null));
        }
        // jump over this handler if the value doesn't match
        if (catch_skip) |some| {
            self.finishJump(some);
        }
    }
    // return uncaught errors
    if (!seen_catch_all) {
        try self.emitSingle(.return_single, try_scope.err_reg);
    }
    // exit try-catch
    for (try_scope.jumps.items) |jump| {
        self.finishJump(jump);
    }
    return sub_res.toVal();
}
/// Generates a match expression.
/// Cases are tried in order; `_` and `let`/`const` captures are catch-alls
/// after which no further cases are allowed. Multi-item cases OR their
/// equality checks together. Without a catch-all the result defaults to
/// `none` when used as a value.
fn genMatch(self: *Compiler, node: *Node.Match, res: Result) Error!Value {
    const sub_res = switch (res) {
        .rt, .discard => res,
        .value => Result{
            // value is only known at runtime
            .rt = try self.func.regAlloc(),
        },
    };
    const cond_val = try self.genNodeNonEmpty(node.expr, .value);
    // NOTE(review): cond_reg is never explicitly freed here — confirm
    // whether toRt-allocated registers are reclaimed elsewhere.
    const cond_reg = try cond_val.toRt(self);
    var jumps = JumpList.init(self.gpa);
    defer jumps.deinit();
    var case_reg = try self.func.regAlloc();
    defer self.func.regFree(case_reg);
    var seen_catch_all = false;
    for (node.cases) |uncasted_case, case_i| {
        if (seen_catch_all) {
            return self.reportErr("additional cases after catch-all case", uncasted_case.firstToken());
        }
        const scope_count = self.scopes.items.len;
        defer self.clearScopes(scope_count);
        var expr: *Node = undefined;
        var case_skip: ?usize = null;
        if (uncasted_case.cast(.MatchCatchAll)) |case| {
            seen_catch_all = true;
            expr = case.expr;
        } else if (uncasted_case.cast(.MatchLet)) |case| {
            seen_catch_all = true;
            expr = case.expr;
            try self.genLval(case.capture, if (self.tokens[case.let_const].id == .Keyword_let)
                .{ .mut = &Value{ .rt = cond_reg } }
            else
                .{ .constant = &Value{ .rt = cond_reg } });
        } else if (uncasted_case.cast(.MatchCase)) |case| {
            expr = case.expr;
            if (case.items.len == 1) {
                _ = try self.genNodeNonEmpty(case.items[0], .{ .rt = case_reg });
                // if not equal to the error value jump over this handler
                try self.emitTriple(.equal_triple, case_reg, case_reg, cond_reg);
                case_skip = try self.emitJump(.jump_false, case_reg);
            } else {
                // Multiple patterns: OR all of the equality results together.
                var tally_reg = try self.func.regAlloc();
                defer self.func.regFree(tally_reg);
                for (case.items) |item, i| {
                    _ = try self.genNodeNonEmpty(item, .{ .rt = case_reg });
                    if (i == 0) {
                        try self.emitTriple(.equal_triple, tally_reg, case_reg, cond_reg);
                    } else {
                        try self.emitTriple(.equal_triple, case_reg, case_reg, cond_reg);
                        try self.emitTriple(.bool_or_triple, tally_reg, tally_reg, case_reg);
                    }
                }
                case_skip = try self.emitJump(.jump_false, tally_reg);
            }
        } else unreachable;
        const case_res = try self.genNode(expr, sub_res);
        if (sub_res != .rt and case_res.isRt()) {
            try self.emitSingle(.discard_single, case_res.getRt());
        }
        // exit match (unless this is the last case)
        if (case_i + 1 != node.cases.len) {
            try jumps.append(try self.emitJump(.jump, null));
        }
        // jump over this case if the value doesn't match
        if (case_skip) |some| {
            self.finishJump(some);
        }
    }
    // result in none if no matches
    if (!seen_catch_all and sub_res == .rt) {
        try self.func.code.append(.{
            .primitive = .{ .res = sub_res.rt, .kind = .none },
        });
    }
    // exit match
    for (jumps.items) |jump| {
        self.finishJump(jump);
    }
    return sub_res.toVal();
}
/// Describes how a left-hand-side node should be generated.
const Lval = union(enum) {
    /// `const` declaration target.
    constant: *const Value,
    /// `let` declaration target.
    mut: *const Value,
    /// Plain assignment to an existing mutable symbol.
    assign: *const Value,
    /// Augmented assignment; the current value is written back through the pointer.
    aug_assign: *Value,
};
/// Dispatches left-hand-side generation based on the node kind.
/// Expression nodes that can never be assigned to are rejected here;
/// the rest forward to the specialized genLVal* helpers.
fn genLval(self: *Compiler, node: *Node, lval: Lval) Error!void {
    switch (node.id) {
        .Literal,
        .Block,
        .Prefix,
        .Decl,
        .Infix,
        .If,
        .TypeInfix,
        .Fn,
        .While,
        .Jump,
        .Catch,
        .Import,
        .For,
        .This,
        .Match,
        .MatchCatchAll,
        .MatchLet,
        .MatchCase,
        .FormatString,
        .Try,
        => return self.reportErr("invalid left hand side to assignment", node.firstToken()),
        .Discard => return self.reportErr("'_' can only be used to discard unwanted tuple/list items in destructuring assignment", node.firstToken()),
        .Grouped => return self.genLval(@fieldParentPtr(Node.Grouped, "base", node).expr, lval),
        .Tagged => return self.genLValTagged(@fieldParentPtr(Node.Tagged, "base", node), lval),
        .Range => return self.genLValRange(@fieldParentPtr(Node.Range, "base", node), lval),
        .Error => return self.genLValError(@fieldParentPtr(Node.Error, "base", node), lval),
        // MapItem nodes only appear inside Map nodes and are handled there.
        .MapItem => unreachable,
        .List => return self.genLValTupleList(@fieldParentPtr(Node.ListTupleMap, "base", node), lval),
        .Tuple => return self.genLValTupleList(@fieldParentPtr(Node.ListTupleMap, "base", node), lval),
        .Map => return self.genLValMap(@fieldParentPtr(Node.ListTupleMap, "base", node), lval),
        .Identifier => return self.genLValIdentifier(@fieldParentPtr(Node.SingleToken, "base", node), lval),
        .Suffix => return self.genLValSuffix(@fieldParentPtr(Node.Suffix, "base", node), lval),
    }
}
/// Destructures a runtime range value into the `start`/`end`/`step`
/// sub-patterns of a range-shaped left-hand side.
fn genLValRange(self: *Compiler, node: *Node.Range, lval: Lval) Error!void {
    const val = switch (lval) {
        .constant, .mut, .assign => |val| val,
        .aug_assign => return self.reportErr("invalid left hand side to augmented assignment", node.base.firstToken()),
    };
    if (!val.isRt()) {
        return self.reportErr("expected a range", node.base.firstToken());
    }
    if (node.start) |some| {
        try self.genLValRangePart(some, val.getRt(), lval, "start");
    }
    if (node.end) |some| {
        try self.genLValRangePart(some, val.getRt(), lval, "end");
    }
    if (node.step) |some| {
        try self.genLValRangePart(some, val.getRt(), lval, "step");
    }
}
/// Extracts one member (`start`/`end`/`step`) from a range register and
/// destructures it into `node`.
fn genLValRangePart(self: *Compiler, node: *Node, range_reg: RegRef, lval: Lval, part: []const u8) Error!void {
    // NOTE(review): result_reg is not freed here — presumably ownership
    // transfers to the declared symbol via genLval; confirm for `.assign`.
    const result_reg = try self.func.regAlloc();
    const index_reg = try self.func.regAlloc();
    defer self.func.regFree(index_reg);
    const str_loc = try self.putString(part);
    try self.emitOff(.const_string_off, index_reg, str_loc);
    try self.emitTriple(.get_triple, result_reg, range_reg, index_reg);
    const rt_val = Value{ .rt = result_reg };
    try self.genLval(node, switch (lval) {
        .constant => .{ .constant = &rt_val },
        .mut => .{ .mut = &rt_val },
        .assign => .{ .assign = &rt_val },
        .aug_assign => unreachable,
    });
}
/// Destructures a runtime tagged value: checks the tag name and binds the
/// unwrapped payload to the capture pattern.
fn genLValTagged(self: *Compiler, node: *Node.Tagged, lval: Lval) Error!void {
    const val = switch (lval) {
        .constant, .mut, .assign => |val| val,
        .aug_assign => return self.reportErr("invalid left hand side to augmented assignment", node.at),
    };
    if (!val.isRt()) {
        return self.reportErr("expected a tagged value", node.base.firstToken());
    }
    if (node.capture == null) {
        return self.reportErr("expected a capture", node.base.firstToken());
    }
    const str_loc = try self.putString(self.tokenSlice(node.name));
    const unwrap_reg = try self.func.regAlloc();
    // unwrap_tagged is followed by a bare slot carrying the tag name offset.
    try self.emitDouble(.unwrap_tagged, unwrap_reg, val.getRt());
    try self.func.code.append(.{ .bare = str_loc });
    const rhs_val = Value{ .rt = unwrap_reg };
    try self.genLval(node.capture.?, switch (lval) {
        .constant => .{ .constant = &rhs_val },
        .mut => .{ .mut = &rhs_val },
        .assign => .{ .assign = &rhs_val },
        else => unreachable,
    });
}
/// Destructures a runtime error value, binding the unwrapped payload to
/// the capture pattern.
fn genLValError(self: *Compiler, node: *Node.Error, lval: Lval) Error!void {
    const val = switch (lval) {
        .constant, .mut, .assign => |val| val,
        .aug_assign => return self.reportErr("invalid left hand side to augmented assignment", node.tok),
    };
    if (!val.isRt()) {
        return self.reportErr("expected an error", node.tok);
    }
    if (node.capture == null) {
        return self.reportErr("expected a capture", node.tok);
    }
    const unwrap_reg = try self.func.regAlloc();
    try self.emitDouble(.unwrap_error_double, unwrap_reg, val.getRt());
    const rhs_val = Value{ .rt = unwrap_reg };
    try self.genLval(node.capture.?, switch (lval) {
        .constant => .{ .constant = &rhs_val },
        .mut => .{ .mut = &rhs_val },
        .assign => .{ .assign = &rhs_val },
        else => unreachable,
    });
}
/// Destructures a runtime tuple or list element-by-element into the
/// sub-patterns; `_` entries skip their index without emitting code.
fn genLValTupleList(self: *Compiler, node: *Node.ListTupleMap, lval: Lval) Error!void {
    if (node.values.len > std.math.maxInt(u32)) {
        return self.reportErr("too many items", node.l_tok);
    }
    const res = switch (lval) {
        .constant, .mut, .assign => |val| val,
        .aug_assign => return self.reportErr("invalid left hand side to augmented assignment", node.l_tok),
    };
    if (!res.isRt()) {
        return self.reportErr("expected a tuple/list", node.l_tok);
    }
    // prepare registers
    const container_reg = res.getRt();
    const index_reg = try self.func.regAlloc();
    defer self.func.regFree(index_reg);
    var result_reg = try self.func.regAlloc();
    var index = Value{ .int = 0 };
    for (node.values) |val, i| {
        if (val.id == .Discard) {
            index.int += 1;
            continue;
        }
        try self.makeRuntime(index_reg, index);
        try self.emitTriple(.get_triple, result_reg, container_reg, index_reg);
        const rt_val = Value{ .rt = result_reg };
        try self.genLval(val, switch (lval) {
            .constant => .{ .constant = &rt_val },
            .mut => .{ .mut = &rt_val },
            .assign => .{ .assign = &rt_val },
            else => unreachable,
        });
        index.int += 1;
        // Declarations keep their register, so a fresh one is needed for
        // each remaining item; plain assignments may reuse it.
        if (i + 1 != node.values.len and lval != .assign)
            result_reg = try self.func.regAlloc();
    }
}
/// Destructures a runtime map entry-by-entry into the value sub-patterns.
/// `key: pattern` reads `"key"`; a bare identifier reads its own name.
fn genLValMap(self: *Compiler, node: *Node.ListTupleMap, lval: Lval) Error!void {
    if (node.values.len > std.math.maxInt(u32)) {
        return self.reportErr("too many items", node.base.firstToken());
    }
    const res = switch (lval) {
        .constant, .mut, .assign => |val| val,
        .aug_assign => return self.reportErr("invalid left hand side to augmented assignment", node.l_tok),
    };
    if (!res.isRt()) {
        return self.reportErr("expected a map", node.base.firstToken());
    }
    const container_reg = res.getRt();
    const index_reg = try self.func.regAlloc();
    defer self.func.regFree(index_reg);
    var result_reg = try self.func.regAlloc();
    for (node.values) |val, i| {
        const item = @fieldParentPtr(Node.MapItem, "base", val);
        if (item.key) |some| {
            const last_node = self.getLastNode(some);
            if (last_node.id == .Identifier) {
                // `oldname: newname` is equal to `"oldname": newname`
                const ident = @fieldParentPtr(Node.SingleToken, "base", last_node);
                const str_loc = try self.putString(self.tokenSlice(ident.tok));
                try self.emitOff(.const_string_off, index_reg, str_loc);
            } else {
                _ = try self.genNode(some, .{ .rt = index_reg });
            }
        } else {
            const last_node = self.getLastNode(item.value);
            if (last_node.id != .Identifier) {
                return self.reportErr("expected a key", item.value.firstToken());
            }
            // `oldname` is equal to `"oldname": oldname`
            const ident = @fieldParentPtr(Node.SingleToken, "base", last_node);
            const str_loc = try self.putString(self.tokenSlice(ident.tok));
            try self.emitOff(.const_string_off, index_reg, str_loc);
        }
        try self.emitTriple(.get_triple, result_reg, container_reg, index_reg);
        const rt_val = Value{ .rt = result_reg };
        try self.genLval(item.value, switch (lval) {
            .constant => .{ .constant = &rt_val },
            .mut => .{ .mut = &rt_val },
            .assign => .{ .assign = &rt_val },
            else => unreachable,
        });
        // Declarations keep their register; allocate a fresh one per item.
        if (i + 1 != node.values.len and lval != .assign) result_reg = try self.func.regAlloc();
    }
}
/// Generates an identifier left-hand side.
/// Declarations create a new symbol (copying `let`-bound references so
/// later mutation cannot alias the source); assignments write into the
/// existing symbol's register; augmented assignments hand back a ref.
fn genLValIdentifier(self: *Compiler, node: *Node.SingleToken, lval: Lval) Error!void {
    switch (lval) {
        .mut, .constant => |val| {
            if (self.getForwardDecl(node.tok)) |some| {
                // only functions can be forward declared
                assert(val.* == .func);
                return self.makeRuntime(some, val.*);
            }
            try self.checkRedeclaration(node.tok);
            var reg = try val.toRt(self);
            if (val.* == .ref and lval == .mut) {
                // copy on assign
                const copy_reg = try self.func.regAlloc();
                try self.emitDouble(.copy_double, copy_reg, reg);
                reg = copy_reg;
            }
            const sym = Symbol{
                .name = self.tokenSlice(node.tok),
                .reg = reg,
                .mut = lval == .mut,
            };
            try self.scopes.append(.{ .symbol = sym });
        },
        .assign => |val| {
            const sym = try self.findSymbol(node.tok);
            if (!sym.mut) {
                return self.reportErr("assignment to constant", node.tok);
            }
            // References are copied, runtime values moved, constants materialized.
            if (val.* == .ref) {
                try self.emitDouble(.copy_double, sym.reg, val.getRt());
            } else if (val.isRt()) {
                try self.emitDouble(.move_double, sym.reg, val.getRt());
            } else {
                try self.makeRuntime(sym.reg, val.*);
            }
        },
        .aug_assign => |val| {
            const sym = try self.findSymbol(node.tok);
            if (!sym.mut) {
                return self.reportErr("assignment to constant", node.tok);
            }
            val.* = Value{ .ref = sym.reg };
        },
    }
}
/// Generates a member/subscript left-hand side (`a.b = x`, `a[i] = x`).
/// Calls cannot be assigned to, and declarations cannot target subscripts.
fn genLValSuffix(self: *Compiler, node: *Node.Suffix, lval: Lval) Error!void {
    if (node.op == .call) {
        return self.reportErr("invalid left hand side to assignment", node.lhs.firstToken());
    }
    const lhs_val = try self.genNode(node.lhs, .value);
    if (lhs_val != .str and !lhs_val.isRt()) {
        return self.reportErr("invalid left hand side to suffix op", node.lhs.firstToken());
    }
    const l_reg = try lhs_val.toRt(self);
    defer lhs_val.free(self, l_reg);
    const index_val = switch (node.op) {
        .call => unreachable, // rejected above
        .member => Value{ .str = self.tokenSlice(node.r_tok) },
        .subscript => |val| try self.genNodeNonEmpty(val, .value),
    };
    const index_reg = try index_val.toRt(self);
    defer index_val.free(self, index_reg);
    switch (lval) {
        .mut, .constant => return self.reportErr("cannot declare to subscript", node.l_tok),
        .aug_assign => |val| {
            // Read the current value; the caller writes the result back.
            const res_reg = try self.func.regAlloc();
            try self.emitTriple(.get_triple, res_reg, l_reg, index_reg);
            val.* = Value{ .rt = res_reg };
        },
        .assign => |rhs_val| {
            const r_reg = try rhs_val.toRt(self);
            defer rhs_val.free(self, r_reg);
            try self.emitTriple(.set_triple, l_reg, index_reg, r_reg);
        },
    }
}
/// Emits a line_info pseudo-instruction mapping the following bytecode to
/// the source offset of `node`'s first token.
fn addLineInfo(self: *Compiler, node: *Node) !void {
    const source_offset = self.tokens[node.firstToken()].start;
    try self.func.code.append(.{ .op = .{ .op = .line_info } });
    try self.func.code.append(.{ .bare = source_offset });
}
/// Patches the placeholder at `jump_addr` with the relative distance from
/// the jump slot to the current end of the bytecode.
fn finishJump(self: *Compiler, jump_addr: usize) void {
    const distance = self.func.code.items.len - jump_addr;
    self.func.code.items[jump_addr] = .{ .bare = @intCast(u32, distance) };
}
/// Strips any number of grouping parentheses, returning the innermost node.
fn getLastNode(self: *Compiler, first_node: *Node) *Node {
    var current = first_node;
    while (current.id == .Grouped) {
        current = @fieldParentPtr(Node.Grouped, "base", current).expr;
    }
    return current;
}
/// Returns the raw source text covered by `token`.
fn tokenSlice(self: *Compiler, token: TokenIndex) []const u8 {
    const t = self.tokens[token];
    return self.source[t.start..t.end];
}
/// Parses a string literal token into an unescaped buffer.
/// The surrounding quotes are stripped before unescaping.
/// The returned slice is owned by the compiler's arena.
fn parseStr(self: *Compiler, tok: TokenIndex) ![]u8 {
    var slice = self.tokenSlice(tok);
    slice = slice[1 .. slice.len - 1];
    // Unescaping only shrinks, so the raw length is a safe upper bound.
    var buf = try self.arena.alloc(u8, slice.len);
    return buf[0..try self.parseStrExtra(tok, slice, buf)];
}
/// Copies `slice` into `buf`, resolving escape sequences.
/// Returns the number of bytes written; `buf` must be at least `slice.len`.
fn parseStrExtra(self: *Compiler, tok: TokenIndex, slice: []const u8, buf: []u8) !usize {
    var slice_i: u32 = 0;
    var i: u32 = 0;
    while (slice_i < slice.len) : (slice_i += 1) {
        const c = slice[slice_i];
        switch (c) {
            '\\' => {
                // NOTE(review): assumes the tokenizer rejected a trailing
                // lone '\' — this index is not bounds-checked.
                slice_i += 1;
                buf[i] = switch (slice[slice_i]) {
                    '\\' => '\\',
                    'n' => '\n',
                    'r' => '\r',
                    't' => '\t',
                    '\'' => '\'',
                    '"' => '"',
                    'x', 'u' => return self.reportErr("TODO: more escape sequences", tok),
                    else => unreachable, // tokenizer validates escapes
                };
            },
            else => buf[i] = c,
        }
        i += 1;
    }
    return i;
}
/// Records a compile error message at `tok`'s source offset and returns
/// error.CompileError so callers can simply `return self.reportErr(...)`.
fn reportErr(self: *Compiler, msg: []const u8, tok: TokenIndex) Error {
    try self.errors.add(.{ .data = msg }, self.tokens[tok].start, .err);
    return error.CompileError;
}
};
|
src/compiler.zig
|
const std = @import("std");
const builtin = @import("builtin");
const ThreadPool = @import("thread_pool");
/// Global thread pool which mimics other async runtimes.
/// Initialized inside run() and shut down when the entry task completes.
var thread_pool: ThreadPool = undefined;
/// Global allocator which mimics other async runtimes.
/// Set by run() before any task is scheduled.
pub var allocator: *std.mem.Allocator = undefined;
/// Zig async wrapper around ThreadPool.Task
/// Zig async wrapper around ThreadPool.Task: resumes the stored frame
/// when the pool invokes the task's callback.
const Task = struct {
    tp_task: ThreadPool.Task = .{ .callback = onSchedule },
    /// Frame to resume once the pool runs this task.
    frame: anyframe,
    fn onSchedule(tp_task: *ThreadPool.Task) void {
        // Recover the Task from its embedded ThreadPool.Task field.
        const task = @fieldParentPtr(Task, "tp_task", tp_task);
        resume task.frame;
    }
    /// Submits this task to the global thread pool.
    fn schedule(self: *Task) void {
        const batch = ThreadPool.Batch.from(&self.tp_task);
        thread_pool.schedule(batch);
    }
};
/// Extracts the return type of the function `asyncFn`.
fn ReturnTypeOf(comptime asyncFn: anytype) type {
    return @typeInfo(@TypeOf(asyncFn)).Fn.return_type.?;
}
/// Entry point for the synchronous main() to an async function.
/// Initializes the global thread pool and allocator
/// then calls asyncFn(...args) in the thread pool and returns the results.
pub fn run(comptime asyncFn: anytype, args: anytype) ReturnTypeOf(asyncFn) {
    const Args = @TypeOf(args);
    const Wrapper = struct {
        fn entry(task: *Task, fn_args: Args) ReturnTypeOf(asyncFn) {
            // Prepare the task to resume this frame once it's scheduled.
            // Returns execution to after `async Wrapper.entry(&task, args)`.
            suspend {
                task.* = .{ .frame = @frame() };
            }
            // Begin teardown of the thread pool after the entry point async fn completes.
            defer thread_pool.shutdown();
            // Run the entry point async fn
            return @call(.{}, asyncFn, fn_args);
        }
    };
    var task: Task = undefined;
    var frame = async Wrapper.entry(&task, args);
    // On windows, use the process heap allocator.
    // On posix systems, use the libc allocator.
    const is_windows = builtin.target.os.tag == .windows;
    // The heap allocator lives on run()'s stack; run() blocks until the
    // pool is deinitialized, so the global pointer stays valid.
    var win_heap: if (is_windows) std.heap.HeapAllocator else void = undefined;
    if (is_windows) {
        win_heap = @TypeOf(win_heap).init();
        win_heap.heap_handle = std.os.windows.kernel32.GetProcessHeap() orelse unreachable;
        allocator = &win_heap.allocator;
    } else if (builtin.link_libc) {
        allocator = std.heap.c_allocator;
    } else {
        @compileError("link to libc with '-Dc' as zig stdlib doesn't provide a fast, libc-less, general purpose allocator (yet)");
    }
    const num_cpus = std.Thread.getCpuCount() catch @panic("failed to get cpu core count");
    // Clamp the thread count to what the pool config can represent.
    const num_threads = std.math.cast(u16, num_cpus) catch std.math.maxInt(u16);
    thread_pool = ThreadPool.init(.{ .max_threads = num_threads });
    // Schedule the task onto the thread pool and wait for the thread pool to be shutdown() by the task.
    task.schedule();
    thread_pool.deinit();
    // At this point, all threads in the pool should not be running async tasks
    // so the main task/frame has been completed.
    return nosuspend await frame;
}
/// State synchronization which handles waiting for the result of a spawned async function.
/// `state` holds 0 (empty), DETACHED, or a pointer to whichever side's
/// Waiter arrived first.
fn SpawnHandle(comptime T: type) type {
    return struct {
        state: std.atomic.Atomic(usize) = std.atomic.Atomic(usize).init(0),
        const Self = @This();
        const DETACHED: usize = 0b1;
        const Waiter = struct {
            task: Task,
            value: T,
        };
        /// Called by the async function to resolve the join() coroutine with the function result.
        /// Returns without doing anything if it was detach()ed.
        pub fn complete(self: *Self, value: T) void {
            // Prepare our waiter node with the value
            var waiter = Waiter{
                .value = value,
                .task = .{ .frame = @frame() },
            };
            // Suspend get ready to wait asynchonously.
            suspend {
                // Acquire barrier to ensure we see the join()'s *Waiter writes if present.
                // Release barrier to ensure join() and detach() see our *Waiter writes.
                const state = self.state.swap(@ptrToInt(&waiter), .AcqRel);
                // If join() or detach() were called before us.
                if (state != 0) {
                    // Then fill the result value for join() & wake it up.
                    if (state != DETACHED) {
                        const joiner = @intToPtr(*Waiter, state);
                        joiner.value = waiter.value;
                        joiner.task.schedule();
                    }
                    // Also wake ourselves up since there's nothing to wait for.
                    waiter.task.schedule();
                }
            }
        }
        /// Waits for the async fn to call complete(T) and returns the T given to complete().
        pub fn join(self: *Self) T {
            var waiter = Waiter{
                .value = undefined, // the complete() task will fill this for us
                .task = .{ .frame = @frame() },
            };
            suspend {
                // Acquire barrier to ensure we see the complete()'s *Waiter writes if present.
                // Release barrier to ensure complete() sees our *Waiter writes.
                if (@intToPtr(?*Waiter, self.state.swap(@ptrToInt(&waiter), .AcqRel))) |completer| {
                    // complete() was waiting for us to consume its value.
                    // Do so and reschedule both of us.
                    waiter.value = completer.value;
                    completer.task.schedule();
                    waiter.task.schedule();
                }
            }
            // Return the waiter value which is either:
            // - consumed by the waiting complete() above or
            // - filled in by complete() when it goes to suspend
            return waiter.value;
        }
        /// Marks the handle detached so the spawned fn's result is discarded.
        pub fn detach(self: *Self) void {
            // Mark the state as detached, making a subsequent complete() no-op
            // Wake up the waiting complete() if it was there before us.
            // Acquire barrier in order to see the complete()'s *Waiter writes.
            if (@intToPtr(?*Waiter, self.state.swap(DETACHED, .Acquire))) |completer| {
                completer.task.schedule();
            }
        }
    };
}
/// A type-safe wrapper around SpawnHandle() for the spawn() caller.
/// Exactly one of join() or detach() should be called, once.
pub fn JoinHandle(comptime T: type) type {
    return struct {
        spawn_handle: *SpawnHandle(T),
        /// Blocks (asynchronously) until the spawned fn completes, returning its result.
        pub fn join(self: @This()) T {
            return self.spawn_handle.join();
        }
        /// Lets the spawned fn run to completion with its result discarded.
        pub fn detach(self: @This()) void {
            return self.spawn_handle.detach();
        }
    };
}
/// Dynamically allocates and runs an async function concurrently to the caller.
/// Returns a handle to the async function which can be used to wait for its result or detach it as a dependency.
pub fn spawn(comptime asyncFn: anytype, args: anytype) JoinHandle(ReturnTypeOf(asyncFn)) {
    const Args = @TypeOf(args);
    const Result = ReturnTypeOf(asyncFn);
    const Wrapper = struct {
        fn entry(spawn_handle_ref: **SpawnHandle(Result), fn_args: Args) void {
            // Create the spawn handle in the @Frame() and return a reference of it to the caller.
            var spawn_handle = SpawnHandle(Result){};
            spawn_handle_ref.* = &spawn_handle;
            // Reschedule the @Frame() so that it can run concurrently from the caller.
            // This returns execution to after `async Wrapper.entry()` down below.
            var task = Task{ .frame = @frame() };
            suspend {
                task.schedule();
            }
            // Run the async function and synchronize the result with the spawn/join handle.
            const result = @call(.{}, asyncFn, fn_args);
            spawn_handle.complete(result);
            // Finally, we deallocate this @Frame() since we're done with it.
            // The `suspend` is there as a trick to avoid a use-after-free:
            //
            // Zig async functions are appended with some code to resume an `await`er if any.
            // That code involves interacting with the Frame's memory which is a no-no once deallocated.
            // To avoid that, we first suspend. This ensures any frame interactions happen before the suspend-block.
            // This also means that any `await`er would block indefinitely,
            // but that's fine since we're using a custom method with SpawnHandle instead of await to get the value.
            suspend {
                allocator.destroy(@frame());
            }
        }
    };
    const frame = allocator.create(@Frame(Wrapper.entry)) catch @panic("failed to allocate coroutine");
    var spawn_handle: *SpawnHandle(Result) = undefined;
    frame.* = async Wrapper.entry(&spawn_handle, args);
    return JoinHandle(Result){ .spawn_handle = spawn_handle };
}
|
benchmarks/zig/async.zig
|
const std = @import("std");
const mem = std.mem;
const ModifierLetter = @This();
/// Allocator used for `array`; released by deinit().
allocator: *mem.Allocator,
/// Membership table indexed by `cp - lo`.
array: []bool,
/// Lowest code point in the Modifier_Letter category.
lo: u21 = 688,
/// Highest code point in the Modifier_Letter category.
hi: u21 = 125259,
/// Allocates and fills the Modifier_Letter membership table.
/// Caller must release the instance with deinit().
pub fn init(allocator: *mem.Allocator) !ModifierLetter {
    var instance = ModifierLetter{
        .allocator = allocator,
        .array = try allocator.alloc(bool, 124572),
    };
    mem.set(bool, instance.array, false);
    // Inclusive index ranges (relative to `lo`) that belong to the
    // Modifier_Letter category; single code points are one-element ranges.
    const ranges = [_][2]u21{
        .{ 0, 17 },        .{ 22, 33 },       .{ 48, 52 },       .{ 60, 60 },
        .{ 62, 62 },       .{ 196, 196 },     .{ 202, 202 },     .{ 681, 681 },
        .{ 912, 912 },     .{ 1077, 1078 },   .{ 1348, 1349 },   .{ 1354, 1354 },
        .{ 1386, 1386 },   .{ 1396, 1396 },   .{ 1400, 1400 },   .{ 1729, 1729 },
        .{ 2966, 2966 },   .{ 3094, 3094 },   .{ 3660, 3660 },   .{ 5415, 5415 },
        .{ 5523, 5523 },   .{ 6135, 6135 },   .{ 6600, 6605 },   .{ 6780, 6842 },
        .{ 6856, 6856 },   .{ 6891, 6927 },   .{ 7617, 7617 },   .{ 7631, 7631 },
        .{ 7648, 7660 },   .{ 10700, 10701 }, .{ 10943, 10943 }, .{ 11135, 11135 },
        .{ 11605, 11605 }, .{ 11649, 11653 }, .{ 11659, 11659 }, .{ 11757, 11758 },
        .{ 11852, 11854 }, .{ 40293, 40293 }, .{ 41544, 41549 }, .{ 41820, 41820 },
        .{ 41935, 41935 }, .{ 41964, 41965 }, .{ 42087, 42095 }, .{ 42176, 42176 },
        .{ 42200, 42200 }, .{ 42312, 42313 }, .{ 42783, 42783 }, .{ 42806, 42806 },
        .{ 42944, 42944 }, .{ 43053, 43053 }, .{ 43075, 43076 }, .{ 43180, 43183 },
        .{ 43193, 43193 }, .{ 64704, 64704 }, .{ 64750, 64751 }, .{ 92304, 92307 },
        .{ 93411, 93423 }, .{ 93488, 93489 }, .{ 93491, 93491 }, .{ 122503, 122509 },
        .{ 124571, 124571 },
    };
    for (ranges) |range| {
        var index: u21 = range[0];
        while (index <= range[1]) : (index += 1) {
            instance.array[index] = true;
        }
    }
    return instance;
}
// Free the boolean lookup table allocated in init.
pub fn deinit(self: *ModifierLetter) void {
self.allocator.free(self.array);
}
// isModifierLetter checks if cp is of the kind Modifier_Letter.
// cp is first clamped to [lo, hi]; the table lookup is additionally
// bounds-checked so a short table cannot be over-read.
pub fn isModifierLetter(self: ModifierLetter, cp: u21) bool {
    if (cp < self.lo) return false;
    if (cp > self.hi) return false;
    const offset = cp - self.lo;
    if (offset >= self.array.len) return false;
    return self.array[offset];
}
|
src/components/autogen/DerivedGeneralCategory/ModifierLetter.zig
|
const std = @import("std");
const expect = std.testing.expect;
const test_allocator = std.testing.allocator;
const Allocator = std.mem.Allocator;
/// Lanternfish population model (AoC 2021 day 6): fish are bucketed by
/// their spawn timer (0-8) so simulation is O(days), not O(fish).
const School = struct {
    const Self = @This();
    allocator: Allocator,
    // fish[t] = number of fish whose timer is currently t.
    fish: [9]u64,

    /// Parse a comma-separated list of timers ("3,4,3,1,2") into buckets.
    /// Returns error.InvalidTimer for values outside 0-8 (previously an
    /// out-of-bounds index panic).
    pub fn load(allocator: Allocator, str: []const u8) !Self {
        var fish = [_]u64{0} ** 9;
        var iter = std.mem.tokenize(u8, str, ",");
        while (iter.next()) |num| {
            if (num.len != 0) {
                // Trim all surrounding whitespace (handles \r\n inputs too,
                // not just the trailing \n the original stripped).
                const trimmed = std.mem.trim(u8, num, " \r\n\t");
                const value = try std.fmt.parseInt(u64, trimmed, 10);
                if (value >= fish.len) return error.InvalidTimer;
                fish[value] += 1;
            }
        }
        return Self{ .allocator = allocator, .fish = fish };
    }

    // No owned allocations; kept for API symmetry with callers' defer.
    pub fn deinit(_: *Self) void {}

    /// Simulate `age` days and return the total number of fish.
    /// Does not mutate self, so it may be called repeatedly.
    pub fn population(self: *Self, age: u64) !u64 {
        // Arrays are value types in Zig: copy the buckets directly instead
        // of the original element-by-element loop.
        var pop: [9]u64 = self.fish;
        var day: u64 = 0;
        while (day < age) : (day += 1) {
            const spawn_count = pop[0];
            var idx: u64 = 0;
            while (idx < 8) : (idx += 1) {
                pop[idx] = pop[idx + 1];
            }
            pop[8] = spawn_count; // newborns start at timer 8
            pop[6] += spawn_count; // parents reset to timer 6
        }
        var total: u64 = 0;
        for (pop) |v| {
            total += v;
        }
        return total;
    }
};
// Solve both parts against the embedded puzzle input and print the answers.
pub fn main() anyerror!void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();
    const input = @embedFile("../input.txt");
    var school = try School.load(allocator, input);
    defer school.deinit();
    const stdout = std.io.getStdOut().writer();
    try stdout.print("Part 1: {d}\n", .{try school.population(80)});
    try stdout.print("Part 2: {d}\n", .{try school.population(256)});
}
// Sample input, 80 days -> 5934 (answer given in the puzzle text).
test "part1 test" {
const str = @embedFile("../test.txt");
var s = try School.load(test_allocator, str);
defer s.deinit();
const score = try s.population(80);
std.debug.print("\nScore={d}\n", .{score});
try expect(5934 == score);
}
// Sample input, 256 days -> 26984457539 (answer given in the puzzle text).
test "part2 test" {
const str = @embedFile("../test.txt");
var s = try School.load(test_allocator, str);
defer s.deinit();
const score = try s.population(256);
std.debug.print("\nScore={d}\n", .{score});
try expect(26984457539 == score);
}
|
day06/src/main.zig
|
const std = @import("std");
const m = @import("../math/math.zig");
pub const SdfInfo = @import("sdf_info.zig").SdfInfo;
pub const appendNoMatCheck = @import("sdf_info.zig").appendNoMatCheck;
pub const IterationContext = @import("iteration_context.zig").IterationContext;
pub const Templates = @import("shader_templates.zig");
pub const EnterCommandFn = fn (ctxt: *IterationContext, iter: usize, mat_offset: usize, buffer: *[]u8) []const u8;
pub const ExitCommandFn = fn (ctxt: *IterationContext, iter: usize, buffer: *[]u8) []const u8;
pub const AppendMatCheckFn = fn (ctxt: *IterationContext, exit_command: []const u8, buffer: *[]u8, mat_offset: usize, alloc: std.mem.Allocator) []const u8;
pub const SphereBoundFn = fn (buffer: *[]u8, bound: *m.sphereBound, children: []m.sphereBound) void;
// Combinators
pub const Intersection = @import("combinators/intersection.zig");
pub const SmoothIntersection = @import("combinators/smooth_intersection.zig");
pub const SmoothSubtraction = @import("combinators/smooth_subtraction.zig");
pub const SmoothUnion = @import("combinators/smooth_union.zig");
pub const Subtraction = @import("combinators/subtraction.zig");
pub const Union = @import("combinators/union.zig");
// Materials
pub const CustomMaterial = @import("materials/custom_material.zig");
pub const Discard = @import("materials/discard.zig");
pub const Lambert = @import("materials/lambert.zig");
pub const OrenNayar = @import("materials/oren_nayar.zig");
// Modifiers
pub const Bend = @import("modifiers/bend.zig");
pub const Displacement = @import("modifiers/displacement.zig");
pub const DisplacementNoise = @import("modifiers/displacement_noise.zig");
pub const Elongate = @import("modifiers/elongate.zig");
pub const FiniteRepetition = @import("modifiers/finite_repetition.zig");
pub const InfiniteRepetition = @import("modifiers/infinite_repetition.zig");
pub const Onion = @import("modifiers/onion.zig");
pub const Rounding = @import("modifiers/rounding.zig");
pub const Scale = @import("modifiers/scale.zig");
pub const Symmetry = @import("modifiers/symmetry.zig");
pub const Transform = @import("modifiers/transform.zig");
pub const Twist = @import("modifiers/twist.zig");
pub const Wrinkles = @import("modifiers/wrinkles.zig");
// Special
pub const CustomNode = @import("special/custom_node.zig");
pub const SphereBoundNode = @import("special/sphere_bound.zig");
// Surfaces
pub const BezierCurve = @import("surfaces/bezier_curve.zig");
pub const BoundingBox = @import("surfaces/bounding_box.zig");
pub const Box = @import("surfaces/box.zig");
pub const CappedCone = @import("surfaces/capped_cone.zig");
pub const CappedCylinder = @import("surfaces/capped_cylinder.zig");
pub const CappedTorus = @import("surfaces/capped_torus.zig");
pub const Capsule = @import("surfaces/capsule.zig");
pub const Cone = @import("surfaces/cone.zig");
pub const Dodecahedron = @import("surfaces/dodecahedron.zig");
pub const DodecahedronEdges = @import("surfaces/dodecahedron_edges.zig");
pub const Ellipsoid = @import("surfaces/ellipsoid.zig");
pub const HexagonalPrism = @import("surfaces/hexagonal_prism.zig");
pub const Icosahedron = @import("surfaces/icosahedron.zig");
pub const IcosahedronEdges = @import("surfaces/icosahedron_edges.zig");
pub const InfiniteCone = @import("surfaces/infinite_cone.zig");
pub const InfiniteCylinder = @import("surfaces/infinite_cylinder.zig");
pub const Link = @import("surfaces/link.zig");
pub const Octahedron = @import("surfaces/octahedron.zig");
pub const Plane = @import("surfaces/plane.zig");
pub const Pyramid = @import("surfaces/pyramid.zig");
pub const Quad = @import("surfaces/quad.zig");
pub const Rhombus = @import("surfaces/rhombus.zig");
pub const RoundBox = @import("surfaces/round_box.zig");
pub const RoundCone = @import("surfaces/round_cone.zig");
pub const RoundedCylinder = @import("surfaces/rounded_cylinder.zig");
pub const SolidAngle = @import("surfaces/solid_angle.zig");
pub const Sphere = @import("surfaces/sphere.zig");
pub const Torus = @import("surfaces/torus.zig");
pub const Triangle = @import("surfaces/triangle.zig");
pub const TriangularPrism = @import("surfaces/triangular_prism.zig");
pub const VerticalCappedCone = @import("surfaces/vertical_capped_cone.zig");
pub const VerticalCappedCylinder = @import("surfaces/vertical_capped_cylinder.zig");
pub const VerticalCapsule = @import("surfaces/vertical_capsule.zig");
pub const VerticalRoundCone = @import("surfaces/vertical_round_cone.zig");
// Registry of every SDF node type's metadata; feeds node_map below.
// Order mirrors the import groups above: combinators, materials,
// modifiers, special, surfaces.
const all_node_types = [_]SdfInfo{
Intersection.info,
SmoothIntersection.info,
SmoothSubtraction.info,
SmoothUnion.info,
Subtraction.info,
Union.info,
CustomMaterial.info,
Discard.info,
Lambert.info,
OrenNayar.info,
Bend.info,
Displacement.info,
DisplacementNoise.info,
Elongate.info,
FiniteRepetition.info,
InfiniteRepetition.info,
Onion.info,
Rounding.info,
Scale.info,
Symmetry.info,
Transform.info,
Twist.info,
Wrinkles.info,
CustomNode.info,
SphereBoundNode.info,
BezierCurve.info,
BoundingBox.info,
Box.info,
CappedCone.info,
CappedCylinder.info,
CappedTorus.info,
Capsule.info,
Cone.info,
Dodecahedron.info,
DodecahedronEdges.info,
Ellipsoid.info,
HexagonalPrism.info,
Icosahedron.info,
IcosahedronEdges.info,
InfiniteCone.info,
InfiniteCylinder.info,
Link.info,
Octahedron.info,
Plane.info,
Pyramid.info,
Quad.info,
Rhombus.info,
RoundBox.info,
RoundCone.info,
RoundedCylinder.info,
SolidAngle.info,
Sphere.info,
Torus.info,
Triangle.info,
TriangularPrism.info,
VerticalCappedCone.info,
VerticalCappedCylinder.info,
VerticalCapsule.info,
VerticalRoundCone.info,
};
// Key/value pair in the tuple shape std.ComptimeStringMap expects:
// @"0" is the key (node name), @"1" is the associated SdfInfo.
const SdfKV = struct {
@"0": []const u8,
@"1": *const SdfInfo,
};
// Build (name -> *SdfInfo) pairs for node_map.
// NOTE(review): returns a slice of a function-local array; this is only
// valid because the function is evaluated at comptime (results are
// interned) -- do not call at runtime.
fn collectToKV(comptime collection: []const SdfInfo) []SdfKV {
var kvs = [1]SdfKV{undefined} ** collection.len;
for (collection) |*sdf_info, i|
kvs[i] = .{ .@"0" = sdf_info.name, .@"1" = sdf_info };
return kvs[0..];
}
// Comptime lookup table from node name to its SdfInfo metadata.
pub const node_map = std.ComptimeStringMap(
*const SdfInfo,
collectToKV(all_node_types[0..]),
);
|
src/sdf/sdf.zig
|
const std = @import("std");
const builtin = @import("builtin");
const optz = @import("optz");
const slimy = @import("slimy.zig");
const version = @import("version.zig");
// Entry point: parse arguments, run each requested search, and return a
// process exit code (0 = success, 1 = user or runtime error).
pub fn main() u8 {
const stdout = std.io.getStdOut();
const stderr = std.io.getStdErr();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
// All argument errors are translated into user-facing messages and exit
// codes here, so parseArgs itself stays silent (except JsonError).
const options = parseArgs(arena.allocator()) catch |err| switch (err) {
error.Help => {
usage(stdout);
return 0;
},
error.Version => {
stdout.writeAll(version.full_desc ++ "\n") catch return 1;
return 0;
},
error.Benchmark => {
return @import("bench.zig").main();
},
error.TooManyArgs => {
usage(stderr);
std.log.err("Too many arguments", .{});
return 1;
},
error.NotEnoughArgs => {
usage(stderr);
std.log.err("Not enough arguments", .{});
return 1;
},
error.InvalidFlag => {
usage(stderr);
std.log.err("Invalid option", .{});
return 1;
},
error.MissingParameter => {
usage(stderr);
std.log.err("Missing option parameter", .{});
return 1;
},
error.InvalidFormat => {
std.log.err("Invalid output format. Must be 'human' or 'csv'", .{});
return 1;
},
error.InvalidMethod => {
std.log.err("Invalid search method. Must be 'gpu' or 'cpu'", .{});
return 1;
},
error.InvalidCharacter => {
std.log.err("Invalid number", .{});
return 1;
},
error.Overflow => {
std.log.err("Number too large", .{});
return 1;
},
error.JsonError => return 1, // Handled in parseArgs
error.OutOfMemory => @panic("Out of memory"),
// InvalidCmdLine is only in the arg-iterator error set on Windows; on
// other targets the remaining error set is empty, hence `switch (e) {}`.
else => |e| if (builtin.os.tag == .windows) {
switch (e) {
error.InvalidCmdLine => {
std.log.err("Encoding error in command line arguments", .{});
return 1;
},
}
} else switch (e) {},
};
var ctx = OutputContext.init(std.heap.page_allocator, options.output);
defer ctx.flush();
// Run each search; OutputContext methods are used as callbacks.
for (options.searches) |search| {
slimy.search(search, &ctx, OutputContext.result, OutputContext.progress) catch |err| switch (err) {
error.ThreadQuotaExceeded => @panic("Thread quota exceeded"),
error.SystemResources => @panic("System resources error"),
error.OutOfMemory => @panic("Out of memory"),
error.LockedMemoryLimitExceeded => unreachable,
error.Unexpected => @panic("Unexpected error"),
error.VulkanInit => {
std.log.err("Vulkan initialization failed. Your GPU may not support Vulkan; try using the CPU search instead (-mcpu option)", .{});
return 1;
},
error.ShaderInit => @panic("Compute pipeline init failed"),
error.BufferInit => @panic("Buffer allocation failed"),
error.ShaderExec => @panic("GPU compute execution failed"),
error.Timeout => @panic("Shader execution timed out"),
error.MemoryMapFailed => @panic("Mapping buffer memory failed"),
};
}
return 0;
}
// Collects search results and renders a progress spinner. Public methods
// take the mutex, so worker threads may call them concurrently.
// Results go to `f` (stdout in main); progress goes to stderr.
const OutputContext = struct {
lock: std.Thread.Mutex = .{},
// Destination for result lines (stdout in main(), a pipe in tests).
f: std.fs.File,
options: OutputOptions,
// Pending results; only used when options.sort is set.
buf: std.ArrayList(slimy.Result),
// Non-null when progress reporting is enabled and the timer started.
progress_timer: ?std.time.Timer,
// Spinner advances once per quarter second.
const progress_tick = std.time.ns_per_s / 4;
const progress_spinner = [_]u21{
'◜',
'◝',
'◞',
'◟',
};
pub fn init(allocator: std.mem.Allocator, options: OutputOptions) OutputContext {
return OutputContext{
.f = std.io.getStdOut(),
.options = options,
.buf = std.ArrayList(slimy.Result).init(allocator),
.progress_timer = if (options.progress)
std.time.Timer.start() catch null
else
null,
};
}
// Accept one result. Sorted mode buffers it (falling back to immediate
// output on OOM); unsorted mode prints it right away.
pub fn result(self: *OutputContext, res: slimy.Result) void {
self.lock.lock();
defer self.lock.unlock();
if (self.options.sort) {
self.buf.append(res) catch {
std.log.warn("Out of memory while attempting to sort items; output may be unsorted", .{});
self.output(res);
return;
};
std.sort.insertionSort(slimy.Result, self.buf.items, {}, slimy.Result.sortLessThan);
} else {
self.output(res);
}
}
// Formatted write to the output file; output failure is fatal.
fn print(self: OutputContext, comptime fmt: []const u8, args: anytype) void {
self.f.writer().print(fmt, args) catch |err| {
std.debug.panic("Error writing output: {s}", .{@errorName(err)});
};
}
// Write one result in the configured format, clearing the progress line first.
fn output(self: *OutputContext, res: slimy.Result) void {
self.progressLineClear();
switch (self.options.format) {
.human => self.print("({:>5}, {:>5}) {}\n", .{ res.x, res.z, res.count }),
.csv => self.print("{},{},{}\n", .{ res.x, res.z, res.count }),
}
}
// Erase the current progress line: carriage return + ANSI erase-to-EOL.
fn progressLineClear(self: *OutputContext) void {
if (self.progress_timer != null) {
std.debug.print("\r\x1b[K", .{});
}
}
// Redraw spinner and completion percentage on stderr.
pub fn progress(self: *OutputContext, completed: u64, total: u64) void {
self.lock.lock();
defer self.lock.unlock();
if (self.progress_timer) |timer| {
const tick = timer.read() / progress_tick;
self.progressLineClear();
// Integer math keeps two decimal digits, then scales back to percent.
std.debug.print("[{u}] {d:.2}%", .{
progress_spinner[tick % progress_spinner.len],
@intToFloat(f64, 100_00 * completed / total) * 0.01,
});
}
}
// Drain any buffered (sorted) results, clear the progress line, free buffer.
pub fn flush(self: *OutputContext) void {
for (self.buf.items) |res| {
self.output(res);
}
self.progressLineClear();
self.buf.deinit();
}
};
// User-selected output behaviour (-f, -u, -q flags).
pub const OutputOptions = struct {
format: Format,
// Buffer and sort results before printing (disabled by -u).
sort: bool,
// Show the progress spinner on stderr (disabled by -q or dumb terminals).
progress: bool,
const Format = enum {
csv,
human,
};
};
test "output context" {
    var ctx = OutputContext.init(std.testing.allocator, .{
        .format = .csv,
        .sort = true,
        .progress = false,
    });
    // Redirect output into a pipe so the test does not write to stdout.
    const fds = try std.os.pipe();
    var read_end = std.fs.File{ .handle = fds[0] };
    defer read_end.close();
    var write_end = std.fs.File{ .handle = fds[1] };
    defer write_end.close();
    ctx.f = write_end;
    ctx.progress(1, 10);
    ctx.flush();
}
// Print the usage/help text to `out` (stdout for -h, stderr on misuse).
// Write errors are deliberately ignored.
fn usage(out: std.fs.File) void {
out.writeAll(
\\Usage:
\\    slimy [OPTIONS] SEED RANGE THRESHOLD
\\    slimy [OPTIONS] -s SEED
\\
\\    -h              Display this help message
\\    -v              Display version information
\\    -f FORMAT       Output format (human [default] or csv)
\\    -u              Disable output sorting
\\    -q              Disable progress reporting
\\    -m METHOD       Search method (gpu [default] or cpu)
\\    -j THREADS      Number of threads to use (for cpu method only)
\\    -s FILENAME     Read search parameters from a JSON file (or - for stdin)
\\    -b              Benchmark mode
\\
\\
) catch return;
}
// Fully parsed command line: one or more searches plus output behaviour.
const Options = struct {
searches: []const slimy.SearchParams,
output: OutputOptions,
};
/// Parse process arguments into search parameters and output options.
/// All memory comes from `allocator` (an arena in main()); named errors
/// are translated into user-facing messages by main().
fn parseArgs(allocator: std.mem.Allocator) !Options {
    var args = try std.process.argsWithAllocator(allocator);
    // (Removed a leftover `_ = args;` discard — args is used below.)
    var flags = try optz.parse(allocator, struct {
        h: bool = false, // help
        v: bool = false, // version
        f: []const u8 = "human", // output format
        u: bool = false, // disable sorting
        q: bool = false, // disable progress
        m: []const u8 = "gpu", // search method
        j: u8 = 0, // thread count; 0 = auto
        s: ?[]const u8 = null, // JSON parameter file
        b: bool = false, // benchmark mode
    }, &args);
    if (flags.h) return error.Help;
    if (flags.v) return error.Version;
    if (flags.b) return error.Benchmark;
    const format = std.meta.stringToEnum(OutputOptions.Format, flags.f) orelse {
        return error.InvalidFormat;
    };
    // Progress rendering needs ANSI escapes; skip when unsupported or -q.
    const progress = !flags.q and std.io.getStdErr().supportsAnsiEscapeCodes();
    const method_id = std.meta.stringToEnum(std.meta.Tag(slimy.SearchMethod), flags.m) orelse {
        return error.InvalidMethod;
    };
    if (method_id == .cpu) {
        if (builtin.single_threaded) {
            // Single-threaded builds can only honour -j 1.
            if (flags.j == 0) {
                flags.j = 1;
            } else if (flags.j != 1) {
                return error.SingleThreaded;
            }
        } else {
            // -j 0 means "auto": one thread per detected CPU.
            if (flags.j == 0) {
                flags.j = std.math.lossyCast(u8, std.Thread.getCpuCount() catch 1);
            }
        }
    }
    const seed_s = args.next() orelse return error.NotEnoughArgs;
    const seed = try std.fmt.parseInt(i64, seed_s, 10);
    const method: slimy.SearchMethod = switch (method_id) {
        .gpu => .gpu,
        .cpu => .{ .cpu = flags.j },
    };
    var searches: []const slimy.SearchParams = undefined;
    if (flags.s) |path| {
        // TODO: require all same world seed, or specify world seed on command line or something
        const json_params = readJsonParams(allocator, path) catch |err| {
            std.log.err("Error reading JSON file '{s}': {s}", .{ path, @errorName(err) });
            return error.JsonError;
        };
        var s = try allocator.alloc(slimy.SearchParams, json_params.len);
        for (json_params) |param, i| {
            var p = param;
            // Normalise so (x0, z0) is the min corner and (x1, z1) the max.
            if (p.x0 > p.x1) {
                std.mem.swap(i32, &p.x0, &p.x1);
            }
            if (p.z0 > p.z1) {
                std.mem.swap(i32, &p.z0, &p.z1);
            }
            s[i] = .{
                .world_seed = seed,
                .threshold = p.threshold,
                .x0 = p.x0,
                .z0 = p.z0,
                .x1 = p.x1,
                .z1 = p.z1,
                .method = method,
            };
        }
        searches = s;
    } else {
        const range = args.next() orelse return error.NotEnoughArgs;
        const threshold = args.next() orelse return error.NotEnoughArgs;
        if (args.skip()) {
            return error.TooManyArgs;
        }
        // Parse as u31 so the negated lower bound always fits in i32.
        const range_n: i32 = try std.fmt.parseInt(u31, range, 10);
        var s = try allocator.alloc(slimy.SearchParams, 1);
        s[0] = .{
            .world_seed = seed,
            .threshold = try std.fmt.parseInt(i32, threshold, 10),
            .x0 = -range_n,
            .z0 = -range_n,
            .x1 = range_n,
            .z1 = range_n,
            .method = method,
        };
        searches = s;
    }
    return Options{
        .searches = searches,
        .output = .{
            .format = format,
            .sort = !flags.u,
            .progress = progress,
        },
    };
}
// Load search parameters from a JSON file, or from stdin when path is "-".
// Returned memory is owned by `allocator` (an arena in practice).
fn readJsonParams(allocator: std.mem.Allocator, path: []const u8) ![]const JsonParams {
    const max_bytes = 1 << 20; // 1 MiB cap on parameter files
    const from_stdin = std.mem.eql(u8, path, "-");
    const data = if (from_stdin)
        try std.io.getStdIn().readToEndAlloc(allocator, max_bytes)
    else
        try std.fs.cwd().readFileAlloc(allocator, path, max_bytes);
    var stream = std.json.TokenStream.init(data);
    return try std.json.parse([]const JsonParams, &stream, .{ .allocator = allocator });
}
// Schema of one entry in the -s JSON file: slime-chunk threshold plus the
// two corners of the search rectangle (normalised later in parseArgs).
const JsonParams = struct {
threshold: i32,
x0: i32,
z0: i32,
x1: i32,
z1: i32,
};
|
src/main.zig
|
const std = @import("std");
const with_trace = true;
const assert = std.debug.assert;
// Debug print helper; compiles to nothing when with_trace is false.
fn trace(comptime fmt: []const u8, args: anytype) void {
if (with_trace) std.debug.print(fmt, args);
}
// The three light-grid instructions from the puzzle input.
const Action = enum {
toggle,
on,
off,
};
// Grid coordinate (0..999 in each axis).
const Vec2 = struct {
x: u32,
y: u32,
};
// Inclusive rectangle: both min and max corners are affected.
const Rect = struct {
min: Vec2,
max: Vec2,
};
// Parse "x,y" into a Vec2. Input is trusted puzzle data, hence the
// unchecked unwrap and `catch unreachable`.
fn parsevec2(text: []const u8) Vec2 {
    const comma = std.mem.indexOf(u8, text, ",") orelse unreachable;
    const x = std.fmt.parseInt(u32, text[0..comma], 10) catch unreachable;
    const y = std.fmt.parseInt(u32, text[comma + 1 ..], 10) catch unreachable;
    return Vec2{ .x = x, .y = y };
}
// Apply one instruction to the inclusive rectangle on the 1000x1000 grid.
// Part-two semantics: on = +1, off = -1 floored at 0, toggle = +2.
// NOTE(review): cells are u8 and `+=` traps past 255 in safe builds --
// assumes brightness never exceeds 255; confirm against real inputs.
fn fill_rect(grid: []u8, action: Action, rect: Rect) void {
    var row = rect.min.y;
    while (row <= rect.max.y) : (row += 1) {
        const base = row * 1000;
        var col = rect.min.x;
        while (col <= rect.max.x) : (col += 1) {
            const cell = &grid[base + col];
            switch (action) {
                .on => cell.* += 1,
                // Saturating decrement == "subtract 1 but never below 0".
                .off => cell.* -|= 1,
                .toggle => cell.* += 2,
            }
        }
    }
}
// Parse one instruction line into an action and its rectangle.
// Bug fix: the original sliced line[0..prefix.len] unconditionally, which
// panics (index out of bounds) for lines shorter than a prefix;
// std.mem.startsWith performs the length check first.
// NOTE(review): if no prefix matches, rectstr remains undefined, as in the
// original -- input is assumed to be well-formed puzzle data.
fn parseline(line: []const u8, action: *Action, rect: *Rect) void {
    const off = "turn off ";
    const on = "turn on ";
    const tog = "toggle ";
    const thgh = " through ";
    var rectstr: []const u8 = undefined;
    if (std.mem.startsWith(u8, line, off)) {
        action.* = .off;
        rectstr = line[off.len..];
    }
    if (std.mem.startsWith(u8, line, on)) {
        action.* = .on;
        rectstr = line[on.len..];
    }
    if (std.mem.startsWith(u8, line, tog)) {
        action.* = .toggle;
        rectstr = line[tog.len..];
    }
    const sep = std.mem.indexOf(u8, rectstr, thgh[0..]).?;
    rect.min = parsevec2(rectstr[0..sep]);
    rect.max = parsevec2(rectstr[sep + thgh.len ..]);
}
// Read the puzzle input, apply every instruction, and print the summed
// brightness of all 1,000,000 lights.
pub fn main() anyerror!void {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
    // Generous read limit; the input file is tiny.
    const limit = 1 * 1024 * 1024 * 1024;
    const text = try std.fs.cwd().readFileAlloc(allocator, "day6.txt", limit);
    const grid = try allocator.alloc(u8, 1000 * 1000);
    std.mem.set(u8, grid, 0);
    var it = std.mem.split(u8, text, "\n");
    while (it.next()) |line_full| {
        const line = std.mem.trim(u8, line_full, " \n\r\t");
        if (line.len == 0)
            continue;
        var action: Action = undefined;
        var rect: Rect = undefined;
        parseline(line, &action, &rect);
        fill_rect(grid, action, rect);
    }
    // u32 is enough: 1e6 cells * max 255 each < 2^32.
    var count: u32 = 0;
    for (grid) |g| {
        count += g;
    }
    const out = std.io.getStdOut().writer();
    // Bug fix: std.fmt requires the format arguments to be a tuple;
    // passing the bare integer `count` does not compile.
    try out.print("lights={} \n", .{count});
    // return error.SolutionNotFound;
}
|
2015/day6.zig
|
const std = @import("std");
const version = @import("version");
const zzz = @import("zzz");
const uri = @import("uri");
const api = @import("api.zig");
const utils = @import("utils.zig");
const ThreadSafeArenaAllocator = @import("ThreadSafeArenaAllocator.zig");
const Self = @This();
const Allocator = std.mem.Allocator;
const mem = std.mem;
const testing = std.testing;
/// Tag enum naming Source's active variant (pkg, github, url, local, git).
pub const SourceType = std.meta.Tag(Source);
/// Name this dependency is imported under.
alias: []const u8,
/// Where the dependency's code comes from.
src: Source,
/// The origin of a dependency. `root` fields name the package's root source
/// file and default to utils.default_root when null.
pub const Source = union(enum) {
// A versioned package from a package index.
pkg: struct {
user: []const u8,
name: []const u8,
version: version.Range,
repository: []const u8,
},
// A GitHub repository (normalized to a `git` source by fromZNode).
github: struct {
user: []const u8,
repo: []const u8,
ref: []const u8,
root: ?[]const u8,
},
// A raw archive URL.
url: struct {
str: []const u8,
root: ?[]const u8,
},
// A path on the local filesystem.
local: struct {
path: []const u8,
root: ?[]const u8,
},
// An arbitrary git remote at a specific ref.
git: struct {
url: []const u8,
ref: []const u8,
root: ?[]const u8,
},
// Human-readable one-line rendering, used in logs and listings.
pub fn format(
source: Source,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
_ = fmt;
_ = options;
switch (source) {
.pkg => |pkg| try writer.print("{s}/{s}/{s}: {}", .{ pkg.repository, pkg.user, pkg.name, pkg.version }),
.github => |gh| {
const root = gh.root orelse utils.default_root;
try writer.print("github.com/{s}/{s}/{s}: {s}", .{ gh.user, gh.repo, root, gh.ref });
},
.url => |url| {
const root = url.root orelse utils.default_root;
try writer.print("{s}/{s}", .{ url.str, root });
},
.local => |local| {
const root = local.root orelse utils.default_root;
try writer.print("{s}/{s}", .{ local.path, root });
},
.git => |git| {
const root = git.root orelse utils.default_root;
try writer.print("{s}:{s}:{s}", .{ git.url, git.ref, root });
},
}
}
};
/// There are four ways for a dependency to be declared in the project file:
///
/// A package from some other index:
/// ```
/// name:
/// pkg:
/// user: <user>
/// name: <name> # optional
/// version: <version string>
/// repository: <repository> # optional
/// ```
///
/// A github repo:
/// ```
/// name:
/// github:
/// user: <user>
/// repo: <repo>
/// ref: <ref>
/// root: <root file>
/// ```
///
/// A git repo:
/// ```
/// name:
/// git:
/// url: <url>
/// ref: <ref>
/// root: <root file>
/// ```
///
/// A raw url:
/// ```
/// name:
/// url: <url>
/// root: <root file>
/// ```
pub fn fromZNode(arena: *ThreadSafeArenaAllocator, node: *zzz.ZNode) !Self {
if (node.*.child == null) return error.NoChildren;
// check if only one child node and that it has no children
// -> shorthand form "user/repo: version-range" (a default-repo pkg).
if (node.*.child.?.value == .String and node.*.child.?.child == null) {
if (node.*.child.?.sibling != null) return error.Unknown;
const key = try utils.zGetString(node);
const info = try utils.parseUserRepo(key);
const ver_str = try utils.zGetString(node.*.child.?);
return Self{
.alias = info.repo,
.src = .{
.pkg = .{
.user = info.user,
.name = info.repo,
.version = try version.Range.parse(ver_str),
.repository = utils.default_repo,
},
},
};
}
const alias = try utils.zGetString(node);
// Scan the whole subtree for a "root" entry at any depth; this tolerates
// both the legacy and current indentation layouts (see tests below).
var root: ?[]const u8 = null;
{
var it = node;
var depth: isize = 0;
while (it.nextUntil(node, &depth)) |child| : (it = child) {
switch (child.value) {
.String => |str| if (mem.eql(u8, str, "root")) {
if (root != null) {
std.log.err("multiple roots defined", .{});
return error.Explained;
} else {
// TODO: handle child.value not being string
root = try utils.zGetString(child.child.?);
}
},
else => continue,
}
}
}
// search for src node; legacy layouts nest the source under "src",
// newer layouts put the source tag directly under the alias node.
const src_node = blk: {
var it = utils.ZChildIterator.init(node);
while (it.next()) |child| {
switch (child.value) {
.String => |str| if (mem.eql(u8, str, "src")) break :blk child,
else => continue,
}
} else break :blk node;
};
const src: Source = blk: {
const child = src_node.child orelse return error.SrcNeedsChild;
const src_str = try utils.zGetString(child);
// Match the tag string against the SourceType variants at comptime.
const src_type = inline for (std.meta.fields(SourceType)) |field| {
if (mem.eql(u8, src_str, field.name)) break @field(SourceType, field.name);
} else return error.InvalidSrcTag;
break :blk switch (src_type) {
.pkg => .{
.pkg = .{
.user = (try utils.zFindString(child, "user")) orelse return error.MissingUser,
.name = (try utils.zFindString(child, "name")) orelse alias,
.version = try version.Range.parse((try utils.zFindString(child, "version")) orelse return error.MissingVersion),
.repository = (try utils.zFindString(child, "repository")) orelse utils.default_repo,
},
},
// github declarations are normalized to plain git sources.
.github => gh: {
const url = try std.fmt.allocPrint(arena.allocator(), "https://github.com/{s}/{s}.git", .{
(try utils.zFindString(child, "user")) orelse return error.GithubMissingUser,
(try utils.zFindString(child, "repo")) orelse return error.GithubMissingRepo,
});
break :gh .{
.git = .{
.url = url,
.ref = (try utils.zFindString(child, "ref")) orelse return error.GithubMissingRef,
.root = root,
},
};
},
.url => .{
.url = .{
.str = try utils.zGetString(child.child orelse return error.UrlMissingStr),
.root = root,
},
},
.local => .{
.local = .{
.path = try utils.zGetString(child.child orelse return error.UrlMissingStr),
.root = root,
},
},
.git => .{
.git = .{
.url = (try utils.zFindString(child, "url")) orelse return error.GitMissingUrl,
.ref = (try utils.zFindString(child, "ref")) orelse return error.GitMissingRef,
.root = root,
},
},
};
};
// Validate url sources eagerly so the user gets a friendly hint.
if (src == .url) {
_ = uri.parse(src.url.str) catch |err| {
switch (err) {
error.InvalidFormat => {
std.log.err(
"Failed to parse '{s}' as a url ({}), did you forget to wrap your url in double quotes?",
.{ src.url.str, err },
);
},
else => return err,
}
return error.Explained;
};
}
// TODO: integrity
return Self{ .alias = alias, .src = src };
}
/// for testing
fn fromString(arena: *ThreadSafeArenaAllocator, str: []const u8) !Self {
    var tree = zzz.ZTree(1, 1000){};
    const tree_root = try tree.appendText(str);
    const dep_node = tree_root.*.child orelse unreachable;
    return Self.fromZNode(arena, dep_node);
}
// Compare optional strings: content-compare when both are present,
// otherwise require both to be null (expectEqual on the optionals).
fn expectNullStrEqual(expected: ?[]const u8, actual: ?[]const u8) !void {
    if (expected != null and actual != null) {
        return testing.expectEqualStrings(expected.?, actual.?);
    }
    try testing.expectEqual(expected, actual);
}
// Deep-compare two dependencies for tests: alias, active source tag, and
// every field of the matching variant (strings by content).
fn expectDepEqual(expected: Self, actual: Self) !void {
try testing.expectEqualStrings(expected.alias, actual.alias);
// Tags must match first; the switch below may then index actual.src safely.
try testing.expectEqual(@as(SourceType, expected.src), @as(SourceType, actual.src));
return switch (expected.src) {
.pkg => |pkg| {
try testing.expectEqualStrings(pkg.user, actual.src.pkg.user);
try testing.expectEqualStrings(pkg.name, actual.src.pkg.name);
try testing.expectEqualStrings(pkg.repository, actual.src.pkg.repository);
try testing.expectEqual(pkg.version, actual.src.pkg.version);
},
.github => |gh| {
try testing.expectEqualStrings(gh.user, actual.src.github.user);
try testing.expectEqualStrings(gh.repo, actual.src.github.repo);
try testing.expectEqualStrings(gh.ref, actual.src.github.ref);
try expectNullStrEqual(gh.root, actual.src.github.root);
},
.git => |git| {
try testing.expectEqualStrings(git.url, actual.src.git.url);
try testing.expectEqualStrings(git.ref, actual.src.git.ref);
try expectNullStrEqual(git.root, actual.src.git.root);
},
.url => |url| {
try testing.expectEqualStrings(url.str, actual.src.url.str);
try expectNullStrEqual(url.root, actual.src.url.root);
},
.local => |local| {
try testing.expectEqualStrings(local.path, actual.src.local.path);
try expectNullStrEqual(local.root, actual.src.local.root);
},
};
}
// Shorthand "user/name: range" resolves to a default-repo pkg named after
// the repo.
test "default repo pkg" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
try expectDepEqual(Self{
.alias = "something",
.src = .{
.pkg = .{
.user = "matt",
.name = "something",
.version = try version.Range.parse("^0.1.0"),
.repository = utils.default_repo,
},
},
}, try fromString(&arena, "matt/something: ^0.1.0"));
}
// Legacy layout: source nested under an explicit "src:" key; explicit name
// overrides the alias.
test "legacy aliased, default repo pkg" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
const actual = try fromString(&arena,
\\something:
\\ src:
\\ pkg:
\\ user: matt
\\ name: blarg
\\ version: ^0.1.0
);
const expected = Self{
.alias = "something",
.src = .{
.pkg = .{
.user = "matt",
.name = "blarg",
.version = try version.Range.parse("^0.1.0"),
.repository = utils.default_repo,
},
},
};
try expectDepEqual(expected, actual);
}
// Current layout: source tag directly under the alias, no "src:" wrapper.
test "aliased, default repo pkg" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
const actual = try fromString(&arena,
\\something:
\\ pkg:
\\ user: matt
\\ name: blarg
\\ version: ^0.1.0
);
const expected = Self{
.alias = "something",
.src = .{
.pkg = .{
.user = "matt",
.name = "blarg",
.version = try version.Range.parse("^0.1.0"),
.repository = utils.default_repo,
},
},
};
try expectDepEqual(expected, actual);
}
// Explicit repository overrides utils.default_repo; missing name falls
// back to the alias (legacy "src:" layout).
test "legacy non-default repo pkg" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
const actual = try fromString(&arena,
\\something:
\\ src:
\\ pkg:
\\ user: matt
\\ version: ^0.1.0
\\ repository: example.com
);
const expected = Self{
.alias = "something",
.src = .{
.pkg = .{
.user = "matt",
.name = "something",
.version = try version.Range.parse("^0.1.0"),
.repository = "example.com",
},
},
};
try expectDepEqual(expected, actual);
}
// Same as above in the current layout (no "src:" wrapper).
test "non-default repo pkg" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
const actual = try fromString(&arena,
\\something:
\\ pkg:
\\ user: matt
\\ version: ^0.1.0
\\ repository: example.com
);
const expected = Self{
.alias = "something",
.src = .{
.pkg = .{
.user = "matt",
.name = "something",
.version = try version.Range.parse("^0.1.0"),
.repository = "example.com",
},
},
};
try expectDepEqual(expected, actual);
}
// Legacy layout with both an explicit name and a custom repository.
test "legacy aliased, non-default repo pkg" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
const actual = try fromString(&arena,
\\something:
\\ src:
\\ pkg:
\\ user: matt
\\ name: real_name
\\ version: ^0.1.0
\\ repository: example.com
);
const expected = Self{
.alias = "something",
.src = .{
.pkg = .{
.user = "matt",
.name = "real_name",
.version = try version.Range.parse("^0.1.0"),
.repository = "example.com",
},
},
};
try expectDepEqual(expected, actual);
}
// Current layout with explicit name and custom repository.
test "aliased, non-default repo pkg" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
const actual = try fromString(&arena,
\\something:
\\ pkg:
\\ user: matt
\\ name: real_name
\\ version: ^0.1.0
\\ repository: example.com
);
const expected = Self{
.alias = "something",
.src = .{
.pkg = .{
.user = "matt",
.name = "real_name",
.version = try version.Range.parse("^0.1.0"),
.repository = "example.com",
},
},
};
try expectDepEqual(expected, actual);
}
// github sources are normalized to git sources with a synthesized URL;
// root defaults to null (legacy layout).
test "legacy github default root" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
const actual = try fromString(&arena,
\\foo:
\\ src:
\\ github:
\\ user: test
\\ repo: something
\\ ref: main
);
const expected = Self{
.alias = "foo",
.src = .{
.git = .{
.url = "https://github.com/test/something.git",
.ref = "main",
.root = null,
},
},
};
try expectDepEqual(expected, actual);
}
// Same normalization in the current layout.
test "github default root" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
const actual = try fromString(&arena,
\\foo:
\\ github:
\\ user: test
\\ repo: something
\\ ref: main
);
const expected = Self{
.alias = "foo",
.src = .{
.git = .{
.url = "https://github.com/test/something.git",
.ref = "main",
.root = null,
},
},
};
try expectDepEqual(expected, actual);
}
// Explicit root is picked up from anywhere in the subtree (legacy layout).
test "legacy github explicit root" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
const actual = try fromString(&arena,
\\foo:
\\ src:
\\ github:
\\ user: test
\\ repo: something
\\ ref: main
\\ root: main.zig
);
const expected = Self{
.alias = "foo",
.src = .{
.git = .{
.url = "https://github.com/test/something.git",
.ref = "main",
.root = "main.zig",
},
},
};
try expectDepEqual(expected, actual);
}
// The following three tests originally varied only in the indentation of
// the "root:" line inside the zzz document; the depth-agnostic root scan
// in fromZNode must accept all of them.
test "legacy github explicit root, incorrect indent" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
const actual = try fromString(&arena,
\\foo:
\\ src:
\\ github:
\\ user: test
\\ repo: something
\\ ref: main
\\ root: main.zig
);
const expected = Self{
.alias = "foo",
.src = .{
.git = .{
.url = "https://github.com/test/something.git",
.ref = "main",
.root = "main.zig",
},
},
};
try expectDepEqual(expected, actual);
}
test "legacy github explicit root, mixed with newer root indent" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
const actual = try fromString(&arena,
\\foo:
\\ src:
\\ github:
\\ user: test
\\ repo: something
\\ ref: main
\\ root: main.zig
);
const expected = Self{
.alias = "foo",
.src = .{
.git = .{
.url = "https://github.com/test/something.git",
.ref = "main",
.root = "main.zig",
},
},
};
try expectDepEqual(expected, actual);
}
test "github explicit root, incorrect indent" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
const actual = try fromString(&arena,
\\foo:
\\ github:
\\ user: test
\\ repo: something
\\ ref: main
\\ root: main.zig
);
const expected = Self{
.alias = "foo",
.src = .{
.git = .{
.url = "https://github.com/test/something.git",
.ref = "main",
.root = "main.zig",
},
},
};
try expectDepEqual(expected, actual);
}
test "github explicit root" {
var arena = ThreadSafeArenaAllocator.init(testing.allocator);
defer arena.deinit();
const actual = try fromString(&arena,
\\foo:
\\ github:
\\ user: test
\\ repo: something
\\ ref: main
\\ root: main.zig
);
const expected = Self{
.alias = "foo",
.src = .{
.git = .{
.url = "https://github.com/test/something.git",
.ref = "main",
.root = "main.zig",
},
},
};
try expectDepEqual(expected, actual);
}
// Parsing tests for raw-url dependencies, legacy (`src:`-nested) and new forms.
test "legacy raw default root" {
    var scratch = ThreadSafeArenaAllocator.init(testing.allocator);
    defer scratch.deinit();
    const want = Self{
        .alias = "foo",
        .src = .{
            .url = .{
                .str = "https://astrolabe.pm",
                .root = null,
            },
        },
    };
    const got = try fromString(&scratch,
        \\foo:
        \\  src:
        \\    url: "https://astrolabe.pm"
    );
    try expectDepEqual(want, got);
}
test "raw default root" {
    var scratch = ThreadSafeArenaAllocator.init(testing.allocator);
    defer scratch.deinit();
    const want = Self{
        .alias = "foo",
        .src = .{
            .url = .{
                .str = "https://astrolabe.pm",
                .root = null,
            },
        },
    };
    const got = try fromString(&scratch,
        \\foo:
        \\  url: "https://astrolabe.pm"
    );
    try expectDepEqual(want, got);
}
test "legacy raw explicit root" {
    var scratch = ThreadSafeArenaAllocator.init(testing.allocator);
    defer scratch.deinit();
    const want = Self{
        .alias = "foo",
        .src = .{
            .url = .{
                .str = "https://astrolabe.pm",
                .root = "main.zig",
            },
        },
    };
    const got = try fromString(&scratch,
        \\foo:
        \\  src:
        \\    url: "https://astrolabe.pm"
        \\    root: main.zig
    );
    try expectDepEqual(want, got);
}
test "legacy raw explicit root, incorrect indent" {
    var scratch = ThreadSafeArenaAllocator.init(testing.allocator);
    defer scratch.deinit();
    const want = Self{
        .alias = "foo",
        .src = .{
            .url = .{
                .str = "https://astrolabe.pm",
                .root = "main.zig",
            },
        },
    };
    // `root` at the alias level still applies to the legacy `src:` form.
    const got = try fromString(&scratch,
        \\foo:
        \\  src:
        \\    url: "https://astrolabe.pm"
        \\  root: main.zig
    );
    try expectDepEqual(want, got);
}
test "raw explicit root" {
    var scratch = ThreadSafeArenaAllocator.init(testing.allocator);
    defer scratch.deinit();
    const want = Self{
        .alias = "foo",
        .src = .{
            .url = .{
                .str = "https://astrolabe.pm",
                .root = "main.zig",
            },
        },
    };
    const got = try fromString(&scratch,
        \\foo:
        \\  url: "https://astrolabe.pm"
        \\  root: main.zig
    );
    try expectDepEqual(want, got);
}
// Parsing tests for local-path dependencies, legacy and new forms.
test "legacy local with default root" {
    var scratch = ThreadSafeArenaAllocator.init(testing.allocator);
    defer scratch.deinit();
    const want = Self{
        .alias = "foo",
        .src = .{
            .local = .{
                .path = "mypkgs/cool-project",
                .root = null,
            },
        },
    };
    const got = try fromString(&scratch,
        \\foo:
        \\  src:
        \\    local: "mypkgs/cool-project"
    );
    try expectDepEqual(want, got);
}
test "local with default root" {
    var scratch = ThreadSafeArenaAllocator.init(testing.allocator);
    defer scratch.deinit();
    const want = Self{
        .alias = "foo",
        .src = .{
            .local = .{
                .path = "mypkgs/cool-project",
                .root = null,
            },
        },
    };
    const got = try fromString(&scratch,
        \\foo:
        \\  local: "mypkgs/cool-project"
    );
    try expectDepEqual(want, got);
}
test "legacy local with explicit root" {
    var scratch = ThreadSafeArenaAllocator.init(testing.allocator);
    defer scratch.deinit();
    const want = Self{
        .alias = "foo",
        .src = .{
            .local = .{
                .path = "mypkgs/cool-project",
                .root = "main.zig",
            },
        },
    };
    const got = try fromString(&scratch,
        \\foo:
        \\  src:
        \\    local: "mypkgs/cool-project"
        \\    root: main.zig
    );
    try expectDepEqual(want, got);
}
test "legacy local with explicit root, incorrect indent" {
    var scratch = ThreadSafeArenaAllocator.init(testing.allocator);
    defer scratch.deinit();
    const want = Self{
        .alias = "foo",
        .src = .{
            .local = .{
                .path = "mypkgs/cool-project",
                .root = "main.zig",
            },
        },
    };
    // `root` at the alias level still applies to the legacy `src:` form.
    const got = try fromString(&scratch,
        \\foo:
        \\  src:
        \\    local: "mypkgs/cool-project"
        \\  root: main.zig
    );
    try expectDepEqual(want, got);
}
test "local with explicit root" {
    var scratch = ThreadSafeArenaAllocator.init(testing.allocator);
    defer scratch.deinit();
    const want = Self{
        .alias = "foo",
        .src = .{
            .local = .{
                .path = "mypkgs/cool-project",
                .root = "main.zig",
            },
        },
    };
    const got = try fromString(&scratch,
        \\foo:
        \\  local: "mypkgs/cool-project"
        \\  root: main.zig
    );
    try expectDepEqual(want, got);
}
test "pkg can't take a root" {
    // TODO: assert that fromString returns an error when a `pkg` dependency
    // declares a `root` field (root only makes sense for source-based deps).
}
test "pkg can't take an integrity" {
    // TODO: assert that fromString rejects an `integrity` field on `pkg` deps.
}
test "github can't take an integrity " {
    // TODO: assert that fromString rejects an `integrity` field on `github` deps.
}
/// serializes dependency information back into zzz format, appending nodes
/// under `parent`. With `explicit == false` the shortest equivalent form is
/// emitted (fields matching defaults are omitted, and a default-repository
/// pkg whose alias equals its name collapses to "user/name: version");
/// with `explicit == true` every field is written out.
/// Strings built here are allocated from `arena` and live as long as it does.
pub fn addToZNode(
    self: Self,
    arena: *ThreadSafeArenaAllocator,
    tree: *zzz.ZTree(1, 1000),
    parent: *zzz.ZNode,
    explicit: bool,
) !void {
    var alias = try tree.addNode(parent, .{ .String = self.alias });
    switch (self.src) {
        // Shorthand case: rewrite the alias node itself to "user/name" and
        // hang the version string directly off it.
        .pkg => |pkg| if (!explicit and
            std.mem.eql(u8, self.alias, pkg.name) and
            std.mem.eql(u8, pkg.repository, utils.default_repo))
        {
            var fifo = std.fifo.LinearFifo(u8, .{ .Dynamic = {} }).init(arena.allocator());
            try fifo.writer().print("{s}/{s}", .{ pkg.user, pkg.name });
            alias.value.String = fifo.readableSlice(0);
            const ver_str = try std.fmt.allocPrint(arena.allocator(), "{}", .{pkg.version});
            _ = try tree.addNode(alias, .{ .String = ver_str });
        } else {
            var node = try tree.addNode(alias, .{ .String = "pkg" });
            try utils.zPutKeyString(tree, node, "user", pkg.user);
            // `name` is omitted when it matches the alias (unless explicit).
            if (explicit or !std.mem.eql(u8, pkg.name, self.alias)) {
                try utils.zPutKeyString(tree, node, "name", pkg.name);
            }
            const ver_str = try std.fmt.allocPrint(arena.allocator(), "{}", .{pkg.version});
            try utils.zPutKeyString(tree, node, "version", ver_str);
            // `repository` is omitted when it is the default (unless explicit).
            if (explicit or !std.mem.eql(u8, pkg.repository, utils.default_repo)) {
                try utils.zPutKeyString(tree, node, "repository", pkg.repository);
            }
        },
        .github => |gh| {
            var github = try tree.addNode(alias, .{ .String = "github" });
            try utils.zPutKeyString(tree, github, "user", gh.user);
            try utils.zPutKeyString(tree, github, "repo", gh.repo);
            try utils.zPutKeyString(tree, github, "ref", gh.ref);
            // `root` is written only when set, or always (falling back to the
            // default) when explicit.
            if (explicit or gh.root != null) {
                try utils.zPutKeyString(tree, github, "root", gh.root orelse utils.default_root);
            }
        },
        .git => |g| {
            var git = try tree.addNode(alias, .{ .String = "git" });
            try utils.zPutKeyString(tree, git, "url", g.url);
            try utils.zPutKeyString(tree, git, "ref", g.ref);
            if (explicit or g.root != null) {
                try utils.zPutKeyString(tree, git, "root", g.root orelse utils.default_root);
            }
        },
        .url => |url| {
            try utils.zPutKeyString(tree, alias, "url", url.str);
            if (explicit or url.root != null) {
                try utils.zPutKeyString(tree, alias, "root", url.root orelse utils.default_root);
            }
        },
        .local => |local| {
            try utils.zPutKeyString(tree, alias, "local", local.path);
            if (explicit or local.root != null) {
                try utils.zPutKeyString(tree, alias, "root", local.root orelse utils.default_root);
            }
        },
    }
}
/// Asserts two zzz trees are structurally equal by walking both in lockstep
/// with ZNode.next (depth-first) and comparing depth and node values.
fn expectZzzEqual(expected: *zzz.ZNode, actual: *zzz.ZNode) !void {
    var expected_it: *zzz.ZNode = expected;
    var actual_it: *zzz.ZNode = actual;
    var expected_depth: isize = 0;
    var actual_depth: isize = 0;
    while (expected_it.next(&expected_depth)) |exp| : (expected_it = exp) {
        if (actual_it.next(&actual_depth)) |act| {
            defer actual_it = act;
            try testing.expectEqual(expected_depth, actual_depth);
            switch (exp.value) {
                .String => |str| try testing.expectEqualStrings(str, act.value.String),
                .Int => |int| try testing.expectEqual(int, act.value.Int),
                .Float => |float| try testing.expectEqual(float, act.value.Float),
                .Bool => |b| try testing.expectEqual(b, act.value.Bool),
                // Null (and any other) node values carry no payload to compare.
                else => {},
            }
        } else {
            // `actual` ran out of nodes before `expected` did.
            try testing.expect(false);
        }
    }
    // Both iterators must be exhausted at the same time (both return null).
    try testing.expectEqual(
        expected_it.next(&expected_depth),
        actual_it.next(&actual_depth),
    );
}
/// Parses `from`, serializes the resulting dependency (explicit or not),
/// and asserts the output tree equals the tree parsed from `to`.
fn serializeTest(from: []const u8, to: []const u8, explicit: bool) !void {
    var scratch = ThreadSafeArenaAllocator.init(testing.allocator);
    defer scratch.deinit();
    const dependency = try fromString(&scratch, from);
    var got_tree = zzz.ZTree(1, 1000){};
    const got_root = try got_tree.addNode(null, .{ .Null = {} });
    try dependency.addToZNode(&scratch, &got_tree, got_root, explicit);
    var want_tree = zzz.ZTree(1, 1000){};
    const want_root = try want_tree.appendText(to);
    try expectZzzEqual(want_root, got_root);
}
// Round-trip serialization tests exercising addToZNode via serializeTest.
test "serialize pkg non-explicit" {
    const input =
        \\something:
        \\  pkg:
        \\    user: test
        \\    version: ^0.0.0
        \\
    ;
    // Default repo + alias==name collapses to the shorthand form.
    const want = "test/something: ^0.0.0";
    try serializeTest(input, want, false);
}
test "serialize pkg explicit" {
    const input =
        \\something:
        \\  pkg:
        \\    user: test
        \\    version: ^0.0.0
        \\
    ;
    const want =
        \\something:
        \\  pkg:
        \\    user: test
        \\    name: something
        \\    version: ^0.0.0
        \\    repository: astrolabe.pm
        \\
    ;
    try serializeTest(input, want, true);
}
test "serialize github non-explicit" {
    // github sources serialize as plain git urls.
    const input =
        \\something:
        \\  github:
        \\    user: test
        \\    repo: my_repo
        \\    ref: master
        \\    root: main.zig
        \\
    ;
    const want =
        \\something:
        \\  git:
        \\    url: "https://github.com/test/my_repo.git"
        \\    ref: master
        \\    root: main.zig
        \\
    ;
    try serializeTest(input, want, false);
}
test "serialize github non-explicit, default root" {
    const input =
        \\something:
        \\  github:
        \\    user: test
        \\    repo: my_repo
        \\    ref: master
        \\
    ;
    const want =
        \\something:
        \\  git:
        \\    url: "https://github.com/test/my_repo.git"
        \\    ref: master
        \\
    ;
    try serializeTest(input, want, false);
}
test "serialize github explicit, default root" {
    const input =
        \\something:
        \\  github:
        \\    user: test
        \\    repo: my_repo
        \\    ref: master
        \\    root: src/main.zig
        \\
    ;
    const want =
        \\something:
        \\  git:
        \\    url: "https://github.com/test/my_repo.git"
        \\    ref: master
        \\    root: src/main.zig
        \\
    ;
    try serializeTest(input, want, true);
}
test "serialize github explicit" {
    const input =
        \\something:
        \\  github:
        \\    user: test
        \\    repo: my_repo
        \\    ref: master
        \\
    ;
    // Explicit mode fills in the default root.
    const want =
        \\something:
        \\  git:
        \\    url: "https://github.com/test/my_repo.git"
        \\    ref: master
        \\    root: src/main.zig
        \\
    ;
    try serializeTest(input, want, true);
}
test "serialize url non-explicit" {
    const round_trip =
        \\something:
        \\  url: "https://github.com"
        \\  root: main.zig
        \\
    ;
    try serializeTest(round_trip, round_trip, false);
}
test "serialize url explicit" {
    const input =
        \\something:
        \\  url: "https://github.com"
        \\
    ;
    const want =
        \\something:
        \\  url: "https://github.com"
        \\  root: src/main.zig
        \\
    ;
    try serializeTest(input, want, true);
}
|
src/Dependency.zig
|
const std = @import("std");
const stb = @import("stb_image");
const ImGuiImplementation = @import("imguiImplementation.zig");
const TimeManager = @import("timeManager.zig");
const zt = @import("../zt.zig");
const gl = @import("gl");
usingnamespace @import("glfw");
usingnamespace @import("gl");
usingnamespace @import("imgui");
pub fn App(comptime Data: type) type {
return struct {
const Self = @This();
/// One raw GLFW input event, recorded per frame so user code can inspect
/// input without registering its own GLFW callbacks.
pub const InputEvent = union(enum) {
    keyboard: struct {
        key: c_int,
        scan: c_int,
        action: c_int,
        mods: c_int,
    },
    mouseWheel: struct {
        x: f64,
        y: f64,
    },
    mouseButton: struct {
        key: c_int,
        action: c_int,
        mods: c_int,
    },
    mousePosition: struct {
        x: f64,
        y: f64,
    },
    character: struct {
        // Unicode codepoint delivered by GLFW's char callback.
        value: c_uint,
    },
};
pub const Settings = struct {
    /// When true, endFrame blocks in glfwWaitEvents until input arrives
    /// (unless animation frames are queued), saving CPU/GPU.
    energySaving: bool = true,
    /// When false, imgui draw data is discarded via igEndFrame instead of rendered.
    imguiActive: bool = true,
    /// Do not edit, this simply tells you whether or not the vsync is on. To modify this state
    /// use `context.setVsync(true)`
    vsync: bool = true,
};
pub const Context = struct {
    // Seconds of forced continuous redraw remaining (see setAnimationFrames);
    // negative means none queued.
    _updateSeconds: f32 = -1.0,
    allocator: *std.mem.Allocator = undefined,
    // User payload of type `Data`; default-initialized in begin() when not void.
    data: Data = undefined,
    settings: Settings = .{},
    window: ?*GLFWwindow = undefined,
    // Input events gathered since the last endFrame; cleared each frame.
    input: std.ArrayList(InputEvent) = undefined,
    time: TimeManager = undefined,
    // True while the app should keep running; cleared when GLFW reports the
    // window should close.
    open: bool = false,
/// Tears down the app: frees the input buffer, shuts imgui down, terminates
/// GLFW, then frees the Context itself. `self` is invalid after this call.
pub fn deinit(self: *Context) void {
    self.input.deinit();
    ImGuiImplementation.Shutdown();
    glfwTerminate();
    // Must be last: destroys the memory `self` points at.
    self.allocator.destroy(self);
}
/// Starts a frame: clears the color buffer and begins a new imgui frame.
pub fn beginFrame(self: *Context) void {
    _ = self;
    glClear(GL_COLOR_BUFFER_BIT);
    igNewFrame();
}
/// Finishes a frame: renders (or discards) imgui, swaps buffers, pumps
/// events, ticks the clock, and honors energy-saving / animation settings.
pub fn endFrame(self: *Context) void {
    var io = igGetIO();
    // Render
    if (self.settings.imguiActive) {
        igRender();
        var dd = igGetDrawData();
        ImGuiImplementation.RenderDrawData(dd);
    } else {
        // imgui was driven this frame but should not be drawn; end it cleanly.
        igEndFrame();
    }
    // Drop the events recorded this frame (capacity is retained).
    self.input.items.len = 0;
    glfwSwapBuffers(self.window);
    glfwPollEvents();
    self.time.tick();
    io.*.DeltaTime = self.time.dt;
    if (self.settings.energySaving) {
        if (self._updateSeconds > 0.0) {
            // An animation is queued: keep redrawing and count it down.
            self._updateSeconds -= self.time.dt;
        } else {
            // Sleep until the next input event instead of spinning.
            glfwWaitEvents();
        }
    }
    // Keep running while GLFW has not flagged the window for closing.
    self.open = glfwWindowShouldClose(self.window) == 0;
}
/// Resizes the OS window to the given client size in screen coordinates.
pub fn setWindowSize(self: *Context, width: c_int, height: c_int) void {
    glfwSetWindowSize(self.window, width, height);
}
/// Sets the OS window title.
/// NOTE(review): passes `string.ptr` to a C API that expects a
/// null-terminated string, but `[]const u8` carries no sentinel — callers
/// presumably pass literals (which are 0-terminated); confirm, or take
/// `[:0]const u8` in a future breaking change.
pub fn setWindowTitle(self: *Context, string: []const u8) void {
    glfwSetWindowTitle(self.window, string.ptr);
}
/// Turns vsync on or off (swap interval 1 or 0) and records the state in
/// `settings.vsync`.
pub fn setVsync(self: *Context, on: bool) void {
    const interval: c_int = if (on) 1 else 0;
    glfwSwapInterval(interval);
    self.settings.vsync = on;
}
/// Loads an image via stb_image and installs it as the window icon.
/// Logs to stderr on load failure instead of returning an error.
/// NOTE(review): `path.ptr` is handed to C without a guaranteed null
/// terminator — same caveat as setWindowTitle; confirm callers pass literals.
pub fn setWindowIcon(self: *Context, path: []const u8) void {
    // Icons must not be flipped, unlike GL textures.
    stb.stbi_set_flip_vertically_on_load(0);
    var image = GLFWimage{ .width = 0, .height = 0, .pixels = null };
    // Force 4 channels (RGBA) as GLFW expects.
    image.pixels = stb.stbi_load(path.ptr, &image.width, &image.height, 0, 4);
    if (image.width > 0 and image.height > 0) {
        glfwSetWindowIcon(self.window, 1, &image);
        // GLFW copies the pixel data, so free ours immediately.
        stb.stbi_image_free(image.pixels);
    } else {
        std.debug.print("Failed to load icon {s}", .{path});
    }
}
/// If you have an animation that needs to play you can queue an amount of seconds that will ignore energy saving.
/// Subsequent calls overwrite (not extend) the remaining time.
pub fn setAnimationFrames(self: *Context, seconds: f32) void {
    self._updateSeconds = seconds;
}
/// If you are using multithreading or anything that will require the UI to update from outside, this can force a redraw,
/// if you are using energy saving mode.
/// Posts an empty event so a blocked glfwWaitEvents returns.
pub fn forceUpdate(self: *Context) void {
    _ = self;
    glfwPostEmptyEvent();
}
/// Loads a TTF font at the given pixel size into imgui's font atlas and
/// rebuilds the font texture. Returns the imgui font handle.
/// NOTE(review): `path.ptr` is passed to C without a guaranteed null terminator.
pub fn addFont(self: *Context, path: []const u8, pxSize: f32) *ImFont {
    var io = igGetIO();
    var newFont: *ImFont = ImFontAtlas_AddFontFromFileTTF(io.*.Fonts, path.ptr, pxSize, null, ImFontAtlas_GetGlyphRangesDefault(io.*.Fonts));
    self.rebuildFont();
    return newFont;
}
/// This will destroy and rebuild the font texture used for imgui.
/// This is also called for you when you add and remove fonts.
pub fn rebuildFont(self: *Context) void {
    _ = self;
    var io = igGetIO();
    // Delete the old texture (imgui stores the GL texture id as a pointer-sized TexID).
    var texId = @intCast(c_uint, @ptrToInt(io.*.Fonts.*.TexID));
    glDeleteTextures(1, &texId);
    // Generate new texture
    var newTexId: c_uint = 0;
    var pixels: [*c]u8 = undefined;
    var fontTexWidth: i32 = undefined;
    var fontTexHeight: i32 = undefined;
    // Bakes the atlas and hands back RGBA32 pixel data owned by imgui.
    ImFontAtlas_GetTexDataAsRGBA32(io.*.Fonts, &pixels, &fontTexWidth, &fontTexHeight, null);
    // Upload texture to graphics system
    glGenTextures(1, &newTexId);
    glBindTexture(GL_TEXTURE_2D, newTexId);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    // Only set row length when the loaded GL exposes it (not in GLES2).
    if (@hasDecl(gl, "GL_UNPACK_ROW_LENGTH"))
        glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, fontTexWidth, fontTexHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
    // Store our identifier
    io.*.Fonts.*.TexID = @intToPtr(*c_void, newTexId);
    ImGuiImplementation.g_FontTexture = newTexId;
}
};
/// Creates the application Context: allocates it, initializes GLFW and an
/// OpenGL 3.3 core window, loads GL via glad, sets up imgui and all input
/// callbacks. Panics on any initialization failure. Caller owns the returned
/// Context and must call `deinit` on it.
pub fn begin(applicationAllocator: *std.mem.Allocator) *Context {
    var self: *Context = applicationAllocator.create(Context) catch unreachable;
    self.* = .{};
    if (Data != void) {
        self.data = .{};
    }
    self.allocator = applicationAllocator;
    self.input = std.ArrayList(InputEvent).init(applicationAllocator);
    self.time = TimeManager.init();
    // BUGFIX: glfwInit returns GLFW_FALSE (0) on failure, never a negative
    // value, so the previous `< 0` check could not detect a failed init.
    if (glfwInit() == 0) {
        std.debug.panic("Failed to init GLFW", .{});
    }
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    self.window = glfwCreateWindow(800, 600, "ZT Application", null, null).?;
    self.open = true;
    glfwMakeContextCurrent(self.window);
    // BUGFIX: gladLoadGL returns 0 on failure (non-zero on success); the
    // previous `< 0` check never fired.
    if (gladLoadGL() == 0) {
        std.debug.panic("Failed to init Glad GL Loader", .{});
    }
    ImGuiImplementation.init("#version 330");
    // Stash the Context so the C callbacks below can find it again.
    glfwSetWindowUserPointer(self.window, self);
    _ = glfwSetFramebufferSizeCallback(self.window, windowSizeChanged);
    _ = glfwSetKeyCallback(self.window, inputCallback);
    _ = glfwSetCharCallback(self.window, charCallback);
    _ = glfwSetMouseButtonCallback(self.window, mousebuttonCallback);
    _ = glfwSetCursorPosCallback(self.window, cursorCallback);
    _ = glfwSetScrollCallback(self.window, mouseWheelCallback);
    glClearColor(0.1, 0.1, 0.12, 1.0);
    // Seed the GL viewport and imgui display size with the initial window size.
    windowSizeChanged(self.window, 800, 600);
    return self;
}
// Callbacks
/// Framebuffer-size callback: keeps the GL viewport and imgui's DisplaySize
/// in sync with the window.
fn windowSizeChanged(win: ?*GLFWwindow, newWidth: c_int, newHeight: c_int) callconv(.C) void {
    _ = win;
    glViewport(0, 0, newWidth, newHeight);
    var io = igGetIO();
    io.*.DisplaySize = .{ .x = @intToFloat(f32, newWidth), .y = @intToFloat(f32, newHeight) };
}
/// Key callback: mirrors key state into imgui's io and records the raw
/// event for user code.
fn inputCallback(win: ?*GLFWwindow, key: c_int, scan: c_int, action: c_int, mods: c_int) callconv(.C) void {
    var context: *Context = @ptrCast(*Context, @alignCast(@alignOf(*Context), glfwGetWindowUserPointer(win).?));
    var io = igGetIO();
    // BUGFIX: GLFW_KEY_UNKNOWN is -1 and would trap in the @intCast below,
    // so unknown keys are skipped (they still get recorded for user code).
    if (key >= 0) {
        // BUGFIX: only PRESS sets and RELEASE clears the held state; the old
        // `action == GLFW_PRESS` assignment made GLFW_REPEAT events clear a
        // key that was still physically held down.
        if (action == GLFW_PRESS) {
            io.*.KeysDown[@intCast(usize, key)] = true;
        } else if (action == GLFW_RELEASE) {
            io.*.KeysDown[@intCast(usize, key)] = false;
        }
    }
    io.*.KeyShift = io.*.KeysDown[@intCast(usize, GLFW_KEY_LEFT_SHIFT)] or io.*.KeysDown[@intCast(usize, GLFW_KEY_RIGHT_SHIFT)];
    io.*.KeyCtrl = io.*.KeysDown[@intCast(usize, GLFW_KEY_LEFT_CONTROL)] or io.*.KeysDown[@intCast(usize, GLFW_KEY_RIGHT_CONTROL)];
    io.*.KeyAlt = io.*.KeysDown[@intCast(usize, GLFW_KEY_LEFT_ALT)] or io.*.KeysDown[@intCast(usize, GLFW_KEY_RIGHT_ALT)];
    context.input.append(.{ .keyboard = .{
        .key = key,
        .scan = scan,
        .action = action,
        .mods = mods,
    } }) catch unreachable;
}
/// Scroll callback: feeds wheel motion to imgui and records the raw event.
fn mouseWheelCallback(win: ?*GLFWwindow, x: f64, y: f64) callconv(.C) void {
    var context: *Context = @ptrCast(*Context, @alignCast(@alignOf(*Context), glfwGetWindowUserPointer(win).?));
    var io = igGetIO();
    // BUGFIX: accumulate instead of overwrite — several scroll events can
    // arrive between frames (especially with energy saving) and overwriting
    // dropped all but the last one. This matches the official
    // imgui_impl_glfw backend, and imgui resets the fields each NewFrame.
    io.*.MouseWheel += @floatCast(f32, y);
    io.*.MouseWheelH += @floatCast(f32, x);
    context.input.append(.{ .mouseWheel = .{
        .x = x,
        .y = y,
    } }) catch unreachable;
}
/// Mouse-button callback: mirrors the three standard buttons into imgui's
/// io.MouseDown and records the raw event for user code.
fn mousebuttonCallback(win: ?*GLFWwindow, key: c_int, action: c_int, mods: c_int) callconv(.C) void {
    var context: *Context = @ptrCast(*Context, @alignCast(@alignOf(*Context), glfwGetWindowUserPointer(win).?));
    var io = igGetIO();
    const pressed = action == GLFW_PRESS;
    switch (key) {
        GLFW_MOUSE_BUTTON_LEFT => io.*.MouseDown[ImGuiMouseButton_Left] = pressed,
        GLFW_MOUSE_BUTTON_MIDDLE => io.*.MouseDown[ImGuiMouseButton_Middle] = pressed,
        GLFW_MOUSE_BUTTON_RIGHT => io.*.MouseDown[ImGuiMouseButton_Right] = pressed,
        // Extra buttons are not tracked by imgui but still recorded below.
        else => {},
    }
    context.input.append(.{ .mouseButton = .{
        .key = key,
        .action = action,
        .mods = mods,
    } }) catch unreachable;
}
/// Cursor-position callback: updates imgui's mouse position and records the
/// raw event for user code.
fn cursorCallback(win: ?*GLFWwindow, x: f64, y: f64) callconv(.C) void {
    var context: *Context = @ptrCast(*Context, @alignCast(@alignOf(*Context), glfwGetWindowUserPointer(win).?));
    var io = igGetIO();
    io.*.MousePos = .{ .x = @floatCast(f32, x), .y = @floatCast(f32, y) };
    context.input.append(.{ .mousePosition = .{
        .x = x,
        .y = y,
    } }) catch unreachable;
}
/// Character callback: forwards the Unicode codepoint to imgui's text input
/// and records the raw event for user code.
fn charCallback(win: ?*GLFWwindow, char: c_uint) callconv(.C) void {
    var context: *Context = @ptrCast(*Context, @alignCast(@alignOf(*Context), glfwGetWindowUserPointer(win).?));
    var io = igGetIO();
    ImGuiIO_AddInputCharacter(io, char);
    context.input.append(.{ .character = .{
        .value = char,
    } }) catch unreachable;
}
};
}
|
src/zt/app.zig
|
const std = @import("std");
const Builder = std.build.Builder;
const LibExeObjStep = std.build.LibExeObjStep;
/// Can be one of "ptx" or "fatbin". "cubin" doesn't work, because it implies a main.
/// Fatbin contains device-specialized assembly for all GPU archs supported
/// by this compiler. This provides faster startup time.
/// Ptx is a high-level text assembly that can be converted to GPU-specialized
/// instructions on loading.
/// We default to .ptx because it's easier to distribute.
// TODO: make this a build option
const NVCC_OUTPUT_FORMAT = "ptx";
// Name of the self-hosted (stage2) compiler binary used to build Zig kernels.
const ZIG_STAGE2 = "zig2";
// Path of this SDK's directory, resolved at comptime from @src().
const SDK_ROOT = sdk_root() ++ "/";
/// For a given object:
/// 1. Compile the given .cu file to a .ptx
/// 2. Add lib C
/// 3. Add cuda headers, and cuda lib path
/// 4. Add Cudaz package with the given .cu file that will get imported as C code.
///
/// The .ptx file will have the same base name as the executable
/// and will appear in zig-out/bin folder next to the executable.
/// In release mode the .ptx will be embedded inside the executable
/// so you can distribute it.
pub fn addCudaz(
    b: *Builder,
    exe: *LibExeObjStep,
    comptime cuda_dir: []const u8,
    comptime kernel_path: [:0]const u8,
) void {
    // e.g. "my_exe.ptx" — output name mirrors the executable.
    const outfile = std.mem.join(
        b.allocator,
        ".",
        &[_][]const u8{ exe.name, NVCC_OUTPUT_FORMAT },
    ) catch unreachable;
    const kernel_ptx_path = std.fs.path.joinZ(
        b.allocator,
        &[_][]const u8{ b.exe_dir, outfile },
    ) catch unreachable;
    // nvcc won't create the output directory itself.
    std.fs.cwd().makePath(b.exe_dir) catch @panic("Couldn't create zig-out output dir");
    // Use nvcc to compile the .cu file
    const nvcc = b.addSystemCommand(&[_][]const u8{
        cuda_dir ++ "/bin/nvcc",
        // In Zig spirit, promote warnings to errors.
        "--Werror=all-warnings",
        "--display-error-number",
        "--" ++ NVCC_OUTPUT_FORMAT,
        "-I",
        SDK_ROOT ++ "src",
        kernel_path,
        "-o",
        kernel_ptx_path,
    });
    exe.step.dependOn(&nvcc.step);
    addCudazDeps(b, exe, cuda_dir, kernel_path, kernel_ptx_path);
}
/// Leverages stage2 to generate the .ptx from a .zig file.
/// This restricts the kernel to use the subset of Zig supported by stage2.
pub fn addCudazWithZigKernel(
    b: *Builder,
    exe: *LibExeObjStep,
    comptime cuda_dir: []const u8,
    comptime kernel_path: [:0]const u8,
) void {
    const name = std.fs.path.basename(kernel_path);
    // Dummy object only used to compute the output filename for the nvptx target.
    const dummy_zig_kernel = b.addObject(name, kernel_path);
    dummy_zig_kernel.setTarget(.{ .cpu_arch = .nvptx64, .os_tag = .cuda });
    const kernel_ptx_path = std.mem.joinZ(
        b.allocator,
        "",
        &[_][]const u8{ b.exe_dir, "/", dummy_zig_kernel.out_filename, ".ptx" },
    ) catch unreachable;
    if (needRebuild(kernel_path, kernel_ptx_path)) {
        // Actually we need to use Stage2 here, so don't use the dummy obj,
        // and manually run this command.
        // Strip the trailing ".ptx" — the compiler appends it back.
        const emit_bin = std.mem.join(b.allocator, "=", &[_][]const u8{ "-femit-bin", kernel_ptx_path[0 .. kernel_ptx_path.len - 4] }) catch unreachable;
        const zig_kernel = b.addSystemCommand(&[_][]const u8{
            ZIG_STAGE2, "build-obj", kernel_path,
            "-target", "nvptx64-cuda", "-OReleaseSafe",
            "-Dcpu=sm_30", emit_bin,
        });
        // "--verbose-llvm-ir",
        // Sanity-check the emitted ptx by assembling it with ptxas.
        const validate_ptx = b.addSystemCommand(
            &[_][]const u8{ cuda_dir ++ "/bin/ptxas", kernel_ptx_path },
        );
        // Wrap the compile step in the stage2 enable/disable text rewriting.
        validate_ptx.step.dependOn(enableAddrspace(b, &zig_kernel.step, kernel_path));
        exe.step.dependOn(&validate_ptx.step);
    } else {
        std.log.warn("Kernel up-to-date {s}", .{kernel_ptx_path});
    }
    addCudazDeps(b, exe, cuda_dir, kernel_path, kernel_ptx_path);
}
/// Wires libc, the CUDA headers/libraries, and the "cudaz" package (with its
/// generated build options) into `exe`.
pub fn addCudazDeps(
    b: *Builder,
    exe: *LibExeObjStep,
    comptime cuda_dir: []const u8,
    comptime kernel_path: [:0]const u8,
    kernel_ptx_path: [:0]const u8,
) void {
    const kernel_dir = std.fs.path.dirname(kernel_path).?;
    // Add libc and cuda headers / lib, and our own .cu files
    exe.linkLibC();
    exe.addLibPath(cuda_dir ++ "/lib64");
    exe.linkSystemLibraryName("cuda");
    exe.addIncludeDir(cuda_dir ++ "/include");
    exe.addIncludeDir(SDK_ROOT ++ "src");
    exe.addIncludeDir(kernel_dir);
    // Add cudaz package with the kernel paths.
    const cudaz_options = b.addOptions();
    cudaz_options.addOption([:0]const u8, "kernel_path", kernel_path);
    cudaz_options.addOption([]const u8, "kernel_name", std.fs.path.basename(kernel_path));
    cudaz_options.addOption([:0]const u8, "kernel_ptx_path", kernel_ptx_path);
    cudaz_options.addOption([]const u8, "kernel_dir", kernel_dir);
    // True when the kernel is CUDA C (.cu) rather than a Zig kernel.
    cudaz_options.addOption(bool, "cuda_kernel", std.mem.endsWith(u8, kernel_path, ".cu"));
    // Portable mode will embed the cuda modules inside the binary.
    // In debug mode we skip this step to have faster compilation.
    // But this makes the debug executable dependent on a hard-coded path.
    cudaz_options.addOption(bool, "portable", exe.build_mode != .Debug);
    const cudaz_pkg = std.build.Pkg{
        .name = "cudaz",
        .path = .{ .path = SDK_ROOT ++ "src/cuda.zig" },
        .dependencies = &[_]std.build.Pkg{
            .{ .name = "cudaz_options", .path = cudaz_options.getSource() },
        },
    };
    const root_src = exe.root_src.?;
    if (std.mem.eql(u8, root_src.path, "src/cuda.zig")) {
        // Don't include the package in itself
        exe.addOptions("cudaz_options", cudaz_options);
    } else {
        exe.addPackage(cudaz_pkg);
    }
}
/// Directory containing this sdk.zig file, resolved at comptime via @src().
fn sdk_root() []const u8 {
    return std.fs.path.dirname(@src().file).?;
}
/// Freshness heuristic: returns true when the .ptx is missing, suspiciously
/// small, or when the kernel source is meaningfully newer than the output.
fn needRebuild(kernel_path: [:0]const u8, kernel_ptx_path: [:0]const u8) bool {
    var ptx_file = std.fs.openFileAbsoluteZ(kernel_ptx_path, .{}) catch return true;
    var ptx_stat = ptx_file.stat() catch return true;
    // detect empty .ptx files
    if (ptx_stat.size < 128) return true;
    var zig_file = (std.fs.cwd().openFileZ(kernel_path, .{}) catch return true);
    var zig_time = (zig_file.stat() catch return true).mtime;
    // NOTE(review): the 10s slack means an edit made shortly after a build
    // does NOT trigger a rebuild. Presumably intentional: enableAddrspace
    // rewrites the kernel source in place around build time, bumping its
    // mtime — confirm before tightening this comparison.
    return zig_time >= ptx_stat.mtime + std.time.ns_per_s * 10;
}
/// Wraps the given step by a small text processing
/// that enables then disables Stage2 only code.
/// The source is rewritten in place with perl: before `step` runs,
/// `// ... // stage2` lines are uncommented and `... // stage1` lines are
/// commented out; afterwards the file is restored to its stage1 form.
/// Returns the final step to depend on.
fn enableAddrspace(b: *Builder, step: *std.build.Step, src: [:0]const u8) *std.build.Step {
    // CLEANUP: removed the stale `_ = b; _ = step; _ = src;` discards — all
    // three parameters are used below, and discard-then-use is rejected by
    // newer Zig compilers.
    const enable_stage2 = b.addSystemCommand(&[_][]const u8{
        "perl", "-ni", "-e", "s:^( *)// (.* // stage2):$1$2:g ; print", src,
    });
    const disable_stage1 = b.addSystemCommand(&[_][]const u8{
        "perl", "-ni", "-e", "s:^( *)([^/]+ // stage1):$1// $2:g ; print", src,
    });
    disable_stage1.step.dependOn(&enable_stage2.step);
    step.dependOn(&disable_stage1.step);
    // Post-build: restore stage1 code and re-comment stage2 code.
    const enable_stage1 = b.addSystemCommand(&[_][]const u8{
        "perl", "-ni", "-e", "s:^(\\s*)// (.* // stage1):$1$2:g ; print", src,
    });
    const disable_stage2 = b.addSystemCommand(&[_][]const u8{
        "perl", "-ni", "-e", "s:^(\\s*)([^/]* // stage2):$1// $2:g ; print", src,
    });
    enable_stage1.step.dependOn(step);
    disable_stage2.step.dependOn(&enable_stage1.step);
    return &disable_stage2.step;
}
|
cudaz/sdk.zig
|
const std = @import("std");
const zap = @import("zap");
const Socket = @import("socket.zig").Socket;
const os = std.os;
const net = std.net;
const mem = std.mem;
const testing = std.testing;
const print = std.debug.print;
const assert = std.debug.assert;
/// Thin epoll wrapper: registrations carry a user-data word (often a pointer
/// to a `Handle`) and `poll` decodes raw epoll flags into `Event`s.
pub const Reactor = struct {
    /// Callback vtable entry; a pointer to it is stored in epoll user data
    /// and events are dispatched back through `onEventFn`.
    pub const Handle = struct {
        onEventFn: fn (handle: *Reactor.Handle, batch: *zap.Pool.Batch, event: Reactor.Event) void,
        pub fn call(self: *Reactor.Handle, batch: *zap.Pool.Batch, event: Reactor.Event) void {
            (self.onEventFn)(self, batch, event);
        }
    };
    /// Decoded epoll event: the registered user-data word plus readiness flags.
    pub const Event = struct {
        data: usize,
        is_error: bool,
        is_hup: bool,
        is_readable: bool,
        is_writable: bool,
    };
    pub const Interest = struct {
        oneshot: bool = false,
        readable: bool = false,
        writable: bool = false,
        /// Translates the interest into epoll flags. RDHUP is always
        /// requested; non-oneshot registrations are edge-triggered (EPOLLET).
        pub fn flags(self: Interest) u32 {
            var set: u32 = os.EPOLLRDHUP;
            set |= if (self.oneshot) os.EPOLLONESHOT else os.EPOLLET;
            if (self.readable) set |= os.EPOLLIN;
            if (self.writable) set |= os.EPOLLOUT;
            return set;
        }
    };
    /// eventfd-backed wakeup primitive: `post` arms a oneshot EPOLLOUT so the
    /// next poll wakes; concurrent posts coalesce through `notified`.
    pub const AutoResetEvent = struct {
        fd: os.fd_t,
        reactor: Reactor,
        notified: bool = true,
        handle: Reactor.Handle = .{ .onEventFn = onEvent },
        pub fn init(flags: u32, reactor: Reactor) !AutoResetEvent {
            return AutoResetEvent{ .fd = try os.eventfd(0, flags | os.EFD_NONBLOCK), .reactor = reactor };
        }
        pub fn deinit(self: *AutoResetEvent) void {
            os.close(self.fd);
        }
        pub fn post(self: *AutoResetEvent) void {
            // Only the caller that flips `notified` true->false re-arms
            // epoll; later posts are no-ops until onEvent resets the flag.
            if (!@atomicRmw(bool, &self.notified, .Xchg, false, .AcqRel)) return;
            // NOTE(review): epoll_ctl failure is silently dropped here, so a
            // wakeup could be lost — confirm this is acceptable upstream.
            os.epoll_ctl(self.reactor.fd, os.EPOLL_CTL_MOD, self.fd, &os.epoll_event{
                .events = os.EPOLLONESHOT | os.EPOLLOUT,
                .data = .{ .ptr = @ptrToInt(&self.handle) },
            }) catch {};
        }
        pub fn onEvent(handle: *Reactor.Handle, batch: *zap.Pool.Batch, event: Reactor.Event) void {
            // An eventfd is always writable, which is what the oneshot armed.
            assert(event.is_writable);
            const self = @fieldParentPtr(AutoResetEvent, "handle", handle);
            // Re-arm: the next post() may register with epoll again.
            @atomicStore(bool, &self.notified, true, .Release);
        }
    };
    fd: os.fd_t,
    pub fn init(flags: u32) !Reactor {
        const fd = try os.epoll_create1(flags);
        return Reactor{ .fd = fd };
    }
    pub fn deinit(self: Reactor) void {
        os.close(self.fd);
    }
    /// Registers `fd`. Pointers are stored as their integer address; plain
    /// integers are stored as-is, and come back via Event.data.
    pub fn add(self: Reactor, fd: os.fd_t, data: anytype, interest: Interest) !void {
        try os.epoll_ctl(self.fd, os.EPOLL_CTL_ADD, fd, &os.epoll_event{
            .events = interest.flags(),
            .data = .{ .ptr = if (@typeInfo(@TypeOf(data)) == .Pointer) @ptrToInt(data) else data },
        });
    }
    /// Waits for up to `max_num_events` events and invokes `closure.call`
    /// once per event. `timeout_milliseconds == null` blocks indefinitely.
    pub fn poll(self: Reactor, comptime max_num_events: comptime_int, closure: anytype, timeout_milliseconds: ?u64) !void {
        var events: [max_num_events]os.epoll_event = undefined;
        const num_events = os.epoll_wait(self.fd, &events, if (timeout_milliseconds) |ms| @intCast(i32, ms) else -1);
        for (events[0..num_events]) |ev| {
            const is_error = ev.events & os.EPOLLERR != 0;
            const is_hup = ev.events & (os.EPOLLHUP | os.EPOLLRDHUP) != 0;
            const is_readable = ev.events & os.EPOLLIN != 0;
            const is_writable = ev.events & os.EPOLLOUT != 0;
            closure.call(Event{
                .data = ev.data.ptr,
                .is_error = is_error,
                .is_hup = is_hup,
                .is_readable = is_readable,
                .is_writable = is_writable,
            });
        }
    }
};
// Reference all of Reactor's declarations so their nested tests are compiled and run.
test {
    testing.refAllDecls(Reactor);
}
// Exercises the full epoll choreography of connecting two non-blocking
// sockets and shutting the accepted side down BEFORE the peer reads:
// each poll below asserts the exact next event (data = 0 listener,
// 1 client, 2 accepted connection).
// NOTE(review): `testing.expectEqual` results inside the closures are not
// `try`-ed — on the Zig version this targets expectEqual presumably
// panicked on mismatch; on newer Zig the ignored error union would not
// compile. Confirm the toolchain before porting.
test "reactor: shutdown before accept async socket" {
    const reactor = try Reactor.init(os.EPOLL_CLOEXEC);
    defer reactor.deinit();
    const a = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
    defer a.deinit();
    try reactor.add(a.fd, 0, .{ .readable = true });
    // An unbound socket reports HUP immediately.
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 0,
                    .is_error = false,
                    .is_hup = true,
                    .is_readable = false,
                    .is_writable = false,
                },
                event,
            );
        }
    }, null);
    try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0));
    try a.listen(128);
    const binded_address = try a.getName();
    print("Binded to address: {}\n", .{binded_address});
    const b = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
    defer b.deinit();
    try reactor.add(b.fd, 1, .{ .readable = true, .writable = true });
    // The not-yet-connected client also starts out HUP (+ writable).
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 1,
                    .is_error = false,
                    .is_hup = true,
                    .is_readable = false,
                    .is_writable = true,
                },
                event,
            );
        }
    }, null);
    // Non-blocking connect: EINPROGRESS surfaces as WouldBlock.
    b.connect(binded_address) catch |err| switch (err) {
        error.WouldBlock => {},
        else => return err,
    };
    // Connect completion: client becomes writable without HUP.
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 1,
                    .is_error = false,
                    .is_hup = false,
                    .is_readable = false,
                    .is_writable = true,
                },
                event,
            );
        }
    }, null);
    // The listener becomes readable: a connection is ready to accept.
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 0,
                    .is_error = false,
                    .is_hup = false,
                    .is_readable = true,
                    .is_writable = false,
                },
                event,
            );
        }
    }, null);
    const ab = try a.accept(os.SOCK_CLOEXEC);
    defer ab.socket.deinit();
    // Shut the accepted connection down before registering it.
    try os.shutdown(ab.socket.fd, .both);
    try reactor.add(ab.socket.fd, 2, .{ .readable = true, .writable = true });
    // The client observes the peer shutdown as HUP (readable for EOF).
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 1,
                    .is_error = false,
                    .is_hup = true,
                    .is_readable = true,
                    .is_writable = true,
                },
                event,
            );
        }
    }, null);
    // The shut-down accepted socket itself also reports HUP.
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 2,
                    .is_error = false,
                    .is_hup = true,
                    .is_readable = true,
                    .is_writable = true,
                },
                event,
            );
        }
    }, null);
}
// Same setup as above, but the CLIENT is shut down after the connection is
// fully established, and the HUP is expected on both ends afterwards
// (data = 0 listener, 1 client, 2 accepted connection).
// NOTE(review): `testing.expectEqual` results inside the closures are not
// `try`-ed — see the note on the previous test.
test "reactor: shutdown async socket" {
    const reactor = try Reactor.init(os.EPOLL_CLOEXEC);
    defer reactor.deinit();
    const a = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
    defer a.deinit();
    try reactor.add(a.fd, 0, .{ .readable = true });
    // Unbound listener starts out HUP.
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 0,
                    .is_error = false,
                    .is_hup = true,
                    .is_readable = false,
                    .is_writable = false,
                },
                event,
            );
        }
    }, null);
    try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0));
    try a.listen(128);
    const binded_address = try a.getName();
    print("Binded to address: {}\n", .{binded_address});
    const b = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
    defer b.deinit();
    try reactor.add(b.fd, 1, .{ .readable = true, .writable = true });
    // Unconnected client starts out HUP (+ writable).
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 1,
                    .is_error = false,
                    .is_hup = true,
                    .is_readable = false,
                    .is_writable = true,
                },
                event,
            );
        }
    }, null);
    // Non-blocking connect: EINPROGRESS surfaces as WouldBlock.
    b.connect(binded_address) catch |err| switch (err) {
        error.WouldBlock => {},
        else => return err,
    };
    // Connect completion on the client.
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 1,
                    .is_error = false,
                    .is_hup = false,
                    .is_readable = false,
                    .is_writable = true,
                },
                event,
            );
        }
    }, null);
    // Listener is readable: connection ready to accept.
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 0,
                    .is_error = false,
                    .is_hup = false,
                    .is_readable = true,
                    .is_writable = false,
                },
                event,
            );
        }
    }, null);
    const ab = try a.accept(os.SOCK_CLOEXEC);
    defer ab.socket.deinit();
    try reactor.add(ab.socket.fd, 2, .{ .readable = true, .writable = true });
    // Healthy accepted connection: writable, no HUP.
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 2,
                    .is_error = false,
                    .is_hup = false,
                    .is_readable = false,
                    .is_writable = true,
                },
                event,
            );
        }
    }, null);
    // Now shut the client down; both ends should observe HUP.
    try os.shutdown(b.fd, .both);
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 2,
                    .is_error = false,
                    .is_hup = true,
                    .is_readable = true,
                    .is_writable = true,
                },
                event,
            );
        }
    }, null);
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 1,
                    .is_error = false,
                    .is_hup = true,
                    .is_readable = true,
                    .is_writable = true,
                },
                event,
            );
        }
    }, null);
}
// Baseline happy-path scenario: listener + non-blocking client connect, with
// the expected epoll event for each registration/readiness transition.
test "reactor: async socket" {
    const reactor = try Reactor.init(os.EPOLL_CLOEXEC);
    defer reactor.deinit();

    // Listener socket, registered as data = 0; reported HUP while unbound.
    const a = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
    defer a.deinit();
    try reactor.add(a.fd, 0, .{ .readable = true });
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 0,
                    .is_error = false,
                    .is_hup = true,
                    .is_readable = false,
                    .is_writable = false,
                },
                event,
            );
        }
    }, null);
    // Bind to an ephemeral port and listen.
    try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0));
    try a.listen(128);
    const binded_address = try a.getName();
    print("Binded to address: {}\n", .{binded_address});

    // Client socket, registered as data = 1; unconnected => HUP + writable.
    const b = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
    defer b.deinit();
    try reactor.add(b.fd, 1, .{ .readable = true, .writable = true });
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 1,
                    .is_error = false,
                    .is_hup = true,
                    .is_readable = false,
                    .is_writable = true,
                },
                event,
            );
        }
    }, null);
    // Non-blocking connect; WouldBlock (EINPROGRESS) is the expected outcome.
    b.connect(binded_address) catch |err| switch (err) {
        error.WouldBlock => {},
        else => return err,
    };
    // Connection established: client writable, no longer HUP.
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 1,
                    .is_error = false,
                    .is_hup = false,
                    .is_readable = false,
                    .is_writable = true,
                },
                event,
            );
        }
    }, null);
    // Listener readable: a pending connection is waiting to be accepted.
    try reactor.poll(1, struct {
        fn call(event: Reactor.Event) void {
            testing.expectEqual(
                Reactor.Event{
                    .data = 0,
                    .is_error = false,
                    .is_hup = false,
                    .is_readable = true,
                    .is_writable = false,
                },
                event,
            );
        }
    }, null);
}
// Verifies that an eventfd-backed AutoResetEvent only wakes the reactor when
// `post` is called, and that repeated post/poll cycles behave deterministically.
test "reactor/auto_reset_event: post a notification 1024 times" {
    const reactor = try Reactor.init(os.EPOLL_CLOEXEC);
    defer reactor.deinit();

    var test_event = try Reactor.AutoResetEvent.init(os.EFD_CLOEXEC, reactor);
    defer test_event.deinit();
    // The event's handle pointer is smuggled through the epoll user-data word.
    try reactor.add(test_event.fd, &test_event.handle, .{});
    // Poll with a zero timeout: registration alone must not produce an event.
    try reactor.poll(1, struct {
        pub fn call(event: Reactor.Event) void {
            unreachable;
        }
    }, 0);
    // Registering an eventfd to epoll will not trigger a notification.
    // Deregistering an eventfd from epoll will not trigger a notification.
    // Attempt to post a notification to see if we achieve expected behavior.
    var i: usize = 0;
    while (i < 1024) : (i += 1) {
        test_event.post();
        try reactor.poll(1, struct {
            pub fn call(event: Reactor.Event) void {
                // Recover the handle from the user-data word and dispatch it;
                // no pool tasks are expected to be scheduled by this event.
                const handle = @intToPtr(*Reactor.Handle, event.data);
                var batch: zap.Pool.Batch = .{};
                defer testing.expect(batch.isEmpty());
                handle.call(&batch, event);
            }
        }, null);
    }
    // The final post must have left the event in the notified state.
    testing.expect(@atomicLoad(bool, &test_event.notified, .Monotonic));
}
|
reactor.zig
|
const std = @import("std");
/// Build-graph step that compiles a VST plugin shared library from a source
/// file and wraps it in OS-appropriate packaging (a .vst bundle on macOS,
/// a plain installed shared library elsewhere).
pub const BuildStep = struct {
    /// The concrete child step that produces the final artifact.
    pub const Child = union(enum) {
        MacOSBundle: *MacOSBundleStep,
        DefaultStep: *std.build.LibExeObjStep,
    };

    builder: *std.build.Builder,
    options: Options,
    child: Child,
    // Points at the child's own `step`; valid because both child variants are
    // heap-allocated, so the pointer survives this struct being returned by value.
    step: *std.build.Step,
    source_path: []const u8,

    pub const Options = struct {
        name: []const u8,
        version: std.builtin.Version,
        target: std.zig.CrossTarget = std.zig.CrossTarget{},
        mode: ?std.builtin.Mode = null,
        // Required when targeting macOS; `init` panics if it is missing there.
        macos_bundle: ?MacOSBundleStep.BundleOptions = null,
    };

    /// Heap-allocating wrapper around `init`; panics on OOM, the usual
    /// convention inside build scripts.
    pub fn create(
        builder: *std.build.Builder,
        source_path: []const u8,
        options: Options,
    ) *BuildStep {
        const self = builder.allocator.create(BuildStep) catch unreachable;
        self.* = init(builder, source_path, options);
        return self;
    }

    /// Configures the shared-library step and selects the packaging child
    /// based on the target OS.
    pub fn init(
        builder: *std.build.Builder,
        source_path: []const u8,
        options: Options,
    ) BuildStep {
        var lib_step = builder.addSharedLibrary(options.name, source_path, .{
            .versioned = options.version,
        });
        lib_step.setTarget(options.target);
        if (options.mode) |mode| {
            lib_step.setBuildMode(mode);
        }
        switch (options.target.getOsTag()) {
            .macos => {
                // TODO There's probably a nicer way to do this.
                if (options.macos_bundle == null) {
                    @panic("You cannot build a macOS VST without setting the required bundle information");
                }
                var self = BuildStep{
                    .child = .{
                        .MacOSBundle = MacOSBundleStep.create(builder, lib_step, options),
                    },
                    .step = undefined,
                    .builder = builder,
                    .options = options,
                    .source_path = source_path,
                };
                self.step = &self.child.MacOSBundle.step;
                return self;
            },
            else => {
                // TODO Wrap this in a seperate step where we copy the shared library to
                // zig-cache/lib/<name>(.dll|.so)
                var self = BuildStep{
                    .child = .{ .DefaultStep = lib_step },
                    .step = undefined,
                    .builder = builder,
                    .options = options,
                    .source_path = source_path,
                };
                lib_step.install();
                self.step = &self.child.DefaultStep.step;
                return self;
            },
        }
    }

    // TODO Have this function take a package, which should point to
    // this library. That way we can resolve the paths to
    // src/hr_plugin.zig etc.
    /// Creates a hot-reload wrapper step for this plugin. The meta id is the
    /// hex-encoded SHA-1 of the source path, giving each plugin a stable
    /// per-path directory under the install prefix.
    pub fn hotReload(self: *BuildStep) *HotReloadStep {
        var lib_step = switch (self.child) {
            .MacOSBundle => |bundle_step| bundle_step.lib_step,
            .DefaultStep => |default_step| default_step,
        };
        var file_name_sha1 = std.crypto.hash.Sha1.init(.{});
        var out: [20]u8 = undefined;
        file_name_sha1.update(self.source_path);
        file_name_sha1.final(&out);
        const out_text = std.fmt.fmtSliceHexLower(&out);
        const meta_id = self.builder.fmt("{}", .{out_text});
        return HotReloadStep.create(self.builder, lib_step, self.options, meta_id);
    }
};
/// Builds a "(Hot Reload)" variant of a plugin: a wrapper VST (src/hr_plugin.zig)
/// that, at runtime, watches a file containing the path of the freshly built
/// library and reloads it. Paths are exchanged through a generated
/// `hot-reload-meta` package.
pub const HotReloadStep = struct {
    builder: *std.build.Builder,
    step: std.build.Step,
    // The actual plugin library being watched/reloaded.
    watch_lib_step: *std.build.LibExeObjStep,
    // The wrapper VST build.
    vst_step: *BuildStep,
    meta_package: std.build.Pkg,
    meta_id: []const u8,

    /// Filesystem locations under `<prefix>/vst-meta/<meta_id>/`.
    const Paths = struct {
        base: []const u8,
        // File whose contents are the path of the latest built library.
        watch_path: []const u8,
        log_path: []const u8,
        // Generated Zig source for the `hot-reload-meta` package.
        package_path: []const u8,
    };

    pub fn create(
        builder: *std.build.Builder,
        watch_lib_step: *std.build.LibExeObjStep,
        parent_options: BuildStep.Options,
        meta_id: []const u8,
    ) *HotReloadStep {
        var self = builder.allocator.create(HotReloadStep) catch unreachable;
        // Same options as the parent plugin, but with "(Hot Reload)" appended
        // to the name so both can be installed side by side.
        var options = parent_options;
        options.name = std.mem.join(builder.allocator, " ", &[_][]const u8{
            parent_options.name,
            "(Hot Reload)",
        }) catch unreachable;
        self.builder = builder;
        self.watch_lib_step = watch_lib_step;
        self.vst_step = BuildStep.create(builder, "src/hr_plugin.zig", options);
        self.meta_id = meta_id;
        const paths = self.getPaths() catch unreachable;
        self.setupPaths(paths) catch unreachable;
        // Generate the meta package: compile-time constants telling the
        // wrapper where to find the watch file and where to log.
        const package_source = builder.fmt("pub const watch_path = \"{s}\";\npub const log_path = \"{s}\";\n", .{
            escapeBackslash(builder.allocator, paths.watch_path) catch unreachable,
            escapeBackslash(builder.allocator, paths.log_path) catch unreachable,
        });
        const package_write_step = builder.addWriteFile(paths.package_path, package_source);
        var hr_lib_step = switch (self.vst_step.child) {
            .MacOSBundle => |bundle_step| bundle_step.lib_step,
            .DefaultStep => |default_step| default_step,
        };
        self.meta_package = .{
            .name = "hot-reload-meta",
            .path = paths.package_path,
        };
        hr_lib_step.step.dependOn(&package_write_step.step);
        hr_lib_step.addPackage(self.meta_package);
        self.step = std.build.Step.init(.Custom, "Create Hot Reload Wrapper", builder.allocator, make);
        self.step.dependOn(self.vst_step.step);
        self.step.dependOn(&self.watch_lib_step.step);
        return self;
    }

    /// Computes (and allocates) the meta paths for this step's meta_id.
    fn getPaths(self: *HotReloadStep) !Paths {
        const cwd = std.fs.cwd();
        const allocator = self.builder.allocator;
        const meta_id = self.meta_id;
        const meta_dir = self.builder.fmt("vst-meta/{s}", .{meta_id});
        const meta_path = self.builder.getInstallPath(.Prefix, meta_dir);
        return Paths{
            .base = meta_path,
            .watch_path = try std.fs.path.join(allocator, &[_][]const u8{
                meta_path,
                "watch",
            }),
            .log_path = try std.fs.path.join(allocator, &[_][]const u8{
                meta_path,
                "log.log",
            }),
            .package_path = try std.fs.path.join(allocator, &[_][]const u8{
                meta_path,
                "package.zig",
            }),
        };
    }

    /// Ensures the meta directory exists and pre-creates (truncates) the log
    /// and package files so later steps can rely on their presence.
    fn setupPaths(self: *HotReloadStep, paths: Paths) !void {
        const cwd = std.fs.cwd();
        try cwd.makePath(paths.base);
        var log_file = try cwd.createFile(paths.log_path, .{});
        defer log_file.close();
        var package_file = try cwd.createFile(paths.package_path, .{});
        defer package_file.close();
    }

    /// Step callback: writes the freshly built library's output path into the
    /// watch file, which the running wrapper plugin polls.
    fn make(step: *std.build.Step) !void {
        const self = @fieldParentPtr(HotReloadStep, "step", step);
        const lib_output_path = self.watch_lib_step.getOutputPath();
        const paths = try self.getPaths();
        const cwd = std.fs.cwd();
        var watch_file = try cwd.createFile(paths.watch_path, .{});
        defer watch_file.close();
        try watch_file.writeAll(lib_output_path);
    }

    /// Returns a step that tails the hot-reload log file (requires `tail`).
    pub fn trackLogs(self: *HotReloadStep) *std.build.Step {
        const paths = self.getPaths() catch unreachable;
        const tail = self.builder.addSystemCommand(&[_][]const u8{
            "tail",
            "-n +0",
            "-f",
            paths.log_path,
        });
        return &tail.step;
    }
};
/// Packages a built shared library into a macOS `.vst` bundle:
/// `<prefix>/vst/<name>.vst/Contents/{MacOS/<name>, Info.plist, PkgInfo}`.
pub const MacOSBundleStep = struct {
    builder: *std.build.Builder,
    lib_step: *std.build.LibExeObjStep,
    step: std.build.Step,
    options: BuildStep.Options,

    pub const BundleOptions = struct {
        bundle_identifier: []const u8,
        // Four-character creator code written into PkgInfo and Info.plist.
        bundle_signature: [4]u8 = [_]u8{'?'} ** 4,
        bundle_development_region: []const u8 = "English",
    };

    /// Heap-allocating wrapper around `init`; panics on OOM (build-script convention).
    pub fn create(
        builder: *std.build.Builder,
        lib_step: *std.build.LibExeObjStep,
        options: BuildStep.Options,
    ) *MacOSBundleStep {
        const self = builder.allocator.create(MacOSBundleStep) catch unreachable;
        self.* = init(builder, lib_step, options) catch unreachable;
        return self;
    }

    /// Wraps `lib_step` in a custom step that assembles the bundle in `make`.
    pub fn init(
        builder: *std.build.Builder,
        lib_step: *std.build.LibExeObjStep,
        options: BuildStep.Options,
    ) !MacOSBundleStep {
        var self = MacOSBundleStep{
            .builder = builder,
            .lib_step = lib_step,
            .step = undefined,
            .options = options,
        };
        self.step = std.build.Step.init(.Custom, "Create macOS .vst bundle", builder.allocator, make);
        self.step.dependOn(&self.lib_step.step);
        return self;
    }

    /// Step callback: creates the bundle directory tree, copies the built
    /// dylib into Contents/MacOS, and generates Info.plist and PkgInfo.
    fn make(step: *std.build.Step) !void {
        const self = @fieldParentPtr(MacOSBundleStep, "step", step);
        const cwd = std.fs.cwd();
        const vst_path = self.builder.getInstallPath(.Prefix, "vst");
        const bundle_basename = self.builder.fmt("{s}.vst", .{self.options.name});
        const bundle_path = try std.fs.path.join(self.builder.allocator, &[_][]const u8{
            vst_path,
            bundle_basename,
        });
        defer self.builder.allocator.free(bundle_path);
        var bundle_dir = try cwd.makeOpenPath(bundle_path, .{});
        defer bundle_dir.close();
        try bundle_dir.makePath("Contents/MacOS");
        const binary_path = try std.fs.path.join(self.builder.allocator, &[_][]const u8{
            "Contents/MacOS",
            self.options.name,
        });
        defer self.builder.allocator.free(binary_path);
        const lib_output_path = self.lib_step.getOutputPath();
        try cwd.copyFile(lib_output_path, bundle_dir, binary_path, .{});
        const plist_file = try bundle_dir.createFile("Contents/Info.plist", .{});
        defer plist_file.close();
        try self.createPList(plist_file);
        const pkginfo_file = try bundle_dir.createFile("Contents/PkgInfo", .{});
        defer pkginfo_file.close();
        try self.createPkgInfo(pkginfo_file);
    }

    /// Renders Info.template.plist with every `$ZIGVST_*` variable substituted.
    fn createPList(self: *MacOSBundleStep, out_file: std.fs.File) !void {
        const template = @embedFile("Info.template.plist");
        var variables = std.StringHashMap([]const u8).init(self.builder.allocator);
        defer variables.deinit();
        try self.generateVariables(&variables);
        const final = try replaceVariables(self.builder.allocator, template, variables);
        defer self.builder.allocator.free(final);
        try out_file.writeAll(final);
    }

    /// PkgInfo is exactly 8 bytes: the bundle type "BNDL" + the 4-byte signature.
    fn createPkgInfo(self: *MacOSBundleStep, out_file: std.fs.File) !void {
        const bundle_options = self.options.macos_bundle.?;
        var buffer: [8]u8 = undefined;
        std.mem.copy(u8, &buffer, "BNDL");
        std.mem.copy(u8, buffer[4..], &bundle_options.bundle_signature);
        try out_file.writeAll(&buffer);
    }

    /// Fills `into` with the plist template variables. Every slice stored in
    /// the map must outlive this call, because the map is consumed afterwards
    /// by `createPList`.
    fn generateVariables(self: *MacOSBundleStep, into: *std.StringHashMap([]const u8)) !void {
        const bundle_options = self.options.macos_bundle.?;
        const version_string = self.builder.fmt("{}.{}.{}", .{
            self.options.version.major,
            self.options.version.minor,
            self.options.version.patch,
        });
        _ = try into.put("$ZIGVST_CFBundleName", self.options.name);
        _ = try into.put("$ZIGVST_CFBundleExecutable", self.options.name);
        // Fix: take the signature's address through `self`, not through the
        // local `bundle_options` copy. `bundle_options` is a by-value copy on
        // this function's stack frame, so storing `&bundle_options.bundle_signature`
        // in `into` (which is read after we return) was a use-after-return.
        _ = try into.put("$ZIGVST_CFBundleSignature", &self.options.macos_bundle.?.bundle_signature);
        _ = try into.put("$ZIGVST_CFBundleVersion", version_string);
        _ = try into.put("$ZIGVST_CFBundleIdentifier", bundle_options.bundle_identifier);
        _ = try into.put("$ZIGVST_CFBundleDevelopmentRegion", bundle_options.bundle_development_region);
    }
};
/// Returns a newly allocated copy of `initial` in which every occurrence of
/// every key in `variables` is replaced by its value. Caller owns the result.
fn replaceVariables(
    allocator: *std.mem.Allocator,
    initial: []const u8,
    variables: std.StringHashMap([]const u8),
) ![]u8 {
    var it = variables.iterator();
    var buffer = try std.mem.dupe(allocator, u8, initial);
    while (it.next()) |entry| {
        const key = entry.key_ptr.*;
        const value = entry.value_ptr.*;
        // Error-union-driven loop: keep replacing the first occurrence until
        // `replace` reports NotFound. The `else |err|` branch runs when an
        // iteration fails; NotFound simply ends the loop for this key, any
        // other error (e.g. OutOfMemory) is propagated.
        while (replace(allocator, buffer, key, value)) |new_buffer| {
            allocator.free(buffer);
            buffer = new_buffer;
        } else |err| {
            switch (err) {
                error.NotFound => {},
                else => return err,
            }
        }
    }
    return buffer;
}
/// Returns a newly allocated copy of `src` with the first occurrence of
/// `needle` replaced by `replacement`. Caller owns the returned slice.
/// Returns error.NotFound when `needle` does not occur in `src`.
fn replace(
    allocator: *std.mem.Allocator,
    src: []const u8,
    needle: []const u8,
    replacement: []const u8,
) ![]u8 {
    // Search before allocating: this reports NotFound without an allocation,
    // and — because a found needle guarantees needle.len <= src.len — the
    // length arithmetic below cannot underflow. (The original computed
    // `src.len - needle.len + replacement.len` first, which underflows usize
    // when the needle is longer than the source.)
    const index = std.mem.indexOf(u8, src, needle) orelse return error.NotFound;
    const dest = try allocator.alloc(u8, src.len - needle.len + replacement.len);
    std.mem.copy(u8, dest[0..index], src[0..index]);
    std.mem.copy(u8, dest[index..], replacement);
    std.mem.copy(u8, dest[index + replacement.len ..], src[index + needle.len ..]);
    return dest;
}
/// Doubles every backslash in `path` (for embedding the path inside a Zig
/// string literal on Windows). Caller owns the returned slice.
fn escapeBackslash(allocator: *std.mem.Allocator, path: []const u8) ![]const u8 {
    // First pass: count backslashes so the output can be sized exactly.
    var extra: usize = 0;
    for (path) |byte| {
        if (byte == '\\') extra += 1;
    }
    // Second pass: copy, emitting each backslash twice.
    var escaped = try allocator.alloc(u8, path.len + extra);
    var out: usize = 0;
    for (path) |byte| {
        escaped[out] = byte;
        out += 1;
        if (byte == '\\') {
            escaped[out] = '\\';
            out += 1;
        }
    }
    return escaped;
}
|
src/build_util.zig
|
const std = @import("std");
pub const platform = @import("modules/platform/build.zig");
pub const graphics = @import("modules/graphics/build.zig");
pub const audio = @import("modules/audio/build.zig");
pub const algo = @import("modules/algo/build.zig");
// Every first-party module that takes part in the build; a comptime tuple so
// `build` can `inline for` over it to register tests and link examples.
const all_modules = .{
    platform,
    graphics,
    audio,
    algo,
};
/// Top-level build entry point: registers a "test" step covering every module,
/// then creates install + run steps for each directory under examples/.
pub fn build(b: *std.build.Builder) !void {
    // Standard release options allow the person running `zig build` to select
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
    const mode = b.standardReleaseOptions();
    const target_opts = b.standardTargetOptions(.{});
    const test_step = b.step("test", "Run all tests");
    // `inline for` is required: `all_modules` is a heterogeneous comptime tuple.
    inline for (all_modules) |module| {
        test_step.dependOn(&module.buildTests(b, mode, target_opts).step);
    }
    { // examples
        const build_root_dir = try std.fs.openDirAbsolute(b.build_root, .{});
        const dir = try build_root_dir.openDir("examples", .{ .iterate = true });
        var example_id: usize = 0;
        var dir_it = dir.iterate();
        while (try dir_it.next()) |entry| {
            switch (entry.kind) {
                // Each subdirectory of examples/ is expected to contain main.zig.
                .Directory => {
                    const example = b.addExecutable(
                        entry.name,
                        try std.fmt.allocPrint(b.allocator, "examples/{s}/main.zig", .{entry.name}),
                    );
                    example.setTarget(target_opts);
                    example.setBuildMode(mode);
                    inline for (all_modules) |module| {
                        example.addPackage(module.pkg);
                        module.buildAndLink(example);
                    }
                    example.install();
                    const example_runstep = example.run();
                    example_runstep.step.dependOn(b.getInstallStep());
                    if (b.args) |args| example_runstep.addArgs(args);
                    // Step names are run-example-000, -001, ... in iteration
                    // order (directory iteration order is not specified).
                    var run = b.step(
                        try std.fmt.allocPrint(b.allocator, "run-example-{:0>3}", .{example_id}),
                        try std.fmt.allocPrint(b.allocator, "Build and run example {s}", .{entry.name}),
                    );
                    run.dependOn(&example_runstep.step);
                    example_id += 1;
                },
                else => {},
            }
        }
    }
}
|
build.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
const Obj = @import("./object.zig").Obj;
const Value = @import("./value.zig").Value;
/// One open-addressing slot. Three states are encoded by (key, value):
/// live (key set), empty (key null, value nil), tombstone (key null, value non-nil).
const Entry = struct {
    key: ?*Obj.String,
    value: Value,

    /// A tombstone marks a deleted slot: no key, but a non-nil sentinel value.
    pub fn isTombstone(self: *Entry) bool {
        return self.key == null and !self.value.isNil();
    }
};
/// Open-addressing hash table mapping interned strings to values, following
/// the "Crafting Interpreters" design (linear probing + tombstones).
/// Capacity is always zero or a power of two; `findEntry`/`findString` rely
/// on that for the `hash & (len - 1)` index masking.
pub const Table = struct {
    allocator: *Allocator,
    entries: []Entry,
    // Number of occupied slots *including* tombstones (see `set`).
    count: usize,

    pub fn init(allocator: *Allocator) Table {
        return Table{
            .allocator = allocator,
            .entries = &[_]Entry{},
            .count = 0,
        };
    }

    pub fn deinit(self: *Table) void {
        self.allocator.free(self.entries);
    }

    /// Inserts or updates `key`. Returns true when the key was not present.
    pub fn set(self: *Table, key: *Obj.String, value: Value) !bool {
        // Encodes a 75% max load factor; `count` includes tombstones so the
        // probe sequences stay short even after many deletions.
        if (4 * (self.count + 1) > 3 * self.entries.len) {
            try self.increaseCapacity();
        }
        const entry = findEntry(self.entries, key);
        const isNewKey = entry.key == null;
        if (isNewKey) {
            // Reusing a tombstone does not bump `count`: the tombstone is
            // already counted.
            if (!entry.isTombstone()) self.count += 1;
        }
        entry.key = key;
        entry.value = value;
        return isNewKey;
    }

    /// Looks up `key`; on success writes the value through `value` and
    /// returns true.
    pub fn get(self: *Table, key: *Obj.String, value: *Value) bool {
        if (self.entries.len == 0) return false;
        const entry = findEntry(self.entries, key);
        if (entry.key == null) return false;
        value.* = entry.value;
        return true;
    }

    /// Removes `key`, leaving a tombstone so later probe chains stay intact.
    /// Returns false when the key was not present. `count` is intentionally
    /// not decremented (tombstones still occupy slots).
    pub fn delete(self: *Table, key: *Obj.String) bool {
        if (self.entries.len == 0) return false;
        var entry = findEntry(self.entries, key);
        if (entry.key == null) return false;
        // Add "tombstone" represented by empty key and true value
        entry.key = null;
        entry.value = Value.fromBool(true);
        return true;
    }

    /// Doubles capacity (min 8) and rehashes. Tombstones are dropped during
    /// the rehash, so `count` is recomputed from live entries only.
    pub fn increaseCapacity(self: *Table) !void {
        const newCapacity = if (self.entries.len < 8) 8 else self.entries.len * 2;
        var entries = try self.allocator.alloc(Entry, newCapacity);
        // Initialize new entries to empty values
        for (entries) |*entry| {
            entry.key = null;
            entry.value = Value.nil();
        }
        // Copy old entries to new entries
        self.count = 0;
        for (self.entries) |entry| {
            if (entry.key) |key| {
                var dest = findEntry(entries, key);
                dest.key = key;
                dest.value = entry.value;
                self.count += 1;
            }
        }
        // Free old entries
        self.allocator.free(self.entries);
        // Set entries reference to new entries
        self.entries = entries;
    }

    /// Copies every live entry of `self` into `dest`.
    pub fn copyTo(self: *Table, dest: *Table) !void {
        for (self.entries) |entry| {
            if (entry.key) |key| {
                // Fix: `set` returns `!bool`; the original discarded it, which
                // both fails to compile once this function is analyzed and
                // would swallow OutOfMemory. Propagate the error, ignore the
                // "was new key" bool.
                _ = try dest.set(key, entry.value);
            }
        }
    }

    /// Finds the slot for `key`: either its live entry, or (for insertion)
    /// the first tombstone on its probe chain, or the empty slot that ends
    /// the chain. Requires `entries.len` to be a nonzero power of two.
    fn findEntry(entries: []Entry, key: *Obj.String) *Entry {
        var index = key.hash & (entries.len - 1);
        var maybeTombstone: ?*Entry = null;
        // Linear probing on entries
        while (true) {
            var entry = &entries[index];
            if (entry.key) |entryKey| {
                // Keys are interned, so pointer equality suffices.
                if (entryKey == key) return entry;
            } else {
                if (entry.isTombstone()) {
                    if (maybeTombstone == null) maybeTombstone = entry;
                } else {
                    return maybeTombstone orelse entry;
                }
            }
            index = (index + 1) & (entries.len - 1);
        }
    }

    /// Interning lookup: finds a stored key equal to `bytes` by content
    /// (hash first, then byte comparison). Returns null when absent.
    pub fn findString(self: *Table, bytes: []const u8, hash: u32) ?*Obj.String {
        const entries = self.entries;
        if (entries.len == 0) return null;
        var index = hash & (entries.len - 1);
        // Linear probing on entries
        while (true) {
            var entry = entries[index];
            if (entry.key) |entryKey| {
                if (entryKey.hash == hash and
                    std.mem.eql(u8, entryKey.bytes, bytes))
                {
                    return entryKey;
                }
            } else if (!entry.isTombstone()) {
                // Stop if we find an empty non-tombstone entry.
                return null;
            }
            index = (index + 1) & (entries.len - 1);
        }
    }
};
|
src/table.zig
|
const std = @import("std");
const arm_m = @import("arm_m");
const warn = @import("nonsecure-common/debug.zig").warn;
const port = @import("ports/" ++ @import("build_options").BOARD ++ "/nonsecure.zig");
const port_timer = @import("ports/" ++ @import("build_options").BOARD ++ "/timer.zig");
const nonsecure_init = @import("nonsecure-common/init.zig");
// zig fmt: off
/// The (unprocessed) Non-Secure exception vector table, placed in its own
/// section so the linker script can locate it. Timer0/Timer1 IRQs are routed
/// to the measurement handlers defined below.
export const raw_exception_vectors linksection(".text.raw_isr_vector") =
    @import("nonsecure-common/excvector.zig")
        .getDefaultBaremetal()
        .setTcExcHandler(port_timer.irqs.Timer0_IRQn, handleTimer0)
        .setTcExcHandler(port_timer.irqs.Timer1_IRQn, handleTimer1);
// zig fmt: on
/// Used for communication between `main` and the interrupt handlers
const CommBlock = struct {
    // Timer1 delay value programmed for the current iteration of `main`.
    current_delay: u32 = 0,
    // Incremented once by each timer handler; `main` spins until it reaches 2.
    measure_done: u8 = 0,
};

var comm_block = CommBlock{};

/// Accessor that forces volatile access: the fields are written from
/// interrupt handlers while `main` busy-waits on them, so ordinary loads
/// must not be cached/optimized away.
fn comm() *volatile CommBlock {
    return @ptrCast(*volatile CommBlock, &comm_block);
}
/// Benchmark driver: sweeps Timer1's delay across the window around Timer0's
/// fixed 3000-cycle delay and lets the handlers record preemption latency.
/// Results are printed between %output-start/%output-end as a Python list.
export fn main() void {
    port.init();
    nonsecure_init.disableNestedExceptionIfDisallowed();
    warn("Starting the interrupt latency benchmark...\r\n", .{});
    warn("\r\n", .{});
    warn("-------------------------------------------------------- \r\n", .{});
    warn("%output-start\r\n", .{});
    warn("[\r\n", .{});
    // Timer1 has a higher priority than Timer0, meaning Timer1 can preempt
    // Timer0's handler.
    arm_m.nvic.setIrqPriority(port_timer.irqs.Timer0_IRQn - 16, 0x80);
    arm_m.nvic.setIrqPriority(port_timer.irqs.Timer1_IRQn - 16, 0x10);
    arm_m.nvic.enableIrq(port_timer.irqs.Timer0_IRQn - 16);
    arm_m.nvic.enableIrq(port_timer.irqs.Timer1_IRQn - 16);
    port_timer.timer0.setReloadValue(0x1000000);
    port_timer.timer1.setReloadValue(0x1000000);
    // Sweep Timer1's initial count from 2500 to 3499 cycles.
    var i: u32 = 2500;
    while (i < 3500) : (i += 1) {
        comm().current_delay = i;
        comm().measure_done = 0;
        // Timer1 fires late iff i > 3000.
        port_timer.timer0.setValue(3000 - 1);
        port_timer.timer0.clearInterruptFlag();
        port_timer.timer1.setValue(i - 1);
        port_timer.timer1.clearInterruptFlag();
        port_timer.timer0.startWithInterruptEnabled();
        port_timer.timer1.startWithInterruptEnabled();
        // Spin until both handlers have reported (volatile access via comm()).
        while (comm().measure_done < 2) {}
    }
    warn("]\r\n", .{});
    warn("%output-end\r\n", .{});
    warn("-------------------------------------------------------- \r\n", .{});
    warn("\r\n", .{});
    warn("Done!\r\n", .{});
    while (true) {}
}
/// Timer0 IRQ handler (lower priority): stops its timer, then burns cycles so
/// that Timer1 — when it fires late — preempts *this* handler rather than the
/// background loop.
fn handleTimer0() callconv(.C) void {
    const timer = port_timer.timer0;
    // Clear the interrupt flag
    timer.clearInterruptFlag();
    // Stop the timer
    timer.stop();
    // Flag the cases where Timer1 preempts Timer0's handler too late
    asm volatile ("cpsid f");
    // Busy loop; the empty asm keeps the compiler from optimizing it away.
    var k: u32 = 0;
    while (k < 1000) : (k += 1) {
        asm volatile ("");
    }
    comm().measure_done += 1;
    asm volatile ("cpsie f");
}
// Last sp^cycles fingerprint that was printed; used to deduplicate output so
// only *changes* in the observed latency pattern are logged.
var last_observed_pattern_hash: u32 = 0;

/// Timer1 IRQ handler (higher priority): measures elapsed cycles since the
/// timer fired and records the preemption context (stack pointer) so the host
/// can tell which code the interrupt landed in.
fn handleTimer1() callconv(.C) void {
    const timer = port_timer.timer1;
    // Clear the interrupt flag
    timer.clearInterruptFlag();
    // Stop the timer
    timer.stop();
    // Collect information
    const sp = @frameAddress();
    const cycles = timer.getReloadValue() - timer.getValue();
    // Fingerprint of (where we preempted, how long it took); print only when
    // it differs from the previous iteration.
    const pat_hash = sp ^ cycles;
    if (pat_hash != last_observed_pattern_hash) {
        last_observed_pattern_hash = pat_hash;
        // Output in the Python dict literal format
        warn("  {{ \"cycles\": {}, \"sp\": 0x{x:08}, \"delay\": {} }},\r\n", .{ cycles, sp, comm().current_delay });
    }
    asm volatile ("cpsid f");
    comm().measure_done += 1;
    asm volatile ("cpsie f");
}
// Zig panic handler. See `panicking.zig` for details.
const panicking = @import("nonsecure-common/panicking.zig");

/// Root-level override of the default panic handler; delegates to the
/// board-aware implementation in panicking.zig.
pub fn panic(msg: []const u8, error_return_trace: ?*panicking.StackTrace) noreturn {
    panicking.panic(msg, error_return_trace);
}
|
examples/nonsecure-bench-latency.zig
|
const std = @import("std");
const utils = @import("utils.zig");
const expect = std.testing.expect;
const log = std.log;
pub const BibParser = struct {
bib: Bibliography,
idx: u32,
bytes: []const u8,
allocator: *std.mem.Allocator,
pub const Error = error {
SyntaxError,
OutOfMemory,
EndOfFile,
DuplicateLabel,
};
pub fn init(allocator: *std.mem.Allocator, path: []const u8, bytes: []const u8) BibParser {
var parser = BibParser{
.bib = Bibliography{
.path = path,
.label_entry_map = std.StringHashMap(Entry).init(allocator),
.field_arena = std.heap.ArenaAllocator.init(allocator),
},
.idx = 0,
.bytes = bytes,
.allocator = allocator,
};
return parser;
}
inline fn report_error(comptime err_msg: []const u8, args: anytype) void {
log.err(err_msg, args);
}
// zig stdlib's reader also returns EndOfStream err instead of returning null
// prob since ?u8 is actually u9 (1 extra bit for maybe bool)
// but !u8 returns a union with a value: u8 field and a tag field
// which is 0 (none) if there is no error otherwise it's the error code e.g. EndOfFile(51)
// so this would require even more space? (all irrelevant anyway since it will fit in a
// register, but I just wanna know how it works)
inline fn advance_to_next_byte(self: *BibParser) Error!void {
if (self.idx + 1 < self.bytes.len) {
self.idx += 1;
} else {
return Error.EndOfFile;
}
}
inline fn peek_byte(self: *BibParser) u8 {
// current position must be in bounds since we should've hit Error.EndOfFile otherwise
return self.bytes[self.idx];
}
inline fn eat_until(self: *BibParser, end: u8) Error!void {
while (self.peek_byte() != end) {
self.advance_to_next_byte() catch {
BibParser.report_error("Reached EOF while waiting for '{c}'!\n", .{ end });
return Error.EndOfFile;
};
}
}
fn eat_name(self: *BibParser) Error!void {
var byte = self.peek_byte();
while (true) {
switch (byte) {
// this is what btparse defines as name, but are that many special chars allowed?
// biber disallows " # ' ( ) , = { } %
'a'...'z', 'A'...'Z', '0'...'9',
'!', '$', '*', '+', '-', '.', '/', ':',
';', '<', '>', '?', '[', ']', '^', '_', '`', '|' => {},
else => break,
}
self.advance_to_next_byte() catch {
BibParser.report_error("Reached EOF while consuming name!\n", .{});
return Error.EndOfFile;
};
byte = self.peek_byte();
}
}
/// does not advance the idx
inline fn require_byte(self: *BibParser, byte: u8) Error!void {
if (self.peek_byte() != byte) {
BibParser.report_error("Expected '{c}' got '{c}'!\n", .{ byte, self.peek_byte() });
return Error.SyntaxError;
}
}
inline fn eat_byte(self: *BibParser, byte: u8) Error!void {
try self.require_byte(byte);
try self.advance_to_next_byte();
}
fn next(self: *BibParser) ?u8 {
if (self.idx + 1 < self.bytes.len) {
self.idx += 1;
var byte = self.bytes[self.idx];
return byte;
} else {
return null;
}
}
inline fn reached_eof(self: *BibParser) bool {
if (self.idx >= self.bytes.len) {
return true;
} else {
return false;
}
}
/// caller owns memory of return Bibliography
pub fn parse(self: *BibParser) Error!Bibliography {
// clean up on error so we don't leak on unsuccessful parses
errdefer {
self.bib.deinit();
}
while (self.idx < self.bytes.len) {
var byte = self.peek_byte();
if (utils.is_whitespace(byte)) {
self.idx += 1;
continue;
}
switch (byte) {
'@' => {
self.idx += 1;
try self.parse_entry();
},
'%' => {
// line-comment, used by jabref to state encoding
self.idx += 1;
self.skip_line();
},
else => {
BibParser.report_error(
"Unexpected byte '{c}' expected either '@' followed by a valid " ++
" entry type or '%' to start a line comment!\n", .{ byte });
return Error.SyntaxError;
},
}
}
return self.bib;
}
fn skip_line(self: *BibParser) void {
var byte: u8 = undefined;
while (self.idx < self.bytes.len) : (self.idx += 1) {
byte = self.bytes[self.idx];
if (byte == '\r') {
// macos classic line ending is just \r
self.idx += 1;
if (self.idx < self.bytes.len and self.bytes[self.idx] == '\n') {
self.idx += 1;
}
break;
} else if (byte == '\n') {
// unix line-ending
self.idx += 1;
break;
}
}
}
fn skip_whitespace(self: *BibParser, comptime eof_is_error: bool) Error!void {
while (utils.is_whitespace(self.peek_byte())) {
self.advance_to_next_byte() catch {
BibParser.report_error("Hit EOF while skipping whitespace!\n", .{});
return Error.EndOfFile;
};
}
}
fn parse_entry(self: *BibParser) Error!void {
const entry_type_start = self.idx;
while (self.peek_byte() != '{') {
self.advance_to_next_byte() catch {
BibParser.report_error("Reached EOF while parsing entry type name!\n", .{});
return Error.SyntaxError;
};
}
// convert to lower-case to match with enum tagNames
const lowercased = try std.ascii.allocLowerString(
self.allocator, self.bytes[entry_type_start..self.idx]);
const entry_type = std.meta.stringToEnum(
EntryType, lowercased) orelse {
BibParser.report_error("'{s}' is not a valid entry type!\n",
.{ self.bytes[entry_type_start..self.idx] });
return Error.SyntaxError;
};
self.allocator.free(lowercased);
if (entry_type == .comment) {
// NOTE: not allowing {} inside comments atm
try self.eat_until('}');
// TODO @Improve
self.advance_to_next_byte() catch {
// advance_to_next_byte errors since we're at the last byte before EOF
// but we know the current one is '{' so we advance it manually
// so the main loop in parse can exit properly
self.idx += 1;
return;
};
return;
}
try self.advance_to_next_byte();
const label_start = self.idx;
// TODO allow unicode labels?
try self.eat_name();
try self.require_byte(',');
const label = self.bytes[label_start..self.idx];
// make sure we don't have a duplicate label!
var entry_found = try self.bib.label_entry_map.getOrPut(label);
// ^ result will be a struct with a pointer to the HashMap.Entry and a bool
// whether an existing value was found
if (entry_found.found_existing) {
BibParser.report_error("Duplicate label '{s}'!\n", .{ label });
return Error.DuplicateLabel;
} else {
// actually write entry value (key was already written by getOrPut)
entry_found.value_ptr.* = Entry.init(&self.bib.field_arena.allocator, entry_type);
}
try self.advance_to_next_byte();
var byte: u8 = undefined;
while (!self.reached_eof()) {
try self.skip_whitespace(true);
byte = self.bytes[self.idx];
if (byte == '}') {
self.advance_to_next_byte() catch {
self.idx += 1;
return;
};
break;
} else {
// NOTE: label_entry_map can't be modified while the function below is
// still running otherwise the entry ptr might? get invalidated
try self.parse_entry_field(entry_found.value_ptr);
}
}
}
/// expects to start on first byte of field name
/// Parses one `name = {value}` field of a bibtex entry and stores the result
/// in `entry.fields` under the field name exactly as it appears in the source.
/// Expects `self.idx` to point at the first byte of the field name.
/// The value must be brace-wrapped; nested braces are tracked and stripped
/// from the re-formed value, and `\`-escaped bytes are copied through as-is.
/// The reformed value is allocated from `self.bib.field_arena`.
fn parse_entry_field(self: *BibParser, entry: *Entry) Error!void {
    const field_name_start = self.idx;
    try self.eat_name();
    const field_name_end = self.idx;
    // convert to lower-case to match with enum tagNames
    const lowercased = try std.ascii.allocLowerString(
        self.allocator, self.bytes[field_name_start..field_name_end]);
    // unknown field names fall back to .custom
    const field_name = std.meta.stringToEnum(FieldName, lowercased) orelse .custom;
    // stringToEnum keeps no reference to the string, so the copy can be freed now
    self.allocator.free(lowercased);
    try self.skip_whitespace(true);
    try self.eat_byte('=');
    try self.skip_whitespace(true);
    // TODO worth to handle field values that are not wrapped in {} and those
    // that are wrapped in "" and thus can be concatenated using '#'?
    self.eat_byte('{') catch {
        BibParser.report_error(
            "This implementation of a bibtex parser requires all field values to " ++
            "be wrapped in {{braces}}!\n", .{});
        return Error.SyntaxError;
    };
    // need to reform field value string since it might contain braces and other
    // escapes
    var field_value_str = std.ArrayList(u8).init(&self.bib.field_arena.allocator);
    var braces: u32 = 0;
    // TODO foreign/special char escapes e.g. \'{o} for ó etc.
    // currently just discards the braces
    // TODO process into name lists etc.
    // TODO {} protect from case-mangling etc.
    var byte = self.peek_byte();
    var added_until = self.idx; // exclusive
    while (true) {
        switch (byte) {
            '{' => {
                // nested opening brace: copy the pending span, then skip the brace
                braces += 1;
                try field_value_str.appendSlice(self.bytes[added_until..self.idx]);
                added_until = self.idx + 1; // +1 so we don't add the {
            },
            '}' => {
                if (braces == 0) {
                    // closing brace of the field value itself -> done
                    try field_value_str.appendSlice(self.bytes[added_until..self.idx]);
                    break;
                } else {
                    braces -= 1;
                    try field_value_str.appendSlice(self.bytes[added_until..self.idx]);
                    added_until = self.idx + 1; // +1 so we don't add the }
                }
            },
            '\\' => {
                // backslash escape: drop the backslash, keep the escaped byte verbatim
                try field_value_str.appendSlice(self.bytes[added_until..self.idx]);
                try self.advance_to_next_byte();
                byte = self.peek_byte();
                try field_value_str.append(byte);
                added_until = self.idx + 1;
            },
            else => {},
        }
        try self.advance_to_next_byte();
        byte = self.peek_byte();
    }
    try self.eat_byte('}');
    // , is technically optional
    if (self.peek_byte() == ',') {
        try self.advance_to_next_byte();
    }
    // std.debug.print("Field -> {s}: {s}\n",
    //     .{ self.bytes[field_name_start..field_name_end], field_value_str.items });
    const field_type = field_name.get_field_type();
    // payload will be undefined after from_field_tag
    var field_value = FieldType.from_field_tag(field_type);
    switch (field_value) {
        .name_list, .literal_list, .key_list => |*value| {
            // NOTE: the split list still uses the memory of field_value_str
            // so we can't free it
            value.*.values = try BibParser.split_list(
                &self.bib.field_arena.allocator, field_value_str.toOwnedSlice());
        },
        .literal_field, .range_field, .integer_field, .datepart_field, .date_field,
        .verbatim_field, .uri_field, .separated_value_field, .pattern_field,
        .key_field, .code_field, .special_field, => |*value| {
            value.*.value = field_value_str.toOwnedSlice();
        },
    }
    // not checking if field already present -> gets overwritten
    try entry.fields.put(
        self.bytes[field_name_start..field_name_end],
        Field{
            .name = field_name,
            .data = field_value,
        }
    );
}
/// Splits `bytes` on the literal biblatex list separator " and " and returns
/// the parts as an owned slice. The inner slices alias `bytes`; the caller
/// owns (and must free) the outer slice.
/// NOTE: not checking keys of key lists
/// NOTE: leaving names of name lists as-is and parsing them on demand
/// with ListField.parse_name
inline fn split_list(allocator: *std.mem.Allocator, bytes: []const u8) Error![]const []const u8 {
    var parts = std.ArrayList([]const u8).init(allocator);
    var part_iter = std.mem.split(bytes, " and ");
    while (part_iter.next()) |part| try parts.append(part);
    return parts.toOwnedSlice();
}
};
// Checks that split_list splits on the literal " and " separator and that
// multi-word parts (e.g. "von Band, Jr., Test") survive intact.
test "split list" {
    const alloc = std.testing.allocator;
    var res = try BibParser.split_list(alloc, "A and B and C and others");
    var expected = [_][]const u8 { "A", "B", "C", "others" };
    for (res) |it, i| {
        try expect(std.mem.eql(u8, it, expected[i]));
    }
    // otherwise testing allocator complains about memory leak
    alloc.free(res);
    res = try BibParser.split_list(alloc, "Hand and von Band, Jr., Test and Stand de Ipsum and others");
    expected = [_][]const u8 { "Hand", "von Band, Jr., Test", "Stand de Ipsum", "others" };
    for (res) |it, i| {
        try expect(std.mem.eql(u8, it, expected[i]));
    }
    alloc.free(res);
}
// End-to-end parse of a small JabRef-style .bib file: checks entry type,
// the name-list split of `author`, brace stripping in field values, and
// that each field was assigned its expected FieldType variant.
test "bibparser" {
    const alloc = std.testing.allocator;
    const bibtxt =
        \\% Encoding: UTF-8
        \\@Article{Seidel2015,
        \\author = {{Dominik} {Seidel} and {<NAME>} and <NAME> and <NAME> and <NAME>},
        \\title = {{How} neighborhood affects tree diameter increment},
        \\journal = {Forest Ecology and Management},
        \\year = {2015},
        \\volume = {336},
        \\pages = {119--128},
        \\month = {jan},
        \\doi = {10.1016/j.foreco.2014.10.020},
        \\file = {:../Lit/Seidel et al. 2015_as downloaded.pdf:PDF},
        \\publisher = {Elsevier {BV}},
        \\}
        \\@Comment{jabref-meta: databaseType:bibtex;}
    ;
    var bp = BibParser.init(alloc, "/home/test/path", bibtxt);
    var bib = try bp.parse();
    defer bib.deinit();
    try expect(std.mem.eql(u8, bib.path, "/home/test/path"));
    var entry = bib.label_entry_map.get("Seidel2015").?;
    try expect(entry._type == .article);
    const author = entry.fields.get("author").?;
    const author_expected = [_][]const u8 {
        "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"
    };
    for (author.data.name_list.values) |it, i| {
        try expect(std.mem.eql(u8, it, author_expected[i]));
    }
    var field = entry.fields.get("title").?;
    try expect(std.mem.eql(
        u8, field.data.literal_field.value, "How neighborhood affects tree diameter increment"));
    field = entry.fields.get("journal").?;
    try expect(std.mem.eql(
        u8, field.data.literal_field.value, "Forest Ecology and Management"));
    field = entry.fields.get("year").?;
    try expect(std.mem.eql(
        u8, field.data.literal_field.value, "2015"));
    field = entry.fields.get("volume").?;
    try expect(std.mem.eql(
        u8, field.data.integer_field.value, "336"));
    field = entry.fields.get("pages").?;
    try expect(std.mem.eql(
        u8, field.data.range_field.value, "119--128"));
    field = entry.fields.get("month").?;
    try expect(std.mem.eql(
        u8, field.data.literal_field.value, "jan"));
    field = entry.fields.get("doi").?;
    try expect(std.mem.eql(
        u8, field.data.verbatim_field.value, "10.1016/j.foreco.2014.10.020"));
    field = entry.fields.get("publisher").?;
    try expect(field.data.literal_list.values.len == 1);
    try expect(std.mem.eql(u8, field.data.literal_list.values[0], "Elsevier BV"));
}
// A truncated entry must fail with EndOfFile; std.testing.allocator verifies
// that nothing allocated up to the failure point is leaked.
test "bibparser no leak on err" {
    const alloc = std.testing.allocator;
    const bibtxt =
        \\% Encoding: UTF-8
        \\@Article{Seidel2015,
    ;
    var bp = BibParser.init(alloc, "/home/test/path", bibtxt);
    var bib = bp.parse();
    try std.testing.expectError(BibParser.Error.EndOfFile, bib);
}
/// Maps field-name strings (original case, aliasing the input bytes) to their
/// parsed `Field`.
pub const FieldMap = std.StringHashMap(Field);

/// Result of parsing a .bib file. Field payloads are allocated from
/// `field_arena`, so `deinit` releases them all at once.
pub const Bibliography = struct {
    path: []const u8,
    label_entry_map: std.StringHashMap(Entry),
    field_arena: std.heap.ArenaAllocator,

    pub fn deinit(self: *Bibliography) void {
        self.label_entry_map.deinit();
        // frees all field values (and per-entry FieldMap storage drawn from
        // the arena, per Entry.init's documented convention) in one go
        self.field_arena.deinit();
    }
};
// could use std.meta.TagPayloadType to get payload type from tag on a tagged union
/// Tag type of FieldType; used to select a variant before its payload exists
/// (see FieldType.from_field_tag).
pub const FieldTypeTT = std.meta.TagType(FieldType);
/// Datatype of a bibtex field value: either a list (split on " and ") or a
/// single value string.
pub const FieldType = union(enum) {
    // lists
    // all lists can be shortened with 'and others'
    // in case of name_list it will then always print et al. in the bibliography
    name_list: ListField,
    literal_list: ListField,
    key_list: ListField,
    // fields
    literal_field: SingleField,
    range_field: SingleField,
    integer_field: SingleField,
    datepart_field: SingleField,
    date_field: SingleField,
    verbatim_field: SingleField,
    uri_field: SingleField,
    separated_value_field: SingleField,
    pattern_field: SingleField,
    key_field: SingleField,
    code_field: SingleField,
    special_field: SingleField,

    /// returns union with correct tag activated based on it's tag type
    /// NOTE: the payload type will be undefined!
    pub fn from_field_tag(tag: FieldTypeTT) FieldType {
        return switch (tag) {
            .name_list => FieldType{ .name_list = undefined },
            .literal_list => FieldType{ .literal_list = undefined },
            .key_list => FieldType{ .key_list = undefined },
            .literal_field => FieldType{ .literal_field = undefined },
            .range_field => FieldType{ .range_field = undefined },
            .integer_field => FieldType{ .integer_field = undefined },
            .datepart_field => FieldType{ .datepart_field = undefined },
            .date_field => FieldType{ .date_field = undefined },
            .verbatim_field => FieldType{ .verbatim_field = undefined },
            .uri_field => FieldType{ .uri_field = undefined },
            .separated_value_field => FieldType{ .separated_value_field = undefined },
            .pattern_field => FieldType{ .pattern_field = undefined },
            .key_field => FieldType{ .key_field = undefined },
            .code_field => FieldType{ .code_field = undefined },
            .special_field => FieldType{ .special_field = undefined },
        };
    }
};
/// A field value that is a list (already split on " and "): name, literal and
/// key lists. The inner slices alias the parsed field-value buffer.
pub const ListField = struct {
    values: []const []const u8,

    /// One personal name split into its four biblatex components. Parts that
    /// are absent stay null; present parts alias parse_name's input.
    pub const Name = struct {
        last: ?[]const u8 = null,
        first: ?[]const u8 = null,
        prefix: ?[]const u8 = null,
        suffix: ?[]const u8 = null,

        // BUGFIX: corrected "unkown" typo in the user-facing message text
        // (const name kept as-is since other code may reference it)
        const unkown_format_err = "Name has unknown format{s}!\n" ++
            "Formats are:\n" ++
            "First von Last\n" ++
            "von Last, First\n" ++
            "von Last, Jr, First\n";
    };

    // internal scanner state: which name component we are currently collecting
    const NameState = enum {
        first,
        prefix,
        suffix,
        last,
    };

    /// Splits a single name into its components based on how many commas it
    /// contains (0 -> "First von Last"; 1/2 -> comma forms).
    /// Doesn't recognize lowercase unicode codepoints, which would be needed
    /// for identifying the prefix ('von') part of the Name.
    pub fn parse_name(name: []const u8) BibParser.Error!Name {
        // 4 name components:
        // Family name (also known as 'last' part)
        // Given name (also known as 'first' part)
        // Name prefix (also known as 'von' part)
        // Name suffix (also known as 'Jr' part)
        // the name can be typed in one of 3 forms:
        // "First von Last"
        // "von Last, First"
        // "von Last, Jr, First"
        var commas: u8 = 0;
        for (name) |c| {
            if (c == ',') {
                commas += 1;
            }
        }
        switch (commas) {
            0 => return parse_name_simple(name),
            1, 2 => return parse_name_with_commas(name, commas),
            // technically this should remove the extra commas >2
            else => return parse_name_with_commas(name, commas),
        }
        // BUGFIX: removed unreachable `return result;` here -- every switch
        // prong above returns and no `result` was declared in this scope.
    }

    /// Parses the comma-less "First von Last" form: words up to the first
    /// lowercase-starting word are 'first', lowercase-starting words form the
    /// 'prefix', the first uppercase-starting word after them begins 'last'.
    /// This form has no suffix part.
    fn parse_name_simple(name: []const u8) BibParser.Error!Name {
        // words split by whitespace
        // up to first lowercase word is 'first'
        // all following lowercase words are 'prefix'
        // first uppercase word starts 'last'
        // no suffix part
        var result = Name{};
        var state = NameState.first;
        var i: u16 = 0;
        var last_end: u16 = 0; // exclusive
        var last_word_start: u16 = 0; // inclusive
        var prev_whitespace = false;
        while (i < name.len) : ( i += 1 ) {
            if (utils.is_whitespace(name[i])) {
                prev_whitespace = true;
                // to skip initial whitespace
                if (i == last_end) {
                    last_end += 1;
                }
            } else if (prev_whitespace) {
                // first non-whitespace byte of a new word: its case decides
                // whether we transition between name components
                switch (name[i]) {
                    'a'...'z' => {
                        switch (state) {
                            .first => {
                                state = .prefix;
                                // -1 to not include last space
                                result.first = name[last_end..i-1];
                                last_end = i;
                            },
                            else => {}
                        }
                    },
                    'A'...'Z' => {
                        switch (state) {
                            .prefix => {
                                state = .last;
                                // -1 to not include last space
                                result.prefix = name[last_end..i-1];
                                last_end = i;
                            },
                            else => {},
                        }
                    },
                    else => {},
                }
                prev_whitespace = false;
                last_word_start = i;
            }
        }
        // no words starting with a lowercase letter -> no prefix
        // btparse then just treats the whole string as first
        // but biblatex seems to interpret First Last correctly
        if (state == .first) {
            if (last_word_start != 0) {
                result.first = name[0..last_word_start - 1];
                result.last = name[last_word_start..];
            } else {
                result.last = name[0..];
            }
        } else {
            result.last = name[last_end..];
        }
        return result;
    }

    /// Parses the comma forms "von Last, First" (1 comma) and
    /// "von Last, Jr, First" (2 commas). Everything after the final comma is
    /// 'first'; with 2 commas the middle segment is the 'suffix'.
    fn parse_name_with_commas(name: []const u8, num_commas: u8) BibParser.Error!Name {
        var result = Name{};
        var state = NameState.prefix;
        var last_end: u16 = 0; // exclusive
        var i: u16 = 0;
        var prev_whitespace = false;
        while (i < name.len) : ( i += 1) {
            if (utils.is_whitespace(name[i])) {
                prev_whitespace = true;
                // to skip initial whitespace
                if (i == last_end) {
                    last_end += 1;
                }
            } else if (prev_whitespace) {
                switch (name[i]) {
                    // 'a'...'z' => {
                    // },
                    'A'...'Z' => {
                        // first uppercase-starting word ends the 'von' prefix
                        if (state == .prefix) {
                            state = .last;
                            if (last_end != i) {
                                result.prefix = name[last_end..i-1];
                                last_end = i;
                            }
                        }
                    },
                    else => {},
                }
                prev_whitespace = false;
            }
            switch (name[i]) {
                ',' => {
                    switch (state) {
                        .last, .prefix => {
                            // prefix:
                            // only lowercase words so far but comma would now start first/suffix
                            // treat everything up to here as last
                            state = if (num_commas < 2) .first else .suffix;
                            result.last = name[last_end..i];
                            last_end = i + 1; // skip ,
                        },
                        .suffix => {
                            state = .first;
                            result.suffix = name[last_end..i];
                            last_end = i + 1; // skip ,
                        },
                        else => {},
                    }
                },
                else => {}
            }
        }
        // remainder after the final comma is the 'first' part
        result.first = name[last_end..];
        return result;
    }
};
/// A field holding a single (non-list) value string.
pub const SingleField = struct {
    value: []const u8,
};
/// Returns true if `field_type` holds a single value (SingleField payload)
/// rather than a list (ListField payload).
/// BUGFIX: the return type was declared `void` although both switch prongs
/// return `bool` -- the function could never have compiled when referenced.
inline fn is_single_field_type(field_type: FieldTypeTT) bool {
    return switch (field_type) {
        .name_list, .literal_list, .key_list => false,
        else => true,
    };
}
// Covers the comma-less "First von Last" form, including multi-word first /
// prefix / last parts and the no-prefix "First Last" fallback.
test "parse name simple" {
    var res = try ListField.parse_name("First von Last");
    try expect(std.mem.eql(u8, res.first.?, "First"));
    try expect(std.mem.eql(u8, res.prefix.?, "von"));
    try expect(std.mem.eql(u8, res.last.?, "Last"));
    try expect(res.suffix == null);
    res = try ListField.parse_name("First Last");
    try expect(std.mem.eql(u8, res.first.?, "First"));
    try expect(res.prefix == null);
    try expect(std.mem.eql(u8, res.last.?, "Last"));
    try expect(res.suffix == null);
    // currently not stripping whitespace in the First Last case
    res = try ListField.parse_name("First Last ");
    try expect(std.mem.eql(u8, res.first.?, "First "));
    try expect(res.prefix == null);
    try expect(std.mem.eql(u8, res.last.?, "Last "));
    try expect(res.suffix == null);
    res = try ListField.parse_name("Last-Last");
    try expect(res.first == null);
    try expect(res.prefix == null);
    try expect(std.mem.eql(u8, res.last.?, "Last-Last"));
    try expect(res.suffix == null);
    res = try ListField.parse_name("First J. Other von da of Last Last");
    try expect(std.mem.eql(u8, res.first.?, "First J. Other"));
    try expect(std.mem.eql(u8, res.prefix.?, "von da of"));
    try expect(std.mem.eql(u8, res.last.?, "Last Last"));
    try expect(res.suffix == null);
    // only skipping initial whitespace for now
    // TODO?
    res = try ListField.parse_name(" First von Last");
    try expect(std.mem.eql(u8, res.first.?, "First"));
    try expect(std.mem.eql(u8, res.prefix.?, "von"));
    try expect(std.mem.eql(u8, res.last.?, "Last"));
    try expect(res.suffix == null);
}
// Covers the comma forms "von Last, First" (1 comma) and
// "von Last, Jr, First" (2 commas), with multi-word parts.
test "parse name commas" {
    var res = try ListField.parse_name("von Last, First");
    try expect(std.mem.eql(u8, res.first.?, "First"));
    try expect(std.mem.eql(u8, res.prefix.?, "von"));
    try expect(std.mem.eql(u8, res.last.?, "Last"));
    try expect(res.suffix == null);
    res = try ListField.parse_name("von de of Last Last, First Other");
    try expect(std.mem.eql(u8, res.first.?, "First Other"));
    try expect(std.mem.eql(u8, res.prefix.?, "von de of"));
    try expect(std.mem.eql(u8, res.last.?, "Last Last"));
    try expect(res.suffix == null);
    // 2 commas
    res = try ListField.parse_name("von Last, Jr., First");
    try expect(std.mem.eql(u8, res.first.?, "First"));
    try expect(std.mem.eql(u8, res.prefix.?, "von"));
    try expect(std.mem.eql(u8, res.last.?, "Last"));
    try expect(std.mem.eql(u8, res.suffix.?, "Jr."));
    res = try ListField.parse_name(" von de of Last Last, Jr. Sr., First Other");
    try expect(std.mem.eql(u8, res.first.?, "First Other"));
    try expect(std.mem.eql(u8, res.prefix.?, "von de of"));
    try expect(std.mem.eql(u8, res.last.?, "Last Last"));
    try expect(std.mem.eql(u8, res.suffix.?, "Jr. Sr."));
}
/// A single bibliography entry: its type plus the map of its parsed fields.
/// The entry label is kept as the key of Bibliography.label_entry_map.
pub const Entry = struct {
    _type: EntryType,
    // label: []const u8,
    fields: FieldMap,

    /// FieldMap should be initialized with Bibliography.field_arena.allocator
    /// so we don't have to deinit
    pub fn init(allocator: *std.mem.Allocator, _type: EntryType) Entry {
        return Entry{
            ._type = _type,
            .fields = FieldMap.init(allocator),
        };
    }

    pub fn deinit(self: *Entry) void {
        self.fields.deinit();
    }
};
/// Entry (record) types recognized in the .bib source, lower-cased.
/// not handling @STRING, @PREAMBLE since they're not mentioned in the
/// biblatex manual (only on the bibtex.org/format)
/// handling @COMMENT
pub const EntryType = enum {
    // comment not actually an entry type -> content is ignored
    comment,
    article,
    book,
    mvbook,
    inbook,
    bookinbook,
    suppbook,
    booklet,
    collection,
    mvcollection,
    incollection,
    suppcollection,
    dataset,
    manual,
    misc,
    online,
    patent,
    periodical,
    suppperiodical,
    proceedings,
    mvproceedings,
    inproceedings,
    reference,
    mvreference,
    inreference,
    report,
    set,
    software,
    thesis,
    unpublished,
    xdata,
    // ?custom[a-f]
    // aliases
    conference, // -> inproceedings
    electronic, // -> online
    mastersthesis, // special case of thesis, as type tag
    phdthesis, // special case of thesis, as type tag
    techreport, // -> report, as type tag
    www, // -> online
    // non-standard types -> treated as misc
    artwork,
    audio,
    bibnote,
    commentary,
    image,
    jurisdiction,
    legislation,
    legal,
    letter,
    movie,
    music,
    performance,
    review,
    standard,
    video,
};
/// One parsed field of an entry: its recognized name plus the typed value.
pub const Field = struct {
    // currently not storing str for custom FieldName here, use the key from FieldMap
    name: FieldName,
    data: FieldType,
};
/// All field names known from the biblatex data model, grouped by the
/// FieldType they map to. Unrecognized names are stored as .custom.
pub const FieldName = enum {
    // NOTE: IMPORTANT don't change the order of these since it requires to also change
    // the switch in get_field_type below
    // literal_field START
    custom, // arbitrary name to e.g. save additional info
    abstract,
    addendum,
    annotation,
    booksubtitle,
    booktitle,
    booktitleaddon,
    chapter,
    edition, // can be integer or literal
    eid,
    entrysubtype,
    eprintclass,
    eprinttype,
    eventtitle,
    eventtitleaddon,
    howpublished,
    indextitle,
    isan,
    isbn,
    ismn,
    isrn,
    issn,
    issue,
    issuetitle,
    issuesubtitle,
    issuetitleaddon,
    iswc,
    journaltitle,
    journalsubtitle,
    journaltitleaddon,
    label,
    library,
    mainsubtitle,
    maintitle,
    maintitleaddon,
    month,
    nameaddon,
    note,
    number,
    origtitle,
    pagetotal,
    part,
    reprinttitle,
    series,
    shorthand,
    shorthandintro,
    shortjournal,
    shortseries,
    shorttitle,
    subtitle,
    title,
    titleaddon,
    venue,
    version,
    year,
    usera,
    userb,
    userc,
    userd,
    usere,
    userf,
    verba,
    verbb,
    verbc,
    annote, // alias for annotation
    archiveprefix, // alias -> eprinttype
    journal, // alias -> journaltitle
    key, // alias -> sortkey
    primaryclass, // alias -> eprintclass
    // literal_field END
    //
    // name_list START
    afterword,
    annotator,
    author,
    bookauthor,
    commentator,
    editor,
    editora,
    editorb,
    editorc,
    foreword,
    holder,
    introduction,
    shortauthor,
    shorteditor,
    translator,
    namea,
    nameb,
    namec,
    // name_list END
    //
    // key_field START
    authortype,
    bookpagination,
    editortype,
    editoratype,
    editorbtype,
    editorctype,
    pagination,
    @"type",
    nameatype,
    namebtype,
    namectype,
    // key_field END
    //
    // date_field START
    date,
    eventdate,
    origdate,
    urldate,
    // date_field END
    //
    // verbatim_field START
    doi,
    eprint,
    file,
    pdf, // alias -> file
    // verbatim_field END
    //
    // literal_list START
    institution,
    location,
    organization,
    origlocation,
    origpublisher,
    publisher,
    pubstate,
    lista,
    listb,
    listc,
    listd,
    liste,
    listf,
    address, // alias for location
    school, // alias -> institution
    // literal_list END
    //
    // key_list START
    language,
    origlanguage,
    // key_list END
    //
    // range_field START
    pages,
    // range_field END
    //
    // uri_field START
    url,
    // uri_field END
    //
    // integer_field START
    volume,
    volumes,
    // integer_field END
    //
    // special fields START
    ids,
    crossref,
    fakeset,
    gender,
    entryset,
    execute,
    hyphenation,
    indexsorttitle,
    keywords,
    langid,
    langidopts,
    options,
    presort,
    related,
    relatedoptions,
    relatedtype,
    sortshorthand,
    sortkey,
    sortname,
    sorttitle,
    sortyear,
    xref,
    // special fields END

    /// Maps a field name to its FieldType tag by integer range; relies on the
    /// declaration order and the START/END group markers above staying in sync.
    pub fn get_field_type(self: FieldName) FieldTypeTT {
        // ... is not allowed with enums, so cast it to it's underlying int tag type
        return switch (@enumToInt(self)) {
            @enumToInt(FieldName.custom) ... @enumToInt(FieldName.primaryclass) => .literal_field,
            @enumToInt(FieldName.afterword) ... @enumToInt(FieldName.namec) => .name_list,
            @enumToInt(FieldName.authortype) ... @enumToInt(FieldName.namectype) => .key_field,
            @enumToInt(FieldName.date) ... @enumToInt(FieldName.urldate) => .date_field,
            @enumToInt(FieldName.doi) ... @enumToInt(FieldName.pdf) => .verbatim_field,
            @enumToInt(FieldName.institution) ... @enumToInt(FieldName.school) => .literal_list,
            @enumToInt(FieldName.language), @enumToInt(FieldName.origlanguage) => .key_list,
            @enumToInt(FieldName.pages) => .range_field,
            @enumToInt(FieldName.url) => .uri_field,
            @enumToInt(FieldName.volume), @enumToInt(FieldName.volumes) => .integer_field,
            @enumToInt(FieldName.ids) ... @enumToInt(FieldName.xref) => .special_field,
            else => unreachable,
        };
    }
};
|
src/bibtex.zig
|
// DO NOT MODIFY. This is not actually a source file; it is a textual representation of generated code.
// Use only for debugging purposes.
// Auto-generated constructor
// Access: public
Method <init> : V
(
// (no arguments)
) {
ALOAD 0
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/runtime/Application#<init>
RETURN
}
// Access: static
Field STARTUP_CONTEXT : Lio/quarkus/runtime/StartupContext;
// Access: protected final
Method doStart : V
(
arg 1 = [Ljava/lang/String;
) {
** label1
LDC (String) "java.util.logging.manager"
LDC (String) "org.jboss.logmanager.LogManager"
// Method descriptor: (Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
INVOKESTATIC java/lang/System#setProperty
POP
// Method descriptor: ()V
INVOKESTATIC io/quarkus/runtime/Timing#mainStarted
// Field descriptor: Lio/quarkus/runtime/StartupContext;
GETSTATIC io/quarkus/runner/ApplicationImpl1#STARTUP_CONTEXT
ASTORE 2
** label2
// Method descriptor: ()V
INVOKESTATIC io/quarkus/runtime/generated/RunTimeConfig#getRunTimeConfiguration
NEW io/quarkus/deployment/steps/AgroalProcessor$configureRuntimeProperties6
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/AgroalProcessor$configureRuntimeProperties6#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 2
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/LoggingResourceProcessor$setupLoggingRuntimeInit4
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/LoggingResourceProcessor$setupLoggingRuntimeInit4#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 2
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/ThreadPoolSetup$createExecutor5
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/ThreadPoolSetup$createExecutor5#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 2
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/NarayanaJtaProcessor$build2
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/NarayanaJtaProcessor$build2#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 2
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/ConfigBuildStep$validateConfigProperties17
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/ConfigBuildStep$validateConfigProperties17#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 2
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/HibernateOrmProcessor$startPersistenceUnits19
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/HibernateOrmProcessor$startPersistenceUnits19#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 2
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/UndertowBuildStep$boot21
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/UndertowBuildStep$boot21#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 2
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/LifecycleEventsBuildStep$startupEvent22
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/LifecycleEventsBuildStep$startupEvent22#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 2
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
LDC (String) "0.20.0"
LDC (String) "agroal, cdi, hibernate-orm, jdbc-h2, narayana-jta, resteasy, resteasy-jsonb, smallrye-health"
// Method descriptor: (Ljava/lang/String;Ljava/lang/String;)V
INVOKESTATIC io/quarkus/runtime/Timing#printStartupTime
** label3
GOTO label4
** label5
ASTORE 3
ALOAD 3
// Method descriptor: ()V
INVOKEVIRTUAL java/lang/Throwable#printStackTrace
ALOAD 2
// Method descriptor: ()V
INVOKEVIRTUAL io/quarkus/runtime/StartupContext#close
NEW java/lang/RuntimeException
DUP
LDC (String) "Failed to start quarkus"
ALOAD 3
// Method descriptor: (Ljava/lang/String;Ljava/lang/Throwable;)V
INVOKESPECIAL java/lang/RuntimeException#<init>
CHECKCAST java/lang/Throwable
ATHROW
** label6
GOTO label4
// Try from label2 to label3
// Catch java/lang/Throwable by going to label5
** label4
RETURN
** label7
}
// Access: protected final
Method doStop : V
(
// (no arguments)
) {
** label1
// Field descriptor: Lio/quarkus/runtime/StartupContext;
GETSTATIC io/quarkus/runner/ApplicationImpl1#STARTUP_CONTEXT
// Method descriptor: ()V
INVOKEVIRTUAL io/quarkus/runtime/StartupContext#close
RETURN
** label2
}
// Access: public static
Method <clinit> : V
(
// (no arguments)
) {
** label1
LDC (String) "java.util.logging.manager"
LDC (String) "org.jboss.logmanager.LogManager"
// Method descriptor: (Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
INVOKESTATIC java/lang/System#setProperty
POP
// Method descriptor: ()V
INVOKESTATIC io/quarkus/runtime/Timing#staticInitStarted
NEW io/quarkus/runtime/StartupContext
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/runtime/StartupContext#<init>
ASTORE 0
ALOAD 0
// Field descriptor: Lio/quarkus/runtime/StartupContext;
PUTSTATIC io/quarkus/runner/ApplicationImpl1#STARTUP_CONTEXT
** label2
NEW io/quarkus/deployment/steps/LoggingResourceProcessor$setupLoggingStaticInit1
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/LoggingResourceProcessor$setupLoggingStaticInit1#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 0
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/SmallRyeHealthProcessor$build7
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/SmallRyeHealthProcessor$build7#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 0
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/AgroalProcessor$build3
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/AgroalProcessor$build3#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 0
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/SubstrateConfigBuildStep$build11
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/SubstrateConfigBuildStep$build11#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 0
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/UndertowBuildStep$buildCorsFilter12
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/UndertowBuildStep$buildCorsFilter12#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 0
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/UndertowBuildStep$servletContextBean10
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/UndertowBuildStep$servletContextBean10#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 0
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/RuntimeBeanProcessor$build13
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/RuntimeBeanProcessor$build13#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 0
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/HibernateOrmProcessor$build8
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/HibernateOrmProcessor$build8#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 0
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/HibernateOrmProcessor$build14
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/HibernateOrmProcessor$build14#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 0
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/ArcProcessor$generateResources15
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/ArcProcessor$generateResources15#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 0
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/ResteasyServerCommonProcessor$setupInjection18
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/ResteasyServerCommonProcessor$setupInjection18#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 0
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/UndertowArcIntegrationBuildStep$integrateRequestContext16
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/UndertowArcIntegrationBuildStep$integrateRequestContext16#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 0
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
NEW io/quarkus/deployment/steps/UndertowBuildStep$build20
DUP
// Method descriptor: ()V
INVOKESPECIAL io/quarkus/deployment/steps/UndertowBuildStep$build20#<init>
CHECKCAST io/quarkus/runtime/StartupTask
ALOAD 0
// Method descriptor: (Lio/quarkus/runtime/StartupContext;)V
INVOKEINTERFACE io/quarkus/runtime/StartupTask#deploy
RETURN
** label3
GOTO label4
** label5
ASTORE 1
ALOAD 0
// Method descriptor: ()V
INVOKEVIRTUAL io/quarkus/runtime/StartupContext#close
NEW java/lang/RuntimeException
DUP
LDC (String) "Failed to start quarkus"
ALOAD 1
// Method descriptor: (Ljava/lang/String;Ljava/lang/Throwable;)V
INVOKESPECIAL java/lang/RuntimeException#<init>
CHECKCAST java/lang/Throwable
ATHROW
** label6
GOTO label4
// Try from label2 to label3
// Catch java/lang/Throwable by going to label5
** label4
** label7
}
|
target/generated-sources/gizmo/io/quarkus/runner/ApplicationImpl1.zig
|
const std = @import("std");
const builtin = @import("builtin");
const c = @cImport({
@cInclude("zlib.h");
@cInclude("stddef.h");
});
const alignment = @alignOf(c.max_align_t);
const Allocator = std.mem.Allocator;
/// Error set mirroring zlib's status codes (translated in errorFromInt),
/// plus OutOfMemory for Zig-side allocation failures and Unknown for any
/// unrecognized return value.
pub const Error = error{
    StreamEnd,
    NeedDict,
    Errno,
    StreamError,
    DataError,
    MemError,
    BufError,
    VersionError,
    OutOfMemory,
    Unknown,
};
/// Translates a zlib status code into the corresponding Zig error.
/// Callers are expected to invoke this only for non-Z_OK codes; any code not
/// listed maps to error.Unknown.
pub fn errorFromInt(val: c_int) Error {
    return switch (val) {
        c.Z_STREAM_END => error.StreamEnd,
        c.Z_NEED_DICT => error.NeedDict,
        c.Z_ERRNO => error.Errno,
        c.Z_STREAM_ERROR => error.StreamError,
        c.Z_DATA_ERROR => error.DataError,
        c.Z_MEM_ERROR => error.MemError,
        c.Z_BUF_ERROR => error.BufError,
        c.Z_VERSION_ERROR => error.VersionError,
        else => error.Unknown,
    };
}
// method is copied from pfg's https://gist.github.com/pfgithub/65c13d7dc889a4b2ba25131994be0d20
// we have a header for each allocation that records the length, which we need
// for the allocator. Assuming that there aren't many small allocations this is
// acceptable overhead.
// sentinel written into each header so zfree can detect corrupted/foreign pointers
const magic_value = 0x1234;
/// Bookkeeping header stored in front of every zalloc allocation so zfree can
/// recover the full buffer length.
const ZallocHeader = struct {
    magic: usize,
    size: usize,

    // header size rounded up to a multiple of `alignment` so the user payload
    // that follows it stays max_align_t-aligned
    const size_of_aligned = (std.math.divCeil(usize, @sizeOf(ZallocHeader), alignment) catch unreachable) * alignment;
};
// compile-time guarantee: the header itself never needs stricter alignment
// than the max_align_t alignment used for the allocations it prefixes
comptime {
    if (@alignOf(ZallocHeader) > alignment) {
        @compileError("header has incorrect alignment");
    }
}
/// zlib `zalloc` callback. `private` is the pinned `*Allocator` installed via
/// `stream.opaque`. Allocates `items * size` bytes prefixed by a ZallocHeader
/// that records the length for zfree. Returns null on failure, as zlib expects.
fn zalloc(private: ?*anyopaque, items: c_uint, size: c_uint) callconv(.C) ?*anyopaque {
    if (private == null)
        return null;
    const allocator = @ptrCast(*Allocator, @alignCast(@alignOf(*Allocator), private.?));
    // BUGFIX: `items * size` was computed in c_uint and could overflow-trap;
    // do a checked multiply in usize and report failure via null instead
    const request = std.math.mul(usize, items, size) catch return null;
    // BUGFIX: allocate with explicit alignment -- the header casts below (and
    // the buffer reconstruction in zfree) require `alignment`, which a plain
    // alloc(u8, ...) does not guarantee
    var buf = allocator.alignedAlloc(u8, alignment, ZallocHeader.size_of_aligned + request) catch return null;
    const header = @ptrCast(*ZallocHeader, @alignCast(@alignOf(*ZallocHeader), buf.ptr));
    header.* = .{
        .magic = magic_value,
        .size = request,
    };
    // hand zlib the payload area just past the header
    return buf[ZallocHeader.size_of_aligned..].ptr;
}
/// zlib `zfree` callback: recovers the ZallocHeader in front of `addr`,
/// validates its magic (in checked builds), reconstructs the original buffer
/// and returns it to the pinned allocator.
fn zfree(private: ?*anyopaque, addr: ?*anyopaque) callconv(.C) void {
    if (private == null)
        return;
    const allocator = @ptrCast(*Allocator, @alignCast(@alignOf(*Allocator), private.?));
    // step back over the header that zalloc placed before the payload
    const header = @intToPtr(*ZallocHeader, @ptrToInt(addr.?) - ZallocHeader.size_of_aligned);
    if (builtin.mode != .ReleaseFast) {
        if (header.magic != magic_value)
            @panic("magic value is incorrect");
    }
    // rebuild the slice exactly as allocated (header + payload) so free()
    // sees the original length
    // NOTE(review): assumes the allocation start is aligned to `alignment`
    // -- verify that zalloc provides this guarantee
    var buf: []align(alignment) u8 = undefined;
    buf.ptr = @ptrCast([*]align(alignment) u8, @alignCast(alignment, header));
    buf.len = ZallocHeader.size_of_aligned + header.size;
    allocator.free(buf);
}
/// gzip-framed deflate compression (zlib with windowBits = MAX_WBITS + 16).
pub const gzip = struct {
    /// Convenience constructor; see Compressor.init.
    pub fn compressor(allocator: Allocator, writer: anytype) Error!Compressor(@TypeOf(writer)) {
        return Compressor(@TypeOf(writer)).init(allocator, writer);
    }

    /// Streaming gzip compressor that forwards compressed bytes to an inner
    /// writer. Call flush() to finalize the stream and deinit() to release.
    pub fn Compressor(comptime WriterType: type) type {
        return struct {
            allocator: Allocator,
            stream: *c.z_stream,
            inner: WriterType,

            const Self = @This();
            const WriterError = Error || WriterType.Error;
            const Writer = std.io.Writer(*Self, WriterError, write);

            /// Allocates the z_stream plus a pinned copy of `allocator` that
            /// the zalloc/zfree callbacks dereference through `stream.opaque`.
            pub fn init(allocator: Allocator, inner_writer: WriterType) !Self {
                var ret = Self{
                    .allocator = allocator,
                    .stream = undefined,
                    .inner = inner_writer,
                };

                ret.stream = try allocator.create(c.z_stream);
                errdefer allocator.destroy(ret.stream);

                // if the user provides an allocator zlib uses an opaque pointer for
                // custom malloc an free callbacks, this requires pinning, so we use
                // the allocator to allocate the Allocator struct on the heap
                const pinned = try allocator.create(Allocator);
                errdefer allocator.destroy(pinned);
                pinned.* = allocator;
                ret.stream.@"opaque" = pinned;
                ret.stream.zalloc = zalloc;
                ret.stream.zfree = zfree;

                // MAX_WBITS + 16 selects gzip framing; 8 is zlib's default memLevel
                const rc = c.deflateInit2(ret.stream, c.Z_DEFAULT_COMPRESSION, c.Z_DEFLATED, c.MAX_WBITS + 16, 8, c.Z_DEFAULT_STRATEGY);
                return if (rc == c.Z_OK) ret else errorFromInt(rc);
            }

            pub fn deinit(self: *Self) void {
                const pinned = @ptrCast(*Allocator, @alignCast(@alignOf(*Allocator), self.stream.@"opaque".?));
                _ = c.deflateEnd(self.stream);
                self.allocator.destroy(pinned);
                self.allocator.destroy(self.stream);
            }

            /// Finishes the stream and drains all remaining output to `inner`.
            /// BUGFIX: deflate(Z_FINISH) legitimately returns Z_OK while the
            /// output buffer is full and more data is pending; the previous
            /// code treated that as an error. Loop until Z_STREAM_END instead.
            pub fn flush(self: *Self) !void {
                var tmp: [4096]u8 = undefined;
                while (true) {
                    self.stream.next_out = &tmp;
                    self.stream.avail_out = tmp.len;
                    const rc = c.deflate(self.stream, c.Z_FINISH);
                    if (rc != c.Z_STREAM_END and rc != c.Z_OK)
                        return errorFromInt(rc);

                    const n = tmp.len - self.stream.avail_out;
                    try self.inner.writeAll(tmp[0..n]);
                    if (rc == c.Z_STREAM_END)
                        break;
                }
            }

            /// Compresses `buf`, writing produced output to `inner`.
            /// Returns the number of input bytes consumed.
            pub fn write(self: *Self, buf: []const u8) WriterError!usize {
                var tmp: [4096]u8 = undefined;

                // zlib's next_in is non-const; cast away const without mutating
                self.stream.next_in = @intToPtr([*]u8, @ptrToInt(buf.ptr));
                self.stream.avail_in = @intCast(c_uint, buf.len);

                while (true) {
                    self.stream.next_out = &tmp;
                    self.stream.avail_out = tmp.len;
                    const rc = c.deflate(self.stream, c.Z_PARTIAL_FLUSH);
                    // BUGFIX: when the previous iteration filled tmp exactly and
                    // nothing remains, deflate signals the benign no-progress
                    // condition Z_BUF_ERROR ("not fatal, call again later" per the
                    // zlib manual); treat it as completion instead of an error
                    if (rc == c.Z_BUF_ERROR)
                        break;
                    if (rc != c.Z_OK)
                        return errorFromInt(rc);

                    const n = tmp.len - self.stream.avail_out;
                    try self.inner.writeAll(tmp[0..n]);
                    // room left in tmp means deflate has no more pending output
                    if (self.stream.avail_out != 0)
                        break;
                }

                return buf.len - self.stream.avail_in;
            }

            pub fn writer(self: *Self) Writer {
                return .{ .context = self };
            }
        };
    }
};
// Round-trip test: compress rfc1951.txt with the zlib-backed gzip
// compressor, decompress with std's gzip reader, and check byte equality.
test "compress gzip with zig interface" {
    const allocator = std.testing.allocator;
    // The fifo acts as both the compressor's sink and the decompressor's source.
    var fifo = std.fifo.LinearFifo(u8, .Dynamic).init(allocator);
    defer fifo.deinit();
    const input = @embedFile("rfc1951.txt");
    var compressor = try gzip.compressor(allocator, fifo.writer());
    defer compressor.deinit();
    const writer = compressor.writer();
    try writer.writeAll(input);
    try compressor.flush();
    var decompressor = try std.compress.gzip.gzipStream(allocator, fifo.reader());
    defer decompressor.deinit();
    const actual = try decompressor.reader().readAllAlloc(allocator, std.math.maxInt(usize));
    defer allocator.free(actual);
    try std.testing.expectEqualStrings(input, actual);
}
// Smoke test exercising the raw zlib C API (default malloc/free via the
// null zalloc/zfree/opaque fields). Output is discarded; this only checks
// that deflate runs one-shot without crashing.
test "compress gzip with C interface" {
    var input = [_]u8{ 'b', 'l', 'a', 'r', 'g' };
    var output_buf: [4096]u8 = undefined;
    var zs: c.z_stream = undefined;
    zs.zalloc = null;
    zs.zfree = null;
    zs.@"opaque" = null;
    zs.avail_in = input.len;
    zs.next_in = &input;
    zs.avail_out = output_buf.len;
    zs.next_out = &output_buf;
    // 15 | 16 == MAX_WBITS + 16: gzip wrapper instead of raw zlib.
    _ = c.deflateInit2(&zs, c.Z_DEFAULT_COMPRESSION, c.Z_DEFLATED, 15 | 16, 8, c.Z_DEFAULT_STRATEGY);
    _ = c.deflate(&zs, c.Z_FINISH);
    _ = c.deflateEnd(&zs);
}
|
.gyro/zig-zlib-mattnite-github.com-eca7a5ba/pkg/src/main.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
/// max str len of 512
/// Converts a UTF-8 string to a zero-padded UTF-16LE array (e.g. for
/// wide-string APIs). Invalid UTF-8 hits `unreachable`.
/// NOTE(review): if encoding fills all 512 slots, `result[last]` indexes
/// out of bounds -- the effective maximum is 511 UTF-16 code units plus
/// the terminator; confirm callers stay below that.
pub fn L(str: []const u8) [512]u16 {
    var result = []u16{0} ** 512;
    // `last` is the number of u16 code units written; the next slot is
    // explicitly zeroed as a terminator (buffer starts zeroed anyway).
    const last = std.unicode.utf8ToUtf16Le(result[0..], str) catch unreachable;
    result[last] = 0;
    return result;
}
/// Clamps `x` to the closed interval spanned by `a` and `b`.
/// Unlike std.math.clamp, the bounds may be supplied in either order.
pub fn clamp(comptime T: type, x: T, a: T, b: T) T {
    const lo = std.math.min(a, b);
    const hi = std.math.max(a, b);
    return std.math.max(lo, std.math.min(x, hi));
}
/// Returns the smallest power of two >= `x`; returns 1 for x == 0.
/// Values above the largest representable power of two wrap to 0
/// (wrapping arithmetic, matching the original's overflow behavior).
///
/// Fix: the previous bit-smear only applied one size-dependent shift plus
/// >>4, >>2, >>1, skipping the >>8 (on 32/64-bit) and >>16 (on 64-bit)
/// steps -- e.g. nextPowerOf2(0x10001) returned 0x1FE01 instead of
/// 0x20000. The full shift cascade is now applied.
fn nextPowerOf2(x: usize) usize {
    if (x == 0) return 1;
    // Smear the highest set bit of (x - 1) into every lower position,
    // then add one to land on the next power of two.
    var result = x -% 1;
    result |= result >> 1;
    result |= result >> 2;
    result |= result >> 4;
    // Comptime-known conditions: dead branches are never analyzed, so the
    // larger shifts cannot trip the shift-amount check on small targets.
    if (@sizeOf(usize) >= 2) result |= result >> 8;
    if (@sizeOf(usize) >= 4) result |= result >> 16;
    if (@sizeOf(usize) >= 8) result |= result >> 32;
    return result +% 1;
}
/// Convenience wrapper: a ring buffer of `T` using `T`'s natural alignment.
pub fn RingBuffer(comptime T: type) type {
    return AlignedRingBuffer(T, @alignOf(T));
}
/// Growable ring buffer of `T` with slot alignment `A`. Capacity is kept
/// at a power of two so indexing can use a bitmask.
pub fn AlignedRingBuffer(comptime T: type, comptime A: u29) type {
    return struct {
        const Self = @This();
        allocator: *Allocator,
        // NOTE(review): slots are `?T`, which costs extra space per
        // element; `pop`'s optional return seems to be the only reason --
        // confirm whether plain `T` slots would do.
        items: []align(A) ?T,
        write: usize, // total pushes (monotonic); slot = mask(write)
        read: usize, // total pops (monotonic); slot = mask(read)

        pub fn init(allocator: *Allocator) Self {
            return Self{
                .allocator = allocator,
                .items = []align(A) ?T{},
                .write = 0,
                .read = 0,
            };
        }

        pub fn deinit(self: *Self) void {
            self.allocator.free(self.items);
        }

        /// Maps a monotonic counter onto a slot index. Requires capacity
        /// to be a nonzero power of two.
        fn mask(self: Self, idx: usize) usize {
            return idx & (self.capacity() - 1);
        }

        pub fn empty(self: Self) bool {
            return self.write == self.read;
        }

        pub fn full(self: Self) bool {
            return self.count() == self.capacity();
        }

        /// Number of elements currently stored.
        pub fn count(self: Self) usize {
            return self.write - self.read;
        }

        pub fn capacity(self: Self) usize {
            return self.items.len;
        }

        /// Appends `data`, growing to the next power of two when full.
        /// NOTE(review): growing with realloc does not re-lay-out wrapped
        /// elements -- if the live region wraps around the end of `items`
        /// when the buffer grows, the logical element order is corrupted.
        /// Confirm callers only grow before wrap-around occurs.
        pub fn push(self: *Self, data: T) !void {
            if (self.full()) {
                const new_capacity = nextPowerOf2(self.capacity() + 1);
                self.items = try self.allocator.realloc(self.items, new_capacity);
            }
            self.items[self.mask(self.write)] = data;
            self.write += 1;
        }

        /// Removes and returns the oldest element, or null when empty.
        pub fn pop(self: *Self) ?T {
            if (!self.empty()) {
                self.read += 1;
                return self.items[self.mask(self.read - 1)];
            }
            return null;
        }
    };
}
|
src/app/util.zig
|
const std = @import("std");
const testing = std.testing;
const parser = @import("parser.zig");
const traverser = @import("traverser.zig");
pub const Traverser = traverser.Traverser;
pub const totalSize = traverser.totalSize;
/// A parsed device-tree node. `root` always points at the tree's root
/// node, `parent` is null only for the root, and `props`/`children` are
/// owned slices released by `deinit`.
pub const Node = struct {
    name: []const u8,
    props: []Prop,
    root: *Node,
    parent: ?*Node,
    children: []*Node,

    /// Walks `path` child-by-child from `start`, then returns the payload
    /// of `prop_tag` on the final node. Returns null if any path element
    /// or the property itself is missing.
    pub fn propAt(start: *const Node, path: []const []const u8, comptime prop_tag: std.meta.Tag(Prop)) ?std.meta.TagPayload(Prop, prop_tag) {
        var node: *const Node = start;
        var i: usize = 0;
        while (i < path.len) : (i += 1) {
            node = node.child(path[i]) orelse return null;
        }
        return node.prop(prop_tag);
    }

    /// Returns the direct child named `child_name`, or null.
    pub fn child(node: *const Node, child_name: []const u8) ?*Node {
        for (node.children) |c| {
            if (std.mem.eql(u8, child_name, c.name)) {
                return c;
            }
        }
        return null;
    }

    /// Returns this node's payload for `prop_tag`, or null if the node
    /// does not carry that property.
    pub fn prop(node: *const Node, comptime prop_tag: std.meta.Tag(Prop)) ?std.meta.TagPayload(Prop, prop_tag) {
        for (node.props) |p| {
            if (p == prop_tag) {
                return @field(p, @tagName(prop_tag));
            }
        }
        return null;
    }

    /// Resolves #interrupt-cells for this node: its own property if
    /// present, otherwise inherited through the interrupt parent chain.
    pub fn interruptCells(node: *Node) ?u32 {
        if (node.prop(.InterruptCells)) |ic| {
            return ic;
        }
        if (node.interruptParent()) |ip| {
            return ip.interruptCells();
        }
        return null;
    }

    /// The interrupt parent: an explicit interrupt-parent phandle looked
    /// up from the root if present, otherwise the structural parent.
    pub fn interruptParent(node: *Node) ?*Node {
        if (node.prop(.InterruptParent)) |ip| {
            return node.root.findPHandle(ip);
        }
        return node.parent;
    }

    /// Depth-first search of this subtree for a node whose phandle
    /// property equals `phandle`.
    pub fn findPHandle(node: *Node, phandle: u32) ?*Node {
        if (node.prop(.PHandle)) |v| {
            if (v == phandle) {
                return node;
            }
        }
        for (node.children) |c| {
            if (c.findPHandle(phandle)) |n| {
                return n;
            }
        }
        return null;
    }

    /// #address-cells for this node, inherited from ancestors if unset.
    pub fn addressCells(node: *const Node) ?u32 {
        return node.prop(.AddressCells) orelse (node.parent orelse return null).addressCells();
    }

    /// #size-cells for this node, inherited from ancestors if unset.
    pub fn sizeCells(node: *const Node) ?u32 {
        return node.prop(.SizeCells) orelse (node.parent orelse return null).sizeCells();
    }

    /// Recursively frees props, children, and the node itself. The
    /// allocator must be the one used during parsing.
    pub fn deinit(node: *Node, allocator: *std.mem.Allocator) void {
        for (node.props) |p| {
            p.deinit(allocator);
        }
        allocator.free(node.props);
        for (node.children) |c| {
            c.deinit(allocator);
        }
        allocator.free(node.children);
        allocator.destroy(node);
    }

    /// std.fmt entry point; prints the subtree with indentation.
    /// `fmt` and `options` are unused.
    pub fn format(node: Node, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
        try node.formatNode(writer, 0);
    }

    fn formatNode(node: Node, writer: anytype, depth: usize) std.os.WriteError!void {
        try indent(writer, depth);
        try std.fmt.format(writer, "Node <{'}>\n", .{std.zig.fmtEscapes(node.name)});
        for (node.props) |p| {
            try indent(writer, depth + 1);
            try std.fmt.format(writer, "{}\n", .{p});
        }
        for (node.children) |c| {
            try c.formatNode(writer, depth + 1);
        }
    }

    // Writes one space per nesting level.
    fn indent(writer: anytype, depth: usize) !void {
        var i: usize = 0;
        while (i < depth) : (i += 1) {
            try writer.writeAll(" ");
        }
    }
};
/// Device-tree "status" property values.
pub const PropStatus = enum {
    Okay,
    Disabled,
    Fail,

    /// std.fmt entry point: renders the lowercase device-tree spelling.
    /// `fmt` and `options` are unused.
    pub fn format(status: PropStatus, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
        const text = switch (status) {
            .Okay => "okay",
            .Disabled => "disabled",
            .Fail => "fail",
        };
        try writer.writeAll(text);
    }
};
/// One parsed device-tree property. Slice-carrying payloads own their
/// memory; `deinit` frees exactly what each tag allocates (see the per-tag
/// cases there). `format` renders a DTS-like human-readable form.
pub const Prop = union(enum) {
    AddressCells: u32,
    SizeCells: u32,
    InterruptCells: u32,
    ClockCells: u32,
    RegShift: u32,
    PHandle: u32,
    InterruptParent: u32,
    // (address, size) pairs; u128 accommodates any cell count.
    Reg: [][2]u128,
    // (child-addr, parent-addr, size) triples.
    Ranges: [][3]u128,
    Compatible: [][]const u8,
    Status: PropStatus,
    // Outer slice: interrupt specifiers; inner: cells of one specifier.
    Interrupts: [][]u32,
    Clocks: [][]u32,
    ClockNames: [][]const u8,
    ClockOutputNames: [][]const u8,
    ClockFrequency: u64,
    InterruptNames: [][]const u8,
    RegIoWidth: u64,
    PinctrlNames: [][]const u8,
    Pinctrl0: []u32,
    Pinctrl1: []u32,
    Pinctrl2: []u32,
    AssignedClockRates: []u32,
    AssignedClocks: [][]u32,
    // Raw bytes kept for properties that need a second resolution pass.
    Unresolved: PropUnresolved,
    // Properties this parser does not understand, kept verbatim.
    Unknown: PropUnknown,

    /// std.fmt entry point. `fmt` and `options` are unused.
    pub fn format(prop: Prop, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
        switch (prop) {
            .AddressCells => |v| try std.fmt.format(writer, "#address-cells: 0x{x:0>2}", .{v}),
            .SizeCells => |v| try std.fmt.format(writer, "#size-cells: 0x{x:0>2}", .{v}),
            .InterruptCells => |v| try std.fmt.format(writer, "#interrupt-cells: 0x{x:0>2}", .{v}),
            .ClockCells => |v| try std.fmt.format(writer, "#clock-cells: 0x{x:0>2}", .{v}),
            .RegShift => |v| try std.fmt.format(writer, "reg-shift: 0x{x:0>2}", .{v}),
            .Reg => |v| {
                try writer.writeAll("reg: <");
                for (v) |pair, i| {
                    if (i != 0) {
                        try writer.writeAll(">, <");
                    }
                    try std.fmt.format(writer, "0x{x:0>2} 0x{x:0>2}", .{ pair[0], pair[1] });
                }
                try writer.writeByte('>');
            },
            .Ranges => |v| {
                try writer.writeAll("ranges: <");
                for (v) |triple, i| {
                    if (i != 0) {
                        try writer.writeAll(">, <");
                    }
                    try std.fmt.format(writer, "0x{x:0>2} 0x{x:0>2} 0x{x:0>2}", .{ triple[0], triple[1], triple[2] });
                }
                try writer.writeByte('>');
            },
            .Compatible => |v| try (StringListFormatter{ .string_list = v }).write("compatible: ", writer),
            .Status => |v| try std.fmt.format(writer, "status: \"{s}\"", .{v}),
            .PHandle => |v| try std.fmt.format(writer, "phandle: <0x{x:0>2}>", .{v}),
            .InterruptParent => |v| try std.fmt.format(writer, "interrupt-parent: <0x{x:0>2}>", .{v}),
            .Interrupts => |groups| {
                try writer.writeAll("interrupts: <");
                for (groups) |group, i| {
                    if (i != 0) {
                        try writer.writeAll(">, <");
                    }
                    for (group) |item, j| {
                        if (j != 0) {
                            try writer.writeAll(" ");
                        }
                        try std.fmt.format(writer, "0x{x:0>2}", .{item});
                    }
                }
                try writer.writeAll(">");
            },
            // Clocks and AssignedClocks share the same cell-group layout;
            // only the label differs.
            .Clocks,
            .AssignedClocks,
            => |groups| {
                switch (prop) {
                    .Clocks => try writer.writeAll("clocks: <"),
                    .AssignedClocks => try writer.writeAll("assigned-clocks: <"),
                    else => unreachable,
                }
                for (groups) |group, i| {
                    if (i != 0) {
                        try writer.writeAll(">, <");
                    }
                    for (group) |item, j| {
                        if (j != 0) {
                            try writer.writeAll(" ");
                        }
                        try std.fmt.format(writer, "0x{x:0>2}", .{item});
                    }
                }
                try writer.writeAll(">");
            },
            .ClockNames => |v| try (StringListFormatter{ .string_list = v }).write("clock-names: ", writer),
            .ClockOutputNames => |v| try (StringListFormatter{ .string_list = v }).write("clock-output-names: ", writer),
            .ClockFrequency => |v| try (FrequencyFormatter{ .freq = v }).write("clock-frequency: ", writer),
            .InterruptNames => |v| try (StringListFormatter{ .string_list = v }).write("interrupt-names: ", writer),
            .RegIoWidth => |v| try std.fmt.format(writer, "reg-io-width: 0x{x:0>2}", .{v}),
            .PinctrlNames => |v| try (StringListFormatter{ .string_list = v }).write("pinctrl-names: ", writer),
            .Pinctrl0, .Pinctrl1, .Pinctrl2 => |phandles| {
                try std.fmt.format(writer, "pinctrl{s}: <", .{switch (prop) {
                    .Pinctrl0 => "0",
                    .Pinctrl1 => "1",
                    .Pinctrl2 => "2",
                    else => unreachable,
                }});
                for (phandles) |phandle, i| {
                    if (i != 0) {
                        try writer.writeAll(" ");
                    }
                    try std.fmt.format(writer, "0x{x:0>4}", .{phandle});
                }
                try writer.writeByte('>');
            },
            .AssignedClockRates => |clock_rates| {
                try writer.writeAll("assigned-clock-rates: <");
                for (clock_rates) |clock_rate, i| {
                    if (i != 0) {
                        try writer.writeAll(" ");
                    }
                    try (FrequencyFormatter{ .freq = clock_rate }).write("", writer);
                }
                try writer.writeByte('>');
            },
            .Unresolved => |v| try writer.writeAll("UNRESOLVED"),
            .Unknown => |v| try std.fmt.format(writer, "{'}: (unk {} bytes) <{}>", .{ std.zig.fmtEscapes(v.name), v.value.len, std.zig.fmtEscapes(v.value) }),
        }
    }

    /// Pretty-prints a frequency with a GHz/MHz/kHz/Hz unit.
    const FrequencyFormatter = struct {
        freq: u64,
        // Exists to work around https://github.com/ziglang/zig/issues/7534.
        pub fn write(this: @This(), comptime prefix: []const u8, writer: anytype) !void {
            try writer.writeAll(prefix);
            try this.format("", .{}, writer);
        }
        pub fn format(this: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
            if (this.freq / 1_000_000_000 > 0) {
                try std.fmt.format(writer, "{d}GHz", .{@intToFloat(f32, this.freq / 1_000_000) / 1_000});
            } else if (this.freq / 1_000_000 > 0) {
                try std.fmt.format(writer, "{d}MHz", .{@intToFloat(f32, this.freq / 1_000) / 1_000});
            } else if (this.freq / 1_000 > 0) {
                try std.fmt.format(writer, "{d}kHz", .{@intToFloat(f32, this.freq) / 1_000});
            } else {
                try std.fmt.format(writer, "{}Hz", .{this.freq});
            }
        }
    };

    /// Renders a string list as "a", "b", "c".
    const StringListFormatter = struct {
        string_list: [][]const u8,
        // Exists to work around https://github.com/ziglang/zig/issues/7534.
        pub fn write(this: @This(), comptime prefix: []const u8, writer: anytype) !void {
            try writer.writeAll(prefix);
            try this.format("", .{}, writer);
        }
        pub fn format(this: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
            try writer.writeByte('"');
            for (this.string_list) |s, i| {
                if (i != 0) {
                    try writer.writeAll("\", \"");
                }
                try writer.writeAll(s);
            }
            try writer.writeByte('"');
        }
    };

    /// Frees whatever this tag's payload allocated during parsing.
    /// Scalar and borrowed payloads are no-ops.
    pub fn deinit(prop: Prop, allocator: *std.mem.Allocator) void {
        switch (prop) {
            .Reg => |v| allocator.free(v),
            .Ranges => |v| allocator.free(v),
            .Compatible,
            .ClockNames,
            .ClockOutputNames,
            .InterruptNames,
            .PinctrlNames,
            => |v| allocator.free(v),
            // Nested slices: free each inner group, then the outer slice.
            .Interrupts,
            .Clocks,
            .AssignedClocks,
            => |groups| {
                for (groups) |group| {
                    allocator.free(group);
                }
                allocator.free(groups);
            },
            .Pinctrl0,
            .Pinctrl1,
            .Pinctrl2,
            => |phandles| allocator.free(phandles),
            .AssignedClockRates => |clock_rates| allocator.free(clock_rates),
            .AddressCells,
            .SizeCells,
            .InterruptCells,
            .ClockCells,
            .RegShift,
            .PHandle,
            .InterruptParent,
            .Status,
            .Unresolved,
            .Unknown,
            .ClockFrequency,
            .RegIoWidth,
            => {},
        }
    }
};
/// Raw property bytes held until a later resolution pass can decode them
/// (these properties depend on cell counts defined elsewhere in the tree).
pub const PropUnresolved = union(enum) {
    Reg: []const u8,
    Ranges: []const u8,
    Interrupts: []const u8,
    Clocks: []const u8,
    AssignedClocks: []const u8,
};
/// A property this parser does not recognize, kept as raw name/value bytes.
pub const PropUnknown = struct {
    name: []const u8,
    value: []const u8,
};
pub const parse = parser.parse;
pub const Error = parser.Error;
const qemu_arm64_dtb = @embedFile("../qemu_arm64.dtb");
const rockpro64_dtb = @embedFile("../rk3399-rockpro64.dtb");
// End-to-end parse tests against two embedded DTB fixtures (QEMU arm64
// and ROCKPro64), exercising propAt/child/prop and the formatters.
// NOTE(review): the testing.expect* results are not `try`ed -- this
// assumes a std version whose expect* functions panic on failure rather
// than return an error; confirm against the targeted Zig version.
test "parse" {
    {
        var qemu_arm64 = try parse(std.testing.allocator, qemu_arm64_dtb);
        defer qemu_arm64.deinit(std.testing.allocator);
        // This QEMU DTB places 512MiB of memory at 1GiB.
        testing.expectEqualSlices(
            [2]u128,
            &.{.{ 1024 * 1024 * 1024, 512 * 1024 * 1024 }},
            qemu_arm64.propAt(&.{"memory@40000000"}, .Reg).?,
        );
        // It has an A53-compatible CPU.
        const compatible = qemu_arm64.propAt(&.{ "cpus", "cpu@0" }, .Compatible).?;
        testing.expectEqual(@as(usize, 1), compatible.len);
        testing.expectEqualStrings("arm,cortex-a53", compatible[0]);
        // Its pl011 UART controller has interrupts defined by <0x00 0x01 0x04>.
        // This tests the #interrupt-cells lookup. Its interrupt domain
        // is defined ahead of it in the file.
        // This defines one SPI-type interrupt, IRQ 1, active high
        // level-sensitive. See https://git.io/JtKJk.
        const pl011 = qemu_arm64.child("pl011@9000000").?;
        const interrupts = pl011.prop(.Interrupts).?;
        testing.expectEqual(@as(usize, 1), interrupts.len);
        testing.expectEqualSlices(u32, &.{ 0x0, 0x01, 0x04 }, interrupts[0]);
        // Test we refer to the apb-pclk's clock cells (0) correctly.
        testing.expectEqual(@as(u32, 0), qemu_arm64.propAt(&.{"apb-pclk"}, .ClockCells).?);
        const clock_names = pl011.prop(.ClockNames).?;
        testing.expectEqual(@as(usize, 2), clock_names.len);
        testing.expectEqualSlices(u8, "uartclk", clock_names[0]);
        testing.expectEqualSlices(u8, "apb_pclk", clock_names[1]);
        const clocks = pl011.prop(.Clocks).?;
        testing.expectEqual(@as(usize, 2), clocks.len);
        testing.expectEqualSlices(u32, &.{0x8000}, clocks[0]);
        testing.expectEqualSlices(u32, &.{0x8000}, clocks[1]);
        // Make sure this works (and that the code gets compiled).
        std.debug.print("{}\n", .{qemu_arm64});
    }
    {
        var rockpro64 = try parse(std.testing.allocator, rockpro64_dtb);
        defer rockpro64.deinit(std.testing.allocator);
        // This ROCKPro64 DTB has a serial at 0xff1a0000.
        const serial = rockpro64.child("serial@ff1a0000").?;
        testing.expectEqualSlices([2]u128, &.{.{ 0xff1a0000, 0x100 }}, serial.prop(.Reg).?);
        testing.expectEqual(PropStatus.Okay, serial.prop(.Status).?);
        testing.expectEqual(@as(u32, 2), serial.prop(.RegShift).?);
        testing.expectEqual(@as(u32, 0x2e), serial.prop(.PHandle).?);
        const compatible = serial.prop(.Compatible).?;
        testing.expectEqual(@as(usize, 2), compatible.len);
        testing.expectEqualStrings("rockchip,rk3399-uart", compatible[0]);
        testing.expectEqualStrings("snps,dw-apb-uart", compatible[1]);
        const interrupts = serial.prop(.Interrupts).?;
        testing.expectEqual(@as(usize, 1), interrupts.len);
        // GICv3 specifies 4 interrupt cells. This defines an SPI-type
        // interrupt, IRQ 100, level triggered. The last field must be zero
        // for SPI interrupts.
        testing.expectEqualSlices(u32, &.{ 0x0, 0x64, 0x04, 0x00 }, interrupts[0]);
        // Test that we refer to the clock controller's clock cells (1) correctly.
        testing.expectEqual(@as(u32, 1), rockpro64.propAt(&.{"clock-controller@ff760000"}, .ClockCells).?);
        const clock_names = serial.prop(.ClockNames).?;
        testing.expectEqual(@as(usize, 2), clock_names.len);
        testing.expectEqualSlices(u8, "baudclk", clock_names[0]);
        testing.expectEqualSlices(u8, "apb_pclk", clock_names[1]);
        const clocks = serial.prop(.Clocks).?;
        testing.expectEqual(@as(usize, 2), clocks.len);
        testing.expectEqualSlices(u32, &.{ 0x85, 0x53 }, clocks[0]);
        testing.expectEqualSlices(u32, &.{ 0x85, 0x162 }, clocks[1]);
        // Print it out.
        std.debug.print("{}\n", .{rockpro64});
    }
}
// Drives the low-level event-based Traverser over the QEMU fixture,
// tracking a tiny state machine to capture the pl011 node's raw "reg"
// property bytes (two big-endian u64 cells: address 0x9000000, size 0x1000).
test "Traverser" {
    var qemu_arm64: Traverser = undefined;
    try qemu_arm64.init(qemu_arm64_dtb);
    var state: union(enum) { OutsidePl011, InsidePl011 } = .OutsidePl011;
    var ev = try qemu_arm64.current();
    var reg_value: ?[]const u8 = null;
    while (ev != .End) : (ev = try qemu_arm64.next()) {
        switch (state) {
            .OutsidePl011 => if (ev == .BeginNode and std.mem.startsWith(u8, ev.BeginNode, "pl011@")) {
                state = .InsidePl011;
            },
            .InsidePl011 => switch (ev) {
                .EndNode => state = .OutsidePl011,
                .Prop => |prop| if (std.mem.eql(u8, prop.name, "reg")) {
                    reg_value = prop.value;
                },
                else => {},
            },
        }
    }
    std.testing.expectEqualSlices(u8, &.{ 0, 0, 0, 0, 0x09, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x10, 0 }, reg_value.?);
}
|
src/dtb.zig
|
const std = @import("std");
const serializer = @import("serializer.zig");
pub const JsonLexicon: serializer.Lexicon = .{ .convert = _jsonLexConv, .parse = _jsonLexParse };
/// Serializes `tree` into a JSON string allocated from `alloc`.
/// Caller owns the returned slice.
fn _jsonLexConv(alloc: *std.mem.Allocator, tree: *serializer.Tree) []const u8 {
    var out = std.ArrayList(u8).init(alloc);
    _jsonLexInner(tree.root, &out);
    return out.toOwnedSlice();
}
/// Recursively appends the JSON rendering of `node` to `holder`.
/// Allocation failures abort via `catch unreachable` (matching the
/// non-failing signature expected by _jsonLexConv).
fn _jsonLexInner(node: *serializer.Node, holder: *std.ArrayList(u8)) void {
    switch (node.data) {
        .Map => {
            holder.append('{') catch unreachable;
            // Track position so a comma is emitted between entries only.
            var count = node.data.Map.count();
            var current: u32 = 0;
            var iter = node.data.Map.iterator();
            while (iter.next()) |innerNode| {
                holder.append('\"') catch unreachable;
                holder.appendSlice(innerNode.key_ptr.*) catch unreachable;
                holder.appendSlice("\":") catch unreachable;
                _jsonLexInner(innerNode.value_ptr.*, holder);
                if (current + 1 < count) {
                    holder.append(',') catch unreachable;
                }
                current += 1;
            }
            holder.append('}') catch unreachable;
        },
        .Literal => {
            switch (node.data.Literal) {
                .String => {
                    // NOTE(review): the string is emitted verbatim -- no
                    // JSON escaping of embedded quotes/backslashes.
                    holder.append('\"') catch unreachable;
                    holder.appendSlice(node.data.Literal.String) catch unreachable;
                    holder.append('\"') catch unreachable;
                },
                .Integer => {
                    var cast = std.fmt.allocPrint(std.heap.page_allocator, "{any}", .{node.data.Literal.Integer}) catch unreachable;
                    defer std.heap.page_allocator.free(cast);
                    holder.appendSlice(cast) catch unreachable;
                },
                .Float => {
                    var cast = std.fmt.allocPrint(std.heap.page_allocator, "{d:.5}", .{node.data.Literal.Float}) catch unreachable;
                    // Fix: this buffer comes from page_allocator (see the
                    // allocPrint above) but was previously freed through
                    // node.tree.alloc -- an allocator mismatch. Free it
                    // with page_allocator, matching the Integer branch.
                    defer std.heap.page_allocator.free(cast);
                    holder.appendSlice(cast) catch unreachable;
                },
                .Bool => {
                    if (node.data.Literal.Bool == true) {
                        holder.appendSlice("true") catch unreachable;
                    } else {
                        holder.appendSlice("false") catch unreachable;
                    }
                },
                .Null => {
                    holder.appendSlice("null") catch unreachable;
                },
            }
        },
        .Array => {
            holder.append('[') catch unreachable;
            for (node.data.Array.items) |innerNode, i| {
                _jsonLexInner(innerNode, holder);
                if (i + 1 < node.data.Array.items.len) {
                    holder.append(',') catch unreachable;
                }
            }
            holder.append(']') catch unreachable;
        },
    }
}
/// Parses `bytes` as JSON into a serializer tree; panics with the raw
/// input and error name on any parse failure.
fn _jsonLexParse(allocator: *std.mem.Allocator, bytes: []const u8) *serializer.Tree {
    if (_jsonLexParseInner(allocator, bytes)) |tree| {
        return tree;
    } else |err| {
        std.debug.panic("FAILED TO SERIALIZE JSON:\n{s}\nERROR:{s}", .{ bytes, @errorName(err) });
    }
}
/// Hand-rolled JSON parser: scans `bytes` byte-by-byte, maintaining a
/// stack of open containers and a pending `tag` (the most recent object
/// key). Returns an arena-backed tree owned by the caller.
/// NOTE(review): several lookaheads (bytes[end + 1], bytes[i + 3], the
/// unterminated-string scan) index past the buffer on truncated input --
/// confirm inputs are always well-formed, as the panic wrapper implies.
/// String escapes are detected for quote-skipping but never unescaped.
fn _jsonLexParseInner(allocator: *std.mem.Allocator, bytes: []const u8) !*serializer.Tree {
    var tree: *serializer.Tree = serializer.Tree.initArena(allocator);
    var nodeStack = std.ArrayList(*serializer.Node).init(allocator);
    defer nodeStack.deinit();
    // Key waiting to be attached to the next value (null inside arrays
    // or for the root).
    var tag: ?[]const u8 = null;
    var i: usize = 0;
    while (i < bytes.len) {
        switch (bytes[i]) {
            '{' => {
                var node = tree.newObject();
                if (nodeStack.items.len == 0) {
                    tree.root = node;
                } else {
                    var parent: *serializer.Node = nodeStack.items[nodeStack.items.len - 1];
                    parent.push(tag, node);
                    tag = null;
                }
                try nodeStack.append(node);
                i += 1;
            },
            '}' => {
                _ = nodeStack.pop();
                // Closing the outermost container ends the parse.
                if (nodeStack.items.len == 0) {
                    break;
                }
                i += 1;
            },
            '[' => {
                var node = tree.newArray();
                if (nodeStack.items.len == 0) {
                    tree.root = node;
                } else {
                    var parent: *serializer.Node = nodeStack.items[nodeStack.items.len - 1];
                    parent.push(tag, node);
                    tag = null;
                }
                try nodeStack.append(node);
                i += 1;
            },
            ']' => {
                _ = nodeStack.pop();
                if (nodeStack.items.len == 0) {
                    break;
                }
                i += 1;
            },
            '"' => {
                // Scan to the closing quote, skipping escaped quotes.
                var start = i + 1;
                var end = i + 1;
                while (true) {
                    if (bytes[end] == '"' and bytes[end - 1] != '\\') {
                        break;
                    }
                    end += 1;
                }
                var slice = bytes[start..end];
                // A ':' right after the closing quote makes this a key,
                // not a value.
                if (bytes[end + 1] == ':') {
                    tag = slice;
                    i += (end - start) + 2;
                    continue;
                }
                std.debug.assert(nodeStack.items.len > 0);
                var parent = nodeStack.items[nodeStack.items.len - 1];
                // Duplicate into the tree's arena so the value outlives `bytes`.
                var dupe: []const u8 = try tree.alloc.internal().dupeZ(u8, slice);
                var container = tree.newString(dupe);
                parent.push(tag, container);
                tag = null;
                i += (end - start) + 2;
            },
            't' => {
                if (bytes[i + 1] == 'r' and bytes[i + 2] == 'u' and bytes[i + 3] == 'e') {
                    var parent: *serializer.Node = nodeStack.items[nodeStack.items.len - 1];
                    parent.push(tag, true);
                    tag = null;
                    i += 4;
                } else {
                    i += 1;
                }
            },
            'f' => {
                if (bytes[i + 1] == 'a' and bytes[i + 2] == 'l' and bytes[i + 3] == 's' and bytes[i + 4] == 'e') {
                    var parent: *serializer.Node = nodeStack.items[nodeStack.items.len - 1];
                    parent.push(tag, false);
                    tag = null;
                    i += 5;
                } else {
                    i += 1;
                }
            },
            'n' => {
                if (bytes[i + 1] == 'u' and bytes[i + 2] == 'l' and bytes[i + 3] == 'l') {
                    var parent: *serializer.Node = nodeStack.items[nodeStack.items.len - 1];
                    parent.push(tag, null);
                    tag = null;
                    i += 4;
                } else {
                    i += 1;
                }
            },
            // Number: leading '-' or a digit (ASCII 48-57). A '.' (46)
            // anywhere switches to float parsing.
            '-', 48...57 => {
                var start = i;
                var end = i + 1;
                var period: ?usize = null;
                while (true) {
                    if (bytes[end] == 46) {
                        period = end;
                        end += 1;
                        continue;
                    }
                    if (bytes[end] >= 48 and bytes[end] <= 57) {
                        end += 1;
                        continue;
                    } else {
                        break;
                    }
                }
                if (period) |_| {
                    var slice = bytes[start..end];
                    var parse = try std.fmt.parseFloat(f32, slice);
                    var parent: *serializer.Node = nodeStack.items[nodeStack.items.len - 1];
                    parent.push(tag, parse);
                    tag = null;
                } else {
                    var slice = bytes[start..end];
                    var parse = try std.fmt.parseInt(i32, slice, 0);
                    var parent: *serializer.Node = nodeStack.items[nodeStack.items.len - 1];
                    parent.push(tag, parse);
                    tag = null;
                }
                i += (end - start);
            },
            // Whitespace, commas, and anything unrecognized are skipped.
            else => {
                i += 1;
                continue;
            },
        }
    }
    return tree;
}
// Parses a small JSON document and spot-checks nested map access, array
// contents, and float extraction via Node.into.
test "json lex parse" {
    const sample =
        \\{"player":{"health":10.5,"mana":20,"lives":2,"names":["steven","paul"]}}
    ;
    var tree = JsonLexicon.parse(std.testing.allocator, sample);
    defer tree.deinit();
    var playerNode: *serializer.Node = tree.root.data.Map.get("player").?;
    var health = playerNode.data.Map.get("health").?;
    var healthValue: f32 = 0;
    health.into(&healthValue, null);
    var names = playerNode.data.Map.get("names").?.data.Array.items;
    try std.testing.expect(names.len == 2);
    try std.testing.expect(std.mem.eql(u8, names[0].data.Literal.String, "steven"));
    try std.testing.expect(std.mem.eql(u8, names[1].data.Literal.String, "paul"));
    try std.testing.expectEqual(@as(f32, 10.5), healthValue);
}
// Deserializes an embedded JSON fixture into a nested struct (slices,
// ArrayLists, StringHashMap) via Node.into, then prints the tree. Mostly
// a does-it-compile/does-it-run check -- no value assertions.
test "complex structure deserialize" {
    const structure = struct {
        const tagField = std.ArrayList([]const u8);
        const npcField = struct { name: []const u8, strength: i32, children: ?std.ArrayList([]const u8) };
        const townField = struct {
            name: []const u8,
            families: std.StringHashMap(familyField),
        };
        const familyField = struct {
            count: usize,
            notable_history: []const u8,
        };
        npc: []npcField,
        town: townField,
        tags: std.ArrayList(tagField),
    };
    const data = @embedFile("json/test_npc.json");
    var tree = JsonLexicon.parse(std.testing.allocator, std.mem.spanZ(data));
    defer tree.deinit();
    var value: structure = undefined;
    tree.root.into(&value, null);
    tree.print(JsonLexicon);
}
|
src/serializer_json.zig
|
const std = @import("std");
const util = @import("util");
const input = @embedFile("12.txt");
/// AoC 2020 day 12 instruction list, parsed from the embedded input
/// entirely at compile time: R/L lines become clockwise rotation counts
/// in 90-degree steps, N/E/S/W/F lines become moves (direction == null
/// means "forward", i.e. along the current heading/waypoint).
const actions = comptime blk: {
    @setEvalBranchQuota(input.len * 20);
    var buf: []const Action = &[_]Action{};
    var lines = std.mem.tokenize(input, "\n");
    while (lines.next()) |line| {
        const param: comptime_int = util.parseUint(u31, line[1..]) catch unreachable;
        const action = if (line[0] == 'R')
            Action{ .rotate = @divExact(param, 90) }
        else if (line[0] == 'L')
            // A left rotation is stored as its complementary right
            // rotation, so apply() only needs one wrapping direction.
            Action{ .rotate = @divExact(360 - param, 90) }
        else
            Action{
                .move = .{
                    .direction = switch (line[0]) {
                        'N' => .north,
                        'E' => .east,
                        'S' => .south,
                        'W' => .west,
                        'F' => null,
                        else => unreachable,
                    },
                    .units = param,
                },
            };
        // rotate == 0 would wrap a u2 no-op; the input never contains
        // R0/L360, and this comptime assert proves it.
        std.debug.assert(action != .rotate or action.rotate != 0);
        buf = buf ++ [_]Action{action};
    }
    break :blk buf[0..buf.len].*;
};
/// Runs every pre-parsed action through both navigation models and
/// prints the two resulting Manhattan distances (part 1, then part 2).
pub fn main(n: util.Utils) !void {
    var direct = DirectNavigation{};
    var waypoint = WaypointNavigation{};
    for (actions) |action| {
        direct.apply(action);
        waypoint.apply(action);
    }
    try n.out.print("{}\n{}\n", .{
        direct.distance(),
        waypoint.distance(),
    });
}
/// Compass direction. The declaration order (east, south, west, north)
/// is the clockwise rotation order: DirectNavigation.apply rotates by
/// wrapping u2 addition over this enum, and the `% 2` trick maps
/// east/west to axis 0 and south/north to axis 1.
const Direction = enum(u2) {
    east, south, west, north
};
/// One parsed instruction: either a move (direction == null means
/// "forward") or a clockwise rotation in 90-degree steps (1..3).
const Action = union(enum) {
    move: struct { direction: ?Direction, units: u31 },
    rotate: u2,
};
/// Part 1 model: the ship itself moves and turns. Position is
/// (x east-positive, y south-positive); heading starts east.
const DirectNavigation = struct {
    position: [2]i32 = [_]i32{ 0, 0 },
    direction: Direction = .east,

    fn apply(self: *@This(), action: Action) void {
        switch (action) {
            .move => |vector| {
                const units = @intCast(i32, vector.units);
                // Forward moves (null direction) use the current heading.
                const direction = vector.direction orelse self.direction;
                // east/west share axis 0, south/north axis 1 (enum order).
                const axis = &self.position[@enumToInt(direction) % 2];
                switch (direction) {
                    .east, .south => axis.* += units,
                    .west, .north => axis.* -= units,
                }
            },
            .rotate => |amount| {
                // Wrapping u2 add walks the clockwise enum order.
                const as_int = @enumToInt(self.direction);
                self.direction = @intToEnum(Direction, as_int +% amount);
            },
        }
    }

    /// Manhattan distance from the origin.
    fn distance(self: @This()) u32 {
        return std.math.absCast(self.position[0]) + std.math.absCast(self.position[1]);
    }
};
/// Part 2 model: N/E/S/W move a waypoint relative to the ship, F moves
/// the ship toward the waypoint, rotations spin the waypoint about the
/// ship. Waypoint starts 10 east, 1 north (y is south-positive).
const WaypointNavigation = struct {
    waypoint: [2]i32 = [_]i32{ 10, -1 },
    position: [2]i32 = [_]i32{ 0, 0 },

    fn apply(self: *@This(), action: Action) void {
        switch (action) {
            .move => |vector| {
                const units = @intCast(i32, vector.units);
                if (vector.direction) |direction| {
                    // Compass moves shift the waypoint, not the ship.
                    const axis = &self.waypoint[@enumToInt(direction) % 2];
                    switch (direction) {
                        .east, .south => axis.* += units,
                        .west, .north => axis.* -= units,
                    }
                } else {
                    // Forward: step toward the waypoint `units` times.
                    self.position[0] += units * self.waypoint[0];
                    self.position[1] += units * self.waypoint[1];
                }
            },
            .rotate => |amount| {
                // 90-degree clockwise rotation steps of (x, y) -> (-y, x)
                // in this south-positive coordinate system.
                const waypoint = self.waypoint;
                self.waypoint = switch (amount) {
                    1 => [_]i32{ -waypoint[1], waypoint[0] },
                    2 => [_]i32{ -waypoint[0], -waypoint[1] },
                    3 => [_]i32{ waypoint[1], -waypoint[0] },
                    else => unreachable,
                };
            },
        }
    }

    /// Manhattan distance from the origin.
    fn distance(self: @This()) u32 {
        return std.math.absCast(self.position[0]) + std.math.absCast(self.position[1]);
    }
};
|
2020/12.zig
|
usingnamespace @import("root").preamble;
const ImageRegion = lib.graphics.image_region.ImageRegion;
const ScrollingRegion = lib.graphics.scrolling_region.ScrollingRegion;
const PixelFormat = lib.graphics.pixel_format.PixelFormat;
const Color = lib.graphics.color.Color;
/// Line buffer for glyph rendering: glyphs accumulate into a one-line
/// staging buffer (up to `max_width` pixels, 4 bytes each) and are fed
/// into a scrolling target region one line at a time.
pub fn GlyphPrinter(comptime max_width: usize, comptime glyph_height: usize) type {
    return struct {
        buffer_data: [max_width * glyph_height * 4]u8 = undefined,
        used_width: usize = 0, // pixels of the current line already claimed
        scroller: ScrollingRegion = .{},

        /// Claims `width_to_add` more pixels of the current line and
        /// returns an ImageRegion view over everything claimed so far.
        /// NOTE(review): nothing checks used_width against max_width --
        /// presumably callers guarantee into.width <= max_width; confirm.
        pub fn buffer(self: *@This(), width_to_add: usize, pixel_format: PixelFormat) ImageRegion {
            self.used_width += width_to_add;
            return .{
                .bytes = self.buffer_data[0..],
                .width = self.used_width,
                .height = glyph_height,
                .pitch = max_width * 4,
                .pixel_format = pixel_format,
                .invalidateRectFunc = null,
            };
        }

        /// Pushes out a partially filled line, if any.
        pub fn flush(self: *@This(), into: ImageRegion, bg: Color) void {
            if (self.used_width != 0)
                self.feedLine(into, bg);
        }

        /// Pads the current line to the target width with `bg`, scrolls
        /// it into `into`, and resets the line buffer.
        pub fn feedLine(self: *@This(), into: ImageRegion, bg: Color) void {
            const old_width = self.used_width;
            if (self.used_width < into.width) {
                self.buffer(
                    into.width - self.used_width,
                    into.pixel_format,
                ).fill(
                    bg,
                    old_width,
                    0,
                    into.width - old_width,
                    glyph_height,
                    false,
                );
            }
            self.scroller.putBottom(self.buffer(0, into.pixel_format), into, old_width);
            self.used_width = 0;
        }

        /// Appends one glyph to the current line, wrapping to a new line
        /// first when it would not fit.
        pub fn draw(self: *@This(), into: ImageRegion, glyph: ImageRegion, bg: Color) void {
            // If we can't fit the glyph, feed line
            if (glyph.width + self.used_width > into.width) {
                self.feedLine(into, bg);
            }
            // Put the glyph into the buffer
            self.buffer(glyph.width, into.pixel_format).drawImage(
                glyph,
                // buffer() already advanced used_width past the glyph, so
                // back up by its width to find the destination x.
                self.used_width - glyph.width,
                0,
                false,
            );
        }

        /// Switches output regions; shrinking requires flushing the
        /// pending line into the old region first.
        pub fn retarget(self: *@This(), old: ImageRegion, new: ImageRegion, bg: Color) void {
            if (new.width < old.width) {
                // If we're making the buffer smaller, we have to flush the printer first.
                self.flush(old, bg);
            }
            self.scroller.retarget(old, new, bg);
        }
    };
}
|
lib/graphics/glyph_printer.zig
|
const std = @import("std");
const upaya = @import("upaya");
const tilescript = @import("tilescript.zig");
/// Imports a TileKit JSON map file into a tilescript.Map.
/// The returned map owns copies of everything it needs (allocated from
/// upaya.mem.allocator); TileKit indices are 1-based and converted to
/// 0-based here.
pub fn import(file: []const u8) !tilescript.Map {
    var bytes = try std.fs.cwd().readFileAlloc(upaya.mem.allocator, file, std.math.maxInt(usize));
    // Fix: `bytes` was never freed (leaked on every import). Parsed
    // strings in `res` may alias it, but everything stored into `map`
    // below is copied or duped, so freeing on scope exit is safe. Defers
    // run LIFO, so this runs after parseFree below.
    defer upaya.mem.allocator.free(bytes);
    var tokens = std.json.TokenStream.init(bytes);
    // TODO: why does this poop itself when using the real allocator?
    const options = std.json.ParseOptions{ .allocator = upaya.mem.tmp_allocator };
    var res = try std.json.parse(TileKitMap, &tokens, options);
    defer std.json.parseFree(TileKitMap, res, options);
    var map = tilescript.Map.init(res.output_map.tile_w, res.output_map.tile_spacing);
    map.w = res.input_map.w;
    map.h = res.input_map.h;
    // Dupe the filename so it outlives both `bytes` and `res`.
    map.image = try std.mem.dupe(upaya.mem.allocator, u8, res.output_map.image_filename);
    // Replace the default data buffer with one sized for this map.
    upaya.mem.allocator.free(map.data);
    map.data = try upaya.mem.allocator.alloc(u8, map.w * map.h);
    std.mem.copy(u8, map.data, res.input_map.data);
    map.ruleset.seed = res.final_ruleset.seed;
    map.ruleset.repeat = res.final_ruleset.repeat;
    for (res.final_ruleset.rules) |rule| {
        // NOTE(review): append failures are silently dropped here but
        // propagated for pre-rulesets below -- confirm this asymmetry is
        // intentional before unifying.
        map.ruleset.rules.append(rule.toAyaRule()) catch {};
    }
    for (res.rulesets) |ruleset| {
        map.addPreRuleSet();
        var pre_ruleset = &map.pre_rulesets.items[map.pre_rulesets.items.len - 1];
        pre_ruleset.seed = ruleset.seed;
        pre_ruleset.repeat = ruleset.repeat;
        for (ruleset.rules) |rule| {
            try pre_ruleset.rules.append(rule.toAyaRule());
        }
    }
    for (res.output_map.animations) |anim| {
        // TileKit tile indices are 1-based.
        map.addAnimation(@intCast(u8, anim.idx - 1));
        var aya_anim = &map.animations.items[map.animations.items.len - 1];
        aya_anim.rate = @intCast(u16, anim.rate);
        for (anim.frames) |frame| {
            // dont double-add the root frame
            if (frame != anim.idx) {
                aya_anim.toggleSelected(frame - 1);
            }
        }
    }
    for (res.objects) |obj, i| {
        map.addObject();
        var aya_obj = &map.objects.items[map.objects.items.len - 1];
        aya_obj.id = @intCast(u8, i);
        // NOTE(review): assumes obj.name fits in the fixed-size name
        // buffer -- confirm against tilescript's object definition.
        std.mem.copy(u8, &aya_obj.name, obj.name);
        // TileKit stores pixel coordinates as strings; convert to tiles.
        aya_obj.x = @divTrunc(std.fmt.parseInt(usize, obj.x, 10) catch unreachable, res.output_map.tile_w);
        aya_obj.y = @divTrunc(std.fmt.parseInt(usize, obj.y, 10) catch unreachable, res.output_map.tile_w);
    }
    return map;
}
/// Mirror of the TileKit JSON document layout, used only as a parse
/// target for std.json.parse in `import`. Field names must match the
/// JSON keys exactly.
pub const TileKitMap = struct {
    // TODO: why cant zig compile with these?!?!
    // version: []const u8 = "",
    // tile_w: i32,
    // tile_h: i32,
    brush_idx: usize,
    ruleset_idx: i32,
    input_map: InputMap,
    output_map: OutputMap,
    final_ruleset: RuleSet,
    rulesets: []RuleSet,
    objects: []Object,
    edit_mode: u8,

    pub const InputMap = struct {
        w: usize,
        h: usize,
        data: []u8,
    };

    pub const OutputMap = struct {
        tile_w: usize,
        tile_h: usize,
        tile_spacing: usize,
        image_filename: []const u8 = "",
        animations: []Animation,
        tags: []Tag,
    };

    pub const RuleSet = struct {
        seed: usize,
        repeat: u8,
        rules: []Rule,
    };

    pub const Rule = struct {
        label: []const u8 = "",
        chance: u8,
        offsets: []RuleOffsets,
        results: []u8,

        /// Converts one TileKit rule to the tilescript representation:
        /// result tiles shift from 1-based to 0-based, and offsets
        /// (-2..2 in TileKit) shift by +2 onto the rule grid.
        pub fn toAyaRule(self: @This()) tilescript.data.Rule {
            var rule = tilescript.data.Rule.init();
            // NOTE(review): assumes label fits in the fixed-size name
            // buffer -- confirm against tilescript.data.Rule.
            std.mem.copy(u8, &rule.name, self.label);
            rule.chance = self.chance;
            for (self.results) |tile| {
                rule.result_tiles.append(tile - 1);
            }
            for (self.offsets) |offset| {
                var rule_tile = rule.get(@intCast(usize, offset.x + 2), @intCast(usize, offset.y + 2));
                // offset.type: 1 = required, 2 = negated, else plain tile.
                if (offset.type == 1) {
                    rule_tile.require(offset.val);
                } else if (offset.type == 2) {
                    rule_tile.negate(offset.val);
                } else {
                    rule_tile.tile = offset.val;
                }
            }
            return rule;
        }
    };

    pub const RuleOffsets = struct {
        x: i32 = 0,
        y: i32 = 0,
        val: usize = 0,
        type: u8 = 0,
    };

    pub const Animation = struct {
        idx: usize = 0,
        rate: usize = 0,
        frames: []u8,
    };

    // TileKit serializes all object fields as strings, including numbers.
    pub const Object = struct {
        name: []const u8 = "",
        id: []const u8 = "",
        x: []const u8 = "",
        y: []const u8 = "",
        w: []const u8 = "",
        h: []const u8 = "",
        color: []const u8 = "",
        destination: []const u8 = "",
    };

    pub const Tag = struct {
        label: []const u8,
        tiles: []u8,
    };
};
|
tilescript/tilekit_importer.zig
|
const std = @import("index.zig");
const debug = std.debug;
const assert = debug.assert;
const math = std.math;
const builtin = @import("builtin");
const mem = @This();
const meta = std.meta;
const trait = meta.trait;
/// Polymorphic allocator interface: a concrete allocator embeds this struct,
/// fills in the three function pointers, and is passed around as *Allocator.
/// All typed helpers below are implemented on top of those three functions.
pub const Allocator = struct {
pub const Error = error{OutOfMemory};
/// Allocate byte_count bytes and return them in a slice, with the
/// slice's pointer aligned at least to alignment bytes.
/// The returned newly allocated memory is undefined.
/// `alignment` is guaranteed to be >= 1
/// `alignment` is guaranteed to be a power of 2
allocFn: fn (self: *Allocator, byte_count: usize, alignment: u29) Error![]u8,
/// If `new_byte_count > old_mem.len`:
/// * `old_mem.len` is the same as what was returned from allocFn or reallocFn.
/// * alignment >= alignment of old_mem.ptr
///
/// If `new_byte_count <= old_mem.len`:
/// * this function must return successfully.
/// * alignment <= alignment of old_mem.ptr
///
/// When `reallocFn` returns,
/// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same
/// as `old_mem` was when `reallocFn` is called. The bytes of
/// `return_value[old_mem.len..]` have undefined values.
/// `alignment` is guaranteed to be >= 1
/// `alignment` is guaranteed to be a power of 2
reallocFn: fn (self: *Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
/// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn`
freeFn: fn (self: *Allocator, old_mem: []u8) void,
/// Call `destroy` with the result
/// TODO this is deprecated. use createOne instead
pub fn create(self: *Allocator, init: var) Error!*@typeOf(init) {
const T = @typeOf(init);
// Zero-sized types need no storage; hand back a pointer to a fresh value.
if (@sizeOf(T) == 0) return &(T{});
const slice = try self.alloc(T, 1);
const ptr = &slice[0];
ptr.* = init;
return ptr;
}
/// Call `destroy` with the result.
/// Returns undefined memory.
pub fn createOne(self: *Allocator, comptime T: type) Error!*T {
if (@sizeOf(T) == 0) return &(T{});
const slice = try self.alloc(T, 1);
return &slice[0];
}
/// `ptr` should be the return value of `create`
pub fn destroy(self: *Allocator, ptr: var) void {
// Strip constness via an integer round-trip so freeFn (which takes []u8)
// can accept pointers to const values.
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
self.freeFn(self, non_const_ptr[0..@sizeOf(@typeOf(ptr).Child)]);
}
/// Allocate `n` items of type `T` with T's natural alignment.
pub fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T {
return self.alignedAlloc(T, @alignOf(T), n);
}
/// Allocate `n` items of type `T` with the given (comptime) alignment.
/// n == 0 returns a valid zero-length slice without calling allocFn.
pub fn alignedAlloc(self: *Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T {
if (n == 0) {
return ([*]align(alignment) T)(undefined)[0..0];
}
// Overflow in the byte-count computation is reported as OutOfMemory.
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.allocFn(self, byte_count, alignment);
assert(byte_slice.len == byte_count);
// This loop gets optimized out in ReleaseFast mode
for (byte_slice) |*byte| {
byte.* = undefined;
}
return @bytesToSlice(T, @alignCast(alignment, byte_slice));
}
/// Resize a previously allocated slice to `n` items, preserving contents
/// up to min(old, new) length.
pub fn realloc(self: *Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
return self.alignedRealloc(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
}
/// Aligned variant of `realloc`. An empty old_mem degenerates to alloc;
/// n == 0 degenerates to free.
pub fn alignedRealloc(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T {
if (old_mem.len == 0) {
return self.alignedAlloc(T, alignment, n);
}
if (n == 0) {
self.free(old_mem);
return ([*]align(alignment) T)(undefined)[0..0];
}
const old_byte_slice = @sliceToBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.reallocFn(self, old_byte_slice, byte_count, alignment);
assert(byte_slice.len == byte_count);
if (n > old_mem.len) {
// This loop gets optimized out in ReleaseFast mode
for (byte_slice[old_byte_slice.len..]) |*byte| {
byte.* = undefined;
}
}
return @bytesToSlice(T, @alignCast(alignment, byte_slice));
}
/// Reallocate, but `n` must be less than or equal to `old_mem.len`.
/// Unlike `realloc`, this function cannot fail.
/// Shrinking to 0 is the same as calling `free`.
pub fn shrink(self: *Allocator, comptime T: type, old_mem: []T, n: usize) []T {
return self.alignedShrink(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
}
/// Aligned variant of `shrink`. Relies on the reallocFn contract that
/// shrinking never fails, hence the `catch unreachable`.
pub fn alignedShrink(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T {
if (n == 0) {
self.free(old_mem);
return old_mem[0..0];
}
assert(n <= old_mem.len);
// Here we skip the overflow checking on the multiplication because
// n <= old_mem.len and the multiplication didn't overflow for that operation.
const byte_count = @sizeOf(T) * n;
const byte_slice = self.reallocFn(self, @sliceToBytes(old_mem), byte_count, alignment) catch unreachable;
assert(byte_slice.len == byte_count);
return @bytesToSlice(T, @alignCast(alignment, byte_slice));
}
/// Release memory previously returned by alloc/realloc. Zero-length
/// slices are ignored (they never hit allocFn in the first place).
pub fn free(self: *Allocator, memory: var) void {
const bytes = @sliceToBytes(memory);
if (bytes.len == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
self.freeFn(self, non_const_ptr[0..bytes.len]);
}
};
/// Three-way comparison result, produced by `compare` and consumed by
/// `lessThan`.
pub const Compare = enum {
LessThan,
Equal,
GreaterThan,
};
/// Copy all of source into dest at position 0.
/// dest.len must be >= source.len.
/// dest.ptr must be <= src.ptr (copies forwards, so overlapping regions
/// are only safe when dest starts at or before source).
pub fn copy(comptime T: type, dest: []T, source: []const T) void {
    // TODO instead of manually doing this check for the whole array
    // and turning off runtime safety, the compiler should detect loops like
    // this and automatically omit safety checks for loops
    @setRuntimeSafety(false);
    assert(dest.len >= source.len);
    var i: usize = 0;
    while (i < source.len) : (i += 1) {
        dest[i] = source[i];
    }
}
/// Copy all of source into dest at position 0, walking from the last
/// element down to the first.
/// dest.len must be >= source.len.
/// dest.ptr must be >= src.ptr (the backwards walk makes overlap safe
/// only when dest starts at or after source).
pub fn copyBackwards(comptime T: type, dest: []T, source: []const T) void {
    // TODO instead of manually doing this check for the whole array
    // and turning off runtime safety, the compiler should detect loops like
    // this and automatically omit safety checks for loops
    @setRuntimeSafety(false);
    assert(dest.len >= source.len);
    var remaining = source.len;
    while (remaining != 0) {
        remaining -= 1;
        dest[remaining] = source[remaining];
    }
}
/// Overwrite every element of dest with value.
pub fn set(comptime T: type, dest: []T, value: T) void {
    var i: usize = 0;
    while (i < dest.len) : (i += 1) {
        dest[i] = value;
    }
}
/// Zero the bytes backing `s` through a volatile pointer so the store
/// cannot be elided by the optimizer — use this for scrubbing secrets.
pub fn secureZero(comptime T: type, s: []T) void {
// NOTE: We do not use a volatile slice cast here since LLVM cannot
// see that it can be replaced by a memset.
const ptr = @ptrCast([*]volatile u8, s.ptr);
const length = s.len * @sizeOf(T);
@memset(ptr, 0, length);
}
test "mem.secureZero" {
// secureZero must produce the same result as an ordinary set-to-zero.
var a = []u8{0xfe} ** 8;
var b = []u8{0xfe} ** 8;
set(u8, a[0..], 0);
secureZero(u8, b[0..]);
assert(eql(u8, a[0..], b[0..]));
}
/// Lexicographic three-way comparison: elements are compared pairwise
/// over the shared prefix; if that prefix is equal the shorter slice
/// orders first.
pub fn compare(comptime T: type, lhs: []const T, rhs: []const T) Compare {
    const shared = math.min(lhs.len, rhs.len);
    var idx: usize = 0;
    while (idx < shared) : (idx += 1) {
        if (lhs[idx] < rhs[idx]) return Compare.LessThan;
        if (rhs[idx] < lhs[idx]) return Compare.GreaterThan;
    }
    if (lhs.len < rhs.len) return Compare.LessThan;
    if (rhs.len < lhs.len) return Compare.GreaterThan;
    return Compare.Equal;
}
test "mem.compare" {
// Covers: differing prefix, equal, shorter-is-less, and empty slices.
assert(compare(u8, "abcd", "bee") == Compare.LessThan);
assert(compare(u8, "abc", "abc") == Compare.Equal);
assert(compare(u8, "abc", "abc0") == Compare.LessThan);
assert(compare(u8, "", "") == Compare.Equal);
assert(compare(u8, "", "a") == Compare.LessThan);
}
/// Returns true if lhs < rhs, false otherwise
pub fn lessThan(comptime T: type, lhs: []const T, rhs: []const T) bool {
    return compare(T, lhs, rhs) == Compare.LessThan;
}
test "mem.lessThan" {
// Strict ordering: equal slices are not less-than.
assert(lessThan(u8, "abcd", "bee"));
assert(!lessThan(u8, "abc", "abc"));
assert(lessThan(u8, "abc", "abc0"));
assert(!lessThan(u8, "", ""));
assert(lessThan(u8, "", "a"));
}
/// Compares two slices and returns whether they are equal.
pub fn eql(comptime T: type, a: []const T, b: []const T) bool {
    if (a.len != b.len) return false;
    var i: usize = 0;
    while (i < a.len) : (i += 1) {
        if (a[i] != b[i]) return false;
    }
    return true;
}
/// Length of the zero-terminated sequence at `ptr`, not counting the
/// terminator. `ptr` must actually be terminated by a 0 element.
pub fn len(comptime T: type, ptr: [*]const T) usize {
var count: usize = 0;
while (ptr[count] != 0) : (count += 1) {}
return count;
}
/// Slice a zero-terminated pointer up to (excluding) its terminator.
pub fn toSliceConst(comptime T: type, ptr: [*]const T) []const T {
return ptr[0..len(T, ptr)];
}
/// Mutable variant of `toSliceConst`.
pub fn toSlice(comptime T: type, ptr: [*]T) []T {
return ptr[0..len(T, ptr)];
}
/// Returns true if all elements in a slice are equal to the scalar value
/// provided. Vacuously true for an empty slice.
pub fn allEqual(comptime T: type, slice: []const T, scalar: T) bool {
    var i: usize = 0;
    while (i < slice.len) : (i += 1) {
        if (slice[i] != scalar) return false;
    }
    return true;
}
/// Copies ::m to newly allocated memory. Caller is responsible to free it.
/// Propagates OutOfMemory from the allocator.
pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
const new_buf = try allocator.alloc(T, m.len);
copy(T, new_buf, m);
return new_buf;
}
/// Remove values from the beginning of a slice.
/// Returns a sub-slice of `slice`; no memory is copied.
pub fn trimLeft(comptime T: type, slice: []const T, values_to_strip: []const T) []const T {
var begin: usize = 0;
while (begin < slice.len and indexOfScalar(T, values_to_strip, slice[begin]) != null) : (begin += 1) {}
return slice[begin..];
}
/// Remove values from the end of a slice.
/// Returns a sub-slice of `slice`; no memory is copied.
pub fn trimRight(comptime T: type, slice: []const T, values_to_strip: []const T) []const T {
var end: usize = slice.len;
while (end > 0 and indexOfScalar(T, values_to_strip, slice[end - 1]) != null) : (end -= 1) {}
return slice[0..end];
}
/// Remove values from the beginning and end of a slice.
/// Returns a sub-slice of `slice`; no memory is copied.
pub fn trim(comptime T: type, slice: []const T, values_to_strip: []const T) []const T {
    // Stripping the tail first and the head second visits exactly the
    // same boundary elements the original two-cursor loop did.
    return trimLeft(T, trimRight(T, slice, values_to_strip), values_to_strip);
}
test "mem.trim" {
// Each trimming variant only touches its own end of the slice.
assert(eql(u8, trimLeft(u8, " foo\n ", " \n"), "foo\n "));
assert(eql(u8, trimRight(u8, " foo\n ", " \n"), " foo"));
assert(eql(u8, trim(u8, " foo\n ", " \n"), "foo"));
assert(eql(u8, trim(u8, "foo", " \n"), "foo"));
}
/// Linear search for the index of a scalar value inside a slice.
/// Returns null if the value is absent.
pub fn indexOfScalar(comptime T: type, slice: []const T, value: T) ?usize {
return indexOfScalarPos(T, slice, 0, value);
}
/// Linear search for the last index of a scalar value inside a slice.
/// Returns null if the value is absent.
pub fn lastIndexOfScalar(comptime T: type, slice: []const T, value: T) ?usize {
var i: usize = slice.len;
while (i != 0) {
i -= 1;
if (slice[i] == value) return i;
}
return null;
}
/// As `indexOfScalar`, but begins the search at `start_index`.
pub fn indexOfScalarPos(comptime T: type, slice: []const T, start_index: usize, value: T) ?usize {
var i: usize = start_index;
while (i < slice.len) : (i += 1) {
if (slice[i] == value) return i;
}
return null;
}
/// First index in `slice` whose element appears in `values`, or null.
pub fn indexOfAny(comptime T: type, slice: []const T, values: []const T) ?usize {
return indexOfAnyPos(T, slice, 0, values);
}
/// Last index in `slice` whose element appears in `values`, or null.
pub fn lastIndexOfAny(comptime T: type, slice: []const T, values: []const T) ?usize {
var i: usize = slice.len;
while (i != 0) {
i -= 1;
for (values) |value| {
if (slice[i] == value) return i;
}
}
return null;
}
/// As `indexOfAny`, but begins the search at `start_index`.
pub fn indexOfAnyPos(comptime T: type, slice: []const T, start_index: usize, values: []const T) ?usize {
var i: usize = start_index;
while (i < slice.len) : (i += 1) {
for (values) |value| {
if (slice[i] == value) return i;
}
}
return null;
}
/// First index at which `needle` occurs in `haystack`, or null.
pub fn indexOf(comptime T: type, haystack: []const T, needle: []const T) ?usize {
return indexOfPos(T, haystack, 0, needle);
}
/// Find the index in a slice of a sub-slice, searching from the end backwards.
/// To start looking at a different index, slice the haystack first.
/// TODO is there even a better algorithm for this?
pub fn lastIndexOf(comptime T: type, haystack: []const T, needle: []const T) ?usize {
if (needle.len > haystack.len) return null;
var i: usize = haystack.len - needle.len;
while (true) : (i -= 1) {
if (mem.eql(T, haystack[i .. i + needle.len], needle)) return i;
// The i == 0 check must come before the decrement: it guards the
// usize counter against wrapping below zero.
if (i == 0) return null;
}
}
// TODO boyer-moore algorithm
/// As `indexOf`, but begins the search at `start_index`. An empty needle
/// matches at `start_index` (or at haystack.len when searching past it).
pub fn indexOfPos(comptime T: type, haystack: []const T, start_index: usize, needle: []const T) ?usize {
if (needle.len > haystack.len) return null;
var i: usize = start_index;
const end = haystack.len - needle.len;
while (i <= end) : (i += 1) {
if (eql(T, haystack[i .. i + needle.len], needle)) return i;
}
return null;
}
test "mem.indexOf" {
// Forward and backward substring search, including needles longer than
// the haystack and repeated occurrences.
assert(indexOf(u8, "one two three four", "four").? == 14);
assert(lastIndexOf(u8, "one two three two four", "two").? == 14);
assert(indexOf(u8, "one two three four", "gour") == null);
assert(lastIndexOf(u8, "one two three four", "gour") == null);
assert(indexOf(u8, "foo", "foo").? == 0);
assert(lastIndexOf(u8, "foo", "foo").? == 0);
assert(indexOf(u8, "foo", "fool") == null);
assert(lastIndexOf(u8, "foo", "lfoo") == null);
assert(lastIndexOf(u8, "foo", "fool") == null);
assert(indexOf(u8, "foo foo", "foo").? == 0);
assert(lastIndexOf(u8, "foo foo", "foo").? == 4);
assert(lastIndexOfAny(u8, "boo, cat", "abo").? == 6);
assert(lastIndexOfScalar(u8, "boo", 'o').? == 2);
}
/// Reads an integer from memory with size equal to bytes.len.
/// T specifies the return type, which must be large enough to store
/// the result.
pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: builtin.Endian) ReturnType {
var result: ReturnType = 0;
switch (endian) {
builtin.Endian.Big => {
// Most significant byte arrives first: shift previous result up.
for (bytes) |b| {
result = (result << 8) | b;
}
},
builtin.Endian.Little => {
// Least significant byte arrives first: place each byte at its
// final bit offset directly.
const ShiftType = math.Log2Int(ReturnType);
for (bytes) |b, index| {
result = result | (ReturnType(b) << @intCast(ShiftType, index * 8));
}
},
}
return result;
}
/// Reads an integer from memory with bit count specified by T.
/// The bit count of T must be evenly divisible by 8.
/// This function cannot fail and cannot cause undefined behavior.
/// Assumes the endianness of memory is native. This means the function can
/// simply pointer cast memory.
pub fn readIntNative(comptime T: type, bytes: *const [@sizeOf(T)]u8) T {
comptime assert(T.bit_count % 8 == 0);
return @ptrCast(*align(1) const T, bytes).*;
}
/// Reads an integer from memory with bit count specified by T.
/// The bit count of T must be evenly divisible by 8.
/// This function cannot fail and cannot cause undefined behavior.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
pub fn readIntForeign(comptime T: type, bytes: *const [@sizeOf(T)]u8) T {
return @bswap(T, readIntNative(T, bytes));
}
/// Resolved at comptime to whichever of native/foreign matches the host.
pub const readIntLittle = switch (builtin.endian) {
builtin.Endian.Little => readIntNative,
builtin.Endian.Big => readIntForeign,
};
pub const readIntBig = switch (builtin.endian) {
builtin.Endian.Little => readIntForeign,
builtin.Endian.Big => readIntNative,
};
/// Asserts that bytes.len >= @sizeOf(T). Reads the integer starting from index 0
/// and ignores extra bytes.
/// Note that @sizeOf(u24) is 3.
/// The bit count of T must be evenly divisible by 8.
/// Assumes the endianness of memory is native. This means the function can
/// simply pointer cast memory.
pub fn readIntSliceNative(comptime T: type, bytes: []const u8) T {
assert(@sizeOf(u24) == 3);
assert(bytes.len >= @sizeOf(T));
// TODO https://github.com/ziglang/zig/issues/863
return readIntNative(T, @ptrCast(*const [@sizeOf(T)]u8, bytes.ptr));
}
/// Asserts that bytes.len >= @sizeOf(T). Reads the integer starting from index 0
/// and ignores extra bytes.
/// Note that @sizeOf(u24) is 3.
/// The bit count of T must be evenly divisible by 8.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
pub fn readIntSliceForeign(comptime T: type, bytes: []const u8) T {
return @bswap(T, readIntSliceNative(T, bytes));
}
pub const readIntSliceLittle = switch (builtin.endian) {
builtin.Endian.Little => readIntSliceNative,
builtin.Endian.Big => readIntSliceForeign,
};
pub const readIntSliceBig = switch (builtin.endian) {
builtin.Endian.Little => readIntSliceForeign,
builtin.Endian.Big => readIntSliceNative,
};
/// Reads an integer from memory with bit count specified by T.
/// The bit count of T must be evenly divisible by 8.
/// This function cannot fail and cannot cause undefined behavior.
pub fn readInt(comptime T: type, bytes: *const [@sizeOf(T)]u8, endian: builtin.Endian) T {
if (endian == builtin.endian) {
return readIntNative(T, bytes);
} else {
return readIntForeign(T, bytes);
}
}
/// Asserts that bytes.len >= @sizeOf(T). Reads the integer starting from index 0
/// and ignores extra bytes.
/// Note that @sizeOf(u24) is 3.
/// The bit count of T must be evenly divisible by 8.
pub fn readIntSlice(comptime T: type, bytes: []const u8, endian: builtin.Endian) T {
assert(@sizeOf(u24) == 3);
assert(bytes.len >= @sizeOf(T));
// TODO https://github.com/ziglang/zig/issues/863
return readInt(T, @ptrCast(*const [@sizeOf(T)]u8, bytes.ptr), endian);
}
test "comptime read/write int" {
// Writing in one endianness and reading in the other must byte-swap,
// and both paths must be evaluable at comptime.
comptime {
var bytes: [2]u8 = undefined;
std.mem.writeIntLittle(u16, &bytes, 0x1234);
const result = std.mem.readIntBig(u16, &bytes);
std.debug.assert(result == 0x3412);
}
comptime {
var bytes: [2]u8 = undefined;
std.mem.writeIntBig(u16, &bytes, 0x1234);
const result = std.mem.readIntLittle(u16, &bytes);
std.debug.assert(result == 0x3412);
}
}
test "readIntBig and readIntLittle" {
// Exercises zero-width, unsigned, signed, and >64-bit integer widths.
assert(readIntSliceBig(u0, []u8{}) == 0x0);
assert(readIntSliceLittle(u0, []u8{}) == 0x0);
assert(readIntSliceBig(u8, []u8{0x32}) == 0x32);
assert(readIntSliceLittle(u8, []u8{0x12}) == 0x12);
assert(readIntSliceBig(u16, []u8{ 0x12, 0x34 }) == 0x1234);
assert(readIntSliceLittle(u16, []u8{ 0x12, 0x34 }) == 0x3412);
assert(readIntSliceBig(u72, []u8{ 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x24 }) == 0x123456789abcdef024);
assert(readIntSliceLittle(u72, []u8{ 0xec, 0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe }) == 0xfedcba9876543210ec);
assert(readIntSliceBig(i8, []u8{0xff}) == -1);
assert(readIntSliceLittle(i8, []u8{0xfe}) == -2);
assert(readIntSliceBig(i16, []u8{ 0xff, 0xfd }) == -3);
assert(readIntSliceLittle(i16, []u8{ 0xfc, 0xff }) == -4);
}
/// Writes an integer to memory, storing it in twos-complement.
/// This function always succeeds, has defined behavior for all inputs, and
/// accepts any integer bit width.
/// This function stores in native endian, which means it is implemented as a simple
/// memory store.
pub fn writeIntNative(comptime T: type, buf: *[@sizeOf(T)]u8, value: T) void {
@ptrCast(*align(1) T, buf).* = value;
}
/// Writes an integer to memory, storing it in twos-complement.
/// This function always succeeds, has defined behavior for all inputs, but
/// the integer bit width must be divisible by 8.
/// This function stores in foreign endian, which means it does a @bswap first.
pub fn writeIntForeign(comptime T: type, buf: *[@sizeOf(T)]u8, value: T) void {
writeIntNative(T, buf, @bswap(T, value));
}
/// Resolved at comptime to whichever of native/foreign matches the host.
pub const writeIntLittle = switch (builtin.endian) {
builtin.Endian.Little => writeIntNative,
builtin.Endian.Big => writeIntForeign,
};
pub const writeIntBig = switch (builtin.endian) {
builtin.Endian.Little => writeIntForeign,
builtin.Endian.Big => writeIntNative,
};
/// Writes an integer to memory, storing it in twos-complement.
/// This function always succeeds, has defined behavior for all inputs, but
/// the integer bit width must be divisible by 8.
pub fn writeInt(comptime T: type, buffer: *[@sizeOf(T)]u8, value: T, endian: builtin.Endian) void {
comptime assert(T.bit_count % 8 == 0);
if (endian == builtin.endian) {
return writeIntNative(T, buffer, value);
} else {
return writeIntForeign(T, buffer, value);
}
}
/// Writes a twos-complement little-endian integer to memory.
/// Asserts that buf.len >= @sizeOf(T). Note that @sizeOf(u24) is 3.
/// The bit count of T must be divisible by 8.
/// Any extra bytes in buffer after writing the integer are set to zero. To
/// avoid the branch to check for extra buffer bytes, use writeIntLittle
/// instead.
pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
comptime assert(@sizeOf(u24) == 3);
comptime assert(T.bit_count % 8 == 0);
assert(buffer.len >= @sizeOf(T));
// TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough
// Reinterpret as unsigned so the right-shifts below are logical, then
// emit bytes least-significant first; the shift drains to zero, which
// zero-fills any trailing bytes.
const uint = @IntType(false, T.bit_count);
var bits = @truncate(uint, value);
for (buffer) |*b| {
b.* = @truncate(u8, bits);
bits >>= 8;
}
}
/// Writes a twos-complement big-endian integer to memory.
/// Asserts that buffer.len >= @sizeOf(T). Note that @sizeOf(u24) is 3.
/// The bit count of T must be divisible by 8.
/// Any extra bytes in buffer before writing the integer are set to zero. To
/// avoid the branch to check for extra buffer bytes, use writeIntBig instead.
pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void {
comptime assert(@sizeOf(u24) == 3);
comptime assert(T.bit_count % 8 == 0);
assert(buffer.len >= @sizeOf(T));
// TODO I want to call writeIntBig here but comptime eval facilities aren't good enough
// Same drain-to-zero trick as the little-endian variant, but walking
// the buffer from the end so leading bytes get the zero fill.
const uint = @IntType(false, T.bit_count);
var bits = @truncate(uint, value);
var index: usize = buffer.len;
while (index != 0) {
index -= 1;
buffer[index] = @truncate(u8, bits);
bits >>= 8;
}
}
pub const writeIntSliceNative = switch (builtin.endian) {
builtin.Endian.Little => writeIntSliceLittle,
builtin.Endian.Big => writeIntSliceBig,
};
pub const writeIntSliceForeign = switch (builtin.endian) {
builtin.Endian.Little => writeIntSliceBig,
builtin.Endian.Big => writeIntSliceLittle,
};
/// Writes a twos-complement integer to memory, with the specified endianness.
/// Asserts that buf.len >= @sizeOf(T). Note that @sizeOf(u24) is 3.
/// The bit count of T must be evenly divisible by 8.
/// Any extra bytes in buffer not part of the integer are set to zero, with
/// respect to endianness. To avoid the branch to check for extra buffer bytes,
/// use writeInt instead.
pub fn writeIntSlice(comptime T: type, buffer: []u8, value: T, endian: builtin.Endian) void {
comptime assert(T.bit_count % 8 == 0);
switch (endian) {
builtin.Endian.Little => return writeIntSliceLittle(T, buffer, value),
builtin.Endian.Big => return writeIntSliceBig(T, buffer, value),
}
}
test "writeIntBig and writeIntLittle" {
// Exact-size writes (no zero padding) across u0/u8/u16/u72 and signed types.
var buf0: [0]u8 = undefined;
var buf1: [1]u8 = undefined;
var buf2: [2]u8 = undefined;
var buf9: [9]u8 = undefined;
writeIntBig(u0, &buf0, 0x0);
assert(eql_slice_u8(buf0[0..], []u8{}));
writeIntLittle(u0, &buf0, 0x0);
assert(eql_slice_u8(buf0[0..], []u8{}));
writeIntBig(u8, &buf1, 0x12);
assert(eql_slice_u8(buf1[0..], []u8{0x12}));
writeIntLittle(u8, &buf1, 0x34);
assert(eql_slice_u8(buf1[0..], []u8{0x34}));
writeIntBig(u16, &buf2, 0x1234);
assert(eql_slice_u8(buf2[0..], []u8{ 0x12, 0x34 }));
writeIntLittle(u16, &buf2, 0x5678);
assert(eql_slice_u8(buf2[0..], []u8{ 0x78, 0x56 }));
writeIntBig(u72, &buf9, 0x123456789abcdef024);
assert(eql_slice_u8(buf9[0..], []u8{ 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x24 }));
writeIntLittle(u72, &buf9, 0xfedcba9876543210ec);
assert(eql_slice_u8(buf9[0..], []u8{ 0xec, 0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe }));
writeIntBig(i8, &buf1, -1);
assert(eql_slice_u8(buf1[0..], []u8{0xff}));
writeIntLittle(i8, &buf1, -2);
assert(eql_slice_u8(buf1[0..], []u8{0xfe}));
writeIntBig(i16, &buf2, -3);
assert(eql_slice_u8(buf2[0..], []u8{ 0xff, 0xfd }));
writeIntLittle(i16, &buf2, -4);
assert(eql_slice_u8(buf2[0..], []u8{ 0xfc, 0xff }));
}
/// 32-bit FNV-1a hash of the given bytes.
pub fn hash_slice_u8(k: []const u8) u32 {
    const fnv32_offset_basis: u32 = 2166136261;
    const fnv32_prime: u32 = 16777619;
    var h: u32 = fnv32_offset_basis;
    var i: usize = 0;
    while (i < k.len) : (i += 1) {
        h = (h ^ k[i]) *% fnv32_prime;
    }
    return h;
}
/// Convenience wrapper: byte-slice equality (monomorphized `eql` for u8).
pub fn eql_slice_u8(a: []const u8, b: []const u8) bool {
return eql(u8, a, b);
}
/// Returns an iterator that iterates over the slices of `buffer` that are not
/// any of the bytes in `split_bytes`.
/// split(" abc def ghi ", " ")
/// Will return slices for "abc", "def", "ghi", null, in that order.
/// Runs of separator bytes are treated as one (no empty tokens are produced).
pub fn split(buffer: []const u8, split_bytes: []const u8) SplitIterator {
return SplitIterator{
.index = 0,
.buffer = buffer,
.split_bytes = split_bytes,
};
}
test "mem.split" {
// Leading, trailing, and repeated separators yield no empty tokens.
var it = split(" abc def ghi ", " ");
assert(eql(u8, it.next().?, "abc"));
assert(eql(u8, it.next().?, "def"));
assert(eql(u8, it.next().?, "ghi"));
assert(it.next() == null);
}
/// Returns true if `haystack` begins with `needle`.
pub fn startsWith(comptime T: type, haystack: []const T, needle: []const T) bool {
    if (needle.len > haystack.len) return false;
    return eql(T, haystack[0..needle.len], needle);
}
test "mem.startsWith" {
assert(startsWith(u8, "Bob", "Bo"));
assert(!startsWith(u8, "Needle in haystack", "haystack"));
}
/// Returns true if `haystack` ends with `needle`.
pub fn endsWith(comptime T: type, haystack: []const T, needle: []const T) bool {
    if (needle.len > haystack.len) return false;
    return eql(T, haystack[haystack.len - needle.len ..], needle);
}
test "mem.endsWith" {
assert(endsWith(u8, "Needle in haystack", "haystack"));
assert(!endsWith(u8, "Bob", "Bo"));
}
/// Iterator state for `split`. Yields non-empty tokens of `buffer`
/// delimited by any byte in `split_bytes`.
pub const SplitIterator = struct {
buffer: []const u8,
split_bytes: []const u8,
index: usize,
/// Returns the next token, or null when the buffer is exhausted.
pub fn next(self: *SplitIterator) ?[]const u8 {
// move to beginning of token
while (self.index < self.buffer.len and self.isSplitByte(self.buffer[self.index])) : (self.index += 1) {}
const start = self.index;
if (start == self.buffer.len) {
return null;
}
// move to end of token
while (self.index < self.buffer.len and !self.isSplitByte(self.buffer[self.index])) : (self.index += 1) {}
const end = self.index;
return self.buffer[start..end];
}
/// Returns a slice of the remaining bytes. Does not affect iterator state.
pub fn rest(self: *const SplitIterator) []const u8 {
// move to beginning of token (on a local copy of the cursor)
var index: usize = self.index;
while (index < self.buffer.len and self.isSplitByte(self.buffer[index])) : (index += 1) {}
return self.buffer[index..];
}
/// True if `byte` is one of the configured separator bytes.
fn isSplitByte(self: *const SplitIterator, byte: u8) bool {
for (self.split_bytes) |split_byte| {
if (byte == split_byte) {
return true;
}
}
return false;
}
};
/// Naively combines a series of strings with a separator.
/// Allocates memory for the result, which must be freed by the caller.
/// A separator is inserted between consecutive strings only when the
/// output so far does not already end with `sep`, so duplicate separators
/// are never produced; no leading separator is emitted when the first
/// strings are empty.
pub fn join(allocator: *Allocator, sep: u8, strings: ...) ![]u8 {
    comptime assert(strings.len >= 1);
    var total_strings_len: usize = strings.len; // 1 sep per string
    {
        comptime var string_i = 0;
        inline while (string_i < strings.len) : (string_i += 1) {
            const arg = ([]const u8)(strings[string_i]);
            total_strings_len += arg.len;
        }
    }
    const buf = try allocator.alloc(u8, total_strings_len);
    errdefer allocator.free(buf);
    var buf_index: usize = 0;
    comptime var string_i = 0;
    inline while (true) {
        const arg = ([]const u8)(strings[string_i]);
        string_i += 1;
        copy(u8, buf[buf_index..], arg);
        buf_index += arg.len;
        if (string_i >= strings.len) break;
        // Bug fix: if every string written so far was empty, buf_index is 0
        // and `buf[buf_index - 1]` would underflow the usize index (runtime
        // panic in safe builds). Skip the separator in that case.
        if (buf_index != 0 and buf[buf_index - 1] != sep) {
            buf[buf_index] = sep;
            buf_index += 1;
        }
    }
    // Give back the over-allocated separator slack.
    return allocator.shrink(u8, buf, buf_index);
}
test "mem.join" {
// Multiple strings get one separator between each; a single string is
// returned unchanged.
assert(eql(u8, try join(debug.global_allocator, ',', "a", "b", "c"), "a,b,c"));
assert(eql(u8, try join(debug.global_allocator, ',', "a"), "a"));
}
test "testStringEquality" {
assert(eql(u8, "abcd", "abcd"));
assert(!eql(u8, "abcdef", "abZdef"));
assert(!eql(u8, "abcdefg", "abcdef"));
}
test "testReadInt" {
// Run the same assertions at runtime and at comptime.
testReadIntImpl();
comptime testReadIntImpl();
}
/// Shared body for the runtime and comptime readInt tests: checks both
/// endiannesses, signedness, and the fixed-size-array pointer API.
fn testReadIntImpl() void {
{
const bytes = []u8{
0x12,
0x34,
0x56,
0x78,
};
assert(readInt(u32, &bytes, builtin.Endian.Big) == 0x12345678);
assert(readIntBig(u32, &bytes) == 0x12345678);
assert(readIntBig(i32, &bytes) == 0x12345678);
assert(readInt(u32, &bytes, builtin.Endian.Little) == 0x78563412);
assert(readIntLittle(u32, &bytes) == 0x78563412);
assert(readIntLittle(i32, &bytes) == 0x78563412);
}
{
const buf = []u8{
0x00,
0x00,
0x12,
0x34,
};
const answer = readInt(u32, &buf, builtin.Endian.Big);
assert(answer == 0x00001234);
}
{
const buf = []u8{
0x12,
0x34,
0x00,
0x00,
};
const answer = readInt(u32, &buf, builtin.Endian.Little);
assert(answer == 0x00003412);
}
{
// Negative values round-trip through twos-complement bytes.
const bytes = []u8{
0xff,
0xfe,
};
assert(readIntBig(u16, &bytes) == 0xfffe);
assert(readIntBig(i16, &bytes) == -0x0002);
assert(readIntLittle(u16, &bytes) == 0xfeff);
assert(readIntLittle(i16, &bytes) == -0x0101);
}
}
test "std.mem.writeIntSlice" {
// Run the same assertions at runtime and at comptime.
testWriteIntImpl();
comptime testWriteIntImpl();
}
/// Shared body for the runtime and comptime writeIntSlice tests: verifies
/// that values smaller than the buffer are zero-padded on the correct end
/// for each endianness.
fn testWriteIntImpl() void {
var bytes: [8]u8 = undefined;
writeIntSlice(u0, bytes[0..], 0, builtin.Endian.Big);
assert(eql(u8, bytes, []u8{
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
}));
writeIntSlice(u0, bytes[0..], 0, builtin.Endian.Little);
assert(eql(u8, bytes, []u8{
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
}));
writeIntSlice(u64, bytes[0..], 0x12345678CAFEBABE, builtin.Endian.Big);
assert(eql(u8, bytes, []u8{
0x12,
0x34,
0x56,
0x78,
0xCA,
0xFE,
0xBA,
0xBE,
}));
writeIntSlice(u64, bytes[0..], 0xBEBAFECA78563412, builtin.Endian.Little);
assert(eql(u8, bytes, []u8{
0x12,
0x34,
0x56,
0x78,
0xCA,
0xFE,
0xBA,
0xBE,
}));
writeIntSlice(u32, bytes[0..], 0x12345678, builtin.Endian.Big);
assert(eql(u8, bytes, []u8{
0x00,
0x00,
0x00,
0x00,
0x12,
0x34,
0x56,
0x78,
}));
writeIntSlice(u32, bytes[0..], 0x78563412, builtin.Endian.Little);
assert(eql(u8, bytes, []u8{
0x12,
0x34,
0x56,
0x78,
0x00,
0x00,
0x00,
0x00,
}));
writeIntSlice(u16, bytes[0..], 0x1234, builtin.Endian.Big);
assert(eql(u8, bytes, []u8{
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x12,
0x34,
}));
writeIntSlice(u16, bytes[0..], 0x1234, builtin.Endian.Little);
assert(eql(u8, bytes, []u8{
0x34,
0x12,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
}));
}
/// Smallest element of `slice`. The slice must be non-empty (the initial
/// slice[0] index enforces this in safe builds).
pub fn min(comptime T: type, slice: []const T) T {
    var smallest = slice[0];
    var i: usize = 1;
    while (i < slice.len) : (i += 1) {
        smallest = math.min(smallest, slice[i]);
    }
    return smallest;
}
test "mem.min" {
assert(min(u8, "abcdefg") == 'a');
}
/// Largest element of `slice`. The slice must be non-empty (the initial
/// slice[0] index enforces this in safe builds).
pub fn max(comptime T: type, slice: []const T) T {
    var largest = slice[0];
    var i: usize = 1;
    while (i < slice.len) : (i += 1) {
        largest = math.max(largest, slice[i]);
    }
    return largest;
}
test "mem.max" {
assert(max(u8, "abcdefg") == 'g');
}
/// Exchange the values pointed to by `a` and `b`.
pub fn swap(comptime T: type, a: *T, b: *T) void {
    const a_copy = a.*;
    a.* = b.*;
    b.* = a_copy;
}
/// In-place order reversal of a slice
pub fn reverse(comptime T: type, items: []T) void {
    // Two cursors converge toward the middle, exchanging elements as they go.
    var lo: usize = 0;
    var hi: usize = items.len;
    while (hi > lo + 1) {
        hi -= 1;
        const held = items[lo];
        items[lo] = items[hi];
        items[hi] = held;
        lo += 1;
    }
}
test "std.mem.reverse" {
var arr = []i32{
5,
3,
1,
2,
4,
};
reverse(i32, arr[0..]);
assert(eql(i32, arr, []i32{
4,
2,
1,
3,
5,
}));
}
/// In-place rotation of the values in an array ([0 1 2 3] becomes [1 2 3 0] if we rotate by 1)
/// Assumes 0 <= amount <= items.len
/// Uses the classic triple-reversal trick: reverse each half, then the whole.
pub fn rotate(comptime T: type, items: []T, amount: usize) void {
reverse(T, items[0..amount]);
reverse(T, items[amount..]);
reverse(T, items);
}
test "std.mem.rotate" {
var arr = []i32{
5,
3,
1,
2,
4,
};
rotate(i32, arr[0..], 2);
assert(eql(i32, arr, []i32{
1,
2,
4,
5,
3,
}));
}
/// Converts a little-endian integer to host endianness.
pub fn littleToNative(comptime T: type, x: T) T {
return switch (builtin.endian) {
builtin.Endian.Little => x,
builtin.Endian.Big => @bswap(T, x),
};
}
/// Converts a big-endian integer to host endianness.
/// No-op on big-endian hosts; byte-swap on little-endian ones.
pub fn bigToNative(comptime T: type, x: T) T {
    if (builtin.endian == builtin.Endian.Big) return x;
    return @bswap(T, x);
}
/// Converts an integer from specified endianness to host endianness.
pub fn toNative(comptime T: type, x: T, endianness_of_x: builtin.Endian) T {
    if (endianness_of_x == builtin.Endian.Big) return bigToNative(T, x);
    return littleToNative(T, x);
}
/// Converts an integer which has host endianness to the desired endianness.
pub fn nativeTo(comptime T: type, x: T, desired_endianness: builtin.Endian) T {
    if (desired_endianness == builtin.Endian.Big) return nativeToBig(T, x);
    return nativeToLittle(T, x);
}
/// Converts an integer which has host endianness to little endian.
pub fn nativeToLittle(comptime T: type, x: T) T {
    if (builtin.endian == builtin.Endian.Little) return x;
    return @bswap(T, x);
}
/// Converts an integer which has host endianness to big endian.
pub fn nativeToBig(comptime T: type, x: T) T {
    if (builtin.endian == builtin.Endian.Big) return x;
    return @bswap(T, x);
}
/// Computes the return type of `asBytes` for a single-item pointer type `P`:
/// a pointer to a fixed-size byte array covering the pointee, carrying the
/// same alignment and constness as `P`.
fn AsBytesReturnType(comptime P: type) type {
    if (comptime !trait.isSingleItemPtr(P))
        @compileError("expected single item " ++ "pointer, passed " ++ @typeName(P));
    // Byte count of the pointed-to type.
    const size = usize(@sizeOf(meta.Child(P)));
    const alignment = comptime meta.alignment(P);
    if (comptime trait.isConstPtr(P))
        return *align(alignment) const [size]u8;
    return *align(alignment) [size]u8;
}
/// Given a pointer to a single item, returns a pointer to the underlying
/// bytes as a fixed-size array, preserving constness.
pub fn asBytes(ptr: var) AsBytesReturnType(@typeOf(ptr)) {
    return @ptrCast(AsBytesReturnType(@typeOf(ptr)), ptr);
}
// Verifies asBytes: endianness-dependent byte view of an integer, in-place
// mutation through the byte view, and packed-struct field order.
test "std.mem.asBytes" {
    const deadbeef = u32(0xDEADBEEF);
    const deadbeef_bytes = switch (builtin.endian) {
        builtin.Endian.Big => "\xDE\xAD\xBE\xEF",
        builtin.Endian.Little => "\xEF\xBE\xAD\xDE",
    };
    debug.assert(std.mem.eql(u8, asBytes(&deadbeef), deadbeef_bytes));
    var codeface = u32(0xC0DEFACE);
    // Zeroing the byte view must zero the underlying integer.
    for (asBytes(&codeface).*) |*b|
        b.* = 0;
    debug.assert(codeface == 0);
    const S = packed struct {
        a: u8,
        b: u8,
        c: u8,
        d: u8,
    };
    const inst = S{
        .a = 0xBE,
        .b = 0xEF,
        .c = 0xDE,
        .d = 0xA1,
    };
    debug.assert(std.mem.eql(u8, asBytes(&inst), "\xBE\xEF\xDE\xA1"));
}
/// Given any value, returns a copy of its bytes in an array.
pub fn toBytes(value: var) [@sizeOf(@typeOf(value))]u8 {
    // Copy into a local so its address can be handed to asBytes; the deref
    // then copies the bytes out into the returned array.
    var tmp = value;
    return asBytes(&tmp).*;
}
// toBytes returns a copy: mutating the copy must not affect future reads,
// and the byte order follows host endianness.
test "std.mem.toBytes" {
    var my_bytes = toBytes(u32(0x12345678));
    switch (builtin.endian) {
        builtin.Endian.Big => debug.assert(std.mem.eql(u8, my_bytes, "\x12\x34\x56\x78")),
        builtin.Endian.Little => debug.assert(std.mem.eql(u8, my_bytes, "\x78\x56\x34\x12")),
    }
    my_bytes[0] = '\x99';
    switch (builtin.endian) {
        builtin.Endian.Big => debug.assert(std.mem.eql(u8, my_bytes, "\x99\x34\x56\x78")),
        builtin.Endian.Little => debug.assert(std.mem.eql(u8, my_bytes, "\x99\x56\x34\x12")),
    }
}
/// Computes the return type of `bytesAsValue`: a pointer to `T` with the
/// alignment and constness of the byte-array pointer `B`. `B` must be a
/// pointer to exactly `[@sizeOf(T)]u8`.
fn BytesAsValueReturnType(comptime T: type, comptime B: type) type {
    const size = usize(@sizeOf(T));
    if (comptime !trait.is(builtin.TypeId.Pointer)(B) or meta.Child(B) != [size]u8) {
        @compileError("expected *[N]u8 " ++ ", passed " ++ @typeName(B));
    }
    const alignment = comptime meta.alignment(B);
    return if (comptime trait.isConstPtr(B)) *align(alignment) const T else *align(alignment) T;
}
/// Given a pointer to an array of bytes, returns a pointer to a value of the
/// specified type backed by those bytes, preserving constness.
pub fn bytesAsValue(comptime T: type, bytes: var) BytesAsValueReturnType(T, @typeOf(bytes)) {
    const ReturnType = BytesAsValueReturnType(T, @typeOf(bytes));
    return @ptrCast(ReturnType, bytes);
}
// bytesAsValue returns a *view*, not a copy: writes through the value
// pointer must be visible in the backing byte array.
test "std.mem.bytesAsValue" {
    const deadbeef = u32(0xDEADBEEF);
    const deadbeef_bytes = switch (builtin.endian) {
        builtin.Endian.Big => "\xDE\xAD\xBE\xEF",
        builtin.Endian.Little => "\xEF\xBE\xAD\xDE",
    };
    debug.assert(deadbeef == bytesAsValue(u32, &deadbeef_bytes).*);
    var codeface_bytes = switch (builtin.endian) {
        builtin.Endian.Big => "\xC0\xDE\xFA\xCE",
        builtin.Endian.Little => "\xCE\xFA\xDE\xC0",
    };
    var codeface = bytesAsValue(u32, &codeface_bytes);
    debug.assert(codeface.* == 0xC0DEFACE);
    codeface.* = 0;
    // The zero write above must show through in the source bytes.
    for (codeface_bytes) |b|
        debug.assert(b == 0);
    const S = packed struct {
        a: u8,
        b: u8,
        c: u8,
        d: u8,
    };
    const inst = S{
        .a = 0xBE,
        .b = 0xEF,
        .c = 0xDE,
        .d = 0xA1,
    };
    const inst_bytes = "\xBE\xEF\xDE\xA1";
    const inst2 = bytesAsValue(S, &inst_bytes);
    debug.assert(meta.eql(inst, inst2.*));
}
/// Given a pointer to an array of bytes, returns a value of the specified
/// type backed by a copy of those bytes.
pub fn bytesToValue(comptime T: type, bytes: var) T {
    // `bytes` is a by-value copy of the array, so viewing and dereferencing
    // it yields an independent value.
    const view = bytesAsValue(T, &bytes);
    return view.*;
}
// Round-trip: host-endian byte string reassembles to the original integer.
test "std.mem.bytesToValue" {
    const deadbeef_bytes = switch (builtin.endian) {
        builtin.Endian.Big => "\xDE\xAD\xBE\xEF",
        builtin.Endian.Little => "\xEF\xBE\xAD\xDE",
    };
    const deadbeef = bytesToValue(u32, deadbeef_bytes);
    debug.assert(deadbeef == u32(0xDEADBEEF));
}
/// Computes the return type of `subArrayPtr`: a pointer to a `length`-sized
/// array of the source element type, preserving the constness of `T`
/// (a pointer-to-array type).
fn SubArrayPtrReturnType(comptime T: type, comptime length: usize) type {
    const Elem = meta.Child(meta.Child(T));
    if (trait.isConstPtr(T))
        return *const [length]Elem;
    return *[length]Elem;
}
/// Given a pointer to an array, returns a pointer to a portion of that
/// array, preserving constness. `start + length` must not exceed the length
/// of the pointed-to array (checked with a debug assert).
pub fn subArrayPtr(ptr: var, comptime start: usize, comptime length: usize) SubArrayPtrReturnType(@typeOf(ptr), length) {
    debug.assert(start + length <= ptr.*.len);
    const ReturnType = SubArrayPtrReturnType(@typeOf(ptr), length);
    // (Removed a dead local: `const T = meta.Child(meta.Child(@typeOf(ptr)))`
    // was computed but never used.)
    return @ptrCast(ReturnType, &ptr[start]);
}
// subArrayPtr is a view into the original array: writes through the
// sub-array pointer must be visible in the source array.
test "std.mem.subArrayPtr" {
    const a1 = "abcdef";
    const sub1 = subArrayPtr(&a1, 2, 3);
    debug.assert(std.mem.eql(u8, sub1.*, "cde"));
    var a2 = "abcdef";
    var sub2 = subArrayPtr(&a2, 2, 3);
    debug.assert(std.mem.eql(u8, sub2, "cde"));
    sub2[1] = 'X';
    debug.assert(std.mem.eql(u8, a2, "abcXef"));
}
|
std/mem.zig
|
const std = @import("std");
const mach = @import("mach");
const gpu = @import("gpu");
const glfw = @import("glfw");
const zm = @import("zmath");
const zigimg = @import("zigimg");
const Vertex = @import("cube_mesh.zig").Vertex;
const vertices = @import("cube_mesh.zig").vertices;
// Per-frame uniform data for the vertex shader; the MVP matrix is transposed
// before upload (see update()).
const UniformBufferObject = struct {
    mat: zm.Mat,
};
// Drives the cube rotation animation (started in init()).
var timer: std.time.Timer = undefined;
// Fields of the file-level App struct (`App = @This()` below).
pipeline: gpu.RenderPipeline,
queue: gpu.Queue,
vertex_buffer: gpu.Buffer,
uniform_buffer: gpu.Buffer,
bind_group: gpu.BindGroup,
depth_texture: gpu.Texture,
// Framebuffer size the depth texture was created for; compared each frame in
// update() to detect resizes.
depth_size: mach.Size,
const App = @This();
/// Builds all GPU resources for the textured, depth-tested spinning cube:
/// render pipeline, vertex buffer, sampler + texture (loaded from disk),
/// uniform buffer, bind group, and depth texture.
pub fn init(app: *App, engine: *mach.Engine) !void {
    timer = try std.time.Timer.start();
    // Space closes the window.
    engine.core.setKeyCallback(struct {
        fn callback(_: *App, eng: *mach.Engine, key: mach.Key, action: mach.Action) void {
            if (action == .press) {
                switch (key) {
                    .space => eng.core.setShouldClose(true),
                    else => {},
                }
            }
        }
    }.callback);
    try engine.core.setSizeLimits(.{ .width = 20, .height = 20 }, .{ .width = null, .height = null });
    const vs_module = engine.gpu_driver.device.createShaderModule(&.{
        .label = "my vertex shader",
        .code = .{ .wgsl = @embedFile("vert.wgsl") },
    });
    // Layout must match the Vertex struct: vec4 position + vec2 uv.
    const vertex_attributes = [_]gpu.VertexAttribute{
        .{ .format = .float32x4, .offset = @offsetOf(Vertex, "pos"), .shader_location = 0 },
        .{ .format = .float32x2, .offset = @offsetOf(Vertex, "uv"), .shader_location = 1 },
    };
    const vertex_buffer_layout = gpu.VertexBufferLayout{
        .array_stride = @sizeOf(Vertex),
        .step_mode = .vertex,
        .attribute_count = vertex_attributes.len,
        .attributes = &vertex_attributes,
    };
    const fs_module = engine.gpu_driver.device.createShaderModule(&.{
        .label = "my fragment shader",
        .code = .{ .wgsl = @embedFile("frag.wgsl") },
    });
    // Standard alpha blending for color; alpha channel passes through.
    const blend = gpu.BlendState{
        .color = .{
            .operation = .add,
            .src_factor = .src_alpha,
            .dst_factor = .one_minus_src_alpha,
        },
        .alpha = .{
            .operation = .add,
            .src_factor = .one,
            .dst_factor = .zero,
        },
    };
    const color_target = gpu.ColorTargetState{
        .format = engine.gpu_driver.swap_chain_format,
        .blend = &blend,
        .write_mask = gpu.ColorWriteMask.all,
    };
    const fragment = gpu.FragmentState{
        .module = fs_module,
        .entry_point = "main",
        .targets = &.{color_target},
        .constants = null,
    };
    const pipeline_descriptor = gpu.RenderPipeline.Descriptor{
        .fragment = &fragment,
        // Enable depth testing so that the fragment closest to the camera
        // is rendered in front.
        .depth_stencil = &.{
            .format = .depth24_plus,
            .depth_write_enabled = true,
            .depth_compare = .less,
        },
        .vertex = .{
            .module = vs_module,
            .entry_point = "main",
            .buffers = &.{vertex_buffer_layout},
        },
        .primitive = .{
            .topology = .triangle_list,
            // Backface culling since the cube is solid piece of geometry.
            // Faces pointing away from the camera will be occluded by faces
            // pointing toward the camera.
            .cull_mode = .back,
        },
    };
    const pipeline = engine.gpu_driver.device.createRenderPipeline(&pipeline_descriptor);
    // Upload the static cube mesh through a buffer mapped at creation.
    const vertex_buffer = engine.gpu_driver.device.createBuffer(&.{
        .usage = .{ .vertex = true },
        .size = @sizeOf(Vertex) * vertices.len,
        .mapped_at_creation = true,
    });
    var vertex_mapped = vertex_buffer.getMappedRange(Vertex, 0, vertices.len);
    std.mem.copy(Vertex, vertex_mapped, vertices[0..]);
    vertex_buffer.unmap();
    // Create a sampler with linear filtering for smooth interpolation.
    const sampler = engine.gpu_driver.device.createSampler(&.{
        .mag_filter = .linear,
        .min_filter = .linear,
    });
    const queue = engine.gpu_driver.device.getQueue();
    // NOTE(review): `img` (and the converted RGBA buffer below) are never
    // freed — tolerable in a one-shot example, a leak otherwise; confirm.
    const img = try zigimg.Image.fromFilePath(engine.allocator, "examples/assets/gotta-go-fast.png");
    const img_size = gpu.Extent3D{ .width = @intCast(u32, img.width), .height = @intCast(u32, img.height) };
    const cube_texture = engine.gpu_driver.device.createTexture(&.{
        .size = img_size,
        .format = .rgba8_unorm,
        .usage = .{
            .texture_binding = true,
            .copy_dst = true,
            .render_attachment = true,
        },
    });
    const data_layout = gpu.Texture.DataLayout{
        .bytes_per_row = @intCast(u32, img.width * 4),
        .rows_per_image = @intCast(u32, img.height),
    };
    // Upload pixels; the texture is rgba8, so RGB images are expanded first.
    switch (img.pixels.?) {
        .Rgba32 => |pixels| queue.writeTexture(&.{ .texture = cube_texture }, pixels, &data_layout, &img_size),
        .Rgb24 => |pixels| {
            const data = try rgb24ToRgba32(engine.allocator, pixels);
            //defer data.deinit(allocator);
            queue.writeTexture(&.{ .texture = cube_texture }, data.Rgba32, &data_layout, &img_size);
        },
        else => @panic("unsupported image color format"),
    }
    // One uniform buffer holding the MVP matrix, rewritten every frame.
    const uniform_buffer = engine.gpu_driver.device.createBuffer(&.{
        .usage = .{ .copy_dst = true, .uniform = true },
        .size = @sizeOf(UniformBufferObject),
        .mapped_at_creation = false,
    });
    // Binding indices 0..2 must match the shader's @group(0) declarations.
    const bind_group = engine.gpu_driver.device.createBindGroup(
        &gpu.BindGroup.Descriptor{
            .layout = pipeline.getBindGroupLayout(0),
            .entries = &.{
                gpu.BindGroup.Entry.buffer(0, uniform_buffer, 0, @sizeOf(UniformBufferObject)),
                gpu.BindGroup.Entry.sampler(1, sampler),
                gpu.BindGroup.Entry.textureView(2, cube_texture.createView(&gpu.TextureView.Descriptor{})),
            },
        },
    );
    // Depth buffer sized to the framebuffer; update() recreates it on resize.
    const size = try engine.core.getFramebufferSize();
    const depth_texture = engine.gpu_driver.device.createTexture(&gpu.Texture.Descriptor{
        .size = gpu.Extent3D{
            .width = size.width,
            .height = size.height,
        },
        .format = .depth24_plus,
        .usage = .{
            .render_attachment = true,
            .texture_binding = true,
        },
    });
    app.pipeline = pipeline;
    app.queue = queue;
    app.vertex_buffer = vertex_buffer;
    app.uniform_buffer = uniform_buffer;
    app.bind_group = bind_group;
    app.depth_texture = depth_texture;
    app.depth_size = size;
    // Shader modules are no longer needed once the pipeline exists.
    vs_module.release();
    fs_module.release();
}
/// Releases the GPU resources owned by App.
pub fn deinit(app: *App, _: *mach.Engine) void {
    app.vertex_buffer.release();
    app.uniform_buffer.release();
    app.bind_group.release();
    // Previously missing: the depth texture created in init() was never
    // released. (NOTE(review): `pipeline` and `queue` are also held — confirm
    // whether they warrant an explicit release in this gpu binding.)
    app.depth_texture.release();
}
/// Renders one frame: recreates the depth buffer on resize, animates the
/// cube's MVP matrix from the global timer, encodes and submits one render
/// pass, then presents. Returns true to keep running.
pub fn update(app: *App, engine: *mach.Engine) !bool {
    // If window is resized, recreate depth buffer otherwise we cannot use it.
    const size = engine.core.getFramebufferSize() catch unreachable; // TODO: return type inference can't handle this
    if (size.width != app.depth_size.width or size.height != app.depth_size.height) {
        // Release the old depth texture before replacing it; previously every
        // resize leaked a full-screen texture.
        app.depth_texture.release();
        app.depth_texture = engine.gpu_driver.device.createTexture(&gpu.Texture.Descriptor{
            .size = gpu.Extent3D{
                .width = size.width,
                .height = size.height,
            },
            .format = .depth24_plus,
            .usage = .{
                .render_attachment = true,
                .texture_binding = true,
            },
        });
        app.depth_size = size;
    }
    const back_buffer_view = engine.gpu_driver.swap_chain.?.getCurrentTextureView();
    const color_attachment = gpu.RenderPassColorAttachment{
        .view = back_buffer_view,
        .clear_value = .{ .r = 0.5, .g = 0.5, .b = 0.5, .a = 0.0 },
        .load_op = .clear,
        .store_op = .store,
    };
    const encoder = engine.gpu_driver.device.createCommandEncoder(null);
    // Create the depth attachment view in a local and release it when the
    // frame is done; previously it was created inline and leaked each frame.
    const depth_view = app.depth_texture.createView(&gpu.TextureView.Descriptor{
        .format = .depth24_plus,
        .dimension = .dimension_2d,
        .array_layer_count = 1,
        .mip_level_count = 1,
    });
    defer depth_view.release();
    const render_pass_info = gpu.RenderPassEncoder.Descriptor{
        .color_attachments = &.{color_attachment},
        .depth_stencil_attachment = &.{
            .view = depth_view,
            .depth_clear_value = 1.0,
            .depth_load_op = .clear,
            .depth_store_op = .store,
        },
    };
    {
        // Animate: spin around X and Z proportionally to elapsed time, view
        // from above/behind, perspective projection at the current aspect.
        const time = @intToFloat(f32, timer.read()) / @as(f32, std.time.ns_per_s);
        const model = zm.mul(zm.rotationX(time * (std.math.pi / 2.0)), zm.rotationZ(time * (std.math.pi / 2.0)));
        const view = zm.lookAtRh(
            zm.f32x4(0, 4, 2, 1),
            zm.f32x4(0, 0, 0, 1),
            zm.f32x4(0, 0, 1, 0),
        );
        const proj = zm.perspectiveFovRh(
            (std.math.pi / 4.0),
            @intToFloat(f32, engine.gpu_driver.current_desc.width) / @intToFloat(f32, engine.gpu_driver.current_desc.height),
            0.1,
            10,
        );
        const mvp = zm.mul(zm.mul(model, view), proj);
        const ubo = UniformBufferObject{
            .mat = zm.transpose(mvp),
        };
        encoder.writeBuffer(app.uniform_buffer, 0, UniformBufferObject, &.{ubo});
    }
    const pass = encoder.beginRenderPass(&render_pass_info);
    pass.setPipeline(app.pipeline);
    pass.setVertexBuffer(0, app.vertex_buffer, 0, @sizeOf(Vertex) * vertices.len);
    pass.setBindGroup(0, app.bind_group, &.{});
    pass.draw(vertices.len, 1, 0, 0);
    pass.end();
    pass.release();
    var command = encoder.finish(null);
    encoder.release();
    app.queue.submit(&.{command});
    command.release();
    engine.gpu_driver.swap_chain.?.present();
    back_buffer_view.release();
    return true;
}
/// Expands RGB24 pixels to RGBA32 with full alpha. Caller owns the returned
/// storage and must deinit it.
fn rgb24ToRgba32(allocator: std.mem.Allocator, in: []zigimg.color.Rgb24) !zigimg.color.ColorStorage {
    const out = try zigimg.color.ColorStorage.init(allocator, .Rgba32, in.len);
    for (in) |px, i| {
        out.Rgba32[i] = zigimg.color.Rgba32{ .R = px.R, .G = px.G, .B = px.B, .A = 255 };
    }
    return out;
}
|
examples/textured-cube/main.zig
|
const std = @import("std");
const mem = std.mem;
/// Print the string as a Zig identifier, escaping it with @"" syntax if needed.
pub fn formatId(
    bytes: []const u8,
    comptime fmt: []const u8,
    options: std.fmt.FormatOptions,
    writer: anytype,
) !void {
    if (!isValidId(bytes)) {
        // Not directly usable as an identifier: wrap in @"..." with escapes.
        try writer.writeAll("@\"");
        try formatEscapes(bytes, fmt, options, writer);
        try writer.writeByte('"');
        return;
    }
    try writer.writeAll(bytes);
}
/// Return a Formatter for a Zig identifier; `{}` prints it via formatId,
/// quoting/escaping with @"" only when required.
pub fn fmtId(bytes: []const u8) std.fmt.Formatter(formatId) {
    return .{ .data = bytes };
}
/// Returns true if `bytes` can be used directly as a Zig identifier without
/// @"" quoting: non-empty, matches [A-Za-z_][A-Za-z0-9_]*, and is not a
/// keyword.
pub fn isValidId(bytes: []const u8) bool {
    // Fix: an empty string previously fell through the (empty) loop and the
    // keyword check, and was wrongly reported as a valid identifier.
    if (bytes.len == 0) return false;
    for (bytes) |c, i| {
        switch (c) {
            '_', 'a'...'z', 'A'...'Z' => {},
            '0'...'9' => if (i == 0) return false,
            else => return false,
        }
    }
    return std.zig.Token.getKeyword(bytes) == null;
}
/// Writes `bytes` as the contents of a Zig string literal: named escapes for
/// control/quote/backslash characters, printable ASCII verbatim, and \xNN
/// hex escapes for everything else. `fmt` and `options` are accepted to
/// satisfy the std.fmt.Formatter signature and are currently unused.
pub fn formatEscapes(
    bytes: []const u8,
    comptime fmt: []const u8,
    options: std.fmt.FormatOptions,
    writer: anytype,
) !void {
    for (bytes) |byte| switch (byte) {
        '\n' => try writer.writeAll("\\n"),
        '\r' => try writer.writeAll("\\r"),
        '\t' => try writer.writeAll("\\t"),
        '\\' => try writer.writeAll("\\\\"),
        '"' => try writer.writeAll("\\\""),
        '\'' => try writer.writeAll("\\'"),
        ' ', '!', '#'...'&', '('...'[', ']'...'~' => try writer.writeByte(byte),
        // Use two-digit lowercase hex escapes for any remaining (unprintable) bytes.
        else => {
            try writer.writeAll("\\x");
            try std.fmt.formatInt(byte, 16, false, .{ .width = 2, .fill = '0' }, writer);
        },
    };
}
/// Return a Formatter for Zig Escapes; `{}` prints `bytes` escaped for
/// inclusion inside a Zig string literal (see formatEscapes).
pub fn fmtEscapes(bytes: []const u8) std.fmt.Formatter(formatEscapes) {
    return .{ .data = bytes };
}
// Keywords and malformed names get @"" quoting; valid names pass through;
// escapes cover quotes and unprintable bytes.
test "escape invalid identifiers" {
    try std.fmt.testFmt("@\"while\"", "{}", .{fmtId("while")});
    try std.fmt.testFmt("hello", "{}", .{fmtId("hello")});
    try std.fmt.testFmt("@\"11\\\"23\"", "{}", .{fmtId("11\"23")});
    try std.fmt.testFmt("@\"11\\x0f23\"", "{}", .{fmtId("11\x0F23")});
    try std.fmt.testFmt("\\x0f", "{}", .{fmtEscapes("\x0f")});
    try std.fmt.testFmt(
        \\" \\ hi \x07 \x11 \" derp \'"
    , "\"{}\"", .{fmtEscapes(" \\ hi \x07 \x11 \" derp '")});
}
|
lib/std/zig/fmt.zig
|
const aoc = @import("../aoc.zig");
const std = @import("std");
// A ticket is the ordered list of its numeric field values.
const Ticket = std.ArrayList(u16);
// One rule from the input: a field name with two inclusive valid ranges.
const Field = struct {
    name: []const u8,
    from1: u16,
    to1: u16,
    from2: u16,
    to2: u16,
    /// True when `what` lies inside either inclusive range.
    fn contains(self: *const Field, what: u16) bool {
        const in_first = self.from1 <= what and what <= self.to1;
        const in_second = self.from2 <= what and what <= self.to2;
        return in_first or in_second;
    }
};
/// AoC 2020 day 16. Part 1: sum of nearby-ticket values matching no rule.
/// Part 2: product of your ticket's "departure" fields after deducing which
/// position belongs to which field by elimination.
pub fn run(problem: *aoc.Problem) !aoc.Solution {
    // Input group 1: parse "name: a-b or c-d" rules.
    const fields = blk: {
        var fields = std.ArrayList(Field).init(problem.allocator);
        const rules = problem.group().?;
        var lines = std.mem.tokenize(u8, rules, "\n");
        while (lines.next()) |line| {
            const rule = line[0..std.mem.indexOf(u8, line, ":").?];
            // Delimiter set covers ':', '-', ' ' and the letters of "or".
            var range_tokens = std.mem.tokenize(u8, line[rule.len..], ":- or");
            try fields.append(Field {
                .name = rule,
                .from1 = try std.fmt.parseInt(u16, range_tokens.next().?, 10),
                .to1 = try std.fmt.parseInt(u16, range_tokens.next().?, 10),
                .from2 = try std.fmt.parseInt(u16, range_tokens.next().?, 10),
                .to2 = try std.fmt.parseInt(u16, range_tokens.next().?, 10),
            });
        }
        break :blk fields;
    };
    defer fields.deinit();
    // possible_fields[i] = set of field names still plausible for position i;
    // starts as "every field" for every position.
    var possible_fields = blk: {
        var possible_fields = std.ArrayList(std.StringHashMap(void)).init(problem.allocator);
        for (fields.items) |_| {
            var index_fields = std.StringHashMap(void).init(problem.allocator);
            for (fields.items) |field| {
                try index_fields.put(field.name, {});
            }
            try possible_fields.append(index_fields);
        }
        break :blk possible_fields;
    };
    defer {
        for (possible_fields.items) |*pi| {
            pi.deinit();
        }
        possible_fields.deinit();
    }
    // Input group 2: skip the "your ticket:" header line, parse the ticket.
    const your_ticket = blk: {
        const group = problem.group().?;
        const newline = std.mem.indexOf(u8, group, "\n").? + 1;
        break :blk try parseTicket(problem.allocator, group[newline..]);
    };
    defer your_ticket.deinit();
    // Input group 3: nearby tickets. Accumulates the part-1 error rate and,
    // for valid tickets only, prunes impossible (position, field) pairs.
    const res1 = blk: {
        var error_rate: usize = 0;
        const nearby_tix = problem.group().?;
        var lines = std.mem.tokenize(u8, nearby_tix, "\n");
        _ = lines.next().?; // skip the "nearby tickets:" header
        while (lines.next()) |line| {
            const ticket = try parseTicket(problem.allocator, line);
            defer ticket.deinit();
            var is_ticket_valid = true;
            outer: for (ticket.items) |ticket_field| {
                for (fields.items) |field| {
                    if (field.contains(ticket_field)) {
                        continue :outer;
                    }
                }
                // This value matches no rule at all: ticket is invalid.
                is_ticket_valid = false;
                error_rate += ticket_field;
            }
            if (!is_ticket_valid) {
                continue;
            }
            for (ticket.items) |ticket_field, idx| {
                for (fields.items) |field| {
                    if (!field.contains(ticket_field)) {
                        _ = possible_fields.items[idx].remove(field.name);
                    }
                }
            }
        }
        break :blk error_rate;
    };
    // Part 2: constraint propagation — repeatedly find a position with exactly
    // one candidate, pin it, and remove that field from all other positions.
    const res2 = blk: {
        var departures: usize = 1;
        var processed: u8 = 0;
        outer: while (processed != possible_fields.items.len) : (processed += 1) {
            for (possible_fields.items) |*pi, idx| {
                if (pi.count() == 1) {
                    const field = pi.iterator().next().?.key_ptr.*;
                    for (possible_fields.items) |*pi2| {
                        _ = pi2.remove(field);
                    }
                    if (std.mem.startsWith(u8, field, "departure")) {
                        departures *= your_ticket.items[idx];
                    }
                    continue :outer;
                }
            }
            // No position had a unique candidate: the input cannot be solved
            // by pure elimination.
            unreachable;
        }
        break :blk departures;
    };
    return problem.solution(res1, res2);
}
/// Parses a comma-separated list of numbers into a Ticket.
/// Caller owns the returned list and must deinit it.
fn parseTicket(allocator: std.mem.Allocator, line: []const u8) !Ticket {
    var ticket = Ticket.init(allocator);
    // Fix: without this, the partially-built list leaked whenever parseInt
    // (or append) failed mid-line.
    errdefer ticket.deinit();
    var tokens = std.mem.tokenize(u8, line, ",");
    while (tokens.next()) |token| {
        try ticket.append(try std.fmt.parseInt(u16, token, 10));
    }
    return ticket;
}
|
src/main/zig/2020/day16.zig
|
const std = @import("std");
const print = std.debug.print;
usingnamespace @import("value.zig");
usingnamespace @import("chunk.zig");
usingnamespace @import("scanner.zig");
usingnamespace @import("parser.zig");
usingnamespace @import("gc.zig");
usingnamespace @import("vm.zig");
// Global heap instance; must be initialized by the host before use.
pub var heap: Heap = undefined;
/// Object heap for the VM: wraps an allocator and knows how to create,
/// intern, and destroy the garbage-collected object kinds.
pub const Heap = struct {
    allocator: *std.mem.Allocator,
    pub fn init(allocator: *std.mem.Allocator) Heap {
        return Heap{
            .allocator = allocator,
        };
    }
    /// Allocates a single object of type T.
    fn create(self: *Heap, comptime T: type) !*T {
        //try collectGarbage();
        const res = try self.allocator.create(T);
        //GCLOG.info("{} allocate {} for {}", .{ @ptrToInt(res), @sizeOf(T), @typeName(T) });
        return res;
    }
    /// Frees one object based on its runtime type tag.
    /// (Renamed from the misspelled `destory`; the function is file-private,
    /// and both call sites are in freeObjects below.)
    fn destroy(self: *Heap, ptr: anytype) void {
        //GCLOG.info("{} free type {}", .{ @ptrToInt(ptr), @tagName((ptr.objType)) });
        switch (ptr.objType) {
            .str => {
                // Strings own their character buffer as well.
                const str = @ptrCast(*ObjString, ptr);
                self.allocator.free(str.chars);
                self.allocator.destroy(str);
            },
            .fun => self.allocator.destroy(@ptrCast(*ObjFunction, ptr)),
            .closure => self.allocator.destroy(@ptrCast(*ObjClosure, ptr)),
            .upvalue => self.allocator.destroy(@ptrCast(*ObjUpvalue, ptr)),
        }
    }
    /// Walks the intrusive `next` list and frees every object in it.
    pub fn freeObjects(self: *Heap, objects: ?*Obj) void {
        if (objects) |obj| {
            var next = obj.next;
            const first = obj;
            while (next) |on| {
                next = on.next;
                self.destroy(on);
            }
            self.destroy(first);
        }
    }
    /// Allocates a slice of `n` items of type T.
    /// Fix: the return type was declared `!*T` while `Allocator.alloc`
    /// returns `![]T`; the mismatch went unnoticed because the function is
    /// currently unreferenced and therefore never semantically analyzed.
    fn alloc(self: *Heap, comptime T: type, n: usize) ![]T {
        //try collectGarbage();
        return try self.allocator.alloc(T, n);
    }
    // Pushes a freshly created object onto the VM's intrusive object list so
    // freeObjects can reach it later.
    fn setVmObjects(obj: *Obj) void {
        obj.next = vm.objects;
        vm.objects = obj;
    }
    /// Interns `chars`: returns the existing ObjString when already present,
    /// otherwise copies the bytes, registers the object with the VM, and
    /// records it in the string table.
    pub fn copyString(self: *Heap, chars: []const u8) !*ObjString {
        if (vm.strings.get(chars)) |val| {
            return val;
        } else {
            var str_array = try self.allocator.alloc(u8, chars.len);
            std.mem.copy(u8, str_array, chars);
            var objString = try self.create(ObjString);
            objString.obj = .{ .objType = .str, .isMarked = false, .next = null };
            objString.chars = str_array;
            setVmObjects(@ptrCast(*Obj, objString));
            try vm.strings.put(str_array, objString);
            return objString;
        }
    }
    /// Creates an empty, unnamed function object with a fresh chunk.
    pub fn newFunction(self: *Heap) !*ObjFunction {
        var function = try self.create(ObjFunction);
        function.obj = .{ .objType = .fun, .isMarked = false, .next = null };
        function.arity = 0;
        function.name = null;
        function.chunk = try Chunk.init(self.allocator);
        function.upvalueCount = 0;
        setVmObjects(@ptrCast(*Obj, function));
        return function;
    }
    /// Wraps `fun` in a closure with an empty upvalue list.
    pub fn newClosure(self: *Heap, fun: *ObjFunction) !*ObjClosure {
        var closure = try self.create(ObjClosure);
        closure.obj = .{ .objType = .closure, .isMarked = false, .next = null };
        closure.fun = fun;
        closure.upvalues = ArrayListOfObjUpvalue.init(self.allocator);
        setVmObjects(@ptrCast(*Obj, closure));
        return closure;
    }
    /// Creates an open upvalue pointing at the stack slot `slot`.
    pub fn newUpvalue(self: *Heap, slot: *Value) !*ObjUpvalue {
        var upvalue = try self.create(ObjUpvalue);
        upvalue.obj = .{ .objType = .upvalue, .isMarked = false, .next = null };
        upvalue.location = slot;
        upvalue.next = null;
        upvalue.closed = Value.nil;
        setVmObjects(@ptrCast(*Obj, upvalue));
        return upvalue;
    }
};
|
zvm/src/heap.zig
|
const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
const fs = std.fs;
const codegen = @import("../codegen/c.zig");
const link = @import("../link.zig");
const trace = @import("../tracy.zig").trace;
const C = @This();
pub const base_tag: link.File.Tag = .c;
// Prelude embedded at the top of every generated C file / emit-h header.
pub const zig_h = @embedFile("C/zig.h");
base: link.File,
/// Per-declaration data. For functions this is the body, and
/// the forward declaration is stored in the FnBlock.
pub const DeclBlock = struct {
    code: std.ArrayListUnmanaged(u8),
    pub const empty: DeclBlock = .{
        .code = .{},
    };
};
/// Per-function data.
pub const FnBlock = struct {
    fwd_decl: std.ArrayListUnmanaged(u8),
    pub const empty: FnBlock = .{
        .fwd_decl = .{},
    };
};
/// Creates (or reuses) the output file at `sub_path` and returns a newly
/// allocated C linker backend. Caller owns the returned object.
pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*C {
    assert(options.object_format == .c);
    // The C backend generates source directly; LLVM/LLD cannot emit C.
    if (options.use_llvm) return error.LLVMHasNoCBackend;
    if (options.use_lld) return error.LLDHasNoCBackend;
    const file = try options.emit.?.directory.handle.createFile(sub_path, .{
        // Truncation is done on `flush`.
        .truncate = false,
        .mode = link.determineMode(options),
    });
    // Clean up the file handle and the allocation if anything below fails.
    errdefer file.close();
    var c_file = try allocator.create(C);
    errdefer allocator.destroy(c_file);
    c_file.* = C{
        .base = .{
            .tag = .c,
            .options = options,
            .file = file,
            .allocator = allocator,
        },
    };
    return c_file;
}
/// Frees the per-decl generated-code buffers. No-op when no module is
/// attached (e.g. pure object linking).
pub fn deinit(self: *C) void {
    const module = self.base.options.module orelse return;
    for (module.decl_table.items()) |entry| {
        self.freeDecl(entry.value);
    }
}
/// Intentional no-op: the C backend has no symbol/offset tables to pre-allocate.
pub fn allocateDeclIndexes(self: *C, decl: *Module.Decl) !void {}
/// Releases the generated C code body and forward declaration for `decl`.
pub fn freeDecl(self: *C, decl: *Module.Decl) void {
    decl.link.c.code.deinit(self.base.allocator);
    decl.fn_link.c.fwd_decl.deinit(self.base.allocator);
}
/// Regenerates the C source for `decl`: the forward declaration (for
/// functions) and the code body, stored back into the decl's link data.
pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
    const tracy = trace(@src());
    defer tracy.end();
    const fwd_decl = &decl.fn_link.c.fwd_decl;
    const code = &decl.link.c.code;
    // Reuse existing allocations from any previous generation of this decl.
    fwd_decl.shrinkRetainingCapacity(0);
    code.shrinkRetainingCapacity(0);
    var object: codegen.Object = .{
        .dg = .{
            .module = module,
            .error_msg = null,
            .decl = decl,
            .fwd_decl = fwd_decl.toManaged(module.gpa),
        },
        .gpa = module.gpa,
        .code = code.toManaged(module.gpa),
        .value_map = codegen.CValueMap.init(module.gpa),
        .indent_writer = undefined, // set later so we can get a pointer to object.code
    };
    object.indent_writer = .{ .underlying_writer = object.code.writer() };
    defer object.value_map.deinit();
    // On success these deinits are harmless: moveToUnmanaged below empties
    // the managed lists first. NOTE(review): on the AnalysisFail early return
    // the decl's unmanaged lists still reference the buffers freed here —
    // confirm freeDecl cannot double-free in that state.
    defer object.code.deinit();
    defer object.dg.fwd_decl.deinit();
    codegen.genDecl(&object) catch |err| switch (err) {
        error.AnalysisFail => {
            // Record the codegen error against the decl and bail gracefully.
            try module.failed_decls.put(module.gpa, decl, object.dg.error_msg.?);
            return;
        },
        else => |e| return e,
    };
    // Hand ownership of the generated buffers back to the decl.
    fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
    code.* = object.code.moveToUnmanaged();
    // Free excess allocated memory for this Decl.
    fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len);
    code.shrinkAndFree(module.gpa, code.items.len);
}
/// Line-number updates require full regeneration in the C backend.
pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void {
    // The C backend does not have the ability to fix line numbers without re-generating
    // the entire Decl.
    return self.updateDecl(module, decl);
}
/// For -ofmt=c, flushing is the same as flushing the module.
pub fn flush(self: *C, comp: *Compilation) !void {
    return self.flushModule(comp);
}
/// Writes the complete C translation unit: the zig.h prelude, then forward
/// declarations and non-function decls, then all function bodies.
pub fn flushModule(self: *C, comp: *Compilation) !void {
    const tracy = trace(@src());
    defer tracy.end();
    const module = self.base.options.module.?;
    // This code path happens exclusively with -ofmt=c. The flush logic for
    // emit-h is in `flushEmitH` below.
    // We collect a list of buffers to write, and write them all at once with pwritev 😎
    var all_buffers = std.ArrayList(std.os.iovec_const).init(comp.gpa);
    defer all_buffers.deinit();
    // This is at least enough until we get to the function bodies without error handling.
    try all_buffers.ensureCapacity(module.decl_table.count() + 1);
    var file_size: u64 = zig_h.len;
    all_buffers.appendAssumeCapacity(.{
        .iov_base = zig_h,
        .iov_len = zig_h.len,
    });
    var fn_count: usize = 0;
    // Forward decls and non-functions first.
    // TODO: performance investigation: would keeping a list of Decls that we should
    // generate, rather than querying here, be faster?
    for (module.decl_table.items()) |kv| {
        const decl = kv.value;
        switch (decl.typed_value) {
            .most_recent => |tvm| {
                const buf = buf: {
                    if (tvm.typed_value.val.castTag(.function)) |_| {
                        // Functions contribute only their forward declaration
                        // here; their body is appended in the second pass.
                        fn_count += 1;
                        break :buf decl.fn_link.c.fwd_decl.items;
                    } else {
                        break :buf decl.link.c.code.items;
                    }
                };
                all_buffers.appendAssumeCapacity(.{
                    .iov_base = buf.ptr,
                    .iov_len = buf.len,
                });
                file_size += buf.len;
            },
            .never_succeeded => continue,
        }
    }
    // Now the function bodies.
    try all_buffers.ensureCapacity(all_buffers.items.len + fn_count);
    for (module.decl_table.items()) |kv| {
        const decl = kv.value;
        switch (decl.typed_value) {
            .most_recent => |tvm| {
                if (tvm.typed_value.val.castTag(.function)) |_| {
                    const buf = decl.link.c.code.items;
                    all_buffers.appendAssumeCapacity(.{
                        .iov_base = buf.ptr,
                        .iov_len = buf.len,
                    });
                    file_size += buf.len;
                }
            },
            .never_succeeded => continue,
        }
    }
    const file = self.base.file.?;
    // setEndPos first so a shrinking output truncates correctly (the file is
    // opened with .truncate = false; see openPath).
    try file.setEndPos(file_size);
    try file.pwritevAll(all_buffers.items, 0);
}
/// Writes the emit-h header: the zig.h prelude followed by each decl's
/// forward declaration. No-op when emit-h was not requested.
pub fn flushEmitH(module: *Module) !void {
    const tracy = trace(@src());
    defer tracy.end();
    const emit_h_loc = module.emit_h orelse return;
    // We collect a list of buffers to write, and write them all at once with pwritev 😎
    var all_buffers = std.ArrayList(std.os.iovec_const).init(module.gpa);
    defer all_buffers.deinit();
    try all_buffers.ensureCapacity(module.decl_table.count() + 1);
    var file_size: u64 = zig_h.len;
    all_buffers.appendAssumeCapacity(.{
        .iov_base = zig_h,
        .iov_len = zig_h.len,
    });
    for (module.decl_table.items()) |kv| {
        const emit_h = kv.value.getEmitH(module);
        const buf = emit_h.fwd_decl.items;
        all_buffers.appendAssumeCapacity(.{
            .iov_base = buf.ptr,
            .iov_len = buf.len,
        });
        file_size += buf.len;
    }
    const directory = emit_h_loc.directory orelse module.comp.local_cache_directory;
    const file = try directory.handle.createFile(emit_h_loc.basename, .{
        // We set the end position explicitly below; by not truncating the file, we possibly
        // make it easier on the file system by doing 1 reallocation instead of two.
        .truncate = false,
    });
    defer file.close();
    try file.setEndPos(file_size);
    try file.pwritevAll(all_buffers.items, 0);
}
/// NOTE(review): currently a no-op — exports appear to be handled during
/// code generation itself; confirm before relying on this hook.
pub fn updateDeclExports(
    self: *C,
    module: *Module,
    decl: *Module.Decl,
    exports: []const *Module.Export,
) !void {}
|
src/link/C.zig
|
const std = @import("std");
const mem = std.mem;
const Neutral = @This();
allocator: *mem.Allocator,
// One bool per code point in [lo, hi]; true means the code point is in this
// "Neutral" set (presumably a generated Unicode property table — confirm
// against the generator).
array: []bool,
lo: u21 = 0,
hi: u21 = 917631,
pub fn init(allocator: *mem.Allocator) !Neutral {
var instance = Neutral{
.allocator = allocator,
.array = try allocator.alloc(bool, 917632),
};
mem.set(bool, instance.array, false);
var index: u21 = 0;
index = 0;
while (index <= 31) : (index += 1) {
instance.array[index] = true;
}
index = 127;
while (index <= 159) : (index += 1) {
instance.array[index] = true;
}
instance.array[160] = true;
instance.array[169] = true;
instance.array[171] = true;
instance.array[181] = true;
instance.array[187] = true;
index = 192;
while (index <= 197) : (index += 1) {
instance.array[index] = true;
}
index = 199;
while (index <= 207) : (index += 1) {
instance.array[index] = true;
}
index = 209;
while (index <= 214) : (index += 1) {
instance.array[index] = true;
}
index = 217;
while (index <= 221) : (index += 1) {
instance.array[index] = true;
}
index = 226;
while (index <= 229) : (index += 1) {
instance.array[index] = true;
}
instance.array[231] = true;
instance.array[235] = true;
index = 238;
while (index <= 239) : (index += 1) {
instance.array[index] = true;
}
instance.array[241] = true;
index = 244;
while (index <= 246) : (index += 1) {
instance.array[index] = true;
}
instance.array[251] = true;
instance.array[253] = true;
index = 255;
while (index <= 256) : (index += 1) {
instance.array[index] = true;
}
index = 258;
while (index <= 272) : (index += 1) {
instance.array[index] = true;
}
instance.array[274] = true;
index = 276;
while (index <= 282) : (index += 1) {
instance.array[index] = true;
}
index = 284;
while (index <= 293) : (index += 1) {
instance.array[index] = true;
}
index = 296;
while (index <= 298) : (index += 1) {
instance.array[index] = true;
}
index = 300;
while (index <= 304) : (index += 1) {
instance.array[index] = true;
}
index = 308;
while (index <= 311) : (index += 1) {
instance.array[index] = true;
}
index = 313;
while (index <= 318) : (index += 1) {
instance.array[index] = true;
}
instance.array[323] = true;
index = 325;
while (index <= 327) : (index += 1) {
instance.array[index] = true;
}
instance.array[332] = true;
index = 334;
while (index <= 337) : (index += 1) {
instance.array[index] = true;
}
index = 340;
while (index <= 357) : (index += 1) {
instance.array[index] = true;
}
index = 360;
while (index <= 362) : (index += 1) {
instance.array[index] = true;
}
index = 364;
while (index <= 442) : (index += 1) {
instance.array[index] = true;
}
instance.array[443] = true;
index = 444;
while (index <= 447) : (index += 1) {
instance.array[index] = true;
}
index = 448;
while (index <= 451) : (index += 1) {
instance.array[index] = true;
}
index = 452;
while (index <= 461) : (index += 1) {
instance.array[index] = true;
}
instance.array[463] = true;
instance.array[465] = true;
instance.array[467] = true;
instance.array[469] = true;
instance.array[471] = true;
instance.array[473] = true;
instance.array[475] = true;
index = 477;
while (index <= 592) : (index += 1) {
instance.array[index] = true;
}
index = 594;
while (index <= 608) : (index += 1) {
instance.array[index] = true;
}
index = 610;
while (index <= 659) : (index += 1) {
instance.array[index] = true;
}
instance.array[660] = true;
index = 661;
while (index <= 687) : (index += 1) {
instance.array[index] = true;
}
index = 688;
while (index <= 705) : (index += 1) {
instance.array[index] = true;
}
index = 706;
while (index <= 707) : (index += 1) {
instance.array[index] = true;
}
instance.array[709] = true;
instance.array[710] = true;
instance.array[712] = true;
instance.array[716] = true;
index = 718;
while (index <= 719) : (index += 1) {
instance.array[index] = true;
}
instance.array[721] = true;
index = 722;
while (index <= 727) : (index += 1) {
instance.array[index] = true;
}
instance.array[732] = true;
instance.array[734] = true;
index = 736;
while (index <= 740) : (index += 1) {
instance.array[index] = true;
}
index = 741;
while (index <= 747) : (index += 1) {
instance.array[index] = true;
}
instance.array[748] = true;
instance.array[749] = true;
instance.array[750] = true;
index = 751;
while (index <= 767) : (index += 1) {
instance.array[index] = true;
}
index = 880;
while (index <= 883) : (index += 1) {
instance.array[index] = true;
}
instance.array[884] = true;
instance.array[885] = true;
index = 886;
while (index <= 887) : (index += 1) {
instance.array[index] = true;
}
instance.array[890] = true;
index = 891;
while (index <= 893) : (index += 1) {
instance.array[index] = true;
}
instance.array[894] = true;
instance.array[895] = true;
index = 900;
while (index <= 901) : (index += 1) {
instance.array[index] = true;
}
instance.array[902] = true;
instance.array[903] = true;
index = 904;
while (index <= 906) : (index += 1) {
instance.array[index] = true;
}
instance.array[908] = true;
index = 910;
while (index <= 912) : (index += 1) {
instance.array[index] = true;
}
index = 938;
while (index <= 944) : (index += 1) {
instance.array[index] = true;
}
instance.array[962] = true;
index = 970;
while (index <= 1013) : (index += 1) {
instance.array[index] = true;
}
instance.array[1014] = true;
index = 1015;
while (index <= 1024) : (index += 1) {
instance.array[index] = true;
}
index = 1026;
while (index <= 1039) : (index += 1) {
instance.array[index] = true;
}
instance.array[1104] = true;
index = 1106;
while (index <= 1153) : (index += 1) {
instance.array[index] = true;
}
instance.array[1154] = true;
index = 1155;
while (index <= 1159) : (index += 1) {
instance.array[index] = true;
}
index = 1160;
while (index <= 1161) : (index += 1) {
instance.array[index] = true;
}
index = 1162;
while (index <= 1327) : (index += 1) {
instance.array[index] = true;
}
index = 1329;
while (index <= 1366) : (index += 1) {
instance.array[index] = true;
}
instance.array[1369] = true;
index = 1370;
while (index <= 1375) : (index += 1) {
instance.array[index] = true;
}
index = 1376;
while (index <= 1416) : (index += 1) {
instance.array[index] = true;
}
instance.array[1417] = true;
instance.array[1418] = true;
index = 1421;
while (index <= 1422) : (index += 1) {
instance.array[index] = true;
}
instance.array[1423] = true;
index = 1425;
while (index <= 1469) : (index += 1) {
instance.array[index] = true;
}
instance.array[1470] = true;
instance.array[1471] = true;
instance.array[1472] = true;
index = 1473;
while (index <= 1474) : (index += 1) {
instance.array[index] = true;
}
instance.array[1475] = true;
index = 1476;
while (index <= 1477) : (index += 1) {
instance.array[index] = true;
}
instance.array[1478] = true;
instance.array[1479] = true;
index = 1488;
while (index <= 1514) : (index += 1) {
instance.array[index] = true;
}
index = 1519;
while (index <= 1522) : (index += 1) {
instance.array[index] = true;
}
index = 1523;
while (index <= 1524) : (index += 1) {
instance.array[index] = true;
}
index = 1536;
while (index <= 1541) : (index += 1) {
instance.array[index] = true;
}
index = 1542;
while (index <= 1544) : (index += 1) {
instance.array[index] = true;
}
index = 1545;
while (index <= 1546) : (index += 1) {
instance.array[index] = true;
}
instance.array[1547] = true;
index = 1548;
while (index <= 1549) : (index += 1) {
instance.array[index] = true;
}
index = 1550;
while (index <= 1551) : (index += 1) {
instance.array[index] = true;
}
index = 1552;
while (index <= 1562) : (index += 1) {
instance.array[index] = true;
}
instance.array[1563] = true;
instance.array[1564] = true;
index = 1566;
while (index <= 1567) : (index += 1) {
instance.array[index] = true;
}
index = 1568;
while (index <= 1599) : (index += 1) {
instance.array[index] = true;
}
instance.array[1600] = true;
index = 1601;
while (index <= 1610) : (index += 1) {
instance.array[index] = true;
}
index = 1611;
while (index <= 1631) : (index += 1) {
instance.array[index] = true;
}
index = 1632;
while (index <= 1641) : (index += 1) {
instance.array[index] = true;
}
index = 1642;
while (index <= 1645) : (index += 1) {
instance.array[index] = true;
}
index = 1646;
while (index <= 1647) : (index += 1) {
instance.array[index] = true;
}
instance.array[1648] = true;
index = 1649;
while (index <= 1747) : (index += 1) {
instance.array[index] = true;
}
instance.array[1748] = true;
instance.array[1749] = true;
index = 1750;
while (index <= 1756) : (index += 1) {
instance.array[index] = true;
}
instance.array[1757] = true;
instance.array[1758] = true;
index = 1759;
while (index <= 1764) : (index += 1) {
instance.array[index] = true;
}
index = 1765;
while (index <= 1766) : (index += 1) {
instance.array[index] = true;
}
index = 1767;
while (index <= 1768) : (index += 1) {
instance.array[index] = true;
}
instance.array[1769] = true;
index = 1770;
while (index <= 1773) : (index += 1) {
instance.array[index] = true;
}
index = 1774;
while (index <= 1775) : (index += 1) {
instance.array[index] = true;
}
index = 1776;
while (index <= 1785) : (index += 1) {
instance.array[index] = true;
}
index = 1786;
while (index <= 1788) : (index += 1) {
instance.array[index] = true;
}
index = 1789;
while (index <= 1790) : (index += 1) {
instance.array[index] = true;
}
instance.array[1791] = true;
index = 1792;
while (index <= 1805) : (index += 1) {
instance.array[index] = true;
}
instance.array[1807] = true;
instance.array[1808] = true;
instance.array[1809] = true;
index = 1810;
while (index <= 1839) : (index += 1) {
instance.array[index] = true;
}
index = 1840;
while (index <= 1866) : (index += 1) {
instance.array[index] = true;
}
index = 1869;
while (index <= 1957) : (index += 1) {
instance.array[index] = true;
}
index = 1958;
while (index <= 1968) : (index += 1) {
instance.array[index] = true;
}
instance.array[1969] = true;
index = 1984;
while (index <= 1993) : (index += 1) {
instance.array[index] = true;
}
index = 1994;
while (index <= 2026) : (index += 1) {
instance.array[index] = true;
}
index = 2027;
while (index <= 2035) : (index += 1) {
instance.array[index] = true;
}
index = 2036;
while (index <= 2037) : (index += 1) {
instance.array[index] = true;
}
instance.array[2038] = true;
index = 2039;
while (index <= 2041) : (index += 1) {
instance.array[index] = true;
}
instance.array[2042] = true;
instance.array[2045] = true;
index = 2046;
while (index <= 2047) : (index += 1) {
instance.array[index] = true;
}
index = 2048;
while (index <= 2069) : (index += 1) {
instance.array[index] = true;
}
index = 2070;
while (index <= 2073) : (index += 1) {
instance.array[index] = true;
}
instance.array[2074] = true;
index = 2075;
while (index <= 2083) : (index += 1) {
instance.array[index] = true;
}
instance.array[2084] = true;
index = 2085;
while (index <= 2087) : (index += 1) {
instance.array[index] = true;
}
instance.array[2088] = true;
index = 2089;
while (index <= 2093) : (index += 1) {
instance.array[index] = true;
}
index = 2096;
while (index <= 2110) : (index += 1) {
instance.array[index] = true;
}
index = 2112;
while (index <= 2136) : (index += 1) {
instance.array[index] = true;
}
index = 2137;
while (index <= 2139) : (index += 1) {
instance.array[index] = true;
}
instance.array[2142] = true;
index = 2144;
while (index <= 2154) : (index += 1) {
instance.array[index] = true;
}
index = 2208;
while (index <= 2228) : (index += 1) {
instance.array[index] = true;
}
index = 2230;
while (index <= 2247) : (index += 1) {
instance.array[index] = true;
}
index = 2259;
while (index <= 2273) : (index += 1) {
instance.array[index] = true;
}
instance.array[2274] = true;
index = 2275;
while (index <= 2306) : (index += 1) {
instance.array[index] = true;
}
instance.array[2307] = true;
index = 2308;
while (index <= 2361) : (index += 1) {
instance.array[index] = true;
}
instance.array[2362] = true;
instance.array[2363] = true;
instance.array[2364] = true;
instance.array[2365] = true;
index = 2366;
while (index <= 2368) : (index += 1) {
instance.array[index] = true;
}
index = 2369;
while (index <= 2376) : (index += 1) {
instance.array[index] = true;
}
index = 2377;
while (index <= 2380) : (index += 1) {
instance.array[index] = true;
}
instance.array[2381] = true;
index = 2382;
while (index <= 2383) : (index += 1) {
instance.array[index] = true;
}
instance.array[2384] = true;
index = 2385;
while (index <= 2391) : (index += 1) {
instance.array[index] = true;
}
index = 2392;
while (index <= 2401) : (index += 1) {
instance.array[index] = true;
}
index = 2402;
while (index <= 2403) : (index += 1) {
instance.array[index] = true;
}
index = 2404;
while (index <= 2405) : (index += 1) {
instance.array[index] = true;
}
index = 2406;
while (index <= 2415) : (index += 1) {
instance.array[index] = true;
}
instance.array[2416] = true;
instance.array[2417] = true;
index = 2418;
while (index <= 2432) : (index += 1) {
instance.array[index] = true;
}
instance.array[2433] = true;
index = 2434;
while (index <= 2435) : (index += 1) {
instance.array[index] = true;
}
index = 2437;
while (index <= 2444) : (index += 1) {
instance.array[index] = true;
}
index = 2447;
while (index <= 2448) : (index += 1) {
instance.array[index] = true;
}
index = 2451;
while (index <= 2472) : (index += 1) {
instance.array[index] = true;
}
index = 2474;
while (index <= 2480) : (index += 1) {
instance.array[index] = true;
}
instance.array[2482] = true;
index = 2486;
while (index <= 2489) : (index += 1) {
instance.array[index] = true;
}
instance.array[2492] = true;
instance.array[2493] = true;
index = 2494;
while (index <= 2496) : (index += 1) {
instance.array[index] = true;
}
index = 2497;
while (index <= 2500) : (index += 1) {
instance.array[index] = true;
}
index = 2503;
while (index <= 2504) : (index += 1) {
instance.array[index] = true;
}
index = 2507;
while (index <= 2508) : (index += 1) {
instance.array[index] = true;
}
instance.array[2509] = true;
instance.array[2510] = true;
instance.array[2519] = true;
index = 2524;
while (index <= 2525) : (index += 1) {
instance.array[index] = true;
}
index = 2527;
while (index <= 2529) : (index += 1) {
instance.array[index] = true;
}
index = 2530;
while (index <= 2531) : (index += 1) {
instance.array[index] = true;
}
index = 2534;
while (index <= 2543) : (index += 1) {
instance.array[index] = true;
}
index = 2544;
while (index <= 2545) : (index += 1) {
instance.array[index] = true;
}
index = 2546;
while (index <= 2547) : (index += 1) {
instance.array[index] = true;
}
index = 2548;
while (index <= 2553) : (index += 1) {
instance.array[index] = true;
}
instance.array[2554] = true;
instance.array[2555] = true;
instance.array[2556] = true;
instance.array[2557] = true;
instance.array[2558] = true;
index = 2561;
while (index <= 2562) : (index += 1) {
instance.array[index] = true;
}
instance.array[2563] = true;
index = 2565;
while (index <= 2570) : (index += 1) {
instance.array[index] = true;
}
index = 2575;
while (index <= 2576) : (index += 1) {
instance.array[index] = true;
}
index = 2579;
while (index <= 2600) : (index += 1) {
instance.array[index] = true;
}
index = 2602;
while (index <= 2608) : (index += 1) {
instance.array[index] = true;
}
index = 2610;
while (index <= 2611) : (index += 1) {
instance.array[index] = true;
}
index = 2613;
while (index <= 2614) : (index += 1) {
instance.array[index] = true;
}
index = 2616;
while (index <= 2617) : (index += 1) {
instance.array[index] = true;
}
instance.array[2620] = true;
index = 2622;
while (index <= 2624) : (index += 1) {
instance.array[index] = true;
}
index = 2625;
while (index <= 2626) : (index += 1) {
instance.array[index] = true;
}
index = 2631;
while (index <= 2632) : (index += 1) {
instance.array[index] = true;
}
index = 2635;
while (index <= 2637) : (index += 1) {
instance.array[index] = true;
}
instance.array[2641] = true;
index = 2649;
while (index <= 2652) : (index += 1) {
instance.array[index] = true;
}
instance.array[2654] = true;
index = 2662;
while (index <= 2671) : (index += 1) {
instance.array[index] = true;
}
index = 2672;
while (index <= 2673) : (index += 1) {
instance.array[index] = true;
}
index = 2674;
while (index <= 2676) : (index += 1) {
instance.array[index] = true;
}
instance.array[2677] = true;
instance.array[2678] = true;
index = 2689;
while (index <= 2690) : (index += 1) {
instance.array[index] = true;
}
instance.array[2691] = true;
index = 2693;
while (index <= 2701) : (index += 1) {
instance.array[index] = true;
}
index = 2703;
while (index <= 2705) : (index += 1) {
instance.array[index] = true;
}
index = 2707;
while (index <= 2728) : (index += 1) {
instance.array[index] = true;
}
index = 2730;
while (index <= 2736) : (index += 1) {
instance.array[index] = true;
}
index = 2738;
while (index <= 2739) : (index += 1) {
instance.array[index] = true;
}
index = 2741;
while (index <= 2745) : (index += 1) {
instance.array[index] = true;
}
instance.array[2748] = true;
instance.array[2749] = true;
index = 2750;
while (index <= 2752) : (index += 1) {
instance.array[index] = true;
}
index = 2753;
while (index <= 2757) : (index += 1) {
instance.array[index] = true;
}
index = 2759;
while (index <= 2760) : (index += 1) {
instance.array[index] = true;
}
instance.array[2761] = true;
index = 2763;
while (index <= 2764) : (index += 1) {
instance.array[index] = true;
}
instance.array[2765] = true;
instance.array[2768] = true;
index = 2784;
while (index <= 2785) : (index += 1) {
instance.array[index] = true;
}
index = 2786;
while (index <= 2787) : (index += 1) {
instance.array[index] = true;
}
index = 2790;
while (index <= 2799) : (index += 1) {
instance.array[index] = true;
}
instance.array[2800] = true;
instance.array[2801] = true;
instance.array[2809] = true;
index = 2810;
while (index <= 2815) : (index += 1) {
instance.array[index] = true;
}
instance.array[2817] = true;
index = 2818;
while (index <= 2819) : (index += 1) {
instance.array[index] = true;
}
index = 2821;
while (index <= 2828) : (index += 1) {
instance.array[index] = true;
}
index = 2831;
while (index <= 2832) : (index += 1) {
instance.array[index] = true;
}
index = 2835;
while (index <= 2856) : (index += 1) {
instance.array[index] = true;
}
index = 2858;
while (index <= 2864) : (index += 1) {
instance.array[index] = true;
}
index = 2866;
while (index <= 2867) : (index += 1) {
instance.array[index] = true;
}
index = 2869;
while (index <= 2873) : (index += 1) {
instance.array[index] = true;
}
instance.array[2876] = true;
instance.array[2877] = true;
instance.array[2878] = true;
instance.array[2879] = true;
instance.array[2880] = true;
index = 2881;
while (index <= 2884) : (index += 1) {
instance.array[index] = true;
}
index = 2887;
while (index <= 2888) : (index += 1) {
instance.array[index] = true;
}
index = 2891;
while (index <= 2892) : (index += 1) {
instance.array[index] = true;
}
instance.array[2893] = true;
index = 2901;
while (index <= 2902) : (index += 1) {
instance.array[index] = true;
}
instance.array[2903] = true;
index = 2908;
while (index <= 2909) : (index += 1) {
instance.array[index] = true;
}
index = 2911;
while (index <= 2913) : (index += 1) {
instance.array[index] = true;
}
index = 2914;
while (index <= 2915) : (index += 1) {
instance.array[index] = true;
}
index = 2918;
while (index <= 2927) : (index += 1) {
instance.array[index] = true;
}
instance.array[2928] = true;
instance.array[2929] = true;
index = 2930;
while (index <= 2935) : (index += 1) {
instance.array[index] = true;
}
instance.array[2946] = true;
instance.array[2947] = true;
index = 2949;
while (index <= 2954) : (index += 1) {
instance.array[index] = true;
}
index = 2958;
while (index <= 2960) : (index += 1) {
instance.array[index] = true;
}
index = 2962;
while (index <= 2965) : (index += 1) {
instance.array[index] = true;
}
index = 2969;
while (index <= 2970) : (index += 1) {
instance.array[index] = true;
}
instance.array[2972] = true;
index = 2974;
while (index <= 2975) : (index += 1) {
instance.array[index] = true;
}
index = 2979;
while (index <= 2980) : (index += 1) {
instance.array[index] = true;
}
index = 2984;
while (index <= 2986) : (index += 1) {
instance.array[index] = true;
}
index = 2990;
while (index <= 3001) : (index += 1) {
instance.array[index] = true;
}
index = 3006;
while (index <= 3007) : (index += 1) {
instance.array[index] = true;
}
instance.array[3008] = true;
index = 3009;
while (index <= 3010) : (index += 1) {
instance.array[index] = true;
}
index = 3014;
while (index <= 3016) : (index += 1) {
instance.array[index] = true;
}
index = 3018;
while (index <= 3020) : (index += 1) {
instance.array[index] = true;
}
instance.array[3021] = true;
instance.array[3024] = true;
instance.array[3031] = true;
index = 3046;
while (index <= 3055) : (index += 1) {
instance.array[index] = true;
}
index = 3056;
while (index <= 3058) : (index += 1) {
instance.array[index] = true;
}
index = 3059;
while (index <= 3064) : (index += 1) {
instance.array[index] = true;
}
instance.array[3065] = true;
instance.array[3066] = true;
instance.array[3072] = true;
index = 3073;
while (index <= 3075) : (index += 1) {
instance.array[index] = true;
}
instance.array[3076] = true;
index = 3077;
while (index <= 3084) : (index += 1) {
instance.array[index] = true;
}
index = 3086;
while (index <= 3088) : (index += 1) {
instance.array[index] = true;
}
index = 3090;
while (index <= 3112) : (index += 1) {
instance.array[index] = true;
}
index = 3114;
while (index <= 3129) : (index += 1) {
instance.array[index] = true;
}
instance.array[3133] = true;
index = 3134;
while (index <= 3136) : (index += 1) {
instance.array[index] = true;
}
index = 3137;
while (index <= 3140) : (index += 1) {
instance.array[index] = true;
}
index = 3142;
while (index <= 3144) : (index += 1) {
instance.array[index] = true;
}
index = 3146;
while (index <= 3149) : (index += 1) {
instance.array[index] = true;
}
index = 3157;
while (index <= 3158) : (index += 1) {
instance.array[index] = true;
}
index = 3160;
while (index <= 3162) : (index += 1) {
instance.array[index] = true;
}
index = 3168;
while (index <= 3169) : (index += 1) {
instance.array[index] = true;
}
index = 3170;
while (index <= 3171) : (index += 1) {
instance.array[index] = true;
}
index = 3174;
while (index <= 3183) : (index += 1) {
instance.array[index] = true;
}
instance.array[3191] = true;
index = 3192;
while (index <= 3198) : (index += 1) {
instance.array[index] = true;
}
instance.array[3199] = true;
instance.array[3200] = true;
instance.array[3201] = true;
index = 3202;
while (index <= 3203) : (index += 1) {
instance.array[index] = true;
}
instance.array[3204] = true;
index = 3205;
while (index <= 3212) : (index += 1) {
instance.array[index] = true;
}
index = 3214;
while (index <= 3216) : (index += 1) {
instance.array[index] = true;
}
index = 3218;
while (index <= 3240) : (index += 1) {
instance.array[index] = true;
}
index = 3242;
while (index <= 3251) : (index += 1) {
instance.array[index] = true;
}
index = 3253;
while (index <= 3257) : (index += 1) {
instance.array[index] = true;
}
instance.array[3260] = true;
instance.array[3261] = true;
instance.array[3262] = true;
instance.array[3263] = true;
index = 3264;
while (index <= 3268) : (index += 1) {
instance.array[index] = true;
}
instance.array[3270] = true;
index = 3271;
while (index <= 3272) : (index += 1) {
instance.array[index] = true;
}
index = 3274;
while (index <= 3275) : (index += 1) {
instance.array[index] = true;
}
index = 3276;
while (index <= 3277) : (index += 1) {
instance.array[index] = true;
}
index = 3285;
while (index <= 3286) : (index += 1) {
instance.array[index] = true;
}
instance.array[3294] = true;
index = 3296;
while (index <= 3297) : (index += 1) {
instance.array[index] = true;
}
index = 3298;
while (index <= 3299) : (index += 1) {
instance.array[index] = true;
}
index = 3302;
while (index <= 3311) : (index += 1) {
instance.array[index] = true;
}
index = 3313;
while (index <= 3314) : (index += 1) {
instance.array[index] = true;
}
index = 3328;
while (index <= 3329) : (index += 1) {
instance.array[index] = true;
}
index = 3330;
while (index <= 3331) : (index += 1) {
instance.array[index] = true;
}
index = 3332;
while (index <= 3340) : (index += 1) {
instance.array[index] = true;
}
index = 3342;
while (index <= 3344) : (index += 1) {
instance.array[index] = true;
}
index = 3346;
while (index <= 3386) : (index += 1) {
instance.array[index] = true;
}
index = 3387;
while (index <= 3388) : (index += 1) {
instance.array[index] = true;
}
instance.array[3389] = true;
index = 3390;
while (index <= 3392) : (index += 1) {
instance.array[index] = true;
}
index = 3393;
while (index <= 3396) : (index += 1) {
instance.array[index] = true;
}
index = 3398;
while (index <= 3400) : (index += 1) {
instance.array[index] = true;
}
index = 3402;
while (index <= 3404) : (index += 1) {
instance.array[index] = true;
}
instance.array[3405] = true;
instance.array[3406] = true;
instance.array[3407] = true;
index = 3412;
while (index <= 3414) : (index += 1) {
instance.array[index] = true;
}
instance.array[3415] = true;
index = 3416;
while (index <= 3422) : (index += 1) {
instance.array[index] = true;
}
index = 3423;
while (index <= 3425) : (index += 1) {
instance.array[index] = true;
}
index = 3426;
while (index <= 3427) : (index += 1) {
instance.array[index] = true;
}
index = 3430;
while (index <= 3439) : (index += 1) {
instance.array[index] = true;
}
index = 3440;
while (index <= 3448) : (index += 1) {
instance.array[index] = true;
}
instance.array[3449] = true;
index = 3450;
while (index <= 3455) : (index += 1) {
instance.array[index] = true;
}
instance.array[3457] = true;
index = 3458;
while (index <= 3459) : (index += 1) {
instance.array[index] = true;
}
index = 3461;
while (index <= 3478) : (index += 1) {
instance.array[index] = true;
}
index = 3482;
while (index <= 3505) : (index += 1) {
instance.array[index] = true;
}
index = 3507;
while (index <= 3515) : (index += 1) {
instance.array[index] = true;
}
instance.array[3517] = true;
index = 3520;
while (index <= 3526) : (index += 1) {
instance.array[index] = true;
}
instance.array[3530] = true;
index = 3535;
while (index <= 3537) : (index += 1) {
instance.array[index] = true;
}
index = 3538;
while (index <= 3540) : (index += 1) {
instance.array[index] = true;
}
instance.array[3542] = true;
index = 3544;
while (index <= 3551) : (index += 1) {
instance.array[index] = true;
}
index = 3558;
while (index <= 3567) : (index += 1) {
instance.array[index] = true;
}
index = 3570;
while (index <= 3571) : (index += 1) {
instance.array[index] = true;
}
instance.array[3572] = true;
index = 3585;
while (index <= 3632) : (index += 1) {
instance.array[index] = true;
}
instance.array[3633] = true;
index = 3634;
while (index <= 3635) : (index += 1) {
instance.array[index] = true;
}
index = 3636;
while (index <= 3642) : (index += 1) {
instance.array[index] = true;
}
instance.array[3647] = true;
index = 3648;
while (index <= 3653) : (index += 1) {
instance.array[index] = true;
}
instance.array[3654] = true;
index = 3655;
while (index <= 3662) : (index += 1) {
instance.array[index] = true;
}
instance.array[3663] = true;
index = 3664;
while (index <= 3673) : (index += 1) {
instance.array[index] = true;
}
index = 3674;
while (index <= 3675) : (index += 1) {
instance.array[index] = true;
}
index = 3713;
while (index <= 3714) : (index += 1) {
instance.array[index] = true;
}
instance.array[3716] = true;
index = 3718;
while (index <= 3722) : (index += 1) {
instance.array[index] = true;
}
index = 3724;
while (index <= 3747) : (index += 1) {
instance.array[index] = true;
}
instance.array[3749] = true;
index = 3751;
while (index <= 3760) : (index += 1) {
instance.array[index] = true;
}
instance.array[3761] = true;
index = 3762;
while (index <= 3763) : (index += 1) {
instance.array[index] = true;
}
index = 3764;
while (index <= 3772) : (index += 1) {
instance.array[index] = true;
}
instance.array[3773] = true;
index = 3776;
while (index <= 3780) : (index += 1) {
instance.array[index] = true;
}
instance.array[3782] = true;
index = 3784;
while (index <= 3789) : (index += 1) {
instance.array[index] = true;
}
index = 3792;
while (index <= 3801) : (index += 1) {
instance.array[index] = true;
}
index = 3804;
while (index <= 3807) : (index += 1) {
instance.array[index] = true;
}
instance.array[3840] = true;
index = 3841;
while (index <= 3843) : (index += 1) {
instance.array[index] = true;
}
index = 3844;
while (index <= 3858) : (index += 1) {
instance.array[index] = true;
}
instance.array[3859] = true;
instance.array[3860] = true;
index = 3861;
while (index <= 3863) : (index += 1) {
instance.array[index] = true;
}
index = 3864;
while (index <= 3865) : (index += 1) {
instance.array[index] = true;
}
index = 3866;
while (index <= 3871) : (index += 1) {
instance.array[index] = true;
}
index = 3872;
while (index <= 3881) : (index += 1) {
instance.array[index] = true;
}
index = 3882;
while (index <= 3891) : (index += 1) {
instance.array[index] = true;
}
instance.array[3892] = true;
instance.array[3893] = true;
instance.array[3894] = true;
instance.array[3895] = true;
instance.array[3896] = true;
instance.array[3897] = true;
instance.array[3898] = true;
instance.array[3899] = true;
instance.array[3900] = true;
instance.array[3901] = true;
index = 3902;
while (index <= 3903) : (index += 1) {
instance.array[index] = true;
}
index = 3904;
while (index <= 3911) : (index += 1) {
instance.array[index] = true;
}
index = 3913;
while (index <= 3948) : (index += 1) {
instance.array[index] = true;
}
index = 3953;
while (index <= 3966) : (index += 1) {
instance.array[index] = true;
}
instance.array[3967] = true;
index = 3968;
while (index <= 3972) : (index += 1) {
instance.array[index] = true;
}
instance.array[3973] = true;
index = 3974;
while (index <= 3975) : (index += 1) {
instance.array[index] = true;
}
index = 3976;
while (index <= 3980) : (index += 1) {
instance.array[index] = true;
}
index = 3981;
while (index <= 3991) : (index += 1) {
instance.array[index] = true;
}
index = 3993;
while (index <= 4028) : (index += 1) {
instance.array[index] = true;
}
index = 4030;
while (index <= 4037) : (index += 1) {
instance.array[index] = true;
}
instance.array[4038] = true;
index = 4039;
while (index <= 4044) : (index += 1) {
instance.array[index] = true;
}
index = 4046;
while (index <= 4047) : (index += 1) {
instance.array[index] = true;
}
index = 4048;
while (index <= 4052) : (index += 1) {
instance.array[index] = true;
}
index = 4053;
while (index <= 4056) : (index += 1) {
instance.array[index] = true;
}
index = 4057;
while (index <= 4058) : (index += 1) {
instance.array[index] = true;
}
index = 4096;
while (index <= 4138) : (index += 1) {
instance.array[index] = true;
}
index = 4139;
while (index <= 4140) : (index += 1) {
instance.array[index] = true;
}
index = 4141;
while (index <= 4144) : (index += 1) {
instance.array[index] = true;
}
instance.array[4145] = true;
index = 4146;
while (index <= 4151) : (index += 1) {
instance.array[index] = true;
}
instance.array[4152] = true;
index = 4153;
while (index <= 4154) : (index += 1) {
instance.array[index] = true;
}
index = 4155;
while (index <= 4156) : (index += 1) {
instance.array[index] = true;
}
index = 4157;
while (index <= 4158) : (index += 1) {
instance.array[index] = true;
}
instance.array[4159] = true;
index = 4160;
while (index <= 4169) : (index += 1) {
instance.array[index] = true;
}
index = 4170;
while (index <= 4175) : (index += 1) {
instance.array[index] = true;
}
index = 4176;
while (index <= 4181) : (index += 1) {
instance.array[index] = true;
}
index = 4182;
while (index <= 4183) : (index += 1) {
instance.array[index] = true;
}
index = 4184;
while (index <= 4185) : (index += 1) {
instance.array[index] = true;
}
index = 4186;
while (index <= 4189) : (index += 1) {
instance.array[index] = true;
}
index = 4190;
while (index <= 4192) : (index += 1) {
instance.array[index] = true;
}
instance.array[4193] = true;
index = 4194;
while (index <= 4196) : (index += 1) {
instance.array[index] = true;
}
index = 4197;
while (index <= 4198) : (index += 1) {
instance.array[index] = true;
}
index = 4199;
while (index <= 4205) : (index += 1) {
instance.array[index] = true;
}
index = 4206;
while (index <= 4208) : (index += 1) {
instance.array[index] = true;
}
index = 4209;
while (index <= 4212) : (index += 1) {
instance.array[index] = true;
}
index = 4213;
while (index <= 4225) : (index += 1) {
instance.array[index] = true;
}
instance.array[4226] = true;
index = 4227;
while (index <= 4228) : (index += 1) {
instance.array[index] = true;
}
index = 4229;
while (index <= 4230) : (index += 1) {
instance.array[index] = true;
}
index = 4231;
while (index <= 4236) : (index += 1) {
instance.array[index] = true;
}
instance.array[4237] = true;
instance.array[4238] = true;
instance.array[4239] = true;
index = 4240;
while (index <= 4249) : (index += 1) {
instance.array[index] = true;
}
index = 4250;
while (index <= 4252) : (index += 1) {
instance.array[index] = true;
}
instance.array[4253] = true;
index = 4254;
while (index <= 4255) : (index += 1) {
instance.array[index] = true;
}
index = 4256;
while (index <= 4293) : (index += 1) {
instance.array[index] = true;
}
instance.array[4295] = true;
instance.array[4301] = true;
index = 4304;
while (index <= 4346) : (index += 1) {
instance.array[index] = true;
}
instance.array[4347] = true;
instance.array[4348] = true;
index = 4349;
while (index <= 4351) : (index += 1) {
instance.array[index] = true;
}
index = 4448;
while (index <= 4680) : (index += 1) {
instance.array[index] = true;
}
index = 4682;
while (index <= 4685) : (index += 1) {
instance.array[index] = true;
}
index = 4688;
while (index <= 4694) : (index += 1) {
instance.array[index] = true;
}
instance.array[4696] = true;
index = 4698;
while (index <= 4701) : (index += 1) {
instance.array[index] = true;
}
index = 4704;
while (index <= 4744) : (index += 1) {
instance.array[index] = true;
}
index = 4746;
while (index <= 4749) : (index += 1) {
instance.array[index] = true;
}
index = 4752;
while (index <= 4784) : (index += 1) {
instance.array[index] = true;
}
index = 4786;
while (index <= 4789) : (index += 1) {
instance.array[index] = true;
}
index = 4792;
while (index <= 4798) : (index += 1) {
instance.array[index] = true;
}
instance.array[4800] = true;
index = 4802;
while (index <= 4805) : (index += 1) {
instance.array[index] = true;
}
index = 4808;
while (index <= 4822) : (index += 1) {
instance.array[index] = true;
}
index = 4824;
while (index <= 4880) : (index += 1) {
instance.array[index] = true;
}
index = 4882;
while (index <= 4885) : (index += 1) {
instance.array[index] = true;
}
index = 4888;
while (index <= 4954) : (index += 1) {
instance.array[index] = true;
}
index = 4957;
while (index <= 4959) : (index += 1) {
instance.array[index] = true;
}
index = 4960;
while (index <= 4968) : (index += 1) {
instance.array[index] = true;
}
index = 4969;
while (index <= 4988) : (index += 1) {
instance.array[index] = true;
}
index = 4992;
while (index <= 5007) : (index += 1) {
instance.array[index] = true;
}
index = 5008;
while (index <= 5017) : (index += 1) {
instance.array[index] = true;
}
index = 5024;
while (index <= 5109) : (index += 1) {
instance.array[index] = true;
}
index = 5112;
while (index <= 5117) : (index += 1) {
instance.array[index] = true;
}
instance.array[5120] = true;
index = 5121;
while (index <= 5740) : (index += 1) {
instance.array[index] = true;
}
instance.array[5741] = true;
instance.array[5742] = true;
index = 5743;
while (index <= 5759) : (index += 1) {
instance.array[index] = true;
}
instance.array[5760] = true;
index = 5761;
while (index <= 5786) : (index += 1) {
instance.array[index] = true;
}
instance.array[5787] = true;
instance.array[5788] = true;
index = 5792;
while (index <= 5866) : (index += 1) {
instance.array[index] = true;
}
index = 5867;
while (index <= 5869) : (index += 1) {
instance.array[index] = true;
}
index = 5870;
while (index <= 5872) : (index += 1) {
instance.array[index] = true;
}
index = 5873;
while (index <= 5880) : (index += 1) {
instance.array[index] = true;
}
index = 5888;
while (index <= 5900) : (index += 1) {
instance.array[index] = true;
}
index = 5902;
while (index <= 5905) : (index += 1) {
instance.array[index] = true;
}
index = 5906;
while (index <= 5908) : (index += 1) {
instance.array[index] = true;
}
index = 5920;
while (index <= 5937) : (index += 1) {
instance.array[index] = true;
}
index = 5938;
while (index <= 5940) : (index += 1) {
instance.array[index] = true;
}
index = 5941;
while (index <= 5942) : (index += 1) {
instance.array[index] = true;
}
index = 5952;
while (index <= 5969) : (index += 1) {
instance.array[index] = true;
}
index = 5970;
while (index <= 5971) : (index += 1) {
instance.array[index] = true;
}
index = 5984;
while (index <= 5996) : (index += 1) {
instance.array[index] = true;
}
index = 5998;
while (index <= 6000) : (index += 1) {
instance.array[index] = true;
}
index = 6002;
while (index <= 6003) : (index += 1) {
instance.array[index] = true;
}
index = 6016;
while (index <= 6067) : (index += 1) {
instance.array[index] = true;
}
index = 6068;
while (index <= 6069) : (index += 1) {
instance.array[index] = true;
}
instance.array[6070] = true;
index = 6071;
while (index <= 6077) : (index += 1) {
instance.array[index] = true;
}
index = 6078;
while (index <= 6085) : (index += 1) {
instance.array[index] = true;
}
instance.array[6086] = true;
index = 6087;
while (index <= 6088) : (index += 1) {
instance.array[index] = true;
}
index = 6089;
while (index <= 6099) : (index += 1) {
instance.array[index] = true;
}
index = 6100;
while (index <= 6102) : (index += 1) {
instance.array[index] = true;
}
instance.array[6103] = true;
index = 6104;
while (index <= 6106) : (index += 1) {
instance.array[index] = true;
}
instance.array[6107] = true;
instance.array[6108] = true;
instance.array[6109] = true;
index = 6112;
while (index <= 6121) : (index += 1) {
instance.array[index] = true;
}
index = 6128;
while (index <= 6137) : (index += 1) {
instance.array[index] = true;
}
index = 6144;
while (index <= 6149) : (index += 1) {
instance.array[index] = true;
}
instance.array[6150] = true;
index = 6151;
while (index <= 6154) : (index += 1) {
instance.array[index] = true;
}
index = 6155;
while (index <= 6157) : (index += 1) {
instance.array[index] = true;
}
instance.array[6158] = true;
index = 6160;
while (index <= 6169) : (index += 1) {
instance.array[index] = true;
}
index = 6176;
while (index <= 6210) : (index += 1) {
instance.array[index] = true;
}
instance.array[6211] = true;
index = 6212;
while (index <= 6264) : (index += 1) {
instance.array[index] = true;
}
index = 6272;
while (index <= 6276) : (index += 1) {
instance.array[index] = true;
}
index = 6277;
while (index <= 6278) : (index += 1) {
instance.array[index] = true;
}
index = 6279;
while (index <= 6312) : (index += 1) {
instance.array[index] = true;
}
instance.array[6313] = true;
instance.array[6314] = true;
index = 6320;
while (index <= 6389) : (index += 1) {
instance.array[index] = true;
}
index = 6400;
while (index <= 6430) : (index += 1) {
instance.array[index] = true;
}
index = 6432;
while (index <= 6434) : (index += 1) {
instance.array[index] = true;
}
index = 6435;
while (index <= 6438) : (index += 1) {
instance.array[index] = true;
}
index = 6439;
while (index <= 6440) : (index += 1) {
instance.array[index] = true;
}
index = 6441;
while (index <= 6443) : (index += 1) {
instance.array[index] = true;
}
index = 6448;
while (index <= 6449) : (index += 1) {
instance.array[index] = true;
}
instance.array[6450] = true;
index = 6451;
while (index <= 6456) : (index += 1) {
instance.array[index] = true;
}
index = 6457;
while (index <= 6459) : (index += 1) {
instance.array[index] = true;
}
instance.array[6464] = true;
index = 6468;
while (index <= 6469) : (index += 1) {
instance.array[index] = true;
}
index = 6470;
while (index <= 6479) : (index += 1) {
instance.array[index] = true;
}
index = 6480;
while (index <= 6509) : (index += 1) {
instance.array[index] = true;
}
index = 6512;
while (index <= 6516) : (index += 1) {
instance.array[index] = true;
}
index = 6528;
while (index <= 6571) : (index += 1) {
instance.array[index] = true;
}
index = 6576;
while (index <= 6601) : (index += 1) {
instance.array[index] = true;
}
index = 6608;
while (index <= 6617) : (index += 1) {
instance.array[index] = true;
}
instance.array[6618] = true;
index = 6622;
while (index <= 6655) : (index += 1) {
instance.array[index] = true;
}
index = 6656;
while (index <= 6678) : (index += 1) {
instance.array[index] = true;
}
index = 6679;
while (index <= 6680) : (index += 1) {
instance.array[index] = true;
}
index = 6681;
while (index <= 6682) : (index += 1) {
instance.array[index] = true;
}
instance.array[6683] = true;
index = 6686;
while (index <= 6687) : (index += 1) {
instance.array[index] = true;
}
index = 6688;
while (index <= 6740) : (index += 1) {
instance.array[index] = true;
}
instance.array[6741] = true;
instance.array[6742] = true;
instance.array[6743] = true;
index = 6744;
while (index <= 6750) : (index += 1) {
instance.array[index] = true;
}
instance.array[6752] = true;
instance.array[6753] = true;
instance.array[6754] = true;
index = 6755;
while (index <= 6756) : (index += 1) {
instance.array[index] = true;
}
index = 6757;
while (index <= 6764) : (index += 1) {
instance.array[index] = true;
}
index = 6765;
while (index <= 6770) : (index += 1) {
instance.array[index] = true;
}
index = 6771;
while (index <= 6780) : (index += 1) {
instance.array[index] = true;
}
instance.array[6783] = true;
index = 6784;
while (index <= 6793) : (index += 1) {
instance.array[index] = true;
}
index = 6800;
while (index <= 6809) : (index += 1) {
instance.array[index] = true;
}
index = 6816;
while (index <= 6822) : (index += 1) {
instance.array[index] = true;
}
instance.array[6823] = true;
index = 6824;
while (index <= 6829) : (index += 1) {
instance.array[index] = true;
}
index = 6832;
while (index <= 6845) : (index += 1) {
instance.array[index] = true;
}
instance.array[6846] = true;
index = 6847;
while (index <= 6848) : (index += 1) {
instance.array[index] = true;
}
index = 6912;
while (index <= 6915) : (index += 1) {
instance.array[index] = true;
}
instance.array[6916] = true;
index = 6917;
while (index <= 6963) : (index += 1) {
instance.array[index] = true;
}
instance.array[6964] = true;
instance.array[6965] = true;
index = 6966;
while (index <= 6970) : (index += 1) {
instance.array[index] = true;
}
instance.array[6971] = true;
instance.array[6972] = true;
index = 6973;
while (index <= 6977) : (index += 1) {
instance.array[index] = true;
}
instance.array[6978] = true;
index = 6979;
while (index <= 6980) : (index += 1) {
instance.array[index] = true;
}
index = 6981;
while (index <= 6987) : (index += 1) {
instance.array[index] = true;
}
index = 6992;
while (index <= 7001) : (index += 1) {
instance.array[index] = true;
}
index = 7002;
while (index <= 7008) : (index += 1) {
instance.array[index] = true;
}
index = 7009;
while (index <= 7018) : (index += 1) {
instance.array[index] = true;
}
index = 7019;
while (index <= 7027) : (index += 1) {
instance.array[index] = true;
}
index = 7028;
while (index <= 7036) : (index += 1) {
instance.array[index] = true;
}
index = 7040;
while (index <= 7041) : (index += 1) {
instance.array[index] = true;
}
instance.array[7042] = true;
index = 7043;
while (index <= 7072) : (index += 1) {
instance.array[index] = true;
}
instance.array[7073] = true;
index = 7074;
while (index <= 7077) : (index += 1) {
instance.array[index] = true;
}
index = 7078;
while (index <= 7079) : (index += 1) {
instance.array[index] = true;
}
index = 7080;
while (index <= 7081) : (index += 1) {
instance.array[index] = true;
}
instance.array[7082] = true;
index = 7083;
while (index <= 7085) : (index += 1) {
instance.array[index] = true;
}
index = 7086;
while (index <= 7087) : (index += 1) {
instance.array[index] = true;
}
index = 7088;
while (index <= 7097) : (index += 1) {
instance.array[index] = true;
}
index = 7098;
while (index <= 7141) : (index += 1) {
instance.array[index] = true;
}
instance.array[7142] = true;
instance.array[7143] = true;
index = 7144;
while (index <= 7145) : (index += 1) {
instance.array[index] = true;
}
index = 7146;
while (index <= 7148) : (index += 1) {
instance.array[index] = true;
}
instance.array[7149] = true;
instance.array[7150] = true;
index = 7151;
while (index <= 7153) : (index += 1) {
instance.array[index] = true;
}
index = 7154;
while (index <= 7155) : (index += 1) {
instance.array[index] = true;
}
index = 7164;
while (index <= 7167) : (index += 1) {
instance.array[index] = true;
}
index = 7168;
while (index <= 7203) : (index += 1) {
instance.array[index] = true;
}
index = 7204;
while (index <= 7211) : (index += 1) {
instance.array[index] = true;
}
index = 7212;
while (index <= 7219) : (index += 1) {
instance.array[index] = true;
}
index = 7220;
while (index <= 7221) : (index += 1) {
instance.array[index] = true;
}
index = 7222;
while (index <= 7223) : (index += 1) {
instance.array[index] = true;
}
index = 7227;
while (index <= 7231) : (index += 1) {
instance.array[index] = true;
}
index = 7232;
while (index <= 7241) : (index += 1) {
instance.array[index] = true;
}
index = 7245;
while (index <= 7247) : (index += 1) {
instance.array[index] = true;
}
index = 7248;
while (index <= 7257) : (index += 1) {
instance.array[index] = true;
}
index = 7258;
while (index <= 7287) : (index += 1) {
instance.array[index] = true;
}
index = 7288;
while (index <= 7293) : (index += 1) {
instance.array[index] = true;
}
index = 7294;
while (index <= 7295) : (index += 1) {
instance.array[index] = true;
}
index = 7296;
while (index <= 7304) : (index += 1) {
instance.array[index] = true;
}
index = 7312;
while (index <= 7354) : (index += 1) {
instance.array[index] = true;
}
index = 7357;
while (index <= 7359) : (index += 1) {
instance.array[index] = true;
}
index = 7360;
while (index <= 7367) : (index += 1) {
instance.array[index] = true;
}
index = 7376;
while (index <= 7378) : (index += 1) {
instance.array[index] = true;
}
instance.array[7379] = true;
index = 7380;
while (index <= 7392) : (index += 1) {
instance.array[index] = true;
}
instance.array[7393] = true;
index = 7394;
while (index <= 7400) : (index += 1) {
instance.array[index] = true;
}
index = 7401;
while (index <= 7404) : (index += 1) {
instance.array[index] = true;
}
instance.array[7405] = true;
index = 7406;
while (index <= 7411) : (index += 1) {
instance.array[index] = true;
}
instance.array[7412] = true;
index = 7413;
while (index <= 7414) : (index += 1) {
instance.array[index] = true;
}
instance.array[7415] = true;
index = 7416;
while (index <= 7417) : (index += 1) {
instance.array[index] = true;
}
instance.array[7418] = true;
index = 7424;
while (index <= 7467) : (index += 1) {
instance.array[index] = true;
}
index = 7468;
while (index <= 7530) : (index += 1) {
instance.array[index] = true;
}
index = 7531;
while (index <= 7543) : (index += 1) {
instance.array[index] = true;
}
instance.array[7544] = true;
index = 7545;
while (index <= 7578) : (index += 1) {
instance.array[index] = true;
}
index = 7579;
while (index <= 7615) : (index += 1) {
instance.array[index] = true;
}
index = 7616;
while (index <= 7673) : (index += 1) {
instance.array[index] = true;
}
index = 7675;
while (index <= 7679) : (index += 1) {
instance.array[index] = true;
}
index = 7680;
while (index <= 7957) : (index += 1) {
instance.array[index] = true;
}
index = 7960;
while (index <= 7965) : (index += 1) {
instance.array[index] = true;
}
index = 7968;
while (index <= 8005) : (index += 1) {
instance.array[index] = true;
}
index = 8008;
while (index <= 8013) : (index += 1) {
instance.array[index] = true;
}
index = 8016;
while (index <= 8023) : (index += 1) {
instance.array[index] = true;
}
instance.array[8025] = true;
instance.array[8027] = true;
instance.array[8029] = true;
index = 8031;
while (index <= 8061) : (index += 1) {
instance.array[index] = true;
}
index = 8064;
while (index <= 8116) : (index += 1) {
instance.array[index] = true;
}
index = 8118;
while (index <= 8124) : (index += 1) {
instance.array[index] = true;
}
instance.array[8125] = true;
instance.array[8126] = true;
index = 8127;
while (index <= 8129) : (index += 1) {
instance.array[index] = true;
}
index = 8130;
while (index <= 8132) : (index += 1) {
instance.array[index] = true;
}
index = 8134;
while (index <= 8140) : (index += 1) {
instance.array[index] = true;
}
index = 8141;
while (index <= 8143) : (index += 1) {
instance.array[index] = true;
}
index = 8144;
while (index <= 8147) : (index += 1) {
instance.array[index] = true;
}
index = 8150;
while (index <= 8155) : (index += 1) {
instance.array[index] = true;
}
index = 8157;
while (index <= 8159) : (index += 1) {
instance.array[index] = true;
}
index = 8160;
while (index <= 8172) : (index += 1) {
instance.array[index] = true;
}
index = 8173;
while (index <= 8175) : (index += 1) {
instance.array[index] = true;
}
index = 8178;
while (index <= 8180) : (index += 1) {
instance.array[index] = true;
}
index = 8182;
while (index <= 8188) : (index += 1) {
instance.array[index] = true;
}
index = 8189;
while (index <= 8190) : (index += 1) {
instance.array[index] = true;
}
index = 8192;
while (index <= 8202) : (index += 1) {
instance.array[index] = true;
}
index = 8203;
while (index <= 8207) : (index += 1) {
instance.array[index] = true;
}
index = 8209;
while (index <= 8210) : (index += 1) {
instance.array[index] = true;
}
instance.array[8215] = true;
instance.array[8218] = true;
instance.array[8219] = true;
instance.array[8222] = true;
instance.array[8223] = true;
instance.array[8227] = true;
instance.array[8232] = true;
instance.array[8233] = true;
index = 8234;
while (index <= 8238) : (index += 1) {
instance.array[index] = true;
}
instance.array[8239] = true;
instance.array[8241] = true;
instance.array[8244] = true;
index = 8246;
while (index <= 8248) : (index += 1) {
instance.array[index] = true;
}
instance.array[8249] = true;
instance.array[8250] = true;
index = 8252;
while (index <= 8253) : (index += 1) {
instance.array[index] = true;
}
index = 8255;
while (index <= 8256) : (index += 1) {
instance.array[index] = true;
}
index = 8257;
while (index <= 8259) : (index += 1) {
instance.array[index] = true;
}
instance.array[8260] = true;
instance.array[8261] = true;
instance.array[8262] = true;
index = 8263;
while (index <= 8273) : (index += 1) {
instance.array[index] = true;
}
instance.array[8274] = true;
instance.array[8275] = true;
instance.array[8276] = true;
index = 8277;
while (index <= 8286) : (index += 1) {
instance.array[index] = true;
}
instance.array[8287] = true;
index = 8288;
while (index <= 8292) : (index += 1) {
instance.array[index] = true;
}
index = 8294;
while (index <= 8303) : (index += 1) {
instance.array[index] = true;
}
instance.array[8304] = true;
instance.array[8305] = true;
index = 8309;
while (index <= 8313) : (index += 1) {
instance.array[index] = true;
}
index = 8314;
while (index <= 8316) : (index += 1) {
instance.array[index] = true;
}
instance.array[8317] = true;
instance.array[8318] = true;
instance.array[8320] = true;
index = 8325;
while (index <= 8329) : (index += 1) {
instance.array[index] = true;
}
index = 8330;
while (index <= 8332) : (index += 1) {
instance.array[index] = true;
}
instance.array[8333] = true;
instance.array[8334] = true;
index = 8336;
while (index <= 8348) : (index += 1) {
instance.array[index] = true;
}
index = 8352;
while (index <= 8360) : (index += 1) {
instance.array[index] = true;
}
index = 8362;
while (index <= 8363) : (index += 1) {
instance.array[index] = true;
}
index = 8365;
while (index <= 8383) : (index += 1) {
instance.array[index] = true;
}
index = 8400;
while (index <= 8412) : (index += 1) {
instance.array[index] = true;
}
index = 8413;
while (index <= 8416) : (index += 1) {
instance.array[index] = true;
}
instance.array[8417] = true;
index = 8418;
while (index <= 8420) : (index += 1) {
instance.array[index] = true;
}
index = 8421;
while (index <= 8432) : (index += 1) {
instance.array[index] = true;
}
index = 8448;
while (index <= 8449) : (index += 1) {
instance.array[index] = true;
}
instance.array[8450] = true;
instance.array[8452] = true;
instance.array[8454] = true;
instance.array[8455] = true;
instance.array[8456] = true;
index = 8458;
while (index <= 8466) : (index += 1) {
instance.array[index] = true;
}
instance.array[8468] = true;
instance.array[8469] = true;
instance.array[8471] = true;
instance.array[8472] = true;
index = 8473;
while (index <= 8477) : (index += 1) {
instance.array[index] = true;
}
index = 8478;
while (index <= 8480) : (index += 1) {
instance.array[index] = true;
}
instance.array[8483] = true;
instance.array[8484] = true;
instance.array[8485] = true;
instance.array[8487] = true;
instance.array[8488] = true;
instance.array[8489] = true;
instance.array[8490] = true;
index = 8492;
while (index <= 8493) : (index += 1) {
instance.array[index] = true;
}
instance.array[8494] = true;
index = 8495;
while (index <= 8500) : (index += 1) {
instance.array[index] = true;
}
index = 8501;
while (index <= 8504) : (index += 1) {
instance.array[index] = true;
}
instance.array[8505] = true;
index = 8506;
while (index <= 8507) : (index += 1) {
instance.array[index] = true;
}
index = 8508;
while (index <= 8511) : (index += 1) {
instance.array[index] = true;
}
index = 8512;
while (index <= 8516) : (index += 1) {
instance.array[index] = true;
}
index = 8517;
while (index <= 8521) : (index += 1) {
instance.array[index] = true;
}
instance.array[8522] = true;
instance.array[8523] = true;
index = 8524;
while (index <= 8525) : (index += 1) {
instance.array[index] = true;
}
instance.array[8526] = true;
instance.array[8527] = true;
index = 8528;
while (index <= 8530) : (index += 1) {
instance.array[index] = true;
}
index = 8533;
while (index <= 8538) : (index += 1) {
instance.array[index] = true;
}
instance.array[8543] = true;
index = 8556;
while (index <= 8559) : (index += 1) {
instance.array[index] = true;
}
index = 8570;
while (index <= 8578) : (index += 1) {
instance.array[index] = true;
}
index = 8579;
while (index <= 8580) : (index += 1) {
instance.array[index] = true;
}
index = 8581;
while (index <= 8584) : (index += 1) {
instance.array[index] = true;
}
index = 8586;
while (index <= 8587) : (index += 1) {
instance.array[index] = true;
}
index = 8602;
while (index <= 8603) : (index += 1) {
instance.array[index] = true;
}
index = 8604;
while (index <= 8607) : (index += 1) {
instance.array[index] = true;
}
instance.array[8608] = true;
index = 8609;
while (index <= 8610) : (index += 1) {
instance.array[index] = true;
}
instance.array[8611] = true;
index = 8612;
while (index <= 8613) : (index += 1) {
instance.array[index] = true;
}
instance.array[8614] = true;
index = 8615;
while (index <= 8621) : (index += 1) {
instance.array[index] = true;
}
instance.array[8622] = true;
index = 8623;
while (index <= 8631) : (index += 1) {
instance.array[index] = true;
}
index = 8634;
while (index <= 8653) : (index += 1) {
instance.array[index] = true;
}
index = 8654;
while (index <= 8655) : (index += 1) {
instance.array[index] = true;
}
index = 8656;
while (index <= 8657) : (index += 1) {
instance.array[index] = true;
}
instance.array[8659] = true;
index = 8661;
while (index <= 8678) : (index += 1) {
instance.array[index] = true;
}
index = 8680;
while (index <= 8691) : (index += 1) {
instance.array[index] = true;
}
index = 8692;
while (index <= 8703) : (index += 1) {
instance.array[index] = true;
}
instance.array[8705] = true;
index = 8708;
while (index <= 8710) : (index += 1) {
instance.array[index] = true;
}
index = 8713;
while (index <= 8714) : (index += 1) {
instance.array[index] = true;
}
index = 8716;
while (index <= 8718) : (index += 1) {
instance.array[index] = true;
}
instance.array[8720] = true;
index = 8722;
while (index <= 8724) : (index += 1) {
instance.array[index] = true;
}
index = 8726;
while (index <= 8729) : (index += 1) {
instance.array[index] = true;
}
index = 8731;
while (index <= 8732) : (index += 1) {
instance.array[index] = true;
}
index = 8737;
while (index <= 8738) : (index += 1) {
instance.array[index] = true;
}
instance.array[8740] = true;
instance.array[8742] = true;
instance.array[8749] = true;
index = 8751;
while (index <= 8755) : (index += 1) {
instance.array[index] = true;
}
index = 8760;
while (index <= 8763) : (index += 1) {
instance.array[index] = true;
}
index = 8766;
while (index <= 8775) : (index += 1) {
instance.array[index] = true;
}
index = 8777;
while (index <= 8779) : (index += 1) {
instance.array[index] = true;
}
index = 8781;
while (index <= 8785) : (index += 1) {
instance.array[index] = true;
}
index = 8787;
while (index <= 8799) : (index += 1) {
instance.array[index] = true;
}
index = 8802;
while (index <= 8803) : (index += 1) {
instance.array[index] = true;
}
index = 8808;
while (index <= 8809) : (index += 1) {
instance.array[index] = true;
}
index = 8812;
while (index <= 8813) : (index += 1) {
instance.array[index] = true;
}
index = 8816;
while (index <= 8833) : (index += 1) {
instance.array[index] = true;
}
index = 8836;
while (index <= 8837) : (index += 1) {
instance.array[index] = true;
}
index = 8840;
while (index <= 8852) : (index += 1) {
instance.array[index] = true;
}
index = 8854;
while (index <= 8856) : (index += 1) {
instance.array[index] = true;
}
index = 8858;
while (index <= 8868) : (index += 1) {
instance.array[index] = true;
}
index = 8870;
while (index <= 8894) : (index += 1) {
instance.array[index] = true;
}
index = 8896;
while (index <= 8959) : (index += 1) {
instance.array[index] = true;
}
index = 8960;
while (index <= 8967) : (index += 1) {
instance.array[index] = true;
}
instance.array[8968] = true;
instance.array[8969] = true;
instance.array[8970] = true;
instance.array[8971] = true;
index = 8972;
while (index <= 8977) : (index += 1) {
instance.array[index] = true;
}
index = 8979;
while (index <= 8985) : (index += 1) {
instance.array[index] = true;
}
index = 8988;
while (index <= 8991) : (index += 1) {
instance.array[index] = true;
}
index = 8992;
while (index <= 8993) : (index += 1) {
instance.array[index] = true;
}
index = 8994;
while (index <= 9000) : (index += 1) {
instance.array[index] = true;
}
index = 9003;
while (index <= 9083) : (index += 1) {
instance.array[index] = true;
}
instance.array[9084] = true;
index = 9085;
while (index <= 9114) : (index += 1) {
instance.array[index] = true;
}
index = 9115;
while (index <= 9139) : (index += 1) {
instance.array[index] = true;
}
index = 9140;
while (index <= 9179) : (index += 1) {
instance.array[index] = true;
}
index = 9180;
while (index <= 9185) : (index += 1) {
instance.array[index] = true;
}
index = 9186;
while (index <= 9192) : (index += 1) {
instance.array[index] = true;
}
index = 9197;
while (index <= 9199) : (index += 1) {
instance.array[index] = true;
}
index = 9201;
while (index <= 9202) : (index += 1) {
instance.array[index] = true;
}
index = 9204;
while (index <= 9254) : (index += 1) {
instance.array[index] = true;
}
index = 9280;
while (index <= 9290) : (index += 1) {
instance.array[index] = true;
}
instance.array[9450] = true;
index = 9548;
while (index <= 9551) : (index += 1) {
instance.array[index] = true;
}
index = 9588;
while (index <= 9599) : (index += 1) {
instance.array[index] = true;
}
index = 9616;
while (index <= 9617) : (index += 1) {
instance.array[index] = true;
}
index = 9622;
while (index <= 9631) : (index += 1) {
instance.array[index] = true;
}
instance.array[9634] = true;
index = 9642;
while (index <= 9649) : (index += 1) {
instance.array[index] = true;
}
index = 9652;
while (index <= 9653) : (index += 1) {
instance.array[index] = true;
}
index = 9656;
while (index <= 9659) : (index += 1) {
instance.array[index] = true;
}
index = 9662;
while (index <= 9663) : (index += 1) {
instance.array[index] = true;
}
index = 9666;
while (index <= 9669) : (index += 1) {
instance.array[index] = true;
}
index = 9673;
while (index <= 9674) : (index += 1) {
instance.array[index] = true;
}
index = 9676;
while (index <= 9677) : (index += 1) {
instance.array[index] = true;
}
index = 9682;
while (index <= 9697) : (index += 1) {
instance.array[index] = true;
}
index = 9702;
while (index <= 9710) : (index += 1) {
instance.array[index] = true;
}
index = 9712;
while (index <= 9719) : (index += 1) {
instance.array[index] = true;
}
index = 9720;
while (index <= 9724) : (index += 1) {
instance.array[index] = true;
}
instance.array[9727] = true;
index = 9728;
while (index <= 9732) : (index += 1) {
instance.array[index] = true;
}
index = 9735;
while (index <= 9736) : (index += 1) {
instance.array[index] = true;
}
index = 9738;
while (index <= 9741) : (index += 1) {
instance.array[index] = true;
}
index = 9744;
while (index <= 9747) : (index += 1) {
instance.array[index] = true;
}
index = 9750;
while (index <= 9755) : (index += 1) {
instance.array[index] = true;
}
instance.array[9757] = true;
index = 9759;
while (index <= 9791) : (index += 1) {
instance.array[index] = true;
}
instance.array[9793] = true;
index = 9795;
while (index <= 9799) : (index += 1) {
instance.array[index] = true;
}
index = 9812;
while (index <= 9823) : (index += 1) {
instance.array[index] = true;
}
instance.array[9826] = true;
instance.array[9830] = true;
instance.array[9835] = true;
instance.array[9838] = true;
index = 9840;
while (index <= 9854) : (index += 1) {
instance.array[index] = true;
}
index = 9856;
while (index <= 9874) : (index += 1) {
instance.array[index] = true;
}
index = 9876;
while (index <= 9885) : (index += 1) {
instance.array[index] = true;
}
instance.array[9888] = true;
index = 9890;
while (index <= 9897) : (index += 1) {
instance.array[index] = true;
}
index = 9900;
while (index <= 9916) : (index += 1) {
instance.array[index] = true;
}
index = 9920;
while (index <= 9923) : (index += 1) {
instance.array[index] = true;
}
instance.array[9954] = true;
index = 9956;
while (index <= 9959) : (index += 1) {
instance.array[index] = true;
}
index = 9984;
while (index <= 9988) : (index += 1) {
instance.array[index] = true;
}
index = 9990;
while (index <= 9993) : (index += 1) {
instance.array[index] = true;
}
index = 9996;
while (index <= 10023) : (index += 1) {
instance.array[index] = true;
}
index = 10025;
while (index <= 10044) : (index += 1) {
instance.array[index] = true;
}
index = 10046;
while (index <= 10059) : (index += 1) {
instance.array[index] = true;
}
instance.array[10061] = true;
index = 10063;
while (index <= 10066) : (index += 1) {
instance.array[index] = true;
}
instance.array[10070] = true;
index = 10072;
while (index <= 10087) : (index += 1) {
instance.array[index] = true;
}
instance.array[10088] = true;
instance.array[10089] = true;
instance.array[10090] = true;
instance.array[10091] = true;
instance.array[10092] = true;
instance.array[10093] = true;
instance.array[10094] = true;
instance.array[10095] = true;
instance.array[10096] = true;
instance.array[10097] = true;
instance.array[10098] = true;
instance.array[10099] = true;
instance.array[10100] = true;
instance.array[10101] = true;
index = 10112;
while (index <= 10131) : (index += 1) {
instance.array[index] = true;
}
instance.array[10132] = true;
index = 10136;
while (index <= 10159) : (index += 1) {
instance.array[index] = true;
}
index = 10161;
while (index <= 10174) : (index += 1) {
instance.array[index] = true;
}
index = 10176;
while (index <= 10180) : (index += 1) {
instance.array[index] = true;
}
instance.array[10181] = true;
instance.array[10182] = true;
index = 10183;
while (index <= 10213) : (index += 1) {
instance.array[index] = true;
}
instance.array[10222] = true;
instance.array[10223] = true;
index = 10224;
while (index <= 10239) : (index += 1) {
instance.array[index] = true;
}
index = 10240;
while (index <= 10495) : (index += 1) {
instance.array[index] = true;
}
index = 10496;
while (index <= 10626) : (index += 1) {
instance.array[index] = true;
}
instance.array[10627] = true;
instance.array[10628] = true;
instance.array[10631] = true;
instance.array[10632] = true;
instance.array[10633] = true;
instance.array[10634] = true;
instance.array[10635] = true;
instance.array[10636] = true;
instance.array[10637] = true;
instance.array[10638] = true;
instance.array[10639] = true;
instance.array[10640] = true;
instance.array[10641] = true;
instance.array[10642] = true;
instance.array[10643] = true;
instance.array[10644] = true;
instance.array[10645] = true;
instance.array[10646] = true;
instance.array[10647] = true;
instance.array[10648] = true;
index = 10649;
while (index <= 10711) : (index += 1) {
instance.array[index] = true;
}
instance.array[10712] = true;
instance.array[10713] = true;
instance.array[10714] = true;
instance.array[10715] = true;
index = 10716;
while (index <= 10747) : (index += 1) {
instance.array[index] = true;
}
instance.array[10748] = true;
instance.array[10749] = true;
index = 10750;
while (index <= 11007) : (index += 1) {
instance.array[index] = true;
}
index = 11008;
while (index <= 11034) : (index += 1) {
instance.array[index] = true;
}
index = 11037;
while (index <= 11055) : (index += 1) {
instance.array[index] = true;
}
index = 11056;
while (index <= 11076) : (index += 1) {
instance.array[index] = true;
}
index = 11077;
while (index <= 11078) : (index += 1) {
instance.array[index] = true;
}
index = 11079;
while (index <= 11084) : (index += 1) {
instance.array[index] = true;
}
index = 11085;
while (index <= 11087) : (index += 1) {
instance.array[index] = true;
}
index = 11089;
while (index <= 11092) : (index += 1) {
instance.array[index] = true;
}
index = 11098;
while (index <= 11123) : (index += 1) {
instance.array[index] = true;
}
index = 11126;
while (index <= 11157) : (index += 1) {
instance.array[index] = true;
}
index = 11159;
while (index <= 11263) : (index += 1) {
instance.array[index] = true;
}
index = 11264;
while (index <= 11310) : (index += 1) {
instance.array[index] = true;
}
index = 11312;
while (index <= 11358) : (index += 1) {
instance.array[index] = true;
}
index = 11360;
while (index <= 11387) : (index += 1) {
instance.array[index] = true;
}
index = 11388;
while (index <= 11389) : (index += 1) {
instance.array[index] = true;
}
index = 11390;
while (index <= 11492) : (index += 1) {
instance.array[index] = true;
}
index = 11493;
while (index <= 11498) : (index += 1) {
instance.array[index] = true;
}
index = 11499;
while (index <= 11502) : (index += 1) {
instance.array[index] = true;
}
index = 11503;
while (index <= 11505) : (index += 1) {
instance.array[index] = true;
}
index = 11506;
while (index <= 11507) : (index += 1) {
instance.array[index] = true;
}
index = 11513;
while (index <= 11516) : (index += 1) {
instance.array[index] = true;
}
instance.array[11517] = true;
index = 11518;
while (index <= 11519) : (index += 1) {
instance.array[index] = true;
}
index = 11520;
while (index <= 11557) : (index += 1) {
instance.array[index] = true;
}
instance.array[11559] = true;
instance.array[11565] = true;
index = 11568;
while (index <= 11623) : (index += 1) {
instance.array[index] = true;
}
instance.array[11631] = true;
instance.array[11632] = true;
instance.array[11647] = true;
index = 11648;
while (index <= 11670) : (index += 1) {
instance.array[index] = true;
}
index = 11680;
while (index <= 11686) : (index += 1) {
instance.array[index] = true;
}
index = 11688;
while (index <= 11694) : (index += 1) {
instance.array[index] = true;
}
index = 11696;
while (index <= 11702) : (index += 1) {
instance.array[index] = true;
}
index = 11704;
while (index <= 11710) : (index += 1) {
instance.array[index] = true;
}
index = 11712;
while (index <= 11718) : (index += 1) {
instance.array[index] = true;
}
index = 11720;
while (index <= 11726) : (index += 1) {
instance.array[index] = true;
}
index = 11728;
while (index <= 11734) : (index += 1) {
instance.array[index] = true;
}
index = 11736;
while (index <= 11742) : (index += 1) {
instance.array[index] = true;
}
index = 11744;
while (index <= 11775) : (index += 1) {
instance.array[index] = true;
}
index = 11776;
while (index <= 11777) : (index += 1) {
instance.array[index] = true;
}
instance.array[11778] = true;
instance.array[11779] = true;
instance.array[11780] = true;
instance.array[11781] = true;
index = 11782;
while (index <= 11784) : (index += 1) {
instance.array[index] = true;
}
instance.array[11785] = true;
instance.array[11786] = true;
instance.array[11787] = true;
instance.array[11788] = true;
instance.array[11789] = true;
index = 11790;
while (index <= 11798) : (index += 1) {
instance.array[index] = true;
}
instance.array[11799] = true;
index = 11800;
while (index <= 11801) : (index += 1) {
instance.array[index] = true;
}
instance.array[11802] = true;
instance.array[11803] = true;
instance.array[11804] = true;
instance.array[11805] = true;
index = 11806;
while (index <= 11807) : (index += 1) {
instance.array[index] = true;
}
instance.array[11808] = true;
instance.array[11809] = true;
instance.array[11810] = true;
instance.array[11811] = true;
instance.array[11812] = true;
instance.array[11813] = true;
instance.array[11814] = true;
instance.array[11815] = true;
instance.array[11816] = true;
instance.array[11817] = true;
index = 11818;
while (index <= 11822) : (index += 1) {
instance.array[index] = true;
}
instance.array[11823] = true;
index = 11824;
while (index <= 11833) : (index += 1) {
instance.array[index] = true;
}
index = 11834;
while (index <= 11835) : (index += 1) {
instance.array[index] = true;
}
index = 11836;
while (index <= 11839) : (index += 1) {
instance.array[index] = true;
}
instance.array[11840] = true;
instance.array[11841] = true;
instance.array[11842] = true;
index = 11843;
while (index <= 11855) : (index += 1) {
instance.array[index] = true;
}
index = 11856;
while (index <= 11857) : (index += 1) {
instance.array[index] = true;
}
instance.array[11858] = true;
instance.array[12351] = true;
index = 19904;
while (index <= 19967) : (index += 1) {
instance.array[index] = true;
}
index = 42192;
while (index <= 42231) : (index += 1) {
instance.array[index] = true;
}
index = 42232;
while (index <= 42237) : (index += 1) {
instance.array[index] = true;
}
index = 42238;
while (index <= 42239) : (index += 1) {
instance.array[index] = true;
}
index = 42240;
while (index <= 42507) : (index += 1) {
instance.array[index] = true;
}
instance.array[42508] = true;
index = 42509;
while (index <= 42511) : (index += 1) {
instance.array[index] = true;
}
index = 42512;
while (index <= 42527) : (index += 1) {
instance.array[index] = true;
}
index = 42528;
while (index <= 42537) : (index += 1) {
instance.array[index] = true;
}
index = 42538;
while (index <= 42539) : (index += 1) {
instance.array[index] = true;
}
index = 42560;
while (index <= 42605) : (index += 1) {
instance.array[index] = true;
}
instance.array[42606] = true;
instance.array[42607] = true;
index = 42608;
while (index <= 42610) : (index += 1) {
instance.array[index] = true;
}
instance.array[42611] = true;
index = 42612;
while (index <= 42621) : (index += 1) {
instance.array[index] = true;
}
instance.array[42622] = true;
instance.array[42623] = true;
index = 42624;
while (index <= 42651) : (index += 1) {
instance.array[index] = true;
}
index = 42652;
while (index <= 42653) : (index += 1) {
instance.array[index] = true;
}
index = 42654;
while (index <= 42655) : (index += 1) {
instance.array[index] = true;
}
index = 42656;
while (index <= 42725) : (index += 1) {
instance.array[index] = true;
}
index = 42726;
while (index <= 42735) : (index += 1) {
instance.array[index] = true;
}
index = 42736;
while (index <= 42737) : (index += 1) {
instance.array[index] = true;
}
index = 42738;
while (index <= 42743) : (index += 1) {
instance.array[index] = true;
}
index = 42752;
while (index <= 42774) : (index += 1) {
instance.array[index] = true;
}
index = 42775;
while (index <= 42783) : (index += 1) {
instance.array[index] = true;
}
index = 42784;
while (index <= 42785) : (index += 1) {
instance.array[index] = true;
}
index = 42786;
while (index <= 42863) : (index += 1) {
instance.array[index] = true;
}
instance.array[42864] = true;
index = 42865;
while (index <= 42887) : (index += 1) {
instance.array[index] = true;
}
instance.array[42888] = true;
index = 42889;
while (index <= 42890) : (index += 1) {
instance.array[index] = true;
}
index = 42891;
while (index <= 42894) : (index += 1) {
instance.array[index] = true;
}
instance.array[42895] = true;
index = 42896;
while (index <= 42943) : (index += 1) {
instance.array[index] = true;
}
index = 42946;
while (index <= 42954) : (index += 1) {
instance.array[index] = true;
}
index = 42997;
while (index <= 42998) : (index += 1) {
instance.array[index] = true;
}
instance.array[42999] = true;
index = 43000;
while (index <= 43001) : (index += 1) {
instance.array[index] = true;
}
instance.array[43002] = true;
index = 43003;
while (index <= 43009) : (index += 1) {
instance.array[index] = true;
}
instance.array[43010] = true;
index = 43011;
while (index <= 43013) : (index += 1) {
instance.array[index] = true;
}
instance.array[43014] = true;
index = 43015;
while (index <= 43018) : (index += 1) {
instance.array[index] = true;
}
instance.array[43019] = true;
index = 43020;
while (index <= 43042) : (index += 1) {
instance.array[index] = true;
}
index = 43043;
while (index <= 43044) : (index += 1) {
instance.array[index] = true;
}
index = 43045;
while (index <= 43046) : (index += 1) {
instance.array[index] = true;
}
instance.array[43047] = true;
index = 43048;
while (index <= 43051) : (index += 1) {
instance.array[index] = true;
}
instance.array[43052] = true;
index = 43056;
while (index <= 43061) : (index += 1) {
instance.array[index] = true;
}
index = 43062;
while (index <= 43063) : (index += 1) {
instance.array[index] = true;
}
instance.array[43064] = true;
instance.array[43065] = true;
index = 43072;
while (index <= 43123) : (index += 1) {
instance.array[index] = true;
}
index = 43124;
while (index <= 43127) : (index += 1) {
instance.array[index] = true;
}
index = 43136;
while (index <= 43137) : (index += 1) {
instance.array[index] = true;
}
index = 43138;
while (index <= 43187) : (index += 1) {
instance.array[index] = true;
}
index = 43188;
while (index <= 43203) : (index += 1) {
instance.array[index] = true;
}
index = 43204;
while (index <= 43205) : (index += 1) {
instance.array[index] = true;
}
index = 43214;
while (index <= 43215) : (index += 1) {
instance.array[index] = true;
}
index = 43216;
while (index <= 43225) : (index += 1) {
instance.array[index] = true;
}
index = 43232;
while (index <= 43249) : (index += 1) {
instance.array[index] = true;
}
index = 43250;
while (index <= 43255) : (index += 1) {
instance.array[index] = true;
}
index = 43256;
while (index <= 43258) : (index += 1) {
instance.array[index] = true;
}
instance.array[43259] = true;
instance.array[43260] = true;
index = 43261;
while (index <= 43262) : (index += 1) {
instance.array[index] = true;
}
instance.array[43263] = true;
index = 43264;
while (index <= 43273) : (index += 1) {
instance.array[index] = true;
}
index = 43274;
while (index <= 43301) : (index += 1) {
instance.array[index] = true;
}
index = 43302;
while (index <= 43309) : (index += 1) {
instance.array[index] = true;
}
index = 43310;
while (index <= 43311) : (index += 1) {
instance.array[index] = true;
}
index = 43312;
while (index <= 43334) : (index += 1) {
instance.array[index] = true;
}
index = 43335;
while (index <= 43345) : (index += 1) {
instance.array[index] = true;
}
index = 43346;
while (index <= 43347) : (index += 1) {
instance.array[index] = true;
}
instance.array[43359] = true;
index = 43392;
while (index <= 43394) : (index += 1) {
instance.array[index] = true;
}
instance.array[43395] = true;
index = 43396;
while (index <= 43442) : (index += 1) {
instance.array[index] = true;
}
instance.array[43443] = true;
index = 43444;
while (index <= 43445) : (index += 1) {
instance.array[index] = true;
}
index = 43446;
while (index <= 43449) : (index += 1) {
instance.array[index] = true;
}
index = 43450;
while (index <= 43451) : (index += 1) {
instance.array[index] = true;
}
index = 43452;
while (index <= 43453) : (index += 1) {
instance.array[index] = true;
}
index = 43454;
while (index <= 43456) : (index += 1) {
instance.array[index] = true;
}
index = 43457;
while (index <= 43469) : (index += 1) {
instance.array[index] = true;
}
instance.array[43471] = true;
index = 43472;
while (index <= 43481) : (index += 1) {
instance.array[index] = true;
}
index = 43486;
while (index <= 43487) : (index += 1) {
instance.array[index] = true;
}
index = 43488;
while (index <= 43492) : (index += 1) {
instance.array[index] = true;
}
instance.array[43493] = true;
instance.array[43494] = true;
index = 43495;
while (index <= 43503) : (index += 1) {
instance.array[index] = true;
}
index = 43504;
while (index <= 43513) : (index += 1) {
instance.array[index] = true;
}
index = 43514;
while (index <= 43518) : (index += 1) {
instance.array[index] = true;
}
index = 43520;
while (index <= 43560) : (index += 1) {
instance.array[index] = true;
}
index = 43561;
while (index <= 43566) : (index += 1) {
instance.array[index] = true;
}
index = 43567;
while (index <= 43568) : (index += 1) {
instance.array[index] = true;
}
index = 43569;
while (index <= 43570) : (index += 1) {
instance.array[index] = true;
}
index = 43571;
while (index <= 43572) : (index += 1) {
instance.array[index] = true;
}
index = 43573;
while (index <= 43574) : (index += 1) {
instance.array[index] = true;
}
index = 43584;
while (index <= 43586) : (index += 1) {
instance.array[index] = true;
}
instance.array[43587] = true;
index = 43588;
while (index <= 43595) : (index += 1) {
instance.array[index] = true;
}
instance.array[43596] = true;
instance.array[43597] = true;
index = 43600;
while (index <= 43609) : (index += 1) {
instance.array[index] = true;
}
index = 43612;
while (index <= 43615) : (index += 1) {
instance.array[index] = true;
}
index = 43616;
while (index <= 43631) : (index += 1) {
instance.array[index] = true;
}
instance.array[43632] = true;
index = 43633;
while (index <= 43638) : (index += 1) {
instance.array[index] = true;
}
index = 43639;
while (index <= 43641) : (index += 1) {
instance.array[index] = true;
}
instance.array[43642] = true;
instance.array[43643] = true;
instance.array[43644] = true;
instance.array[43645] = true;
index = 43646;
while (index <= 43695) : (index += 1) {
instance.array[index] = true;
}
instance.array[43696] = true;
instance.array[43697] = true;
index = 43698;
while (index <= 43700) : (index += 1) {
instance.array[index] = true;
}
index = 43701;
while (index <= 43702) : (index += 1) {
instance.array[index] = true;
}
index = 43703;
while (index <= 43704) : (index += 1) {
instance.array[index] = true;
}
index = 43705;
while (index <= 43709) : (index += 1) {
instance.array[index] = true;
}
index = 43710;
while (index <= 43711) : (index += 1) {
instance.array[index] = true;
}
instance.array[43712] = true;
instance.array[43713] = true;
instance.array[43714] = true;
index = 43739;
while (index <= 43740) : (index += 1) {
instance.array[index] = true;
}
instance.array[43741] = true;
index = 43742;
while (index <= 43743) : (index += 1) {
instance.array[index] = true;
}
index = 43744;
while (index <= 43754) : (index += 1) {
instance.array[index] = true;
}
instance.array[43755] = true;
index = 43756;
while (index <= 43757) : (index += 1) {
instance.array[index] = true;
}
index = 43758;
while (index <= 43759) : (index += 1) {
instance.array[index] = true;
}
index = 43760;
while (index <= 43761) : (index += 1) {
instance.array[index] = true;
}
instance.array[43762] = true;
index = 43763;
while (index <= 43764) : (index += 1) {
instance.array[index] = true;
}
instance.array[43765] = true;
instance.array[43766] = true;
index = 43777;
while (index <= 43782) : (index += 1) {
instance.array[index] = true;
}
index = 43785;
while (index <= 43790) : (index += 1) {
instance.array[index] = true;
}
index = 43793;
while (index <= 43798) : (index += 1) {
instance.array[index] = true;
}
index = 43808;
while (index <= 43814) : (index += 1) {
instance.array[index] = true;
}
index = 43816;
while (index <= 43822) : (index += 1) {
instance.array[index] = true;
}
index = 43824;
while (index <= 43866) : (index += 1) {
instance.array[index] = true;
}
instance.array[43867] = true;
index = 43868;
while (index <= 43871) : (index += 1) {
instance.array[index] = true;
}
index = 43872;
while (index <= 43880) : (index += 1) {
instance.array[index] = true;
}
instance.array[43881] = true;
index = 43882;
while (index <= 43883) : (index += 1) {
instance.array[index] = true;
}
index = 43888;
while (index <= 43967) : (index += 1) {
instance.array[index] = true;
}
index = 43968;
while (index <= 44002) : (index += 1) {
instance.array[index] = true;
}
index = 44003;
while (index <= 44004) : (index += 1) {
instance.array[index] = true;
}
instance.array[44005] = true;
index = 44006;
while (index <= 44007) : (index += 1) {
instance.array[index] = true;
}
instance.array[44008] = true;
index = 44009;
while (index <= 44010) : (index += 1) {
instance.array[index] = true;
}
instance.array[44011] = true;
instance.array[44012] = true;
instance.array[44013] = true;
index = 44016;
while (index <= 44025) : (index += 1) {
instance.array[index] = true;
}
index = 55216;
while (index <= 55238) : (index += 1) {
instance.array[index] = true;
}
index = 55243;
while (index <= 55291) : (index += 1) {
instance.array[index] = true;
}
index = 64256;
while (index <= 64262) : (index += 1) {
instance.array[index] = true;
}
index = 64275;
while (index <= 64279) : (index += 1) {
instance.array[index] = true;
}
instance.array[64285] = true;
instance.array[64286] = true;
index = 64287;
while (index <= 64296) : (index += 1) {
instance.array[index] = true;
}
instance.array[64297] = true;
index = 64298;
while (index <= 64310) : (index += 1) {
instance.array[index] = true;
}
index = 64312;
while (index <= 64316) : (index += 1) {
instance.array[index] = true;
}
instance.array[64318] = true;
index = 64320;
while (index <= 64321) : (index += 1) {
instance.array[index] = true;
}
index = 64323;
while (index <= 64324) : (index += 1) {
instance.array[index] = true;
}
index = 64326;
while (index <= 64433) : (index += 1) {
instance.array[index] = true;
}
index = 64434;
while (index <= 64449) : (index += 1) {
instance.array[index] = true;
}
index = 64467;
while (index <= 64829) : (index += 1) {
instance.array[index] = true;
}
instance.array[64830] = true;
instance.array[64831] = true;
index = 64848;
while (index <= 64911) : (index += 1) {
instance.array[index] = true;
}
index = 64914;
while (index <= 64967) : (index += 1) {
instance.array[index] = true;
}
index = 65008;
while (index <= 65019) : (index += 1) {
instance.array[index] = true;
}
instance.array[65020] = true;
instance.array[65021] = true;
index = 65056;
while (index <= 65071) : (index += 1) {
instance.array[index] = true;
}
index = 65136;
while (index <= 65140) : (index += 1) {
instance.array[index] = true;
}
index = 65142;
while (index <= 65276) : (index += 1) {
instance.array[index] = true;
}
instance.array[65279] = true;
index = 65529;
while (index <= 65531) : (index += 1) {
instance.array[index] = true;
}
instance.array[65532] = true;
index = 65536;
while (index <= 65547) : (index += 1) {
instance.array[index] = true;
}
index = 65549;
while (index <= 65574) : (index += 1) {
instance.array[index] = true;
}
index = 65576;
while (index <= 65594) : (index += 1) {
instance.array[index] = true;
}
index = 65596;
while (index <= 65597) : (index += 1) {
instance.array[index] = true;
}
index = 65599;
while (index <= 65613) : (index += 1) {
instance.array[index] = true;
}
index = 65616;
while (index <= 65629) : (index += 1) {
instance.array[index] = true;
}
index = 65664;
while (index <= 65786) : (index += 1) {
instance.array[index] = true;
}
index = 65792;
while (index <= 65794) : (index += 1) {
instance.array[index] = true;
}
index = 65799;
while (index <= 65843) : (index += 1) {
instance.array[index] = true;
}
index = 65847;
while (index <= 65855) : (index += 1) {
instance.array[index] = true;
}
index = 65856;
while (index <= 65908) : (index += 1) {
instance.array[index] = true;
}
index = 65909;
while (index <= 65912) : (index += 1) {
instance.array[index] = true;
}
index = 65913;
while (index <= 65929) : (index += 1) {
instance.array[index] = true;
}
index = 65930;
while (index <= 65931) : (index += 1) {
instance.array[index] = true;
}
index = 65932;
while (index <= 65934) : (index += 1) {
instance.array[index] = true;
}
index = 65936;
while (index <= 65948) : (index += 1) {
instance.array[index] = true;
}
instance.array[65952] = true;
index = 66000;
while (index <= 66044) : (index += 1) {
instance.array[index] = true;
}
instance.array[66045] = true;
index = 66176;
while (index <= 66204) : (index += 1) {
instance.array[index] = true;
}
index = 66208;
while (index <= 66256) : (index += 1) {
instance.array[index] = true;
}
instance.array[66272] = true;
index = 66273;
while (index <= 66299) : (index += 1) {
instance.array[index] = true;
}
index = 66304;
while (index <= 66335) : (index += 1) {
instance.array[index] = true;
}
index = 66336;
while (index <= 66339) : (index += 1) {
instance.array[index] = true;
}
index = 66349;
while (index <= 66368) : (index += 1) {
instance.array[index] = true;
}
instance.array[66369] = true;
index = 66370;
while (index <= 66377) : (index += 1) {
instance.array[index] = true;
}
instance.array[66378] = true;
index = 66384;
while (index <= 66421) : (index += 1) {
instance.array[index] = true;
}
index = 66422;
while (index <= 66426) : (index += 1) {
instance.array[index] = true;
}
index = 66432;
while (index <= 66461) : (index += 1) {
instance.array[index] = true;
}
instance.array[66463] = true;
index = 66464;
while (index <= 66499) : (index += 1) {
instance.array[index] = true;
}
index = 66504;
while (index <= 66511) : (index += 1) {
instance.array[index] = true;
}
instance.array[66512] = true;
index = 66513;
while (index <= 66517) : (index += 1) {
instance.array[index] = true;
}
index = 66560;
while (index <= 66639) : (index += 1) {
instance.array[index] = true;
}
index = 66640;
while (index <= 66717) : (index += 1) {
instance.array[index] = true;
}
index = 66720;
while (index <= 66729) : (index += 1) {
instance.array[index] = true;
}
index = 66736;
while (index <= 66771) : (index += 1) {
instance.array[index] = true;
}
index = 66776;
while (index <= 66811) : (index += 1) {
instance.array[index] = true;
}
index = 66816;
while (index <= 66855) : (index += 1) {
instance.array[index] = true;
}
index = 66864;
while (index <= 66915) : (index += 1) {
instance.array[index] = true;
}
instance.array[66927] = true;
index = 67072;
while (index <= 67382) : (index += 1) {
instance.array[index] = true;
}
index = 67392;
while (index <= 67413) : (index += 1) {
instance.array[index] = true;
}
index = 67424;
while (index <= 67431) : (index += 1) {
instance.array[index] = true;
}
index = 67584;
while (index <= 67589) : (index += 1) {
instance.array[index] = true;
}
instance.array[67592] = true;
index = 67594;
while (index <= 67637) : (index += 1) {
instance.array[index] = true;
}
index = 67639;
while (index <= 67640) : (index += 1) {
instance.array[index] = true;
}
instance.array[67644] = true;
index = 67647;
while (index <= 67669) : (index += 1) {
instance.array[index] = true;
}
instance.array[67671] = true;
index = 67672;
while (index <= 67679) : (index += 1) {
instance.array[index] = true;
}
index = 67680;
while (index <= 67702) : (index += 1) {
instance.array[index] = true;
}
index = 67703;
while (index <= 67704) : (index += 1) {
instance.array[index] = true;
}
index = 67705;
while (index <= 67711) : (index += 1) {
instance.array[index] = true;
}
index = 67712;
while (index <= 67742) : (index += 1) {
instance.array[index] = true;
}
index = 67751;
while (index <= 67759) : (index += 1) {
instance.array[index] = true;
}
index = 67808;
while (index <= 67826) : (index += 1) {
instance.array[index] = true;
}
index = 67828;
while (index <= 67829) : (index += 1) {
instance.array[index] = true;
}
index = 67835;
while (index <= 67839) : (index += 1) {
instance.array[index] = true;
}
index = 67840;
while (index <= 67861) : (index += 1) {
instance.array[index] = true;
}
index = 67862;
while (index <= 67867) : (index += 1) {
instance.array[index] = true;
}
instance.array[67871] = true;
index = 67872;
while (index <= 67897) : (index += 1) {
instance.array[index] = true;
}
instance.array[67903] = true;
index = 67968;
while (index <= 68023) : (index += 1) {
instance.array[index] = true;
}
index = 68028;
while (index <= 68029) : (index += 1) {
instance.array[index] = true;
}
index = 68030;
while (index <= 68031) : (index += 1) {
instance.array[index] = true;
}
index = 68032;
while (index <= 68047) : (index += 1) {
instance.array[index] = true;
}
index = 68050;
while (index <= 68095) : (index += 1) {
instance.array[index] = true;
}
instance.array[68096] = true;
index = 68097;
while (index <= 68099) : (index += 1) {
instance.array[index] = true;
}
index = 68101;
while (index <= 68102) : (index += 1) {
instance.array[index] = true;
}
index = 68108;
while (index <= 68111) : (index += 1) {
instance.array[index] = true;
}
index = 68112;
while (index <= 68115) : (index += 1) {
instance.array[index] = true;
}
index = 68117;
while (index <= 68119) : (index += 1) {
instance.array[index] = true;
}
index = 68121;
while (index <= 68149) : (index += 1) {
instance.array[index] = true;
}
index = 68152;
while (index <= 68154) : (index += 1) {
instance.array[index] = true;
}
instance.array[68159] = true;
index = 68160;
while (index <= 68168) : (index += 1) {
instance.array[index] = true;
}
index = 68176;
while (index <= 68184) : (index += 1) {
instance.array[index] = true;
}
index = 68192;
while (index <= 68220) : (index += 1) {
instance.array[index] = true;
}
index = 68221;
while (index <= 68222) : (index += 1) {
instance.array[index] = true;
}
instance.array[68223] = true;
index = 68224;
while (index <= 68252) : (index += 1) {
instance.array[index] = true;
}
index = 68253;
while (index <= 68255) : (index += 1) {
instance.array[index] = true;
}
index = 68288;
while (index <= 68295) : (index += 1) {
instance.array[index] = true;
}
instance.array[68296] = true;
index = 68297;
while (index <= 68324) : (index += 1) {
instance.array[index] = true;
}
index = 68325;
while (index <= 68326) : (index += 1) {
instance.array[index] = true;
}
index = 68331;
while (index <= 68335) : (index += 1) {
instance.array[index] = true;
}
index = 68336;
while (index <= 68342) : (index += 1) {
instance.array[index] = true;
}
index = 68352;
while (index <= 68405) : (index += 1) {
instance.array[index] = true;
}
index = 68409;
while (index <= 68415) : (index += 1) {
instance.array[index] = true;
}
index = 68416;
while (index <= 68437) : (index += 1) {
instance.array[index] = true;
}
index = 68440;
while (index <= 68447) : (index += 1) {
instance.array[index] = true;
}
index = 68448;
while (index <= 68466) : (index += 1) {
instance.array[index] = true;
}
index = 68472;
while (index <= 68479) : (index += 1) {
instance.array[index] = true;
}
index = 68480;
while (index <= 68497) : (index += 1) {
instance.array[index] = true;
}
index = 68505;
while (index <= 68508) : (index += 1) {
instance.array[index] = true;
}
index = 68521;
while (index <= 68527) : (index += 1) {
instance.array[index] = true;
}
index = 68608;
while (index <= 68680) : (index += 1) {
instance.array[index] = true;
}
index = 68736;
while (index <= 68786) : (index += 1) {
instance.array[index] = true;
}
index = 68800;
while (index <= 68850) : (index += 1) {
instance.array[index] = true;
}
index = 68858;
while (index <= 68863) : (index += 1) {
instance.array[index] = true;
}
index = 68864;
while (index <= 68899) : (index += 1) {
instance.array[index] = true;
}
index = 68900;
while (index <= 68903) : (index += 1) {
instance.array[index] = true;
}
index = 68912;
while (index <= 68921) : (index += 1) {
instance.array[index] = true;
}
index = 69216;
while (index <= 69246) : (index += 1) {
instance.array[index] = true;
}
index = 69248;
while (index <= 69289) : (index += 1) {
instance.array[index] = true;
}
index = 69291;
while (index <= 69292) : (index += 1) {
instance.array[index] = true;
}
instance.array[69293] = true;
index = 69296;
while (index <= 69297) : (index += 1) {
instance.array[index] = true;
}
index = 69376;
while (index <= 69404) : (index += 1) {
instance.array[index] = true;
}
index = 69405;
while (index <= 69414) : (index += 1) {
instance.array[index] = true;
}
instance.array[69415] = true;
index = 69424;
while (index <= 69445) : (index += 1) {
instance.array[index] = true;
}
index = 69446;
while (index <= 69456) : (index += 1) {
instance.array[index] = true;
}
index = 69457;
while (index <= 69460) : (index += 1) {
instance.array[index] = true;
}
index = 69461;
while (index <= 69465) : (index += 1) {
instance.array[index] = true;
}
index = 69552;
while (index <= 69572) : (index += 1) {
instance.array[index] = true;
}
index = 69573;
while (index <= 69579) : (index += 1) {
instance.array[index] = true;
}
index = 69600;
while (index <= 69622) : (index += 1) {
instance.array[index] = true;
}
instance.array[69632] = true;
instance.array[69633] = true;
instance.array[69634] = true;
index = 69635;
while (index <= 69687) : (index += 1) {
instance.array[index] = true;
}
index = 69688;
while (index <= 69702) : (index += 1) {
instance.array[index] = true;
}
index = 69703;
while (index <= 69709) : (index += 1) {
instance.array[index] = true;
}
index = 69714;
while (index <= 69733) : (index += 1) {
instance.array[index] = true;
}
index = 69734;
while (index <= 69743) : (index += 1) {
instance.array[index] = true;
}
index = 69759;
while (index <= 69761) : (index += 1) {
instance.array[index] = true;
}
instance.array[69762] = true;
index = 69763;
while (index <= 69807) : (index += 1) {
instance.array[index] = true;
}
index = 69808;
while (index <= 69810) : (index += 1) {
instance.array[index] = true;
}
index = 69811;
while (index <= 69814) : (index += 1) {
instance.array[index] = true;
}
index = 69815;
while (index <= 69816) : (index += 1) {
instance.array[index] = true;
}
index = 69817;
while (index <= 69818) : (index += 1) {
instance.array[index] = true;
}
index = 69819;
while (index <= 69820) : (index += 1) {
instance.array[index] = true;
}
instance.array[69821] = true;
index = 69822;
while (index <= 69825) : (index += 1) {
instance.array[index] = true;
}
instance.array[69837] = true;
index = 69840;
while (index <= 69864) : (index += 1) {
instance.array[index] = true;
}
index = 69872;
while (index <= 69881) : (index += 1) {
instance.array[index] = true;
}
index = 69888;
while (index <= 69890) : (index += 1) {
instance.array[index] = true;
}
index = 69891;
while (index <= 69926) : (index += 1) {
instance.array[index] = true;
}
index = 69927;
while (index <= 69931) : (index += 1) {
instance.array[index] = true;
}
instance.array[69932] = true;
index = 69933;
while (index <= 69940) : (index += 1) {
instance.array[index] = true;
}
index = 69942;
while (index <= 69951) : (index += 1) {
instance.array[index] = true;
}
index = 69952;
while (index <= 69955) : (index += 1) {
instance.array[index] = true;
}
instance.array[69956] = true;
index = 69957;
while (index <= 69958) : (index += 1) {
instance.array[index] = true;
}
instance.array[69959] = true;
index = 69968;
while (index <= 70002) : (index += 1) {
instance.array[index] = true;
}
instance.array[70003] = true;
index = 70004;
while (index <= 70005) : (index += 1) {
instance.array[index] = true;
}
instance.array[70006] = true;
index = 70016;
while (index <= 70017) : (index += 1) {
instance.array[index] = true;
}
instance.array[70018] = true;
index = 70019;
while (index <= 70066) : (index += 1) {
instance.array[index] = true;
}
index = 70067;
while (index <= 70069) : (index += 1) {
instance.array[index] = true;
}
index = 70070;
while (index <= 70078) : (index += 1) {
instance.array[index] = true;
}
index = 70079;
while (index <= 70080) : (index += 1) {
instance.array[index] = true;
}
index = 70081;
while (index <= 70084) : (index += 1) {
instance.array[index] = true;
}
index = 70085;
while (index <= 70088) : (index += 1) {
instance.array[index] = true;
}
index = 70089;
while (index <= 70092) : (index += 1) {
instance.array[index] = true;
}
instance.array[70093] = true;
instance.array[70094] = true;
instance.array[70095] = true;
index = 70096;
while (index <= 70105) : (index += 1) {
instance.array[index] = true;
}
instance.array[70106] = true;
instance.array[70107] = true;
instance.array[70108] = true;
index = 70109;
while (index <= 70111) : (index += 1) {
instance.array[index] = true;
}
index = 70113;
while (index <= 70132) : (index += 1) {
instance.array[index] = true;
}
index = 70144;
while (index <= 70161) : (index += 1) {
instance.array[index] = true;
}
index = 70163;
while (index <= 70187) : (index += 1) {
instance.array[index] = true;
}
index = 70188;
while (index <= 70190) : (index += 1) {
instance.array[index] = true;
}
index = 70191;
while (index <= 70193) : (index += 1) {
instance.array[index] = true;
}
index = 70194;
while (index <= 70195) : (index += 1) {
instance.array[index] = true;
}
instance.array[70196] = true;
instance.array[70197] = true;
index = 70198;
while (index <= 70199) : (index += 1) {
instance.array[index] = true;
}
index = 70200;
while (index <= 70205) : (index += 1) {
instance.array[index] = true;
}
instance.array[70206] = true;
index = 70272;
while (index <= 70278) : (index += 1) {
instance.array[index] = true;
}
instance.array[70280] = true;
index = 70282;
while (index <= 70285) : (index += 1) {
instance.array[index] = true;
}
index = 70287;
while (index <= 70301) : (index += 1) {
instance.array[index] = true;
}
index = 70303;
while (index <= 70312) : (index += 1) {
instance.array[index] = true;
}
instance.array[70313] = true;
index = 70320;
while (index <= 70366) : (index += 1) {
instance.array[index] = true;
}
instance.array[70367] = true;
index = 70368;
while (index <= 70370) : (index += 1) {
instance.array[index] = true;
}
index = 70371;
while (index <= 70378) : (index += 1) {
instance.array[index] = true;
}
index = 70384;
while (index <= 70393) : (index += 1) {
instance.array[index] = true;
}
index = 70400;
while (index <= 70401) : (index += 1) {
instance.array[index] = true;
}
index = 70402;
while (index <= 70403) : (index += 1) {
instance.array[index] = true;
}
index = 70405;
while (index <= 70412) : (index += 1) {
instance.array[index] = true;
}
index = 70415;
while (index <= 70416) : (index += 1) {
instance.array[index] = true;
}
index = 70419;
while (index <= 70440) : (index += 1) {
instance.array[index] = true;
}
index = 70442;
while (index <= 70448) : (index += 1) {
instance.array[index] = true;
}
index = 70450;
while (index <= 70451) : (index += 1) {
instance.array[index] = true;
}
index = 70453;
while (index <= 70457) : (index += 1) {
instance.array[index] = true;
}
index = 70459;
while (index <= 70460) : (index += 1) {
instance.array[index] = true;
}
instance.array[70461] = true;
index = 70462;
while (index <= 70463) : (index += 1) {
instance.array[index] = true;
}
instance.array[70464] = true;
index = 70465;
while (index <= 70468) : (index += 1) {
instance.array[index] = true;
}
index = 70471;
while (index <= 70472) : (index += 1) {
instance.array[index] = true;
}
index = 70475;
while (index <= 70477) : (index += 1) {
instance.array[index] = true;
}
instance.array[70480] = true;
instance.array[70487] = true;
index = 70493;
while (index <= 70497) : (index += 1) {
instance.array[index] = true;
}
index = 70498;
while (index <= 70499) : (index += 1) {
instance.array[index] = true;
}
index = 70502;
while (index <= 70508) : (index += 1) {
instance.array[index] = true;
}
index = 70512;
while (index <= 70516) : (index += 1) {
instance.array[index] = true;
}
index = 70656;
while (index <= 70708) : (index += 1) {
instance.array[index] = true;
}
index = 70709;
while (index <= 70711) : (index += 1) {
instance.array[index] = true;
}
index = 70712;
while (index <= 70719) : (index += 1) {
instance.array[index] = true;
}
index = 70720;
while (index <= 70721) : (index += 1) {
instance.array[index] = true;
}
index = 70722;
while (index <= 70724) : (index += 1) {
instance.array[index] = true;
}
instance.array[70725] = true;
instance.array[70726] = true;
index = 70727;
while (index <= 70730) : (index += 1) {
instance.array[index] = true;
}
index = 70731;
while (index <= 70735) : (index += 1) {
instance.array[index] = true;
}
index = 70736;
while (index <= 70745) : (index += 1) {
instance.array[index] = true;
}
index = 70746;
while (index <= 70747) : (index += 1) {
instance.array[index] = true;
}
instance.array[70749] = true;
instance.array[70750] = true;
index = 70751;
while (index <= 70753) : (index += 1) {
instance.array[index] = true;
}
index = 70784;
while (index <= 70831) : (index += 1) {
instance.array[index] = true;
}
index = 70832;
while (index <= 70834) : (index += 1) {
instance.array[index] = true;
}
index = 70835;
while (index <= 70840) : (index += 1) {
instance.array[index] = true;
}
instance.array[70841] = true;
instance.array[70842] = true;
index = 70843;
while (index <= 70846) : (index += 1) {
instance.array[index] = true;
}
index = 70847;
while (index <= 70848) : (index += 1) {
instance.array[index] = true;
}
instance.array[70849] = true;
index = 70850;
while (index <= 70851) : (index += 1) {
instance.array[index] = true;
}
index = 70852;
while (index <= 70853) : (index += 1) {
instance.array[index] = true;
}
instance.array[70854] = true;
instance.array[70855] = true;
index = 70864;
while (index <= 70873) : (index += 1) {
instance.array[index] = true;
}
index = 71040;
while (index <= 71086) : (index += 1) {
instance.array[index] = true;
}
index = 71087;
while (index <= 71089) : (index += 1) {
instance.array[index] = true;
}
index = 71090;
while (index <= 71093) : (index += 1) {
instance.array[index] = true;
}
index = 71096;
while (index <= 71099) : (index += 1) {
instance.array[index] = true;
}
index = 71100;
while (index <= 71101) : (index += 1) {
instance.array[index] = true;
}
instance.array[71102] = true;
index = 71103;
while (index <= 71104) : (index += 1) {
instance.array[index] = true;
}
index = 71105;
while (index <= 71127) : (index += 1) {
instance.array[index] = true;
}
index = 71128;
while (index <= 71131) : (index += 1) {
instance.array[index] = true;
}
index = 71132;
while (index <= 71133) : (index += 1) {
instance.array[index] = true;
}
index = 71168;
while (index <= 71215) : (index += 1) {
instance.array[index] = true;
}
index = 71216;
while (index <= 71218) : (index += 1) {
instance.array[index] = true;
}
index = 71219;
while (index <= 71226) : (index += 1) {
instance.array[index] = true;
}
index = 71227;
while (index <= 71228) : (index += 1) {
instance.array[index] = true;
}
instance.array[71229] = true;
instance.array[71230] = true;
index = 71231;
while (index <= 71232) : (index += 1) {
instance.array[index] = true;
}
index = 71233;
while (index <= 71235) : (index += 1) {
instance.array[index] = true;
}
instance.array[71236] = true;
index = 71248;
while (index <= 71257) : (index += 1) {
instance.array[index] = true;
}
index = 71264;
while (index <= 71276) : (index += 1) {
instance.array[index] = true;
}
index = 71296;
while (index <= 71338) : (index += 1) {
instance.array[index] = true;
}
instance.array[71339] = true;
instance.array[71340] = true;
instance.array[71341] = true;
index = 71342;
while (index <= 71343) : (index += 1) {
instance.array[index] = true;
}
index = 71344;
while (index <= 71349) : (index += 1) {
instance.array[index] = true;
}
instance.array[71350] = true;
instance.array[71351] = true;
instance.array[71352] = true;
index = 71360;
while (index <= 71369) : (index += 1) {
instance.array[index] = true;
}
index = 71424;
while (index <= 71450) : (index += 1) {
instance.array[index] = true;
}
index = 71453;
while (index <= 71455) : (index += 1) {
instance.array[index] = true;
}
index = 71456;
while (index <= 71457) : (index += 1) {
instance.array[index] = true;
}
index = 71458;
while (index <= 71461) : (index += 1) {
instance.array[index] = true;
}
instance.array[71462] = true;
index = 71463;
while (index <= 71467) : (index += 1) {
instance.array[index] = true;
}
index = 71472;
while (index <= 71481) : (index += 1) {
instance.array[index] = true;
}
index = 71482;
while (index <= 71483) : (index += 1) {
instance.array[index] = true;
}
index = 71484;
while (index <= 71486) : (index += 1) {
instance.array[index] = true;
}
instance.array[71487] = true;
index = 71680;
while (index <= 71723) : (index += 1) {
instance.array[index] = true;
}
index = 71724;
while (index <= 71726) : (index += 1) {
instance.array[index] = true;
}
index = 71727;
while (index <= 71735) : (index += 1) {
instance.array[index] = true;
}
instance.array[71736] = true;
index = 71737;
while (index <= 71738) : (index += 1) {
instance.array[index] = true;
}
instance.array[71739] = true;
index = 71840;
while (index <= 71903) : (index += 1) {
instance.array[index] = true;
}
index = 71904;
while (index <= 71913) : (index += 1) {
instance.array[index] = true;
}
index = 71914;
while (index <= 71922) : (index += 1) {
instance.array[index] = true;
}
index = 71935;
while (index <= 71942) : (index += 1) {
instance.array[index] = true;
}
instance.array[71945] = true;
index = 71948;
while (index <= 71955) : (index += 1) {
instance.array[index] = true;
}
index = 71957;
while (index <= 71958) : (index += 1) {
instance.array[index] = true;
}
index = 71960;
while (index <= 71983) : (index += 1) {
instance.array[index] = true;
}
index = 71984;
while (index <= 71989) : (index += 1) {
instance.array[index] = true;
}
index = 71991;
while (index <= 71992) : (index += 1) {
instance.array[index] = true;
}
index = 71995;
while (index <= 71996) : (index += 1) {
instance.array[index] = true;
}
instance.array[71997] = true;
instance.array[71998] = true;
instance.array[71999] = true;
instance.array[72000] = true;
instance.array[72001] = true;
instance.array[72002] = true;
instance.array[72003] = true;
index = 72004;
while (index <= 72006) : (index += 1) {
instance.array[index] = true;
}
index = 72016;
while (index <= 72025) : (index += 1) {
instance.array[index] = true;
}
index = 72096;
while (index <= 72103) : (index += 1) {
instance.array[index] = true;
}
index = 72106;
while (index <= 72144) : (index += 1) {
instance.array[index] = true;
}
index = 72145;
while (index <= 72147) : (index += 1) {
instance.array[index] = true;
}
index = 72148;
while (index <= 72151) : (index += 1) {
instance.array[index] = true;
}
index = 72154;
while (index <= 72155) : (index += 1) {
instance.array[index] = true;
}
index = 72156;
while (index <= 72159) : (index += 1) {
instance.array[index] = true;
}
instance.array[72160] = true;
instance.array[72161] = true;
instance.array[72162] = true;
instance.array[72163] = true;
instance.array[72164] = true;
instance.array[72192] = true;
index = 72193;
while (index <= 72202) : (index += 1) {
instance.array[index] = true;
}
index = 72203;
while (index <= 72242) : (index += 1) {
instance.array[index] = true;
}
index = 72243;
while (index <= 72248) : (index += 1) {
instance.array[index] = true;
}
instance.array[72249] = true;
instance.array[72250] = true;
index = 72251;
while (index <= 72254) : (index += 1) {
instance.array[index] = true;
}
index = 72255;
while (index <= 72262) : (index += 1) {
instance.array[index] = true;
}
instance.array[72263] = true;
instance.array[72272] = true;
index = 72273;
while (index <= 72278) : (index += 1) {
instance.array[index] = true;
}
index = 72279;
while (index <= 72280) : (index += 1) {
instance.array[index] = true;
}
index = 72281;
while (index <= 72283) : (index += 1) {
instance.array[index] = true;
}
index = 72284;
while (index <= 72329) : (index += 1) {
instance.array[index] = true;
}
index = 72330;
while (index <= 72342) : (index += 1) {
instance.array[index] = true;
}
instance.array[72343] = true;
index = 72344;
while (index <= 72345) : (index += 1) {
instance.array[index] = true;
}
index = 72346;
while (index <= 72348) : (index += 1) {
instance.array[index] = true;
}
instance.array[72349] = true;
index = 72350;
while (index <= 72354) : (index += 1) {
instance.array[index] = true;
}
index = 72384;
while (index <= 72440) : (index += 1) {
instance.array[index] = true;
}
index = 72704;
while (index <= 72712) : (index += 1) {
instance.array[index] = true;
}
index = 72714;
while (index <= 72750) : (index += 1) {
instance.array[index] = true;
}
instance.array[72751] = true;
index = 72752;
while (index <= 72758) : (index += 1) {
instance.array[index] = true;
}
index = 72760;
while (index <= 72765) : (index += 1) {
instance.array[index] = true;
}
instance.array[72766] = true;
instance.array[72767] = true;
instance.array[72768] = true;
index = 72769;
while (index <= 72773) : (index += 1) {
instance.array[index] = true;
}
index = 72784;
while (index <= 72793) : (index += 1) {
instance.array[index] = true;
}
index = 72794;
while (index <= 72812) : (index += 1) {
instance.array[index] = true;
}
index = 72816;
while (index <= 72817) : (index += 1) {
instance.array[index] = true;
}
index = 72818;
while (index <= 72847) : (index += 1) {
instance.array[index] = true;
}
index = 72850;
while (index <= 72871) : (index += 1) {
instance.array[index] = true;
}
instance.array[72873] = true;
index = 72874;
while (index <= 72880) : (index += 1) {
instance.array[index] = true;
}
instance.array[72881] = true;
index = 72882;
while (index <= 72883) : (index += 1) {
instance.array[index] = true;
}
instance.array[72884] = true;
index = 72885;
while (index <= 72886) : (index += 1) {
instance.array[index] = true;
}
index = 72960;
while (index <= 72966) : (index += 1) {
instance.array[index] = true;
}
index = 72968;
while (index <= 72969) : (index += 1) {
instance.array[index] = true;
}
index = 72971;
while (index <= 73008) : (index += 1) {
instance.array[index] = true;
}
index = 73009;
while (index <= 73014) : (index += 1) {
instance.array[index] = true;
}
instance.array[73018] = true;
index = 73020;
while (index <= 73021) : (index += 1) {
instance.array[index] = true;
}
index = 73023;
while (index <= 73029) : (index += 1) {
instance.array[index] = true;
}
instance.array[73030] = true;
instance.array[73031] = true;
index = 73040;
while (index <= 73049) : (index += 1) {
instance.array[index] = true;
}
index = 73056;
while (index <= 73061) : (index += 1) {
instance.array[index] = true;
}
index = 73063;
while (index <= 73064) : (index += 1) {
instance.array[index] = true;
}
index = 73066;
while (index <= 73097) : (index += 1) {
instance.array[index] = true;
}
index = 73098;
while (index <= 73102) : (index += 1) {
instance.array[index] = true;
}
index = 73104;
while (index <= 73105) : (index += 1) {
instance.array[index] = true;
}
index = 73107;
while (index <= 73108) : (index += 1) {
instance.array[index] = true;
}
instance.array[73109] = true;
instance.array[73110] = true;
instance.array[73111] = true;
instance.array[73112] = true;
index = 73120;
while (index <= 73129) : (index += 1) {
instance.array[index] = true;
}
index = 73440;
while (index <= 73458) : (index += 1) {
instance.array[index] = true;
}
index = 73459;
while (index <= 73460) : (index += 1) {
instance.array[index] = true;
}
index = 73461;
while (index <= 73462) : (index += 1) {
instance.array[index] = true;
}
index = 73463;
while (index <= 73464) : (index += 1) {
instance.array[index] = true;
}
instance.array[73648] = true;
index = 73664;
while (index <= 73684) : (index += 1) {
instance.array[index] = true;
}
index = 73685;
while (index <= 73692) : (index += 1) {
instance.array[index] = true;
}
index = 73693;
while (index <= 73696) : (index += 1) {
instance.array[index] = true;
}
index = 73697;
while (index <= 73713) : (index += 1) {
instance.array[index] = true;
}
instance.array[73727] = true;
index = 73728;
while (index <= 74649) : (index += 1) {
instance.array[index] = true;
}
index = 74752;
while (index <= 74862) : (index += 1) {
instance.array[index] = true;
}
index = 74864;
while (index <= 74868) : (index += 1) {
instance.array[index] = true;
}
index = 74880;
while (index <= 75075) : (index += 1) {
instance.array[index] = true;
}
index = 77824;
while (index <= 78894) : (index += 1) {
instance.array[index] = true;
}
index = 78896;
while (index <= 78904) : (index += 1) {
instance.array[index] = true;
}
index = 82944;
while (index <= 83526) : (index += 1) {
instance.array[index] = true;
}
index = 92160;
while (index <= 92728) : (index += 1) {
instance.array[index] = true;
}
index = 92736;
while (index <= 92766) : (index += 1) {
instance.array[index] = true;
}
index = 92768;
while (index <= 92777) : (index += 1) {
instance.array[index] = true;
}
index = 92782;
while (index <= 92783) : (index += 1) {
instance.array[index] = true;
}
index = 92880;
while (index <= 92909) : (index += 1) {
instance.array[index] = true;
}
index = 92912;
while (index <= 92916) : (index += 1) {
instance.array[index] = true;
}
instance.array[92917] = true;
index = 92928;
while (index <= 92975) : (index += 1) {
instance.array[index] = true;
}
index = 92976;
while (index <= 92982) : (index += 1) {
instance.array[index] = true;
}
index = 92983;
while (index <= 92987) : (index += 1) {
instance.array[index] = true;
}
index = 92988;
while (index <= 92991) : (index += 1) {
instance.array[index] = true;
}
index = 92992;
while (index <= 92995) : (index += 1) {
instance.array[index] = true;
}
instance.array[92996] = true;
instance.array[92997] = true;
index = 93008;
while (index <= 93017) : (index += 1) {
instance.array[index] = true;
}
index = 93019;
while (index <= 93025) : (index += 1) {
instance.array[index] = true;
}
index = 93027;
while (index <= 93047) : (index += 1) {
instance.array[index] = true;
}
index = 93053;
while (index <= 93071) : (index += 1) {
instance.array[index] = true;
}
index = 93760;
while (index <= 93823) : (index += 1) {
instance.array[index] = true;
}
index = 93824;
while (index <= 93846) : (index += 1) {
instance.array[index] = true;
}
index = 93847;
while (index <= 93850) : (index += 1) {
instance.array[index] = true;
}
index = 93952;
while (index <= 94026) : (index += 1) {
instance.array[index] = true;
}
instance.array[94031] = true;
instance.array[94032] = true;
index = 94033;
while (index <= 94087) : (index += 1) {
instance.array[index] = true;
}
index = 94095;
while (index <= 94098) : (index += 1) {
instance.array[index] = true;
}
index = 94099;
while (index <= 94111) : (index += 1) {
instance.array[index] = true;
}
index = 113664;
while (index <= 113770) : (index += 1) {
instance.array[index] = true;
}
index = 113776;
while (index <= 113788) : (index += 1) {
instance.array[index] = true;
}
index = 113792;
while (index <= 113800) : (index += 1) {
instance.array[index] = true;
}
index = 113808;
while (index <= 113817) : (index += 1) {
instance.array[index] = true;
}
instance.array[113820] = true;
index = 113821;
while (index <= 113822) : (index += 1) {
instance.array[index] = true;
}
instance.array[113823] = true;
index = 113824;
while (index <= 113827) : (index += 1) {
instance.array[index] = true;
}
index = 118784;
while (index <= 119029) : (index += 1) {
instance.array[index] = true;
}
index = 119040;
while (index <= 119078) : (index += 1) {
instance.array[index] = true;
}
index = 119081;
while (index <= 119140) : (index += 1) {
instance.array[index] = true;
}
index = 119141;
while (index <= 119142) : (index += 1) {
instance.array[index] = true;
}
index = 119143;
while (index <= 119145) : (index += 1) {
instance.array[index] = true;
}
index = 119146;
while (index <= 119148) : (index += 1) {
instance.array[index] = true;
}
index = 119149;
while (index <= 119154) : (index += 1) {
instance.array[index] = true;
}
index = 119155;
while (index <= 119162) : (index += 1) {
instance.array[index] = true;
}
index = 119163;
while (index <= 119170) : (index += 1) {
instance.array[index] = true;
}
index = 119171;
while (index <= 119172) : (index += 1) {
instance.array[index] = true;
}
index = 119173;
while (index <= 119179) : (index += 1) {
instance.array[index] = true;
}
index = 119180;
while (index <= 119209) : (index += 1) {
instance.array[index] = true;
}
index = 119210;
while (index <= 119213) : (index += 1) {
instance.array[index] = true;
}
index = 119214;
while (index <= 119272) : (index += 1) {
instance.array[index] = true;
}
index = 119296;
while (index <= 119361) : (index += 1) {
instance.array[index] = true;
}
index = 119362;
while (index <= 119364) : (index += 1) {
instance.array[index] = true;
}
instance.array[119365] = true;
index = 119520;
while (index <= 119539) : (index += 1) {
instance.array[index] = true;
}
index = 119552;
while (index <= 119638) : (index += 1) {
instance.array[index] = true;
}
index = 119648;
while (index <= 119672) : (index += 1) {
instance.array[index] = true;
}
index = 119808;
while (index <= 119892) : (index += 1) {
instance.array[index] = true;
}
index = 119894;
while (index <= 119964) : (index += 1) {
instance.array[index] = true;
}
index = 119966;
while (index <= 119967) : (index += 1) {
instance.array[index] = true;
}
instance.array[119970] = true;
index = 119973;
while (index <= 119974) : (index += 1) {
instance.array[index] = true;
}
index = 119977;
while (index <= 119980) : (index += 1) {
instance.array[index] = true;
}
index = 119982;
while (index <= 119993) : (index += 1) {
instance.array[index] = true;
}
instance.array[119995] = true;
index = 119997;
while (index <= 120003) : (index += 1) {
instance.array[index] = true;
}
index = 120005;
while (index <= 120069) : (index += 1) {
instance.array[index] = true;
}
index = 120071;
while (index <= 120074) : (index += 1) {
instance.array[index] = true;
}
index = 120077;
while (index <= 120084) : (index += 1) {
instance.array[index] = true;
}
index = 120086;
while (index <= 120092) : (index += 1) {
instance.array[index] = true;
}
index = 120094;
while (index <= 120121) : (index += 1) {
instance.array[index] = true;
}
index = 120123;
while (index <= 120126) : (index += 1) {
instance.array[index] = true;
}
index = 120128;
while (index <= 120132) : (index += 1) {
instance.array[index] = true;
}
instance.array[120134] = true;
index = 120138;
while (index <= 120144) : (index += 1) {
instance.array[index] = true;
}
index = 120146;
while (index <= 120485) : (index += 1) {
instance.array[index] = true;
}
index = 120488;
while (index <= 120512) : (index += 1) {
instance.array[index] = true;
}
instance.array[120513] = true;
index = 120514;
while (index <= 120538) : (index += 1) {
instance.array[index] = true;
}
instance.array[120539] = true;
index = 120540;
while (index <= 120570) : (index += 1) {
instance.array[index] = true;
}
instance.array[120571] = true;
index = 120572;
while (index <= 120596) : (index += 1) {
instance.array[index] = true;
}
instance.array[120597] = true;
index = 120598;
while (index <= 120628) : (index += 1) {
instance.array[index] = true;
}
instance.array[120629] = true;
index = 120630;
while (index <= 120654) : (index += 1) {
instance.array[index] = true;
}
instance.array[120655] = true;
index = 120656;
while (index <= 120686) : (index += 1) {
instance.array[index] = true;
}
instance.array[120687] = true;
index = 120688;
while (index <= 120712) : (index += 1) {
instance.array[index] = true;
}
instance.array[120713] = true;
index = 120714;
while (index <= 120744) : (index += 1) {
instance.array[index] = true;
}
instance.array[120745] = true;
index = 120746;
while (index <= 120770) : (index += 1) {
instance.array[index] = true;
}
instance.array[120771] = true;
index = 120772;
while (index <= 120779) : (index += 1) {
instance.array[index] = true;
}
index = 120782;
while (index <= 120831) : (index += 1) {
instance.array[index] = true;
}
index = 120832;
while (index <= 121343) : (index += 1) {
instance.array[index] = true;
}
index = 121344;
while (index <= 121398) : (index += 1) {
instance.array[index] = true;
}
index = 121399;
while (index <= 121402) : (index += 1) {
instance.array[index] = true;
}
index = 121403;
while (index <= 121452) : (index += 1) {
instance.array[index] = true;
}
index = 121453;
while (index <= 121460) : (index += 1) {
instance.array[index] = true;
}
instance.array[121461] = true;
index = 121462;
while (index <= 121475) : (index += 1) {
instance.array[index] = true;
}
instance.array[121476] = true;
index = 121477;
while (index <= 121478) : (index += 1) {
instance.array[index] = true;
}
index = 121479;
while (index <= 121483) : (index += 1) {
instance.array[index] = true;
}
index = 121499;
while (index <= 121503) : (index += 1) {
instance.array[index] = true;
}
index = 121505;
while (index <= 121519) : (index += 1) {
instance.array[index] = true;
}
index = 122880;
while (index <= 122886) : (index += 1) {
instance.array[index] = true;
}
index = 122888;
while (index <= 122904) : (index += 1) {
instance.array[index] = true;
}
index = 122907;
while (index <= 122913) : (index += 1) {
instance.array[index] = true;
}
index = 122915;
while (index <= 122916) : (index += 1) {
instance.array[index] = true;
}
index = 122918;
while (index <= 122922) : (index += 1) {
instance.array[index] = true;
}
index = 123136;
while (index <= 123180) : (index += 1) {
instance.array[index] = true;
}
index = 123184;
while (index <= 123190) : (index += 1) {
instance.array[index] = true;
}
index = 123191;
while (index <= 123197) : (index += 1) {
instance.array[index] = true;
}
index = 123200;
while (index <= 123209) : (index += 1) {
instance.array[index] = true;
}
instance.array[123214] = true;
instance.array[123215] = true;
index = 123584;
while (index <= 123627) : (index += 1) {
instance.array[index] = true;
}
index = 123628;
while (index <= 123631) : (index += 1) {
instance.array[index] = true;
}
index = 123632;
while (index <= 123641) : (index += 1) {
instance.array[index] = true;
}
instance.array[123647] = true;
index = 124928;
while (index <= 125124) : (index += 1) {
instance.array[index] = true;
}
index = 125127;
while (index <= 125135) : (index += 1) {
instance.array[index] = true;
}
index = 125136;
while (index <= 125142) : (index += 1) {
instance.array[index] = true;
}
index = 125184;
while (index <= 125251) : (index += 1) {
instance.array[index] = true;
}
index = 125252;
while (index <= 125258) : (index += 1) {
instance.array[index] = true;
}
instance.array[125259] = true;
index = 125264;
while (index <= 125273) : (index += 1) {
instance.array[index] = true;
}
index = 125278;
while (index <= 125279) : (index += 1) {
instance.array[index] = true;
}
index = 126065;
while (index <= 126123) : (index += 1) {
instance.array[index] = true;
}
instance.array[126124] = true;
index = 126125;
while (index <= 126127) : (index += 1) {
instance.array[index] = true;
}
instance.array[126128] = true;
index = 126129;
while (index <= 126132) : (index += 1) {
instance.array[index] = true;
}
index = 126209;
while (index <= 126253) : (index += 1) {
instance.array[index] = true;
}
instance.array[126254] = true;
index = 126255;
while (index <= 126269) : (index += 1) {
instance.array[index] = true;
}
index = 126464;
while (index <= 126467) : (index += 1) {
instance.array[index] = true;
}
index = 126469;
while (index <= 126495) : (index += 1) {
instance.array[index] = true;
}
index = 126497;
while (index <= 126498) : (index += 1) {
instance.array[index] = true;
}
instance.array[126500] = true;
instance.array[126503] = true;
index = 126505;
while (index <= 126514) : (index += 1) {
instance.array[index] = true;
}
index = 126516;
while (index <= 126519) : (index += 1) {
instance.array[index] = true;
}
instance.array[126521] = true;
instance.array[126523] = true;
instance.array[126530] = true;
instance.array[126535] = true;
instance.array[126537] = true;
instance.array[126539] = true;
index = 126541;
while (index <= 126543) : (index += 1) {
instance.array[index] = true;
}
index = 126545;
while (index <= 126546) : (index += 1) {
instance.array[index] = true;
}
instance.array[126548] = true;
instance.array[126551] = true;
instance.array[126553] = true;
instance.array[126555] = true;
instance.array[126557] = true;
instance.array[126559] = true;
index = 126561;
while (index <= 126562) : (index += 1) {
instance.array[index] = true;
}
instance.array[126564] = true;
index = 126567;
while (index <= 126570) : (index += 1) {
instance.array[index] = true;
}
index = 126572;
while (index <= 126578) : (index += 1) {
instance.array[index] = true;
}
index = 126580;
while (index <= 126583) : (index += 1) {
instance.array[index] = true;
}
index = 126585;
while (index <= 126588) : (index += 1) {
instance.array[index] = true;
}
instance.array[126590] = true;
index = 126592;
while (index <= 126601) : (index += 1) {
instance.array[index] = true;
}
index = 126603;
while (index <= 126619) : (index += 1) {
instance.array[index] = true;
}
index = 126625;
while (index <= 126627) : (index += 1) {
instance.array[index] = true;
}
index = 126629;
while (index <= 126633) : (index += 1) {
instance.array[index] = true;
}
index = 126635;
while (index <= 126651) : (index += 1) {
instance.array[index] = true;
}
index = 126704;
while (index <= 126705) : (index += 1) {
instance.array[index] = true;
}
index = 126976;
while (index <= 126979) : (index += 1) {
instance.array[index] = true;
}
index = 126981;
while (index <= 127019) : (index += 1) {
instance.array[index] = true;
}
index = 127024;
while (index <= 127123) : (index += 1) {
instance.array[index] = true;
}
index = 127136;
while (index <= 127150) : (index += 1) {
instance.array[index] = true;
}
index = 127153;
while (index <= 127167) : (index += 1) {
instance.array[index] = true;
}
index = 127169;
while (index <= 127182) : (index += 1) {
instance.array[index] = true;
}
index = 127185;
while (index <= 127221) : (index += 1) {
instance.array[index] = true;
}
index = 127243;
while (index <= 127244) : (index += 1) {
instance.array[index] = true;
}
index = 127245;
while (index <= 127247) : (index += 1) {
instance.array[index] = true;
}
index = 127278;
while (index <= 127279) : (index += 1) {
instance.array[index] = true;
}
index = 127338;
while (index <= 127343) : (index += 1) {
instance.array[index] = true;
}
instance.array[127405] = true;
index = 127462;
while (index <= 127487) : (index += 1) {
instance.array[index] = true;
}
index = 127777;
while (index <= 127788) : (index += 1) {
instance.array[index] = true;
}
instance.array[127798] = true;
instance.array[127869] = true;
index = 127892;
while (index <= 127903) : (index += 1) {
instance.array[index] = true;
}
index = 127947;
while (index <= 127950) : (index += 1) {
instance.array[index] = true;
}
index = 127956;
while (index <= 127967) : (index += 1) {
instance.array[index] = true;
}
index = 127985;
while (index <= 127987) : (index += 1) {
instance.array[index] = true;
}
index = 127989;
while (index <= 127991) : (index += 1) {
instance.array[index] = true;
}
instance.array[128063] = true;
instance.array[128065] = true;
index = 128253;
while (index <= 128254) : (index += 1) {
instance.array[index] = true;
}
index = 128318;
while (index <= 128330) : (index += 1) {
instance.array[index] = true;
}
instance.array[128335] = true;
index = 128360;
while (index <= 128377) : (index += 1) {
instance.array[index] = true;
}
index = 128379;
while (index <= 128404) : (index += 1) {
instance.array[index] = true;
}
index = 128407;
while (index <= 128419) : (index += 1) {
instance.array[index] = true;
}
index = 128421;
while (index <= 128506) : (index += 1) {
instance.array[index] = true;
}
index = 128592;
while (index <= 128639) : (index += 1) {
instance.array[index] = true;
}
index = 128710;
while (index <= 128715) : (index += 1) {
instance.array[index] = true;
}
index = 128717;
while (index <= 128719) : (index += 1) {
instance.array[index] = true;
}
index = 128723;
while (index <= 128724) : (index += 1) {
instance.array[index] = true;
}
index = 128736;
while (index <= 128746) : (index += 1) {
instance.array[index] = true;
}
index = 128752;
while (index <= 128755) : (index += 1) {
instance.array[index] = true;
}
index = 128768;
while (index <= 128883) : (index += 1) {
instance.array[index] = true;
}
index = 128896;
while (index <= 128984) : (index += 1) {
instance.array[index] = true;
}
index = 129024;
while (index <= 129035) : (index += 1) {
instance.array[index] = true;
}
index = 129040;
while (index <= 129095) : (index += 1) {
instance.array[index] = true;
}
index = 129104;
while (index <= 129113) : (index += 1) {
instance.array[index] = true;
}
index = 129120;
while (index <= 129159) : (index += 1) {
instance.array[index] = true;
}
index = 129168;
while (index <= 129197) : (index += 1) {
instance.array[index] = true;
}
index = 129200;
while (index <= 129201) : (index += 1) {
instance.array[index] = true;
}
index = 129280;
while (index <= 129291) : (index += 1) {
instance.array[index] = true;
}
instance.array[129339] = true;
instance.array[129350] = true;
index = 129536;
while (index <= 129619) : (index += 1) {
instance.array[index] = true;
}
index = 129632;
while (index <= 129645) : (index += 1) {
instance.array[index] = true;
}
index = 129792;
while (index <= 129938) : (index += 1) {
instance.array[index] = true;
}
index = 129940;
while (index <= 129994) : (index += 1) {
instance.array[index] = true;
}
index = 130032;
while (index <= 130041) : (index += 1) {
instance.array[index] = true;
}
instance.array[917505] = true;
index = 917536;
while (index <= 917631) : (index += 1) {
instance.array[index] = true;
}
// Placeholder: 0. Struct name, 1. Code point kind
return instance;
}
/// Frees the boolean lookup table backing this struct.
pub fn deinit(self: *Neutral) void {
    self.allocator.free(self.array);
}
// isNeutral checks if cp is of the kind Neutral.
pub fn isNeutral(self: Neutral, cp: u21) bool {
    // Fast reject for code points outside the covered range.
    if (cp < self.lo or cp > self.hi) return false;
    const index = cp - self.lo;
    // Defensive bounds check: the table may be shorter than (hi - lo + 1).
    return if (index >= self.array.len) false else self.array[index];
}
|
src/components/autogen/DerivedEastAsianWidth/Neutral.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
const odbc = @import("odbc");
const ResultSet = @import("result_set.zig").ResultSet;
const FetchResult = @import("result_set.zig").FetchResult;
const sql_parameter = @import("parameter.zig");
const ParameterBucket = sql_parameter.ParameterBucket;
const Cursor = @import("cursor.zig").Cursor;
/// Transaction commit behavior for a connection (see DBConnection.setCommitMode).
pub const CommitMode = enum(u1) {
    /// Each statement is committed immediately (ODBC autocommit on).
    auto,
    /// Changes are held until explicitly committed (autocommit off).
    manual
};
pub const ConnectionInfo = struct {
    pub const Config = struct {
        driver: ?[]const u8 = null,
        dsn: ?[]const u8 = null,
        username: ?[]const u8 = null,
        password: ?[]const u8 = null,
    };

    /// Attribute name -> value pairs making up an ODBC connection string.
    /// NOTE(review): keys/values are stored as slices without copying, so the
    /// backing memory must outlive this struct — confirm against callers.
    attributes: std.StringHashMap([]const u8),

    /// Initialize a blank `ConnectionInfo` struct with an initialized `attributes` hash map.
    pub fn init(allocator: *Allocator) ConnectionInfo {
        return .{
            .attributes = std.StringHashMap([]const u8).init(allocator),
        };
    }

    /// Initialize a `ConnectionInfo` using the information provided in the config data.
    pub fn initWithConfig(allocator: *Allocator, config: Config) !ConnectionInfo {
        var connection_info = ConnectionInfo.init(allocator);
        if (config.driver) |driver| try connection_info.setDriver(driver);
        if (config.dsn) |dsn| try connection_info.setDSN(dsn);
        if (config.username) |username| try connection_info.setUsername(username);
        if (config.password) |password| try connection_info.setPassword(password);
        return connection_info;
    }

    pub fn deinit(self: *ConnectionInfo) void {
        self.attributes.deinit();
    }

    /// Set an arbitrary attribute; overwrites any previous value for `attr_name`.
    pub fn setAttribute(self: *ConnectionInfo, attr_name: []const u8, attr_value: []const u8) !void {
        try self.attributes.put(attr_name, attr_value);
    }

    pub fn getAttribute(self: *ConnectionInfo, attr_name: []const u8) ?[]const u8 {
        return self.attributes.get(attr_name);
    }

    pub fn setDriver(self: *ConnectionInfo, driver_value: []const u8) !void {
        try self.setAttribute("DRIVER", driver_value);
    }

    pub fn getDriver(self: *ConnectionInfo) ?[]const u8 {
        return self.getAttribute("DRIVER");
    }

    pub fn setUsername(self: *ConnectionInfo, user_value: []const u8) !void {
        try self.setAttribute("UID", user_value);
    }

    pub fn getUsername(self: *ConnectionInfo) ?[]const u8 {
        return self.getAttribute("UID");
    }

    pub fn setPassword(self: *ConnectionInfo, password_value: []const u8) !void {
        try self.setAttribute("PWD", password_value);
    }

    pub fn getPassword(self: *ConnectionInfo) ?[]const u8 {
        return self.getAttribute("PWD");
    }

    pub fn setDSN(self: *ConnectionInfo, dsn_value: []const u8) !void {
        try self.setAttribute("DSN", dsn_value);
    }

    pub fn getDSN(self: *ConnectionInfo) ?[]const u8 {
        return self.getAttribute("DSN");
    }

    /// Render the attributes as an "ODBC;KEY=value;..." string.
    /// Caller owns the returned slice.
    pub fn toConnectionString(self: *ConnectionInfo, allocator: *Allocator) ![]const u8 {
        var string_builder = std.ArrayList(u8).init(allocator);
        errdefer string_builder.deinit();
        _ = try string_builder.writer().write("ODBC;");
        var attribute_iter = self.attributes.iterator();
        while (attribute_iter.next()) |entry| {
            _ = try string_builder.writer().write(entry.key_ptr.*);
            _ = try string_builder.writer().write("=");
            _ = try string_builder.writer().write(entry.value_ptr.*);
            _ = try string_builder.writer().write(";");
        }
        return string_builder.toOwnedSlice();
    }

    /// Parse a "KEY=value;KEY2=value2;..." string back into a `ConnectionInfo`.
    /// The returned attributes reference slices of `conn_str`.
    ///
    /// Fix: segments containing no '=' (such as the "ODBC;" prefix emitted by
    /// `toConnectionString`) were previously recorded as a bogus attribute
    /// built from a stale separator index; they are now skipped.
    pub fn fromConnectionString(allocator: *Allocator, conn_str: []const u8) !ConnectionInfo {
        var conn_info = ConnectionInfo.init(allocator);
        var attr_start: usize = 0;
        // Index of the '=' inside the current segment, or null if none seen yet.
        var attr_sep_index: ?usize = null;
        var current_index: usize = 0;
        while (current_index < conn_str.len) : (current_index += 1) {
            const byte = conn_str[current_index];
            if (byte == '=') {
                attr_sep_index = current_index;
                continue;
            }
            if (byte == ';') {
                if (attr_sep_index) |sep| {
                    try conn_info.setAttribute(conn_str[attr_start..sep], conn_str[sep + 1 .. current_index]);
                }
                attr_start = current_index + 1;
                attr_sep_index = null;
            } else if (current_index == conn_str.len - 1) {
                // Final attribute with no trailing ';'.
                if (attr_sep_index) |sep| {
                    try conn_info.setAttribute(conn_str[attr_start..sep], conn_str[sep + 1 ..]);
                }
            }
        }
        return conn_info;
    }
};
pub const DBConnection = struct {
    environment: odbc.Environment,
    connection: odbc.Connection,

    /// Shared setup for both init paths: create the ODBC environment, select
    /// ODBC 3, and create the (not yet connected) connection handle.
    /// On error everything allocated so far is torn down.
    fn initHandles() !DBConnection {
        var result: DBConnection = undefined;
        result.environment = odbc.Environment.init() catch return error.EnvironmentError;
        errdefer result.environment.deinit() catch {};
        result.environment.setOdbcVersion(.Odbc3) catch return error.EnvironmentError;
        result.connection = odbc.Connection.init(&result.environment) catch return error.ConnectionError;
        return result;
    }

    /// Connect using a DSN/server name plus credentials.
    pub fn init(server_name: []const u8, username: []const u8, password: []const u8) !DBConnection {
        var result = try initHandles();
        errdefer result.deinit();
        try result.connection.connect(server_name, username, password);
        return result;
    }

    /// Connect using a full ODBC connection string (no driver prompting).
    pub fn initWithConnectionString(connection_string: []const u8) !DBConnection {
        var result = try initHandles();
        errdefer result.deinit();
        try result.connection.connectExtended(connection_string, .NoPrompt);
        return result;
    }

    /// Release the connection and environment. Errors during teardown are ignored.
    pub fn deinit(self: *DBConnection) void {
        self.connection.deinit() catch {};
        self.environment.deinit() catch {};
    }

    /// Switch between autocommit (.auto) and explicit commit (.manual).
    pub fn setCommitMode(self: *DBConnection, mode: CommitMode) !void {
        try self.connection.setAttribute(.{ .Autocommit = mode == .auto });
    }

    pub fn getCursor(self: *DBConnection, allocator: *Allocator) !Cursor {
        return try Cursor.init(allocator, self.connection);
    }
};
test "ConnectionInfo" {
    const allocator = std.testing.allocator;
    var connection_info = ConnectionInfo.init(allocator);
    defer connection_info.deinit();
    try connection_info.setDriver("A Driver");
    try connection_info.setDSN("Some DSN Value");
    try connection_info.setUsername("User");
    try connection_info.setPassword("Password");
    try connection_info.setAttribute("RandomAttr", "Random Value");
    const connection_string = try connection_info.toConnectionString(allocator);
    // Defers run LIFO: derived_conn_info.deinit() (registered below) runs
    // before this free, so its borrowed slices stay valid long enough.
    defer allocator.free(connection_string);
    // Round-trip: parse the generated string and check every attribute survives.
    var derived_conn_info = try ConnectionInfo.fromConnectionString(allocator, connection_string);
    defer derived_conn_info.deinit();
    try std.testing.expectEqualStrings("A Driver", derived_conn_info.getDriver().?);
    try std.testing.expectEqualStrings("Some DSN Value", derived_conn_info.getDSN().?);
    try std.testing.expectEqualStrings("User", derived_conn_info.getUsername().?);
    try std.testing.expectEqualStrings("Password", derived_conn_info.getPassword().?);
    try std.testing.expectEqualStrings("Random Value", derived_conn_info.getAttribute("RandomAttr").?);
}
|
src/connection.zig
|
const std = @import("std");
const log = std.log.scoped(.dynamic_tree);
const assert = std.debug.assert;
const basic_type = @import("basic_type.zig");
const IndexLinkList = @import("IndexLinkList.zig");
const Rect = basic_type.Rect;
const Vec2 = basic_type.Vec2;
const Index = basic_type.Index;
const node_null = basic_type.node_null;
const DynamicTree = @This();
/// One node of the AABB tree, stored in struct-of-arrays form via MultiArrayList.
const Node = struct {
    /// If child1 == node_null, this node is a leaf and child2 holds the entity id.
    child1: Index = node_null,
    /// Right child for internal nodes; user entity id for leaves.
    child2: Index = node_null,
    parent: Index = node_null,
    /// Leaf height is 0; freed nodes are marked with -1 (see freeNode).
    height: i16 = 0,
    aabb: Rect = Rect.zero(),
};
const NodeList = std.MultiArrayList(Node);
// Index of the root node, or node_null for an empty tree.
root: Index,
// Intrusive free list threaded through the `child1` column (see allocateNode/freeNode).
free_list: IndexLinkList,
node_list: NodeList,
// Cached slice view of `node_list`; refreshed whenever the list reallocates.
nodes: NodeList.Slice,
allocator: *std.mem.Allocator,
/// Create an empty tree. No heap allocation happens until the first addNode.
pub fn init(allocator: *std.mem.Allocator) DynamicTree {
    // Fix: the original created two unrelated empty NodeLists (one stored,
    // one only used for its slice). Use a single list for both so the stored
    // list and the cached slice start out consistent.
    var node_list = NodeList{};
    return .{
        .root = node_null,
        .free_list = IndexLinkList{},
        .node_list = node_list,
        .nodes = node_list.slice(),
        .allocator = allocator,
    };
}
/// Release all node storage. The tree must not be used afterwards.
pub fn deinit(self: *DynamicTree) void {
    self.node_list.deinit(self.allocator);
}
/// Walk the tree and invoke `callback.onOverlap(payload, entity)` for every
/// leaf whose AABB overlaps `aabb`. `stack` is caller-provided scratch space
/// and is cleared (capacity retained) before this function returns.
pub fn query(self: DynamicTree, stack: *std.ArrayList(Index), aabb: Rect, payload: anytype, callback: anytype) !void {
    const CallBack = std.meta.Child(@TypeOf(callback));
    if (!@hasDecl(CallBack, "onOverlap")) {
        // Fix: the message previously named "onCallback" although the check
        // above looks for a declaration called "onOverlap".
        @compileError("Expect " ++ @typeName(@TypeOf(callback)) ++ " has onOverlap function");
    }
    defer stack.clearRetainingCapacity();
    try stack.append(self.root);
    const aabbs = self.nodes.items(.aabb);
    const child1s = self.nodes.items(.child1);
    const child2s = self.nodes.items(.child2);
    while (stack.items.len > 0) {
        const node_id = stack.pop();
        if (node_id == node_null) continue;
        if (aabbs[node_id].testOverlap(aabb)) {
            if (isLeaf(node_id, child1s)) {
                // Leaves store the user entity in `child2`.
                callback.onOverlap(payload, child2s[node_id]);
            } else {
                try stack.append(child1s[node_id]);
                try stack.append(child2s[node_id]);
            }
        }
    }
}
/// Dump the tree's SoA columns and the free list to the debug log.
pub fn print(self: DynamicTree, extra: []const u8) void {
    const h = self.nodes.items(.height);
    const c1 = self.nodes.items(.child1);
    const c2 = self.nodes.items(.child2);
    const p = self.nodes.items(.parent);
    log.debug("==== {s} ====\n", .{extra});
    log.debug("height: {any}\n", .{h});
    log.debug("child1: {any}\n", .{c1});
    log.debug("child2: {any}\n", .{c2});
    log.debug("parent: {any}\n", .{p});
    // NOTE(review): `next` is the IndexLinkList struct itself, yet it is
    // compared to node_null and used to index `c1` as if it were an Index.
    // This likely needs the list's head index (e.g. `self.free_list.first`)
    // — confirm against IndexLinkList.zig.
    var next = self.free_list;
    log.debug("free_list: ", .{});
    while (next != node_null) : (next = c1[next]) {
        log.debug("{},", .{next});
    }
    std.debug.print("\n", .{});
}
/// Insert a new leaf with the given AABB mapping to `entity`.
/// Returns the internal node id (pass it to moveNode/removeNode).
pub fn addNode(self: *DynamicTree, aabb: Rect, entity: Index) Index {
    const node_id = self.allocateNode();
    self.nodes.items(.aabb)[node_id] = aabb;
    self.nodes.items(.height)[node_id] = 0;
    // Leaves keep child1 == node_null (default) and store the entity in child2.
    self.nodes.items(.child2)[node_id] = entity;
    self.insertLeaf(node_id);
    return node_id;
}
/// Remove a leaf previously returned by addNode and recycle its node.
pub fn removeNode(self: *DynamicTree, node_id: Index) void {
    self.removeLeaf(node_id);
    self.freeNode(node_id);
}
// TODO: fatten aabb?
// pub fn moveNode(tree: *DynamicTree, node_id: u32, aabb: Rect, displacement: Vec2) bool {
/// Update a leaf's AABB. Returns false (no work done) when the new AABB is
/// already contained in the stored one; otherwise the leaf is re-inserted.
pub fn moveNode(tree: *DynamicTree, node_id: Index, aabb: Rect) bool {
    var aabbs = tree.nodes.items(.aabb);
    if (aabbs[node_id].contains(aabb)) {
        return false;
    }
    tree.removeLeaf(node_id);
    aabbs[node_id] = aabb;
    tree.insertLeaf(node_id);
    // TODO: tree.aabbs[proxyId].moved = true;
    return true;
}
/// Pop a node from the free list or grow the backing list.
/// Growing invalidates previously obtained `items(...)` slices, so
/// `self.nodes` is refreshed here; callers must re-fetch their slices.
fn allocateNode(self: *DynamicTree) Index {
    const node_id = self.free_list.popFirst(self.nodes.items(.child1));
    if (node_id) |value| {
        // Recycled node: reset all columns to their defaults.
        self.node_list.set(value, .{});
        return value;
    }
    // NOTE(review): OOM is swallowed with `catch unreachable`, which is UB in
    // release builds — consider propagating the error instead.
    self.node_list.append(self.allocator, .{}) catch unreachable;
    self.nodes = self.node_list.slice();
    return @intCast(Index, self.node_list.len - 1);
}
/// Return a node to the intrusive free list (threaded through `child1`).
fn freeNode(self: *DynamicTree, node_id: Index) void {
    // TODO: b2Assert(0 <= nodeId && nodeId < m_nodeCapacity);
    // Height -1 marks the node as free.
    self.nodes.items(.height)[node_id] = -1;
    self.free_list.prepend(node_id, self.nodes.items(.child1));
}
/// A node is a leaf when it has no first child.
fn isLeaf(node_id: Index, child1s: []const Index) bool {
    return child1s[node_id] == node_null;
}
/// Insert leaf `leaf` into the tree: pick the sibling minimizing the
/// surface-area heuristic, splice in a new parent node, then walk back up
/// rebalancing and refitting AABBs/heights. Ported from Box2D's
/// b2DynamicTree::InsertLeaf.
fn insertLeaf(self: *DynamicTree, leaf: Index) void {
    // TODO: what is this? self.m_insertionCount += 1;
    var parents = self.nodes.items(.parent);
    if (self.root == node_null) {
        self.root = leaf;
        parents[leaf] = node_null;
        return;
    }
    // Find the best sibling for this node
    var aabbs = self.nodes.items(.aabb);
    var child1s = self.nodes.items(.child1);
    var child2s = self.nodes.items(.child2);
    // Copied by value, so it stays valid even if slices are refreshed later.
    const leaf_aabb = aabbs[leaf];
    var index = self.root;
    while (isLeaf(index, child1s) == false) {
        const child1 = child1s[index];
        const child2 = child2s[index];
        const area = aabbs[index].getPerimeter();
        const combined_aabb = leaf_aabb.combine(aabbs[index]);
        const combined_area = combined_aabb.getPerimeter();
        // Cost of creating a new parent for this node and the new leaf
        const cost = 2 * combined_area;
        // Minimum cost of pushing the leaf further down the tree
        const inheritance_cost = 2 * (combined_area - area);
        // Cost of descending into child1
        const cost1 = blk: {
            if (isLeaf(child1, child1s)) {
                const aabb = leaf_aabb.combine(aabbs[child1]);
                break :blk aabb.getPerimeter() + inheritance_cost;
            } else {
                const aabb = leaf_aabb.combine(aabbs[child1]);
                const old_area = aabbs[child1].getPerimeter();
                const new_area = aabb.getPerimeter();
                break :blk (new_area - old_area) + inheritance_cost;
            }
        };
        // Cost of descending into child2
        const cost2 = blk: {
            if (isLeaf(child2, child1s)) {
                const aabb = leaf_aabb.combine(aabbs[child2]);
                break :blk aabb.getPerimeter() + inheritance_cost;
            } else {
                const aabb = leaf_aabb.combine(aabbs[child2]);
                const old_area = aabbs[child2].getPerimeter();
                const new_area = aabb.getPerimeter();
                break :blk new_area - old_area + inheritance_cost;
            }
        };
        // Descend according to the minimum cost.
        if (cost < cost1 and cost < cost2) break;
        // Descend
        if (cost1 < cost2) {
            index = child1;
        } else {
            index = child2;
        }
    }
    const sibling = index;
    // Create a new parent.
    const old_parent = parents[sibling];
    const new_parent = self.allocateNode();
    // allocateNode may reallocate node storage, so re-fetch every column
    // slice before using it again.
    parents = self.nodes.items(.parent);
    aabbs = self.nodes.items(.aabb);
    var heights = self.nodes.items(.height);
    parents[new_parent] = old_parent;
    aabbs[new_parent] = leaf_aabb.combine(aabbs[sibling]);
    heights[new_parent] = heights[sibling] + 1;
    child1s = self.nodes.items(.child1);
    child2s = self.nodes.items(.child2);
    if (old_parent != node_null) {
        // The sibling was not the root.
        if (child1s[old_parent] == sibling) {
            child1s[old_parent] = new_parent;
        } else {
            child2s[old_parent] = new_parent;
        }
        parents[leaf] = new_parent;
    } else {
        // The sibling was the root.
        parents[leaf] = new_parent;
        self.root = new_parent;
    }
    child1s[new_parent] = sibling;
    child2s[new_parent] = leaf;
    parents[sibling] = new_parent;
    // Walk back up the tree fixing heights and AABBs
    var i = parents[leaf];
    while (i != node_null) : (i = parents[i]) {
        // balance may rotate the subtree; continue from its (new) root.
        i = self.balance(i);
        const child1 = child1s[i];
        const child2 = child2s[i];
        assert(child1 != node_null);
        heights[i] = 1 + std.math.max(
            heights[child1],
            heights[child2],
        );
        aabbs[i] = aabbs[child1].combine(aabbs[child2]);
    }
}
/// AVL-style rotation at `node_id` when its children's heights differ by more
/// than 1. Returns the index of the (possibly new) subtree root.
/// Ported from Box2D's b2DynamicTree::Balance; A = node, B = child1, C = child2.
fn balance(self: *DynamicTree, node_id: Index) Index {
    var heights = self.nodes.items(.height);
    var child1s = self.nodes.items(.child1);
    assert(node_id != node_null);
    const ia = node_id;
    // Leaves and shallow subtrees never need rotating.
    if (isLeaf(ia, child1s) or heights[ia] < 2) {
        return ia;
    }
    var child2s = self.nodes.items(.child2);
    const ib = child1s[ia];
    const ic = child2s[ia];
    // TODO: b2Assert(0 <= iB && iB < m_nodeCapacity);
    //b2Assert(0 <= iC && iC < m_nodeCapacity);
    // var b = &self.nodes[ib];
    // var c = &self.nodes[ic];
    // Positive: right (C) side is taller; negative: left (B) side is taller.
    const balance_height = heights[ic] - heights[ib];
    // Rotate C up
    if (balance_height > 1) {
        var parents = self.nodes.items(.parent);
        // `if` is a keyword, hence the quoted identifier for C's first child (F).
        const @"if" = child1s[ic];
        const ig = child2s[ic];
        // var f = &self.nodes[@"if"];
        // var g = &self.nodes[ig];
        // TODO: b2Assert(0 <= iF && iF < m_nodeCapacity);
        //b2Assert(0 <= iG && iG < m_nodeCapacity);
        // Swap A and C
        child1s[ic] = ia;
        parents[ic] = parents[ia];
        parents[ia] = ic;
        // A's old parent should point to C
        if (parents[ic] != node_null) {
            const c_parent = parents[ic];
            if (child1s[c_parent] == ia) {
                child1s[c_parent] = ic;
            } else {
                assert(child2s[c_parent] == ia);
                child2s[c_parent] = ic;
            }
        } else {
            self.root = ic;
        }
        // Rotate: keep the taller of F/G under C, hand the shorter to A.
        if (heights[@"if"] > heights[ig]) {
            var aabbs = self.nodes.items(.aabb);
            child2s[ic] = @"if";
            child2s[ia] = ig;
            parents[ig] = ia;
            aabbs[ia] = aabbs[ib].combine(aabbs[ig]);
            aabbs[ic] = aabbs[ia].combine(aabbs[@"if"]);
            heights[ia] = 1 + std.math.max(heights[ib], heights[ig]);
            heights[ic] = 1 + std.math.max(heights[ia], heights[@"if"]);
        } else {
            var aabbs = self.nodes.items(.aabb);
            child2s[ic] = ig;
            child2s[ia] = @"if";
            parents[@"if"] = ia;
            aabbs[ia] = aabbs[ib].combine(aabbs[@"if"]);
            aabbs[ic] = aabbs[ia].combine(aabbs[ig]);
            heights[ia] = 1 + std.math.max(heights[ib], heights[@"if"]);
            heights[ic] = 1 + std.math.max(heights[ia], heights[ig]);
        }
        return ic;
    }
    // Rotate B up
    if (balance_height < -1) {
        var parents = self.nodes.items(.parent);
        const id = child1s[ib];
        const ie = child2s[ib];
        // var d = &self.nodes[id];
        // var e = &self.nodes[ie];
        // TODO: b2Assert(0 <= iD && iD < m_nodeCapacity);
        //b2Assert(0 <= iE && iE < m_nodeCapacity);
        // Swap A and B
        child1s[ib] = ia;
        parents[ib] = parents[ia];
        parents[ia] = ib;
        // A's old parent should point to B
        if (parents[ib] != node_null) {
            const b_parent = parents[ib];
            if (child1s[b_parent] == ia) {
                child1s[b_parent] = ib;
            } else {
                assert(child2s[b_parent] == ia);
                child2s[b_parent] = ib;
            }
        } else {
            self.root = ib;
        }
        // Rotate: keep the taller of D/E under B, hand the shorter to A.
        if (heights[id] > heights[ie]) {
            var aabbs = self.nodes.items(.aabb);
            child2s[ib] = id;
            child1s[ia] = ie;
            parents[ie] = ia;
            aabbs[ia] = aabbs[ic].combine(aabbs[ie]);
            aabbs[ib] = aabbs[ia].combine(aabbs[id]);
            heights[ia] = 1 + std.math.max(heights[ic], heights[ie]);
            heights[ib] = 1 + std.math.max(heights[ia], heights[id]);
        } else {
            var aabbs = self.nodes.items(.aabb);
            child2s[ib] = ie;
            child1s[ia] = id;
            parents[id] = ia;
            aabbs[ia] = aabbs[ic].combine(aabbs[id]);
            aabbs[ib] = aabbs[ia].combine(aabbs[ie]);
            heights[ia] = 1 + std.math.max(heights[ic], heights[id]);
            heights[ib] = 1 + std.math.max(heights[ia], heights[ie]);
        }
        return ib;
    }
    return ia;
}
/// Detach leaf `leaf` from the tree: free its parent, reconnect the sibling
/// to the grandparent, then refit/rebalance the ancestor chain.
/// The leaf node itself is freed by the caller (removeNode).
fn removeLeaf(self: *DynamicTree, leaf: Index) void {
    assert(self.root != node_null);
    if (leaf == self.root) {
        self.root = node_null;
        return;
    }
    var parents = self.nodes.items(.parent);
    var child1s = self.nodes.items(.child1);
    var child2s = self.nodes.items(.child2);
    const parent = parents[leaf];
    var grand_parent = parents[parent];
    const sibling = if (child1s[parent] == leaf) child2s[parent] else child1s[parent];
    // freeNode reuses the child1 column for the free list; `sibling` was read
    // out above so the overwrite is harmless.
    self.freeNode(parent);
    if (grand_parent != node_null) {
        // Destroy parent and connect sibling to grandParent.
        if (child1s[grand_parent] == parent) {
            child1s[grand_parent] = sibling;
        } else {
            child2s[grand_parent] = sibling;
        }
        parents[sibling] = grand_parent;
        // Adjust ancestor bounds.
        var index = grand_parent;
        var aabbs = self.nodes.items(.aabb);
        var heights = self.nodes.items(.height);
        while (index != node_null) : (index = parents[index]) {
            // balance may rotate; continue from the subtree's new root.
            index = self.balance(index);
            const child1 = child1s[index];
            const child2 = child2s[index];
            aabbs[index] = aabbs[child1].combine(aabbs[child2]);
            heights[index] = 1 + std.math.max(heights[child1], heights[child2]);
        }
    } else {
        self.root = sibling;
        parents[sibling] = node_null;
    }
}
/// Draw a uniform integer in [min, max] from `prng` and widen it to f32.
fn floatFromRange(prng: std.rand.Random, min: i32, max: i32) f32 {
    const sample = prng.intRangeAtMost(i32, min, max);
    return @intToFloat(f32, sample);
}
test "behavior" {
    // Builds a small tree of five rectangles and checks pairwise overlap
    // queries against a hand-computed expected set.
    std.debug.print("\n", .{});
    var dt = DynamicTree.init(std.testing.allocator);
    defer dt.deinit();
    var rects = std.ArrayList(Rect).init(std.testing.allocator);
    defer rects.deinit();
    try rects.append(Rect.newFromCenter(
        Vec2.new(-2, -2),
        Vec2.new(2, 2),
    ));
    try rects.append(Rect.newFromCenter(
        Vec2.new(-1, -1),
        Vec2.new(3, 3),
    ));
    try rects.append(Rect.newFromCenter(
        Vec2.new(-3, -3),
        Vec2.new(4, 4),
    ));
    try rects.append(Rect.newFromCenter(
        Vec2.new(2, 2),
        Vec2.new(4, 4),
    ));
    try rects.append(Rect.newFromCenter(
        Vec2.new(-3, -3),
        Vec2.new(-1, -1),
    ));
    for (rects.items) |entry, i| {
        _ = dt.addNode(entry, @intCast(u32, i));
    }
    // Flattened (payload, entity) pairs, each with payload < entity.
    const expected_output = [_]u32{
        0, 3,
        0, 4,
        0, 1,
        0, 2,
        1, 3,
        1, 4,
        1, 2,
        2, 3,
        2, 4,
    };
    {
        const QueryCallback = struct {
            const fixed_cap = 1048;
            couter: u32 = 0,
            array: [expected_output.len]u32 = undefined,
            buffer: [fixed_cap]u8 = undefined,
            pub fn onOverlap(self: *@This(), payload: u32, entity: u32) void {
                // Record each unordered pair only once.
                if (payload >= entity) return;
                self.array[self.couter * 2] = payload;
                self.array[self.couter * 2 + 1] = entity;
                self.couter += 1;
                // std.debug.print("{} - {}\n", .{ payload, entity });
            }
        };
        var callback = QueryCallback{};
        var stack = std.ArrayList(Index).init(std.testing.allocator);
        defer stack.deinit();
        for (rects.items) |aabb, i| {
            try dt.query(&stack, aabb, @intCast(u32, i), &callback);
        }
        for (&expected_output) |entry, i| {
            try std.testing.expect(entry == callback.array[i]);
        }
    }
}
test "Performance" {
    // Rough timing of bulk insert + all-pairs queries over random rectangles.
    std.debug.print("\n", .{});
    var dt = DynamicTree.init(std.testing.allocator);
    defer dt.deinit();
    const total = 10000;
    // Fix: `std.rand.Xoshiro256.init(0).random()` captured a pointer to a
    // temporary engine that is destroyed at the end of the statement, leaving
    // the Random interface dangling. Keep the engine alive in a local.
    var prng = std.rand.Xoshiro256.init(0);
    const random = prng.random();
    const max_size = 20;
    const min_size = 1;
    const max = 3000;
    const min = -3000;
    var entities = std.ArrayList(Rect).init(std.testing.allocator);
    defer entities.deinit();
    try entities.ensureTotalCapacity(total);
    {
        var timer = try std.time.Timer.start();
        var entity: u32 = 0;
        while (entity < total) : (entity += 1) {
            const aabb = Rect.newFromCenter(
                Vec2.new(
                    floatFromRange(random, min, max),
                    floatFromRange(random, min, max),
                ),
                Vec2.new(
                    floatFromRange(random, min_size, max_size),
                    floatFromRange(random, min_size, max_size),
                ),
            );
            _ = dt.addNode(aabb, entity);
            try entities.append(aabb);
        }
        const time_0 = timer.read();
        std.debug.print("add {} entity take {}ms\n", .{ total, time_0 / std.time.ns_per_ms });
    }
    {
        const QueryCallback = struct {
            const fixed_cap = 1048;
            total: u32 = 0,
            buffer: [fixed_cap]u8 = undefined,
            pub fn onOverlap(self: *@This(), payload: u32, entity: u32) void {
                if (payload >= entity) return;
                self.total += 1;
            }
        };
        var callback = QueryCallback{};
        var stack = std.ArrayList(Index).init(std.testing.allocator);
        defer stack.deinit();
        var timer = try std.time.Timer.start();
        for (entities.items) |aabb, i| {
            try dt.query(&stack, aabb, @intCast(u32, i), &callback);
        }
        const time_0 = timer.read();
        std.debug.print("callback query {} entity, with {} callback take {}ms\n", .{
            total,
            callback.total,
            time_0 / std.time.ns_per_ms,
        });
    }
}
|
src/physic/DynamicTree.zig
|
const std = @import("std");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const gc = @cImport({
@cInclude("gc.h");
});
/// Returns the Allocator used for APIs in Zig
pub fn allocator() Allocator {
    // Initialize libgc lazily, exactly once.
    if (gc.GC_is_init_called() == 0) {
        gc.GC_init();
    }
    return Allocator{
        // The GC is a global singleton, so there is no per-allocator state.
        .ptr = undefined,
        .vtable = &gc_allocator_vtable,
    };
}
/// Returns the current heap size of used memory.
pub fn getHeapSize() u64 {
    return gc.GC_get_heap_size();
}
/// Disable garbage collection.
pub fn disable() void {
    gc.GC_disable();
}
/// Enables garbage collection. GC is enabled by default so this is
/// only useful if you called disable earlier.
pub fn enable() void {
    gc.GC_enable();
}
/// Performs a full, stop-the-world garbage collection. With leak detection
/// enabled this will output any leaks as well.
pub fn collect() void {
    gc.GC_gcollect();
}
/// Perform some garbage collection. Returns zero when work is done.
pub fn collectLittle() u8 {
    return @intCast(u8, gc.GC_collect_a_little());
}
/// Enables leak-finding mode. See the libgc docs for more details.
pub fn setFindLeak(v: bool) void {
    return gc.GC_set_find_leak(@boolToInt(v));
}
// TODO(mitchellh): there are so many more functions to add here
// from gc.h, just add em as they're useful.
/// GcAllocator is an implementation of std.mem.Allocator that uses
/// libgc under the covers. This means that all memory allocated with
/// this allocated doesn't need to be explicitly freed (but can be).
///
/// The GC is a singleton that is globally shared. Multiple GcAllocators
/// do not allocate separate pages of memory; they share the same underlying
/// pages.
///
// NOTE(mitchellh): this is basically just a copy of the standard CAllocator
// since libgc has a malloc/free-style interface. There are very slight differences
// due to API differences but overall the same.
pub const GcAllocator = struct {
    /// Allocator.VTable `alloc`: GC-managed, alignment-padded allocation.
    fn alloc(
        _: *anyopaque,
        len: usize,
        alignment: u29,
        len_align: u29,
        return_address: usize,
    ) error{OutOfMemory}![]u8 {
        _ = return_address;
        assert(len > 0);
        assert(std.math.isPowerOfTwo(alignment));
        var ptr = alignedAlloc(len, alignment) orelse return error.OutOfMemory;
        if (len_align == 0) {
            return ptr[0..len];
        }
        const full_len = init: {
            const s = alignedAllocSize(ptr);
            assert(s >= len);
            break :init s;
        };
        // Hand back as much of the usable block as len_align permits.
        return ptr[0..mem.alignBackwardAnyAlign(full_len, len_align)];
    }
    /// Allocator.VTable `resize`: only grows within the existing block;
    /// never moves memory (returns null when the block is too small).
    fn resize(
        _: *anyopaque,
        buf: []u8,
        buf_align: u29,
        new_len: usize,
        len_align: u29,
        return_address: usize,
    ) ?usize {
        _ = buf_align;
        _ = return_address;
        if (new_len <= buf.len) {
            return mem.alignAllocLen(buf.len, new_len, len_align);
        }
        const full_len = alignedAllocSize(buf.ptr);
        if (new_len <= full_len) {
            return mem.alignAllocLen(full_len, new_len, len_align);
        }
        return null;
    }
    /// Allocator.VTable `free`: explicit free is optional with libgc but supported.
    fn free(
        _: *anyopaque,
        buf: []u8,
        buf_align: u29,
        return_address: usize,
    ) void {
        _ = buf_align;
        _ = return_address;
        alignedFree(buf.ptr);
    }
    /// The original malloc'ed pointer is stashed in the usize slot directly
    /// before the aligned address; this returns a pointer to that slot.
    fn getHeader(ptr: [*]u8) *[*]u8 {
        return @intToPtr(*[*]u8, @ptrToInt(ptr) - @sizeOf(usize));
    }
    fn alignedAlloc(len: usize, alignment: usize) ?[*]u8 {
        // Thin wrapper around regular malloc, overallocate to account for
        // alignment padding and store the original malloc()'ed pointer before
        // the aligned address.
        var unaligned_ptr = @ptrCast([*]u8, gc.GC_malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null);
        const unaligned_addr = @ptrToInt(unaligned_ptr);
        const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), alignment);
        var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
        getHeader(aligned_ptr).* = unaligned_ptr;
        return aligned_ptr;
    }
    fn alignedFree(ptr: [*]u8) void {
        const unaligned_ptr = getHeader(ptr).*;
        gc.GC_free(unaligned_ptr);
    }
    /// Usable bytes at `ptr`: libgc's block size minus the alignment padding.
    fn alignedAllocSize(ptr: [*]u8) usize {
        const unaligned_ptr = getHeader(ptr).*;
        const delta = @ptrToInt(ptr) - @ptrToInt(unaligned_ptr);
        return gc.GC_size(unaligned_ptr) - delta;
    }
};
// Single vtable shared by every Allocator returned from allocator().
const gc_allocator_vtable = Allocator.VTable{
    .alloc = GcAllocator.alloc,
    .resize = GcAllocator.resize,
    .free = GcAllocator.free,
};
test "GcAllocator" {
    const alloc = allocator();
    // Run the std allocator conformance suite against the GC-backed allocator.
    try std.heap.testAllocator(alloc);
    try std.heap.testAllocatorAligned(alloc);
    try std.heap.testAllocatorLargeAlignment(alloc);
    try std.heap.testAllocatorAlignedShrink(alloc);
}
test "heap size" {
    // No garbage so should be 0
    try testing.expect(collectLittle() == 0);
    // Force a collection should work
    collect();
    // NOTE(review): assumes the GC heap is non-empty at this point
    // (e.g. from earlier tests' allocations) — confirm test ordering.
    try testing.expect(getHeapSize() > 0);
}
|
src/gc.zig
|
const std = @import("std");
const with_trace = true;
const assert = std.debug.assert;
/// Print to stderr when tracing is enabled at comptime; no-op otherwise.
fn trace(comptime fmt: []const u8, args: anytype) void {
    if (with_trace) std.debug.print(fmt, args);
}
/// A purchasable shop item with its gold cost and stat bonuses.
const Object = struct {
    cost: i32,
    damage: i32,
    armor: i32,
};
/// Combat stats for one fighter: hit points, damage per hit, armor.
const Carac = struct {
    points: i32,
    damage: i32,
    armor: i32,
};
/// Simulate a full fight: the player is equipped with `objects`, the player
/// strikes first, turns alternate, and every hit deals at least 1 damage.
/// Returns true when the player survives (wins).
fn playfight(boss: Carac, player: Carac, objects: []Object) bool {
    var b = boss;
    var p = player;
    // Apply the equipment's bonuses to the player's stats.
    for (objects) |o| {
        p.damage += o.damage;
        p.armor += o.armor;
    }
    // Exchange blows until one side drops to 0 or below.
    while (true) {
        b.points -= std.math.max(p.damage - b.armor, 1);
        if (b.points <= 0) break;
        p.points -= std.math.max(b.damage - p.armor, 1);
        if (p.points <= 0) break;
    }
    return p.points > 0;
}
/// AoC 2015 day 21 (part 2): find the most gold you can spend on one weapon,
/// one armor and up to two distinct rings while still LOSING the fight.
pub fn main() anyerror!void {
    // Fix: removed an unused arena allocator (unused locals are compile
    // errors) — nothing in this solver allocates.
    const boss = Carac{ .points = 103, .damage = 9, .armor = 2 };
    const player = Carac{ .points = 100, .damage = 0, .armor = 0 };
    const weapons = [_]Object{
        .{ .cost = 8, .damage = 4, .armor = 0 },
        .{ .cost = 10, .damage = 5, .armor = 0 },
        .{ .cost = 25, .damage = 6, .armor = 0 },
        .{ .cost = 40, .damage = 7, .armor = 0 },
        .{ .cost = 74, .damage = 8, .armor = 0 },
    };
    // First entry is a free no-op so "no armor" is a valid choice.
    const armors = [_]Object{
        .{ .cost = 0, .damage = 0, .armor = 0 },
        .{ .cost = 13, .damage = 0, .armor = 1 },
        .{ .cost = 31, .damage = 0, .armor = 2 },
        .{ .cost = 53, .damage = 0, .armor = 3 },
        .{ .cost = 75, .damage = 0, .armor = 4 },
        .{ .cost = 102, .damage = 0, .armor = 5 },
    };
    // Two free no-op entries allow "zero rings" and "one ring" combinations.
    const rings = [_]Object{
        .{ .cost = 0, .damage = 0, .armor = 0 },
        .{ .cost = 0, .damage = 0, .armor = 0 },
        .{ .cost = 25, .damage = 1, .armor = 0 },
        .{ .cost = 50, .damage = 2, .armor = 0 },
        .{ .cost = 100, .damage = 3, .armor = 0 },
        .{ .cost = 20, .damage = 0, .armor = 1 },
        .{ .cost = 40, .damage = 0, .armor = 2 },
        .{ .cost = 80, .damage = 0, .armor = 3 },
    };
    var bestcost: i32 = 0;
    var objects: [4]Object = undefined;
    for (weapons) |w| {
        objects[0] = w;
        for (armors) |a| {
            objects[1] = a;
            for (rings) |r1, i| {
                objects[2] = r1;
                // r2 is always picked after r1 so each pair is tried once.
                for (rings[i + 1 ..]) |r2| {
                    objects[3] = r2;
                    const victory = playfight(boss, player, &objects);
                    const cost = w.cost + a.cost + r1.cost + r2.cost;
                    // Fix: format arguments must be passed as one tuple; the
                    // old varargs-style call does not compile with
                    // `args: anytype`. `{any}` renders the struct array.
                    trace("combi {any}, cost={} victory={}\n", .{ objects, cost, victory });
                    if (!victory and cost > bestcost) {
                        bestcost = cost;
                    }
                }
            }
        }
    }
    const out = std.io.getStdOut().writer();
    // Fix: same tuple-argument fix for Writer.print.
    try out.print("ans = {}\n", .{bestcost});
    // return error.SolutionNotFound;
}
|
2015/day21.zig
|
const std = @import("std");
const util = @import("util.zig");
const fs = std.fs;
const io = std.io;
const math = std.math;
const mem = std.mem;
const os = std.os;
const testing = std.testing;
pub const bufsize = mem.page_size * 2;
/// Shorthand for a u8 LinearFifo with the given buffer strategy.
pub fn Fifo(comptime buffer_type: std.fifo.LinearFifoBufferType) type {
    return std.fifo.LinearFifo(u8, buffer_type);
}
/// Reads lines from `reader` using a `Fifo` for buffering.
///
/// NOTE: using `readUntilDelimitorArrayList` over this function results in
/// tm35-rand-trainers to be around 2x slower. This function is therefor
/// still better to use until zigs std gets a better `readUntilDelimitor`
/// implementation. Replacement code bellow:
///```
///buf_reader.reader().readUntilDelimiterArrayList(buffer, '\n', std.math.maxInt(usize)) catch |err| switch (err) {
/// error.StreamTooLong => unreachable,
/// error.EndOfStream => {
/// if (buffer.items.len != 0)
/// return buffer.items;
/// return null;
/// },
/// else => |err2| return err2,
///};
///return buffer.items;
///```
pub fn readUntil(reader: anytype, fifo: anytype, byte: u8) !?[]const u8 {
    while (true) {
        const buf = fifo.readableSlice(0);
        if (mem.indexOfScalar(u8, buf, byte)) |index| {
            // Delimiter is buffered: return everything before it, and consume
            // it plus the delimiter from the fifo on scope exit.
            defer fifo.head += index + 1;
            defer fifo.count -= index + 1;
            return buf[0..index];
        }
        // No delimiter buffered yet: make writable room and read more.
        const new_buf = blk: {
            fifo.realign();
            const slice = fifo.writableSlice(0);
            if (slice.len != 0)
                break :blk slice;
            break :blk try fifo.writableWithSize(math.max(bufsize, fifo.buf.len));
        };
        const num = try reader.read(new_buf);
        fifo.update(num);
        if (num == 0) {
            // EOF: flush whatever remains as the final chunk.
            if (fifo.count != 0) {
                // Ensure that the buffer returned always have `byte` terminating
                // it, so that wrappers can return `[:Z]const u8` if they want to.
                // This is used by `readLine`.
                try fifo.writeItem(byte);
                const res = fifo.readableSlice(0);
                fifo.count = 0;
                return res[0 .. res.len - 1];
            }
            return null;
        }
    }
}
/// Read one line (terminated by '\n' or end of stream) as a '\n'-sentinel
/// slice; a trailing "\r\n" is normalized away.
pub fn readLine(reader: anytype, fifo: anytype) !?[:'\n']const u8 {
    const res = (try readUntil(reader, fifo, '\n')) orelse return null;
    if (mem.endsWith(u8, res, "\r")) {
        // Right now, readableSliceMut for fifo is private, so i cannot implement
        // this easily without casting away const, as `readUntil` cannot return
        // a mutable slice.
        const res_mut = util.unsafe.castAwayConst(res);
        // Overwrite the '\r' with '\n' so the shortened slice still ends in
        // the sentinel byte.
        res_mut[res.len - 1] = '\n';
        return res[0 .. res.len - 1 :'\n'];
    }
    return res.ptr[0..res.len :'\n'];
}
test "readLine" {
    // Plain '\n'-terminated input.
    try testReadLine(
        \\a
        \\b
        \\c
    , &[_][]const u8{
        "a",
        "b",
        "c",
    });
    // Mixed "\r\n" and "\n" endings plus a final line without a terminator.
    try testReadLine(
        "a\r\n" ++
            "b\n" ++
            "c",
        &[_][]const u8{
            "a",
            "b",
            "c",
        },
    );
}
// Drives readLine over `str` with a deliberately tiny (3-byte) fifo and
// checks each produced line as well as its '\n' sentinel byte.
fn testReadLine(str: []const u8, lines: []const []const u8) !void {
    var fbs = std.io.fixedBufferStream(str);
    var fifo = Fifo(.{ .Static = 3 }).init();
    for (lines) |expected_line| {
        const actual_line = (try readLine(fbs.reader(), &fifo)).?;
        try testing.expectEqualStrings(expected_line, actual_line);
        // Reading one past the end is legal: the slice is '\n'-sentineled.
        try testing.expectEqual(@as(u8, '\n'), actual_line[actual_line.len]);
    }
    try testing.expectEqual(@as(?[:'\n']const u8, null), try readLine(fbs.reader(), &fifo));
}
|
src/common/io.zig
|
const Allocator = std.mem.Allocator;
const FormatInterface = @import("../format_interface.zig").FormatInterface;
const ImageFormat = image.ImageFormat;
const ImageInStream = image.ImageInStream;
const ImageInfo = image.ImageInfo;
const ImageSeekStream = image.ImageSeekStream;
const PixelFormat = @import("../pixel_format.zig").PixelFormat;
const color = @import("../color.zig");
const errors = @import("../errors.zig");
const image = @import("../image.zig");
const std = @import("std");
const utils = @import("../utils.zig");
// this file implements the Portable Anymap specification provided by
// http://netpbm.sourceforge.net/doc/pbm.html // P1, P4 => Bitmap
// http://netpbm.sourceforge.net/doc/pgm.html // P2, P5 => Graymap
// http://netpbm.sourceforge.net/doc/ppm.html // P3, P6 => Pixmap
/// One of the three pixel layouts a netpbm graphic can be stored in.
pub const Format = enum {
    /// the image contains black-and-white pixels.
    Bitmap,
    /// the image contains grayscale pixels.
    Grayscale,
    /// the image contains RGB pixels.
    Rgb,
};
/// Parsed netpbm header values, shared by all six magic-number variants.
pub const Header = struct {
    /// Pixel layout (bitmap, graymap, or pixmap).
    format: Format,
    /// True for the binary encodings (P4/P5/P6), false for ASCII (P1/P2/P3).
    binary: bool,
    width: usize,
    height: usize,
    /// Maximum channel value; fixed at 1 for bitmaps, otherwise read from the header.
    max_value: usize,
};
/// Parses the two-byte magic number, then width and height (and, for
/// non-bitmap formats, the maximum channel value) from the stream.
/// Fails with InvalidMagicHeader for anything but P1..P6.
fn parseHeader(stream: ImageInStream) !Header {
    var magic: [2]u8 = undefined;
    _ = try stream.read(magic[0..]);
    if (magic[0] != 'P') return errors.ImageError.InvalidMagicHeader;
    var hdr: Header = undefined;
    switch (magic[1]) {
        '1', '4' => {
            hdr.format = .Bitmap;
            // Bitmaps have no max-value field in the header.
            hdr.max_value = 1;
        },
        '2', '5' => hdr.format = .Grayscale,
        '3', '6' => hdr.format = .Rgb,
        else => return errors.ImageError.InvalidMagicHeader,
    }
    // P4/P5/P6 are the binary variants of P1/P2/P3.
    hdr.binary = switch (magic[1]) {
        '4', '5', '6' => true,
        else => false,
    };
    var read_buffer: [16]u8 = undefined;
    hdr.width = try parseNumber(stream, read_buffer[0..]);
    hdr.height = try parseNumber(stream, read_buffer[0..]);
    if (hdr.format != .Bitmap) {
        hdr.max_value = try parseNumber(stream, read_buffer[0..]);
    }
    return hdr;
}
/// True for the four byte values the netpbm spec treats as token separators:
/// blanks, TABs, CRs, and LFs.
fn isWhitespace(b: u8) bool {
    return b == ' ' or b == '\t' or b == '\r' or b == '\n';
}
/// Returns the next raster/header byte from the stream, transparently
/// skipping '#' comments (which run until the next CR or LF). Whitespace is
/// NOT skipped here; it is returned so callers can use it as a delimiter.
/// Note: per the spec a comment may appear in the middle of a token, and a
/// comment's terminating newline does not by itself delimit the raster.
fn readNextByte(stream: ImageInStream) !u8 {
    while (true) {
        const b = try stream.readByte();
        if (b != '#') return b;
        // Consume the comment up to and including its line terminator.
        while (true) {
            const c = try stream.readByte();
            if (c == '\r' or c == '\n') break;
        }
    }
}
/// Skips leading whitespace and comments, then accumulates digits into
/// `buffer` until the next whitespace byte (which is consumed as terminator)
/// and parses them as a base-10 integer.
/// Fails with OutOfMemory if the token does not fit in `buffer`.
fn parseNumber(stream: ImageInStream, buffer: []u8) !usize {
    var token_len: usize = 0;
    while (true) {
        const b = try readNextByte(stream);
        if (!isWhitespace(b)) {
            if (token_len >= buffer.len)
                return error.OutOfMemory;
            buffer[token_len] = b;
            token_len += 1;
            continue;
        }
        // Whitespace before any digit is padding; after at least one digit
        // it terminates the token.
        if (token_len > 0)
            return try std.fmt.parseInt(usize, buffer[0..token_len], 10);
    }
}
/// Reads a P4 (binary) bitmap raster.
/// Per the PBM spec a set bit is black and bits run msb-to-lsb, while our u1
/// convention is 1=white/0=black, so each bit is inverted.
/// Bug fix: each row is packed 8 pixels per byte and "rounded up to a whole
/// byte", so the bit stream restarts on a byte boundary at every row. The
/// previous implementation treated the whole raster as one continuous bit
/// stream, which mis-decoded images whose width is not a multiple of 8.
fn loadBinaryBitmap(header: Header, data: []color.Monochrome, stream: ImageInStream) !void {
    var dataIndex: usize = 0;
    var row: usize = 0;
    while (row < header.height) : (row += 1) {
        var col: usize = 0;
        // Consumes ceil(width / 8) bytes per row; trailing pad bits ignored.
        while (col < header.width) {
            const b = try stream.readByte();
            var i: usize = 0;
            while (col < header.width and i < 8) : (i += 1) {
                // set bit is black, cleared bit is white
                // bits are "left to right" (so msb to lsb)
                const pixel = if ((b & (@as(u8, 1) << @truncate(u3, 7 - i))) != 0) @as(u1, 0) else @as(u1, 1);
                data[dataIndex] = color.Monochrome{ .value = pixel };
                dataIndex += 1;
                col += 1;
            }
        }
    }
}
/// Reads a P1 (ASCII) bitmap raster: one '0'/'1' character per pixel with
/// arbitrary whitespace between samples.
fn loadAsciiBitmap(header: Header, data: []color.Monochrome, stream: ImageInStream) !void {
    const total = header.width * header.height;
    var index: usize = 0;
    while (index < total) {
        const b = try stream.readByte();
        if (isWhitespace(b)) continue;
        // 1 is black, 0 is white in PBM spec.
        // we use 1=white, 0=black in u1 format
        const value = if (b == '0') @as(u1, 1) else @as(u1, 0);
        data[index] = color.Monochrome{ .value = value };
        index += 1;
    }
}
/// Reads one sample and rescales it linearly into the 0..255 range.
/// Samples are 16-bit big-endian when maxValue exceeds 255, otherwise 8-bit.
fn readLinearizedValue(stream: ImageInStream, maxValue: usize) !u8 {
    const raw: usize = if (maxValue > 255)
        try stream.readIntBig(u16)
    else
        try stream.readByte();
    return @truncate(u8, 255 * raw / maxValue);
}
/// Reads a P5 (binary) graymap raster, rescaling each sample to 0..255.
fn loadBinaryGraymap(header: Header, data: []color.Grayscale8, stream: ImageInStream) !void {
    for (data[0 .. header.width * header.height]) |*pixel| {
        pixel.* = color.Grayscale8{ .value = try readLinearizedValue(stream, header.max_value) };
    }
}
/// Reads a P2 (ASCII) graymap raster, rescaling each sample to 0..255.
fn loadAsciiGraymap(header: Header, data: []color.Grayscale8, stream: ImageInStream) !void {
    var number_buffer: [16]u8 = undefined;
    for (data[0 .. header.width * header.height]) |*pixel| {
        const sample = try parseNumber(stream, number_buffer[0..]);
        pixel.* = color.Grayscale8{ .value = @truncate(u8, 255 * sample / header.max_value) };
    }
}
/// Reads a P6 (binary) pixmap raster: three binary samples (R, G, B) per
/// pixel, each rescaled to 0..255.
fn loadBinaryRgbmap(header: Header, data: []color.Rgb24, stream: ImageInStream) !void {
    for (data[0 .. header.width * header.height]) |*pixel| {
        pixel.* = color.Rgb24{
            .R = try readLinearizedValue(stream, header.max_value),
            .G = try readLinearizedValue(stream, header.max_value),
            .B = try readLinearizedValue(stream, header.max_value),
        };
    }
}
/// Reads a P3 (ASCII) pixmap raster: three decimal samples (R, G, B) per
/// pixel, each rescaled to 0..255.
fn loadAsciiRgbmap(header: Header, data: []color.Rgb24, stream: ImageInStream) !void {
    var number_buffer: [16]u8 = undefined;
    for (data[0 .. header.width * header.height]) |*pixel| {
        const red = try parseNumber(stream, number_buffer[0..]);
        const green = try parseNumber(stream, number_buffer[0..]);
        const blue = try parseNumber(stream, number_buffer[0..]);
        pixel.* = color.Rgb24{
            .R = @truncate(u8, 255 * red / header.max_value),
            .G = @truncate(u8, 255 * green / header.max_value),
            .B = @truncate(u8, 255 * blue / header.max_value),
        };
    }
}
/// Generic netpbm codec type. `imageFormat` is the ImageFormat tag this
/// instantiation reports and `headerNumbers` lists the magic digits accepted
/// after the leading 'P' (e.g. '1' and '4' for PBM).
fn Netpbm(comptime imageFormat: ImageFormat, comptime headerNumbers: []const u8) type {
    return struct {
        // Both fields are populated by `read`.
        header: Header = undefined,
        pixel_format: PixelFormat = undefined,
        const Self = @This();
        /// Builds the type-erased vtable consumed by the generic image loader.
        pub fn formatInterface() FormatInterface {
            return FormatInterface{
                .format = @ptrCast(FormatInterface.FormatFn, format),
                .formatDetect = @ptrCast(FormatInterface.FormatDetectFn, formatDetect),
                .readForImage = @ptrCast(FormatInterface.ReadForImageFn, readForImage),
            };
        }
        /// Returns the ImageFormat this codec was instantiated for.
        pub fn format() ImageFormat {
            return imageFormat;
        }
        /// Checks whether the stream starts with "P" followed by one of this
        /// codec's accepted digits. Consumes two bytes from the stream.
        pub fn formatDetect(inStream: ImageInStream, seekStream: ImageSeekStream) !bool {
            var magicNumberBuffer: [2]u8 = undefined;
            _ = try inStream.read(magicNumberBuffer[0..]);
            if (magicNumberBuffer[0] != 'P') {
                return false;
            }
            var found = false;
            for (headerNumbers) |number| {
                if (magicNumberBuffer[1] == number) {
                    found = true;
                    break;
                }
            }
            return found;
        }
        /// Decodes the image into `pixels` and returns its width, height and
        /// pixel format.
        pub fn readForImage(allocator: *Allocator, inStream: ImageInStream, seekStream: ImageSeekStream, pixels: *?color.ColorStorage) !ImageInfo {
            var netpbmFile = Self{};
            try netpbmFile.read(allocator, inStream, seekStream, pixels);
            var imageInfo = ImageInfo{};
            imageInfo.width = netpbmFile.header.width;
            imageInfo.height = netpbmFile.header.height;
            imageInfo.pixel_format = netpbmFile.pixel_format;
            return imageInfo;
        }
        /// Parses the header, allocates pixel storage matching the detected
        /// format, and loads the raster into `pixelsOpt`.
        /// NOTE(review): `seekStream` is accepted but never used here —
        /// presumably only present to satisfy the FormatInterface signature;
        /// confirm.
        pub fn read(self: *Self, allocator: *Allocator, inStream: ImageInStream, seekStream: ImageSeekStream, pixelsOpt: *?color.ColorStorage) !void {
            self.header = try parseHeader(inStream);
            self.pixel_format = switch (self.header.format) {
                .Bitmap => PixelFormat.Monochrome,
                .Grayscale => PixelFormat.Grayscale8,
                .Rgb => PixelFormat.Rgb24,
            };
            pixelsOpt.* = try color.ColorStorage.init(allocator, self.pixel_format, self.header.width * self.header.height);
            if (pixelsOpt.*) |pixels| {
                // Dispatch on format and encoding (binary vs ASCII).
                switch (self.header.format) {
                    .Bitmap => {
                        if (self.header.binary) {
                            try loadBinaryBitmap(self.header, pixels.Monochrome, inStream);
                        } else {
                            try loadAsciiBitmap(self.header, pixels.Monochrome, inStream);
                        }
                    },
                    .Grayscale => {
                        if (self.header.binary) {
                            try loadBinaryGraymap(self.header, pixels.Grayscale8, inStream);
                        } else {
                            try loadAsciiGraymap(self.header, pixels.Grayscale8, inStream);
                        }
                    },
                    .Rgb => {
                        if (self.header.binary) {
                            try loadBinaryRgbmap(self.header, pixels.Rgb24, inStream);
                        } else {
                            try loadAsciiRgbmap(self.header, pixels.Rgb24, inStream);
                        }
                    },
                }
            }
        }
    };
}
/// P1 (ASCII) and P4 (binary) black-and-white images.
pub const PBM = Netpbm(ImageFormat.Pbm, &[_]u8{ '1', '4' });
/// P2 (ASCII) and P5 (binary) grayscale images.
pub const PGM = Netpbm(ImageFormat.Pgm, &[_]u8{ '2', '5' });
/// P3 (ASCII) and P6 (binary) RGB images.
pub const PPM = Netpbm(ImageFormat.Ppm, &[_]u8{ '3', '6' });
|
src/formats/netpbm.zig
|
const std = @import("std");
const version = @import("version");
const zfetch = @import("zfetch");
const tar = @import("tar");
const zzz = @import("zzz");
const uri = @import("uri");
const Dependency = @import("Dependency.zig");
const Package = @import("Package.zig");
const utils = @import("utils.zig");
const Allocator = std.mem.Allocator;
/// Queries `repository` for the latest published version of `user`/`package`,
/// optionally restricted to the semver `range`.
/// Prints an explanation and returns error.Explained on 404 or any other
/// non-200 status.
pub fn getLatest(
    allocator: *Allocator,
    repository: []const u8,
    user: []const u8,
    package: []const u8,
    range: ?version.Range,
) !version.Semver {
    const url = if (range) |r|
        try std.fmt.allocPrint(allocator, "https://{s}/pkgs/{s}/{s}/latest?v={}", .{
            repository,
            user,
            package,
            r,
        })
    else
        try std.fmt.allocPrint(allocator, "https://{s}/pkgs/{s}/{s}/latest", .{
            repository,
            user,
            package,
        });
    defer allocator.free(url);
    var headers = zfetch.Headers.init(allocator);
    defer headers.deinit();
    const link = try uri.parse(url);
    try headers.set("Accept", "*/*");
    try headers.set("User-Agent", "gyro");
    try headers.set("Host", link.host orelse return error.NoHost);
    // NOTE(review): this issues the request directly instead of going through
    // the `request` helper below, so HTTP redirects are not followed here —
    // confirm the package index never redirects.
    var req = try zfetch.Request.init(allocator, url, null);
    defer req.deinit();
    try req.do(.GET, headers, null);
    switch (req.status.code) {
        200 => {},
        404 => {
            if (range) |r| {
                std.log.err("failed to find {} for {s}/{s} on {s}", .{
                    r,
                    user,
                    package,
                    repository,
                });
            } else {
                std.log.err("failed to find latest for {s}/{s} on {s}", .{
                    user,
                    package,
                    repository,
                });
            }
            return error.Explained;
        },
        else => {
            // Unexpected status: dump the body to stderr for diagnosis.
            const body = try req.reader().readAllAlloc(allocator, std.math.maxInt(usize));
            defer allocator.free(body);
            const stderr = std.io.getStdErr().writer();
            try stderr.print("got http status code for {s}: {}", .{ url, req.status.code });
            try stderr.print("{s}\n", .{body});
            return error.Explained;
        },
    }
    // NOTE(review): assumes the response body (a semver string) fits in 10
    // bytes; a longer version string would be truncated — confirm upstream.
    var buf: [10]u8 = undefined;
    return version.Semver.parse(allocator, buf[0..try req.reader().readAll(&buf)]);
}
/// Resolves `ref` (branch, tag, or sha prefix) of `user`/`repo` to a commit
/// id by downloading the GitHub tarball and reading its pax "comment" header
/// (expected to hold the commit id — hence error.MissingCommitKey).
/// Caller owns the returned slice.
pub fn getHeadCommit(
    allocator: *Allocator,
    user: []const u8,
    repo: []const u8,
    ref: []const u8,
) ![]const u8 {
    const url = try std.fmt.allocPrint(
        allocator,
        "https://api.github.com/repos/{s}/{s}/tarball/{s}",
        .{ user, repo, ref },
    );
    defer allocator.free(url);
    var headers = zfetch.Headers.init(allocator);
    defer headers.deinit();
    try headers.set("Accept", "*/*");
    var req = try request(allocator, .GET, url, &headers, null);
    defer req.deinit();
    // The tarball is gzip-compressed; only the pax header is needed, the rest
    // of the archive is never read.
    var gzip = try std.compress.gzip.gzipStream(allocator, req.reader());
    defer gzip.deinit();
    var pax_header = try tar.PaxHeaderMap.init(allocator, gzip.reader());
    defer pax_header.deinit();
    return allocator.dupe(u8, pax_header.get("comment") orelse return error.MissingCommitKey);
}
/// Downloads and unpacks the archive of `user`/`package`@`semver` from
/// `repository` into `dir`, forwarding progress to `cb`/`payload`.
pub fn getPkg(
    allocator: *Allocator,
    repository: []const u8,
    user: []const u8,
    package: []const u8,
    semver: version.Semver,
    dir: std.fs.Dir,
    cb: ?HttpCallback,
    payload: ?usize,
) !void {
    const url = try std.fmt.allocPrint(allocator, "https://{s}/archive/{s}/{s}/{}", .{
        repository,
        user,
        package,
        semver,
    });
    defer allocator.free(url);
    return getTarGz(allocator, url, dir, cb, payload);
}
/// Signature of a download progress callback: (count, total, payload).
const HttpCallback = fn (count: usize, total: usize, payload: usize) void;
/// Reader wrapper that forwards bytes from a zfetch request while reporting
/// download progress to an optional callback.
const HttpCallbackStream = struct {
    req_reader: zfetch.Request.Reader,
    cb: ?HttpCallback,
    payload: ?usize,
    pub const Reader = std.io.Reader(*Self, ReaderError, Self.read);
    const ReaderError = zfetch.Request.Reader.Error;
    const Self = @This();
    // Forwards the read and then reports progress from zfetch's parser state.
    fn read(self: *Self, buf: []u8) ReaderError!usize {
        const ret = try self.req_reader.read(buf);
        // NOTE(review): the callback signature is (count, total, payload) but
        // this passes parser.read_needed first and parser.read_current second;
        // verify against zfetch's parser fields that these are not swapped.
        if (self.cb) |cb| cb(
            self.req_reader.context.parser.read_needed,
            self.req_reader.context.parser.read_current,
            self.payload orelse 0,
        );
        return ret;
    }
    fn reader(self: *Self) Reader {
        return Reader{ .context = self };
    }
};
/// Shared implementation: GETs `url` (following redirects via `request`),
/// gunzips the body, and untars it into `dir`, dropping the first
/// `skip_depth` path components of each entry. Progress is reported through
/// `cb`/`payload` via HttpCallbackStream.
fn getTarGzImpl(
    allocator: *Allocator,
    url: []const u8,
    dir: std.fs.Dir,
    skip_depth: usize,
    cb: ?HttpCallback,
    payload: ?usize,
) !void {
    var headers = zfetch.Headers.init(allocator);
    defer headers.deinit();
    try headers.set("Accept", "*/*");
    var req = try request(allocator, .GET, url, &headers, null);
    defer req.deinit();
    // Wrap the response reader so each read reports progress.
    var callback_stream = HttpCallbackStream{
        .req_reader = req.reader(),
        .cb = cb,
        .payload = payload,
    };
    var gzip = try std.compress.gzip.gzipStream(allocator, callback_stream.reader());
    defer gzip.deinit();
    try tar.instantiate(allocator, dir, gzip.reader(), skip_depth);
}
/// Downloads the tar.gz at `url` and unpacks it into `dir` without stripping
/// any leading path components. See `getTarGzImpl`.
pub fn getTarGz(
    allocator: *Allocator,
    url: []const u8,
    dir: std.fs.Dir,
    cb: ?HttpCallback,
    payload: ?usize,
) !void {
    return getTarGzImpl(allocator, url, dir, 0, cb, payload);
}
/// Downloads the GitHub tarball for `user`/`repo` at `commit` and unpacks it
/// into `dir`, stripping the "<user>-<repo>-<sha>/" top-level directory that
/// GitHub adds (skip_depth = 1).
pub fn getGithubTarGz(
    allocator: *Allocator,
    user: []const u8,
    repo: []const u8,
    commit: []const u8,
    dir: std.fs.Dir,
) !void {
    const url = try std.fmt.allocPrint(
        allocator,
        "https://api.github.com/repos/{s}/{s}/tarball/{s}",
        .{
            user,
            repo,
            commit,
        },
    );
    defer allocator.free(url);
    // Bug fix: getTarGzImpl takes six arguments; the progress callback and
    // its payload were missing here, so this call did not compile. No
    // progress reporting is wanted, so pass null for both.
    try getTarGzImpl(allocator, url, dir, 1, null, null);
}
/// Fetches the GitHub v3 repository object for `user`/`repo` as parsed JSON.
/// Caller must deinit the returned ValueTree.
pub fn getGithubRepo(
    allocator: *Allocator,
    user: []const u8,
    repo: []const u8,
) !std.json.ValueTree {
    const url = try std.fmt.allocPrint(
        allocator,
        "https://api.github.com/repos/{s}/{s}",
        .{ user, repo },
    );
    defer allocator.free(url);
    var headers = zfetch.Headers.init(allocator);
    defer headers.deinit();
    try headers.set("Accept", "application/vnd.github.v3+json");
    var req = try request(allocator, .GET, url, &headers, null);
    defer req.deinit();
    const body = try req.reader().readAllAlloc(allocator, std.math.maxInt(usize));
    defer allocator.free(body);
    // copy_strings = true so the tree outlives `body`.
    var json_parser = std.json.Parser.init(allocator, true);
    defer json_parser.deinit();
    return json_parser.parse(body);
}
/// Fetches the topic list for `user`/`repo` (mercy-preview API) as parsed
/// JSON. Caller must deinit the returned ValueTree.
pub fn getGithubTopics(
    allocator: *Allocator,
    user: []const u8,
    repo: []const u8,
) !std.json.ValueTree {
    const url = try std.fmt.allocPrint(
        allocator,
        "https://api.github.com/repos/{s}/{s}/topics",
        .{ user, repo },
    );
    defer allocator.free(url);
    var headers = zfetch.Headers.init(allocator);
    defer headers.deinit();
    try headers.set("Accept", "application/vnd.github.mercy-preview+json");
    var req = try request(allocator, .GET, url, &headers, null);
    defer req.deinit();
    const response_body = try req.reader().readAllAlloc(allocator, std.math.maxInt(usize));
    defer allocator.free(response_body);
    // copy_strings = true so the tree outlives `response_body`.
    var json_parser = std.json.Parser.init(allocator, true);
    defer json_parser.deinit();
    return json_parser.parse(response_body);
}
/// Downloads the GitHub tarball for `user`/`repo` at `commit` and extracts
/// just the "gyro.zzz" manifest from the repo's top-level directory.
/// Returns null if the archive has no such file; caller owns the returned
/// slice.
pub fn getGithubGyroFile(
    allocator: *Allocator,
    user: []const u8,
    repo: []const u8,
    commit: []const u8,
) !?[]const u8 {
    const url = try std.fmt.allocPrint(
        allocator,
        "https://api.github.com/repos/{s}/{s}/tarball/{s}",
        .{ user, repo, commit },
    );
    defer allocator.free(url);
    var headers = zfetch.Headers.init(allocator);
    defer headers.deinit();
    //std.log.info("fetching tarball: {s}", .{url});
    try headers.set("Accept", "*/*");
    var req = try request(allocator, .GET, url, &headers, null);
    defer req.deinit();
    // GitHub names the tarball's top directory "<user>-<repo>-<short sha>",
    // where the short sha is the first 7 characters of the commit id.
    const subpath = try std.fmt.allocPrint(allocator, "{s}-{s}-{s}/gyro.zzz", .{ user, repo, commit[0..7] });
    defer allocator.free(subpath);
    var gzip = try std.compress.gzip.gzipStream(allocator, req.reader());
    defer gzip.deinit();
    var extractor = tar.fileExtractor(subpath, gzip.reader());
    // FileNotFound from the extractor means the repo has no gyro.zzz.
    return extractor.reader().readAllAlloc(allocator, std.math.maxInt(usize)) catch |err|
        return if (err == error.FileNotFound) null else err;
}
/// Fields of GitHub's OAuth device-code response (see `postDeviceCode`).
pub const DeviceCodeResponse = struct {
    device_code: []const u8,
    user_code: []const u8,
    verification_uri: []const u8,
    // Lifetimes/intervals in seconds, per the device-flow response.
    expires_in: u64,
    interval: u64,
};
/// Starts GitHub's OAuth device flow by POSTing client id and scope to the
/// device/code endpoint. The returned struct's strings are allocated with
/// `allocator`; free them with std.json.parseFree.
pub fn postDeviceCode(
    allocator: *Allocator,
    client_id: []const u8,
    scope: []const u8,
) !DeviceCodeResponse {
    const url = "https://github.com/login/device/code";
    // Form-encoded request body.
    const payload = try std.fmt.allocPrint(allocator, "client_id={s}&scope={s}", .{ client_id, scope });
    defer allocator.free(payload);
    var headers = zfetch.Headers.init(allocator);
    defer headers.deinit();
    try headers.set("Accept", "application/json");
    var req = try request(allocator, .POST, url, &headers, payload);
    defer req.deinit();
    const body = try req.reader().readAllAlloc(allocator, std.math.maxInt(usize));
    defer allocator.free(body);
    var token_stream = std.json.TokenStream.init(body);
    return std.json.parse(DeviceCodeResponse, &token_stream, .{ .allocator = allocator });
}
/// Shape of a successful access-token response.
/// NOTE(review): currently unused — `pollDeviceCode` parses the response with
/// std.json.Parser instead of deserializing into this struct.
const PollDeviceCodeResponse = struct {
    access_token: []const u8,
    token_type: []const u8,
    scope: []const u8,
};
/// Polls GitHub's device flow for an access token.
/// Returns null while authorization is still pending (no "access_token" key
/// in the response); otherwise a duplicate of the token, owned by the caller.
pub fn pollDeviceCode(
    allocator: *Allocator,
    client_id: []const u8,
    device_code: []const u8,
) !?[]const u8 {
    const url = "https://github.com/login/oauth/access_token";
    // Form-encoded request body for the device-code grant.
    const payload = try std.fmt.allocPrint(
        allocator,
        "client_id={s}&device_code={s}&grant_type=urn:ietf:params:oauth:grant-type:device_code",
        .{ client_id, device_code },
    );
    defer allocator.free(payload);
    var headers = zfetch.Headers.init(allocator);
    defer headers.deinit();
    try headers.set("Accept", "application/json");
    var req = try request(allocator, .POST, url, &headers, payload);
    defer req.deinit();
    const body = try req.reader().readAllAlloc(allocator, std.math.maxInt(usize));
    defer allocator.free(body);
    // copy_strings = false is fine: the token is duped before the tree dies.
    var parser = std.json.Parser.init(allocator, false);
    defer parser.deinit();
    var value_tree = try parser.parse(body);
    defer value_tree.deinit();
    // TODO: error handling based on the json error codes
    return if (value_tree.root.Object.get("access_token")) |value| switch (value) {
        .String => |str| try allocator.dupe(u8, str),
        else => null,
    } else null;
}
/// Bundles `pkg` into a local archive, uploads it to the default repository's
/// /publish endpoint with a bearer token, and prints the server's response to
/// stderr. The local bundle file is deleted afterwards (best-effort).
pub fn postPublish(
    allocator: *Allocator,
    access_token: []const u8,
    pkg: *Package,
) !void {
    try pkg.bundle(std.fs.cwd(), std.fs.cwd());
    const filename = try pkg.filename(allocator);
    defer allocator.free(filename);
    const file = try std.fs.cwd().openFile(filename, .{});
    defer {
        file.close();
        // Best-effort cleanup of the temporary bundle.
        std.fs.cwd().deleteFile(filename) catch {};
    }
    const authorization = try std.fmt.allocPrint(allocator, "Bearer github {s}", .{access_token});
    defer allocator.free(authorization);
    const url = "https://" ++ utils.default_repo ++ "/publish";
    var headers = zfetch.Headers.init(allocator);
    defer headers.deinit();
    try headers.set("Content-Type", "application/octet-stream");
    try headers.set("Accept", "*/*");
    try headers.set("Authorization", authorization);
    // The whole bundle is read into memory and sent as the request body.
    const payload = try file.reader().readAllAlloc(allocator, std.math.maxInt(usize));
    defer allocator.free(payload);
    var req = try request(allocator, .POST, url, &headers, payload);
    defer req.deinit();
    const body = try req.reader().readAllAlloc(allocator, std.math.maxInt(usize));
    defer allocator.free(body);
    const stderr = std.io.getStdErr().writer();
    // Deferred so the response is printed even if a later statement errors.
    defer stderr.print("{s}\n", .{body}) catch {};
}
// HTTP request with redirect
/// Performs `method` on `url`, following up to 128 redirects.
/// Non-OK, non-redirect responses are dumped to stderr and returned as
/// error.Explained. Caller owns (and must deinit) the returned request.
fn request(
    allocator: *std.mem.Allocator,
    method: zfetch.Method,
    url: []const u8,
    headers: *zfetch.Headers,
    payload: ?[]const u8,
) !*zfetch.Request {
    try headers.set("User-Agent", "gyro");
    var real_url = try allocator.dupe(u8, url);
    defer allocator.free(real_url);
    var redirects: usize = 0;
    return while (redirects < 128) {
        var ret = try zfetch.Request.init(allocator, real_url, null);
        errdefer ret.deinit();
        const link = try uri.parse(real_url);
        try headers.set("Host", link.host orelse return error.NoHost);
        try ret.do(method, headers.*, payload);
        switch (ret.status.code) {
            200 => break ret,
            // Follow permanent (301/308) and temporary (302/307) redirects,
            // keeping the original method and payload. Previously only 302
            // was handled.
            301, 302, 307, 308 => {
                // Bug fix: the redirect counter was never incremented, so the
                // 128-redirect bound was ineffective and a redirect cycle
                // would loop forever.
                redirects += 1;
                // tmp needed for memory safety
                const tmp = real_url;
                const location = ret.headers.get("location") orelse return error.NoLocation;
                real_url = try allocator.dupe(u8, location);
                allocator.free(tmp);
                ret.deinit();
            },
            else => {
                const body = try ret.reader().readAllAlloc(allocator, std.math.maxInt(usize));
                defer allocator.free(body);
                const stderr = std.io.getStdErr().writer();
                try stderr.print("got http status code for {s}: {}", .{ url, ret.status.code });
                try stderr.print("{s}\n", .{body});
                return error.Explained;
            },
        }
    } else return error.TooManyRedirects;
}
|
src/api.zig
|
const std = @import("std");
const testing = std.testing;
const assert = std.debug.assert;
/// Ring buffer of `T` using the element type's natural alignment.
pub fn RingBuffer(comptime T: type) type {
    return RingBufferAligned(T, null);
}
/// A growable FIFO ring buffer of `T` with optional over-alignment.
/// One slot of `items` is always kept empty so `first == last` unambiguously
/// means "empty".
pub fn RingBufferAligned(comptime T: type, comptime alignment: ?u29) type {
    if (alignment) |a| {
        if (a == @alignOf(T)) {
            // Canonicalize: natural alignment is the same type as `null`.
            return RingBufferAligned(T, null);
        }
    }
    return struct {
        pub const Self = @This();
        /// Backing storage; capacity is items.len - 1 usable elements.
        items: Slice,
        /// Index of the oldest element.
        first: usize,
        /// Index one past the newest element.
        last: usize,
        allocator: *std.mem.Allocator,
        pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
        pub const SliceConst = if (alignment) |a| ([]align(a) const T) else []const T;
        /// Deinitialize with `deinit` or use `toOwnedSlice`.
        pub fn init(allocator: *std.mem.Allocator) !Self {
            return Self{
                .items = try allocator.alloc(T, 2),
                .first = 0,
                .last = 0,
                .allocator = allocator,
            };
        }
        /// Initialize with capacity to hold at least `num` elements.
        /// Deinitialize with `deinit` or use `toOwnedSlice`.
        pub fn initCapacity(allocator: *std.mem.Allocator, num: usize) !Self {
            // Bug fix: `init` returns an error union and must be unwrapped
            // with `try`; additionally, the initial 2-element allocation was
            // leaked when it was replaced below.
            var self = try Self.init(allocator);
            errdefer self.deinit();
            const new_items = try allocator.allocAdvanced(T, alignment, num, .at_least);
            self.allocator.free(self.items);
            self.items = new_items;
            return self;
        }
        /// Release all allocated memory.
        pub fn deinit(self: Self) void {
            self.allocator.free(self.items);
        }
        /// Modify the array so that it can hold at least `new_capacity` items.
        /// Invalidates pointers if additional memory is needed.
        pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
            var better_capacity = self.items.len;
            if (better_capacity > new_capacity) return;
            while (true) {
                better_capacity += better_capacity / 2 + 8;
                if (better_capacity >= new_capacity) break;
            }
            // TODO This can be optimized to avoid needlessly copying undefined memory.
            const new_memory = try self.allocator.allocAdvanced(T, alignment, better_capacity, .at_least);
            if (self.last > self.first) {
                // Contiguous contents: one copy, then rebase to index 0.
                std.mem.copy(T, new_memory, self.items[self.first..self.last]);
                self.allocator.free(self.items);
                self.last -= self.first;
                self.first = 0;
                self.items = new_memory;
            } else if (self.last < self.first) {
                // Wrapped contents: copy tail then head, rebase to index 0.
                std.mem.copy(T, new_memory, self.items[self.first..]);
                std.mem.copy(T, new_memory[self.items.len - self.first ..], self.items[0..self.last]);
                self.allocator.free(self.items);
                self.last += self.items.len - self.first;
                self.first = 0;
                self.items = new_memory;
            } else {
                // Empty buffer: nothing to copy, but the new memory still has
                // to be installed. Bug fix: previously `new_memory` was
                // leaked here and the buffer never actually grew.
                self.allocator.free(self.items);
                self.items = new_memory;
                self.last = 0;
                self.first = 0;
            }
        }
        /// Number of elements currently stored.
        pub fn len(self: *Self) usize {
            if (self.last >= self.first) {
                return self.last - self.first;
            } else {
                return self.last + self.items.len - self.first;
            }
        }
        /// Increase length by 1, returning pointer to the new item.
        /// The returned pointer becomes invalid when the list resized.
        pub fn addOne(self: *Self) !*T {
            const newlen = self.len() + 1;
            try self.ensureCapacity(newlen);
            return self.addOneAssumeCapacity();
        }
        /// Increase length by 1, returning pointer to the new item.
        /// Asserts that there is already space for the new item without allocating more.
        /// **Does not** invalidate pointers.
        /// The returned pointer becomes invalid when the list resized.
        pub fn addOneAssumeCapacity(self: *Self) *T {
            assert(self.len() < self.items.len);
            const result = &self.items[self.last];
            self.last = (self.last + 1) % self.items.len;
            return result;
        }
        /// Append `value` at the back, growing if needed.
        pub fn push(self: *Self, value: T) !void {
            const ptr = try self.addOne();
            ptr.* = value;
        }
        /// Remove and return the oldest element, or null if empty.
        pub fn pop(self: *Self) ?T {
            if (self.first == self.last)
                return null;
            const result = self.items[self.first];
            self.first = (self.first + 1) % self.items.len;
            return result;
        }
        /// Return the element `index` positions from the front without
        /// removing it.
        pub fn at(self: *Self, index: usize) !T {
            if (index >= self.len())
                return error.IndexOutOfBounds;
            return self.items[(self.first + index) % self.items.len];
        }
        /// Non-consuming front-to-back iterator.
        pub const Iterator = struct {
            ringBuffer: *Self,
            index: usize,
            pub fn next(self: *@This()) ?T {
                if (self.index < self.ringBuffer.len()) {
                    self.index += 1;
                    return self.ringBuffer.at(self.index - 1) catch unreachable;
                } else {
                    return null;
                }
            }
        };
        pub fn iterator(self: *Self) Iterator {
            return Iterator{ .ringBuffer = self, .index = 0 };
        }
    };
}
test "RingBuffer.len" {
    const a = testing.allocator;
    var buff = try RingBuffer(usize).init(a);
    defer buff.deinit();
    // len grows by one per addOne, shrinks by one per pop, and popping an
    // empty buffer leaves it at zero.
    var expected: usize = 0;
    testing.expectEqual(expected, buff.len());
    while (expected < 6) {
        _ = try buff.addOne();
        expected += 1;
        testing.expectEqual(expected, buff.len());
    }
    while (expected > 0) {
        _ = buff.pop();
        expected -= 1;
        testing.expectEqual(expected, buff.len());
    }
    _ = buff.pop();
    testing.expectEqual(@as(usize, 0), buff.len());
}
test "RingBuffer.push_and_pop" {
    const a = testing.allocator;
    var buff = try RingBuffer(usize).init(a);
    defer buff.deinit();
    // Push 0..99, checking the length before each push.
    var i: usize = 0;
    while (i < 100) : (i += 1) {
        testing.expectEqual(@as(usize, i), buff.len());
        try buff.push(i);
    }
    // Pop them back out in FIFO order, checking the remaining length.
    i = 0;
    while (i < 100) : (i += 1) {
        testing.expectEqual(@as(usize, 100 - i), buff.len());
        testing.expectEqual(@as(?usize, i), buff.pop());
    }
    // Exhausted: pop yields null.
    testing.expectEqual(@as(usize, 0), buff.len());
    testing.expectEqual(@as(?usize, null), buff.pop());
}
test "RingBuffer.iterator" {
    const a = testing.allocator;
    var buff = try RingBuffer(usize).init(a);
    defer buff.deinit();
    var i: usize = 0;
    while (i < 100) : (i += 1) {
        try buff.push(i);
    }
    // The iterator yields elements front-to-back without consuming them.
    var it = buff.iterator();
    i = 0;
    while (i < 100) : (i += 1) {
        testing.expectEqual(@as(?usize, i), it.next());
    }
    testing.expectEqual(@as(?usize, null), it.next());
}
// Interleaved push/pop sequence that forces the indices to wrap around the
// small initial capacity while preserving FIFO order.
test "RingBuffer" {
    const a = testing.allocator;
    var buff = try RingBuffer(usize).init(a);
    defer buff.deinit();
    try buff.push(0);
    testing.expectEqual(@as(usize, 1), buff.len());
    testing.expectEqual(@as(?usize, 0), buff.pop());
    try buff.push(1);
    testing.expectEqual(@as(usize, 1), buff.len());
    try buff.push(2);
    testing.expectEqual(@as(usize, 2), buff.len());
    testing.expectEqual(@as(?usize, 1), buff.pop());
    try buff.push(3);
    testing.expectEqual(@as(usize, 2), buff.len());
    testing.expectEqual(@as(?usize, 2), buff.pop());
    try buff.push(4);
    try buff.push(5);
    testing.expectEqual(@as(?usize, 3), buff.pop());
    try buff.push(6);
    try buff.push(7);
    testing.expectEqual(@as(?usize, 4), buff.pop());
    testing.expectEqual(@as(?usize, 5), buff.pop());
    try buff.push(8);
    testing.expectEqual(@as(?usize, 6), buff.pop());
    testing.expectEqual(@as(?usize, 7), buff.pop());
    try buff.push(9);
    testing.expectEqual(@as(?usize, 8), buff.pop());
    testing.expectEqual(@as(?usize, 9), buff.pop());
    testing.expectEqual(@as(?usize, null), buff.pop())
}
|
src/ring_buffer.zig
|
const std = @import("std");
const Components = @import("components.zig").Components;
const Entity = @import("Entity.zig");
const Entities = @import("Entities.zig");
const MAX_ENTITIES = @import("./main.zig").MAX_ENTITIES;
const Bitset = std.bit_set.StaticBitSet(MAX_ENTITIES);
const benchmark = @import("benchmark");
/// Creates an iterator over the values of the provided argument pointers tuple.
/// The argument must be a tuple containing pointers (const or not) to a struct which
/// provides the following elements or operations:
/// - A field bitset: std.bit_set.StaticBitSet
/// - A function get(u32) -> ?*const T
/// - A function getMut(u32) -> ?*T
/// There is an exception to those rules: if the type is `Entities`, the get function will
/// return *const Entity (not an option (?T)).
pub fn join(elems: anytype) Iter(@TypeOf(elems)) {
    // Start from the first storage's bitset and intersect the rest into it.
    var bitset = elems.@"0".*.bitset;
    // Start at the largest possible id so `min` below picks the smallest
    // next_id across all inputs. Bug fix: this previously used
    // `std.math.inf_u32`, which is the bit pattern of f32 infinity
    // (0x7F800000), not the maximum u32, so storages with a next_id above
    // that value would not have been clamped correctly.
    var next_id: u32 = std.math.maxInt(u32);
    inline for (std.meta.fields(@TypeOf(elems))) |field| {
        if (!std.mem.eql(u8, field.name, "0")) {
            bitset.setIntersection(@field(elems, field.name).bitset);
        }
        const cur_next_id = @field(elems, field.name).next_id;
        next_id = std.math.min(next_id, cur_next_id);
    }
    return Iter(@TypeOf(elems)){
        .bitset = bitset,
        .inputs = elems,
        .next_id = next_id,
    };
}
/// Iterator type returned by `join`; see `join` for the constraints placed
/// on `input_types`.
pub fn Iter(comptime input_types: type) type {
    return struct {
        bitset: Bitset = undefined,
        inputs: input_types,
        // Exclusive upper bound for ids: the minimum next_id of all joined
        // storages, computed by `join`.
        next_id: u32,
        current_position: u32 = 0,
        // The tuple or anon list here should use the inner types extracted from the out_type arg.
        // Components(T) -> T
        // Entities -> Entity
        pub fn next(this: *@This()) ?extractInnerTypes(input_types) {
            // Skip ids that are not present in every joined storage.
            while (!this.bitset.isSet(this.current_position) and this.current_position < this.next_id) {
                this.current_position += 1;
            }
            if (this.current_position < this.next_id) {
                var ret: extractInnerTypes(input_types) = undefined;
                inline for (std.meta.fields(@TypeOf(this.inputs))) |field| {
                    // Const input pointers use the immutable getter; mutable
                    // ones use getMut.
                    if (@typeInfo(field.field_type).Pointer.is_const) {
                        @field(ret, field.name) = @field(this.inputs, field.name).get(this.current_position) orelse @panic("Iterated over a storage which doesn't have the requested index. The calculated iteration bitset must be wrong.");
                    } else {
                        @field(ret, field.name) = @field(this.inputs, field.name).getMut(this.current_position) orelse @panic("Iterated over a storage which doesn't have the requested index. The calculated iteration bitset must be wrong.");
                    }
                }
                // Needed in case we return, because it wouldn't increase the outer
                // current_position.
                this.current_position += 1;
                return ret;
            } else {
                this.current_position += 1;
                return null;
            }
        }
    };
}
/// Converts a type which is tuple of pointers to containers into a tuple of the
/// internal types of those containers.
/// .{*Entities, *Components(u32)} -> .{Entity, u32}
fn extractInnerTypes(comptime args: type) type {
    comptime const args_info = @typeInfo(args);
    if (args_info != .Struct or !args_info.Struct.is_tuple) {
        @compileError("Argument must be a tuple.");
    }
    // One output type per tuple element.
    comptime var types: [std.meta.fields(args).len]type = undefined;
    inline for (std.meta.fields(args)) |arg, i| {
        const arg_info = @typeInfo(arg.field_type);
        if (arg_info != .Pointer) {
            @compileError("Elements inside of the tuple must be pointers.");
        }
        const child = arg_info.Pointer.child;
        const typename = @typeName(child);
        const typename_len = typename.len;
        const child_info = @typeInfo(child);
        if (child_info != .Struct) {
            @compileError("Elements pointed to inside of the tuple must be structs.");
        }
        // Resolve each container's declared InnerType (see getInnerType).
        types[i] = getInnerType(child_info, arg_info, typename_len, typename);
    }
    comptime const tuple = std.meta.Tuple(&types);
    return tuple;
}
/// Looks up the `InnerType` declaration of the pointed-to container and maps
/// it to the element type that `Iter.next` yields: `Entities` yields the
/// inner type by value, every other container yields a pointer to it with
/// the input pointer's constness carried over.
fn getInnerType(
    comptime child_info: std.builtin.TypeInfo,
    comptime ptr_info: std.builtin.TypeInfo,
    comptime typename_len: u32,
    comptime typename: *const [typename_len:0]u8,
) type {
    inline for (child_info.Struct.decls) |decl| {
        if (std.mem.eql(u8, decl.name, "InnerType")) {
            if (std.mem.eql(u8, typename, "Entities")) {
                // Entities.get returns the entity by (const) value.
                return decl.data.Type;
            } else {
                // Build *InnerType / *const InnerType, mirroring the
                // qualifiers of the caller's container pointer.
                comptime var ret_type = std.builtin.TypeInfo{
                    .Pointer = std.builtin.TypeInfo.Pointer{
                        .size = .One,
                        .is_const = ptr_info.Pointer.is_const,
                        .is_volatile = ptr_info.Pointer.is_volatile,
                        .alignment = @alignOf(decl.data.Type),
                        .child = decl.data.Type,
                        .is_allowzero = ptr_info.Pointer.is_allowzero,
                        .sentinel = ptr_info.Pointer.sentinel,
                        //.sentinel = ?decl.data.Type,
                    },
                };
                return @Type(ret_type);
            }
        }
    }
    @compileError("Failed to find InnerType: type inside of the provided type.");
}
// Verifies that extractInnerTypes compiles for a minimal container declaring
// an InnerType.
test "extract inner types" {
    const MyStruct = struct {
        const InnerType = u32;
    };
    const my_struct = MyStruct{};
    const new_types = extractInnerTypes(@TypeOf(.{&my_struct}));
    // This test only checks that the comptime extraction compiles; discard
    // the result to avoid an unused-variable error.
    _ = new_types;
}
// Joining a single const Entities storage yields one element per created
// entity.
test "Iter Entities" {
    var entities = try Entities.init(std.testing.allocator);
    defer entities.deinit();
    _ = entities.create();
    _ = entities.create();
    _ = entities.create();
    _ = entities.create();
    const entities_ptr: *const Entities = &entities;
    var count = @as(u32, 0);
    var iter = join(.{entities_ptr});
    // NOTE(review): the `e` capture is unused; newer Zig versions reject
    // unused captures — confirm the targeted compiler version tolerates it.
    while (iter.next()) |e| {
        count += 1;
    }
    try std.testing.expect(count == 4);
}
// Only entity index 1 is present in all three storages, so the join must
// yield exactly one tuple, with const/mut pointer access mixed.
test "simple join" {
    var comps1 = try Components(u32).init(std.testing.allocator);
    defer comps1.deinit();
    var comps2 = try Components(u33).init(std.testing.allocator);
    defer comps2.deinit();
    var comps3 = try Components(u34).init(std.testing.allocator);
    defer comps3.deinit();
    _ = try comps1.insert(Entity{ .index = 0, .generation = 0 }, 0);
    _ = try comps1.insert(Entity{ .index = 1, .generation = 0 }, 1);
    _ = try comps2.insert(Entity{ .index = 1, .generation = 0 }, 2);
    _ = try comps3.insert(Entity{ .index = 1, .generation = 0 }, 3);
    // One const input among mutable ones exercises the get/getMut split.
    const comps2_ptr: *const Components(u33) = &comps2;
    var count = @as(u32, 0);
    var iter = join(.{ &comps1, comps2_ptr, &comps3 });
    while (iter.next()) |tuple| {
        count += 1;
        try std.testing.expect(tuple.@"0".* == 1);
        try std.testing.expect(tuple.@"1".* == 2);
        try std.testing.expect(tuple.@"2".* == 3);
    }
    try std.testing.expect(count == 1);
}
// Benchmark: measures the cost of constructing (not iterating) a join over
// two component storages holding 10000 entities each.
test "Benchmark Join" {
    const b = struct {
        fn bench(ctx: *benchmark.Context) void {
            const A = struct {
                v: f32,
            };
            const B = struct {
                v: f32,
            };
            var entities = Entities.init(std.testing.allocator) catch unreachable;
            defer entities.deinit();
            var a_comps = Components(A).init(std.testing.allocator) catch unreachable;
            defer a_comps.deinit();
            // Bug fix: this variable was named `b`, shadowing the enclosing
            // `const b`, which Zig rejects as a compile error.
            var b_comps = Components(B).init(std.testing.allocator) catch unreachable;
            defer b_comps.deinit();
            var count = @as(u32, 0);
            while (count < 10000) : (count += 1) {
                const e = entities.create();
                _ = a_comps.insert(e, A{ .v = 1.0 }) catch unreachable;
                _ = b_comps.insert(e, B{ .v = 1.0 }) catch unreachable;
            }
            const b_ptr: *const Components(B) = &b_comps;
            while (ctx.run()) {
                // Only join construction is being timed; discard the
                // iterator so the local is not flagged as unused.
                var iter = join(.{ &a_comps, b_ptr });
                _ = iter;
            }
        }
    }.bench;
    benchmark.benchmark("join", b);
}
// Benchmark body: measures iteration speed of a two-storage join over 10000
// entities, mutating component A with component B's value each step.
fn benchIterSpeed(ctx: *benchmark.Context) void {
    const A = struct {
        v: f32,
    };
    const B = struct {
        v: f32,
    };
    // Fix: `alloc` is never reassigned, so it must be `const` (an unmutated
    // `var` is a compile error in current Zig).
    const alloc = std.testing.allocator;
    //const alloc = std.heap.c_allocator;
    var entities = Entities.init(alloc) catch unreachable;
    defer entities.deinit();
    var a = Components(A).init(alloc) catch unreachable;
    defer a.deinit();
    var b = Components(B).init(alloc) catch unreachable;
    defer b.deinit();
    var count = @as(u32, 0);
    while (count < 10000) : (count += 1) {
        const e = entities.create();
        _ = a.insert(e, A{ .v = 1.0 }) catch unreachable;
        _ = b.insert(e, B{ .v = 1.0 }) catch unreachable;
    }
    const b_ptr: *const Components(B) = &b;
    while (ctx.run()) {
        var iter = join(.{ &a, b_ptr });
        while (iter.next()) |tuple| {
            //var ptr1 = tuple.@"0";
            //const ptr2 = tuple.@"1";
            //ptr1.v += ptr2.v;
            tuple.@"0".*.v += tuple.@"1".*.v;
        }
    }
}
// Registers the join-iteration benchmark with the benchmark runner.
test "Benchmark Join Iter Speed" {
    benchmark.benchmark("join iter speed", benchIterSpeed);
}
|
src/join.zig
|
const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const dlist = @import("dlist.zig");
const ListNode = dlist.ListNode;
const params = @import("params.zig");
//===========================================================================//
// Unsigned bitmask with one bit per chunk of a hyperblock
// (params.chunk_denominator bits wide).
const HyperblockMask = @IntType(false, params.chunk_denominator);
// Unsigned integer just wide enough to hold a chunk index / shift amount
// for HyperblockMask.
const HyperblockMaskShift =
    @IntType(false, std.math.log2_int_ceil(u32, params.chunk_denominator));
/// Returns the header type stored at the start of every hyperblock belonging
/// to a heap of type `Heap`.
pub fn HyperblockHeader(comptime Heap: type) type {
    return packed struct {
        /// This field always stores params.hyperblock_magic_number.
        magic_number: u32,
        /// The heap that this hyperblock belongs to.
        heap: *Heap,
        /// A bitmask indicating which chunks in the hyperblock are currently
        /// allocated (1) or free (0). The lowest bit indicates the first
        /// chunk, and the second-to-highest bit indicates the last chunk (the
        /// highest bit is unused).
        allocated_mask: HyperblockMask,
        /// Intrusive doubly-linked list node.
        list_node: ListNode,
    };
}
/// Returns the hyperblock type for heaps of type `Heap`: one header chunk
/// followed by `params.hyperblock_num_chunks` data chunks, with per-chunk
/// allocation state tracked in the header's bitmask.
pub fn Hyperblock(comptime Heap: type) type {
    return packed struct {
        const ThisType = this;
        header: HyperblockHeader(Heap),
        // Pads the header out to a full chunk so `chunks` starts on a chunk
        // boundary.
        _padding: [params.chunk_size - @sizeOf(HyperblockHeader(Heap))]u8,
        chunks: [params.hyperblock_num_chunks][params.chunk_size]u8,

        /// Allocates a chunk-aligned hyperblock from `child_allocator`,
        /// initializes its header, registers its initial all-free span in
        /// `free_span_lists`, and links it into `hyperblock_list`.
        pub fn init(heap: *Heap, hyperblock_list: *HyperblockList(Heap),
                    free_span_lists: *FreeSpanLists(Heap),
                    child_allocator: *Allocator) !*Hyperblock(Heap) {
            // Allocate a new hyperblock from the child allocator:
            var hyperblocks = try child_allocator.alignedAlloc(
                Hyperblock(Heap), params.chunk_size, 1);
            var hyperblock = &hyperblocks[0];
            // Initialize header values:
            hyperblock.header.magic_number = params.hyperblock_magic_number;
            hyperblock.header.heap = heap;
            // NOTE(review): the mask starts at 1 (bit 0 set), so isEmpty()
            // is false for a brand-new hyperblock even though its whole span
            // is free — confirm this is intended.
            hyperblock.header.allocated_mask = 1;
            hyperblock.header.list_node.init();
            // Initialize header for free span:
            var free_span = hyperblock.freeSpanStartingAt(0);
            free_span.init(hyperblock, params.hyperblock_num_chunks);
            free_span_lists.insert(free_span);
            // Add the hyperblock to the list.
            hyperblock_list.insert(hyperblock);
            return hyperblock;
        }

        /// Unlinks this (empty) hyperblock from its lists and returns its
        /// memory to `child_allocator`.
        pub fn deinit(self: *ThisType, child_allocator: *Allocator) void {
            assert(self.isEmpty());
            var free_span = self.freeSpanStartingAt(0);
            assert(free_span.hyperblock == self);
            assert(free_span.num_chunks == params.hyperblock_num_chunks);
            free_span.list_node.remove();
            self.header.list_node.remove();
            child_allocator.destroy(self);
        }

        /// Returns true if none of the chunks in this hyperblock are currently
        /// allocated, false otherwise.
        pub fn isEmpty(self: *const ThisType) bool {
            return self.header.allocated_mask == 0;
        }

        /// Carves an allocation of `size` bytes out of `span`, splitting any
        /// remainder back into `free_span_lists`. A pointer back to this
        /// hyperblock is stashed at the end of the allocation's last chunk so
        /// the owner can be recovered later (see allocSpanHyperblockPtr).
        pub fn allocFrom(self: *ThisType, span: *FreeSpanHeader(Heap),
                         size: usize,
                         free_span_lists: *FreeSpanLists(Heap)) []u8 {
            assert(span.hyperblock == self);
            assert(span.num_chunks > 0);
            assert(@ptrToInt(span) % params.chunk_size == 0);
            assert(@ptrToInt(span) >= @ptrToInt(&self.chunks[0]));
            const start_chunk =
                @divExact(@ptrToInt(span) - @ptrToInt(&self.chunks[0]),
                          params.chunk_size);
            assert(start_chunk < params.hyperblock_num_chunks);
            assert(start_chunk + span.num_chunks <=
                   params.hyperblock_num_chunks);
            const num_chunks = numChunksForSize(size);
            assert(num_chunks >= 1);
            assert(num_chunks <= params.allocated_span_max_num_chunks);
            assert(num_chunks <= span.num_chunks);
            // Bits covering the chunks being allocated.
            const mask = ((HyperblockMask(1) << num_chunks) - 1) <<
                @intCast(HyperblockMaskShift, start_chunk);
            // NOTE(review): this asserts no chunk *outside* the new
            // allocation is currently allocated, which can only hold while
            // the hyperblock has at most one live span — verify whether
            // `& mask == 0` was intended instead.
            assert(self.header.allocated_mask & ~mask == 0);
            // Update free span header.
            span.list_node.remove();
            if (span.num_chunks > num_chunks) {
                // Split the unused tail of the span back into a smaller free
                // span and reinsert it.
                const new_start = start_chunk + num_chunks;
                const new_free_span =
                    @ptrCast(*FreeSpanHeader(Heap),
                             @alignCast(params.chunk_size,
                                        &self.chunks[new_start]));
                new_free_span.init(self, span.num_chunks - num_chunks);
                free_span_lists.insert(new_free_span);
            }
            self.header.allocated_mask |= mask;
            // Put a pointer to this hyperblock at the end of the last chunk.
            assert(num_chunks * usize(params.chunk_size) -
                   @sizeOf(@typeOf(self)) >= size);
            @ptrCast(*?*ThisType,
                     @alignCast(@sizeOf(?*ThisType),
                                &self.chunks[start_chunk + num_chunks - 1]
                                            [params.chunk_size - @sizeOf(?*ThisType)])).* =
                self;
            var new_mem = @ptrCast([*]u8, &self.chunks[start_chunk])[0..size];
            // In debug mode, memset the allocated portion of the span to 0xaa,
            // and the unallocated portion of the span (other than the
            // hyperblock pointer) to 0xdd.
            if (builtin.mode == builtin.Mode.Debug) {
                std.mem.set(u8, new_mem, params.allocated_byte_memset);
                const start = @ptrToInt(new_mem.ptr) + new_mem.len;
                const len = usize(num_chunks) * params.chunk_size -
                    (new_mem.len + @sizeOf(?*ThisType));
                std.mem.set(u8, @intToPtr([*]u8, start)[0..len],
                            params.deallocated_byte_memset);
            }
            return new_mem;
        }

        /// Returns `old_mem` (previously produced by allocFrom) to the free
        /// pool, coalescing with adjacent free spans on either side.
        pub fn free(self: *ThisType, old_mem: []u8,
                    free_span_lists: *FreeSpanLists(Heap)) void {
            assert(old_mem.len <= params.allocated_span_max_size);
            assert(@ptrToInt(old_mem.ptr) % params.chunk_size == 0);
            assert(@ptrToInt(old_mem.ptr) >= @ptrToInt(&self.chunks[0]));
            const start_chunk =
                @intCast(HyperblockMaskShift,
                         @divExact(@ptrToInt(old_mem.ptr) -
                                   @ptrToInt(&self.chunks[0]),
                                   params.chunk_size));
            assert(start_chunk < params.hyperblock_num_chunks);
            const num_chunks = numChunksForSize(old_mem.len);
            assert(num_chunks >= 1);
            assert(num_chunks <= params.allocated_span_max_num_chunks);
            assert(start_chunk + num_chunks <= params.hyperblock_num_chunks);
            const mask = ((HyperblockMask(1) << num_chunks) - 1) <<
                @intCast(HyperblockMaskShift, start_chunk);
            assert(self.header.allocated_mask & mask == mask);
            // Merge prev/next free spans, if any.
            var prev_free_span: ?*FreeSpanHeader(Heap) = blk: {
                // If the chunk just before this span is free, locate the free
                // span ending there (via the highest allocated bit below
                // start_chunk) and unlink it for merging.
                if (start_chunk > 0 and
                    self.header.allocated_mask &
                    @shlExact(HyperblockMask(1),
                              (start_chunk - 1)) == 0) {
                    var span = self.freeSpanStartingAt(
                        HyperblockMask.bit_count -
                        @clz(self.header.allocated_mask &
                             (@shlExact(HyperblockMask(1),
                                        start_chunk) - 1)));
                    span.list_node.remove();
                    break :blk span;
                }
                break :blk null;
            };
            var next_free_span: ?*FreeSpanHeader(Heap) = blk: {
                // If the chunk just after this span is free, unlink the free
                // span starting there for merging.
                if (start_chunk + num_chunks < params.hyperblock_num_chunks and
                    (self.header.allocated_mask >>
                     (start_chunk + num_chunks)) & 1 == 0) {
                    var span =
                        self.freeSpanStartingAt(start_chunk + num_chunks);
                    span.list_node.remove();
                    break :blk span;
                }
                break :blk null;
            };
            // The merged span starts at the previous span if one exists,
            // otherwise at the freed region itself.
            var new_free_span =
                prev_free_span orelse self.freeSpanStartingAt(start_chunk);
            new_free_span.init(
                self,
                (if (prev_free_span) |span| span.num_chunks else 0) +
                num_chunks +
                (if (next_free_span) |span| span.num_chunks else 0));
            free_span_lists.insert(new_free_span);
            // Mark this span as free.
            self.header.allocated_mask &= ~mask;
        }

        // Reinterprets chunk `idx` as a free-span header.
        fn freeSpanStartingAt(self: *ThisType,
                              idx: usize) *FreeSpanHeader(Heap) {
            return @ptrCast(*FreeSpanHeader(Heap),
                            @alignCast(params.chunk_size, &self.chunks[idx]));
        }

        /// Number of chunks needed to hold `size` bytes plus the trailing
        /// hyperblock back-pointer.
        pub fn numChunksForSize(size: usize) HyperblockMaskShift {
            const num_chunks =
                @divFloor(size + @sizeOf(*ThisType) + params.chunk_size - 1,
                          params.chunk_size);
            assert(num_chunks <= params.hyperblock_num_chunks);
            return @intCast(HyperblockMaskShift, num_chunks);
        }

        /// Recovers the owning hyperblock from an allocation produced by
        /// allocFrom, or returns null when `mem` cannot be such an
        /// allocation (too large or not chunk-aligned).
        pub fn allocSpanHyperblockPtr(mem: []u8) ?*ThisType {
            if (mem.len > params.allocated_span_max_size or
                @ptrToInt(mem.ptr) % params.chunk_size != 0) {
                return null;
            }
            const num_chunks = numChunksForSize(mem.len);
            return @intToPtr(*?*ThisType,
                             @ptrToInt(mem.ptr) +
                             params.chunk_size * usize(num_chunks) -
                             @sizeOf(?*ThisType)).*;
        }
    };
}
/// Returns an intrusive doubly-linked list type of hyperblocks for a heap of
/// type `Heap`. deinit frees every hyperblock still linked in.
pub fn HyperblockList(comptime Heap: type) type {
    return struct {
        node: ListNode,
        /// Initializes this list to empty.
        pub fn init(self: *this) void {
            self.node.init();
        }
        /// Frees all hyperblocks in this list.
        fn deinit(self: *this, child_allocator: *Allocator) void {
            while (!self.isEmpty()) {
                // Recover the hyperblock from the intrusive node at the head
                // of the list; its deinit also unlinks it, shrinking the
                // list until empty.
                const header =
                    @fieldParentPtr(HyperblockHeader(Heap), "list_node",
                                    self.node.next);
                const hyperblock =
                    @fieldParentPtr(Hyperblock(Heap), "header", header);
                hyperblock.deinit(child_allocator);
            }
        }
        /// Returns true if the list is empty.
        fn isEmpty(self: *const this) bool {
            return self.node.isEmpty();
        }
        /// Links `hyperblock` in at the head of this list.
        fn insert(self: *this, hyperblock: *Hyperblock(Heap)) void {
            hyperblock.header.list_node.insertAfter(&self.node);
        }
    };
}
//===========================================================================//
/// Returns the header type written at the start of every free span (a run of
/// contiguous free chunks) inside a hyperblock for a heap of type `Heap`.
pub fn FreeSpanHeader(comptime Heap: type) type {
    return struct {
        const ThisType = this;
        hyperblock: *Hyperblock(Heap),
        list_node: ListNode,
        num_chunks: u32,
        /// Initializes this span header in place. `self` must point at the
        /// first chunk of a chunk-aligned span of `num_chunks` chunks that
        /// lies inside `hyperblock` (after its header chunk).
        fn init(self: *ThisType, hyperblock: *Hyperblock(Heap),
                num_chunks: u32) void {
            assert(num_chunks >= 1);
            assert(num_chunks <= params.hyperblock_num_chunks);
            assert(@ptrToInt(self) % params.chunk_size == 0);
            assert(@ptrToInt(self) > @ptrToInt(hyperblock));
            assert(@ptrToInt(self) <
                   @ptrToInt(hyperblock) + params.hyperblock_size);
            self.hyperblock = hyperblock;
            self.list_node.init();
            self.num_chunks = num_chunks;
            // In debug mode, memset the rest of the free span to 0xdd.
            if (builtin.mode == builtin.Mode.Debug) {
                const header_size = @sizeOf(ThisType);
                const start = @ptrToInt(self) + header_size;
                const len = num_chunks * params.chunk_size - header_size;
                std.mem.set(u8, @intToPtr([*]u8, start)[0..len],
                            params.deallocated_byte_memset);
            }
        }
    };
}
/// Returns an intrusive doubly-linked list type of free spans for a heap of
/// type `Heap`.
pub fn FreeSpanList(comptime Heap: type) type {
    return struct {
        node: ListNode,
        /// Initializes this list to empty.
        pub fn init(self: *this) void {
            self.node.init();
        }
        /// Returns true if the list is empty.
        fn isEmpty(self: *const this) bool {
            return self.node.isEmpty();
        }
        /// Returns the free span at the head of the list, or null if the list
        /// is empty.
        fn head(self: *const this) ?*FreeSpanHeader(Heap) {
            if (self.isEmpty()) {
                return null;
            } else {
                return @fieldParentPtr(FreeSpanHeader(Heap), "list_node",
                                       self.node.next);
            }
        }
        /// Links `free_span` in at the head of this list.
        fn insert(self: *this, free_span: *FreeSpanHeader(Heap)) void {
            free_span.list_node.insertAfter(&self.node);
        }
    };
}
/// Returns a size-segregated collection of free-span lists: list `i` holds
/// spans of exactly `i + 1` chunks, except the last list, which also
/// receives all larger spans.
pub fn FreeSpanLists(comptime Heap: type) type {
    return struct {
        lists: [params.allocated_span_max_num_chunks]FreeSpanList(Heap),
        /// Initializes every list to empty.
        pub fn init(self: *this) void {
            for (self.lists) |*list| {
                list.init();
            }
        }
        /// Asserts that every list has been emptied before teardown.
        pub fn deinit(self: *this) void {
            for (self.lists) |*list| {
                assert(list.isEmpty());
            }
        }
        /// Returns the list holding the largest (and oversized) spans.
        pub fn lastList(self: *this) *FreeSpanList(Heap) {
            return &self.lists[params.allocated_span_max_num_chunks - 1];
        }
        /// Inserts `free_span` into the list matching its chunk count.
        pub fn insert(self: *this, free_span: *FreeSpanHeader(Heap)) void {
            self.listForNumChunks(free_span.num_chunks).insert(free_span);
        }
        /// Maps a chunk count to its list; counts beyond the maximum all map
        /// to the last list.
        fn listForNumChunks(self: *this,
                            num_chunks: usize) *FreeSpanList(Heap) {
            assert(num_chunks >= 1);
            if (num_chunks > params.allocated_span_max_num_chunks) {
                return self.lastList();
            } else {
                return &self.lists[num_chunks - 1];
            }
        }
    };
}
//===========================================================================//
// Compile-time layout checks: both header structs must fit within a single
// chunk, and the hyperblock struct must have exactly the configured size.
test "HyperblockHeader fits in a chunk" {
    comptime assert(@sizeOf(HyperblockHeader(@OpaqueType())) <=
                    params.chunk_size);
}
test "Hyperblock struct size is correct" {
    comptime assert(@sizeOf(Hyperblock(@OpaqueType())) ==
                    params.hyperblock_size);
}
test "FreeSpanHeader fits in a chunk" {
    comptime assert(@sizeOf(FreeSpanHeader(@OpaqueType())) <=
                    params.chunk_size);
}
// Checks the size-to-chunk-count mapping, including the overhead of the
// trailing hyperblock back-pointer.
test "Hyperblock.numChunksForSize" {
    const HB = Hyperblock(@OpaqueType());
    assert(HB.numChunksForSize(1) == 1);
    // Storing `params.chunk_size` bytes actually takes two chunks, because we
    // also have to store the hyperblock pointer.
    assert(HB.numChunksForSize(params.chunk_size) == 2);
    assert(HB.numChunksForSize(params.chunk_size - @sizeOf(*HB)) == 1);
    assert(HB.numChunksForSize(params.chunk_size * 10 + 100) == 11);
}
// End-to-end exercise: create a hyperblock, allocate 1000 bytes from its
// initial all-free span, then free the allocation again.
test "Hyperblock alloc and free" {
    var direct = std.heap.DirectAllocator.init();
    defer direct.deinit();
    const Heap = i32;
    var heap: Heap = 0;
    var free_span_lists: FreeSpanLists(Heap) = undefined;
    free_span_lists.init();
    defer free_span_lists.deinit();
    var hyperblocks: HyperblockList(Heap) = undefined;
    hyperblocks.init();
    defer hyperblocks.deinit(&direct.allocator);
    var hyperblock =
        try Hyperblock(Heap).init(&heap, &hyperblocks, &free_span_lists,
                                  &direct.allocator);
    // The fresh hyperblock's single free span must be in the last
    // (largest-size) free-span list and cover every chunk.
    var last_list = free_span_lists.lastList();
    assert(last_list.node.next != &last_list.node);
    var free_span = @fieldParentPtr(FreeSpanHeader(Heap), "list_node",
                                    last_list.node.next);
    assert(free_span.hyperblock == hyperblock);
    assert(free_span.num_chunks == params.hyperblock_num_chunks);
    var mem = hyperblock.allocFrom(free_span, 1000, &free_span_lists);
    assert(mem.len == 1000);
    hyperblock.free(mem, &free_span_lists);
}
//===========================================================================//
|
src/ziegfried/hblock.zig
|
const std = @import("std");
// The library package exposed to examples and tests under the import name
// "zsw".
const pkg = std.build.Pkg{
    .name = "zsw",
    .path = .{ .path = "src/main.zig" },
};
/// Build entry point: discovers the example programs, wires up the `test`
/// step, and creates one run step per example.
pub fn build(b: *std.build.Builder) !void {
    const target = b.standardTargetOptions(.{});
    const mode = b.standardReleaseOptions();
    const examples = try getExamples(b.allocator);
    addTests(b, mode, examples);
    try createExamples(b, mode, target, examples);
}
/// Registers an executable plus a `zig build <name>` run step for every
/// discovered example.
fn createExamples(b: *std.build.Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget, examples: []const Example) !void {
    for (examples) |example| {
        const example_exe = b.addExecutable(example.name, example.path);
        example_exe.setBuildMode(mode);
        example_exe.setTarget(target);
        example_exe.addPackage(pkg);
        const run = example_exe.run();
        // Step description; allocated from the builder's allocator and not
        // freed here — assumes the builder outlives the build graph (TODO
        // confirm).
        const desc = try std.fmt.allocPrint(b.allocator, "run example '{s}' from '{s}' section", .{ example.name, example.section });
        const example_step = b.step(example.name, desc);
        example_step.dependOn(&run.step);
    }
}
/// Creates the `test` step covering the library and every example, and makes
/// it the default build step.
fn addTests(b: *std.build.Builder, mode: std.builtin.Mode, examples: []const Example) void {
    const test_step = b.step("test", "Run all tests");
    const lib_tests = b.addTest("src/main.zig");
    lib_tests.setBuildMode(mode);
    test_step.dependOn(&lib_tests.step);
    for (examples) |example| {
        const example_test = b.addTest(example.path);
        example_test.setBuildMode(mode);
        example_test.addPackage(pkg);
        test_step.dependOn(&example_test.step);
    }
    b.default_step = test_step;
}
/// Metadata for one example program discovered on disk.
const Example = struct {
    // Examples directory section this example lives in (not owned).
    section: []const u8,
    // Example name; a sub-slice of `path` (see getExamples), so it needs no
    // separate free.
    name: []const u8,
    // Path to the example source file; owned, released by deinit.
    path: []const u8,
    /// Frees the owned `path` buffer (`name` points into it).
    pub fn deinit(self: Example, allocator: std.mem.Allocator) void {
        allocator.free(self.path);
    }
};
// Directory containing this build.zig, resolved at comptime from @src().
const CURRENT_FOLDER = getFileFolder();
fn getFileFolder() []const u8 {
    // dirname returns null for a bare filename; that would mean build.zig
    // sits at the filesystem root, which we treat as fatal.
    return std.fs.path.dirname(@src().file) orelse @panic("root");
}
/// Scans the `examples/<section>` directories and returns an owned slice of
/// Example entries. Caller owns the slice and each entry's `path`.
fn getExamples(allocator: std.mem.Allocator) ![]const Example {
    var examples = std.ArrayList(Example).init(allocator);
    // On error, release every already-collected entry and the list itself.
    errdefer {
        for (examples.items) |s| s.deinit(allocator);
        examples.deinit();
    }
    const example_sections: []const []const u8 = &.{"file_system"};
    inline for (example_sections) |example_section| {
        var examples_dir = try std.fs.cwd().openDir("examples/" ++ example_section, .{ .iterate = true });
        defer examples_dir.close();
        var iter = examples_dir.iterate();
        while (try iter.next()) |entry| {
            if (entry.kind != .File) continue;
            const path = try std.fmt.allocPrint(allocator, CURRENT_FOLDER ++ "/examples/" ++ example_section ++ "/{s}", .{entry.name});
            errdefer allocator.free(path);
            const extension = std.fs.path.extension(path);
            // `name` is the file's stem, sliced out of `path` itself —
            // Example.deinit therefore only frees `path`.
            const name = path[(path.len - entry.name.len)..(path.len - extension.len)];
            try examples.append(.{
                .section = example_section,
                .name = name,
                .path = path,
            });
        }
    }
    return examples.toOwnedSlice();
}
|
build.zig
|
const std = @import("std");
const allocator = std.heap.page_allocator;
const assert = std.debug.assert;
const hash = std.hash;
const math = std.math;
const mem = std.mem;
const deflate = @import("./deflate.zig");
const time = @import("./time.zig");
const zip = @import("./zip.zig");
const UINT16_MAX = math.maxInt(u16);
const UINT32_MAX = math.maxInt(u32);
const VERSION = "2.1";
/// Formats `args` per `fmt` and writes the result to stdout.
fn printf(comptime fmt: []const u8, args: anytype) !void {
    // Bug fix: the allocPrint buffer was never freed, leaking on every call.
    const out = try std.fmt.allocPrint(allocator, fmt, args);
    defer allocator.free(out);
    var stdout = std.io.getStdOut().writer();
    try stdout.writeAll(out);
}
/// Writes a compile-time format string with no arguments to stdout.
fn print(comptime fmt: []const u8) !void {
    return printf(fmt, .{});
}
/// Computes the IEEE CRC-32 checksum of the first `size` bytes at `data`.
fn crc32(data: [*]const u8, size: usize) u32 {
    const bytes = data[0..size];
    return hash.Crc32.hash(bytes);
}
/// Reads the entire contents of `filename` (a NUL-terminated path) into a
/// newly allocated buffer and stores the file's size through `file_sz`.
/// Caller owns the returned slice.
fn read_file(filename: [*:0]const u8, file_sz: *usize) ![]u8 {
    const path = filename[0..mem.indexOfSentinel(u8, 0x0, filename)];
    const file = try std.fs.cwd().openFile(path, .{ .read = true });
    defer file.close();
    const size = (try file.stat()).size;
    file_sz.* = size;
    return try file.reader().readAllAlloc(allocator, size);
}
/// Writes `n` bytes from `data` to `filename` (a NUL-terminated path),
/// creating or truncating the file.
fn write_file(filename: [*:0]const u8, data: [*]const u8, n: usize) !void {
    const path = filename[0..mem.indexOfSentinel(u8, 0x0, filename)];
    const file = try std.fs.cwd().createFile(path, .{ .read = true });
    defer file.close();
    _ = try file.write(data[0..n]);
}
/// Prints the archive comment and the name of every member of the ZIP
/// archive at `filename`. Exits the process on parse failure.
fn list_zip(filename: [*:0]const u8) !void {
    var zip_data: [*]u8 = undefined;
    var zip_sz: usize = 0;
    var z: zip.zip_t = undefined;
    var it: zip.zipiter_t = undefined;
    var m: zip.zipmemb_t = undefined;
    // Bug fix: the format arguments were the corrupted placeholder
    // `.(unknown)`; pass the NUL-terminated filename as a slice.
    try printf("Listing ZIP archive: {s}\n\n", .{filename[0..mem.indexOfSentinel(u8, 0x0, filename)]});
    var zip_data_mem = try read_file(filename, &zip_sz);
    zip_data = zip_data_mem.ptr;
    if (!zip.zip_read(&z, zip_data, zip_sz)) {
        try print("Failed to parse ZIP file!\n");
        std.os.exit(1);
    }
    if (z.comment_len != 0) {
        try printf("{s}\n\n", .{z.comment[0..@intCast(u32, z.comment_len)]});
    }
    it = z.members_begin;
    while (it != z.members_end) : (it = m.next) {
        m = zip.zip_member(&z, it);
        try printf("{s}\n", .{m.name[0..@intCast(u32, m.name_len)]});
    }
    try print("\n");
    allocator.free(zip_data_mem);
}
/// Returns a newly allocated copy of `str[0..n]` followed by a trailing NUL
/// byte (total length n + 1). Caller owns the returned slice.
fn terminate_str(str: [*]const u8, n: usize) ![]u8 {
    var buf = try allocator.alloc(u8, n + 1);
    var i: usize = 0;
    while (i < n) : (i += 1) {
        buf[i] = str[i];
    }
    buf[n] = 0x00;
    return buf;
}
/// Extracts every top-level file member of the ZIP archive at `filename`
/// into the current directory, verifying each member's CRC-32. Directories
/// and files inside directories are skipped. Exits the process on failure.
fn extract_zip(filename: [*:0]const u8) !void {
    var zip_data: [*]u8 = undefined;
    var zip_sz: usize = 0;
    var z: zip.zip_t = undefined;
    var it: zip.zipiter_t = undefined;
    var m: zip.zipmemb_t = undefined;
    var tname: [*:0]const u8 = undefined;
    var uncomp_data: [*]u8 = undefined;
    // Bug fix: the format arguments were the corrupted placeholder
    // `.(unknown)`; pass the NUL-terminated filename as a slice.
    try printf("Extracting ZIP archive: {s}\n\n", .{filename[0..mem.indexOfSentinel(u8, 0x0, filename)]});
    var zip_data_mem = try read_file(filename, &zip_sz);
    zip_data = zip_data_mem.ptr;
    if (!zip.zip_read(&z, zip_data, zip_sz)) {
        try print("Failed to read ZIP file!\n");
        std.os.exit(1);
    }
    if (z.comment_len != 0) {
        try printf("{s}\n\n", .{z.comment[0..@intCast(u32, z.comment_len)]});
    }
    it = z.members_begin;
    while (it != z.members_end) : (it = m.next) {
        m = zip.zip_member(&z, it);
        if (m.is_dir) {
            try printf(" (Skipping dir: {s})\n", .{m.name[0..@intCast(u32, m.name_len)]});
            continue;
        }
        // Only extract members directly at the archive root.
        if (mem.indexOfScalar(u8, m.name[0..m.name_len], '/') != null or
            mem.indexOfScalar(u8, m.name[0..m.name_len], '\\') != null)
        {
            try printf(" (Skipping file in dir: {s})\n", .{m.name[0..@intCast(u32, m.name_len)]});
            continue;
        }
        switch (m.method) {
            zip.method_t.ZIP_STORE => try print(" Extracting: "),
            zip.method_t.ZIP_SHRINK => try print(" Unshrinking: "),
            zip.method_t.ZIP_REDUCE1,
            zip.method_t.ZIP_REDUCE2,
            zip.method_t.ZIP_REDUCE3,
            zip.method_t.ZIP_REDUCE4,
            => try print(" Expanding: "),
            zip.method_t.ZIP_IMPLODE => try print(" Exploding: "),
            zip.method_t.ZIP_DEFLATE => try print(" Inflating: "),
            else => unreachable,
        }
        try printf("{s}", .{m.name[0..@intCast(u32, m.name_len)]});
        var uncomp_data_mem = try allocator.alloc(u8, m.uncomp_size);
        uncomp_data = uncomp_data_mem.ptr;
        if (!try zip.zip_extract_member(&m, uncomp_data)) {
            try print(" Error: decompression failed!\n");
            std.os.exit(1);
        }
        if (crc32(uncomp_data, m.uncomp_size) != m.crc32) {
            try print(" Error: CRC-32 mismatch!\n");
            std.os.exit(1);
        }
        // The member name is not NUL-terminated; make a terminated copy for
        // write_file.
        var tname_mem = try terminate_str(m.name, m.name_len);
        tname = @ptrCast([*:0]const u8, tname_mem.ptr);
        try write_file(tname, uncomp_data, m.uncomp_size);
        try print("\n");
        allocator.free(uncomp_data_mem);
        allocator.free(tname_mem);
    }
    try print("\n");
    allocator.free(zip_data_mem);
}
/// Progress callback invoked once per archived file while writing a ZIP;
/// prints the compression method, filename, and (for compressed methods)
/// the compression ratio. Output errors are deliberately ignored.
fn zip_callback(
    filename: [*:0]const u8,
    method: zip.method_t,
    size: u32,
    comp_size: u32,
) zip.CallbackError!void {
    switch (method) {
        zip.method_t.ZIP_STORE => print(" Stored: ") catch {},
        zip.method_t.ZIP_SHRINK => print(" Shrunk: ") catch {},
        zip.method_t.ZIP_REDUCE1 => print(" Reduced: ") catch {},
        zip.method_t.ZIP_REDUCE2 => print(" Reduced: ") catch {},
        zip.method_t.ZIP_REDUCE3 => print(" Reduced: ") catch {},
        zip.method_t.ZIP_REDUCE4 => print(" Reduced: ") catch {},
        zip.method_t.ZIP_IMPLODE => print(" Imploded: ") catch {},
        zip.method_t.ZIP_DEFLATE => print(" Deflated: ") catch {},
        else => unreachable,
    }
    // Bug fix: the format arguments were the corrupted placeholder
    // `.(unknown)`; pass the NUL-terminated filename as a slice.
    printf("{s}", .{filename[0..mem.indexOfSentinel(u8, 0x0, filename)]}) catch {};
    if (method != zip.method_t.ZIP_STORE) {
        assert(size != 0); // "Empty files should use Store."
        printf(" ({d:.0}%)", .{100.0 - 100.0 * @intToFloat(f64, comp_size) / @intToFloat(f64, size)}) catch {};
    }
    print("\n") catch {};
}
/// Creates a ZIP archive at `zip_filename` containing the `n` files named in
/// `filenames`, using `method` for compression and embedding `comment` if
/// non-null. Exits the process if a file is too large or the archive cannot
/// be sized.
fn create_zip(
    zip_filename: [*:0]const u8,
    comment: ?[*:0]const u8,
    method: zip.method_t,
    n: u16,
    filenames: [*][*:0]const u8,
) !void {
    var mtime: time.time_t = undefined;
    var mtimes: [*]time.time_t = undefined;
    var file_data: [*][*]u8 = undefined;
    var file_sizes: [*]u32 = undefined;
    var file_size: usize = 0;
    var zip_size: usize = 0;
    var zip_data: [*]u8 = undefined;
    var i: u16 = 0;
    try printf("Creating ZIP archive: {s}\n\n", .{zip_filename});
    if (comment != null) {
        try printf("{s}\n\n", .{comment.?[0..mem.indexOfSentinel(u8, 0x00, comment.?)]});
    }
    // All members share the current time (seconds) as their mtime.
    mtime = @divTrunc(std.time.milliTimestamp(), 1000);
    var file_mem = try allocator.alloc([]u8, n);
    var file_data_mem = try allocator.alloc([*]u8, n);
    file_data = file_data_mem.ptr;
    var file_sizes_mem = try allocator.alloc(u32, n);
    file_sizes = file_sizes_mem.ptr;
    var mtimes_mem = try allocator.alloc(time.time_t, n);
    mtimes = mtimes_mem.ptr;
    i = 0;
    while (i < n) : (i += 1) {
        file_mem[i] = try read_file(filenames[i], &file_size);
        file_data[i] = file_mem[i].ptr;
        if (file_size >= UINT32_MAX) {
            try printf("{s} is too large!\n", .{filenames[i]});
            std.os.exit(1);
        }
        file_sizes[i] = @intCast(u32, file_size);
        mtimes[i] = mtime;
    }
    zip_size = zip.zip_max_size(n, filenames, file_sizes, comment);
    if (zip_size == 0) {
        try print("zip writing not possible");
        std.os.exit(1);
    }
    var zip_data_mem = try allocator.alloc(u8, zip_size);
    zip_data = zip_data_mem.ptr;
    zip_size = try zip.zip_write(
        zip_data,
        n,
        filenames,
        file_data,
        file_sizes,
        mtimes,
        comment,
        method,
        zip_callback,
    );
    try write_file(zip_filename, zip_data, zip_size);
    try print("\n");
    // Bug fix: only the table of slices was freed; each per-file content
    // buffer returned by read_file was leaked. Free the contents first.
    for (file_mem) |fm| {
        allocator.free(fm);
    }
    allocator.free(file_mem);
    allocator.free(zip_data_mem);
    allocator.free(mtimes_mem);
    allocator.free(file_sizes_mem);
    allocator.free(file_data_mem);
}
/// Prints command-line usage information for the program invoked as `argv0`.
fn print_usage(argv0: []u8) !void {
    try print("Usage:\n\n");
    try printf(" {s} list <zipfile>\n", .{argv0});
    try printf(" {s} extract <zipfile>\n", .{argv0});
    try printf(" {s} create <zipfile> [-m <method>] [-c <comment>] <files...>\n", .{argv0});
    try print("\n");
    try print(" Supported compression methods: \n");
    try print(" store, shrink, reduce, implode, deflate (default).\n");
    try print("\n");
}
/// Tries to parse a `-m <method>` pair at position `i.*` of `argv`. On
/// success stores the method in `m`, advances `i.*` past both tokens, and
/// returns true; returns false when the tokens do not form a method flag.
/// Exits the process on an unrecognized method name.
fn parse_method_flag(argc: usize, argv: [][:0]u8, i: *u32, m: *zip.method_t) !bool {
    var method_i = i.* + 1;
    if (method_i >= argc) { // verify that -m is followed by a method name
        // NOTE(review): when "-m" is the *last* argument this returns false
        // and "-m" will later be treated as a filename — confirm intended.
        return false;
    }
    if (!mem.eql(u8, argv[i.*], "-m")) {
        return false;
    }
    if (mem.eql(u8, argv[method_i], "store")) {
        m.* = zip.method_t.ZIP_STORE;
    } else if (mem.eql(u8, argv[method_i], "shrink")) {
        m.* = zip.method_t.ZIP_SHRINK;
    } else if (mem.eql(u8, argv[method_i], "reduce")) {
        m.* = zip.method_t.ZIP_REDUCE4;
    } else if (mem.eql(u8, argv[method_i], "implode")) {
        m.* = zip.method_t.ZIP_IMPLODE;
    } else if (mem.eql(u8, argv[method_i], "deflate")) {
        m.* = zip.method_t.ZIP_DEFLATE;
    } else {
        try print_usage(argv[0]);
        try printf("Unknown compression method: '{s}'.\n", .{argv[method_i]});
        std.os.exit(1);
    }
    // Consume both the flag and its value.
    i.* += 2;
    return true;
}
/// Tries to parse a `-c <comment>` pair at position `i.*` of `argv`. On
/// success stores the comment pointer in `c`, advances `i.*` past both
/// tokens, and returns true; otherwise leaves everything untouched and
/// returns false.
fn parse_comment_flag(argc: usize, argv: [][:0]u8, i: *u32, c: *?[*:0]u8) bool {
    const value_i = i.* + 1;
    // A comment value must exist after the flag position.
    if (value_i >= argc) return false;
    // The token at the current position must literally be "-c".
    if (!mem.eql(u8, argv[i.*], "-c")) return false;
    c.* = argv[value_i].ptr;
    i.* += 2;
    return true;
}
/// CLI entry point: dispatches to list / extract / create based on argv, and
/// returns the process exit code (1 on bad usage).
pub fn main() !u8 {
    var argv = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, argv);
    var argc = argv.len;
    var comment: ?[*:0]u8 = null;
    var method: zip.method_t = zip.method_t.ZIP_DEFLATE;
    var i: u32 = 0;
    try print("\n");
    try print("HWZIP " ++ VERSION ++ " -- A simple ZIP program ");
    try print("from https://www.hanshq.net/zip.html\n");
    try print("\n");
    if (argc == 3 and mem.eql(u8, argv[1], "list")) {
        try list_zip(argv[2]);
    } else if (argc == 3 and mem.eql(u8, argv[1], "extract")) {
        try extract_zip(argv[2]);
    } else if (argc >= 3 and mem.eql(u8, argv[1], "create")) {
        // Flags follow argv[2] (the archive name); consume -m/-c pairs in
        // any order until neither matches.
        i = 3;
        while ((try parse_method_flag(argc, argv, &i, &method)) or
            parse_comment_flag(argc, argv, &i, &comment))
        {}
        assert(i <= argc);
        // Everything after the flags is an input filename.
        var files_count = @intCast(u16, argc - i);
        var filenames = try allocator.alloc([*:0]const u8, files_count);
        defer allocator.free(filenames);
        for (argv[i..]) |_, fi| {
            filenames[fi] = argv[fi + i];
        }
        try create_zip(argv[2], comment, method, files_count, filenames.ptr);
    } else {
        try print_usage(argv[0]);
        return 1;
    }
    return 0;
}
|
src/hwzip.zig
|
const std = @import("std");
const shared = @import("shared.zig");
const zsw = @import("zsw");
const builtin = @import("builtin");
const log = std.log.scoped(.subcommand);
// All available subcommand implementations; dispatch matches each type's
// `name` declaration against the executable's basename (see execute).
pub const SUBCOMMANDS = [_]type{
    @import("subcommands/basename.zig"),
    @import("subcommands/dirname.zig"),
    @import("subcommands/false.zig"),
    @import("subcommands/groups.zig"),
    @import("subcommands/true.zig"),
    @import("subcommands/yes.zig"),
};
// Errors `execute` can return: dispatch failure plus real subcommand errors.
pub const ExecuteError = error{
    NoSubcommand,
} || SubcommandErrors;
// Genuine failures a subcommand can propagate to callers.
const SubcommandErrors = error{
    OutOfMemory,
    UnableToParseArguments,
};
// Control-flow "errors" used to request help/version output; they are
// translated in executeSubcommand and never escape to callers.
const SubcommandNonErrors = error{
    Help,
    Version,
};
// The full error set a subcommand's execute() may return.
pub const Error = SubcommandErrors || SubcommandNonErrors;
/// Dispatches to the subcommand whose `name` matches `basename` and runs it.
/// Returns `error.NoSubcommand` when no subcommand matches.
pub fn execute(
    allocator: std.mem.Allocator,
    arg_iter: *std.process.ArgIterator,
    io: anytype,
    basename: []const u8,
    system: zsw.System,
    exe_path: []const u8,
) ExecuteError!u8 {
    const z = shared.tracy.traceNamed(@src(), "execute");
    defer z.end();
    z.addText(basename);
    z.addText(exe_path);
    // Comptime-unrolled dispatch over the known subcommand types.
    inline for (SUBCOMMANDS) |subcommand| {
        if (std.mem.eql(u8, subcommand.name, basename)) return executeSubcommand(
            subcommand,
            allocator,
            arg_iter,
            io,
            system,
            exe_path,
        );
    }
    return error.NoSubcommand;
}
/// Runs a single subcommand, translating the Help/Version control-flow
/// "errors" into their respective outputs (each returns an exit code); all
/// other errors are narrowed and propagated.
fn executeSubcommand(
    comptime subcommand: type,
    allocator: std.mem.Allocator,
    arg_iter: anytype,
    io: anytype,
    system: zsw.System,
    exe_path: []const u8,
) SubcommandErrors!u8 {
    const z = shared.tracy.traceNamed(@src(), "execute subcommand");
    defer z.end();
    var arg_iterator = shared.ArgIterator(@TypeOf(arg_iter)).init(arg_iter);
    return subcommand.execute(allocator, io, &arg_iterator, system, exe_path) catch |err| switch (err) {
        error.Help => shared.printHelp(subcommand, io, exe_path),
        error.Version => shared.printVersion(subcommand, io),
        else => |narrow_err| narrow_err,
    };
}
/// Test harness: runs `subcommand` with `arguments`, taking optional
/// stdin/stdout/stderr/system overrides from the `settings` struct. Anything
/// not provided defaults to a void reader/writer and the host system.
pub fn testExecute(comptime subcommand: type, arguments: []const []const u8, settings: anytype) SubcommandErrors!u8 {
    const SettingsType = @TypeOf(settings);
    const stdin = if (@hasField(SettingsType, "stdin")) settings.stdin else VoidReader.reader();
    const stdout = if (@hasField(SettingsType, "stdout")) settings.stdout else VoidWriter.writer();
    const stderr = if (@hasField(SettingsType, "stderr")) settings.stderr else VoidWriter.writer();
    const system = if (@hasField(SettingsType, "system")) settings.system else zsw.host_system;
    var arg_iter = SliceArgIterator{ .slice = arguments };
    return executeSubcommand(
        subcommand,
        std.testing.allocator,
        &arg_iter,
        .{
            .stderr = stderr,
            .stdin = stdin,
            .stdout = stdout,
        },
        system,
        subcommand.name,
    );
}
/// Asserts that running `subcommand` with `arguments` exits with code 1 and
/// writes a message containing `expected_error` to stderr. `settings` must
/// not provide its own stderr — it is captured here.
pub fn testError(
    comptime subcommand: type,
    arguments: []const []const u8,
    settings: anytype,
    expected_error: []const u8,
) !void {
    const SettingsType = @TypeOf(settings);
    if (@hasField(SettingsType, "stderr")) @compileError("there is already a stderr defined on this settings type");
    const stdin = if (@hasField(SettingsType, "stdin")) settings.stdin else VoidReader.reader();
    const stdout = if (@hasField(SettingsType, "stdout")) settings.stdout else VoidWriter.writer();
    const system = if (@hasField(SettingsType, "system")) settings.system else zsw.host_system;
    // Capture stderr so the expected error text can be searched for below.
    var stderr = std.ArrayList(u8).init(std.testing.allocator);
    defer stderr.deinit();
    try std.testing.expectEqual(
        @as(u8, 1),
        try testExecute(
            subcommand,
            arguments,
            .{
                .stderr = stderr.writer(),
                .stdin = stdin,
                .stdout = stdout,
                .system = system,
            },
        ),
    );
    try std.testing.expect(std.mem.indexOf(u8, stderr.items, expected_error) != null);
}
/// Verifies that both `--help` and `-h` print the subcommand's usage text
/// and exit with code 0, even when the system backend always fails (help
/// must not touch the system).
pub fn testHelp(comptime subcommand: type) !void {
    const expected = try std.fmt.allocPrint(std.testing.allocator, subcommand.usage, .{subcommand.name});
    defer std.testing.allocator.free(expected);
    var out = std.ArrayList(u8).init(std.testing.allocator);
    // This defer releases whichever list `out` holds at scope exit — i.e.
    // the second list after the reassignment below.
    defer out.deinit();
    var always_fail_system = try AlwaysFailSystem.init(std.testing.allocator);
    defer always_fail_system.deinit();
    try std.testing.expectEqual(
        @as(u8, 0),
        try testExecute(
            subcommand,
            &.{"--help"},
            .{
                .stdout = out.writer(),
                .system = always_fail_system.backend.system(),
            },
        ),
    );
    try std.testing.expectEqualStrings(expected, out.items);
    // Reset the output buffer before exercising the short flag.
    out.deinit();
    out = std.ArrayList(u8).init(std.testing.allocator);
    try std.testing.expectEqual(
        @as(u8, 0),
        try testExecute(
            subcommand,
            &.{"-h"},
            .{ .stdout = out.writer() },
        ),
    );
    try std.testing.expectEqualStrings(expected, out.items);
}
/// Verifies that `--version` prints the shared version string for the
/// subcommand and exits with code 0, even when the system backend always
/// fails.
pub fn testVersion(comptime subcommand: type) !void {
    var out = std.ArrayList(u8).init(std.testing.allocator);
    defer out.deinit();
    var always_fail_system = try AlwaysFailSystem.init(std.testing.allocator);
    defer always_fail_system.deinit();
    try std.testing.expectEqual(
        @as(u8, 0),
        try testExecute(
            subcommand,
            &.{"--version"},
            .{
                .stdout = out.writer(),
                .system = always_fail_system.backend.system(),
            },
        ),
    );
    const expected = try std.fmt.allocPrint(std.testing.allocator, shared.version_string, .{subcommand.name});
    defer std.testing.allocator.free(expected);
    try std.testing.expectEqualStrings(expected, out.items);
}
/// A zsw system backend with host fallback disabled, so any system access a
/// subcommand attempts during a test fails instead of reaching the real
/// host. Used to prove that --help/--version never touch the system.
const AlwaysFailSystem = struct {
    backend: BackendType,
    const BackendType = zsw.Backend(.{ .fallback_to_host = false });
    /// Creates the failing backend; caller must call deinit.
    pub fn init(allocator: std.mem.Allocator) !AlwaysFailSystem {
        var backend = try BackendType.init(allocator, .{});
        errdefer backend.deinit();
        return AlwaysFailSystem{
            .backend = backend,
        };
    }
    /// Releases the backend's resources.
    pub fn deinit(self: *AlwaysFailSystem) void {
        self.backend.deinit();
    }
};
/// An argument iterator backed by an in-memory slice of strings, used to
/// feed canned arguments to subcommands in tests.
const SliceArgIterator = struct {
    slice: []const []const u8,
    index: usize = 0,

    /// Returns the next argument, or null once the slice is exhausted.
    pub fn next(self: *SliceArgIterator) ?[]const u8 {
        if (self.index >= self.slice.len) return null;
        const arg = self.slice[self.index];
        self.index += 1;
        return arg;
    }
};
/// A reader that is permanently at end-of-stream: every read reports 0 bytes.
const VoidReader = struct {
    pub const Reader = std.io.Reader(void, error{}, read);

    /// Returns the always-empty reader.
    pub fn reader() Reader {
        return Reader{ .context = {} };
    }

    fn read(_: void, _: []u8) error{}!usize {
        return 0;
    }
};
/// A writer that discards everything while reporting full success:
/// `write` claims all bytes were written without storing them anywhere.
const VoidWriter = struct {
    pub const Writer = std.io.Writer(void, error{}, write);

    /// Returns the discarding writer.
    pub fn writer() Writer {
        return .{ .context = {} };
    }

    fn write(_: void, bytes: []const u8) error{}!usize {
        // Pretend everything was written; the data itself is dropped.
        // (The previous `_ = bytes;` discard was redundant — `bytes` is used
        // below, and a pointless discard is a compile error in modern Zig.)
        return bytes.len;
    }
};
// Reference every declaration in this file so that nested `test` blocks and
// unused decls are semantically analyzed by `zig test`.
comptime {
    std.testing.refAllDecls(@This());
}
|
src/subcommands.zig
|
const std = @import("std");
/// A compile-time-configured logger: when `is_enabled` is false, calls
/// compile down to nothing. `show_thread_id` prefixes output with the
/// registered name of the calling thread (see `nameThisThread`).
const Logger = struct {
    is_enabled: bool,
    show_thread_id: bool,

    /// Formats and prints one line via `warn`, if this logger is enabled.
    pub fn print(comptime self: Logger, comptime fmt: []const u8, args: anytype) void {
        if (self.is_enabled) warn(self.show_thread_id, fmt, args);
    }

    /// Pretty-prints an arbitrary value via `deep_print`, if enabled.
    pub fn deepPrint(comptime self: Logger, prefix: []const u8, something: anytype) void {
        if (self.is_enabled) deep_print(prefix, something);
    }
};
// Predefined logger channels. Flip `is_enabled` to turn a channel on; since
// the flags are comptime, disabled channels cost nothing at runtime.
// NOTE(review): the exact events each channel covers are defined by callers
// elsewhere in the project — names below are descriptive, confirm at call sites.
pub const thread_lifecycle = Logger{ .is_enabled = false, .show_thread_id = true };
pub const testing = Logger{ .is_enabled = true, .show_thread_id = true };
pub const happening = Logger{ .is_enabled = false, .show_thread_id = true };
pub const record_macro = Logger{ .is_enabled = false, .show_thread_id = false };
/// Formats one log line into a stack buffer and emits it with a single
/// `std.debug.warn` call. With `show_thread_id`, the line is prefixed by the
/// calling thread's registered name and client id; panics if the thread was
/// never registered via `nameThisThread*`, or if any 4 KiB buffer overflows.
/// NOTE(review): `thread_names.items` is read here without holding `mutex`,
/// unlike the writers below — looks like a potential data race; confirm.
fn warn(comptime show_thread_id: bool, comptime fmt: []const u8, args: anytype) void {
    // format to a buffer, then write in a single (or as few as possible)
    // posix write calls so that the output from multiple processes
    // doesn't interleave on the same line.
    var buffer: [0x1000]u8 = undefined;
    var buffer1: [0x1000]u8 = undefined;
    var buffer2: [0x1000]u8 = undefined;
    if (show_thread_id) {
        // Linear scan for the current thread's registered name.
        const debug_thread_id = blk: {
            const me = std.Thread.getCurrentId();
            for (thread_names.items) |it| {
                if (it.thread_id == me) break :blk it;
            }
            @panic("thread not named");
        };
        // Stage the prefix and message separately, then join into one line.
        const prefix: []const u8 = std.fmt.bufPrint(buffer1[0..], "{}({})", .{ debug_thread_id.name, debug_thread_id.client_id }) catch @panic("make the buffer bigger");
        const msg: []const u8 = std.fmt.bufPrint(buffer2[0..], fmt, args) catch @panic("make the buffer bigger");
        const line: []const u8 = std.fmt.bufPrint(buffer[0..], "{}: {}\n", .{ prefix, msg }) catch @panic("make the buffer bigger");
        std.debug.warn("{}", .{line});
    } else {
        // No prefix: format directly with a trailing newline appended at comptime.
        std.debug.warn("{}", .{std.fmt.bufPrint(buffer[0..], fmt ++ "\n", args) catch {
            @panic("make the buffer bigger");
        }});
    }
}
// Guards mutation of `thread_names` in nameThisThread*/unnameThisThread.
var mutex = std.Thread.Mutex{};
// NOTE(review): intentionally empty — presumably exists so callers have an
// explicit module init hook; confirm at call sites.
pub fn init() void {}
// One registered thread: its OS id plus a human-readable name and client id.
const DebugThreadId = struct {
    thread_id: std.Thread.Id,
    name: []const u8,
    client_id: u32,
};
// The registry is backed by a fixed 4 KiB buffer so registration never touches
// a general-purpose heap; appending past capacity fails (handled by callers).
var _thread_names_buffer = [_]u8{0} ** 0x1000;
var _thread_names_allocator = std.heap.FixedBufferAllocator.init(_thread_names_buffer[0..]);
var thread_names = std.ArrayList(DebugThreadId).init(&_thread_names_allocator.allocator);
/// Registers the current thread under `name` with a default client id of 0.
pub fn nameThisThread(name: []const u8) void {
    nameThisThreadWithClientId(name, 0);
}
/// Registers the current thread in `thread_names` under `name`/`client_id`,
/// so `warn` can prefix log lines with it. Asserts the thread is not already
/// registered and that the (name, client_id) pair is unique; panics when the
/// fixed-size registry is full. Thread-safe (takes `mutex`).
pub fn nameThisThreadWithClientId(name: []const u8, client_id: u32) void {
    var held = mutex.acquire();
    defer held.release();
    const thread_id = std.Thread.getCurrentId();
    // Sanity checks against double registration. (A redundant trailing
    // `continue;` was removed from this loop body — it was a dead statement.)
    for (thread_names.items) |it| {
        std.debug.assert(it.thread_id != thread_id);
        std.debug.assert(!(std.mem.eql(u8, it.name, name) and it.client_id == client_id));
    }
    thread_names.append(DebugThreadId{
        .thread_id = thread_id,
        .name = name,
        .client_id = client_id,
    }) catch @panic("too many threads");
}
/// Removes the current thread's entry from `thread_names`; panics if the
/// thread was never registered. Thread-safe (takes `mutex`).
pub fn unnameThisThread() void {
    var held = mutex.acquire();
    defer held.release();
    const me = std.Thread.getCurrentId();
    // for-else: the else branch runs only if the loop finishes without
    // returning, i.e. the current thread was not found in the registry.
    for (thread_names.items) |it, i| {
        if (it.thread_id == me) {
            // Order within the registry does not matter, so swapRemove is fine.
            _ = thread_names.swapRemove(i);
            return;
        }
    } else {
        @panic("thread not named");
    }
}
/// i kinda wish std.fmt did this.
/// Pretty-prints `something` to stderr (recursively, with indentation),
/// preceded by `prefix` and followed by a newline. Handles ArrayLists,
/// u32-keyed HashMaps, pointers, slices, arrays, structs, tagged unions,
/// enums and void specially; everything else falls back to `{}` formatting.
fn deep_print(prefix: []const u8, something: anytype) void {
    std.debug.warn("{}", .{prefix});
    // Anonymous struct so the generic `recurse` can call itself.
    struct {
        pub fn recurse(obj: anytype, comptime indent: comptime_int) void {
            const T = @TypeOf(obj);
            const indentation = ("    " ** indent)[0..];
            // ArrayLists print as their backing slice.
            if (comptime std.mem.startsWith(u8, @typeName(T), "std.array_list.AlignedArrayList(")) {
                return recurse(obj.items, indent);
            }
            // u32-keyed hash maps print as `{ key: value, ... }`, one entry per line.
            if (comptime std.mem.startsWith(u8, @typeName(T), "std.hash_map.HashMap(u32,")) {
                if (obj.count() == 0) {
                    return std.debug.warn("{{}}", .{});
                }
                std.debug.warn("{{", .{});
                var iterator = obj.iterator();
                while (iterator.next()) |kv| {
                    std.debug.warn("\n{} {}: ", .{ indentation, kv.key });
                    recurse(kv.value, indent + 1);
                    std.debug.warn(",", .{});
                }
                return std.debug.warn("\n{}}}", .{indentation});
            }
            switch (@typeInfo(T)) {
                .Pointer => |ptr_info| switch (ptr_info.size) {
                    // Single-item pointers are dereferenced and printed directly.
                    .One => return recurse(obj.*, indent),
                    // Slices print as `[ elem, ... ]`, one element per line.
                    .Slice => {
                        if (obj.len == 0) {
                            return std.debug.warn("[]", .{});
                        }
                        std.debug.warn("[\n", .{});
                        for (obj) |x| {
                            std.debug.warn("{} ", .{indentation});
                            recurse(x, indent + 1);
                            std.debug.warn(",\n", .{});
                        }
                        return std.debug.warn("{}]", .{indentation});
                    },
                    else => {},
                },
                // Arrays print like slices of themselves.
                .Array => {
                    return recurse(obj[0..], indent);
                },
                .Struct => |StructT| {
                    // Heuristic: small structs print on one line, larger ones multiline.
                    const multiline = @sizeOf(T) >= 12;
                    // NOTE(review): `field_i` appears unused — leftover from a refactor?
                    comptime var field_i = 0;
                    std.debug.warn(".{{", .{});
                    inline for (StructT.fields) |field, i| {
                        if (i > 0) {
                            if (!multiline) {
                                std.debug.warn(", ", .{});
                            }
                        } else if (!multiline) {
                            std.debug.warn(" ", .{});
                        }
                        if (multiline) {
                            std.debug.warn("\n{} ", .{indentation});
                        }
                        std.debug.warn(".{} = ", .{field.name});
                        if (std.mem.eql(u8, field.name, "terrain")) {
                            // hide terrain, because it's so bulky.
                            std.debug.warn("<...>", .{});
                        } else {
                            recurse(@field(obj, field.name), indent + 1);
                        }
                        if (multiline) std.debug.warn(",", .{});
                    }
                    if (multiline) {
                        std.debug.warn("\n{}}}", .{indentation});
                    } else {
                        std.debug.warn(" }}", .{});
                    }
                    return;
                },
                .Union => |info| {
                    // Tagged unions print as `.{ .tag = payload }`; the inline for
                    // selects the active field by comparing tag values at runtime.
                    if (info.tag_type) |_| {
                        std.debug.warn(".{{ .{} = ", .{@tagName(obj)});
                        inline for (info.fields) |u_field| {
                            if (@enumToInt(obj) == u_field.enum_field.?.value) {
                                recurse(@field(obj, u_field.name), indent);
                            }
                        }
                        std.debug.warn(" }}", .{});
                        return;
                    }
                },
                .Enum => |info| {
                    return std.debug.warn(".{}", .{@tagName(obj)});
                },
                .Void => {
                    return std.debug.warn("{{}}", .{});
                },
                else => {},
            }
            // Fallback: default formatting for ints, floats, bools, etc.
            return std.debug.warn("{}", .{obj});
        }
    }.recurse(something, 0);
    std.debug.warn("\n", .{});
}
|
src/core/debug.zig
|
pub const Kind = enum {
    L,
    T,
    V,
    LV,
    LVT,
};

/// `syllableType` maps the code point to its Hangul Syllable Type.
///
/// Leading consonants (L), vowels (V) and trailing consonants (T) are plain
/// code point ranges. For the precomposed syllable block U+AC00..U+D7A3, the
/// Unicode composition formula S = 0xAC00 + (L * 21 + V) * 28 + T implies a
/// syllable is LV exactly when its offset from 0xAC00 is a multiple of 28
/// (i.e. T == 0); every other precomposed syllable is LVT. This closed form
/// reproduces the generated HangulSyllableType.txt table exactly.
pub fn syllableType(cp: u21) ?Kind {
    return switch (cp) {
        0x1100...0x115F, 0xA960...0xA97C => .L,
        0x1160...0x11A7, 0xD7B0...0xD7C6 => .V,
        0x11A8...0x11FF, 0xD7CB...0xD7FB => .T,
        0xAC00...0xD7A3 => if ((cp - 0xAC00) % 28 == 0) Kind.LV else Kind.LVT,
        else => null,
    };
}
|
.gyro/ziglyph-jecolon-github.com-c37d93b6/pkg/src/autogen/hangul_syllable_type.zig
|
const std = @import("std");
const c = @import("c.zig").c;
const Window = @import("Window.zig");
const Error = @import("errors.zig").Error;
const getError = @import("errors.zig").getError;
const internal_debug = @import("internal_debug.zig");
/// Makes the OpenGL or OpenGL ES context of `window` current on the calling
/// thread, or detaches the current context when `window` is null.
///
/// A context must only be current on a single thread at a time, and each
/// thread can have only a single current context. When moving a context
/// between threads, make it non-current on the old thread first. By default,
/// making a context non-current implicitly flushes the pipeline; on machines
/// supporting `GL_KHR_context_flush_control` this can be controlled with the
/// glfw.context_release_behavior hint.
///
/// The window must have an OpenGL or OpenGL ES context; otherwise
/// Error.NoWindowContext is generated.
///
/// Possible errors: glfw.Error.NotInitialized (asserted away),
/// glfw.Error.NoWindowContext and glfw.Error.PlatformError.
///
/// @thread_safety This function may be called from any thread.
///
/// see also: context_current, glfwGetCurrentContext
pub inline fn makeContextCurrent(window: ?Window) error{ NoWindowContext, PlatformError }!void {
    internal_debug.assertInitialized();
    c.glfwMakeContextCurrent(if (window) |w| w.handle else null);
    getError() catch |err| switch (err) {
        Error.NoWindowContext, Error.PlatformError => |e| return e,
        // NotInitialized is ruled out by the assertion above.
        else => unreachable,
    };
}
/// Returns the window whose OpenGL or OpenGL ES context is current on the
/// calling thread, or null if no window's context is current.
///
/// Possible errors: glfw.Error.NotInitialized (asserted away).
///
/// @thread_safety This function may be called from any thread.
///
/// see also: context_current, glfwMakeContextCurrent
pub inline fn getCurrentContext() ?Window {
    internal_debug.assertInitialized();
    const handle = c.glfwGetCurrentContext() orelse {
        // Only NotInitialized can be reported here, and initialization was
        // already asserted above, so any error is a programming bug.
        getError() catch unreachable;
        return null;
    };
    return Window.from(handle);
}
/// Sets the swap interval (vsync) for the current OpenGL or OpenGL ES context:
/// the number of screen updates to wait after glfw.swapBuffers before swapping
/// the buffers. Sometimes called _vertical synchronization_ or _vsync_.
///
/// Contexts supporting `WGL_EXT_swap_control_tear` or `GLX_EXT_swap_control_tear`
/// also accept _negative_ intervals (allows swapping immediately when a frame is
/// slightly late); check with glfw.extensionSupported. A context must be current
/// on the calling thread or Error.NoCurrentContext results. Does not apply to
/// Vulkan — use your swapchain's present mode instead.
///
/// GLFW does not set a swap interval during context creation (some swap-control
/// extensions cannot reset the interval to zero once set), and some GPU drivers
/// override or ignore the requested interval.
///
/// Possible errors: glfw.Error.NotInitialized (asserted away),
/// glfw.Error.NoCurrentContext and glfw.Error.PlatformError.
///
/// @thread_safety This function may be called from any thread.
///
/// see also: buffer_swap, glfwSwapBuffers
pub inline fn swapInterval(interval: i32) error{ NoCurrentContext, PlatformError }!void {
    internal_debug.assertInitialized();
    c.glfwSwapInterval(@intCast(c_int, interval));
    getError() catch |err| switch (err) {
        Error.NoCurrentContext, Error.PlatformError => |e| return e,
        // NotInitialized is ruled out by the assertion above.
        else => unreachable,
    };
}
/// Returns whether the given API extension (see context_glext) is supported by
/// the current OpenGL or OpenGL ES context. Searches both client API and
/// context creation API extensions.
///
/// A context must be current on the calling thread or Error.NoCurrentContext
/// results. Each call retrieves and searches one or more extension strings, so
/// cache the result for frequently-checked extensions — extension strings do
/// not change during the lifetime of a context. Does not apply to Vulkan; use
/// glfw.getRequiredInstanceExtensions and the `vkEnumerate*ExtensionProperties`
/// functions instead.
///
/// @param[in] extension The ASCII encoded, non-empty name of the extension.
/// @return `true` if the extension is available, `false` otherwise.
///
/// Possible errors: glfw.Error.NotInitialized and glfw.Error.InvalidValue
/// (both asserted away), glfw.Error.NoCurrentContext, glfw.Error.PlatformError.
///
/// @thread_safety This function may be called from any thread.
///
/// see also: context_glext, glfw.getProcAddress
pub inline fn extensionSupported(extension: [:0]const u8) error{ NoCurrentContext, PlatformError }!bool {
    internal_debug.assertInitialized();
    // An empty extension name would trigger Error.InvalidValue; assert instead.
    std.debug.assert(extension.len != 0);
    std.debug.assert(extension[0] != 0);
    const result = c.glfwExtensionSupported(extension.ptr);
    getError() catch |err| switch (err) {
        Error.NoCurrentContext, Error.PlatformError => |e| return e,
        // NotInitialized/InvalidValue are ruled out by the assertions above.
        else => unreachable,
    };
    return result == c.GLFW_TRUE;
}
const builtin = @import("builtin");
/// Client API function pointer type.
///
/// Generic function pointer used for returning client API function pointers.
///
/// Note: the stage1 compiler represented function pointers as bare `fn` types,
/// while later backends require `*const fn`, hence the conditional definition.
///
/// see also: context_glext, glfwGetProcAddress
pub const GLProc = if (builtin.zig_backend == .stage1 or builtin.zig_backend == .other) fn () callconv(.C) void else *const fn () callconv(.C) void;
/// Returns the address of the specified OpenGL / OpenGL ES core or extension
/// function (see context_glext), if it is supported by the current context.
///
/// A context must be current on the calling thread. Does not apply to Vulkan;
/// use glfw.getInstanceProcAddress, `vkGetInstanceProcAddr` and
/// `vkGetDeviceProcAddr` instead.
///
/// To maintain ABI compatibility with the C glfwGetProcAddress (commonly passed
/// directly into GL loader libraries), this function does not return an error:
/// it panics if glfw.Error.NotInitialized, glfw.Error.NoCurrentContext or
/// glfw.Error.PlatformError would occur. Ensure a valid OpenGL context exists
/// and GLFW is initialized before calling.
///
/// The address of a given function is not guaranteed to be the same between
/// contexts, and a non-null address may be returned even when the associated
/// version or extension is unavailable — always check the context version or
/// extension string first.
///
/// @param[in] procname The ASCII encoded name of the function.
/// @return The address of the function, or null if an error occurred.
///
/// @pointer_lifetime Valid until the context is destroyed or the library is
/// terminated.
///
/// @thread_safety This function may be called from any thread.
///
/// see also: context_glext, glfwExtensionSupported
pub fn getProcAddress(proc_name: [*:0]const u8) callconv(.C) ?GLProc {
    internal_debug.assertInitialized();
    const address = c.glfwGetProcAddress(proc_name);
    if (address) |proc| return proc;
    getError() catch |err| @panic(@errorName(err));
    return null;
}
test "makeContextCurrent" {
    const glfw = @import("main.zig");
    try glfw.init(.{});
    defer glfw.terminate();
    // Headless CI environments cannot open windows, so creation failure is
    // treated as a skip rather than a test failure.
    const win = Window.create(640, 480, "Hello, Zig!", null, null, .{}) catch |err| {
        std.debug.print("note: failed to create window: {}\n", .{err});
        return;
    };
    defer win.destroy();
    try glfw.makeContextCurrent(win);
}
test "getCurrentContext" {
    const glfw = @import("main.zig");
    try glfw.init(.{});
    defer glfw.terminate();
    // No context has been made current yet, so the result must be null.
    std.debug.assert(glfw.getCurrentContext() == null);
}
test "swapInterval" {
    const glfw = @import("main.zig");
    try glfw.init(.{});
    defer glfw.terminate();
    // Headless CI environments cannot open windows, so creation failure is
    // treated as a skip rather than a test failure.
    const win = Window.create(640, 480, "Hello, Zig!", null, null, .{}) catch |err| {
        std.debug.print("note: failed to create window: {}\n", .{err});
        return;
    };
    defer win.destroy();
    try glfw.makeContextCurrent(win);
    glfw.swapInterval(1) catch |err| std.debug.print("failed to set swap interval, error={}\n", .{err});
}
test "getProcAddress" {
    const glfw = @import("main.zig");
    try glfw.init(.{});
    defer glfw.terminate();
    // FIX: "<NAME>!" was a leftover template placeholder; use the same window
    // title as the sibling tests in this file.
    const window = Window.create(640, 480, "Hello, Zig!", null, null, .{}) catch |err| {
        // return without fail, because most of our CI environments are headless / we cannot open
        // windows on them.
        std.debug.print("note: failed to create window: {}\n", .{err});
        return;
    };
    defer window.destroy();
    try glfw.makeContextCurrent(window);
    // An unknown symbol is fine: getProcAddress may legitimately return null.
    _ = glfw.getProcAddress("foobar");
}
test "extensionSupported" {
    const glfw = @import("main.zig");
    try glfw.init(.{});
    defer glfw.terminate();
    // FIX: "<NAME>!" was a leftover template placeholder; use the same window
    // title as the sibling tests in this file.
    const window = Window.create(640, 480, "Hello, Zig!", null, null, .{}) catch |err| {
        // return without fail, because most of our CI environments are headless / we cannot open
        // windows on them.
        std.debug.print("note: failed to create window: {}\n", .{err});
        return;
    };
    defer window.destroy();
    try glfw.makeContextCurrent(window);
    _ = glfw.extensionSupported("foobar") catch |err| std.debug.print("failed to check if extension supported, error={}\n", .{err});
}
|
src/opengl.zig
|
const std = @import("std");
const Order = std.math.Order;
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const RangeSet = @This();
const SwitchProngSrc = @import("Module.zig").SwitchProngSrc;
/// All ranges added so far; unsorted until `spans` sorts them in place.
ranges: std.ArrayList(Range),

/// One inclusive [first, last] value range, tagged with the source location
/// of the switch prong it came from (used for duplicate/overlap diagnostics).
pub const Range = struct {
    first: Value,
    last: Value,
    src: SwitchProngSrc,
};
/// Create an empty range set backed by `allocator`.
pub fn init(allocator: std.mem.Allocator) RangeSet {
    return RangeSet{ .ranges = std.ArrayList(Range).init(allocator) };
}
/// Free the backing range list.
pub fn deinit(self: *RangeSet) void {
    self.ranges.deinit();
}
/// Add an inclusive [first, last] range. If it overlaps a previously added
/// range, nothing is inserted and that range's prong source is returned so
/// the caller can report the conflict; otherwise returns null.
pub fn add(
    self: *RangeSet,
    first: Value,
    last: Value,
    ty: Type,
    src: SwitchProngSrc,
) !?SwitchProngSrc {
    for (self.ranges.items) |existing| {
        // Disjoint iff the new range ends before, or starts after, `existing`.
        const disjoint = last.compare(.lt, existing.first, ty) or
            first.compare(.gt, existing.last, ty);
        if (!disjoint) return existing.src; // They overlap.
    }
    try self.ranges.append(.{ .first = first, .last = last, .src = src });
    return null;
}
/// Assumes a and b do not overlap
/// Sort comparator: orders ranges by their first (lowest) value.
fn lessThan(ty: Type, a: Range, b: Range) bool {
    return a.first.compare(.lt, b.first, ty);
}
/// Returns true iff the added ranges, taken together, cover exactly
/// [first, last] with no gaps between consecutive ranges.
/// Side effect: sorts `self.ranges.items` in place.
pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
    if (self.ranges.items.len == 0)
        return false;
    std.sort.sort(Range, self.ranges.items, ty, lessThan);
    // After sorting, coverage requires the outermost bounds to match exactly.
    if (!self.ranges.items[0].first.eql(first, ty) or
        !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty))
    {
        return false;
    }
    var space: Value.BigIntSpace = undefined;
    var counter = try std.math.big.int.Managed.init(self.ranges.allocator);
    defer counter.deinit();
    // look for gaps
    for (self.ranges.items[1..]) |cur, i| {
        // i starts counting from the second item.
        const prev = self.ranges.items[i];
        // prev.last + 1 == cur.first
        // (copy into `counter` first; `space` is reused for cur.first below)
        try counter.copy(prev.last.toBigInt(&space));
        try counter.addScalar(counter.toConst(), 1);
        const cur_start_int = cur.first.toBigInt(&space);
        if (!cur_start_int.eq(counter.toConst())) {
            return false;
        }
    }
    return true;
}
|
src/RangeSet.zig
|
const std = @import("../std.zig");
const mem = std.mem;
const math = std.math;
const debug = std.debug;
const assert = std.debug.assert;
const testing = std.testing;
const htest = @import("test.zig");
/// Gimli permutation state: 12 32-bit words (48 bytes). The first RATE bytes
/// form the rate portion used for absorbing input and squeezing output.
pub const State = struct {
    pub const BLOCKBYTES = 48;
    pub const RATE = 16;
    // TODO: https://github.com/ziglang/zig/issues/2673#issuecomment-501763017
    data: [BLOCKBYTES / 4]u32,

    const Self = @This();

    /// Mutable byte view over the whole 48-byte state.
    pub fn toSlice(self: *Self) []u8 {
        return @sliceToBytes(self.data[0..]);
    }

    /// Read-only byte view over the whole 48-byte state.
    pub fn toSliceConst(self: *Self) []const u8 {
        return @sliceToBytes(self.data[0..]);
    }

    /// Apply the full 24-round Gimli permutation to the state in place.
    pub fn permute(self: *Self) void {
        const state = &self.data;
        var round = @as(u32, 24);
        while (round > 0) : (round -= 1) {
            // Nonlinear layer, applied independently to each of the 4 columns.
            var column = @as(usize, 0);
            while (column < 4) : (column += 1) {
                const x = math.rotl(u32, state[column], 24);
                const y = math.rotl(u32, state[4 + column], 9);
                const z = state[8 + column];
                state[8 + column] = ((x ^ (z << 1)) ^ ((y & z) << 2));
                state[4 + column] = ((y ^ x) ^ ((x | z) << 1));
                state[column] = ((z ^ y) ^ ((x & y) << 3));
            }
            switch (round & 3) {
                0 => {
                    // Every 4th round: swap word pairs and XOR in the round
                    // constant (0x9e377900 | round).
                    mem.swap(u32, &state[0], &state[1]);
                    mem.swap(u32, &state[2], &state[3]);
                    state[0] ^= round | 0x9e377900;
                },
                2 => {
                    // Rounds where round mod 4 == 2: swap across columns.
                    mem.swap(u32, &state[0], &state[2]);
                    mem.swap(u32, &state[1], &state[3]);
                },
                else => {},
            }
        }
    }

    /// Squeeze `out.len` output bytes: permute, then emit up to RATE bytes
    /// of the state at a time until `out` is filled.
    pub fn squeeze(self: *Self, out: []u8) void {
        var i = @as(usize, 0);
        while (i + RATE <= out.len) : (i += RATE) {
            self.permute();
            mem.copy(u8, out[i..], self.toSliceConst()[0..RATE]);
        }
        const leftover = out.len - i;
        if (leftover != 0) {
            // Final partial block.
            self.permute();
            mem.copy(u8, out[i..], self.toSliceConst()[0..leftover]);
        }
    }
};
test "permute" {
    // test vector from gimli-20170627
    var state = State{
        .data = blk: {
            // Build the standard test-vector input: i^3 + i *% 0x9e3779b9.
            var input: [12]u32 = undefined;
            var i = @as(u32, 0);
            while (i < 12) : (i += 1) {
                input[i] = i * i * i + i *% 0x9e3779b9;
            }
            // Sanity-check the generated input words before permuting.
            testing.expectEqualSlices(u32, input, [_]u32{
                0x00000000, 0x9e3779ba, 0x3c6ef37a, 0xdaa66d46,
                0x78dde724, 0x1715611a, 0xb54cdb2e, 0x53845566,
                0xf1bbcfc8, 0x8ff34a5a, 0x2e2ac522, 0xcc624026,
            });
            break :blk input;
        },
    };
    state.permute();
    // Expected state after one full 24-round permutation (reference vector).
    testing.expectEqualSlices(u32, state.data, [_]u32{
        0xba11c85a, 0x91bad119, 0x380ce880, 0xd24c2c68,
        0x3eceffea, 0x277a921c, 0x4f73a0bd, 0xda5a9cd8,
        0x84b673f0, 0x34e52ff7, 0x9e2bef49, 0xf41bb8d6,
    });
}
/// Gimli-Hash: sponge construction over the Gimli permutation.
pub const Hash = struct {
    state: State,
    // Number of rate bytes absorbed since the last permutation (0..RATE).
    buf_off: usize,

    const Self = @This();

    /// Fresh hash state: all-zero Gimli state, empty rate buffer.
    pub fn init() Self {
        return Self{
            .state = State{
                .data = [_]u32{0} ** (State.BLOCKBYTES / 4),
            },
            .buf_off = 0,
        };
    }

    /// Also known as 'absorb'
    pub fn update(self: *Self, data: []const u8) void {
        const buf = self.state.toSlice();
        var in = data;
        while (in.len > 0) {
            var left = State.RATE - self.buf_off;
            if (left == 0) {
                // Rate portion is full: mix it into the state and restart.
                self.state.permute();
                self.buf_off = 0;
                left = State.RATE;
            }
            const ps = math.min(in.len, left);
            // XOR the next chunk of input into the rate bytes.
            for (buf[self.buf_off .. self.buf_off + ps]) |*p, i| {
                p.* ^= in[i];
            }
            self.buf_off += ps;
            in = in[ps..];
        }
    }

    /// Finish the current hashing operation, writing the hash to `out`
    ///
    /// From 4.9 "Application to hashing"
    /// By default, Gimli-Hash provides a fixed-length output of 32 bytes
    /// (the concatenation of two 16-byte blocks). However, Gimli-Hash can
    /// be used as an “extendable one-way function” (XOF).
    pub fn final(self: *Self, out: []u8) void {
        const buf = self.state.toSlice();
        // XOR 1 into the next byte of the state
        buf[self.buf_off] ^= 1;
        // XOR 1 into the last byte of the state, position 47.
        buf[buf.len - 1] ^= 1;
        self.state.squeeze(out);
    }
};
/// One-shot convenience wrapper: absorb `in` and squeeze a digest into `out`.
pub fn hash(out: []u8, in: []const u8) void {
    var hasher = Hash.init();
    hasher.update(in);
    hasher.final(out);
}
test "hash" {
    // a test vector (30) from NIST KAT submission.
    var msg: [58 / 2]u8 = undefined;
    try std.fmt.hexToBytes(&msg, "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C");
    var md: [32]u8 = undefined;
    hash(&md, msg);
    // Expected 32-byte Gimli-Hash digest for the message above.
    htest.assertEqual("1C9A03DC6A5DDC5444CFC6F4B154CFF5CF081633B2CEA4D7D0AE7CCFED5AAA44", md);
}
|
lib/std/crypto/gimli.zig
|
const std = @import("std");
const lexer = @import("lexer.zig");
const parser = @import("parser.zig");
const Ast = parser.Ast;
const AstNode = parser.AstNode;
const Token = lexer.Token;
const ArenaAllocator = std.heap.ArenaAllocator;
const alloc = std.testing.allocator;
const expectEqualSlices = std.testing.expectEqualSlices;
const expectEqualStrings = std.testing.expectEqualStrings;
const expectEqual = std.testing.expectEqual;
test "Lexer.tokenize: blank stuff" {
    // Whitespace-only input must produce zero tokens.
    var toks = try lexer.tokenize(alloc, " ");
    defer toks.deinit();
    try expectEqualSlices(Token, toks.items, &[_]Token{});
}
test "Lexer.tokenize: AND" {
    var toks = try lexer.tokenize(alloc, "&&");
    defer toks.deinit();
    // "&&" lexes to a single AND token at column 0.
    const expected = [_]Token{.{ .kind = .AND, .col = 0, .value = "&&" }};
    try expectEqualSlices(Token, toks.items, &expected);
}
test "Lexer.tokenize: OR" {
    var toks = try lexer.tokenize(alloc, "||");
    defer toks.deinit();
    // "||" lexes to a single OR token at column 0.
    const expected = [_]Token{.{ .kind = .OR, .col = 0, .value = "||" }};
    try expectEqualSlices(Token, toks.items, &expected);
}
test "Lexer.tokenize: PIPE" {
    var toks = try lexer.tokenize(alloc, "|");
    defer toks.deinit();
    // A single "|" lexes to one PIPE token at column 0.
    const expected = [_]Token{.{ .kind = .PIPE, .col = 0, .value = "|" }};
    try expectEqualSlices(Token, toks.items, &expected);
}
test "Lexer.tokenize: CMD" {
    var toks = try lexer.tokenize(alloc, "ls");
    defer toks.deinit();
    // A bare word lexes to one CMD token at column 0.
    const expected = [_]Token{.{ .kind = .CMD, .col = 0, .value = "ls" }};
    try expectEqualSlices(Token, toks.items, &expected);
}
// End-to-end tokenize check: a two-command pipeline yields five tokens
// (CMD, CMD, PIPE, CMD, CMD) with the correct column offsets.
test "Lexer.tokenize: CMD (complete)" {
    var tokens = try lexer.tokenize(alloc, "ls -la | cd ../src");
    defer tokens.deinit();
    try expectEqual(tokens.items.len, 5);
    try expectEqual(tokens.items[0].col, 0);
    try expectEqual(tokens.items[0].kind, .CMD);
    try expectEqualStrings(tokens.items[0].value, "ls");
    try expectEqual(tokens.items[1].col, 3);
    try expectEqual(tokens.items[1].kind, .CMD);
    try expectEqualStrings(tokens.items[1].value, "-la");
    try expectEqual(tokens.items[2].col, 7);
    try expectEqual(tokens.items[2].kind, .PIPE);
    try expectEqualStrings(tokens.items[2].value, "|");
    try expectEqual(tokens.items[3].col, 9);
    try expectEqual(tokens.items[3].kind, .CMD);
    try expectEqualStrings(tokens.items[3].value, "cd");
    try expectEqual(tokens.items[4].col, 12);
    try expectEqual(tokens.items[4].kind, .CMD);
    try expectEqualStrings(tokens.items[4].value, "../src");
}
// A command with two arguments and no operators parses to a single root node.
test "Parser.parse: `ls -la -file *.zig`" {
    var tokens = [_]Token{
        .{ .kind = .CMD, .value = "ls" },
        .{ .kind = .CMD, .value = "-la" },
        .{ .kind = .CMD, .value = "-file *.zig" },
    };
    const ast = try parser.parse(alloc, &tokens);
    defer ast.arena.deinit();
    try expectEqual(ast.op_count, 0);
    try expectEqual(ast.cmd_count, 1);
    try expectEqual(ast.args_count, 2);
    try expectEqual(ast.root.children.items.len, 0);
    // FIX: expectEqual on []const u8 compares slice pointer+length, not
    // contents — it only passed by accident. Compare string contents instead.
    try expectEqualStrings(ast.root.cmd, "ls");
    try expectEqual(ast.root.args.items.len, 2);
}
// A pipeline parses to a PIPE root with one child per command.
test "Parser.parse: `ls -la | cd ../src`" {
    var tokens = [_]Token{
        .{ .kind = .CMD, .value = "ls" },
        .{ .kind = .CMD, .value = "-la" },
        .{ .kind = .PIPE, .value = "|" },
        .{ .kind = .CMD, .value = "cd" },
        .{ .kind = .CMD, .value = "../src" },
    };
    const ast = try parser.parse(alloc, &tokens);
    defer ast.arena.deinit();
    try expectEqual(ast.op_count, 1);
    try expectEqual(ast.cmd_count, 2);
    try expectEqual(ast.args_count, 2);
    try expectEqual(ast.root.kind, .PIPE);
    try expectEqual(ast.root.children.items.len, 2);
    // FIX: expectEqual on []const u8 compares slice pointer+length, not
    // contents — it only passed by accident. Compare string contents instead.
    try expectEqualStrings(ast.root.children.items[0].cmd, "ls");
    try expectEqualStrings(ast.root.children.items[0].args.items[0], "-la");
    try expectEqualStrings(ast.root.children.items[1].cmd, "cd");
    try expectEqualStrings(ast.root.children.items[1].args.items[0], "../src");
}
|
zig/src/_tests.zig
|
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const c = @import("../../c.zig");
const zupnp = @import("../../lib.zig");
const xml = @import("xml");
const Manager = @This();
const logger = std.log.scoped(.@"zupnp.upnp.device.Manager");
/// Book-keeping for one device registered with libupnp: the user's instance
/// pointer plus type-erased callbacks into it.
const RegisteredDevice = struct {
    const DeinitFn = fn (*anyopaque) void;
    const HandleActionFn = fn (*anyopaque, zupnp.upnp.device.ActionRequest) zupnp.upnp.device.ActionResult;
    const HandleEventSubscriptionFn = fn (*anyopaque, zupnp.upnp.device.EventSubscriptionRequest) zupnp.upnp.device.EventSubscriptionResult;

    // Type-erased pointer to the user's device struct (set in createDevice).
    instance: *anyopaque,
    allocator: Allocator,
    // Null when the device type declares no cleanup callback.
    deinitFn: ?DeinitFn,
    handleActionFn: HandleActionFn,
    handleEventSubscriptionFn: HandleEventSubscriptionFn,
    // Assigned by UpnpRegisterRootDevice2 during createDevice.
    device_handle: c.UpnpDevice_Handle = undefined,

    fn deinit(self: *RegisteredDevice) void {
        // Forward teardown to the user's deinit, if one was provided.
        if (self.deinitFn) |deinitFn| {
            deinitFn(self.instance);
        }
    }
};
const DeviceMap = std.StringHashMap(RegisteredDevice);

/// Arena owning device instances and registration-time allocations;
/// freed wholesale in `deinit`.
arena: ArenaAllocator,
/// Registered devices, keyed by UDN.
devices: DeviceMap,
/// Lazily created on the first `createDevice` call; serves SCPD XML.
scpd_endpoint: ?*ScpdEndpoint = null,
/// Create an empty device manager; both the arena and the device map are
/// backed by the caller's allocator.
pub fn init(allocator: Allocator) Manager {
    return Manager{
        .devices = DeviceMap.init(allocator),
        .arena = ArenaAllocator.init(allocator),
    };
}
/// Unregister and tear down every registered device, then free the map
/// and the arena.
pub fn deinit(self: *Manager) void {
    var devices_iter = self.devices.valueIterator();
    // FIX: was `while (devices_iter.next)` — the iterator method was never
    // called, so the loop could not iterate (and does not compile as written).
    while (devices_iter.next()) |dev| {
        dev.deinit();
        _ = c.UpnpUnRegisterRootDevice(dev.device_handle);
    }
    self.devices.deinit();
    self.arena.deinit();
}
/// Register a new UPnP root device of type `T` and return the arena-owned
/// instance (it lives until `Manager.deinit`).
/// `T` must provide `device_type`, a `prepare(allocator, config, *list)`
/// method that fills in service definitions, `handleAction`, and
/// `handleEventSubscription`; `deinit` is optional.
pub fn createDevice(
    self: *Manager,
    comptime T: type,
    device_parameters: zupnp.upnp.definition.UserDefinedDeviceParameters,
    config: anytype,
) !*T {
    if (self.scpd_endpoint == null) {
        // TODO this is stupidly hacky
        // and yeah it has to be a pointer, otherwise it'll copy the server object
        var server = &@fieldParentPtr(zupnp.ZUPnP, "device_manager", self).server;
        self.scpd_endpoint = try server.createEndpoint(ScpdEndpoint, .{ .allocator = self.arena.allocator() }, ScpdEndpoint.base_url);
    }
    // The instance lives in the manager's long-lived arena...
    var instance = try self.arena.allocator().create(T);
    errdefer self.arena.allocator().destroy(instance);
    // ...while registration-only scratch data (service list, XML document,
    // URLs) goes into a temporary arena freed at the end of this call.
    var arena = ArenaAllocator.init(self.arena.allocator());
    defer arena.deinit();
    var service_definitions = std.ArrayList(zupnp.upnp.definition.DeviceServiceDefinition).init(arena.allocator());
    try instance.prepare(self.arena.allocator(), config, &service_definitions);
    const service_list = try arena.allocator().alloc(zupnp.upnp.definition.ServiceDefinition, service_definitions.items.len);
    for (service_definitions.items) |service_definition, i| {
        // Publish each service's SCPD XML and build its URL entries.
        const scpd_url = try self.scpd_endpoint.?.addFile(device_parameters.UDN, service_definition.service_id, service_definition.scpd_xml);
        service_list[i] = .{
            .serviceType = service_definition.service_type,
            .serviceId = service_definition.service_id,
            .SCPDURL = scpd_url,
            .controlURL = try std.fmt.allocPrint(arena.allocator(), "/control/{s}/{s}", .{ device_parameters.UDN, service_definition.service_id }),
            .eventSubURL = try std.fmt.allocPrint(arena.allocator(), "/event/{s}/{s}", .{ device_parameters.UDN, service_definition.service_id }),
        };
    }
    // Assemble the device description document from the user parameters.
    const device = zupnp.upnp.definition.Device{
        .root = .{
            .device = .{
                .deviceType = T.device_type,
                .UDN = device_parameters.UDN,
                .friendlyName = device_parameters.friendlyName,
                .manufacturer = device_parameters.manufacturer,
                .manufacturerURL = device_parameters.manufacturerURL,
                .modelDescription = device_parameters.modelDescription,
                .modelName = device_parameters.modelName,
                .modelNumber = device_parameters.modelNumber,
                .modelURL = device_parameters.modelURL,
                .serialNumber = device_parameters.serialNumber,
                .UPC = device_parameters.UPC,
                .iconList = device_parameters.iconList,
                .serviceList = .{
                    .service = service_list,
                },
            },
        },
    };
    const device_document = try xml.encode(arena.allocator(), device);
    defer device_document.deinit();
    var device_str = try device_document.toString();
    defer device_str.deinit();
    // TODO Put all of this much earlier than here
    if (self.devices.contains(device_parameters.UDN)) {
        logger.err("Device {s} is already registered", .{device_parameters.UDN});
        // NOTE(review): this returns `zupnp.Error` itself rather than a
        // specific error value — confirm this is a concrete error and not
        // the error *set* (same at the second return site below).
        return zupnp.Error;
    }
    const entry = try self.devices.getOrPutValue(device_parameters.UDN, .{
        .instance = @ptrCast(*anyopaque, instance),
        .allocator = self.arena.allocator(),
        .deinitFn = c.mutateCallback(T, "deinit", RegisteredDevice.DeinitFn),
        .handleActionFn = c.mutateCallback(T, "handleAction", RegisteredDevice.HandleActionFn).?,
        .handleEventSubscriptionFn = c.mutateCallback(T, "handleEventSubscription", RegisteredDevice.HandleEventSubscriptionFn).?,
    });
    errdefer {
        entry.value_ptr.deinit();
        _ = self.devices.remove(device_parameters.UDN);
    }
    if (c.is_error(c.UpnpRegisterRootDevice2(
        c.UPNPREG_BUF_DESC,
        device_str.string.ptr,
        device_str.string.len,
        1, // TODO wtf does this do?
        onEvent,
        @ptrCast(*const anyopaque, entry.value_ptr),
        &entry.value_ptr.device_handle
    ))) |err| {
        logger.err("Failed to register device: {s}", .{err});
        return zupnp.Error;
    }
    logger.info("Registered device {s} with UDN {s}", .{ T, device_parameters.UDN });
    return instance;
}
/// libupnp event callback; `cookie` is the *RegisteredDevice we passed to
/// UpnpRegisterRootDevice2.
fn onEvent(event_type: c.Upnp_EventType, event: ?*const anyopaque, cookie: ?*anyopaque) callconv(.C) c_int {
    var device = c.mutate(*RegisteredDevice, cookie);
    if (event_type == c.UPNP_CONTROL_ACTION_REQUEST) {
        onAction(device, event);
    } else if (event_type == c.UPNP_EVENT_SUBSCRIPTION_REQUEST) {
        onEventSubscribe(device, event);
    } else {
        logger.info("Unhandled event type {}", .{event_type});
    }
    return 0;
}
/// Dispatch a UPNP_CONTROL_ACTION_REQUEST to the device's action handler
/// and write the result document / error code back into the request.
fn onAction(device: *RegisteredDevice, event: ?*const anyopaque) void {
    // Per-request arena: everything the handler allocates dies here.
    var arena = ArenaAllocator.init(device.allocator);
    defer arena.deinit();
    var action_request = c.mutate(*c.UpnpActionRequest, event);
    const action = zupnp.upnp.device.ActionRequest{
        .allocator = arena.allocator(),
        .handle = action_request,
    };
    var result: zupnp.upnp.device.ActionResult = device.handleActionFn(device.instance, action);
    if (result.action_result) |action_result| {
        _ = c.UpnpActionRequest_set_ActionResult(action_request, c.mutate(*c.IXML_Document, action_result.handle));
    }
    _ = c.UpnpActionRequest_set_ErrCode(action_request, result.err_code);
}
/// Dispatch a UPNP_EVENT_SUBSCRIPTION_REQUEST to the device's handler and,
/// if it returned a property set, accept the subscription with it.
fn onEventSubscribe(device: *RegisteredDevice, event: ?*const anyopaque) void {
    var event_subscription_request = c.mutate(*c.UpnpSubscriptionRequest, event);
    var event_subscription = zupnp.upnp.device.EventSubscriptionRequest{
        .handle = event_subscription_request,
    };
    var result: zupnp.upnp.device.EventSubscriptionResult = device.handleEventSubscriptionFn(device.instance, event_subscription);
    defer result.deinit();
    // No property set means the handler declined; nothing else to do.
    if (result.property_set) |property_set| {
        if (c.is_error(c.UpnpAcceptSubscriptionExt(
            device.device_handle,
            event_subscription.getDeviceUdn(),
            event_subscription.getServiceId(),
            c.mutate(*c.IXML_Document, property_set.handle),
            event_subscription.getSid()
        ))) |err| {
            // Best-effort: log and continue, matching libupnp's C callback model.
            logger.warn("Failed to accept event subscription: {s}", .{err});
        }
    }
}
/// Web endpoint serving SCPD XML documents under "/scpd/<udn>/<service_id>".
const ScpdEndpoint = struct {
    const base_url = "/scpd";

    allocator: Allocator,
    xml_files: std.StringHashMap([]const u8),

    /// Endpoint setup hook invoked by the web server framework.
    pub fn prepare(self: *ScpdEndpoint, config: struct { allocator: Allocator }) !void {
        self.allocator = config.allocator;
        self.xml_files = std.StringHashMap([]const u8).init(config.allocator);
    }

    /// Register one SCPD document and return the URL it is served under.
    pub fn addFile(
        self: *ScpdEndpoint,
        udn: []const u8,
        service_id: []const u8,
        contents: []const u8
    ) ![]const u8 {
        const url = try std.fmt.allocPrint(self.allocator, "{s}/{s}/{s}", .{ base_url, udn, service_id });
        try self.xml_files.put(url, contents);
        return url;
    }

    /// Serve a previously registered document, or 404 for unknown paths.
    pub fn get(self: *ScpdEndpoint, request: *const zupnp.web.ServerGetRequest) zupnp.web.ServerResponse {
        if (self.xml_files.get(request.filename)) |contents| {
            return zupnp.web.ServerResponse.contents(.{ .content_type = "text/xml", .contents = contents });
        }
        return zupnp.web.ServerResponse.notFound();
    }
};
|
src/upnp/device/manager.zig
|
const std = @import("std");
/// A 2D grid coordinate parsed from "X,Y" text.
const Point = struct {
    x: u32,
    y: u32,

    /// Parse "X,Y"; errors if either component is missing, a third
    /// component is present, or a component is not a decimal u32.
    fn parse(text: []const u8) !Point {
        var parts = std.mem.tokenize(u8, text, ",");
        const x_text = parts.next() orelse return error.NoX;
        const y_text = parts.next() orelse return error.NoY;
        if (parts.next()) |_| return error.TrailingText;
        const x = try std.fmt.parseInt(u32, x_text, 10);
        const y = try std.fmt.parseInt(u32, y_text, 10);
        return Point{ .x = x, .y = y };
    }

    /// Component-wise equality.
    fn eql(self: Point, other: Point) bool {
        return (self.x == other.x) and (self.y == other.y);
    }
};
/// A horizontal, vertical, or 45-degree segment between two points,
/// iterable point by point (inclusive of both endpoints).
const Line = struct {
    begin: Point,
    end: Point,

    const Iterator = struct {
        // Next point to yield; null once the end point has been returned.
        current: ?Point,
        end: Point,
        // Per-step movement in each axis: -1, 0, or +1.
        delta: struct {
            x: i32,
            y: i32,
        },

        fn next(self: *Iterator) ?Point {
            const here = self.current orelse return null;
            if (here.eql(self.end)) {
                // The endpoint was just yielded; stop after this one.
                self.current = null;
            } else {
                self.current = Point{
                    .x = @intCast(u32, self.delta.x + @intCast(i32, here.x)),
                    .y = @intCast(u32, self.delta.y + @intCast(i32, here.y)),
                };
            }
            return here;
        }
    };

    /// Parse "X1,Y1 -> X2,Y2".
    fn parse(text: []const u8) !Line {
        const arrow = " -> ";
        const idx = std.mem.indexOf(u8, text, arrow) orelse return error.InvalidArrow;
        return Line{
            .begin = try Point.parse(text[0..idx]),
            .end = try Point.parse(text[idx + arrow.len ..]),
        };
    }

    /// Iterator over every point from begin to end, inclusive.
    fn iterate(self: Line) Iterator {
        const dx: i32 = if (self.begin.x < self.end.x) 1 else if (self.begin.x > self.end.x) -1 else 0;
        const dy: i32 = if (self.begin.y < self.end.y) 1 else if (self.begin.y > self.end.y) -1 else 0;
        return Iterator{
            .current = self.begin,
            .end = self.end,
            .delta = .{ .x = dx, .y = dy },
        };
    }
};
/// Read one line segment per stdin line, walk each segment point by point,
/// and report how many grid points are covered by more than one segment.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();
    defer _ = gpa.deinit();
    // For every covered point, how many segments pass through it.
    var coverage = std.AutoArrayHashMap(Point, usize).init(allocator);
    defer coverage.deinit();
    var buf: [80]u8 = undefined;
    const stdin = std.io.getStdIn().reader();
    while (try stdin.readUntilDelimiterOrEof(&buf, '\n')) |line_str| {
        var walk = (try Line.parse(line_str)).iterate();
        while (walk.next()) |point| {
            const entry = try coverage.getOrPut(point);
            entry.value_ptr.* = if (entry.found_existing) entry.value_ptr.* + 1 else @as(usize, 1);
        }
    }
    // count number of points where count > 1
    var overlaps: usize = 0;
    var iter = coverage.iterator();
    while (iter.next()) |entry| {
        if (entry.value_ptr.* > 1) {
            overlaps += 1;
        }
    }
    std.log.info("count: {}", .{overlaps});
}
test "Point.parse()" {
    const expectEqual = std.testing.expectEqual;
    // Smallest-X, largest-single-digit-Y sample from the puzzle input.
    const parsed = try Point.parse("0,9");
    try expectEqual(@as(u32, 0), parsed.x);
    try expectEqual(@as(u32, 9), parsed.y);
}
test "Line.parse()" {
    const expectEqual = std.testing.expectEqual;
    const parsed = try Line.parse("0,9 -> 5,9");
    // Both endpoints must be split and parsed correctly.
    try expectEqual(Point{ .x = 0, .y = 9 }, parsed.begin);
    try expectEqual(Point{ .x = 5, .y = 9 }, parsed.end);
}
test "Line.iterate()" {
    const expectEqual = std.testing.expectEqual;
    const line = try Line.parse("3,4 -> 1,4");
    var walk = line.iterate();
    // Walks from begin to end inclusive (here right-to-left), then yields null.
    const expected = [_]Point{
        .{ .x = 3, .y = 4 },
        .{ .x = 2, .y = 4 },
        .{ .x = 1, .y = 4 },
    };
    for (expected) |p| {
        try expectEqual(p, walk.next().?);
    }
    try expectEqual(@as(?Point, null), walk.next());
}
|
src/05.zig
|
const std = @import("std");
const testing = std.testing;
/// Validates a number stream where each value (after a preamble) must be the
/// sum of two distinct values among the previous `preamble_length` entries,
/// and finds a contiguous run summing to a target (2020 day 9 style puzzle).
pub const ROM = struct {
    // Size of the sliding window each new number must be a sum from.
    preamble_length: usize,
    // How many numbers have been recorded so far.
    line_count: usize,
    // All numbers, in input order (fixed capacity of 1500 entries).
    numbers: [1500]usize,
    // Membership set of exactly the current window, for O(1)
    // "is the complement present?" checks in add_number.
    seen: std.AutoHashMap(usize, void),

    /// Create a ROM with the given preamble/window length.
    /// NOTE(review): the allocator is hard-wired to page_allocator; callers
    /// cannot inject one without an interface change.
    pub fn init(len: usize) ROM {
        const allocator = std.heap.page_allocator;
        var self = ROM{
            .preamble_length = len,
            .numbers = undefined,
            .seen = std.AutoHashMap(usize, void).init(allocator),
            .line_count = 0,
        };
        return self;
    }

    /// Free the membership set.
    pub fn deinit(self: *ROM) void {
        self.seen.deinit();
    }

    /// Parse and record one decimal number. Returns 0 while the stream is
    /// valid; returns the number itself when it is NOT the sum of two
    /// distinct values from the preceding window.
    pub fn add_number(self: *ROM, line: []const u8) usize {
        const number = std.fmt.parseInt(usize, line, 10) catch unreachable;
        if (self.line_count >= self.preamble_length) {
            var found = false;
            const start = self.line_count - self.preamble_length;
            var pos: usize = start;
            while (pos < self.line_count) : (pos += 1) {
                const candidate = self.numbers[pos];
                // Only values <= number can be one of the two addends.
                if (candidate > number) continue;
                const missing = number - candidate;
                // The two addends must be distinct values.
                if (candidate == missing) continue;
                if (self.seen.contains(missing)) {
                    // std.debug.warn("FOUND {} {} = {}\n", .{ candidate, missing, number });
                    found = true;
                    break;
                }
            }
            if (!found) {
                // std.debug.warn("NOT FOUND {}\n", .{number});
                return number;
            }
            // Slide the window: evict the oldest number from the set.
            _ = self.seen.remove(self.numbers[start]);
        }
        self.numbers[self.line_count] = number;
        _ = self.seen.put(number, {}) catch unreachable;
        self.line_count += 1;
        return 0;
    }

    /// Find a contiguous run of recorded numbers summing exactly to `target`
    /// and return min+max of that run; returns 0 when no such run exists.
    pub fn find_contiguous_sum(self: *ROM, target: usize) usize {
        var start: usize = 0;
        while (start < self.line_count) : (start += 1) {
            var sum: usize = 0;
            var end: usize = start;
            while (end < self.line_count) : (end += 1) {
                sum += self.numbers[end];
                if (sum < target) continue;
                if (sum > target) break; // overshot: restart from next start
                // std.debug.warn("FOUND SUM {} =", .{target});
                var min: usize = std.math.maxInt(usize);
                var max: usize = 0;
                var pos: usize = start;
                while (pos <= end) : (pos += 1) {
                    // std.debug.warn(" {}", .{self.numbers[pos]});
                    if (min > self.numbers[pos]) min = self.numbers[pos];
                    if (max < self.numbers[pos]) max = self.numbers[pos];
                }
                // std.debug.warn("\n", .{});
                // std.debug.warn("MIN {} MAX {}\n", .{ min, max });
                return min + max;
            }
        }
        return 0;
    }
};
test "sample" {
    const data: []const u8 =
        \\35
        \\20
        \\15
        \\25
        \\47
        \\40
        \\62
        \\55
        \\65
        \\95
        \\102
        \\117
        \\150
        \\182
        \\127
        \\219
        \\299
        \\277
        \\309
        \\576
    ;
    var rom = ROM.init(5);
    defer rom.deinit();
    // Feed lines until add_number reports the first invalid value.
    var first_invalid: usize = 0;
    var lines = std.mem.split(u8, data, "\n");
    while (lines.next()) |line| {
        first_invalid = rom.add_number(line);
        if (first_invalid > 0) break;
    }
    try testing.expectEqual(@as(usize, 127), first_invalid);
    // Part 2: min+max of the contiguous run summing to the invalid value.
    try testing.expectEqual(@as(usize, 62), rom.find_contiguous_sum(127));
}
|
2020/p09/rom.zig
|
const std = @import("std");
const print = std.debug.print;
/// Day 16: find the "Sue" whose listed attributes are consistent with the
/// known readings — part 1 with exact matches, part 2 with range rules for
/// cats/trees (greater than) and pomeranians/goldfish (fewer than).
pub fn main() anyerror!void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    var allocator = &gpa.allocator;
    var file = try std.fs.cwd().openFile(
        "./inputs/day16.txt",
        .{
            .read = true,
        },
    );
    // FIX: the file handle was never closed.
    defer file.close();
    // FIX: keep the BufferedReader in a named variable. The original called
    // `.reader()` directly on the temporary returned by bufferedReader(),
    // so the reader referenced buffer state that was dead after the statement.
    var buffered = std.io.bufferedReader(file.reader());
    var reader = buffered.reader();
    var line_buffer: [1024]u8 = undefined;
    // The known attribute readings every candidate is checked against.
    var attrs = std.StringHashMap(usize).init(allocator);
    defer attrs.deinit();
    try attrs.put("children", 3);
    try attrs.put("cats", 7);
    try attrs.put("samoyeds", 2);
    try attrs.put("pomeranians", 3);
    try attrs.put("akitas", 0);
    try attrs.put("vizslas", 0);
    try attrs.put("goldfish", 5);
    try attrs.put("trees", 3);
    try attrs.put("cars", 2);
    try attrs.put("perfumes", 1);
    // Part 1: every attribute listed for a Sue must match exactly.
    line: while (try reader.readUntilDelimiterOrEof(&line_buffer, '\n')) |line| {
        var tokens = std.mem.tokenize(u8, line, " ");
        _ = tokens.next(); // Sue
        const number = try parseInt(tokens.next().?);
        while (tokens.next()) |key| {
            const val = try parseInt(tokens.next().?);
            if (attrs.get(std.mem.trimRight(u8, key, ":"))) |v| {
                if (val != v) {
                    continue :line;
                }
            }
        }
        print("Part 1: {d}\n", .{number});
        break;
    }
    try file.seekTo(0);
    // Re-read from the start with a fresh buffered reader; the first one may
    // still hold stale buffered bytes from before the seek.
    var buffered2 = std.io.bufferedReader(file.reader());
    var reader2 = buffered2.reader();
    // Part 2: cats/trees are ">" readings, pomeranians/goldfish are "<";
    // everything else still matches exactly.
    line: while (try reader2.readUntilDelimiterOrEof(&line_buffer, '\n')) |line| {
        var tokens = std.mem.tokenize(u8, line, " ");
        _ = tokens.next(); // Sue
        const number = try parseInt(tokens.next().?);
        while (tokens.next()) |key| {
            const val = try parseInt(tokens.next().?);
            const k = std.mem.trimRight(u8, key, ":");
            if (attrs.get(k)) |v| {
                if (std.mem.eql(u8, "cats", k) or std.mem.eql(u8, "trees", k)) {
                    if (val <= v) {
                        continue :line;
                    }
                } else if (std.mem.eql(u8, "pomeranians", k) or std.mem.eql(u8, "goldfish", k)) {
                    if (val >= v) {
                        continue :line;
                    }
                } else if (val != v) {
                    continue :line;
                }
            }
        }
        print("Part 2: {d}\n", .{number});
        break;
    }
}
/// Parse a decimal usize from a token, first stripping any trailing
/// ':' or ',' punctuation (e.g. "3," or "12:").
fn parseInt(token: []const u8) !usize {
    const trimmed = std.mem.trimRight(u8, token, ":,");
    return std.fmt.parseInt(usize, trimmed, 10);
}
|
src/day16.zig
|